Diffstat
-rw-r--r--drivers/accel/amdxdna/aie2_ctx.c65
-rw-r--r--drivers/accel/amdxdna/aie2_message.c6
-rw-r--r--drivers/accel/amdxdna/aie2_msg_priv.h10
-rw-r--r--drivers/accel/amdxdna/aie2_pci.c13
-rw-r--r--drivers/accel/amdxdna/amdxdna_ctx.c22
-rw-r--r--drivers/accel/amdxdna/amdxdna_gem.c411
-rw-r--r--drivers/accel/amdxdna/amdxdna_gem.h24
-rw-r--r--drivers/accel/amdxdna/amdxdna_pci_drv.c11
-rw-r--r--drivers/accel/amdxdna/amdxdna_pci_drv.h2
-rw-r--r--drivers/accel/habanalabs/Kconfig2
-rw-r--r--drivers/accel/habanalabs/common/habanalabs_ioctl.c2
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c4
-rw-r--r--drivers/accel/ivpu/ivpu_drv.h1
-rw-r--r--drivers/accel/ivpu/ivpu_fw.h1
-rw-r--r--drivers/accel/ivpu/ivpu_gem.c12
-rw-r--r--drivers/accel/ivpu/ivpu_pm.c20
-rw-r--r--drivers/accel/qaic/qaic_data.c8
-rw-r--r--drivers/accel/qaic/qaic_debugfs.c2
-rw-r--r--drivers/acpi/Kconfig3
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpi_extlog.c3
-rw-r--r--drivers/acpi/acpi_lpit.c2
-rw-r--r--drivers/acpi/acpi_mrrm.c185
-rw-r--r--drivers/acpi/acpi_pad.c2
-rw-r--r--drivers/acpi/acpi_pcc.c13
-rw-r--r--drivers/acpi/acpica/acapps.h4
-rw-r--r--drivers/acpi/acpica/accommon.h2
-rw-r--r--drivers/acpi/acpica/acconvert.h2
-rw-r--r--drivers/acpi/acpica/acdebug.h4
-rw-r--r--drivers/acpi/acpica/acdispat.h2
-rw-r--r--drivers/acpi/acpica/acevents.h2
-rw-r--r--drivers/acpi/acpica/acglobal.h2
-rw-r--r--drivers/acpi/acpica/achware.h2
-rw-r--r--drivers/acpi/acpica/acinterp.h5
-rw-r--r--drivers/acpi/acpica/aclocal.h6
-rw-r--r--drivers/acpi/acpica/acmacros.h2
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/acobject.h2
-rw-r--r--drivers/acpi/acpica/acopcode.h2
-rw-r--r--drivers/acpi/acpica/acparser.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h2
-rw-r--r--drivers/acpi/acpica/acresrc.h2
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h2
-rw-r--r--drivers/acpi/acpica/amlcode.h2
-rw-r--r--drivers/acpi/acpica/amlresrc.h10
-rw-r--r--drivers/acpi/acpica/dbhistry.c2
-rw-r--r--drivers/acpi/acpica/dsargs.c2
-rw-r--r--drivers/acpi/acpica/dscontrol.c2
-rw-r--r--drivers/acpi/acpica/dsdebug.c2
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dsmthdat.c1
-rw-r--r--drivers/acpi/acpica/dsobject.c2
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/acpica/dspkginit.c2
-rw-r--r--drivers/acpi/acpica/dsutils.c9
-rw-r--r--drivers/acpi/acpica/dswexec.c2
-rw-r--r--drivers/acpi/acpica/dswload.c2
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/dswscope.c2
-rw-r--r--drivers/acpi/acpica/dswstate.c2
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evglock.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c2
-rw-r--r--drivers/acpi/acpica/evgpeblk.c2
-rw-r--r--drivers/acpi/acpica/evgpeinit.c2
-rw-r--r--drivers/acpi/acpica/evgpeutil.c2
-rw-r--r--drivers/acpi/acpica/evhandler.c2
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evregion.c2
-rw-r--r--drivers/acpi/acpica/evrgnini.c2
-rw-r--r--drivers/acpi/acpica/evxface.c2
-rw-r--r--drivers/acpi/acpica/evxfevnt.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c2
-rw-r--r--drivers/acpi/acpica/evxfregn.c2
-rw-r--r--drivers/acpi/acpica/exconcat.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c2
-rw-r--r--drivers/acpi/acpica/exconvrt.c6
-rw-r--r--drivers/acpi/acpica/excreate.c2
-rw-r--r--drivers/acpi/acpica/exdebug.c2
-rw-r--r--drivers/acpi/acpica/exdump.c2
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c2
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exmutex.c2
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg1.c2
-rw-r--r--drivers/acpi/acpica/exoparg2.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c2
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exprep.c2
-rw-r--r--drivers/acpi/acpica/exregion.c2
-rw-r--r--drivers/acpi/acpica/exresnte.c2
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/exserial.c8
-rw-r--r--drivers/acpi/acpica/exstore.c2
-rw-r--r--drivers/acpi/acpica/exstoren.c2
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c2
-rw-r--r--drivers/acpi/acpica/extrace.c53
-rw-r--r--drivers/acpi/acpica/exutils.c2
-rw-r--r--drivers/acpi/acpica/hwacpi.c2
-rw-r--r--drivers/acpi/acpica/hwesleep.c2
-rw-r--r--drivers/acpi/acpica/hwgpe.c2
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c2
-rw-r--r--drivers/acpi/acpica/hwxface.c2
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c2
-rw-r--r--drivers/acpi/acpica/nsarguments.c2
-rw-r--r--drivers/acpi/acpica/nsconvert.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c2
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c2
-rw-r--r--drivers/acpi/acpica/nsprepkg.c2
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c4
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/nswalk.c2
-rw-r--r--drivers/acpi/acpica/nsxfname.c2
-rw-r--r--drivers/acpi/acpica/psargs.c2
-rw-r--r--drivers/acpi/acpica/psloop.c2
-rw-r--r--drivers/acpi/acpica/psobject.c54
-rw-r--r--drivers/acpi/acpica/psopcode.c2
-rw-r--r--drivers/acpi/acpica/psopinfo.c2
-rw-r--r--drivers/acpi/acpica/psparse.c2
-rw-r--r--drivers/acpi/acpica/psscope.c2
-rw-r--r--drivers/acpi/acpica/pstree.c2
-rw-r--r--drivers/acpi/acpica/psutils.c2
-rw-r--r--drivers/acpi/acpica/pswalk.c2
-rw-r--r--drivers/acpi/acpica/psxface.c2
-rw-r--r--drivers/acpi/acpica/rsaddr.c13
-rw-r--r--drivers/acpi/acpica/rscalc.c22
-rw-r--r--drivers/acpi/acpica/rslist.c12
-rw-r--r--drivers/acpi/acpica/tbdata.c2
-rw-r--r--drivers/acpi/acpica/tbfadt.c2
-rw-r--r--drivers/acpi/acpica/tbfind.c6
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbprint.c2
-rw-r--r--drivers/acpi/acpica/tbutils.c2
-rw-r--r--drivers/acpi/acpica/tbxface.c2
-rw-r--r--drivers/acpi/acpica/tbxfload.c2
-rw-r--r--drivers/acpi/acpica/tbxfroot.c2
-rw-r--r--drivers/acpi/acpica/utaddress.c2
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utascii.c2
-rw-r--r--drivers/acpi/acpica/utbuffer.c2
-rw-r--r--drivers/acpi/acpica/utcache.c4
-rw-r--r--drivers/acpi/acpica/utcksum.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c2
-rw-r--r--drivers/acpi/acpica/utdecode.c2
-rw-r--r--drivers/acpi/acpica/utdelete.c2
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c2
-rw-r--r--drivers/acpi/acpica/uthex.c2
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utinit.c2
-rw-r--r--drivers/acpi/acpica/utlock.c2
-rw-r--r--drivers/acpi/acpica/utobject.c2
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utpredef.c2
-rw-r--r--drivers/acpi/acpica/utprint.c9
-rw-r--r--drivers/acpi/acpica/utresrc.c14
-rw-r--r--drivers/acpi/acpica/uttrack.c2
-rw-r--r--drivers/acpi/acpica/utuuid.c2
-rw-r--r--drivers/acpi/acpica/utxface.c2
-rw-r--r--drivers/acpi/acpica/utxfinit.c2
-rw-r--r--drivers/acpi/apei/Kconfig1
-rw-r--r--drivers/acpi/apei/einj-core.c62
-rw-r--r--drivers/acpi/apei/ghes.c2
-rw-r--r--drivers/acpi/battery.c23
-rw-r--r--drivers/acpi/bus.c6
-rw-r--r--drivers/acpi/cppc_acpi.c329
-rw-r--r--drivers/acpi/ec.c6
-rw-r--r--drivers/acpi/numa/srat.c15
-rw-r--r--drivers/acpi/osi.c1
-rw-r--r--drivers/acpi/pci_root.c2
-rw-r--r--drivers/acpi/platform_profile.c3
-rw-r--r--drivers/acpi/processor_idle.c14
-rw-r--r--drivers/acpi/processor_perflib.c1
-rw-r--r--drivers/acpi/processor_throttling.c5
-rw-r--r--drivers/acpi/resource.c2
-rw-r--r--drivers/acpi/tables.c8
-rw-r--r--drivers/acpi/thermal.c10
-rw-r--r--drivers/acpi/viot.c2
-rw-r--r--drivers/android/binderfs.c4
-rw-r--r--drivers/base/arch_topology.c52
-rw-r--r--drivers/base/auxiliary.c108
-rw-r--r--drivers/base/component.c3
-rw-r--r--drivers/base/cpu.c3
-rw-r--r--drivers/base/devres.c20
-rw-r--r--drivers/base/faux.c22
-rw-r--r--drivers/base/firmware_loader/Kconfig4
-rw-r--r--drivers/base/firmware_loader/main.c34
-rw-r--r--drivers/base/memory.c51
-rw-r--r--drivers/base/node.c11
-rw-r--r--drivers/base/platform-msi.c1
-rw-r--r--drivers/base/platform.c2
-rw-r--r--drivers/base/power/main.c218
-rw-r--r--drivers/base/power/runtime.c46
-rw-r--r--drivers/base/power/sysfs.c15
-rw-r--r--drivers/base/power/wakeup.c14
-rw-r--r--drivers/base/power/wakeup_stats.c2
-rw-r--r--drivers/base/regmap/Kconfig4
-rw-r--r--drivers/base/regmap/regcache.c13
-rw-r--r--drivers/base/regmap/regmap-irq.c103
-rw-r--r--drivers/base/swnode.c2
-rw-r--r--drivers/base/topology.c52
-rw-r--r--drivers/bcma/driver_gpio.c8
-rw-r--r--drivers/block/Kconfig19
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/brd.c225
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/block/rnbd/rnbd-srv.c7
-rw-r--r--drivers/block/ublk_drv.c569
-rw-r--r--drivers/block/virtio_blk.c4
-rw-r--r--drivers/block/zloop.c1385
-rw-r--r--drivers/block/zram/zram_drv.c331
-rw-r--r--drivers/bluetooth/Kconfig12
-rw-r--r--drivers/bluetooth/btintel.c13
-rw-r--r--drivers/bluetooth/btintel.h6
-rw-r--r--drivers/bluetooth/btintel_pcie.c141
-rw-r--r--drivers/bluetooth/btintel_pcie.h19
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c4
-rw-r--r--drivers/bluetooth/btmtksdio.c2
-rw-r--r--drivers/bluetooth/btnxpuart.c58
-rw-r--r--drivers/bluetooth/btusb.c204
-rw-r--r--drivers/bluetooth/hci_aml.c3
-rw-r--r--drivers/bus/brcmstb_gisb.c10
-rw-r--r--drivers/bus/fsl-mc/dprc-driver.c2
-rw-r--r--drivers/bus/fsl-mc/dprc.c4
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-allocator.c21
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c12
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-private.h2
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-uapi.c11
-rw-r--r--drivers/bus/fsl-mc/mc-io.c19
-rw-r--r--drivers/bus/fsl-mc/mc-sys.c2
-rw-r--r--drivers/bus/moxtet.c6
-rw-r--r--drivers/bus/ti-sysc.c68
-rw-r--r--drivers/cache/sifive_ccache.c2
-rw-r--r--drivers/cdrom/cdrom.c3
-rw-r--r--drivers/char/agp/amd64-agp.c2
-rw-r--r--drivers/char/agp/intel-gtt.c55
-rw-r--r--drivers/char/agp/nvidia-agp.c1
-rw-r--r--drivers/char/hw_random/atmel-rng.c11
-rw-r--r--drivers/char/hw_random/mtk-rng.c9
-rw-r--r--drivers/char/hw_random/npcm-rng.c9
-rw-r--r--drivers/char/hw_random/rockchip-rng.c73
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c761
-rw-r--r--drivers/char/ipmi/ipmi_si.h10
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c116
-rw-r--r--drivers/char/ipmi/ipmi_si_parisc.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_pci.c52
-rw-r--r--drivers/char/ipmi/ipmi_si_platform.c27
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c6
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c92
-rw-r--r--drivers/char/mem.c18
-rw-r--r--drivers/char/random.c56
-rw-r--r--drivers/char/tpm/Kconfig10
-rw-r--r--drivers/char/tpm/Makefile1
-rw-r--r--drivers/char/tpm/eventlog/tpm1.c7
-rw-r--r--drivers/char/tpm/tpm_crb_ffa.c74
-rw-r--r--drivers/char/tpm/tpm_svsm.c125
-rw-r--r--drivers/clk/Kconfig1
-rw-r--r--drivers/clk/Makefile1
-rw-r--r--drivers/clk/bcm/clk-kona.c18
-rw-r--r--drivers/clk/bcm/clk-kona.h2
-rw-r--r--drivers/clk/bcm/clk-raspberrypi.c3
-rw-r--r--drivers/clk/davinci/pll.c26
-rw-r--r--drivers/clk/meson/Kconfig16
-rw-r--r--drivers/clk/meson/g12a.c1
-rw-r--r--drivers/clk/qcom/apcs-sdx55.c6
-rw-r--r--drivers/clk/qcom/camcc-sa8775p.c103
-rw-r--r--drivers/clk/qcom/camcc-sm6350.c18
-rw-r--r--drivers/clk/qcom/clk-rpmh.c11
-rw-r--r--drivers/clk/qcom/dispcc-sm6350.c3
-rw-r--r--drivers/clk/qcom/gcc-msm8939.c4
-rw-r--r--drivers/clk/qcom/gcc-sm6350.c6
-rw-r--r--drivers/clk/qcom/gcc-sm8650.c2
-rw-r--r--drivers/clk/qcom/gcc-sm8750.c3
-rw-r--r--drivers/clk/qcom/gcc-x1e80100.c4
-rw-r--r--drivers/clk/qcom/gpucc-sm6350.c6
-rw-r--r--drivers/clk/renesas/Kconfig5
-rw-r--r--drivers/clk/renesas/Makefile1
-rw-r--r--drivers/clk/renesas/r9a09g047-cpg.c52
-rw-r--r--drivers/clk/renesas/r9a09g056-cpg.c152
-rw-r--r--drivers/clk/renesas/r9a09g057-cpg.c36
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c3
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.c3
-rw-r--r--drivers/clk/renesas/rzv2h-cpg.c186
-rw-r--r--drivers/clk/renesas/rzv2h-cpg.h94
-rw-r--r--drivers/clk/rockchip/Makefile1
-rw-r--r--drivers/clk/rockchip/clk-gate-grf.c105
-rw-r--r--drivers/clk/rockchip/clk-mmc-phase.c24
-rw-r--r--drivers/clk/rockchip/clk-pll.c11
-rw-r--r--drivers/clk/rockchip/clk-rk3036.c11
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c2
-rw-r--r--drivers/clk/rockchip/clk-rk3328.c6
-rw-r--r--drivers/clk/rockchip/clk-rk3528.c83
-rw-r--r--drivers/clk/rockchip/clk-rk3568.c3
-rw-r--r--drivers/clk/rockchip/clk-rk3576.c60
-rw-r--r--drivers/clk/rockchip/clk-rk3588.c1
-rw-r--r--drivers/clk/rockchip/clk-rv1126.c2
-rw-r--r--drivers/clk/rockchip/clk.c38
-rw-r--r--drivers/clk/rockchip/clk.h75
-rw-r--r--drivers/clk/samsung/clk-exynos4.c74
-rw-r--r--drivers/clk/samsung/clk-exynosautov920.c338
-rw-r--r--drivers/clk/socfpga/clk-pll-s10.c6
-rw-r--r--drivers/clk/socfpga/clk-pll.c4
-rw-r--r--drivers/clk/sophgo/Kconfig19
-rw-r--r--drivers/clk/sophgo/Makefile2
-rw-r--r--drivers/clk/sophgo/clk-cv1800.c2
-rw-r--r--drivers/clk/sophgo/clk-sg2044-pll.c628
-rw-r--r--drivers/clk/sophgo/clk-sg2044.c1812
-rw-r--r--drivers/clk/spacemit/Kconfig18
-rw-r--r--drivers/clk/spacemit/Makefile5
-rw-r--r--drivers/clk/spacemit/ccu-k1.c1164
-rw-r--r--drivers/clk/spacemit/ccu_common.h48
-rw-r--r--drivers/clk/spacemit/ccu_ddn.c83
-rw-r--r--drivers/clk/spacemit/ccu_ddn.h48
-rw-r--r--drivers/clk/spacemit/ccu_mix.c268
-rw-r--r--drivers/clk/spacemit/ccu_mix.h218
-rw-r--r--drivers/clk/spacemit/ccu_pll.c157
-rw-r--r--drivers/clk/spacemit/ccu_pll.h86
-rw-r--r--drivers/clk/sunxi-ng/Kconfig48
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h616.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c25
-rw-r--r--drivers/clk/sunxi/Kconfig10
-rw-r--r--drivers/clk/thead/clk-th1520-ap.c196
-rw-r--r--drivers/clocksource/Kconfig20
-rw-r--r--drivers/clocksource/Makefile2
-rw-r--r--drivers/clocksource/renesas-ostm.c4
-rw-r--r--drivers/clocksource/timer-econet-en751221.c216
-rw-r--r--drivers/clocksource/timer-nxp-stm.c495
-rw-r--r--drivers/clocksource/timer-tegra186.c100
-rw-r--r--drivers/cpufreq/Kconfig12
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c10
-rw-r--r--drivers/cpufreq/amd-pstate-ut.c29
-rw-r--r--drivers/cpufreq/amd-pstate.c147
-rw-r--r--drivers/cpufreq/amd-pstate.h3
-rw-r--r--drivers/cpufreq/amd_freq_sensitivity.c2
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c109
-rw-r--r--drivers/cpufreq/cpufreq.c463
-rw-r--r--drivers/cpufreq/e_powersaver.c6
-rw-r--r--drivers/cpufreq/elanfreq.c1
-rw-r--r--drivers/cpufreq/intel_pstate.c289
-rw-r--r--drivers/cpufreq/longhaul.c24
-rw-r--r--drivers/cpufreq/powernow-k7.c14
-rw-r--r--drivers/cpufreq/powernow-k8.c2
-rw-r--r--drivers/cpufreq/rcpufreq_dt.rs226
-rw-r--r--drivers/cpufreq/sc520_freq.c1
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c36
-rw-r--r--drivers/cpuidle/cpuidle-psci-domain.c2
-rw-r--r--drivers/cpuidle/cpuidle-psci.c82
-rw-r--r--drivers/cpuidle/cpuidle-psci.h4
-rw-r--r--drivers/cpuidle/governors/menu.c2
-rw-r--r--drivers/cpuidle/governors/teo.c4
-rw-r--r--drivers/crypto/Kconfig7
-rw-r--r--drivers/crypto/Makefile4
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c56
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c17
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c177
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h2
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c2
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c49
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c110
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c45
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h17
-rw-r--r--drivers/crypto/atmel-aes.c5
-rw-r--r--drivers/crypto/atmel-sha.c6
-rw-r--r--drivers/crypto/atmel-tdes.c2
-rw-r--r--drivers/crypto/caam/ctrl.c1
-rw-r--r--drivers/crypto/cavium/Makefile3
-rw-r--r--drivers/crypto/cavium/zip/Makefile12
-rw-r--r--drivers/crypto/cavium/zip/common.h222
-rw-r--r--drivers/crypto/cavium/zip/zip_crypto.c261
-rw-r--r--drivers/crypto/cavium/zip/zip_crypto.h68
-rw-r--r--drivers/crypto/cavium/zip/zip_deflate.c200
-rw-r--r--drivers/crypto/cavium/zip/zip_deflate.h62
-rw-r--r--drivers/crypto/cavium/zip/zip_device.c202
-rw-r--r--drivers/crypto/cavium/zip/zip_device.h108
-rw-r--r--drivers/crypto/cavium/zip/zip_inflate.c223
-rw-r--r--drivers/crypto/cavium/zip/zip_inflate.h62
-rw-r--r--drivers/crypto/cavium/zip/zip_main.c603
-rw-r--r--drivers/crypto/cavium/zip/zip_main.h120
-rw-r--r--drivers/crypto/cavium/zip/zip_mem.c114
-rw-r--r--drivers/crypto/cavium/zip/zip_mem.h78
-rw-r--r--drivers/crypto/cavium/zip/zip_regs.h1347
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes.c15
-rw-r--r--drivers/crypto/ccp/ccp-crypto-des3.c13
-rw-r--r--drivers/crypto/ccp/ccp-crypto-main.c13
-rw-r--r--drivers/crypto/ccp/ccp-ops.c11
-rw-r--r--drivers/crypto/ccp/sev-dev.c254
-rw-r--r--drivers/crypto/ccp/sp-pci.c3
-rw-r--r--drivers/crypto/hisilicon/qm.c4
-rw-r--r--drivers/crypto/img-hash.c41
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-hash.c20
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c2
-rw-r--r--drivers/crypto/intel/iaa/iaa_crypto_main.c87
-rw-r--r--drivers/crypto/intel/qat/Kconfig12
-rw-r--r--drivers/crypto/intel/qat/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_420xx/Makefile1
-rw-r--r--drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c8
-rw-r--r--drivers/crypto/intel/qat/qat_420xx/adf_drv.c10
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/Makefile1
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c12
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_drv.c14
-rw-r--r--drivers/crypto/intel/qat/qat_6xxx/Makefile3
-rw-r--r--drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c845
-rw-r--r--drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h148
-rw-r--r--drivers/crypto/intel/qat/qat_6xxx/adf_drv.c226
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/Makefile1
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c2
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c41
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxxvf/Makefile1
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c2
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/Makefile1
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c2
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/adf_drv.c41
-rw-r--r--drivers/crypto/intel/qat/qat_c62xvf/Makefile1
-rw-r--r--drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c2
-rw-r--r--drivers/crypto/intel/qat/qat_common/Makefile7
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_accel_devices.h24
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_admin.c1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_common.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_services.c3
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_services.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_dc.c (renamed from drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c)50
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_dc.h17
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_fw_config.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h10
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c57
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_config.c6
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_config.h3
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c83
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h10
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c70
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h28
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c818
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h504
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c49
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h15
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_timer.c (renamed from drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c)18
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_timer.h (renamed from drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h)10
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h23
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h99
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h318
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h23
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_comp_algs.c7
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_compression.c1
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_compression.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_hal.c13
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_uclo.c449
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/Makefile1
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c2
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c41
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xccvf/Makefile1
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c2
-rw-r--r--drivers/crypto/marvell/cesa/cesa.c2
-rw-r--r--drivers/crypto/marvell/cesa/cesa.h9
-rw-r--r--drivers/crypto/marvell/cesa/cipher.c3
-rw-r--r--drivers/crypto/marvell/cesa/hash.c2
-rw-r--r--drivers/crypto/marvell/cesa/tdma.c53
-rw-r--r--drivers/crypto/marvell/octeontx2/cn10k_cpt.c89
-rw-r--r--drivers/crypto/marvell/octeontx2/cn10k_cpt.h1
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_common.h35
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c25
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptlf.c5
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptlf.h12
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c18
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c6
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c2
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c19
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c1
-rw-r--r--drivers/crypto/nx/nx-aes-cbc.c8
-rw-r--r--drivers/crypto/nx/nx-aes-ctr.c8
-rw-r--r--drivers/crypto/nx/nx-aes-ecb.c8
-rw-r--r--drivers/crypto/nx/nx-aes-xcbc.c128
-rw-r--r--drivers/crypto/nx/nx-sha256.c130
-rw-r--r--drivers/crypto/nx/nx-sha512.c143
-rw-r--r--drivers/crypto/nx/nx.c19
-rw-r--r--drivers/crypto/nx/nx.h11
-rw-r--r--drivers/crypto/omap-aes.c14
-rw-r--r--drivers/crypto/omap-sham.c14
-rw-r--r--drivers/crypto/padlock-sha.c478
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ahash.c52
-rw-r--r--drivers/crypto/s5p-sss.c24
-rw-r--r--drivers/crypto/sa2ul.c63
-rw-r--r--drivers/crypto/tegra/tegra-se-hash.c52
-rw-r--r--drivers/crypto/xilinx/zynqmp-sha.c98
-rw-r--r--drivers/dax/kmem.c10
-rw-r--r--drivers/dma-buf/dma-buf.c265
-rw-r--r--drivers/dma-buf/dma-fence-unwrap.c51
-rw-r--r--drivers/dma-buf/heaps/system_heap.c3
-rw-r--r--drivers/dma-buf/st-dma-fence.c2
-rw-r--r--drivers/dma-buf/sw_sync.c16
-rw-r--r--drivers/dma-buf/sync_debug.c70
-rw-r--r--drivers/dma-buf/sync_debug.h2
-rw-r--r--drivers/dma-buf/udmabuf.c1
-rw-r--r--drivers/dma/idxd/init.c41
-rw-r--r--drivers/dma/ioat/dca.c2
-rw-r--r--drivers/edac/altera_edac.c10
-rw-r--r--drivers/edac/amd64_edac.c10
-rw-r--r--drivers/edac/bluefield_edac.c20
-rw-r--r--drivers/edac/i10nm_base.c479
-rw-r--r--drivers/edac/ie31200_edac.c7
-rw-r--r--drivers/edac/igen6_edac.c86
-rw-r--r--drivers/edac/mce_amd.c1
-rw-r--r--drivers/edac/skx_common.c1
-rw-r--r--drivers/edac/skx_common.h61
-rw-r--r--drivers/firewire/core-transaction.c2
-rw-r--r--drivers/firmware/Kconfig18
-rw-r--r--drivers/firmware/arm_scmi/Kconfig13
-rw-r--r--drivers/firmware/arm_scmi/Makefile1
-rw-r--r--drivers/firmware/arm_scmi/bus.c79
-rw-r--r--drivers/firmware/arm_scmi/clock.c33
-rw-r--r--drivers/firmware/arm_scmi/common.h1
-rw-r--r--drivers/firmware/arm_scmi/driver.c119
-rw-r--r--drivers/firmware/arm_scmi/protocols.h2
-rw-r--r--drivers/firmware/arm_scmi/quirks.c322
-rw-r--r--drivers/firmware/arm_scmi/quirks.h52
-rw-r--r--drivers/firmware/arm_scmi/raw_mode.c72
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/Kconfig24
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/Makefile2
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c276
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c263
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx95.rst828
-rw-r--r--drivers/firmware/arm_sdei.c11
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_bin.c6
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c15
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c4
-rw-r--r--drivers/firmware/efi/Kconfig24
-rw-r--r--drivers/firmware/efi/efi.c1
-rw-r--r--drivers/firmware/efi/libstub/Makefile1
-rw-r--r--drivers/firmware/efi/libstub/Makefile.zboot4
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c1
-rw-r--r--drivers/firmware/efi/libstub/x86-5lvl.c2
-rw-r--r--drivers/firmware/efi/libstub/x86-mixed.S253
-rw-r--r--drivers/firmware/efi/libstub/zboot-header.S32
-rw-r--r--drivers/firmware/efi/libstub/zboot.lds11
-rw-r--r--drivers/firmware/efi/memmap.c3
-rw-r--r--drivers/firmware/efi/test/efi_test.c4
-rw-r--r--drivers/firmware/imx/Kconfig22
-rw-r--r--drivers/firmware/imx/Makefile2
-rw-r--r--drivers/firmware/imx/sm-cpu.c85
-rw-r--r--drivers/firmware/imx/sm-lmm.c91
-rw-r--r--drivers/firmware/psci/psci.c4
-rw-r--r--drivers/firmware/psci/psci_checker.c2
-rw-r--r--drivers/firmware/qcom/qcom_scm.c3
-rw-r--r--drivers/firmware/qcom/qcom_scm.h3
-rw-r--r--drivers/firmware/qcom/qcom_tzmem.c1
-rw-r--r--drivers/firmware/samsung/exynos-acpm-pmic.c16
-rw-r--r--drivers/firmware/samsung/exynos-acpm.c58
-rw-r--r--drivers/firmware/sysfb_simplefb.c31
-rw-r--r--drivers/firmware/ti_sci.c14
-rw-r--r--drivers/firmware/turris-mox-rwtm.c260
-rw-r--r--drivers/gpio/Kconfig64
-rw-r--r--drivers/gpio/Makefile4
-rw-r--r--drivers/gpio/TODO7
-rw-r--r--drivers/gpio/gpio-aggregator.c1104
-rw-r--r--drivers/gpio/gpio-bcm-kona.c1
-rw-r--r--drivers/gpio/gpio-blzp1600.c281
-rw-r--r--drivers/gpio/gpio-brcmstb.c2
-rw-r--r--drivers/gpio/gpio-davinci.c34
-rw-r--r--drivers/gpio/gpio-dln2.c7
-rw-r--r--drivers/gpio/gpio-ds4520.c6
-rw-r--r--drivers/gpio/gpio-eic-sprd.c5
-rw-r--r--drivers/gpio/gpio-em.c11
-rw-r--r--drivers/gpio/gpio-exar.c16
-rw-r--r--drivers/gpio/gpio-f7188x.c13
-rw-r--r--drivers/gpio/gpio-graniterapids.c6
-rw-r--r--drivers/gpio/gpio-grgpio.c9
-rw-r--r--drivers/gpio/gpio-gw-pld.c6
-rw-r--r--drivers/gpio/gpio-htc-egpio.c16
-rw-r--r--drivers/gpio/gpio-ich.c12
-rw-r--r--drivers/gpio/gpio-idt3243x.c2
-rw-r--r--drivers/gpio/gpio-imx-scu.c47
-rw-r--r--drivers/gpio/gpio-it87.c11
-rw-r--r--drivers/gpio/gpio-janz-ttl.c6
-rw-r--r--drivers/gpio/gpio-kempld.c7
-rw-r--r--drivers/gpio/gpio-ljca.c13
-rw-r--r--drivers/gpio/gpio-logicvc.c11
-rw-r--r--drivers/gpio/gpio-loongson-64bit.c6
-rw-r--r--drivers/gpio/gpio-loongson.c8
-rw-r--r--drivers/gpio/gpio-lp3943.c13
-rw-r--r--drivers/gpio/gpio-lp873x.c12
-rw-r--r--drivers/gpio/gpio-lp87565.c15
-rw-r--r--drivers/gpio/gpio-lpc18xx.c29
-rw-r--r--drivers/gpio/gpio-lpc32xx.c28
-rw-r--r--drivers/gpio/gpio-madera.c18
-rw-r--r--drivers/gpio/gpio-max3191x.c16
-rw-r--r--drivers/gpio/gpio-max730x.c9
-rw-r--r--drivers/gpio/gpio-max732x.c15
-rw-r--r--drivers/gpio/gpio-max77620.c13
-rw-r--r--drivers/gpio/gpio-max77759.c530
-rw-r--r--drivers/gpio/gpio-mb86s7x.c6
-rw-r--r--drivers/gpio/gpio-mc33880.c9
-rw-r--r--drivers/gpio/gpio-ml-ioh.c6
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c8
-rw-r--r--drivers/gpio/gpio-mvebu.c17
-rw-r--r--drivers/gpio/gpio-mxc.c11
-rw-r--r--drivers/gpio/gpio-mxs.c4
-rw-r--r--drivers/gpio/gpio-pca953x.c32
-rw-r--r--drivers/gpio/gpio-pxa.c14
-rw-r--r--drivers/gpio/gpio-rockchip.c2
-rw-r--r--drivers/gpio/gpio-sa1100.c2
-rw-r--r--drivers/gpio/gpio-sodaville.c2
-rw-r--r--drivers/gpio/gpio-spacemit-k1.c293
-rw-r--r--drivers/gpio/gpio-tb10x.c2
-rw-r--r--drivers/gpio/gpio-timberdale.c10
-rw-r--r--drivers/gpio/gpio-twl4030.c5
-rw-r--r--drivers/gpio/gpio-vf610.c4
-rw-r--r--drivers/gpio/gpio-xgene-sb.c26
-rw-r--r--drivers/gpio/gpiolib-acpi-core.c (renamed from drivers/gpio/gpiolib-acpi.c)522
-rw-r--r--drivers/gpio/gpiolib-acpi-quirks.c363
-rw-r--r--drivers/gpio/gpiolib-acpi.h15
-rw-r--r--drivers/gpio/gpiolib-cdev.c3
-rw-r--r--drivers/gpio/gpiolib-devres.c89
-rw-r--r--drivers/gpio/gpiolib-of.c17
-rw-r--r--drivers/gpio/gpiolib-of.h6
-rw-r--r--drivers/gpio/gpiolib-sysfs.c8
-rw-r--r--drivers/gpio/gpiolib.c173
-rw-r--r--drivers/gpu/drm/Kconfig115
-rw-r--r--drivers/gpu/drm/Kconfig.debug116
-rw-r--r--drivers/gpu/drm/Makefile6
-rw-r--r--drivers/gpu/drm/adp/adp-mipi.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h102
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c500
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c233
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h69
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c179
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c186
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c1106
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c107
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c86
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c924
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h135
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c968
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cikd.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c206
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c139
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c732
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c616
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c60
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c171
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c93
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c146
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_userqueue.c355
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_userqueue.h (renamed from drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c)30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h144
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c102
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c271
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c204
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c140
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c221
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c413
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c116
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_enums.h234
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sid.h1555
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15d.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.c126
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c83
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c265
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c76
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c19
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c145
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c95
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c142
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c56
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c13
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_queue.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c41
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c742
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h34
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c56
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c13
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c17
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c8
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c178
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile41
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c185
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c59
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_state.c111
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h347
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c219
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_fused_io.c148
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_fused_io.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_plane.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_state_priv.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c62
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h64
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c4346
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c142
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h77
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/Makefile26
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c123
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c362
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_status.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/optc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/reg_helper.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c90
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c89
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c64
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq_types.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c55
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c380
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/Makefile26
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c66
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h171
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h159
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c137
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c349
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c21
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_service_interface.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h1
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c53
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c48
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c73
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h38
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h23
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h41
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h44
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h188
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h16
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h23
-rw-r--r--drivers/gpu/drm/amd/include/atombios.h2
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h1
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h1
-rw-r--r--drivers/gpu/drm/amd/include/v11_structs.h8
-rw-r--r--drivers/gpu/drm/amd/include/v12_structs.h8
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c43
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c45
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h3
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c358
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c42
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c8
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c36
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c27
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h29
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h25
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c39
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c19
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c123
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c38
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c155
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h29
-rw-r--r--drivers/gpu/drm/ast/ast_cursor.c45
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h17
-rw-r--r--drivers/gpu/drm/ast/ast_mm.c26
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c77
-rw-r--r--drivers/gpu/drm/ast/ast_post.c24
-rw-r--r--drivers/gpu/drm/ast/ast_reg.h2
-rw-r--r--drivers/gpu/drm/bridge/Kconfig1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c3
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c41
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c33
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c194
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.h3
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c52
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c47
-rw-r--r--drivers/gpu/drm/bridge/aux-bridge.c3
-rw-r--r--drivers/gpu/drm/bridge/aux-hpd-bridge.c1
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c207
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h2
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c77
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c6
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c5
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c9
-rw-r--r--drivers/gpu/drm/bridge/fsl-ldb.c9
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-ldb-helper.c11
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-ldb-helper.h5
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c9
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c3
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qm-ldb.c32
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c22
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c3
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c3
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c3
-rw-r--r--drivers/gpu/drm/bridge/ite-it6263.c7
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c47
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c3
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c3
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9211.c3
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c5
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c16
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c3
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c1
-rw-r--r--drivers/gpu/drm/bridge/microchip-lvds.c3
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c3
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c5
-rw-r--r--drivers/gpu/drm/bridge/panel.c3
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c1
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c3
-rw-r--r--drivers/gpu/drm/bridge/samsung-dsim.c10
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c6
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c1
-rw-r--r--drivers/gpu/drm/bridge/simple-bridge.c5
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c1
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c14
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c5
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c5
-rw-r--r--drivers/gpu/drm/bridge/tc358762.c3
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c3
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c2
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c41
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c45
-rw-r--r--drivers/gpu/drm/bridge/tda998x_drv.c8
-rw-r--r--drivers/gpu/drm/bridge/thc63lvd1024.c3
-rw-r--r--drivers/gpu/drm/bridge/ti-dlpc3433.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c38
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c115
-rw-r--r--drivers/gpu/drm/bridge/ti-tdp158.c6
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c5
-rw-r--r--drivers/gpu/drm/bridge/ti-tpd12s015.c3
-rw-r--r--drivers/gpu/drm/ci/arm64.config2
-rw-r--r--drivers/gpu/drm/ci/build-igt.sh2
-rw-r--r--drivers/gpu/drm/ci/build.sh20
-rw-r--r--drivers/gpu/drm/ci/build.yml14
-rw-r--r--drivers/gpu/drm/ci/container.yml24
-rw-r--r--drivers/gpu/drm/ci/gitlab-ci.yml55
-rwxr-xr-xdrivers/gpu/drm/ci/igt_runner.sh11
-rw-r--r--drivers/gpu/drm/ci/image-tags.yml4
-rwxr-xr-xdrivers/gpu/drm/ci/lava-submit.sh5
-rw-r--r--drivers/gpu/drm/ci/test.yml76
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt8
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-fails.txt23
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-fails.txt8
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-fails.txt20
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-skips.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-fails.txt32
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt13
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt9
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-fails.txt22
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt20
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt313
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt30
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-skips.txt2
-rw-r--r--drivers/gpu/drm/display/drm_bridge_connector.c160
-rw-r--r--drivers/gpu/drm/display/drm_dp_cec.c37
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c467
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c116
-rw-r--r--drivers/gpu/drm/display/drm_dp_tunnel.c20
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_helper.c168
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_state_helper.c294
-rw-r--r--drivers/gpu/drm/drm_atomic.c59
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c3
-rw-r--r--drivers/gpu/drm/drm_blend.c6
-rw-r--r--drivers/gpu/drm/drm_bridge.c167
-rw-r--r--drivers/gpu/drm/drm_bridge_helper.c58
-rw-r--r--drivers/gpu/drm/drm_client.c10
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c257
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c1
-rw-r--r--drivers/gpu/drm/drm_debugfs.c38
-rw-r--r--drivers/gpu/drm/drm_displayid_internal.h31
-rw-r--r--drivers/gpu/drm/drm_draw.c100
-rw-r--r--drivers/gpu/drm/drm_drv.c81
-rw-r--r--drivers/gpu/drm/drm_edid.c101
-rw-r--r--drivers/gpu/drm/drm_file.c34
-rw-r--r--drivers/gpu/drm/drm_format_helper.c378
-rw-r--r--drivers/gpu/drm/drm_format_internal.h160
-rw-r--r--drivers/gpu/drm/drm_gem.c26
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c6
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c147
-rw-r--r--drivers/gpu/drm/drm_gpusvm.c4
-rw-r--r--drivers/gpu/drm/drm_internal.h4
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c37
-rw-r--r--drivers/gpu/drm/drm_mode_config.c7
-rw-r--r--drivers/gpu/drm/drm_panel.c146
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c6
-rw-r--r--drivers/gpu/drm/drm_panic.c142
-rw-r--r--drivers/gpu/drm/drm_panic_qr.rs104
-rw-r--r--drivers/gpu/drm/drm_plane.c52
-rw-r--r--drivers/gpu/drm/drm_prime.c7
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c11
-rw-r--r--drivers/gpu/drm/drm_syncobj.c47
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c7
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c4
-rw-r--r--drivers/gpu/drm/gma500/mmu.c41
-rw-r--r--drivers/gpu/drm/gma500/mmu.h2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c7
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_modes.c31
-rw-r--r--drivers/gpu/drm/gud/gud_drv.c33
-rw-r--r--drivers/gpu/drm/gud/gud_internal.h1
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Makefile3
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c16
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h10
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c91
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h36
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c94
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h130
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c71
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c104
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c74
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c87
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h12
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c3
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c4
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c4
-rw-r--r--drivers/gpu/drm/i915/Makefile5
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7017.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7xxx.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ivch.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ns2501.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_sil164.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_tfp410.c2
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c62
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c32
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c8
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c73
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c1222
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.h18
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c102
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.c191
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c47
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c25
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c629
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_cmtg.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c45
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c63
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c47
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c924
-rw-r--r--drivers/gpu/drm/i915/display/intel_de.h30
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c211
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h34
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c44
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c83
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c969
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.h82
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c129
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c44
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rpm.c68
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rpm.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.h24
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_snapshot.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h19
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.c30
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c152
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_regs.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c240
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c42
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c48
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c138
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c25
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c330
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt_common.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c218
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c64
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c52
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c83
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c79
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c60
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c38
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.c124
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.h20
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c181
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h63
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c276
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c670
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.h31
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c672
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_hti.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_load_detect.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c161
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c230
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_verify.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch.c340
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch.h58
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c204
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c133
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c140
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c91
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c43
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c524
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite_uapi.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c288
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c68
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga_regs.h36
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c325
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.c171
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.h14
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c4
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c21
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c741
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h24
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c160
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.h6
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c118
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.h5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_busy.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ioctls.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c35
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_throttle.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.h3
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c56
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_wopcm.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c9
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_migrate.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c54
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_tlb.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h9
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c28
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c5
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c49
-rw-r--r--drivers/gpu/drm/i915/i915_driver.h1
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h8
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c48
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c158
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c4
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c8
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h150
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h2
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c1
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c15
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h3
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h11
-rw-r--r--drivers/gpu/drm/i915/selftests/lib_sw_fence.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/librapl.c4
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.c17
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.c316
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.h56
-rw-r--r--drivers/gpu/drm/imagination/Makefile2
-rw-r--r--drivers/gpu/drm/imagination/pvr_debugfs.c3
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.c147
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.h40
-rw-r--r--drivers/gpu/drm/imagination/pvr_drv.c16
-rw-r--r--drivers/gpu/drm/imagination/pvr_free_list.c3
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.c40
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.h85
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_meta.c26
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_mips.c85
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_riscv.c165
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_startstop.c17
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.c31
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.h2
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_util.c66
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.c18
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.h6
-rw-r--r--drivers/gpu/drm/imagination/pvr_hwrt.c12
-rw-r--r--drivers/gpu/drm/imagination/pvr_mmu.c8
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.c136
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.h3
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h153
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_riscv.h41
-rw-r--r--drivers/gpu/drm/imagination/pvr_stream.c12
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm_mips.c3
-rw-r--r--drivers/gpu/drm/imx/ipuv3/parallel-display.c3
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c5
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c4
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c4
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c10
-rw-r--r--drivers/gpu/drm/mediatek/Makefile8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_cec.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c120
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c31
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c413
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.h14
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c2
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_cvbs.c3
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_dsi.c3
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c3
-rw-r--r--drivers/gpu/drm/msm/Kconfig1
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_catalog.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_catalog.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_catalog.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_catalog.c9
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_catalog.c44
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c96
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h1
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c73
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.h21
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c19
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h25
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h433
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c43
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c35
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c156
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c25
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c34
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h16
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c50
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c121
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c51
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.c131
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.h27
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c146
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c55
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h6
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c17
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.c18
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.h4
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c12
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c18
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c12
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c50
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c133
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h26
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_audio.c107
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c73
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hpd.c89
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_i2c.c14
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy.c6
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c4
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c4
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c1
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c13
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c2
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.c4
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c14
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c22
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.h6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/coreca7d.c122
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crcca7d.c98
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c50
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.h5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headca7d.c297
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimm.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c25
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c209
-rw-r--r--drivers/gpu/drm/nouveau/gv100_fence.c93
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h22
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h868
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h137
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h20
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h15
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h173
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/chan.h76
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push906f.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/pushc97b.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/layout.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h24
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h132
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h43
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h39
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h166
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h335
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h216
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h65
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h48
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h100
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h162
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h95
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h42
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h148
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h97
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h79
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h170
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h82
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h100
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h119
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h44
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h124
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h45
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h74
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h86
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h174
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c84
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c213
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c103
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c39
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvif/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan.c156
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan506f.c72
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan906f.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chanc36f.c77
-rw-r--r--drivers/gpu/drm/nouveau/nvif/conn.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvif/disp.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/outp.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvif/user.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c189
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c508
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c275
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c358
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c189
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c112
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c)43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c)35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c148
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c)394
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c)60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c)417
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c356
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c)1559
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c)37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c)35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h741
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h260
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h106
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h350
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h (renamed from drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h)64
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h825
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h (renamed from drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h)55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h132
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c)34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c691
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c)118
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c263
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c149
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c217
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c191
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c216
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h355
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h318
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h (renamed from drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h)241
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h79
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h634
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h249
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c99
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h191
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c271
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c306
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c2
-rw-r--r--drivers/gpu/drm/nova/Kconfig14
-rw-r--r--drivers/gpu/drm/nova/Makefile3
-rw-r--r--drivers/gpu/drm/nova/driver.rs69
-rw-r--r--drivers/gpu/drm/nova/file.rs74
-rw-r--r--drivers/gpu/drm/nova/gem.rs49
-rw-r--r--drivers/gpu/drm/nova/nova.rs18
-rw-r--r--drivers/gpu/drm/nova/uapi.rs61
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c3
-rw-r--r--drivers/gpu/drm/panel/Kconfig39
-rw-r--r--drivers/gpu/drm/panel/Makefile4
-rw-r--r--drivers/gpu/drm/panel/panel-abt-y030xx067a.c10
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c11
-rw-r--r--drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c11
-rw-r--r--drivers/gpu/drm/panel/panel-auo-a030jtn01.c10
-rw-r--r--drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c125
-rw-r--r--drivers/gpu/drm/panel/panel-boe-td4320.c247
-rw-r--r--drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c11
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c10
-rw-r--r--drivers/gpu/drm/panel/panel-dsi-cm.c10
-rw-r--r--drivers/gpu/drm/panel/panel-ebbg-ft8719.c11
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c19
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8279.c1296
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8394.c441
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3051d.c9
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36523.c1683
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt37801.c340
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c238
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-sofef00.c108
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c41
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c104
-rw-r--r--drivers/gpu/drm/panel/panel-synaptics-r63353.c68
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c6
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-g2647fb105.c280
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c71
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h19
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c12
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_dump.c4
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_features.h3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c152
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_regs.h36
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.c13
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.h13
-rw-r--r--drivers/gpu/drm/panthor/panthor_drv.c76
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.c10
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.c227
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.h82
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.c2
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.c6
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c19
-rw-r--r--drivers/gpu/drm/panthor/panthor_regs.h4
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c13
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios.h3
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c8
-rw-r--r--drivers/gpu/drm/radeon/cik.c42
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c42
-rw-r--r--drivers/gpu/drm/radeon/sid.h2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c5
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c10
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c3
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c3
-rw-r--r--drivers/gpu/drm/renesas/rz-du/Kconfig15
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c2
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c120
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h1
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c9
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c5
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig2
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c103
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c55
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c315
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop2_reg.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c94
-rw-r--r--drivers/gpu/drm/scheduler/.kunitconfig12
-rw-r--r--drivers/gpu/drm/scheduler/Makefile2
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c35
-rw-r--r--drivers/gpu/drm/scheduler/tests/Makefile7
-rw-r--r--drivers/gpu/drm/scheduler/tests/mock_scheduler.c359
-rw-r--r--drivers/gpu/drm/scheduler/tests/sched_tests.h226
-rw-r--r--drivers/gpu/drm/scheduler/tests/tests_basic.c476
-rw-r--r--drivers/gpu/drm/sitronix/Kconfig51
-rw-r--r--drivers/gpu/drm/sitronix/Makefile3
-rw-r--r--drivers/gpu/drm/sitronix/st7571-i2c.c1000
-rw-r--r--drivers/gpu/drm/sitronix/st7586.c (renamed from drivers/gpu/drm/tiny/st7586.c)0
-rw-r--r--drivers/gpu/drm/sitronix/st7735r.c (renamed from drivers/gpu/drm/tiny/st7735r.c)0
-rw-r--r--drivers/gpu/drm/sprd/sprd_dpu.c13
-rw-r--r--drivers/gpu/drm/sprd/sprd_dsi.c13
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c14
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c14
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c15
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c15
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c14
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c14
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c14
-rw-r--r--drivers/gpu/drm/stm/lvds.c11
-rw-r--r--drivers/gpu/drm/sysfb/Kconfig76
-rw-r--r--drivers/gpu/drm/sysfb/Makefile12
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb.c35
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_helper.h184
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_modeset.c320
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c107
-rw-r--r--drivers/gpu/drm/sysfb/efidrm.c389
-rw-r--r--drivers/gpu/drm/sysfb/ofdrm.c (renamed from drivers/gpu/drm/tiny/ofdrm.c)376
-rw-r--r--drivers/gpu/drm/sysfb/simpledrm.c (renamed from drivers/gpu/drm/tiny/simpledrm.c)258
-rw-r--r--drivers/gpu/drm/sysfb/vesadrm.c554
-rw-r--r--drivers/gpu/drm/tegra/dc.c17
-rw-r--r--drivers/gpu/drm/tegra/dp.c67
-rw-r--r--drivers/gpu/drm/tegra/dp.h2
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c11
-rw-r--r--drivers/gpu/drm/tegra/dsi.c4
-rw-r--r--drivers/gpu/drm/tegra/falcon.c20
-rw-r--r--drivers/gpu/drm/tegra/falcon.h1
-rw-r--r--drivers/gpu/drm/tegra/gem.c1
-rw-r--r--drivers/gpu/drm/tegra/hub.c4
-rw-r--r--drivers/gpu/drm/tegra/hub.h3
-rw-r--r--drivers/gpu/drm/tegra/rgb.c14
-rw-r--r--drivers/gpu/drm/tegra/sor.c4
-rw-r--r--drivers/gpu/drm/tests/Makefile2
-rw-r--r--drivers/gpu/drm/tests/drm_atomic_test.c153
-rw-r--r--drivers/gpu/drm/tests/drm_bridge_test.c417
-rw-r--r--drivers/gpu/drm/tests/drm_client_modeset_test.c3
-rw-r--r--drivers/gpu/drm/tests/drm_gem_shmem_test.c28
-rw-r--r--drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c158
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c61
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.c3
-rw-r--r--drivers/gpu/drm/tiny/Kconfig62
-rw-r--r--drivers/gpu/drm/tiny/Makefile4
-rw-r--r--drivers/gpu/drm/tiny/appletbdrm.c30
-rw-r--r--drivers/gpu/drm/tiny/cirrus-qemu.c145
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c46
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_test.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_backup.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c1
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c24
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h20
-rw-r--r--drivers/gpu/drm/udl/udl_main.c191
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c22
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c6
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c126
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c62
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h22
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c27
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c64
-rw-r--r--drivers/gpu/drm/v3d/v3d_perfmon.c4
-rw-r--r--drivers/gpu/drm/v3d/v3d_regs.h26
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c6
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock_output.c62
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c154
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c37
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c22
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h7
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c2
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c15
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c16
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c20
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c8
-rw-r--r--drivers/gpu/drm/vkms/Kconfig15
-rw-r--r--drivers/gpu/drm/vkms/Makefile5
-rw-r--r--drivers/gpu/drm/vkms/tests/.kunitconfig4
-rw-r--r--drivers/gpu/drm/vkms/tests/Makefile3
-rw-r--r--drivers/gpu/drm/vkms/tests/vkms_config_test.c951
-rw-r--r--drivers/gpu/drm/vkms/vkms_config.c640
-rw-r--r--drivers/gpu/drm/vkms/vkms_config.h437
-rw-r--r--drivers/gpu/drm/vkms/vkms_connector.c61
-rw-r--r--drivers/gpu/drm/vkms/vkms_connector.h26
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c2
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c45
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h17
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c176
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.h8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c844
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h81
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h38
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c52
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c30
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c874
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h71
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c63
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c85
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c7
-rw-r--r--drivers/gpu/drm/xe/Kconfig16
-rw-r--r--drivers/gpu/drm/xe/Makefile7
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_abi.h1
-rw-r--r--drivers/gpu/drm/xe/abi/guc_klvs_abi.h1
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h11
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h48
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h76
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h6
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c10
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c45
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rpm.c71
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rps.c17
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_wa.c6
-rw-r--r--drivers/gpu/drm/xe/display/xe_hdcp_gsc.c133
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c2
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_alu_commands.h79
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h1
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_mi_commands.h5
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h4
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h13
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pcode_regs.h3
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c6
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf.c2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c11
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c515
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h27
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.c393
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.h10
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.c250
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.h24
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c64
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_device.c34
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.c105
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h38
-rw-r--r--drivers/gpu/drm/xe/xe_dma_buf.c2
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.c4
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c4
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c9
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake.c6
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c84
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c30
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.c82
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c28
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c3
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c20
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c66
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c6
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.c1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle.c90
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c48
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c12
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture.c100
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_debugfs.c159
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.c203
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.h7
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity_types.h12
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c3
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c18
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.h1
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c18
-rw-r--r--drivers/gpu/drm/xe/xe_guc_types.h5
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c1
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c125
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c2
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c20
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.c2
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c238
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.h4
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c11
-rw-r--r--drivers/gpu/drm/xe/xe_module.c9
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c7
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c31
-rw-r--r--drivers/gpu/drm/xe/xe_pci_sriov.c21
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c2
-rw-r--r--drivers/gpu/drm/xe/xe_pcode_api.h8
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c84
-rw-r--r--drivers/gpu/drm/xe/xe_pm.h2
-rw-r--r--drivers/gpu/drm/xe/xe_pmu.c77
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c238
-rw-r--r--drivers/gpu/drm/xe/xe_query.c2
-rw-r--r--drivers/gpu/drm/xe/xe_reg_sr.c3
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c3
-rw-r--r--drivers/gpu/drm/xe/xe_sa.c3
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.c69
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.h1
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c22
-rw-r--r--drivers/gpu/drm/xe/xe_svm.h82
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c94
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c48
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_vram.c5
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c12
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules2
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig1
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c9
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp_audio.c5
-rw-r--r--drivers/gpu/host1x/bus.c11
-rw-r--r--drivers/gpu/host1x/cdma.c7
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c8
-rw-r--r--drivers/gpu/nova-core/Kconfig1
-rw-r--r--drivers/gpu/nova-core/driver.rs9
-rw-r--r--drivers/gpu/nova-core/firmware.rs44
-rw-r--r--drivers/gpu/nova-core/gpu.rs86
-rw-r--r--drivers/gpu/nova-core/nova_core.rs2
-rw-r--r--drivers/gpu/nova-core/regs.rs82
-rw-r--r--drivers/gpu/nova-core/regs/macros.rs380
-rw-r--r--drivers/hid/hid-asus.c111
-rw-r--r--drivers/hv/vmbus_drv.c2
-rw-r--r--drivers/hwmon/Kconfig11
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/fam15h_power.c6
-rw-r--r--drivers/hwmon/hwmon-vid.c4
-rw-r--r--drivers/hwmon/k10temp.c2
-rw-r--r--drivers/hwmon/oxp-sensors.c716
-rw-r--r--drivers/i2c/algos/i2c-algo-pcf.c3
-rw-r--r--drivers/i2c/busses/Kconfig15
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c3
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c223
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c2
-rw-r--r--drivers/i2c/busses/i2c-davinci.c3
-rw-r--r--drivers/i2c/busses/i2c-designware-amdisp.c205
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c4
-rw-r--r--drivers/i2c/busses/i2c-imx.c12
-rw-r--r--drivers/i2c/busses/i2c-ismt.c2
-rw-r--r--drivers/i2c/busses/i2c-lpc2k.c7
-rw-r--r--drivers/i2c/busses/i2c-microchip-corei2c.c102
-rw-r--r--drivers/i2c/busses/i2c-mlxbf.c87
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c18
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.c166
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.h13
-rw-r--r--drivers/i2c/busses/i2c-pasemi-core.c119
-rw-r--r--drivers/i2c/busses/i2c-pasemi-pci.c10
-rw-r--r--drivers/i2c/busses/i2c-piix4.c20
-rw-r--r--drivers/i2c/busses/i2c-powermac.c2
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c19
-rw-r--r--drivers/i2c/busses/i2c-riic.c53
-rw-r--r--drivers/i2c/busses/i2c-rzv2m.c2
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c3
-rw-r--r--drivers/i2c/busses/i2c-tegra.c5
-rw-r--r--drivers/i2c/busses/i2c-thunderx-pcidrv.c5
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c3
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c24
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c24
-rw-r--r--drivers/i2c/busses/i2c-via.c15
-rw-r--r--drivers/i2c/busses/i2c-viai2c-wmt.c20
-rw-r--r--drivers/i2c/busses/i2c-viapro.c33
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c17
-rw-r--r--drivers/i2c/busses/i2c-virtio.c7
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c57
-rw-r--r--drivers/i2c/busses/i2c-xiic.c4
-rw-r--r--drivers/i2c/busses/scx200_acb.c6
-rw-r--r--drivers/i2c/i2c-atr.c570
-rw-r--r--drivers/i2c/i2c-core-base.c67
-rw-r--r--drivers/i2c/i2c-core-of.c1
-rw-r--r--drivers/i2c/i2c-core-slave.c12
-rw-r--r--drivers/i2c/i2c-core-smbus.c3
-rw-r--r--drivers/i2c/i2c-smbus.c21
-rw-r--r--drivers/i2c/muxes/i2c-mux-ltc4306.c10
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c6
-rw-r--r--drivers/i3c/master/Kconfig4
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c32
-rw-r--r--drivers/i3c/master/svc-i3c-master.c109
-rw-r--r--drivers/idle/intel_idle.c139
-rw-r--r--drivers/iio/adc/qcom-spmi-rradc.c4
-rw-r--r--drivers/iio/adc/stm32-adc-core.c7
-rw-r--r--drivers/infiniband/core/cm.c78
-rw-r--r--drivers/infiniband/core/cm_trace.h2
-rw-r--r--drivers/infiniband/core/cma.c25
-rw-r--r--drivers/infiniband/core/cma_trace.h2
-rw-r--r--drivers/infiniband/core/iwcm.c29
-rw-r--r--drivers/infiniband/core/mad_rmpp.c2
-rw-r--r--drivers/infiniband/core/umem_odp.c271
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/core/verbs.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/debugfs.c20
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c7
-rw-r--r--drivers/infiniband/hw/hfi1/mad.h1
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c10
-rw-r--r--drivers/infiniband/hw/hfi1/pio.h1
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c18
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h1
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c2
-rw-r--r--drivers/infiniband/hw/hns/Makefile1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h20
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c26
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_trace.h216
-rw-r--r--drivers/infiniband/hw/irdma/ctrl.c2
-rw-r--r--drivers/infiniband/hw/irdma/main.c125
-rw-r--r--drivers/infiniband/hw/irdma/main.h3
-rw-r--r--drivers/infiniband/hw/irdma/osdep.h2
-rw-r--r--drivers/infiniband/hw/irdma/pble.c2
-rw-r--r--drivers/infiniband/hw/irdma/type.h4
-rw-r--r--drivers/infiniband/hw/mana/cq.c4
-rw-r--r--drivers/infiniband/hw/mana/device.c174
-rw-r--r--drivers/infiniband/hw/mana/main.c92
-rw-r--r--drivers/infiniband/hw/mana/mana_ib.h7
-rw-r--r--drivers/infiniband/hw/mana/mr.c29
-rw-r--r--drivers/infiniband/hw/mana/qp.c5
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c8
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c58
-rw-r--r--drivers/infiniband/hw/mlx5/main.c29
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h13
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c6
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c65
-rw-r--r--drivers/infiniband/hw/mlx5/qpc.c30
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c4
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c2
-rw-r--r--drivers/infiniband/sw/rxe/Kconfig2
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c66
-rw-r--r--drivers/infiniband/sw/rxe/rxe_odp.c144
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c15
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c40
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.h2
-rw-r--r--drivers/infiniband/sw/siw/Kconfig1
-rw-r--r--drivers/infiniband/sw/siw/siw.h24
-rw-r--r--drivers/infiniband/sw/siw/siw_cq.c2
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.c28
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.h1
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c8
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c65
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c127
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c19
-rw-r--r--drivers/input/joystick/xpad.c3
-rw-r--r--drivers/input/rmi4/rmi_f34.c135
-rw-r--r--drivers/iommu/Kconfig158
-rw-r--r--drivers/iommu/Makefile6
-rw-r--r--drivers/iommu/amd/Makefile2
-rw-r--r--drivers/iommu/amd/amd_iommu.h2
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h10
-rw-r--r--drivers/iommu/amd/init.c94
-rw-r--r--drivers/iommu/amd/io_pgtable.c38
-rw-r--r--drivers/iommu/amd/io_pgtable_v2.c12
-rw-r--r--drivers/iommu/amd/iommu.c94
-rw-r--r--drivers/iommu/amd/ppr.c2
-rw-r--r--drivers/iommu/apple-dart.c3
-rw-r--r--drivers/iommu/arm/Kconfig144
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c86
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c138
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h39
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c9
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c44
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c6
-rw-r--r--drivers/iommu/dma-iommu.c493
-rw-r--r--drivers/iommu/exynos-iommu.c12
-rw-r--r--drivers/iommu/fsl_pamu_domain.c2
-rw-r--r--drivers/iommu/intel/Makefile7
-rw-r--r--drivers/iommu/intel/dmar.c14
-rw-r--r--drivers/iommu/intel/iommu.c244
-rw-r--r--drivers/iommu/intel/iommu.h62
-rw-r--r--drivers/iommu/intel/irq_remapping.c12
-rw-r--r--drivers/iommu/intel/nested.c20
-rw-r--r--drivers/iommu/intel/pasid.c13
-rw-r--r--drivers/iommu/intel/pasid.h1
-rw-r--r--drivers/iommu/intel/prq.c7
-rw-r--r--drivers/iommu/intel/svm.c9
-rw-r--r--drivers/iommu/io-pgtable-arm.c58
-rw-r--r--drivers/iommu/io-pgtable-dart.c23
-rw-r--r--drivers/iommu/iommu-pages.c119
-rw-r--r--drivers/iommu/iommu-pages.h195
-rw-r--r--drivers/iommu/iommu-sva.c18
-rw-r--r--drivers/iommu/iommu.c234
-rw-r--r--drivers/iommu/iommufd/device.c59
-rw-r--r--drivers/iommu/iommufd/eventq.c48
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h6
-rw-r--r--drivers/iommu/iommufd/selftest.c57
-rw-r--r--drivers/iommu/ipmmu-vmsa.c3
-rw-r--r--drivers/iommu/mtk_iommu.c37
-rw-r--r--drivers/iommu/riscv/Makefile2
-rw-r--r--drivers/iommu/riscv/iommu.c43
-rw-r--r--drivers/iommu/rockchip-iommu.c14
-rw-r--r--drivers/iommu/s390-iommu.c345
-rw-r--r--drivers/iommu/sun50i-iommu.c6
-rw-r--r--drivers/iommu/tegra-smmu.c111
-rw-r--r--drivers/iommu/virtio-iommu.c187
-rw-r--r--drivers/irqchip/Kconfig5
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/exynos-combiner.c2
-rw-r--r--drivers/irqchip/irq-al-fic.c20
-rw-r--r--drivers/irqchip/irq-alpine-msi.c7
-rw-r--r--drivers/irqchip/irq-apple-aic.c4
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c12
-rw-r--r--drivers/irqchip/irq-aspeed-i2c-ic.c2
-rw-r--r--drivers/irqchip/irq-aspeed-intc.c2
-rw-r--r--drivers/irqchip/irq-aspeed-scu-ic.c2
-rw-r--r--drivers/irqchip/irq-aspeed-vic.c4
-rw-r--r--drivers/irqchip/irq-ath79-misc.c4
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c2
-rw-r--r--drivers/irqchip/irq-atmel-aic.c19
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c28
-rw-r--r--drivers/irqchip/irq-bcm2712-mip.c6
-rw-r--r--drivers/irqchip/irq-bcm2835.c2
-rw-r--r--drivers/irqchip/irq-bcm2836.c2
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c2
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c2
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c24
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c10
-rw-r--r--drivers/irqchip/irq-clps711x.c4
-rw-r--r--drivers/irqchip/irq-crossbar.c6
-rw-r--r--drivers/irqchip/irq-csky-apb-intc.c5
-rw-r--r--drivers/irqchip/irq-csky-mpintc.c2
-rw-r--r--drivers/irqchip/irq-davinci-cp-intc.c6
-rw-r--r--drivers/irqchip/irq-digicolor.c2
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c5
-rw-r--r--drivers/irqchip/irq-econet-en751221.c310
-rw-r--r--drivers/irqchip/irq-ftintc010.c5
-rw-r--r--drivers/irqchip/irq-gic-v2m.c18
-rw-r--r--drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its-msi-parent.c39
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c76
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c18
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--drivers/irqchip/irq-goldfish-pic.c7
-rw-r--r--drivers/irqchip/irq-hip04.c6
-rw-r--r--drivers/irqchip/irq-i8259.c4
-rw-r--r--drivers/irqchip/irq-idt3243x.c2
-rw-r--r--drivers/irqchip/irq-imgpdc.c2
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c4
-rw-r--r--drivers/irqchip/irq-imx-intmux.c2
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c2
-rw-r--r--drivers/irqchip/irq-imx-mu-msi.c2
-rw-r--r--drivers/irqchip/irq-ingenic-tcu.c13
-rw-r--r--drivers/irqchip/irq-ingenic.c4
-rw-r--r--drivers/irqchip/irq-ixp4xx.c2
-rw-r--r--drivers/irqchip/irq-jcore-aic.c5
-rw-r--r--drivers/irqchip/irq-keystone.c4
-rw-r--r--drivers/irqchip/irq-lan966x-oic.c20
-rw-r--r--drivers/irqchip/irq-loongarch-avec.c2
-rw-r--r--drivers/irqchip/irq-loongarch-cpu.c2
-rw-r--r--drivers/irqchip/irq-loongson-eiointc.c2
-rw-r--r--drivers/irqchip/irq-loongson-htvec.c2
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c11
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c4
-rw-r--r--drivers/irqchip/irq-loongson-pch-pic.c2
-rw-r--r--drivers/irqchip/irq-lpc32xx.c4
-rw-r--r--drivers/irqchip/irq-ls-extirq.c4
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c10
-rw-r--r--drivers/irqchip/irq-ls1x.c4
-rw-r--r--drivers/irqchip/irq-mchp-eic.c5
-rw-r--r--drivers/irqchip/irq-meson-gpio.c2
-rw-r--r--drivers/irqchip/irq-mips-cpu.c13
-rw-r--r--drivers/irqchip/irq-mips-gic.c15
-rw-r--r--drivers/irqchip/irq-mmp.c12
-rw-r--r--drivers/irqchip/irq-mscc-ocelot.c7
-rw-r--r--drivers/irqchip/irq-msi-lib.c9
-rw-r--r--drivers/irqchip/irq-msi-lib.h27
-rw-r--r--drivers/irqchip/irq-mst-intc.c4
-rw-r--r--drivers/irqchip/irq-mtk-cirq.c5
-rw-r--r--drivers/irqchip/irq-mtk-sysirq.c4
-rw-r--r--drivers/irqchip/irq-mvebu-gicp.c26
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c2
-rw-r--r--drivers/irqchip/irq-mvebu-odmi.c27
-rw-r--r--drivers/irqchip/irq-mvebu-pic.c4
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c24
-rw-r--r--drivers/irqchip/irq-mxs.c4
-rw-r--r--drivers/irqchip/irq-nvic.c2
-rw-r--r--drivers/irqchip/irq-omap-intc.c4
-rw-r--r--drivers/irqchip/irq-or1k-pic.c4
-rw-r--r--drivers/irqchip/irq-orion.c6
-rw-r--r--drivers/irqchip/irq-owl-sirq.c4
-rw-r--r--drivers/irqchip/irq-pic32-evic.c6
-rw-r--r--drivers/irqchip/irq-pruss-intc.c7
-rw-r--r--drivers/irqchip/irq-qcom-mpm.c2
-rw-r--r--drivers/irqchip/irq-realtek-rtl.c2
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c6
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c4
-rw-r--r--drivers/irqchip/irq-renesas-rza1.c6
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c6
-rw-r--r--drivers/irqchip/irq-renesas-rzv2h.c5
-rw-r--r--drivers/irqchip/irq-riscv-imsic-platform.c2
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.c2
-rw-r--r--drivers/irqchip/irq-riscv-intc.c2
-rw-r--r--drivers/irqchip/irq-sa11x0.c2
-rw-r--r--drivers/irqchip/irq-sg2042-msi.c154
-rw-r--r--drivers/irqchip/irq-sni-exiu.c6
-rw-r--r--drivers/irqchip/irq-sp7021-intc.c4
-rw-r--r--drivers/irqchip/irq-starfive-jh8100-intc.c4
-rw-r--r--drivers/irqchip/irq-stm32-exti.c25
-rw-r--r--drivers/irqchip/irq-stm32mp-exti.c9
-rw-r--r--drivers/irqchip/irq-sun4i.c2
-rw-r--r--drivers/irqchip/irq-sun6i-r.c4
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c11
-rw-r--r--drivers/irqchip/irq-tb10x.c21
-rw-r--r--drivers/irqchip/irq-tegra.c5
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c10
-rw-r--r--drivers/irqchip/irq-ti-sci-intr.c7
-rw-r--r--drivers/irqchip/irq-ts4800.c2
-rw-r--r--drivers/irqchip/irq-uniphier-aidet.c2
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c4
-rw-r--r--drivers/irqchip/irq-vf610-mscm-ir.c6
-rw-r--r--drivers/irqchip/irq-vic.c5
-rw-r--r--drivers/irqchip/irq-vt8500.c153
-rw-r--r--drivers/irqchip/irq-wpcm450-aic.c2
-rw-r--r--drivers/irqchip/irq-xilinx-intc.c4
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c5
-rw-r--r--drivers/irqchip/irq-xtensa-pic.c4
-rw-r--r--drivers/irqchip/irq-zevio.c4
-rw-r--r--drivers/irqchip/spear-shirq.c2
-rw-r--r--drivers/mailbox/qcom-ipcc.c4
-rw-r--r--drivers/md/bcache/btree.c3
-rw-r--r--drivers/md/bcache/super.c6
-rw-r--r--drivers/md/dm-bufio.c2
-rw-r--r--drivers/md/dm-integrity.c16
-rw-r--r--drivers/md/dm-raid.c3
-rw-r--r--drivers/md/md.c190
-rw-r--r--drivers/md/md.h18
-rw-r--r--drivers/md/raid1.c3
-rw-r--r--drivers/md/raid10.c9
-rw-r--r--drivers/md/raid5.c8
-rw-r--r--drivers/media/cec/platform/cros-ec/cros-ec-cec.c5
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c4
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c4
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c11
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c4
-rw-r--r--drivers/media/dvb-frontends/dib8000.c5
-rw-r--r--drivers/media/i2c/Kconfig43
-rw-r--r--drivers/media/i2c/Makefile4
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c4
-rw-r--r--drivers/media/i2c/ccs-pll.c53
-rw-r--r--drivers/media/i2c/ccs-pll.h29
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c55
-rw-r--r--drivers/media/i2c/ccs/ccs-quirk.c3
-rw-r--r--drivers/media/i2c/ccs/ccs-reg-access.c9
-rw-r--r--drivers/media/i2c/ccs/ccs.h2
-rw-r--r--drivers/media/i2c/ds90ub913.c92
-rw-r--r--drivers/media/i2c/ds90ub953.c252
-rw-r--r--drivers/media/i2c/ds90ub953.h104
-rw-r--r--drivers/media/i2c/ds90ub960.c2210
-rw-r--r--drivers/media/i2c/imx219.c38
-rw-r--r--drivers/media/i2c/imx283.c2
-rw-r--r--drivers/media/i2c/imx334.c1035
-rw-r--r--drivers/media/i2c/imx335.c5
-rw-r--r--drivers/media/i2c/lt6911uxe.c4
-rw-r--r--drivers/media/i2c/max96714.c2
-rw-r--r--drivers/media/i2c/max96717.c2
-rw-r--r--drivers/media/i2c/ov02c10.c1013
-rw-r--r--drivers/media/i2c/ov02e10.c969
-rw-r--r--drivers/media/i2c/ov08x40.c1324
-rw-r--r--drivers/media/i2c/ov13b10.c176
-rw-r--r--drivers/media/i2c/ov2740.c4
-rw-r--r--drivers/media/i2c/ov5675.c5
-rw-r--r--drivers/media/i2c/ov8856.c9
-rw-r--r--drivers/media/i2c/rdacm20.c7
-rw-r--r--drivers/media/i2c/rdacm21.c7
-rw-r--r--drivers/media/i2c/tc358743.c4
-rw-r--r--drivers/media/i2c/vd55g1.c1965
-rw-r--r--drivers/media/i2c/vd56g3.c1586
-rw-r--r--drivers/media/pci/Kconfig1
-rw-r--r--drivers/media/pci/Makefile2
-rw-r--r--drivers/media/pci/intel/ipu-bridge.c2
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c5
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-bus.c2
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-bus.h7
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-buttress.c6
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-buttress.h5
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-dma.c4
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-dma.h3
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h2
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-queue.c45
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-queue.h10
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h4
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-video.c5
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-video.h8
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys.c8
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys.h4
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6.c13
-rw-r--r--drivers/media/pci/mgb4/mgb4_vin.c21
-rw-r--r--drivers/media/pci/pt3/pt3.c17
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-core.c4
-rw-r--r--drivers/media/pci/sta2x11/Kconfig16
-rw-r--r--drivers/media/pci/sta2x11/Makefile2
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c1270
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.h29
-rw-r--r--drivers/media/pci/tw5864/tw5864-core.c13
-rw-r--r--drivers/media/pci/zoran/zoran_card.c2
-rw-r--r--drivers/media/pci/zoran/zr36016.c2
-rw-r--r--drivers/media/pci/zoran/zr36050.c2
-rw-r--r--drivers/media/pci/zoran/zr36060.c2
-rw-r--r--drivers/media/platform/amlogic/Kconfig1
-rw-r--r--drivers/media/platform/amlogic/Makefile2
-rw-r--r--drivers/media/platform/amlogic/c3/Kconfig5
-rw-r--r--drivers/media/platform/amlogic/c3/Makefile5
-rw-r--r--drivers/media/platform/amlogic/c3/isp/Kconfig18
-rw-r--r--drivers/media/platform/amlogic/c3/isp/Makefile10
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c804
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-common.h340
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-core.c641
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-dev.c421
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-params.c1008
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-regs.h618
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-resizer.c892
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-stats.c326
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-adapter/Kconfig16
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-adapter/Makefile3
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-adapter/c3-mipi-adap.c842
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-csi2/Kconfig16
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-csi2/Makefile3
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c828
-rw-r--r--drivers/media/platform/amphion/vdec.c2
-rw-r--r--drivers/media/platform/amphion/vpu.h1
-rw-r--r--drivers/media/platform/amphion/vpu_core.c7
-rw-r--r--drivers/media/platform/amphion/vpu_malone.c39
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c8
-rw-r--r--drivers/media/platform/imagination/e5010-jpeg-enc.c9
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c5
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h4
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c73
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h1
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h8
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c33
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.h7
-rw-r--r--drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c2
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h2
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c19
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c4
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c652
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c2
-rw-r--r--drivers/media/platform/nuvoton/npcm-video.c19
-rw-r--r--drivers/media/platform/nxp/dw100/dw100.c8
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h1
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c132
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h5
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c14
-rw-r--r--drivers/media/platform/qcom/camss/Makefile2
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-680.c422
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c4
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.h1
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c131
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c28
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.h1
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-680.c244
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c6
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h1
-rw-r--r--drivers/media/platform/qcom/camss/camss.c359
-rw-r--r--drivers/media/platform/qcom/camss/camss.h1
-rw-r--r--drivers/media/platform/qcom/iris/Makefile4
-rw-r--r--drivers/media/platform/qcom/iris/iris_core.h2
-rw-r--r--drivers/media/platform/qcom/iris/iris_firmware.c4
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_common.h4
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_gen2.c (renamed from drivers/media/platform/qcom/iris/iris_platform_sm8550.c)119
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_qcs8300.h124
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8650.h13
-rw-r--r--drivers/media/platform/qcom/iris/iris_probe.c59
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu2.c1
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu3.c122
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu3x.c275
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.c4
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.h3
-rw-r--r--drivers/media/platform/qcom/venus/core.c16
-rw-r--r--drivers/media/platform/qcom/venus/core.h2
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c38
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c18
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/cfe.c1
-rw-r--r--drivers/media/platform/renesas/Kconfig18
-rw-r--r--drivers/media/platform/renesas/Makefile2
-rw-r--r--drivers/media/platform/renesas/rcar-csi2.c8
-rw-r--r--drivers/media/platform/renesas/rcar-isp/Kconfig18
-rw-r--r--drivers/media/platform/renesas/rcar-isp/Makefile4
-rw-r--r--drivers/media/platform/renesas/rcar-isp/csisp.c (renamed from drivers/media/platform/renesas/rcar-isp.c)57
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-core.c8
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-dma.c182
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c23
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-vin.h41
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c139
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru-regs.h91
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h39
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c165
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c13
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c295
-rw-r--r--drivers/media/platform/renesas/vsp1/Makefile2
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1.h4
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_brx.c9
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_dl.c7
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drm.c30
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drm.h8
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drv.c70
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_entity.c30
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_entity.h3
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_hsit.c11
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_iif.c121
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_iif.h29
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_pipe.c187
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_pipe.h6
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_regs.h8
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_rpf.c38
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_rwpf.c51
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_sru.c9
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_uds.c9
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_video.c50
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_wpf.c53
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c2
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h7
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-capture.c6
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-regs.c1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-lite.c8
-rw-r--r--drivers/media/platform/samsung/exynos4-is/media-dev.h4
-rw-r--r--drivers/media/platform/samsung/s3c-camif/camif-capture.c12
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/regs-mfc-v6.h1
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c14
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h1
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c5
-rw-r--r--drivers/media/platform/st/sti/bdisp/bdisp-debug.c8
-rw-r--r--drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c14
-rw-r--r--drivers/media/platform/st/sti/delta/delta-debug.c8
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmi.c18
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c10
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c14
-rw-r--r--drivers/media/platform/ti/am437x/am437x-vpfe.c2
-rw-r--r--drivers/media/platform/ti/cal/cal-camerarx.c266
-rw-r--r--drivers/media/platform/ti/cal/cal-video.c157
-rw-r--r--drivers/media/platform/ti/cal/cal.c45
-rw-r--r--drivers/media/platform/ti/cal/cal.h3
-rw-r--r--drivers/media/platform/ti/davinci/vpif.c4
-rw-r--r--drivers/media/platform/ti/omap3isp/ispccdc.c8
-rw-r--r--drivers/media/platform/ti/omap3isp/ispstat.c6
-rw-r--r--drivers/media/platform/ti/omap3isp/ispvideo.c19
-rw-r--r--drivers/media/platform/verisilicon/hantro_postproc.c4
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.c1
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c4
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu_hw.c34
-rw-r--r--drivers/media/rc/keymaps/rc-hauppauge.c42
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_channel.c2
-rw-r--r--drivers/media/test-drivers/vim2m.c327
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-cap.c20
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.c8
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c3
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c2
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c7
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-std.c167
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-std.h6
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c91
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c38
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c115
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h5
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c99
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c14
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c7
-rw-r--r--drivers/memory/Kconfig23
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/bt1-l2-ctl.c2
-rw-r--r--drivers/memory/mtk-smi.c52
-rw-r--r--drivers/memory/omap-gpmc.c21
-rw-r--r--drivers/memory/renesas-rpc-if-regs.h147
-rw-r--r--drivers/memory/renesas-rpc-if.c714
-rw-r--r--drivers/memory/renesas-xspi-if-regs.h105
-rw-r--r--drivers/memory/stm32_omm.c479
-rw-r--r--drivers/memory/tegra/Kconfig8
-rw-r--r--drivers/mfd/88pm860x-core.c4
-rw-r--r--drivers/mfd/Kconfig20
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/ab8500-core.c6
-rw-r--r--drivers/mfd/arizona-irq.c3
-rw-r--r--drivers/mfd/db8500-prcmu.c6
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c5
-rw-r--r--drivers/mfd/lp8788-irq.c2
-rw-r--r--drivers/mfd/max77759.c690
-rw-r--r--drivers/mfd/max8925-core.c4
-rw-r--r--drivers/mfd/max8997-irq.c4
-rw-r--r--drivers/mfd/max8998-irq.c2
-rw-r--r--drivers/mfd/mt6358-irq.c6
-rw-r--r--drivers/mfd/mt6397-irq.c6
-rw-r--r--drivers/mfd/qcom-pm8xxx.c6
-rw-r--r--drivers/mfd/stmfx.c2
-rw-r--r--drivers/mfd/stmpe.c4
-rw-r--r--drivers/mfd/tc3589x.c6
-rw-r--r--drivers/mfd/tps65217.c2
-rw-r--r--drivers/mfd/tps6586x.c2
-rw-r--r--drivers/mfd/twl4030-irq.c4
-rw-r--r--drivers/mfd/twl6030-irq.c5
-rw-r--r--drivers/mfd/wm831x-irq.c15
-rw-r--r--drivers/mfd/wm8994-irq.c4
-rw-r--r--drivers/misc/Kconfig12
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/cs5535-mfgpt.c1
-rw-r--r--drivers/misc/hi6421v600-irq.c5
-rw-r--r--drivers/misc/lkdtm/heap.c17
-rw-r--r--drivers/misc/ti_fpc202.c438
-rw-r--r--drivers/misc/uacce/uacce.c40
-rw-r--r--drivers/mmc/core/block.c16
-rw-r--r--drivers/mmc/core/card.h6
-rw-r--r--drivers/mmc/core/core.c48
-rw-r--r--drivers/mmc/core/core.h10
-rw-r--r--drivers/mmc/core/host.h8
-rw-r--r--drivers/mmc/core/mmc.c103
-rw-r--r--drivers/mmc/core/mmc_ops.c6
-rw-r--r--drivers/mmc/core/mmc_ops.h2
-rw-r--r--drivers/mmc/core/mmc_test.c16
-rw-r--r--drivers/mmc/core/queue.c6
-rw-r--r--drivers/mmc/core/quirks.h10
-rw-r--r--drivers/mmc/core/sd.c65
-rw-r--r--drivers/mmc/core/sdio.c6
-rw-r--r--drivers/mmc/core/slot-gpio.c8
-rw-r--r--drivers/mmc/host/Kconfig14
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/alcor.c3
-rw-r--r--drivers/mmc/host/bcm2835.c8
-rw-r--r--drivers/mmc/host/cavium-thunderx.c4
-rw-r--r--drivers/mmc/host/dw_mmc.c6
-rw-r--r--drivers/mmc/host/mtk-sd.c219
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c20
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c243
-rw-r--r--drivers/mmc/host/sdhci-msm.c16
-rw-r--r--drivers/mmc/host/sdhci-of-k1.c304
-rw-r--r--drivers/mmc/host/sdhci-omap.c2
-rw-r--r--drivers/mmc/host/sdhci.c12
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/sunplus-mmc.c2
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c6
-rw-r--r--drivers/mtd/nand/raw/cs553x_nand.c6
-rw-r--r--drivers/net/Kconfig15
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/bareudp.c16
-rw-r--r--drivers/net/bonding/bond_alb.c8
-rw-r--r--drivers/net/bonding/bond_main.c180
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_base.c8
-rw-r--r--drivers/net/can/dev/dev.c12
-rw-r--r--drivers/net/can/dev/netlink.c74
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c4
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c10
-rw-r--r--drivers/net/can/kvaser_pciefd.c6
-rw-r--r--drivers/net/can/m_can/m_can.c8
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c6
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c280
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-core.c4
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-timestamp.c2
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c10
-rw-r--r--drivers/net/can/usb/esd_usb.c6
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.c4
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_fd.c6
-rw-r--r--drivers/net/can/usb/gs_usb.c8
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb.h2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c6
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c6
-rw-r--r--drivers/net/can/xilinx_can.c16
-rw-r--r--drivers/net/dsa/b53/b53_common.c28
-rw-r--r--drivers/net/dsa/b53/b53_priv.h1
-rw-r--r--drivers/net/dsa/b53/b53_regs.h7
-rw-r--r--drivers/net/dsa/bcm_sf2.c1
-rw-r--r--drivers/net/dsa/dsa_loop.c2
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.h2
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c24
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h5
-rw-r--r--drivers/net/dsa/microchip/Kconfig1
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c194
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h4
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c139
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h44
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c30
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.h7
-rw-r--r--drivers/net/dsa/mt7530-mmio.c1
-rw-r--r--drivers/net/dsa/mt7530.c270
-rw-r--r--drivers/net/dsa/mt7530.h60
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c24
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.h16
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c11
-rw-r--r--drivers/net/dsa/ocelot/felix.c11
-rw-r--r--drivers/net/dsa/qca/ar9331.c4
-rw-r--r--drivers/net/dsa/realtek/rtl8365mb.c4
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c6
-rw-r--r--drivers/net/dsa/rzn1_a5psw.c5
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c46
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.h7
-rw-r--r--drivers/net/ethernet/airoha/Kconfig7
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c492
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.h102
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.c178
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.h4
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c485
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe_debugfs.c9
-rw-r--r--drivers/net/ethernet/airoha/airoha_regs.h203
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c2
-rw-r--r--drivers/net/ethernet/amd/pds_core/adminq.c4
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.c7
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h122
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dcb.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c268
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c204
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-platform.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-smn.h30
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h128
-rw-r--r--drivers/net/ethernet/apple/bmac.c60
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.c176
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.h78
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c36
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c13
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h3
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c214
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c277
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h32
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c23
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c2
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c6
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c20
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c5
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c18
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c3
-rw-r--r--drivers/net/ethernet/cortina/gemini.c37
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c2
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c14
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h18
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h2
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c2
-rw-r--r--drivers/net/ethernet/freescale/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c41
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c41
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig12
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c123
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h51
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c90
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h20
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_hw.h12
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_pf.c369
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_cbdr.c50
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c78
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c107
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.h14
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf_common.c93
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf_common.h3
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c12
-rw-r--r--drivers/net/ethernet/freescale/enetc/ntmp.c462
-rw-r--r--drivers/net/ethernet/freescale/enetc/ntmp_private.h104
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c53
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c9
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c8
-rw-r--r--drivers/net/ethernet/huawei/Kconfig1
-rw-r--r--drivers/net/ethernet/huawei/Makefile1
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Kconfig20
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Makefile21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.c53
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.h27
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c25
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h53
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c32
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h13
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h113
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c24
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h81
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h58
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_irq.c62
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.c414
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.h21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_main.c354
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c16
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h15
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h13
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h105
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c78
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c233
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h41
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h82
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h120
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c68
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h54
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.c341
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.h90
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.c670
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.h135
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.c29
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.h76
-rw-r--r--drivers/net/ethernet/ibm/Kconfig13
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c358
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h65
-rw-r--r--drivers/net/ethernet/intel/Kconfig3
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c75
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c7
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c45
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h67
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c207
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc_int.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c71
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c81
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c65
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c82
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sbq_cmd.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c266
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c53
-rw-r--r--drivers/net/ethernet/intel/idpf/Kconfig1
-rw-r--r--drivers/net/ethernet/intel/idpf/Makefile3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h19
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq_api.h3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c14
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c67
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h4
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h13
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c75
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c9
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.c873
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.h362
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c25
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c171
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h18
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c161
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.h84
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c615
-rw-r--r--drivers/net/ethernet/intel/idpf/virtchnl2.h314
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c78
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c20
-rw-r--r--drivers/net/ethernet/intel/igb/igb_xsk.c1
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h16
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h55
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c81
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c90
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c14
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h16
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c247
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.h52
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.c557
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/region.c290
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h24
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c56
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c1339
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c257
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c707
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c51
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c282
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h175
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c21
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c58
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c58
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h88
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c37
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c37
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c42
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c22
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.c12
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_counter.c3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c6
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_path.c45
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c119
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h61
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c127
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c467
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c414
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c1348
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c290
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c716
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c515
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h103
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c191
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c122
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h20
-rw-r--r--drivers/net/ethernet/meta/Kconfig1
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h6
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h34
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c258
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c178
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c311
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h56
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c335
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h48
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c7
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c47
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c10
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c18
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c23
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h1
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c62
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h7
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c6
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h5
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c63
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c27
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c19
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c132
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c33
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c48
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c2
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c99
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h17
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c25
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h52
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c138
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig1
-rw-r--r--drivers/net/ethernet/realtek/r8169.h7
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c434
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c205
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase.h15
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c66
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c11
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c2
-rw-r--r--drivers/net/ethernet/sis/sis900.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c42
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c108
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c146
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c101
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c88
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c58
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h41
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c295
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c89
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h62
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c174
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c157
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c374
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h64
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c28
-rw-r--r--drivers/net/ethernet/ti/cpsw.c26
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c70
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h6
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c24
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h2
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c8
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.h58
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switch_map.h33
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c80
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig4
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c22
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c347
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h5
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c188
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h8
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.c176
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.h77
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.c30
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.c909
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.h18
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h115
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c94
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c5
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h3
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile3
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c385
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h15
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c38
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c23
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c4
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c64
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c206
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c47
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h116
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c8
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c61
-rw-r--r--drivers/net/geneve.c16
-rw-r--r--drivers/net/gtp.c18
-rw-r--r--drivers/net/hamradio/baycom_epp.c5
-rw-r--r--drivers/net/hyperv/netvsc_drv.c6
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.1.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.5.1.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.11.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.2.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.5.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.7.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.9.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.0.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.5.c1
-rw-r--r--drivers/net/ipa/ipa_data.h2
-rw-r--r--drivers/net/ipa/ipa_mem.c21
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c2
-rw-r--r--drivers/net/macvlan.c20
-rw-r--r--drivers/net/mctp/mctp-usb.c2
-rw-r--r--drivers/net/mdio/Kconfig48
-rw-r--r--drivers/net/mdio/Makefile1
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c2
-rw-r--r--drivers/net/mdio/mdio-realtek-rtl9300.c522
-rw-r--r--drivers/net/mdio/mdio-thunder.c10
-rw-r--r--drivers/net/mdio/of_mdio.c2
-rw-r--r--drivers/net/netdevsim/ipsec.c15
-rw-r--r--drivers/net/netdevsim/netdev.c4
-rw-r--r--drivers/net/ovpn/Makefile22
-rw-r--r--drivers/net/ovpn/bind.c55
-rw-r--r--drivers/net/ovpn/bind.h101
-rw-r--r--drivers/net/ovpn/crypto.c210
-rw-r--r--drivers/net/ovpn/crypto.h145
-rw-r--r--drivers/net/ovpn/crypto_aead.c389
-rw-r--r--drivers/net/ovpn/crypto_aead.h29
-rw-r--r--drivers/net/ovpn/io.c458
-rw-r--r--drivers/net/ovpn/io.h34
-rw-r--r--drivers/net/ovpn/main.c279
-rw-r--r--drivers/net/ovpn/main.h14
-rw-r--r--drivers/net/ovpn/netlink-gen.c213
-rw-r--r--drivers/net/ovpn/netlink-gen.h41
-rw-r--r--drivers/net/ovpn/netlink.c1258
-rw-r--r--drivers/net/ovpn/netlink.h18
-rw-r--r--drivers/net/ovpn/ovpnpriv.h55
-rw-r--r--drivers/net/ovpn/peer.c1364
-rw-r--r--drivers/net/ovpn/peer.h163
-rw-r--r--drivers/net/ovpn/pktid.c129
-rw-r--r--drivers/net/ovpn/pktid.h86
-rw-r--r--drivers/net/ovpn/proto.h118
-rw-r--r--drivers/net/ovpn/skb.h61
-rw-r--r--drivers/net/ovpn/socket.c233
-rw-r--r--drivers/net/ovpn/socket.h49
-rw-r--r--drivers/net/ovpn/stats.c21
-rw-r--r--drivers/net/ovpn/stats.h47
-rw-r--r--drivers/net/ovpn/tcp.c598
-rw-r--r--drivers/net/ovpn/tcp.h36
-rw-r--r--drivers/net/ovpn/udp.c449
-rw-r--r--drivers/net/ovpn/udp.h25
-rw-r--r--drivers/net/pfcp.c23
-rw-r--r--drivers/net/phy/Kconfig29
-rw-r--r--drivers/net/phy/Makefile22
-rw-r--r--drivers/net/phy/air_en8811h.c103
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c6
-rw-r--r--drivers/net/phy/as21xxx.c1087
-rw-r--r--drivers/net/phy/bcm87xx.c14
-rw-r--r--drivers/net/phy/dp83640.c13
-rw-r--r--drivers/net/phy/dp83822.c33
-rw-r--r--drivers/net/phy/dp83867.c76
-rw-r--r--drivers/net/phy/fixed_phy.c40
-rw-r--r--drivers/net/phy/icplus.c6
-rw-r--r--drivers/net/phy/marvell-88q2xxx.c111
-rw-r--r--drivers/net/phy/marvell10g.c12
-rw-r--r--drivers/net/phy/mdio_bus.c476
-rw-r--r--drivers/net/phy/mdio_bus_provider.c484
-rw-r--r--drivers/net/phy/mdio_device.c1
-rw-r--r--drivers/net/phy/mediatek/Kconfig20
-rw-r--r--drivers/net/phy/mediatek/Makefile3
-rw-r--r--drivers/net/phy/mediatek/mtk-2p5ge.c321
-rw-r--r--drivers/net/phy/mediatek/mtk-ge-soc.c91
-rw-r--r--drivers/net/phy/micrel.c23
-rw-r--r--drivers/net/phy/microchip.c2
-rw-r--r--drivers/net/phy/microchip_rds_ptp.c5
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c20
-rw-r--r--drivers/net/phy/mxl-86110.c616
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c54
-rw-r--r--drivers/net/phy/nxp-tja11xx.c6
-rw-r--r--drivers/net/phy/phy_device.c102
-rw-r--r--drivers/net/phy/phylink.c7
-rw-r--r--drivers/net/phy/realtek/realtek_main.c337
-rw-r--r--drivers/net/phy/teranetics.c3
-rw-r--r--drivers/net/ppp/ppp_generic.c25
-rw-r--r--drivers/net/tap.c14
-rw-r--r--drivers/net/team/team_core.c2
-rw-r--r--drivers/net/tun.c8
-rw-r--r--drivers/net/usb/Kconfig4
-rw-r--r--drivers/net/usb/aqc111.c10
-rw-r--r--drivers/net/usb/asix.h1
-rw-r--r--drivers/net/usb/asix_common.c22
-rw-r--r--drivers/net/usb/asix_devices.c17
-rw-r--r--drivers/net/usb/lan78xx.c471
-rw-r--r--drivers/net/usb/r8152.c98
-rw-r--r--drivers/net/veth.c57
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c4
-rw-r--r--drivers/net/vrf.c4
-rw-r--r--drivers/net/vxlan/vxlan_core.c560
-rw-r--r--drivers/net/vxlan/vxlan_private.h11
-rw-r--r--drivers/net/vxlan/vxlan_vnifilter.c20
-rw-r--r--drivers/net/wireguard/allowedips.c102
-rw-r--r--drivers/net/wireguard/allowedips.h4
-rw-r--r--drivers/net/wireguard/cookie.c4
-rw-r--r--drivers/net/wireguard/netlink.c47
-rw-r--r--drivers/net/wireguard/noise.c4
-rw-r--r--drivers/net/wireguard/selftest/allowedips.c48
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c32
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c62
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h34
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c22
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c302
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h16
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c25
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h14
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c52
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c14
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c50
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c13
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/Kconfig8
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.c1155
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.h80
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.c103
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.h18
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c329
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h169
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.c497
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.h17
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c154
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h53
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c1097
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.h8
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c596
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h41
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c209
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.c9
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c153
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h80
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h13
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c121
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h27
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c511
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h30
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c1439
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h56
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c9
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c66
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.c5
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c238
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c526
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.h20
-rw-r--r--drivers/net/wireless/ath/ath12k/testmode.c4
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c558
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h119
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c44
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h25
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c308
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h87
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h29
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c23
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c378
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c227
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c92
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c168
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c228
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c169
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/dr.c170
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h192
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c120
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/rs.c137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h498
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-v2.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h)59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c154
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c122
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c237
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h241
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/fw.c121
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.c94
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c95
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.c235
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.c65
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.c105
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.c155
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/emlsr_with_bt.c140
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c144
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/thermal.c89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.c122
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c85
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c155
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c122
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c204
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/hcmd.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c194
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c)200
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c2383
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h114
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c187
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c144
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c292
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c76
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c226
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c174
-rw-r--r--drivers/net/wireless/intersil/p54/fwio.c2
-rw-r--r--drivers/net/wireless/intersil/p54/p54.h1
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c42
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c74
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c21
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c44
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c51
-rw-r--r--drivers/net/wireless/marvell/mwifiex/txrx.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c20
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/channel.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c60
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c48
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/Makefile1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c154
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/testmode.c201
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c196
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c120
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c120
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c195
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/pci.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h51
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c35
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.c61
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723ds.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723du.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.c59
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812au.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.c12
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814ae.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814au.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821au.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821ce.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bu.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c5
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c27
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c63
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.c1037
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.h190
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c418
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c493
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h144
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c174
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c480
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h94
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c58
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h19
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c38
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c36
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c131
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h15
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy_be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c147
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h15
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c46
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c24
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c30
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c32
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c296
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.h19
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h31
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c3
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c12
-rw-r--r--drivers/nfc/s3fwrn5/core.c2
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c2
-rw-r--r--drivers/nfc/s3fwrn5/firmware.h2
-rw-r--r--drivers/nfc/s3fwrn5/i2c.c2
-rw-r--r--drivers/nfc/s3fwrn5/nci.c2
-rw-r--r--drivers/nfc/s3fwrn5/nci.h2
-rw-r--r--drivers/nfc/s3fwrn5/phy_common.c4
-rw-r--r--drivers/nfc/s3fwrn5/phy_common.h4
-rw-r--r--drivers/nfc/s3fwrn5/s3fwrn5.h2
-rw-r--r--drivers/nfc/virtual_ncidev.c2
-rw-r--r--drivers/ntb/msi.c22
-rw-r--r--drivers/nvme/common/auth.c15
-rw-r--r--drivers/nvme/host/Kconfig4
-rw-r--r--drivers/nvme/host/auth.c30
-rw-r--r--drivers/nvme/host/core.c205
-rw-r--r--drivers/nvme/host/fc.c13
-rw-r--r--drivers/nvme/host/multipath.c206
-rw-r--r--drivers/nvme/host/nvme.h31
-rw-r--r--drivers/nvme/host/pci.c300
-rw-r--r--drivers/nvme/host/sysfs.c7
-rw-r--r--drivers/nvme/host/tcp.c138
-rw-r--r--drivers/nvme/target/Kconfig2
-rw-r--r--drivers/nvme/target/admin-cmd.c31
-rw-r--r--drivers/nvme/target/auth.c21
-rw-r--r--drivers/nvme/target/core.c94
-rw-r--r--drivers/nvme/target/discovery.c2
-rw-r--r--drivers/nvme/target/fabrics-cmd.c12
-rw-r--r--drivers/nvme/target/fc.c96
-rw-r--r--drivers/nvme/target/fcloop.c439
-rw-r--r--drivers/nvme/target/loop.c29
-rw-r--r--drivers/nvme/target/nvmet.h24
-rw-r--r--drivers/nvme/target/pci-epf.c14
-rw-r--r--drivers/nvme/target/rdma.c10
-rw-r--r--drivers/nvme/target/tcp.c102
-rw-r--r--drivers/nvmem/Kconfig12
-rw-r--r--drivers/nvmem/Makefile2
-rw-r--r--drivers/nvmem/max77759-nvmem.c145
-rw-r--r--drivers/of/device.c31
-rw-r--r--drivers/of/fdt.c34
-rw-r--r--drivers/of/kexec.c42
-rw-r--r--drivers/of/of_reserved_mem.c80
-rw-r--r--drivers/of/unittest.c10
-rw-r--r--drivers/opp/core.c428
-rw-r--r--drivers/opp/cpu.c30
-rw-r--r--drivers/opp/of.c205
-rw-r--r--drivers/opp/opp.h1
-rw-r--r--drivers/pci/controller/Kconfig3
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c4
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-amd-mdb.c8
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c2
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-host.c11
-rw-r--r--drivers/pci/controller/pci-aardvark.c14
-rw-r--r--drivers/pci/controller/pci-ftpci100.c4
-rw-r--r--drivers/pci/controller/pci-hyperv.c14
-rw-r--r--drivers/pci/controller/pci-mvebu.c6
-rw-r--r--drivers/pci/controller/pci-tegra.c63
-rw-r--r--drivers/pci/controller/pci-xgene-msi.c53
-rw-r--r--drivers/pci/controller/pcie-altera-msi.c4
-rw-r--r--drivers/pci/controller/pcie-altera.c2
-rw-r--r--drivers/pci/controller/pcie-apple.c69
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c4
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c6
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c9
-rw-r--r--drivers/pci/controller/pcie-mediatek.c6
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c4
-rw-r--r--drivers/pci/controller/pcie-xilinx-cpm.c10
-rw-r--r--drivers/pci/controller/pcie-xilinx-dma-pl.c14
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c11
-rw-r--r--drivers/pci/controller/pcie-xilinx.c5
-rw-r--r--drivers/pci/controller/plda/pcie-plda-host.c16
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c2
-rw-r--r--drivers/pci/msi/api.c8
-rw-r--r--drivers/pci/msi/msi.c179
-rw-r--r--drivers/pci/msi/msi.h2
-rw-r--r--drivers/pci/p2pdma.c38
-rw-r--r--drivers/pci/pci.h9
-rw-r--r--drivers/pci/tph.c44
-rw-r--r--drivers/perf/Kconfig2
-rw-r--r--drivers/perf/amlogic/meson_ddr_pmu_core.c2
-rw-r--r--drivers/perf/apple_m1_cpu_pmu.c3
-rw-r--r--drivers/perf/arm-cmn.c18
-rw-r--r--drivers/perf/arm-ni.c40
-rw-r--r--drivers/perf/arm_pmuv3.c3
-rw-r--r--drivers/perf/arm_v6_pmu.c3
-rw-r--r--drivers/perf/arm_v7_pmu.c3
-rw-r--r--drivers/perf/arm_xscale_pmu.c6
-rw-r--r--drivers/pinctrl/Kconfig4
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c8
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c9
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c6
-rw-r--r--drivers/pinctrl/core.c29
-rw-r--r--drivers/pinctrl/freescale/Kconfig11
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx-scmi.c4
-rw-r--r--drivers/pinctrl/mediatek/Kconfig22
-rw-r--r--drivers/pinctrl/mediatek/Makefile2
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c35
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.h7
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-airoha.c19
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c18
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6893.c879
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8196.c1860
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c9
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c15
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt6893.h2283
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8196.h3085
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c29
-rw-r--r--drivers/pinctrl/meson/Kconfig24
-rw-r--r--drivers/pinctrl/meson/pinctrl-amlogic-a4.c22
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c6
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c43
-rw-r--r--drivers/pinctrl/nomadik/Kconfig6
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c12
-rw-r--r--drivers/pinctrl/pinconf.h17
-rw-r--r--drivers/pinctrl/pinctrl-amd.c56
-rw-r--r--drivers/pinctrl/pinctrl-apple-gpio.c30
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c20
-rw-r--r--drivers/pinctrl/pinctrl-at91.c21
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c35
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c17
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c8
-rw-r--r--drivers/pinctrl/pinctrl-keembay.c2
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c8
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c8
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c17
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c8
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c26
-rw-r--r--drivers/pinctrl/pinctrl-scmi.c1
-rw-r--r--drivers/pinctrl/pinctrl-single.c9
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c13
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c23
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c6
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c6
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcm2290.c70
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs615.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs8300.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c6
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c6
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c7
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c7
-rw-r--r--drivers/pinctrl/qcom/tlmm-test.c1
-rw-r--r--drivers/pinctrl/renesas/Kconfig1
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c299
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c52
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c294
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h28
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c34
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h8
-rw-r--r--drivers/pinctrl/spacemit/pinctrl-k1.c10
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c7
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c7
-rw-r--r--drivers/pinctrl/uniphier/Kconfig2
-rw-r--r--drivers/platform/arm64/Kconfig2
-rw-r--r--drivers/platform/arm64/acer-aspire1-ec.c10
-rw-r--r--drivers/platform/arm64/huawei-gaokun-ec.c2
-rw-r--r--drivers/platform/chrome/Kconfig5
-rw-r--r--drivers/platform/chrome/Makefile3
-rw-r--r--drivers/platform/chrome/chromeos_of_hw_prober.c33
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c52
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c24
-rw-r--r--drivers/platform/chrome/cros_ec_proto_test_util.h5
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c6
-rw-r--r--drivers/platform/chrome/cros_kbd_led_backlight.c24
-rw-r--r--drivers/platform/cznic/Kconfig17
-rw-r--r--drivers/platform/cznic/Makefile3
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-base.c4
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-gpio.c21
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-keyctl.c162
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-trng.c17
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu.h33
-rw-r--r--drivers/platform/cznic/turris-signing-key.c193
-rw-r--r--drivers/platform/mellanox/Kconfig13
-rw-r--r--drivers/platform/mellanox/Makefile1
-rw-r--r--drivers/platform/mellanox/mlx-platform.c1546
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c155
-rw-r--r--drivers/platform/mellanox/mlxreg-dpu.c613
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c8
-rw-r--r--drivers/platform/mellanox/nvsw-sn2201.c112
-rw-r--r--drivers/platform/surface/Kconfig2
-rw-r--r--drivers/platform/x86/Kconfig40
-rw-r--r--drivers/platform/x86/Makefile12
-rw-r--r--drivers/platform/x86/acerhdf.c4
-rw-r--r--drivers/platform/x86/amd/Kconfig11
-rw-r--r--drivers/platform/x86/amd/Makefile1
-rw-r--r--drivers/platform/x86/amd/amd_isp4.c311
-rw-r--r--drivers/platform/x86/amd/hsmp/Kconfig2
-rw-r--r--drivers/platform/x86/amd/hsmp/Makefile1
-rw-r--r--drivers/platform/x86/amd/hsmp/acpi.c271
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.c27
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.h9
-rw-r--r--drivers/platform/x86/amd/hsmp/hwmon.c121
-rw-r--r--drivers/platform/x86/amd/hsmp/plat.c10
-rw-r--r--drivers/platform/x86/amd/pmc/mp1_stb.c2
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c3
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c2
-rw-r--r--drivers/platform/x86/amd/pmf/core.c2
-rw-r--r--drivers/platform/x86/asus-wmi.c148
-rw-r--r--drivers/platform/x86/barco-p50-gpio.c10
-rw-r--r--drivers/platform/x86/dasharo-acpi.c360
-rw-r--r--drivers/platform/x86/dell/Kconfig3
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-wmax.c1097
-rw-r--r--drivers/platform/x86/dell/dell-pc.c67
-rw-r--r--drivers/platform/x86/dell/dell-wmi-ddv.c246
-rw-r--r--drivers/platform/x86/eeepc-laptop.c4
-rw-r--r--drivers/platform/x86/intel/ifs/core.c5
-rw-r--r--drivers/platform/x86/intel/ifs/load.c21
-rw-r--r--drivers/platform/x86/intel/ifs/runtest.c17
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c9
-rw-r--r--drivers/platform/x86/intel/int3472/Makefile3
-rw-r--r--drivers/platform/x86/intel/int3472/clk_and_regulator.c173
-rw-r--r--drivers/platform/x86/intel/int3472/common.c9
-rw-r--r--drivers/platform/x86/intel/int3472/common.h131
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c69
-rw-r--r--drivers/platform/x86/intel/int3472/discrete_quirks.c21
-rw-r--r--drivers/platform/x86/intel/int3472/led.c3
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470.c3
-rw-r--r--drivers/platform/x86/intel/pmc/Kconfig4
-rw-r--r--drivers/platform/x86/intel/pmc/Makefile8
-rw-r--r--drivers/platform/x86/intel/pmc/arl.c13
-rw-r--r--drivers/platform/x86/intel/pmc/cnp.c7
-rw-r--r--drivers/platform/x86/intel/pmc/core.c250
-rw-r--r--drivers/platform/x86/intel/pmc/core.h22
-rw-r--r--drivers/platform/x86/intel/pmc/core_ssram.c332
-rw-r--r--drivers/platform/x86/intel/pmc/mtl.c10
-rw-r--r--drivers/platform/x86/intel/pmc/ssram_telemetry.c204
-rw-r--r--drivers/platform/x86/intel/pmc/ssram_telemetry.h24
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c38
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c15
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c106
-rw-r--r--drivers/platform/x86/intel/tpmi_power_domains.c38
-rw-r--r--drivers/platform/x86/intel/tpmi_power_domains.h1
-rw-r--r--drivers/platform/x86/intel/turbo_max_3.c5
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c34
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h20
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c49
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c11
-rw-r--r--drivers/platform/x86/intel/vsec.c9
-rw-r--r--drivers/platform/x86/intel_ips.c36
-rw-r--r--drivers/platform/x86/oxpec.c1054
-rw-r--r--drivers/platform/x86/panasonic-laptop.c4
-rw-r--r--drivers/platform/x86/portwell-ec.c291
-rw-r--r--drivers/platform/x86/silicom-platform.c11
-rw-r--r--drivers/platform/x86/sony-laptop.c175
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c49
-rw-r--r--drivers/platform/x86/topstar-laptop.c4
-rw-r--r--drivers/platform/x86/tuxedo/Kconfig8
-rw-r--r--drivers/platform/x86/tuxedo/Makefile8
-rw-r--r--drivers/platform/x86/tuxedo/nb04/Kconfig17
-rw-r--r--drivers/platform/x86/tuxedo/nb04/Makefile10
-rw-r--r--drivers/platform/x86/tuxedo/nb04/wmi_ab.c923
-rw-r--r--drivers/platform/x86/tuxedo/nb04/wmi_util.c91
-rw-r--r--drivers/platform/x86/tuxedo/nb04/wmi_util.h109
-rw-r--r--drivers/platform/x86/xo15-ebook.c10
-rw-r--r--drivers/pmdomain/amlogic/meson-ee-pwrc.c78
-rw-r--r--drivers/pmdomain/arm/Kconfig6
-rw-r--r--drivers/pmdomain/bcm/bcm2835-power.c16
-rw-r--r--drivers/pmdomain/core.c131
-rw-r--r--drivers/pmdomain/governor.c2
-rw-r--r--drivers/pmdomain/mediatek/mt6893-pm-domains.h585
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.c17
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.h2
-rw-r--r--drivers/pmdomain/qcom/rpmhpd.c16
-rw-r--r--drivers/pmdomain/rockchip/pm-domains.c48
-rw-r--r--drivers/pmdomain/sunxi/Kconfig10
-rw-r--r--drivers/pmdomain/sunxi/Makefile1
-rw-r--r--drivers/pmdomain/sunxi/sun50i-h6-prcm-ppu.c208
-rw-r--r--drivers/pmdomain/ti/omap_prm.c8
-rw-r--r--drivers/pnp/quirks.c2
-rw-r--r--drivers/power/reset/Kconfig13
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-reset.c5
-rw-r--r--drivers/power/reset/reboot-mode.c25
-rw-r--r--drivers/power/reset/syscon-reboot.c98
-rw-r--r--drivers/power/reset/tdx-ec-poweroff.c150
-rw-r--r--drivers/power/supply/Kconfig37
-rw-r--r--drivers/power/supply/Makefile3
-rw-r--r--drivers/power/supply/bq24190_charger.c14
-rw-r--r--drivers/power/supply/bq27xxx_battery.c2
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c13
-rw-r--r--drivers/power/supply/chagall-battery.c291
-rw-r--r--drivers/power/supply/collie_battery.c1
-rw-r--r--drivers/power/supply/cros_charge-control.c23
-rw-r--r--drivers/power/supply/gpio-charger.c4
-rw-r--r--drivers/power/supply/huawei-gaokun-battery.c645
-rw-r--r--drivers/power/supply/max17040_battery.c5
-rw-r--r--drivers/power/supply/max77705_charger.c20
-rw-r--r--drivers/power/supply/max8971_charger.c752
-rw-r--r--drivers/power/supply/power_supply_sysfs.c32
-rw-r--r--drivers/power/supply/qcom_pmi8998_charger.c4
-rw-r--r--drivers/power/supply/rk817_charger.c2
-rw-r--r--drivers/power/supply/rt9471.c12
-rw-r--r--drivers/power/supply/test_power.c21
-rw-r--r--drivers/power/supply/wm831x_power.c20
-rw-r--r--drivers/powercap/intel_rapl_common.c1
-rw-r--r--drivers/powercap/intel_rapl_msr.c7
-rw-r--r--drivers/ptp/Kconfig4
-rw-r--r--drivers/ptp/ptp_chardev.c16
-rw-r--r--drivers/ptp/ptp_clockmatrix.c14
-rw-r--r--drivers/ptp/ptp_fc3.c1
-rw-r--r--drivers/ptp/ptp_idt82p33.c15
-rw-r--r--drivers/ptp/ptp_ocp.c2
-rw-r--r--drivers/pwm/Kconfig113
-rw-r--r--drivers/pwm/Makefile11
-rw-r--r--drivers/pwm/core.c118
-rw-r--r--drivers/pwm/pwm-adp5585.c1
-rw-r--r--drivers/pwm/pwm-loongson.c290
-rw-r--r--drivers/pwm/pwm-mc33xs2410.c391
-rw-r--r--drivers/pwm/pwm-meson.c123
-rw-r--r--drivers/pwm/pwm-pca9685.c8
-rw-r--r--drivers/pwm/pwm-pxa.c18
-rw-r--r--drivers/pwm/pwm-rzg2l-gpt.c447
-rw-r--r--drivers/pwm/pwm-stm32.c15
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c20
-rw-r--r--drivers/rapidio/rio.c103
-rw-r--r--drivers/rapidio/rio.h2
-rw-r--r--drivers/rapidio/rio_cm.c6
-rw-r--r--drivers/ras/amd/atl/internal.h4
-rw-r--r--drivers/regulator/Kconfig23
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/adp5055-regulator.c424
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/da9121-regulator.c2
-rw-r--r--drivers/regulator/gpio-regulator.c10
-rw-r--r--drivers/regulator/max20086-regulator.c4
-rw-r--r--drivers/regulator/pca9450-regulator.c27
-rw-r--r--drivers/regulator/pf9453-regulator.c3
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c69
-rw-r--r--drivers/regulator/rpi-panel-attiny-regulator.c83
-rw-r--r--drivers/regulator/s5m8767.c146
-rw-r--r--drivers/regulator/tps65219-regulator.c242
-rw-r--r--drivers/reset/Kconfig17
-rw-r--r--drivers/reset/Makefile2
-rw-r--r--drivers/reset/reset-rzv2h-usb2phy.c236
-rw-r--r--drivers/reset/reset-th1520.c135
-rw-r--r--drivers/s390/block/dcssblk.c4
-rw-r--r--drivers/s390/char/con3270.c17
-rw-r--r--drivers/s390/char/diag_ftp.c2
-rw-r--r--drivers/s390/char/vmlogrdr.c4
-rw-r--r--drivers/s390/crypto/ap_bus.c74
-rw-r--r--drivers/s390/crypto/ap_bus.h30
-rw-r--r--drivers/s390/crypto/pkey_api.c50
-rw-r--r--drivers/s390/crypto/pkey_base.c34
-rw-r--r--drivers/s390/crypto/pkey_base.h37
-rw-r--r--drivers/s390/crypto/pkey_cca.c136
-rw-r--r--drivers/s390/crypto/pkey_ep11.c117
-rw-r--r--drivers/s390/crypto/pkey_pckmo.c9
-rw-r--r--drivers/s390/crypto/pkey_sysfs.c4
-rw-r--r--drivers/s390/crypto/pkey_uv.c44
-rw-r--r--drivers/s390/crypto/zcrypt_api.c167
-rw-r--r--drivers/s390/crypto/zcrypt_api.h16
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c486
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.h49
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c39
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c454
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.h27
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c36
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c109
-rw-r--r--drivers/s390/net/ctcm_mpc.c2
-rw-r--r--drivers/s390/net/ism_drv.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c14
-rw-r--r--drivers/scsi/Kconfig3
-rw-r--r--drivers/scsi/aha152x.c1
-rw-r--r--drivers/scsi/dc395x.c697
-rw-r--r--drivers/scsi/elx/libefc_sli/sli4.c6
-rw-r--r--drivers/scsi/fnic/fip.c8
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h51
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c81
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c259
-rw-r--r--drivers/scsi/imm.c1
-rw-r--r--drivers/scsi/isci/remote_device.c30
-rw-r--r--drivers/scsi/isci/remote_device.h15
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c136
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c38
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c30
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c2
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c73
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c3
-rw-r--r--drivers/scsi/mvsas/mv_64xx.h4
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c2
-rw-r--r--drivers/scsi/ppa.c1
-rw-r--r--drivers/scsi/qedf/qedf_main.c2
-rw-r--r--drivers/scsi/qedi/qedi_dbg.c22
-rw-r--r--drivers/scsi/qedi/qedi_dbg.h12
-rw-r--r--drivers/scsi/qedi/qedi_gbl.h1
-rw-r--r--drivers/scsi/qedi/qedi_main.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c53
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c90
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c50
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c129
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c5
-rw-r--r--drivers/scsi/scsi_debug.c361
-rw-r--r--drivers/scsi/scsi_devinfo.c27
-rw-r--r--drivers/scsi/scsi_ioctl.c2
-rw-r--r--drivers/scsi/scsi_lib.c6
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_transport_fc.c2
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/sg.c3
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c140
-rw-r--r--drivers/sh/intc/irqdomain.c5
-rw-r--r--drivers/soc/Kconfig2
-rw-r--r--drivers/soc/Makefile2
-rw-r--r--drivers/soc/amlogic/meson-clk-measure.c461
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-snoop.c17
-rw-r--r--drivers/soc/dove/pmu.c7
-rw-r--r--drivers/soc/fsl/Kconfig2
-rw-r--r--drivers/soc/fsl/qbman/qman.c2
-rw-r--r--drivers/soc/fsl/qe/qe_ic.c19
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.c42
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.h2
-rw-r--r--drivers/soc/imx/soc-imx8m.c177
-rw-r--r--drivers/soc/mediatek/mtk-dvfsrc.c53
-rw-r--r--drivers/soc/qcom/ice.c350
-rw-r--r--drivers/soc/qcom/llcc-qcom.c497
-rw-r--r--drivers/soc/qcom/pmic_glink.c4
-rw-r--r--drivers/soc/qcom/pmic_glink_altmode.c30
-rw-r--r--drivers/soc/qcom/qcom_pd_mapper.c11
-rw-r--r--drivers/soc/qcom/smem.c2
-rw-r--r--drivers/soc/qcom/smp2p.c4
-rw-r--r--drivers/soc/qcom/smsm.c2
-rw-r--r--drivers/soc/qcom/socinfo.c1
-rw-r--r--drivers/soc/renesas/Kconfig53
-rw-r--r--drivers/soc/renesas/Makefile1
-rw-r--r--drivers/soc/renesas/r9a09g056-sys.c75
-rw-r--r--drivers/soc/renesas/rz-sysc.c3
-rw-r--r--drivers/soc/renesas/rz-sysc.h1
-rw-r--r--drivers/soc/samsung/exynos-pmu.c78
-rw-r--r--drivers/soc/samsung/exynos-pmu.h1
-rw-r--r--drivers/soc/sophgo/Kconfig34
-rw-r--r--drivers/soc/sophgo/Makefile4
-rw-r--r--drivers/soc/sophgo/cv1800-rtcsys.c63
-rw-r--r--drivers/soc/sophgo/sg2044-topsys.c45
-rw-r--r--drivers/soc/tegra/pmc.c5
-rw-r--r--drivers/soc/ti/k3-ringacc.c2
-rw-r--r--drivers/soc/ti/k3-socinfo.c2
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c3
-rw-r--r--drivers/soc/ti/ti_sci_inta_msi.c10
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c8
-rw-r--r--drivers/soc/vt8500/Kconfig19
-rw-r--r--drivers/soc/vt8500/Makefile2
-rw-r--r--drivers/soc/vt8500/wmt-socinfo.c125
-rw-r--r--drivers/spi/Kconfig4
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/atmel-quadspi.c26
-rw-r--r--drivers/spi/spi-amd-pci.c70
-rw-r--r--drivers/spi/spi-amd.c227
-rw-r--r--drivers/spi/spi-amd.h44
-rw-r--r--drivers/spi/spi-axi-spi-engine.c91
-rw-r--r--drivers/spi/spi-cadence-quadspi.c2
-rw-r--r--drivers/spi/spi-cavium-thunderx.c4
-rw-r--r--drivers/spi/spi-cs42l43.c4
-rw-r--r--drivers/spi/spi-dw-core.c2
-rw-r--r--drivers/spi/spi-fsl-dspi.c46
-rw-r--r--drivers/spi/spi-fsl-qspi.c81
-rw-r--r--drivers/spi/spi-gpio.c2
-rw-r--r--drivers/spi/spi-intel-pci.c8
-rw-r--r--drivers/spi/spi-intel-platform.c9
-rw-r--r--drivers/spi/spi-intel.c9
-rw-r--r--drivers/spi/spi-intel.h4
-rw-r--r--drivers/spi/spi-loopback-test.c8
-rw-r--r--drivers/spi/spi-meson-spicc.c241
-rw-r--r--drivers/spi/spi-nxp-fspi.c189
-rw-r--r--drivers/spi/spi-offload.c5
-rw-r--r--drivers/spi/spi-pci1xxxx.c24
-rw-r--r--drivers/spi/spi-qpic-snand.c166
-rw-r--r--drivers/spi/spi-rpc-if.c16
-rw-r--r--drivers/spi/spi-sh-msiof.c397
-rw-r--r--drivers/spi/spi-stm32-ospi.c2
-rw-r--r--drivers/spi/spi-tegra210-quad.c280
-rw-r--r--drivers/spi/spi-xcomm.c8
-rw-r--r--drivers/spi/spi.c19
-rw-r--r--drivers/ssb/driver_gpio.c8
-rw-r--r--drivers/staging/gpib/common/iblib.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig12
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile1
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c1612
-rw-r--r--drivers/staging/media/atomisp/i2c/mt9m114.h1768
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c73
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h244
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c92
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_internal.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c60
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/irq_global.h2
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h2
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c3
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-h264.c64
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.c239
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.h18
-rw-r--r--drivers/staging/media/starfive/camss/stf-isp.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c2
-rw-r--r--drivers/target/target_core_configfs.c20
-rw-r--r--drivers/target/target_core_device.c89
-rw-r--r--drivers/target/target_core_spc.c134
-rw-r--r--drivers/target/target_core_stat.c69
-rw-r--r--drivers/target/target_core_transport.c119
-rw-r--r--drivers/tee/amdtee/core.c16
-rw-r--r--drivers/tee/optee/smc_abi.c3
-rw-r--r--drivers/tee/tee_core.c11
-rw-r--r--drivers/thermal/Kconfig11
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/airoha_thermal.c489
-rw-r--r--drivers/thermal/amlogic_thermal.c16
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/Makefile1
-rw-r--r--drivers/thermal/intel/int340x_thermal/platform_temperature_control.c243
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c18
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h3
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c5
-rw-r--r--drivers/thermal/intel/intel_hfi.c14
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c4
-rw-r--r--drivers/thermal/intel/intel_tcc_cooling.c5
-rw-r--r--drivers/thermal/intel/therm_throt.c10
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c1
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c18
-rw-r--r--drivers/thermal/qcom/lmh.c3
-rw-r--r--drivers/thermal/qcom/tsens-v1.c62
-rw-r--r--drivers/thermal/qcom/tsens.c27
-rw-r--r--drivers/thermal/qcom/tsens.h4
-rw-r--r--drivers/thermal/tegra/soctherm.c2
-rw-r--r--drivers/ufs/core/ufs-mcq.c6
-rw-r--r--drivers/ufs/core/ufs-sysfs.c133
-rw-r--r--drivers/ufs/core/ufshcd.c103
-rw-r--r--drivers/ufs/host/ufs-qcom.c266
-rw-r--r--drivers/ufs/host/ufs-qcom.h11
-rw-r--r--drivers/usb/atm/cxacru.c2
-rw-r--r--drivers/usb/misc/usbtest.c2
-rw-r--r--drivers/usb/storage/usb.c20
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c2
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa_main.c17
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c121
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h14
-rw-r--r--drivers/vfio/pci/mlx5/cmd.c371
-rw-r--r--drivers/vfio/pci/mlx5/cmd.h35
-rw-r--r--drivers/vfio/pci/mlx5/main.c87
-rw-r--r--drivers/vfio/vfio_iommu_type1.c51
-rw-r--r--drivers/vhost/net.c30
-rw-r--r--drivers/vhost/scsi.c190
-rw-r--r--drivers/vhost/vhost.c28
-rw-r--r--drivers/vhost/vringh.c19
-rw-r--r--drivers/video/console/vgacon.c2
-rw-r--r--drivers/video/fbdev/arkfb.c5
-rw-r--r--drivers/video/fbdev/carminefb.c8
-rw-r--r--drivers/video/fbdev/carminefb.h2
-rw-r--r--drivers/video/fbdev/core/fbcon.c7
-rw-r--r--drivers/video/fbdev/core/fbcvt.c2
-rw-r--r--drivers/video/fbdev/core/fbmem.c22
-rw-r--r--drivers/video/fbdev/geode/display_gx.c1
-rw-r--r--drivers/video/fbdev/geode/gxfb_core.c3
-rw-r--r--drivers/video/fbdev/geode/lxfb_ops.c23
-rw-r--r--drivers/video/fbdev/geode/suspend_gx.c10
-rw-r--r--drivers/video/fbdev/geode/video_gx.c16
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c2
-rw-r--r--drivers/video/fbdev/via/via-gpio.c10
-rw-r--r--drivers/video/screen_info_generic.c36
-rw-r--r--drivers/virt/coco/Kconfig6
-rw-r--r--drivers/virt/coco/Makefile2
-rw-r--r--drivers/virt/coco/arm-cca-guest/arm-cca-guest.c8
-rw-r--r--drivers/virt/coco/guest/Kconfig17
-rw-r--r--drivers/virt/coco/guest/Makefile4
-rw-r--r--drivers/virt/coco/guest/report.c (renamed from drivers/virt/coco/tsm.c)63
-rw-r--r--drivers/virt/coco/guest/tsm-mr.c251
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c12
-rw-r--r--drivers/virt/coco/tdx-guest/Kconfig1
-rw-r--r--drivers/virt/coco/tdx-guest/tdx-guest.c259
-rw-r--r--drivers/virtio/Kconfig64
-rw-r--r--drivers/virtio/Makefile5
-rw-r--r--drivers/virtio/virtio_pci_modern.c13
-rw-r--r--drivers/virtio/virtio_rtc_arm.c23
-rw-r--r--drivers/virtio/virtio_rtc_class.c262
-rw-r--r--drivers/virtio/virtio_rtc_driver.c1407
-rw-r--r--drivers/virtio/virtio_rtc_internal.h122
-rw-r--r--drivers/virtio/virtio_rtc_ptp.c347
-rw-r--r--drivers/w1/slaves/w1_ds2406.c12
-rw-r--r--drivers/watchdog/diag288_wdt.c53
-rw-r--r--drivers/watchdog/exar_wdt.c2
-rw-r--r--drivers/xen/balloon.c13
-rw-r--r--drivers/xen/swiotlb-xen.c1
4462 files changed, 212654 insertions, 86669 deletions
diff --git a/drivers/accel/amdxdna/TODO b/drivers/accel/amdxdna/TODO
index 5119bccd1917..ad8ac6e315b6 100644
--- a/drivers/accel/amdxdna/TODO
+++ b/drivers/accel/amdxdna/TODO
@@ -1,3 +1,2 @@
-- Add import and export BO support
- Add debugfs support
- Add debug BO support
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
index 00d215ac866e..e04549f64d69 100644
--- a/drivers/accel/amdxdna/aie2_ctx.c
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -758,27 +758,42 @@ int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *bu
static int aie2_populate_range(struct amdxdna_gem_obj *abo)
{
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
- struct mm_struct *mm = abo->mem.notifier.mm;
- struct hmm_range range = { 0 };
+ struct amdxdna_umap *mapp;
unsigned long timeout;
+ struct mm_struct *mm;
+ bool found;
int ret;
- XDNA_INFO_ONCE(xdna, "populate memory range %llx size %lx",
- abo->mem.userptr, abo->mem.size);
- range.notifier = &abo->mem.notifier;
- range.start = abo->mem.userptr;
- range.end = abo->mem.userptr + abo->mem.size;
- range.hmm_pfns = abo->mem.pfns;
- range.default_flags = HMM_PFN_REQ_FAULT;
+ timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+again:
+ found = false;
+ down_write(&xdna->notifier_lock);
+ list_for_each_entry(mapp, &abo->mem.umap_list, node) {
+ if (mapp->invalid) {
+ found = true;
+ break;
+ }
+ }
- if (!mmget_not_zero(mm))
+ if (!found) {
+ abo->mem.map_invalid = false;
+ up_write(&xdna->notifier_lock);
+ return 0;
+ }
+ kref_get(&mapp->refcnt);
+ up_write(&xdna->notifier_lock);
+
+ XDNA_DBG(xdna, "populate memory range %lx %lx",
+ mapp->vma->vm_start, mapp->vma->vm_end);
+ mm = mapp->notifier.mm;
+ if (!mmget_not_zero(mm)) {
+ amdxdna_umap_put(mapp);
return -EFAULT;
+ }
- timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
-again:
- range.notifier_seq = mmu_interval_read_begin(&abo->mem.notifier);
+ mapp->range.notifier_seq = mmu_interval_read_begin(&mapp->notifier);
mmap_read_lock(mm);
- ret = hmm_range_fault(&range);
+ ret = hmm_range_fault(&mapp->range);
mmap_read_unlock(mm);
if (ret) {
if (time_after(jiffies, timeout)) {
@@ -786,21 +801,27 @@ again:
goto put_mm;
}
- if (ret == -EBUSY)
+ if (ret == -EBUSY) {
+ amdxdna_umap_put(mapp);
goto again;
+ }
goto put_mm;
}
- down_read(&xdna->notifier_lock);
- if (mmu_interval_read_retry(&abo->mem.notifier, range.notifier_seq)) {
- up_read(&xdna->notifier_lock);
+ down_write(&xdna->notifier_lock);
+ if (mmu_interval_read_retry(&mapp->notifier, mapp->range.notifier_seq)) {
+ up_write(&xdna->notifier_lock);
+ amdxdna_umap_put(mapp);
goto again;
}
- abo->mem.map_invalid = false;
- up_read(&xdna->notifier_lock);
+ mapp->invalid = false;
+ up_write(&xdna->notifier_lock);
+ amdxdna_umap_put(mapp);
+ goto again;
put_mm:
+ amdxdna_umap_put(mapp);
mmput(mm);
return ret;
}
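
The rewritten aie2_populate_range() above walks the per-VMA map list and re-faults each invalidated range using the standard notifier-sequence protocol: sample a sequence number, fault the pages, then retry if the notifier fired in between (and loop again afterwards to catch any other invalidated mapping). Below is a minimal userspace sketch of that begin/work/retry idiom, with an atomic counter standing in for mmu_interval_read_begin()/mmu_interval_read_retry(); the two-thread setup and all names are illustrative, not part of the driver.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_uint seq;            /* stands in for the notifier sequence   */
static int shared_data;            /* stands in for the populated page list */

static void *invalidator(void *arg)
{
	(void)arg;
	usleep(1000);
	atomic_fetch_add(&seq, 1);     /* like mmu_interval_set_seq()       */
	shared_data = 42;
	return NULL;
}

int main(void)
{
	pthread_t t;
	unsigned int begin;
	int copy;

	pthread_create(&t, NULL, invalidator, NULL);
again:
	begin = atomic_load(&seq);     /* mmu_interval_read_begin()         */
	copy = shared_data;            /* hmm_range_fault() happens here    */
	usleep(2000);
	if (atomic_load(&seq) != begin) /* mmu_interval_read_retry()        */
		goto again;
	printf("stable snapshot: %d\n", copy);
	pthread_join(t, NULL);
	return 0;
}
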
@@ -908,10 +929,6 @@ void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo,
struct drm_gem_object *gobj = to_gobj(abo);
long ret;
- down_write(&xdna->notifier_lock);
- abo->mem.map_invalid = true;
- mmu_interval_set_seq(&abo->mem.notifier, cur_seq);
- up_write(&xdna->notifier_lock);
ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
true, MAX_SCHEDULE_TIMEOUT);
if (!ret || ret == -ERESTARTSYS)
diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
index bf4219e32cc1..82412eec9a4b 100644
--- a/drivers/accel/amdxdna/aie2_message.c
+++ b/drivers/accel/amdxdna/aie2_message.c
@@ -525,7 +525,7 @@ aie2_cmdlist_fill_one_slot_cf(void *cmd_buf, u32 offset,
if (!payload)
return -EINVAL;
- if (!slot_cf_has_space(offset, payload_len))
+ if (!slot_has_space(*buf, offset, payload_len))
return -ENOSPC;
buf->cu_idx = cu_idx;
@@ -558,7 +558,7 @@ aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset,
if (payload_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE)
return -EINVAL;
- if (!slot_dpu_has_space(offset, arg_sz))
+ if (!slot_has_space(*buf, offset, arg_sz))
return -ENOSPC;
buf->inst_buf_addr = sn->buffer;
@@ -569,7 +569,7 @@ aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset,
memcpy(buf->args, sn->prop_args, arg_sz);
/* Accurate buf size to hint firmware to do necessary copy */
- *size += sizeof(*buf) + arg_sz;
+ *size = sizeof(*buf) + arg_sz;
return 0;
}
diff --git a/drivers/accel/amdxdna/aie2_msg_priv.h b/drivers/accel/amdxdna/aie2_msg_priv.h
index 4e02e744b470..6df9065b13f6 100644
--- a/drivers/accel/amdxdna/aie2_msg_priv.h
+++ b/drivers/accel/amdxdna/aie2_msg_priv.h
@@ -319,18 +319,16 @@ struct async_event_msg_resp {
} __packed;
#define MAX_CHAIN_CMDBUF_SIZE SZ_4K
-#define slot_cf_has_space(offset, payload_size) \
- (MAX_CHAIN_CMDBUF_SIZE - ((offset) + (payload_size)) > \
- offsetof(struct cmd_chain_slot_execbuf_cf, args[0]))
+#define slot_has_space(slot, offset, payload_size) \
+ (MAX_CHAIN_CMDBUF_SIZE >= (offset) + (payload_size) + \
+ sizeof(typeof(slot)))
+
struct cmd_chain_slot_execbuf_cf {
__u32 cu_idx;
__u32 arg_cnt;
__u32 args[] __counted_by(arg_cnt);
};
-#define slot_dpu_has_space(offset, payload_size) \
- (MAX_CHAIN_CMDBUF_SIZE - ((offset) + (payload_size)) > \
- offsetof(struct cmd_chain_slot_dpu, args[0]))
struct cmd_chain_slot_dpu {
__u64 inst_buf_addr;
__u32 inst_size;
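
Folding the two slot_*_has_space() macros into one slot_has_space() also replaces the subtraction with an addition on the right-hand side of the comparison, which avoids unsigned wraparound when offset + payload_size already exceeds MAX_CHAIN_CMDBUF_SIZE. A standalone sketch of the two checks, with a simplified slot struct and made-up sizes:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define BUF_SIZE 4096u                       /* MAX_CHAIN_CMDBUF_SIZE     */

struct slot { uint32_t cu_idx; uint32_t arg_cnt; uint32_t args[]; };

/* Old style: the unsigned subtraction wraps when off + len > BUF_SIZE. */
static int old_has_space(uint32_t off, uint32_t len)
{
	return BUF_SIZE - (off + len) > offsetof(struct slot, args);
}

/* New style: no subtraction, so an oversized request fails as expected. */
static int new_has_space(uint32_t off, uint32_t len)
{
	return BUF_SIZE >= off + len + sizeof(struct slot);
}

int main(void)
{
	uint32_t off = 4000, len = 200;      /* runs past the buffer end   */

	printf("old check: %d (wraparound false positive)\n",
	       old_has_space(off, len));
	printf("new check: %d\n", new_has_space(off, len));
	return 0;
}
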
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
index 5a058e565b01..c6cf7068d23c 100644
--- a/drivers/accel/amdxdna/aie2_pci.c
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -512,12 +512,6 @@ static int aie2_init(struct amdxdna_dev *xdna)
goto release_fw;
}
- ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
- if (ret) {
- XDNA_ERR(xdna, "Enable PASID failed, ret %d", ret);
- goto free_irq;
- }
-
psp_conf.fw_size = fw->size;
psp_conf.fw_buf = fw->data;
for (i = 0; i < PSP_MAX_REGS; i++)
@@ -526,14 +520,14 @@ static int aie2_init(struct amdxdna_dev *xdna)
if (!ndev->psp_hdl) {
XDNA_ERR(xdna, "failed to create psp");
ret = -ENOMEM;
- goto disable_sva;
+ goto free_irq;
}
xdna->dev_handle = ndev;
ret = aie2_hw_start(xdna);
if (ret) {
XDNA_ERR(xdna, "start npu failed, ret %d", ret);
- goto disable_sva;
+ goto free_irq;
}
ret = aie2_mgmt_fw_query(ndev);
@@ -584,8 +578,6 @@ async_event_free:
aie2_error_async_events_free(ndev);
stop_hw:
aie2_hw_stop(xdna);
-disable_sva:
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
free_irq:
pci_free_irq_vectors(pdev);
release_fw:
@@ -601,7 +593,6 @@ static void aie2_fini(struct amdxdna_dev *xdna)
aie2_hw_stop(xdna);
aie2_error_async_events_free(ndev);
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
pci_free_irq_vectors(pdev);
}
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
index 43442b9e273b..be073224bd69 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.c
+++ b/drivers/accel/amdxdna/amdxdna_ctx.c
@@ -496,11 +496,11 @@ static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
struct amdxdna_drm_exec_cmd *args)
{
struct amdxdna_dev *xdna = client->xdna;
- u32 *arg_bo_hdls;
+ u32 *arg_bo_hdls = NULL;
u32 cmd_bo_hdl;
int ret;
- if (!args->arg_count || args->arg_count > MAX_ARG_COUNT) {
+ if (args->arg_count > MAX_ARG_COUNT) {
XDNA_ERR(xdna, "Invalid arg bo count %d", args->arg_count);
return -EINVAL;
}
@@ -512,14 +512,16 @@ static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
}
cmd_bo_hdl = (u32)args->cmd_handles;
- arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL);
- if (!arg_bo_hdls)
- return -ENOMEM;
- ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args),
- args->arg_count * sizeof(u32));
- if (ret) {
- ret = -EFAULT;
- goto free_cmd_bo_hdls;
+ if (args->arg_count) {
+ arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL);
+ if (!arg_bo_hdls)
+ return -ENOMEM;
+ ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args),
+ args->arg_count * sizeof(u32));
+ if (ret) {
+ ret = -EFAULT;
+ goto free_cmd_bo_hdls;
+ }
}
ret = amdxdna_cmd_submit(client, cmd_bo_hdl, arg_bo_hdls,
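
The execbuf path above previously rejected arg_count == 0 outright; now the handle array is allocated and copied from userspace only when the caller actually passed arguments, and a NULL array is forwarded otherwise. A hedged userspace analogue of the same allocate-only-when-needed pattern, where memcpy stands in for copy_from_user and all names are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

static int submit(const uint32_t *user_args, uint32_t arg_count)
{
	uint32_t *args = NULL;

	if (arg_count > 4096)                /* MAX_ARG_COUNT stand-in    */
		return -1;

	if (arg_count) {                     /* allocate only when used   */
		args = calloc(arg_count, sizeof(*args));
		if (!args)
			return -1;
		memcpy(args, user_args, arg_count * sizeof(*args));
	}

	printf("submitting with %u args (%p)\n", arg_count, (void *)args);
	free(args);                          /* free(NULL) is a no-op     */
	return 0;
}

int main(void)
{
	uint32_t three[] = { 1, 2, 3 };

	submit(three, 3);
	submit(NULL, 0);                     /* now a valid submission    */
	return 0;
}
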
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
index 606433d73236..26831ec69f89 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.c
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -9,7 +9,10 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-direct.h>
#include <linux/iosys-map.h>
+#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include "amdxdna_ctx.h"
@@ -18,6 +21,8 @@
#define XDNA_MAX_CMD_BO_SIZE SZ_32K
+MODULE_IMPORT_NS("DMA_BUF");
+
static int
amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
{
@@ -55,57 +60,38 @@ amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
return 0;
}
-static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
-{
- struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
- struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
-
- XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
- if (abo->pinned)
- amdxdna_gem_unpin(abo);
-
- if (abo->type == AMDXDNA_BO_DEV) {
- mutex_lock(&abo->client->mm_lock);
- drm_mm_remove_node(&abo->mm_node);
- mutex_unlock(&abo->client->mm_lock);
-
- vunmap(abo->mem.kva);
- drm_gem_object_put(to_gobj(abo->dev_heap));
- drm_gem_object_release(gobj);
- mutex_destroy(&abo->lock);
- kfree(abo);
- return;
- }
-
- if (abo->type == AMDXDNA_BO_DEV_HEAP)
- drm_mm_takedown(&abo->mm);
-
- drm_gem_vunmap_unlocked(gobj, &map);
- mutex_destroy(&abo->lock);
- drm_gem_shmem_free(&abo->base);
-}
-
-static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
- .free = amdxdna_gem_obj_free,
-};
-
static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
{
- struct amdxdna_gem_obj *abo = container_of(mni, struct amdxdna_gem_obj,
- mem.notifier);
- struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct amdxdna_umap *mapp = container_of(mni, struct amdxdna_umap, notifier);
+ struct amdxdna_gem_obj *abo = mapp->abo;
+ struct amdxdna_dev *xdna;
- XDNA_DBG(xdna, "Invalid range 0x%llx, 0x%lx, type %d",
- abo->mem.userptr, abo->mem.size, abo->type);
+ xdna = to_xdna_dev(to_gobj(abo)->dev);
+ XDNA_DBG(xdna, "Invalidating range 0x%lx, 0x%lx, type %d",
+ mapp->vma->vm_start, mapp->vma->vm_end, abo->type);
if (!mmu_notifier_range_blockable(range))
return false;
+ down_write(&xdna->notifier_lock);
+ abo->mem.map_invalid = true;
+ mapp->invalid = true;
+ mmu_interval_set_seq(&mapp->notifier, cur_seq);
+ up_write(&xdna->notifier_lock);
+
xdna->dev_info->ops->hmm_invalidate(abo, cur_seq);
+ if (range->event == MMU_NOTIFY_UNMAP) {
+ down_write(&xdna->notifier_lock);
+ if (!mapp->unmapped) {
+ queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
+ mapp->unmapped = true;
+ }
+ up_write(&xdna->notifier_lock);
+ }
+
return true;
}
@@ -113,102 +99,310 @@ static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = {
.invalidate = amdxdna_hmm_invalidate,
};
-static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo)
+static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo,
+ struct vm_area_struct *vma)
{
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct amdxdna_umap *mapp;
+
+ down_read(&xdna->notifier_lock);
+ list_for_each_entry(mapp, &abo->mem.umap_list, node) {
+ if (!vma || mapp->vma == vma) {
+ if (!mapp->unmapped) {
+ queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
+ mapp->unmapped = true;
+ }
+ if (vma)
+ break;
+ }
+ }
+ up_read(&xdna->notifier_lock);
+}
- if (!xdna->dev_info->ops->hmm_invalidate)
- return;
+static void amdxdna_umap_release(struct kref *ref)
+{
+ struct amdxdna_umap *mapp = container_of(ref, struct amdxdna_umap, refcnt);
+ struct vm_area_struct *vma = mapp->vma;
+ struct amdxdna_dev *xdna;
+
+ mmu_interval_notifier_remove(&mapp->notifier);
+ if (is_import_bo(mapp->abo) && vma->vm_file && vma->vm_file->f_mapping)
+ mapping_clear_unevictable(vma->vm_file->f_mapping);
+
+ xdna = to_xdna_dev(to_gobj(mapp->abo)->dev);
+ down_write(&xdna->notifier_lock);
+ list_del(&mapp->node);
+ up_write(&xdna->notifier_lock);
+
+ kvfree(mapp->range.hmm_pfns);
+ kfree(mapp);
+}
+
+void amdxdna_umap_put(struct amdxdna_umap *mapp)
+{
+ kref_put(&mapp->refcnt, amdxdna_umap_release);
+}
+
+static void amdxdna_hmm_unreg_work(struct work_struct *work)
+{
+ struct amdxdna_umap *mapp = container_of(work, struct amdxdna_umap,
+ hmm_unreg_work);
- mmu_interval_notifier_remove(&abo->mem.notifier);
- kvfree(abo->mem.pfns);
- abo->mem.pfns = NULL;
+ amdxdna_umap_put(mapp);
}
-static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo, unsigned long addr,
- size_t len)
+static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo,
+ struct vm_area_struct *vma)
{
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ unsigned long len = vma->vm_end - vma->vm_start;
+ unsigned long addr = vma->vm_start;
+ struct amdxdna_umap *mapp;
u32 nr_pages;
int ret;
if (!xdna->dev_info->ops->hmm_invalidate)
return 0;
- if (abo->mem.pfns)
- return -EEXIST;
+ mapp = kzalloc(sizeof(*mapp), GFP_KERNEL);
+ if (!mapp)
+ return -ENOMEM;
nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
- abo->mem.pfns = kvcalloc(nr_pages, sizeof(*abo->mem.pfns),
- GFP_KERNEL);
- if (!abo->mem.pfns)
- return -ENOMEM;
+ mapp->range.hmm_pfns = kvcalloc(nr_pages, sizeof(*mapp->range.hmm_pfns),
+ GFP_KERNEL);
+ if (!mapp->range.hmm_pfns) {
+ ret = -ENOMEM;
+ goto free_map;
+ }
- ret = mmu_interval_notifier_insert_locked(&abo->mem.notifier,
+ ret = mmu_interval_notifier_insert_locked(&mapp->notifier,
current->mm,
addr,
len,
&amdxdna_hmm_ops);
if (ret) {
XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret);
- kvfree(abo->mem.pfns);
+ goto free_pfns;
}
- abo->mem.userptr = addr;
+ mapp->range.notifier = &mapp->notifier;
+ mapp->range.start = vma->vm_start;
+ mapp->range.end = vma->vm_end;
+ mapp->range.default_flags = HMM_PFN_REQ_FAULT;
+ mapp->vma = vma;
+ mapp->abo = abo;
+ kref_init(&mapp->refcnt);
+
+ if (abo->mem.userptr == AMDXDNA_INVALID_ADDR)
+ abo->mem.userptr = addr;
+ INIT_WORK(&mapp->hmm_unreg_work, amdxdna_hmm_unreg_work);
+ if (is_import_bo(abo) && vma->vm_file && vma->vm_file->f_mapping)
+ mapping_set_unevictable(vma->vm_file->f_mapping);
+
+ down_write(&xdna->notifier_lock);
+ list_add_tail(&mapp->node, &abo->mem.umap_list);
+ up_write(&xdna->notifier_lock);
+
+ return 0;
+
+free_pfns:
+ kvfree(mapp->range.hmm_pfns);
+free_map:
+ kfree(mapp);
return ret;
}
+static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
+ struct vm_area_struct *vma)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ unsigned long num_pages = vma_pages(vma);
+ unsigned long offset = 0;
+ int ret;
+
+ if (!is_import_bo(abo)) {
+ ret = drm_gem_shmem_mmap(&abo->base, vma);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed shmem mmap %d", ret);
+ return ret;
+ }
+
+ /* The buffer is based on memory pages. Fix the flag. */
+ vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
+ ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
+ &num_pages);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed insert pages %d", ret);
+ vma->vm_ops->close(vma);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ vma->vm_private_data = NULL;
+ vma->vm_ops = NULL;
+ ret = dma_buf_mmap(to_gobj(abo)->dma_buf, vma, 0);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret);
+ return ret;
+ }
+
+ do {
+ vm_fault_t fault_ret;
+
+ fault_ret = handle_mm_fault(vma, vma->vm_start + offset,
+ FAULT_FLAG_WRITE, NULL);
+ if (fault_ret & VM_FAULT_ERROR) {
+ vma->vm_ops->close(vma);
+ XDNA_ERR(xdna, "Fault in page failed");
+ return -EFAULT;
+ }
+
+ offset += PAGE_SIZE;
+ } while (--num_pages);
+
+ /* Drop the reference drm_gem_mmap_obj() acquired.*/
+ drm_gem_object_put(to_gobj(abo));
+
+ return 0;
+}
+
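
For imported buffers the mmap path cannot use vm_insert_pages(), so after dma_buf_mmap() the code above eagerly faults in every page with handle_mm_fault() so the memory is resident before the NPU touches it. Userspace can get a similar eager-population effect on a mapping of its own; the sketch below uses memfd_create() purely as a stand-in for a dma-buf fd, and the helper name is invented:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Map 'len' bytes of 'fd' and touch each page so it is faulted in. */
static void *map_and_prefault(int fd, size_t len)
{
	volatile unsigned char *p;

	/* MAP_POPULATE asks the kernel to prefault the whole mapping. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_POPULATE, fd, 0);
	if (p == MAP_FAILED)
		return NULL;

	/* Touch every page anyway, mirroring the driver's fault loop. */
	for (size_t off = 0; off < len; off += (size_t)sysconf(_SC_PAGESIZE))
		p[off] = p[off];

	return (void *)p;
}

int main(void)
{
	int fd = memfd_create("demo", 0);    /* stand-in for a dma-buf fd */

	if (fd < 0 || ftruncate(fd, 1 << 16))
		return 1;
	return map_and_prefault(fd, 1 << 16) ? 0 : 1;
}
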
static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj,
struct vm_area_struct *vma)
{
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
- unsigned long num_pages;
int ret;
- ret = amdxdna_hmm_register(abo, vma->vm_start, gobj->size);
+ ret = amdxdna_hmm_register(abo, vma);
if (ret)
return ret;
+ ret = amdxdna_insert_pages(abo, vma);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed insert pages, ret %d", ret);
+ goto hmm_unreg;
+ }
+
+ XDNA_DBG(xdna, "BO map_offset 0x%llx type %d userptr 0x%lx size 0x%lx",
+ drm_vma_node_offset_addr(&gobj->vma_node), abo->type,
+ vma->vm_start, gobj->size);
+ return 0;
+
+hmm_unreg:
+ amdxdna_hmm_unregister(abo, vma);
+ return ret;
+}
+
+static int amdxdna_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *gobj = dma_buf->priv;
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ unsigned long num_pages = vma_pages(vma);
+ int ret;
+
+ vma->vm_ops = &drm_gem_shmem_vm_ops;
+ vma->vm_private_data = gobj;
+
+ drm_gem_object_get(gobj);
ret = drm_gem_shmem_mmap(&abo->base, vma);
if (ret)
- goto hmm_unreg;
+ goto put_obj;
- num_pages = gobj->size >> PAGE_SHIFT;
- /* Try to insert the pages */
+ /* The buffer is based on memory pages. Fix the flag. */
vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
- ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages, &num_pages);
+ ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
+ &num_pages);
if (ret)
- XDNA_ERR(abo->client->xdna, "Failed insert pages, ret %d", ret);
+ goto close_vma;
return 0;
-hmm_unreg:
- amdxdna_hmm_unregister(abo);
+close_vma:
+ vma->vm_ops->close(vma);
+put_obj:
+ drm_gem_object_put(gobj);
return ret;
}
-static vm_fault_t amdxdna_gem_vm_fault(struct vm_fault *vmf)
+static const struct dma_buf_ops amdxdna_dmabuf_ops = {
+ .attach = drm_gem_map_attach,
+ .detach = drm_gem_map_detach,
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .mmap = amdxdna_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
+
+static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
{
- return drm_gem_shmem_vm_ops.fault(vmf);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &amdxdna_dmabuf_ops;
+ exp_info.size = gobj->size;
+ exp_info.flags = flags;
+ exp_info.priv = gobj;
+ exp_info.resv = gobj->resv;
+
+ return drm_gem_dmabuf_export(gobj->dev, &exp_info);
}
-static void amdxdna_gem_vm_open(struct vm_area_struct *vma)
+static void amdxdna_imported_obj_free(struct amdxdna_gem_obj *abo)
{
- drm_gem_shmem_vm_ops.open(vma);
+ dma_buf_unmap_attachment_unlocked(abo->attach, abo->base.sgt, DMA_BIDIRECTIONAL);
+ dma_buf_detach(abo->dma_buf, abo->attach);
+ dma_buf_put(abo->dma_buf);
+ drm_gem_object_release(to_gobj(abo));
+ kfree(abo);
}
-static void amdxdna_gem_vm_close(struct vm_area_struct *vma)
+static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
{
- struct drm_gem_object *gobj = vma->vm_private_data;
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
+
+ XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
+
+ amdxdna_hmm_unregister(abo, NULL);
+ flush_workqueue(xdna->notifier_wq);
+
+ if (abo->pinned)
+ amdxdna_gem_unpin(abo);
- amdxdna_hmm_unregister(to_xdna_obj(gobj));
- drm_gem_shmem_vm_ops.close(vma);
+ if (abo->type == AMDXDNA_BO_DEV) {
+ mutex_lock(&abo->client->mm_lock);
+ drm_mm_remove_node(&abo->mm_node);
+ mutex_unlock(&abo->client->mm_lock);
+
+ vunmap(abo->mem.kva);
+ drm_gem_object_put(to_gobj(abo->dev_heap));
+ drm_gem_object_release(gobj);
+ mutex_destroy(&abo->lock);
+ kfree(abo);
+ return;
+ }
+
+ if (abo->type == AMDXDNA_BO_DEV_HEAP)
+ drm_mm_takedown(&abo->mm);
+
+ drm_gem_vunmap(gobj, &map);
+ mutex_destroy(&abo->lock);
+
+ if (is_import_bo(abo)) {
+ amdxdna_imported_obj_free(abo);
+ return;
+ }
+
+ drm_gem_shmem_free(&abo->base);
}
-static const struct vm_operations_struct amdxdna_gem_vm_ops = {
- .fault = amdxdna_gem_vm_fault,
- .open = amdxdna_gem_vm_open,
- .close = amdxdna_gem_vm_close,
+static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
+ .free = amdxdna_gem_obj_free,
};
static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
@@ -220,7 +414,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = amdxdna_gem_obj_mmap,
- .vm_ops = &amdxdna_gem_vm_ops,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+ .export = amdxdna_gem_prime_export,
};
static struct amdxdna_gem_obj *
@@ -239,6 +434,7 @@ amdxdna_gem_create_obj(struct drm_device *dev, size_t size)
abo->mem.userptr = AMDXDNA_INVALID_ADDR;
abo->mem.dev_addr = AMDXDNA_INVALID_ADDR;
abo->mem.size = size;
+ INIT_LIST_HEAD(&abo->mem.umap_list);
return abo;
}
@@ -258,6 +454,51 @@ amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
return to_gobj(abo);
}
+struct drm_gem_object *
+amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ struct sg_table *sgt;
+ int ret;
+
+ get_dma_buf(dma_buf);
+
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach)) {
+ ret = PTR_ERR(attach);
+ goto put_buf;
+ }
+
+ sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ gobj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(gobj)) {
+ ret = PTR_ERR(gobj);
+ goto fail_unmap;
+ }
+
+ abo = to_xdna_obj(gobj);
+ abo->attach = attach;
+ abo->dma_buf = dma_buf;
+
+ return gobj;
+
+fail_unmap:
+ dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+put_buf:
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
+
static struct amdxdna_gem_obj *
amdxdna_drm_alloc_shmem(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
@@ -417,7 +658,7 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
abo->type = AMDXDNA_BO_CMD;
abo->client = filp->driver_priv;
- ret = drm_gem_vmap_unlocked(to_gobj(abo), &map);
+ ret = drm_gem_vmap(to_gobj(abo), &map);
if (ret) {
XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
goto release_obj;
@@ -483,6 +724,9 @@ int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
int ret;
+ if (is_import_bo(abo))
+ return 0;
+
switch (abo->type) {
case AMDXDNA_BO_SHMEM:
case AMDXDNA_BO_DEV_HEAP:
@@ -515,6 +759,9 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
{
+ if (is_import_bo(abo))
+ return;
+
if (abo->type == AMDXDNA_BO_DEV)
abo = abo->dev_heap;
@@ -606,7 +853,9 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
goto put_obj;
}
- if (abo->type == AMDXDNA_BO_DEV)
+ if (is_import_bo(abo))
+ drm_clflush_sg(abo->base.sgt);
+ else if (abo->type == AMDXDNA_BO_DEV)
drm_clflush_pages(abo->mem.pages, abo->mem.nr_pages);
else
drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);
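
With amdxdna_gem_prime_export() and amdxdna_gem_prime_import() wired up, a buffer created on this device can round-trip through the generic DRM PRIME ioctls. A minimal userspace sketch of that flow, compiled against the libdrm uapi headers; the device fd and GEM handle are assumed to have been obtained elsewhere and are placeholders here:

#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Export a GEM handle as a dma-buf fd, then re-import it as a handle. */
static int prime_roundtrip(int drm_fd, unsigned int handle)
{
	struct drm_prime_handle args = {
		.handle = handle,
		.flags = DRM_CLOEXEC,
	};

	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
		return -1;
	printf("exported handle %u as dma-buf fd %d\n", handle, args.fd);

	/* args.fd could be handed to another device or process here. */
	if (ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args))
		return -1;
	printf("imported back as handle %u\n", args.handle);
	return 0;
}
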
diff --git a/drivers/accel/amdxdna/amdxdna_gem.h b/drivers/accel/amdxdna/amdxdna_gem.h
index 8ccc0375dd9d..aee97e971d6d 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.h
+++ b/drivers/accel/amdxdna/amdxdna_gem.h
@@ -6,6 +6,20 @@
#ifndef _AMDXDNA_GEM_H_
#define _AMDXDNA_GEM_H_
+#include <linux/hmm.h>
+
+struct amdxdna_umap {
+ struct vm_area_struct *vma;
+ struct mmu_interval_notifier notifier;
+ struct hmm_range range;
+ struct work_struct hmm_unreg_work;
+ struct amdxdna_gem_obj *abo;
+ struct list_head node;
+ struct kref refcnt;
+ bool invalid;
+ bool unmapped;
+};
+
struct amdxdna_mem {
u64 userptr;
void *kva;
@@ -13,8 +27,7 @@ struct amdxdna_mem {
size_t size;
struct page **pages;
u32 nr_pages;
- struct mmu_interval_notifier notifier;
- unsigned long *pfns;
+ struct list_head umap_list;
bool map_invalid;
};
@@ -31,9 +44,12 @@ struct amdxdna_gem_obj {
struct amdxdna_gem_obj *dev_heap; /* For AMDXDNA_BO_DEV */
struct drm_mm_node mm_node; /* For AMDXDNA_BO_DEV */
u32 assigned_hwctx;
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *attach;
};
#define to_gobj(obj) (&(obj)->base.base)
+#define is_import_bo(obj) ((obj)->attach)
static inline struct amdxdna_gem_obj *to_xdna_obj(struct drm_gem_object *gobj)
{
@@ -47,8 +63,12 @@ static inline void amdxdna_gem_put_obj(struct amdxdna_gem_obj *abo)
drm_gem_object_put(to_gobj(abo));
}
+void amdxdna_umap_put(struct amdxdna_umap *mapp);
+
struct drm_gem_object *
amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size);
+struct drm_gem_object *
+amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
struct amdxdna_gem_obj *
amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
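
Each amdxdna_umap above is reference-counted: the notifier callback and the populate path both take a reference before touching the record, and amdxdna_umap_release() tears the notifier down only when the last reference drops. A compact userspace model of that get/put discipline, using C11 atomics in place of struct kref; all names are illustrative:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct umap {
	atomic_int refcnt;
	int id;
};

static struct umap *umap_new(int id)
{
	struct umap *m = malloc(sizeof(*m));

	if (!m)
		return NULL;
	atomic_init(&m->refcnt, 1);     /* kref_init(): one owner ref */
	m->id = id;
	return m;
}

static void umap_get(struct umap *m)    /* kref_get()                 */
{
	atomic_fetch_add(&m->refcnt, 1);
}

static void umap_put(struct umap *m)    /* kref_put(..., release)     */
{
	if (atomic_fetch_sub(&m->refcnt, 1) == 1) {
		printf("releasing umap %d\n", m->id);
		free(m);                /* amdxdna_umap_release() body */
	}
}

int main(void)
{
	struct umap *m = umap_new(0);

	if (!m)
		return 1;
	umap_get(m);   /* e.g. the populate path pins the record       */
	umap_put(m);   /* populate done                                */
	umap_put(m);   /* list owner drops the last ref -> release     */
	return 0;
}
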
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
index f5b8497cf5ad..f2bf1d374cc7 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.c
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -226,6 +226,7 @@ const struct drm_driver amdxdna_drm_drv = {
.num_ioctls = ARRAY_SIZE(amdxdna_drm_ioctls),
.gem_create_object = amdxdna_gem_create_object_cb,
+ .gem_prime_import = amdxdna_gem_prime_import,
};
static const struct amdxdna_dev_info *
@@ -266,12 +267,16 @@ static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
fs_reclaim_release(GFP_KERNEL);
}
+ xdna->notifier_wq = alloc_ordered_workqueue("notifier_wq", 0);
+ if (!xdna->notifier_wq)
+ return -ENOMEM;
+
mutex_lock(&xdna->dev_lock);
ret = xdna->dev_info->ops->init(xdna);
mutex_unlock(&xdna->dev_lock);
if (ret) {
XDNA_ERR(xdna, "Hardware init failed, ret %d", ret);
- return ret;
+ goto destroy_notifier_wq;
}
ret = amdxdna_sysfs_init(xdna);
@@ -301,6 +306,8 @@ failed_dev_fini:
mutex_lock(&xdna->dev_lock);
xdna->dev_info->ops->fini(xdna);
mutex_unlock(&xdna->dev_lock);
+destroy_notifier_wq:
+ destroy_workqueue(xdna->notifier_wq);
return ret;
}
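
The probe path now owns one more resource (the ordered notifier workqueue), so the error ladder gains a destroy_notifier_wq label that unwinds it in reverse acquisition order. The shape of that idiom, as a hedged standalone sketch with invented resource names:

#include <stdio.h>

static int acquire(const char *what, int ok)
{
	printf("%s %s\n", ok ? "acquired" : "failed to acquire", what);
	return ok ? 0 : -1;
}

static int probe(int wq_ok, int hw_ok)
{
	int ret;

	ret = acquire("notifier workqueue", wq_ok);
	if (ret)
		return ret;

	ret = acquire("hardware", hw_ok);
	if (ret)
		goto destroy_wq;        /* unwind in reverse order */

	return 0;

destroy_wq:
	printf("destroyed notifier workqueue\n");
	return ret;
}

int main(void)
{
	probe(1, 0);   /* hardware init fails -> workqueue is torn down */
	return 0;
}
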
@@ -310,6 +317,8 @@ static void amdxdna_remove(struct pci_dev *pdev)
struct device *dev = &pdev->dev;
struct amdxdna_client *client;
+ destroy_workqueue(xdna->notifier_wq);
+
pm_runtime_get_noresume(dev);
pm_runtime_forbid(dev);
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h
index 37848a8d8031..ab79600911aa 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.h
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h
@@ -6,6 +6,7 @@
#ifndef _AMDXDNA_PCI_DRV_H_
#define _AMDXDNA_PCI_DRV_H_
+#include <linux/workqueue.h>
#include <linux/xarray.h>
#define XDNA_INFO(xdna, fmt, args...) drm_info(&(xdna)->ddev, fmt, ##args)
@@ -98,6 +99,7 @@ struct amdxdna_dev {
struct list_head client_list;
struct amdxdna_fw_ver fw_ver;
struct rw_semaphore notifier_lock; /* for mmu notifier*/
+ struct workqueue_struct *notifier_wq;
};
/*
diff --git a/drivers/accel/habanalabs/Kconfig b/drivers/accel/habanalabs/Kconfig
index be85336107f9..1919fbb169c7 100644
--- a/drivers/accel/habanalabs/Kconfig
+++ b/drivers/accel/habanalabs/Kconfig
@@ -6,7 +6,7 @@
config DRM_ACCEL_HABANALABS
tristate "HabanaLabs AI accelerators"
depends on DRM_ACCEL
- depends on X86_64
+ depends on X86 && X86_64
depends on PCI && HAS_IOMEM
select GENERIC_ALLOCATOR
select HWMON
diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
index 8729a0c57d78..dc80ca921d90 100644
--- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
@@ -17,8 +17,6 @@
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
-#include <asm/msr.h>
-
/* make sure there is space for all the signed info */
static_assert(sizeof(struct cpucp_info) <= SEC_DEV_INFO_BUF_SZ);
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index eff1d3ca075f..0e7748c5e117 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -374,6 +374,9 @@ int ivpu_boot(struct ivpu_device *vdev)
{
int ret;
+ drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter));
+ drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
+
/* Update boot params located at first 4KB of FW memory */
ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));
@@ -573,6 +576,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
atomic64_set(&vdev->unique_id_counter, 0);
+ atomic_set(&vdev->job_timeout_counter, 0);
xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 92753effb1c9..5497e7030e91 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -154,6 +154,7 @@ struct ivpu_device {
struct mutex submitted_jobs_lock; /* Protects submitted_jobs */
struct xarray submitted_jobs_xa;
struct ivpu_ipc_consumer job_done_consumer;
+ atomic_t job_timeout_counter;
atomic64_t unique_id_counter;
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
index 1d0b2bd9d65c..9a3935be1c05 100644
--- a/drivers/accel/ivpu/ivpu_fw.h
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -39,6 +39,7 @@ struct ivpu_fw_info {
u64 read_only_addr;
u32 read_only_size;
u32 sched_mode;
+ u64 last_heartbeat;
};
int ivpu_fw_init(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 8741c73b92ce..e0d242d9f3e5 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -30,7 +30,7 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
"%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
(bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
- (bool)bo->base.base.import_attach);
+ (bool)drm_gem_is_imported(&bo->base.base));
}
/*
@@ -122,7 +122,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
bo->ctx = NULL;
}
- if (bo->base.base.import_attach)
+ if (drm_gem_is_imported(&bo->base.base))
return;
dma_resv_lock(bo->base.base.resv, NULL);
@@ -282,7 +282,7 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
ivpu_bo_unbind_locked(bo);
mutex_destroy(&bo->lock);
- drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
+ drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
drm_gem_shmem_free(&bo->base);
}
@@ -362,7 +362,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
if (flags & DRM_IVPU_BO_MAPPABLE) {
dma_resv_lock(bo->base.base.resv, NULL);
- ret = drm_gem_shmem_vmap(&bo->base, &map);
+ ret = drm_gem_shmem_vmap_locked(&bo->base, &map);
dma_resv_unlock(bo->base.base.resv);
if (ret)
@@ -387,7 +387,7 @@ void ivpu_bo_free(struct ivpu_bo *bo)
if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
dma_resv_lock(bo->base.base.resv, NULL);
- drm_gem_shmem_vunmap(&bo->base, &map);
+ drm_gem_shmem_vunmap_locked(&bo->base, &map);
dma_resv_unlock(bo->base.base.resv);
}
@@ -461,7 +461,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
if (bo->mmu_mapped)
drm_printf(p, " mmu_mapped");
- if (bo->base.base.import_attach)
+ if (drm_gem_is_imported(&bo->base.base))
drm_printf(p, " imported");
drm_printf(p, "\n");
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index ac0e22454596..ea30db181cd7 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -34,6 +34,7 @@ module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");
#define PM_RESCHEDULE_LIMIT 5
+#define PM_TDR_HEARTBEAT_LIMIT 30
static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
{
@@ -44,6 +45,7 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
ivpu_fw_log_reset(vdev);
ivpu_fw_load(vdev);
fw->entry_point = fw->cold_boot_entry_point;
+ fw->last_heartbeat = 0;
}
static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev)
@@ -189,7 +191,24 @@ static void ivpu_job_timeout_work(struct work_struct *work)
{
struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
struct ivpu_device *vdev = pm->vdev;
+ u64 heartbeat;
+ if (ivpu_jsm_get_heartbeat(vdev, 0, &heartbeat) || heartbeat <= vdev->fw->last_heartbeat) {
+ ivpu_err(vdev, "Job timeout detected, heartbeat not progressed\n");
+ goto recovery;
+ }
+
+ if (atomic_fetch_inc(&vdev->job_timeout_counter) > PM_TDR_HEARTBEAT_LIMIT) {
+ ivpu_err(vdev, "Job timeout detected, heartbeat limit exceeded\n");
+ goto recovery;
+ }
+
+ vdev->fw->last_heartbeat = heartbeat;
+ ivpu_start_job_timeout_detection(vdev);
+ return;
+
+recovery:
+ atomic_set(&vdev->job_timeout_counter, 0);
ivpu_pm_trigger_recovery(vdev, "TDR");
}
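
The reworked timeout handler above only escalates to recovery when the firmware has genuinely stalled: if the heartbeat counter is still advancing it re-arms detection, up to PM_TDR_HEARTBEAT_LIMIT re-arms, and zeroes the counter once recovery fires. The decision logic, modelled as a runnable sketch with invented sample values:

#include <stdio.h>

#define TDR_HEARTBEAT_LIMIT 30

/* Returns 1 when recovery should run, 0 when detection is re-armed. */
static int tdr_check(unsigned long long hb, unsigned long long *last,
		     int *timeouts)
{
	if (hb <= *last) {
		printf("heartbeat stalled at %llu -> recover\n", hb);
		*timeouts = 0;
		return 1;
	}
	if ((*timeouts)++ > TDR_HEARTBEAT_LIMIT) {
		printf("alive but %d timeouts -> recover\n", *timeouts);
		*timeouts = 0;
		return 1;
	}
	*last = hb;
	return 0;               /* job is slow but alive: re-arm timer */
}

int main(void)
{
	unsigned long long last = 0;
	int timeouts = 0;

	tdr_check(10, &last, &timeouts);  /* progressing: re-arm */
	tdr_check(25, &last, &timeouts);  /* progressing: re-arm */
	tdr_check(25, &last, &timeouts);  /* stalled: recover    */
	return 0;
}
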
@@ -204,6 +223,7 @@ void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev)
{
cancel_delayed_work_sync(&vdev->pm->job_timeout_work);
+ atomic_set(&vdev->job_timeout_counter, 0);
}
int ivpu_pm_suspend_cb(struct device *dev)
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 43aba57b48f0..1bce1af7c72c 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -609,7 +609,7 @@ static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struc
struct scatterlist *sg;
int ret = 0;
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
return -EINVAL;
for (sg = bo->sgt->sgl; sg; sg = sg_next(sg)) {
@@ -630,7 +630,7 @@ static void qaic_free_object(struct drm_gem_object *obj)
{
struct qaic_bo *bo = to_qaic_bo(obj);
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
/* DMABUF/PRIME Path */
drm_prime_gem_destroy(obj, NULL);
} else {
@@ -870,7 +870,7 @@ static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
{
int ret;
- if (bo->base.import_attach)
+ if (drm_gem_is_imported(&bo->base))
ret = qaic_prepare_import_bo(bo, hdr);
else
ret = qaic_prepare_export_bo(qdev, bo, hdr);
@@ -894,7 +894,7 @@ static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *b
static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{
- if (bo->base.import_attach)
+ if (drm_gem_is_imported(&bo->base))
qaic_unprepare_import_bo(bo);
else
qaic_unprepare_export_bo(qdev, bo);
diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c
index ba0cf2f94732..a991b8198dc4 100644
--- a/drivers/accel/qaic/qaic_debugfs.c
+++ b/drivers/accel/qaic/qaic_debugfs.c
@@ -240,7 +240,6 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d
mhi_unprepare:
mhi_unprepare_from_transfer(mhi_dev);
destroy_workqueue:
- flush_workqueue(qdev->bootlog_wq);
destroy_workqueue(qdev->bootlog_wq);
out:
return ret;
@@ -253,7 +252,6 @@ static void qaic_bootlog_mhi_remove(struct mhi_device *mhi_dev)
qdev = dev_get_drvdata(&mhi_dev->dev);
mhi_unprepare_from_transfer(qdev->bootlog_ch);
- flush_workqueue(qdev->bootlog_wq);
destroy_workqueue(qdev->bootlog_wq);
qdev->bootlog_ch = NULL;
}
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7f10aa38269d..7bc40c2735ac 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -576,6 +576,9 @@ config ACPI_FFH
Enable this feature if you want to set up and install the FFH Address
Space handler to handle FFH OpRegion in the firmware.
+config ACPI_MRRM
+ bool
+
source "drivers/acpi/pmic/Kconfig"
config ACPI_VIOT
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 797070fc9a3f..d1b0affb844f 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -66,6 +66,7 @@ acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o
acpi-$(CONFIG_ACPI_PRMT) += prmt.o
acpi-$(CONFIG_ACPI_PCC) += acpi_pcc.o
acpi-$(CONFIG_ACPI_FFH) += acpi_ffh.o
+acpi-$(CONFIG_ACPI_MRRM) += acpi_mrrm.o
# Address translation
acpi-$(CONFIG_ACPI_ADXL) += acpi_adxl.o
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index f7fb7205028d..f6b9562779de 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -15,6 +15,7 @@
#include <acpi/ghes.h>
#include <asm/cpu.h>
#include <asm/mce.h>
+#include <asm/msr.h>
#include "apei/apei-internal.h"
#include <ras/ras_event.h>
@@ -234,7 +235,7 @@ static int __init extlog_init(void)
u64 cap;
int rc;
- if (rdmsrl_safe(MSR_IA32_MCG_CAP, &cap) ||
+ if (rdmsrq_safe(MSR_IA32_MCG_CAP, &cap) ||
!(cap & MCG_ELOG_P) ||
!extlog_get_l1addr())
return -ENODEV;
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index 794962c5c88e..b8d98b1b48ae 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -39,7 +39,7 @@ static int lpit_read_residency_counter_us(u64 *counter, bool io_mem)
return 0;
}
- err = rdmsrl_safe(residency_info_ffh.gaddr.address, counter);
+ err = rdmsrq_safe(residency_info_ffh.gaddr.address, counter);
if (!err) {
u64 mask = GENMASK_ULL(residency_info_ffh.gaddr.bit_offset +
residency_info_ffh.gaddr. bit_width - 1,
diff --git a/drivers/acpi/acpi_mrrm.c b/drivers/acpi/acpi_mrrm.c
new file mode 100644
index 000000000000..47ea3ccc2142
--- /dev/null
+++ b/drivers/acpi/acpi_mrrm.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025, Intel Corporation.
+ *
+ * Memory Range and Region Mapping (MRRM) structure
+ *
+ * Parse and report the platform's MRRM table in /sys.
+ */
+
+#define pr_fmt(fmt) "acpi/mrrm: " fmt
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+
+/* Default assume one memory region covering all system memory, per the spec */
+static int max_mem_region = 1;
+
+/* Access for use by resctrl file system */
+int acpi_mrrm_max_mem_region(void)
+{
+ return max_mem_region;
+}
+
+struct mrrm_mem_range_entry {
+ u64 base;
+ u64 length;
+ int node;
+ u8 local_region_id;
+ u8 remote_region_id;
+};
+
+static struct mrrm_mem_range_entry *mrrm_mem_range_entry;
+static u32 mrrm_mem_entry_num;
+
+static int get_node_num(struct mrrm_mem_range_entry *e)
+{
+ unsigned int nid;
+
+ for_each_online_node(nid) {
+ for (int z = 0; z < MAX_NR_ZONES; z++) {
+ struct zone *zone = NODE_DATA(nid)->node_zones + z;
+
+ if (!populated_zone(zone))
+ continue;
+ if (zone_intersects(zone, PHYS_PFN(e->base), PHYS_PFN(e->length)))
+ return zone_to_nid(zone);
+ }
+ }
+
+ return -ENOENT;
+}
+
+static __init int acpi_parse_mrrm(struct acpi_table_header *table)
+{
+ struct acpi_mrrm_mem_range_entry *mre_entry;
+ struct acpi_table_mrrm *mrrm;
+ void *mre, *mrrm_end;
+ int mre_count = 0;
+
+ mrrm = (struct acpi_table_mrrm *)table;
+ if (!mrrm)
+ return -ENODEV;
+
+ if (mrrm->flags & ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS)
+ return -EOPNOTSUPP;
+
+ mrrm_end = (void *)mrrm + mrrm->header.length - 1;
+ mre = (void *)mrrm + sizeof(struct acpi_table_mrrm);
+ while (mre < mrrm_end) {
+ mre_entry = mre;
+ mre_count++;
+ mre += mre_entry->header.length;
+ }
+ if (!mre_count) {
+ pr_info(FW_BUG "No ranges listed in MRRM table\n");
+ return -EINVAL;
+ }
+
+ mrrm_mem_range_entry = kmalloc_array(mre_count, sizeof(*mrrm_mem_range_entry),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!mrrm_mem_range_entry)
+ return -ENOMEM;
+
+ mre = (void *)mrrm + sizeof(struct acpi_table_mrrm);
+ while (mre < mrrm_end) {
+ struct mrrm_mem_range_entry *e;
+
+ mre_entry = mre;
+ e = mrrm_mem_range_entry + mrrm_mem_entry_num;
+
+ e->base = mre_entry->addr_base;
+ e->length = mre_entry->addr_len;
+ e->node = get_node_num(e);
+
+ if (mre_entry->region_id_flags & ACPI_MRRM_VALID_REGION_ID_FLAGS_LOCAL)
+ e->local_region_id = mre_entry->local_region_id;
+ else
+ e->local_region_id = -1;
+ if (mre_entry->region_id_flags & ACPI_MRRM_VALID_REGION_ID_FLAGS_REMOTE)
+ e->remote_region_id = mre_entry->remote_region_id;
+ else
+ e->remote_region_id = -1;
+
+ mrrm_mem_entry_num++;
+ mre += mre_entry->header.length;
+ }
+
+ max_mem_region = mrrm->max_mem_region;
+
+ return 0;
+}
+
+#define RANGE_ATTR(name, fmt) \
+static ssize_t name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+{ \
+ struct mrrm_mem_range_entry *mre; \
+ const char *kname = kobject_name(kobj); \
+ int n, ret; \
+ \
+ ret = kstrtoint(kname + 5, 10, &n); \
+ if (ret) \
+ return ret; \
+ \
+ mre = mrrm_mem_range_entry + n; \
+ \
+ return sysfs_emit(buf, fmt, mre->name); \
+} \
+static struct kobj_attribute name##_attr = __ATTR_RO(name)
+
+RANGE_ATTR(base, "0x%llx\n");
+RANGE_ATTR(length, "0x%llx\n");
+RANGE_ATTR(node, "%d\n");
+RANGE_ATTR(local_region_id, "%d\n");
+RANGE_ATTR(remote_region_id, "%d\n");
+
+static struct attribute *memory_range_attrs[] = {
+ &base_attr.attr,
+ &length_attr.attr,
+ &node_attr.attr,
+ &local_region_id_attr.attr,
+ &remote_region_id_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(memory_range);
+
+static __init int add_boot_memory_ranges(void)
+{
+ struct kobject *pkobj, *kobj;
+ int ret = -EINVAL;
+ char *name;
+
+ pkobj = kobject_create_and_add("memory_ranges", acpi_kobj);
+
+ for (int i = 0; i < mrrm_mem_entry_num; i++) {
+ name = kasprintf(GFP_KERNEL, "range%d", i);
+ if (!name) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ kobj = kobject_create_and_add(name, pkobj);
+
+ ret = sysfs_create_groups(kobj, memory_range_groups);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static __init int mrrm_init(void)
+{
+ int ret;
+
+ ret = acpi_table_parse(ACPI_SIG_MRRM, acpi_parse_mrrm);
+ if (ret < 0)
+ return ret;
+
+ return add_boot_memory_ranges();
+}
+device_initcall(mrrm_init);
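
The parser above registers its kobjects under acpi_kobj, which corresponds to /sys/firmware/acpi, so on MRRM-capable firmware the attributes should land at /sys/firmware/acpi/memory_ranges/rangeN/{base,length,node,local_region_id,remote_region_id}. A small userspace reader for the first range; the path is inferred from acpi_kobj and the files only exist when the table is present:

#include <stdio.h>

static int read_attr(const char *attr, char *out, int len)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/firmware/acpi/memory_ranges/range0/%s", attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(out, len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	return 0;
}

int main(void)
{
	const char *attrs[] = { "base", "length", "node",
				"local_region_id", "remote_region_id" };
	char buf[64];

	for (unsigned int i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		if (!read_attr(attrs[i], buf, sizeof(buf)))
			printf("%s = %s", attrs[i], buf);
	return 0;
}
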
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 3fde4496f8a2..6f8bbe1247a5 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -19,7 +19,7 @@
#include <linux/acpi.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
#include <asm/mwait.h>
#include <xen/xen.h>
diff --git a/drivers/acpi/acpi_pcc.c b/drivers/acpi/acpi_pcc.c
index 07a034a53aca..97064e943768 100644
--- a/drivers/acpi/acpi_pcc.c
+++ b/drivers/acpi/acpi_pcc.c
@@ -31,7 +31,6 @@
struct pcc_data {
struct pcc_mbox_chan *pcc_chan;
- void __iomem *pcc_comm_addr;
struct completion done;
struct mbox_client cl;
struct acpi_pcc_info ctx;
@@ -81,14 +80,6 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
ret = AE_SUPPORT;
goto err_free_channel;
}
- data->pcc_comm_addr = acpi_os_ioremap(pcc_chan->shmem_base_addr,
- pcc_chan->shmem_size);
- if (!data->pcc_comm_addr) {
- pr_err("Failed to ioremap PCC comm region mem for %d\n",
- ctx->subspace_id);
- ret = AE_NO_MEMORY;
- goto err_free_channel;
- }
*region_context = data;
return AE_OK;
@@ -113,7 +104,7 @@ acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
reinit_completion(&data->done);
/* Write to Shared Memory */
- memcpy_toio(data->pcc_comm_addr, (void *)value, data->ctx.length);
+ memcpy_toio(data->pcc_chan->shmem, (void *)value, data->ctx.length);
ret = mbox_send_message(data->pcc_chan->mchan, NULL);
if (ret < 0)
@@ -134,7 +125,7 @@ acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
mbox_chan_txdone(data->pcc_chan->mchan, ret);
- memcpy_fromio(value, data->pcc_comm_addr, data->ctx.length);
+ memcpy_fromio(value, data->pcc_chan->shmem, data->ctx.length);
return AE_OK;
}
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 9d4cbd956627..d7d4649ce66f 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -17,7 +17,7 @@
/* Common info for tool signons */
#define ACPICA_NAME "Intel ACPI Component Architecture"
-#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2022 Intel Corporation"
+#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2025 Intel Corporation"
#if ACPI_MACHINE_WIDTH == 64
#define ACPI_WIDTH " (64-bit version)"
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 4536dc9d3979..662231f4f881 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -3,7 +3,7 @@
*
* Name: accommon.h - Common include files for generation of ACPICA source
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h
index c6ba6a36cfb5..24998f2d7539 100644
--- a/drivers/acpi/acpica/acconvert.h
+++ b/drivers/acpi/acpica/acconvert.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 911875c5a5f1..fe6d38b43c9a 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -3,7 +3,7 @@
*
* Name: acdebug.h - ACPI/AML debugger
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -37,7 +37,7 @@ struct acpi_db_argument_info {
struct acpi_db_execute_walk {
u32 count;
u32 max_count;
- char name_seg[ACPI_NAMESEG_SIZE + 1];
+ char name_seg[ACPI_NAMESEG_SIZE + 1] ACPI_NONSTRING;
};
#define PARAM_LIST(pl) pl
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 73eecbf62f06..5d48a344b35f 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -3,7 +3,7 @@
*
* Name: acdispat.h - dispatcher (parser to interpreter interface)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 1c5218b79fc2..b40fb3a5ac8a 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -3,7 +3,7 @@
*
* Name: acevents.h - Event subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 309ce8efb4f6..c8a750d2674c 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -3,7 +3,7 @@
*
* Name: acglobal.h - Declarations for global variables
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index b8543a34caea..6aec56c65fa0 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -3,7 +3,7 @@
*
* Name: achware.h -- hardware specific interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 955114c926bd..1ee6ac9b2baf 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -3,7 +3,7 @@
*
* Name: acinterp.h - Interpreter subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -120,6 +120,9 @@ void
acpi_ex_trace_point(acpi_trace_event_type type,
u8 begin, u8 *aml, char *pathname);
+void
+acpi_ex_trace_args(union acpi_operand_object **params, u32 count);
+
/*
* exfield - ACPI AML (p-code) execution - field manipulation
*/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 6481c48c22bb..0c41f0097e8d 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -3,7 +3,7 @@
*
* Name: aclocal.h - Internal data types used across the ACPI subsystem
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -293,7 +293,7 @@ acpi_status (*acpi_internal_method) (struct acpi_walk_state * walk_state);
* expected_return_btypes - Allowed type(s) for the return value
*/
struct acpi_name_info {
- char name[ACPI_NAMESEG_SIZE] __nonstring;
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
u16 argument_list;
u8 expected_btypes;
};
@@ -370,7 +370,7 @@ typedef acpi_status (*acpi_object_converter) (struct acpi_namespace_node *
converted_object);
struct acpi_simple_repair_info {
- char name[ACPI_NAMESEG_SIZE] __nonstring;
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
u32 unexpected_btypes;
u32 package_index;
acpi_object_converter object_converter;
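A note on the ACPI_NONSTRING annotation used in the hunks above: the name[] members hold fixed-width 4-byte ACPI name segments that are deliberately not NUL-terminated, and the kernel-only __nonstring attribute is being replaced by a portable ACPICA macro so the same annotation can live in upstream ACPICA. A minimal sketch of how such a macro can be defined and used, assuming it expands to GCC's nonstring attribute where available (the real definition lives in the ACPICA platform headers, not in this diff):

#include <stdio.h>

#if defined(__has_attribute)
#  if __has_attribute(__nonstring__)
#    define ACPI_NONSTRING __attribute__((__nonstring__))
#  endif
#endif
#ifndef ACPI_NONSTRING
#  define ACPI_NONSTRING /* no-op on compilers without the attribute */
#endif

/* 4-char name segment: exactly fills the array, no terminating NUL.
 * The attribute tells the compiler this is intentional, suppressing
 * warnings such as -Wunterminated-string-initialization. */
static const char name_seg[4] ACPI_NONSTRING = "_STA";

int main(void)
{
	printf("%.4s\n", name_seg);
	return 0;
}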
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index de83dd22292b..4e9402c02410 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -3,7 +3,7 @@
*
* Name: acmacros.h - C macros for the entire subsystem.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 9448bc026b9b..13f050fecb49 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -3,7 +3,7 @@
*
* Name: acnamesp.h - Namespace subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 8fc02946d3cd..6ffcc7a0a0c2 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -3,7 +3,7 @@
*
* Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index da96d80e6b3a..a2a9e51d7ac6 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -3,7 +3,7 @@
*
* Name: acopcode.h - AML opcode information for the AML parser and interpreter
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 6dad786a382c..65a15dee092b 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -3,7 +3,7 @@
*
* Module Name: acparser.h - AML Parser subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index ef068f4c864a..76c5ed02e916 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -3,7 +3,7 @@
*
* Name: acpredef - Information table for ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index d772ff9ca07d..e8a92be5adae 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -3,7 +3,7 @@
*
* Name: acresrc.h - Resource Manager function prototypes
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index f8fee94ba708..e690f604cfa0 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -3,7 +3,7 @@
*
* Name: acstruct.h - Internal structs
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index b6ae979b01b6..ebef72bf58d0 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -3,7 +3,7 @@
*
* Name: actables.h - ACPI table management
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index edfdbbef81c1..3990d509bbab 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -3,7 +3,7 @@
*
* Name: acutils.h -- prototypes for the common (subsystem-wide) procedures
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index effe52b40dce..c5b544a006c5 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -5,7 +5,7 @@
* Declarations and definitions contained herein are derived
* directly from the ACPI specification.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 4e88f9fc2a28..54d6e51e0b9a 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -3,7 +3,7 @@
*
* Module Name: amlresrc.h - AML resource descriptors
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -504,10 +504,6 @@ struct aml_resource_pin_group_config {
#define AML_RESOURCE_PIN_GROUP_CONFIG_REVISION 1 /* ACPI 6.2 */
-/* restore default alignment */
-
-#pragma pack()
-
/* Union of all resource descriptors, so we can allocate the worst case */
union aml_resource {
@@ -562,6 +558,10 @@ union aml_resource {
u8 byte_item;
};
+/* restore default alignment */
+
+#pragma pack()
+
/* Interfaces used by both the disassembler and compiler */
void
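The amlresrc.h hunk above is more than cosmetic: previously #pragma pack() restored default alignment before union aml_resource was declared, so the union used to overlay raw AML resource bytes was not itself byte-packed. Moving the pragma below the union keeps every member of the overlay packed, which is what allows the rsaddr.c, rscalc.c, and rslist.c hunks later in this diff to drop their memcpy-into-a-local workarounds. A self-contained sketch of the pattern (struct names hypothetical):

#include <stdint.h>
#include <stdio.h>

#pragma pack(1)                /* AML wire format: byte alignment */

struct res_small {
	uint8_t  descriptor_type;
	uint16_t resource_length;  /* at offset 1 only because of pack(1) */
};

union res_overlay {            /* must still be inside the packed region */
	struct res_small small;
	uint8_t byte_item;
};

#pragma pack()                 /* restore default alignment AFTER the union */

int main(void)
{
	printf("sizeof(struct res_small) = %zu\n", sizeof(struct res_small)); /* 3 */
	return 0;
}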
diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c
index e874c1dddefa..554ae35108bd 100644
--- a/drivers/acpi/acpica/dbhistry.c
+++ b/drivers/acpi/acpica/dbhistry.c
@@ -3,7 +3,7 @@
*
* Module Name: dbhistry - debugger HISTORY command
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 4354c175e12e..e2f00c54cb36 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -4,7 +4,7 @@
* Module Name: dsargs - Support for execution of dynamic arguments for static
* objects (regions, fields, buffer fields, etc.)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 80c69af06948..c1f79d7a2026 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -4,7 +4,7 @@
* Module Name: dscontrol - Support for execution control opcodes -
* if/else/while/return
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
index c5c8380a3114..274b74255551 100644
--- a/drivers/acpi/acpica/dsdebug.c
+++ b/drivers/acpi/acpica/dsdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: dsdebug - Parser/Interpreter interface - debugging
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 532401ecdab0..df132c9089c7 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -3,7 +3,7 @@
*
* Module Name: dsfield - Dispatcher field routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 6e0e362e461f..57cd9e2d1109 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: dsinit - Object initialization namespace walk
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index e809c2aed78a..c8f37f4e6626 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -3,7 +3,7 @@
*
* Module Name: dsmethod - Parser/Interpreter interface - control method parsing
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index eca50517ad82..5393de4dbc4c 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -188,6 +188,7 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
index++;
}
+ acpi_ex_trace_args(params, index);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%u args passed to method\n", index));
return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 555f148d666b..1bf7eec49899 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -3,7 +3,7 @@
*
* Module Name: dsobject - Dispatcher object management routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index dd3059000885..5699b0872848 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: dsopcode - Dispatcher support for regions and fields
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index ecf793fe9919..1ed2386fab82 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -3,7 +3,7 @@
*
* Module Name: dspkginit - Completion of deferred package initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index fb9ed5e1da89..baf6a1f27605 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -668,6 +668,8 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
union acpi_parse_object *arguments[ACPI_OBJ_NUM_OPERANDS];
u32 arg_count = 0;
u32 index = walk_state->num_operands;
+ u32 prev_num_operands = walk_state->num_operands;
+ u32 new_num_operands;
u32 i;
ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg);
@@ -696,6 +698,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
/* Create the interpreter arguments, in reverse order */
+ new_num_operands = index;
index--;
for (i = 0; i < arg_count; i++) {
arg = arguments[index];
@@ -720,7 +723,11 @@ cleanup:
* pop everything off of the operand stack and delete those
* objects
*/
- acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
+ walk_state->num_operands = (u8)(i);
+ acpi_ds_obj_stack_pop_and_delete(new_num_operands, walk_state);
+
+ /* Restore operand count */
+ walk_state->num_operands = (u8)(prev_num_operands);
ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %u", index));
return_ACPI_STATUS(status);
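The dsutils.c change above reworks the error path of acpi_ds_create_operands(): the old cleanup popped arg_count operands, but when operand creation failed partway through that count did not match what had actually been pushed. The intent of the new code is to record the caller's operand count on entry (prev_num_operands), pop and delete only the operands this function created, and then restore the caller's count so its own operands survive. A runnable illustration of the save/adjust/restore idiom (all names hypothetical, not the real routines):

#include <stdint.h>
#include <stdio.h>

struct mini_stack {
	uint32_t count;
	const char *items[8];
};

/* Pops and "deletes" up to pop_count entries, like the bulk-pop helper */
static void pop_and_delete(struct mini_stack *s, uint32_t pop_count)
{
	while (pop_count-- && s->count)
		printf("deleting %s\n", s->items[--s->count]);
}

int main(void)
{
	struct mini_stack s = { .count = 2, .items = { "callerA", "callerB" } };
	uint32_t prev_count = s.count;

	s.items[s.count++] = "arg1";
	s.items[s.count++] = "arg0";
	/* ... suppose creating the next argument fails here ... */

	pop_and_delete(&s, s.count - prev_count); /* delete only the new ones */
	s.count = prev_count;                     /* caller's operands intact */
	printf("caller still holds %u operands\n", (unsigned)s.count);
	return 0;
}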
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index a43336f05206..5c5c6d8a4e48 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -4,7 +4,7 @@
* Module Name: dswexec - Dispatcher method execution callbacks;
* dispatch to interpreter.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index f7b8496c8bdd..666419b6a5c6 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload - Dispatcher first pass namespace load callbacks
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 541235f498c2..bfc54c914757 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload2 - Dispatcher second pass namespace load callbacks
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 1fdd07ae862c..375a8fa43d9d 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -3,7 +3,7 @@
*
* Module Name: dswscope - Scope stack manipulation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 75338a13c802..02aaddb89df9 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -3,7 +3,7 @@
*
* Module Name: dswstate - Dispatcher parse tree walk management routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 9e78c5b9ad52..6cdd39c987b8 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -3,7 +3,7 @@
*
* Module Name: evevent - Fixed Event handling and dispatch
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 989dc01af03f..fa3e0d00d1ca 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -3,7 +3,7 @@
*
* Module Name: evglock - Global Lock support
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 934b201d3820..ba65b2ea49b2 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpe - General Purpose Event handling and dispatch
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 58e1890ab25b..fadd93caf1d5 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeblk - GPE block creation and initialization.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 38f408cf13ce..eb769739420e 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeinit - System GPE initialization and update
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index ee3b1ea656d4..d15b1d75c8ec 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeutil - GPE utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 1c8cb6d924df..5a35dae945e2 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -3,7 +3,7 @@
*
* Module Name: evhandler - Support for Address Space handlers
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index e68e876d3b84..04a23a6c3bb1 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: evmisc - Miscellaneous event manager support functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index cf53b9535f18..fa3475da7ea9 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -3,7 +3,7 @@
*
* Module Name: evregion - Operation Region support
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 46d1b3f5582d..b03952798af5 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -3,7 +3,7 @@
*
* Module Name: evrgnini- ACPI address_space (op_region) init
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 24fa6433d562..86a8d41c079c 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -3,7 +3,7 @@
*
* Module Name: evxface - External interfaces for ACPI events
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 48bf845191d2..4b052908d2e7 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 4eeeb3b7ab7e..60dacec1b121 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index bff2d099f469..bccc672c934c 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -4,7 +4,7 @@
* Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
* Address Spaces.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
index 2fb78b35565b..c248c9b162fa 100644
--- a/drivers/acpi/acpica/exconcat.c
+++ b/drivers/acpi/acpica/exconcat.c
@@ -3,7 +3,7 @@
*
* Module Name: exconcat - Concatenate-type AML operators
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 473115309860..4d7dd0fc6b07 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -3,7 +3,7 @@
*
* Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index bb1be42daee1..fded9bfc2436 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -3,7 +3,7 @@
*
* Module Name: exconvrt - Object conversion routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -226,8 +226,8 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
/* Copy the string to the buffer */
new_buf = return_desc->buffer.pointer;
- strncpy((char *)new_buf, (char *)obj_desc->string.pointer,
- obj_desc->string.length);
+ memcpy((char *)new_buf, (char *)obj_desc->string.pointer,
+ obj_desc->string.length);
break;
default:
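On the strncpy() to memcpy() change above: the destination buffer is allocated at exactly obj_desc->string.length bytes and the source string has no interior NUL, so both calls copy the same bytes; memcpy states the raw-byte intent and avoids GCC's -Wstringop-truncation complaint about dropping the terminating NUL. A small standalone demonstration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *src = "DEAD";                /* string object, length 4 */
	size_t len = strlen(src);

	unsigned char *buf = malloc(len);        /* buffer sized to payload only */
	if (!buf)
		return 1;

	/* For a full-length copy the two calls move identical bytes, but
	 * memcpy says "raw bytes, no NUL expected" and does not trigger
	 * -Wstringop-truncation the way strncpy(dst, src, strlen(src)) does. */
	memcpy(buf, src, len);
	printf("%.*s\n", (int)len, (const char *)buf);

	free(buf);
	return 0;
}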
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 1bea9d97652c..052c69567997 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -3,7 +3,7 @@
*
* Module Name: excreate - Named object creation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index 3f86bfada510..81a07a52b73c 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: exdebug - Support for stores to the AML Debug Object
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 2e2da8790224..d8aeebaab70a 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -3,7 +3,7 @@
*
* Module Name: exdump - Interpreter debug output routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 61ff36189ace..ced3ff9d0a86 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -3,7 +3,7 @@
*
* Module Name: exfield - AML execution - field_unit read/write
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index cf6c812a8b6d..0771934c0455 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -3,7 +3,7 @@
*
* Module Name: exfldio - Aml Field I/O
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index c6f2a9166ac0..07cbac58ed21 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 65c487facdda..1fa013197fcf 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -3,7 +3,7 @@
*
* Module Name: exmutex - ASL Mutex Acquire/Release functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 9a448165bfeb..76ab73c37e90 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -3,7 +3,7 @@
*
* Module Name: exnames - interpreter/scanner name load/execute
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 20fb34b68bee..6ac7e0ca5c9d 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg1 - AML execution - opcodes with 1 argument
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 743c258bf2e8..a94fa4d70e99 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg2 - AML execution - opcodes with 2 arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index d3091f619909..bf08110ed6d2 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg3 - AML execution - opcodes with 3 arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 1af35e143ba9..cb078e39abf7 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg6 - AML execution - opcodes with 6 arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 82b1fa2d201f..1b1a006e82de 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -3,7 +3,7 @@
*
* Module Name: exprep - ACPI AML field prep utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index c49b9f8de723..a390a1c2b0ab 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -3,7 +3,7 @@
*
* Module Name: exregion - ACPI default op_region (address space) handlers
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 873de01b8ad2..dd83631090fc 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -3,7 +3,7 @@
*
* Module Name: exresnte - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 24a78b5e266c..4589de3f3012 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -3,7 +3,7 @@
*
* Module Name: exresolv - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 3a437e6ace5c..782ee353a709 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -3,7 +3,7 @@
*
* Module Name: exresop - AML Interpreter operand/object resolution
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
index 5241f4c01c76..6d2581ec22ad 100644
--- a/drivers/acpi/acpica/exserial.c
+++ b/drivers/acpi/acpica/exserial.c
@@ -3,7 +3,7 @@
*
* Module Name: exserial - field_unit support for serial address spaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -201,6 +201,12 @@ acpi_ex_read_serial_bus(union acpi_operand_object *obj_desc,
function = ACPI_READ;
break;
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+
+ buffer_length = ACPI_FFH_INPUT_BUFFER_SIZE;
+ function = ACPI_READ;
+ break;
+
default:
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
}
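The new case above lets acpi_ex_read_serial_bus() accept fields in the fixed-hardware (FFH) address space alongside the existing serial bus types, treating them as reads with an input buffer of ACPI_FFH_INPUT_BUFFER_SIZE bytes; that constant is defined elsewhere in the ACPICA headers rather than in this hunk.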
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 575c7a39f1aa..cbc42207496d 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -3,7 +3,7 @@
*
* Module Name: exstore - AML Interpreter object store support
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index b01ae015e1b5..0470b2639831 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -4,7 +4,7 @@
* Module Name: exstoren - AML Interpreter object store support,
* Store to Node (namespace object)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 37c3131a82fa..5b168fbc03e8 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -3,7 +3,7 @@
*
* Module Name: exstorob - AML object store support, store to object
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index 2c384bd52b9c..7f843c9d8a06 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -3,7 +3,7 @@
*
* Module Name: exsystem - Interface to OS services
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c
index f1730221ff13..d34497f3576a 100644
--- a/drivers/acpi/acpica/extrace.c
+++ b/drivers/acpi/acpica/extrace.c
@@ -3,7 +3,7 @@
*
* Module Name: extrace - Support for interpreter execution tracing
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -149,6 +149,57 @@ acpi_ex_trace_point(acpi_trace_event_type type,
/*******************************************************************************
*
+ * FUNCTION: acpi_ex_trace_args
+ *
+ * PARAMETERS: params - AML method arguments
+ * count - number of method arguments
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Trace the arguments passed to a control method
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_trace_args(union acpi_operand_object **params, u32 count)
+{
+ u32 i;
+
+ ACPI_FUNCTION_NAME(ex_trace_args);
+
+ for (i = 0; i < count; i++) {
+ union acpi_operand_object *obj_desc = params[i];
+
+ if (!i) {
+ ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT, " "));
+ }
+
+ switch (obj_desc->common.type) {
+ case ACPI_TYPE_INTEGER:
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "%llx", obj_desc->integer.value));
+ break;
+ case ACPI_TYPE_STRING:
+ if (!obj_desc->string.length) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "NULL"));
+ continue;
+ }
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_TRACE_POINT, _COMPONENT))
+ acpi_ut_print_string(obj_desc->string.pointer, ACPI_UINT8_MAX);
+ break;
+ default:
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "Unknown"));
+ break;
+ }
+ if (i + 1 == count) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "\n"));
+ } else {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, ", "));
+ }
+ }
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ex_start_trace_method
*
* PARAMETERS: method_node - Node of the method
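For readers following the new acpi_ex_trace_args() above: arguments are emitted on one line, separated by ", " and terminated by a newline, with a single leading space before the first argument (note that the empty-string case continues early and therefore skips the separator, so an empty final argument also suppresses the newline). The separator logic reduces to the following standalone loop (illustrative values):

#include <stdio.h>

int main(void)
{
	const char *args[] = { "0x1", "\"PCI0\"", "Unknown" };
	unsigned int count = 3;

	for (unsigned int i = 0; i < count; i++) {
		if (!i)
			printf(" ");                   /* leading space, once */
		printf("%s", args[i]);
		printf(i + 1 == count ? "\n" : ", ");
	}
	return 0;
}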
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index f4d4a033f166..cc10c0732218 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -3,7 +3,7 @@
*
* Module Name: exutils - interpreter/scanner utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 790f342dcd25..a1e1fa787566 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -3,7 +3,7 @@
*
* Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index a9ba9190408b..631fd8e2b774 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -4,7 +4,7 @@
* Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
* extended FADT-V5 sleep registers.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index e0c847ab8324..386f4759c317 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: hwgpe - Low level GPE enable/disable/clear functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index e0921f08b71a..87d78bef6323 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -4,7 +4,7 @@
* Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the
* original/legacy sleep/PM registers.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 192c04b5a599..a5e0bccae6a4 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -3,7 +3,7 @@
*
* Name: hwtimer.c - ACPI Power Management Timer Interface
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index b8de458f0368..496fd9e49f0b 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -3,7 +3,7 @@
*
* Module Name: hwvalid - I/O request validation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index c31f803995c6..847cd1b2493d 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -3,7 +3,7 @@
*
* Module Name: hwxface - Public ACPICA hardware interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 8dbf83aeb455..9aabe30416da 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -3,7 +3,7 @@
*
* Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 3efb46f0dc54..366d54a1d157 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -3,7 +3,7 @@
*
* Module Name: nsarguments - Validation of args for ACPI predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 7e5a683ae957..f05a92b88642 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -4,7 +4,7 @@
* Module Name: nsconvert - Object conversions for objects returned by
* predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 90a26cb0c472..6dc20486ad51 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index fa116ebe49a3..d5b16aaec233 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 86d126fdb27d..03373e7f7978 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: nsinit - namespace initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index fcb9de0f77a2..6ec4c646fff7 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -3,7 +3,7 @@
*
* Module Name: nsload - namespace loading/expanding/contracting procedures
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d91153f65700..22aeeeb56cff 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -194,7 +194,7 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
char *full_path, u32 path_size, u8 no_trailing)
{
u32 length = 0, i;
- char name[ACPI_NAMESEG_SIZE];
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
u8 do_no_trailing;
char c, *left, *right;
struct acpi_namespace_node *next_node;
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 31e551cf4ea6..959e6379bc4c 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -3,7 +3,7 @@
*
* Module Name: nsparse - namespace interface to AML parser
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index cf57bd69616d..81995ee48c49 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -3,7 +3,7 @@
*
* Module Name: nspredef - Validation of ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index dd37fc108fce..ca137ce5674f 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -3,7 +3,7 @@
*
* Module Name: nsprepkg - Validation of package objects for predefined names
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index b8657004190d..accfdcfb7e62 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -3,7 +3,7 @@
*
* Module Name: nsrepair - Repair for objects returned by predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 330b5e4711da..8dbb870f40d2 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -4,7 +4,7 @@
* Module Name: nsrepair2 - Repair for objects returned by specific
* predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -25,7 +25,7 @@ acpi_status (*acpi_repair_function) (struct acpi_evaluate_info * info,
return_object_ptr);
typedef struct acpi_repair_info {
- char name[ACPI_NAMESEG_SIZE] __nonstring;
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
acpi_repair_function repair_function;
} acpi_repair_info;
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 06ffdb6808f5..49cc07e2ac5a 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -4,7 +4,7 @@
* Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
* parents and siblings and Scope manipulation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index eee396a77bae..a2ac06a26e92 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: nswalk - Functions for walking the ACPI namespace
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 5d5bcf165298..1db831545ec8 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -4,7 +4,7 @@
* Module Name: nsxfname - Public interfaces to the ACPI subsystem
* ACPI Namespace oriented interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 28582adfc0ac..6f6ae38ec044 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -3,7 +3,7 @@
*
* Module Name: psargs - Parse AML opcode arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index d0fd55636129..c989cadf271c 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -3,7 +3,7 @@
*
* Module Name: psloop - Main AML parse loop
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 54471083ba54..496a1c1d5b0b 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -3,7 +3,7 @@
*
* Module Name: psobject - Support for parse objects
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -636,7 +636,8 @@ acpi_status
acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
union acpi_parse_object *op, acpi_status status)
{
- acpi_status status2;
+ acpi_status return_status = status;
+ u8 ascending = TRUE;
ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);
@@ -650,7 +651,7 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
op));
do {
if (op) {
- if (walk_state->ascending_callback != NULL) {
+ if (ascending && walk_state->ascending_callback != NULL) {
walk_state->op = op;
walk_state->op_info =
acpi_ps_get_opcode_info(op->common.
@@ -672,49 +673,26 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
}
if (status == AE_CTRL_TERMINATE) {
- status = AE_OK;
-
- /* Clean up */
- do {
- if (op) {
- status2 =
- acpi_ps_complete_this_op
- (walk_state, op);
- if (ACPI_FAILURE
- (status2)) {
- return_ACPI_STATUS
- (status2);
- }
- }
-
- acpi_ps_pop_scope(&
- (walk_state->
- parser_state),
- &op,
- &walk_state->
- arg_types,
- &walk_state->
- arg_count);
-
- } while (op);
-
- return_ACPI_STATUS(status);
+ ascending = FALSE;
+ return_status = AE_CTRL_TERMINATE;
}
else if (ACPI_FAILURE(status)) {
/* First error is most important */
- (void)
- acpi_ps_complete_this_op(walk_state,
- op);
- return_ACPI_STATUS(status);
+ ascending = FALSE;
+ return_status = status;
}
}
- status2 = acpi_ps_complete_this_op(walk_state, op);
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
+ status = acpi_ps_complete_this_op(walk_state, op);
+ if (ACPI_FAILURE(status)) {
+ ascending = FALSE;
+ if (ACPI_SUCCESS(return_status) ||
+ return_status == AE_CTRL_TERMINATE) {
+ return_status = status;
+ }
}
}
@@ -724,5 +702,5 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
} while (op);
- return_ACPI_STATUS(status);
+ return_ACPI_STATUS(return_status);
}
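The psobject.c rework above replaces two duplicated inner cleanup loops (one for AE_CTRL_TERMINATE, one for failures) with a single loop: once a terminal condition is seen, the ascending flag stops further ascending callbacks, while return_status carries the first significant status so that later acpi_ps_complete_this_op() failures during cleanup cannot mask it. A runnable sketch of the "remember the first error, keep cleaning up" idiom (helpers hypothetical):

#include <stdio.h>

typedef int acpi_status;
#define AE_OK 0
#define ACPI_FAILURE(s) ((s) != AE_OK)
#define ACPI_SUCCESS(s) ((s) == AE_OK)

/* Hypothetical per-op completion: fails on ops 1 and 3 */
static acpi_status complete_op(int op)
{
	return (op == 1 || op == 3) ? op : AE_OK;
}

int main(void)
{
	acpi_status status, return_status = AE_OK;

	/* Clean up every op even after an error, but report the first one */
	for (int op = 0; op < 5; op++) {
		status = complete_op(op);
		if (ACPI_FAILURE(status) && ACPI_SUCCESS(return_status))
			return_status = status; /* first error is most important */
	}
	printf("first error status: %d\n", return_status);
	return 0;
}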
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 39e31030e5f4..bf6103986f48 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: psopcode - Parser/Interpreter opcode information table
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index bccf606e08b4..1c8044ffcb97 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -3,7 +3,7 @@
*
* Module Name: psopinfo - AML opcode information functions and dispatch tables
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 10a072953d78..55a416e56fd8 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -3,7 +3,7 @@
*
* Module Name: psparse - Parser top level AML parse routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index a0035bde7556..c4e4483f0a0b 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -3,7 +3,7 @@
*
* Module Name: psscope - Parser scope stack management routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 7f7f5ecd4011..5a285d3f2cdb 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -3,7 +3,7 @@
*
* Module Name: pstree - Parser op tree manipulation/traversal/search
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index d550c4af4702..ada1dc304d25 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -3,7 +3,7 @@
*
* Module Name: psutils - Parser miscellaneous utilities (Parser only)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index d92817c72b8d..2f3ebcd8aebe 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: pswalk - Parser routines to walk parsed op tree(s)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 6f4eace0ba69..d480de075a90 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -3,7 +3,7 @@
*
* Module Name: psxface - Parser external interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 27384ee245f0..f92010e667cd 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -272,18 +272,13 @@ u8
acpi_rs_get_address_common(struct acpi_resource *resource,
union aml_resource *aml)
{
- struct aml_resource_address address;
-
ACPI_FUNCTION_ENTRY();
- /* Avoid undefined behavior: member access within misaligned address */
-
- memcpy(&address, aml, sizeof(address));
-
/* Validate the Resource Type */
- if ((address.resource_type > 2) &&
- (address.resource_type < 0xC0) && (address.resource_type != 0x0A)) {
+ if ((aml->address.resource_type > 2) &&
+ (aml->address.resource_type < 0xC0) &&
+ (aml->address.resource_type != 0x0A)) {
return (FALSE);
}
@@ -304,7 +299,7 @@ acpi_rs_get_address_common(struct acpi_resource *resource,
/* Generic resource type, just grab the type_specific byte */
resource->data.address.info.type_specific =
- address.specific_flags;
+ aml->address.specific_flags;
}
return (TRUE);
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 6e7a152d6459..242daf45e20e 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -608,18 +608,12 @@ acpi_rs_get_list_length(u8 *aml_buffer,
case ACPI_RESOURCE_NAME_SERIAL_BUS:{
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus
- common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
minimum_aml_resource_length =
acpi_gbl_resource_aml_serial_bus_sizes
- [common_serial_bus.type];
+ [aml_resource->common_serial_bus.type];
extra_struct_bytes +=
- common_serial_bus.resource_length -
+ aml_resource->common_serial_bus.
+ resource_length -
minimum_aml_resource_length;
break;
}
@@ -688,16 +682,10 @@ acpi_rs_get_list_length(u8 *aml_buffer,
*/
if (acpi_ut_get_resource_type(aml_buffer) ==
ACPI_RESOURCE_NAME_SERIAL_BUS) {
-
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
buffer_size =
acpi_gbl_resource_struct_serial_bus_sizes
- [common_serial_bus.type] + extra_struct_bytes;
+ [aml_resource->common_serial_bus.type] +
+ extra_struct_bytes;
} else {
buffer_size =
acpi_gbl_resource_struct_sizes[resource_index] +
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 164c96e063c6..e46efaa889cd 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -55,21 +55,15 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) {
-
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
- if (common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE) {
+ if (aml_resource->common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE) {
conversion_table = NULL;
} else {
/* This is an I2C, SPI, UART, or CSI2 serial_bus descriptor */
conversion_table =
acpi_gbl_convert_resource_serial_bus_dispatch
- [common_serial_bus.type];
+ [aml_resource->common_serial_bus.type];
}
} else {
conversion_table =
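The three resource-manager hunks above (rsaddr.c, rscalc.c, rslist.c) revert the memcpy-into-a-local workarounds that had been added to avoid undefined behavior from member access through a misaligned union aml_resource pointer. With the amlresrc.h fix earlier in this diff the union is fully byte-packed, so the compiler itself generates alignment-safe accesses and the local copies are unnecessary. A sketch of why packing makes the direct access safe (types hypothetical):

#include <stdint.h>
#include <stdio.h>

#pragma pack(1)
struct serialbus_hdr {            /* hypothetical, mirrors the layout idea */
	uint8_t  descriptor_type;
	uint16_t resource_length;     /* offset 1: misaligned without pack(1) */
	uint8_t  type;
};
#pragma pack()

int main(void)
{
	uint8_t aml[4] = { 0x8e, 0x20, 0x00, 0x01 };
	const struct serialbus_hdr *hdr = (const struct serialbus_hdr *)aml;

	/* On a packed struct the compiler emits alignment-safe loads for
	 * these accesses; through an unpacked struct at a misaligned
	 * address the same expressions would be undefined behavior, which
	 * is what the removed memcpy workarounds guarded against. */
	printf("len=%u type=%u\n", hdr->resource_length, hdr->type);
	return 0;
}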
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index a1f10e4409a3..5b98e09fff76 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -3,7 +3,7 @@
*
* Module Name: tbdata - Table manager data structure functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 3c126c6d306b..c6658b2f3027 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfadt - FADT table utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 1c1b2e284bd9..d71a73216380 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfind - find table
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -57,8 +57,8 @@ acpi_tb_find_table(char *signature,
memset(&header, 0, sizeof(struct acpi_table_header));
ACPI_COPY_NAMESEG(header.signature, signature);
- strncpy(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
- strncpy(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
+ memcpy(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
+ memcpy(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
/* Search for the table */
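The strncpy() to memcpy() switch reflects that oem_id and oem_table_id are fixed-width fields, not NUL-terminated strings: copying exactly ACPI_OEM_ID_SIZE bytes is the intended semantics, and it avoids the string-truncation warnings strncpy() tends to draw. Sketch (the "INTEL " value is illustrative):

    char oem_id[ACPI_OEM_ID_SIZE];                  /* 6 bytes, no NUL */

    strncpy(oem_id, "INTEL ", ACPI_OEM_ID_SIZE);    /* old: string semantics */
    memcpy(oem_id, "INTEL ", ACPI_OEM_ID_SIZE);     /* new: exact-width copy */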
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 0dc003c20e4d..ee9b85bc238b 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -3,7 +3,7 @@
*
* Module Name: tbinstal - ACPI table installation and removal
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 58b02e4b254b..fd64460a2e26 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -3,7 +3,7 @@
*
* Module Name: tbprint - Table output utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index dad7425fce3f..fa64851c7b62 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -3,7 +3,7 @@
*
* Module Name: tbutils - ACPI Table utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 275b52dc42e9..a8f07d2641b6 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxface - ACPI table-oriented external interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 0f2a7343de3a..2a17c60a9a39 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfload - Table load/unload external interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 5b413bbab338..961577ba9486 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfroot - Find the root ACPI table (RSDT)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index be94d2fd99a7..c673d6c95e0a 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -3,7 +3,7 @@
*
* Module Name: utaddress - op_region address range check
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index c1fb70457e20..2418a312733a 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -3,7 +3,7 @@
*
* Module Name: utalloc - local memory allocation routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
index 2be37676edd7..259c28d3fecd 100644
--- a/drivers/acpi/acpica/utascii.c
+++ b/drivers/acpi/acpica/utascii.c
@@ -3,7 +3,7 @@
*
* Module Name: utascii - Utility ascii functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index b054bb5eeaf0..f6e6e98e9523 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -3,7 +3,7 @@
*
* Module Name: utbuffer - Buffer dump routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 85a85f7cf750..cabec193febb 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -3,7 +3,7 @@
*
* Module Name: utcache - local cache allocation routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -251,9 +251,9 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
} else {
/* The cache is empty, create a new object */
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
ACPI_MEM_TRACKING(cache->total_allocated++);
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
if ((cache->total_allocated - cache->total_freed) >
cache->max_occupied) {
cache->max_occupied =
diff --git a/drivers/acpi/acpica/utcksum.c b/drivers/acpi/acpica/utcksum.c
index b483894c3629..e6f6030b3a3f 100644
--- a/drivers/acpi/acpica/utcksum.c
+++ b/drivers/acpi/acpica/utcksum.c
@@ -3,7 +3,7 @@
*
* Module Name: utcksum - Support generating table checksums
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 2e17e657dfa4..80458e70ac2b 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -3,7 +3,7 @@
*
* Module Name: utcopy - Internal to external object translation utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 3d71bd9245c7..9f197e293c7e 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: utdebug - Debug print/trace routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 95a4b7509e01..b82130d1a8bc 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -3,7 +3,7 @@
*
* Module Name: utdecode - Utility decoding routines (value-to-string)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index c85bfa13ac1e..e8180099d01f 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -404,7 +404,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
object, object->common.type,
acpi_ut_get_object_type_name(object),
new_count));
- message = "Incremement";
+ message = "Increment";
break;
case REF_DECREMENT:
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 3e5173d03953..abc6583ed369 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -3,7 +3,7 @@
*
* Module Name: uteval - Object evaluation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 820820ea8119..97c55a113bae 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -3,7 +3,7 @@
*
* Module Name: utglobal - Global variables for the ACPI subsystem
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index e62802791dcf..8cd050e9cad5 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -3,7 +3,7 @@
*
* Module Name: uthex -- Hex/ASCII support functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 15c2ce91d403..eb88335dea2c 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -3,7 +3,7 @@
*
* Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 6d78504e9fbc..4bef97e8223a 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utinit - Common ACPI subsystem initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index ee6d72385c5c..123dbcbc60bc 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -3,7 +3,7 @@
*
* Module Name: utlock - Reader/Writer lock interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index f4aae8f0d3a8..272e46208263 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -3,7 +3,7 @@
*
* Module Name: utobject - ACPI object create/delete/size/cache routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 99b85fd6eccf..f6ac16729e42 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -3,7 +3,7 @@
*
* Module Name: utosi - Support for the _OSI predefined control method
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index 29d2977d0746..d9bd80e2d32a 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -3,7 +3,7 @@
*
* Module Name: utpredef - support functions for predefined names
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 42b30b9f9312..423d10569736 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -3,7 +3,7 @@
*
* Module Name: utprint - Formatted printing routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -333,11 +333,8 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)
pos = string;
- if (size != ACPI_UINT32_MAX) {
- end = string + size;
- } else {
- end = ACPI_CAST_PTR(char, ACPI_UINT32_MAX);
- }
+ size = ACPI_MIN(size, ACPI_PTR_DIFF(ACPI_MAX_PTR, string));
+ end = string + size;
for (; *format; ++format) {
if (*format != '%') {
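Instead of special-casing ACPI_UINT32_MAX as "unbounded", the rewrite clamps size so that string + size cannot wrap past the top of the address space, which subsumes the old special case. In plain C (a sketch with standard types instead of the ACPI_* macros), the clamp is roughly:

    uintptr_t room = UINTPTR_MAX - (uintptr_t)string;  /* bytes to top of memory */

    if (size > room)
            size = room;
    end = string + size;    /* pointer arithmetic can no longer overflow */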
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index cff7901f7866..e1cc3d348750 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -361,20 +361,16 @@ acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) {
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
/* Validate the bus_type field */
- if ((common_serial_bus.type == 0) ||
- (common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE)) {
+ if ((aml_resource->common_serial_bus.type == 0) ||
+ (aml_resource->common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE)) {
if (walk_state) {
ACPI_ERROR((AE_INFO,
"Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
- common_serial_bus.type));
+ aml_resource->common_serial_bus.
+ type));
}
return (AE_AML_INVALID_RESOURCE_TYPE);
}
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index f5f5da441458..a99c4c9e3d39 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -3,7 +3,7 @@
*
* Module Name: uttrack - Memory allocation tracking routines (debug only)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index 8f10b413e928..0682554934ca 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -3,7 +3,7 @@
*
* Module Name: utuuid -- UUID support functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index aa2e923462b7..56942b5f026b 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -3,7 +3,7 @@
*
* Module Name: utxface - External interfaces, miscellaneous utility functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 70ae0afa7939..c1702f8fba67 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utxfinit - External interfaces for ACPICA initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 3cfe7e7475f2..070c07d68dfb 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -23,6 +23,7 @@ config ACPI_APEI_GHES
select ACPI_HED
select IRQ_WORK
select GENERIC_ALLOCATOR
+ select ARM_SDE_INTERFACE if ARM64
help
Generic Hardware Error Source provides a way to report
platform hardware errors (such as that from chipset). It
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index 04731a5b01fa..fea11a35eea3 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -21,7 +21,7 @@
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/mm.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/unaligned.h>
#include "apei-internal.h"
@@ -83,6 +83,8 @@ static struct debugfs_blob_wrapper vendor_blob;
static struct debugfs_blob_wrapper vendor_errors;
static char vendor_dev[64];
+static u32 available_error_type;
+
/*
* Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
* EINJ table through an unpublished extension. Use with caution as
@@ -648,14 +650,9 @@ static struct { u32 mask; const char *str; } const einj_error_type_string[] = {
static int available_error_type_show(struct seq_file *m, void *v)
{
- int rc;
- u32 error_type = 0;
- rc = einj_get_available_error_type(&error_type);
- if (rc)
- return rc;
for (int pos = 0; pos < ARRAY_SIZE(einj_error_type_string); pos++)
- if (error_type & einj_error_type_string[pos].mask)
+ if (available_error_type & einj_error_type_string[pos].mask)
seq_printf(m, "0x%08x\t%s\n", einj_error_type_string[pos].mask,
einj_error_type_string[pos].str);
@@ -678,8 +675,7 @@ bool einj_is_cxl_error_type(u64 type)
int einj_validate_error_type(u64 type)
{
- u32 tval, vendor, available_error_type = 0;
- int rc;
+ u32 tval, vendor;
/* Only low 32 bits for error type are valid */
if (type & GENMASK_ULL(63, 32))
@@ -695,13 +691,9 @@ int einj_validate_error_type(u64 type)
/* Only one error type can be specified */
if (tval & (tval - 1))
return -EINVAL;
- if (!vendor) {
- rc = einj_get_available_error_type(&available_error_type);
- if (rc)
- return rc;
+ if (!vendor)
if (!(type & available_error_type))
return -EINVAL;
- }
return 0;
}
@@ -749,17 +741,12 @@ static int einj_check_table(struct acpi_table_einj *einj_tab)
return 0;
}
-static int __init einj_probe(struct platform_device *pdev)
+static int __init einj_probe(struct faux_device *fdev)
{
int rc;
acpi_status status;
struct apei_exec_context ctx;
- if (acpi_disabled) {
- pr_debug("ACPI disabled.\n");
- return -ENODEV;
- }
-
status = acpi_get_table(ACPI_SIG_EINJ, 0,
(struct acpi_table_header **)&einj_tab);
if (status == AE_NOT_FOUND) {
@@ -777,6 +764,10 @@ static int __init einj_probe(struct platform_device *pdev)
goto err_put_table;
}
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ goto err_put_table;
+
rc = -ENOMEM;
einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
@@ -851,7 +842,7 @@ err_put_table:
return rc;
}
-static void __exit einj_remove(struct platform_device *pdev)
+static void __exit einj_remove(struct faux_device *fdev)
{
struct apei_exec_context ctx;
@@ -872,34 +863,30 @@ static void __exit einj_remove(struct platform_device *pdev)
acpi_put_table((struct acpi_table_header *)einj_tab);
}
-static struct platform_device *einj_dev;
+static struct faux_device *einj_dev;
/*
* einj_remove() lives in .exit.text. For drivers registered via
* platform_driver_probe() this is ok because they cannot get unbound at
* runtime. So mark the driver struct with __refdata to prevent modpost
* triggering a section mismatch warning.
*/
-static struct platform_driver einj_driver __refdata = {
+static struct faux_device_ops einj_device_ops __refdata = {
+ .probe = einj_probe,
.remove = __exit_p(einj_remove),
- .driver = {
- .name = "acpi-einj",
- },
};
static int __init einj_init(void)
{
- struct platform_device_info einj_dev_info = {
- .name = "acpi-einj",
- .id = -1,
- };
- int rc;
+ if (acpi_disabled) {
+ pr_debug("ACPI disabled.\n");
+ return -ENODEV;
+ }
- einj_dev = platform_device_register_full(&einj_dev_info);
- if (IS_ERR(einj_dev))
- return PTR_ERR(einj_dev);
+ einj_dev = faux_device_create("acpi-einj", NULL, &einj_device_ops);
+ if (!einj_dev)
+ return -ENODEV;
- rc = platform_driver_probe(&einj_driver, einj_probe);
- einj_initialized = rc == 0;
+ einj_initialized = true;
return 0;
}
@@ -907,9 +894,8 @@ static int __init einj_init(void)
static void __exit einj_exit(void)
{
if (einj_initialized)
- platform_driver_unregister(&einj_driver);
+ faux_device_destroy(einj_dev);
- platform_device_unregister(einj_dev);
}
module_init(einj_init);
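This converts EINJ from a self-registering platform device/driver pair to the faux bus, which exists for exactly this kind of firmware-probed, software-only device; the acpi_disabled check accordingly moves out of probe and into the initcall. A minimal sketch of the faux pattern (names hypothetical):

    static int my_probe(struct faux_device *fdev)
    {
            return 0;                       /* grab tables, debugfs, ... */
    }

    static struct faux_device_ops my_ops = {
            .probe = my_probe,
    };

    static struct faux_device *my_dev;

    static int __init my_init(void)
    {
            my_dev = faux_device_create("my-dev", NULL, &my_ops);
            return my_dev ? 0 : -ENODEV;    /* NULL on any failure */
    }

    static void __exit my_exit(void)
    {
            faux_device_destroy(my_dev);    /* unbinds and releases */
    }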
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 289e365f84b2..0f3c663c1b0a 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1715,7 +1715,7 @@ void __init acpi_ghes_init(void)
{
int rc;
- sdei_init();
+ acpi_sdei_init();
if (acpi_disabled)
return;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 6760330a8af5..45593612a4db 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -243,10 +243,23 @@ static int acpi_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_POWER_NOW:
- if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
+ if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) {
ret = -ENODEV;
- else
- val->intval = battery->rate_now * 1000;
+ break;
+ }
+
+ val->intval = battery->rate_now * 1000;
+ /*
+ * When discharging, the current should be reported as a
+ * negative number as per the power supply class interface
+ * definition.
+ */
+ if (psp == POWER_SUPPLY_PROP_CURRENT_NOW &&
+ (battery->state & ACPI_BATTERY_STATE_DISCHARGING) &&
+ acpi_battery_handle_discharging(battery)
+ == POWER_SUPPLY_STATUS_DISCHARGING)
+ val->intval = -val->intval;
+
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
@@ -279,8 +292,8 @@ static int acpi_battery_get_property(struct power_supply *psy,
full_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
else
- val->intval = battery->capacity_now * 100/
- full_capacity;
+ val->intval = DIV_ROUND_CLOSEST_ULL(battery->capacity_now * 100ULL,
+ full_capacity);
break;
case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
if (battery->state & ACPI_BATTERY_STATE_CRITICAL)
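Two behavioral fixes ride along in the battery hunks: CURRENT_NOW is negated while discharging, matching the power-supply class sign convention, and the capacity percentage now rounds to nearest instead of truncating (the 64-bit helper also sidesteps any 32-bit overflow in capacity_now * 100). Worked example of the rounding change:

    u64 now = 4999, full = 10000;                             /* 49.99% charged */

    int pct_old = now * 100 / full;                           /* 49, truncated  */
    int pct_new = DIV_ROUND_CLOSEST_ULL(now * 100ULL, full);  /* 50, nearest    */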
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 058910af82bc..c2ab2783303f 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1446,8 +1446,10 @@ static int __init acpi_init(void)
}
acpi_kobj = kobject_create_and_add("acpi", firmware_kobj);
- if (!acpi_kobj)
- pr_debug("%s: kset create error\n", __func__);
+ if (!acpi_kobj) {
+ pr_err("Failed to register kobject\n");
+ return -ENOMEM;
+ }
init_prmt();
acpi_init_pcc();
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index f193e713825a..a9ae2fd62863 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -47,7 +47,6 @@
struct cppc_pcc_data {
struct pcc_mbox_chan *pcc_channel;
- void __iomem *pcc_comm_addr;
bool pcc_channel_acquired;
unsigned int deadline_us;
unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
@@ -95,7 +94,7 @@ static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
/* pcc mapped address + header size + offset within PCC subspace */
-#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
+#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_channel->shmem + \
0x8 + (offs))
/* Check if a CPC register is in PCC */
@@ -129,6 +128,20 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \
!!(cpc)->cpc_entry.int_value : \
!IS_NULL_REG(&(cpc)->cpc_entry.reg))
+
+/*
+ * Each bit indicates whether the register at the corresponding index
+ * in the per-CPU cpc_regs[] is optional: 0 means mandatory, 1 means
+ * optional.
+ */
+#define REG_OPTIONAL (0x1FC7D0)
+
+/*
+ * Use the index of the register in per-cpu cpc_regs[] to check if
+ * it's an optional one.
+ */
+#define IS_OPTIONAL_CPC_REG(reg_idx) (REG_OPTIONAL & (1U << (reg_idx)))
+
/*
* Arbitrary Retries in case the remote processor is slow to respond
* to PCC commands. Keeping it high enough to cover emulators where
@@ -223,7 +236,7 @@ static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
int ret, status;
struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
struct acpi_pcct_shared_memory __iomem *generic_comm_base =
- pcc_ss_data->pcc_comm_addr;
+ pcc_ss_data->pcc_channel->shmem;
if (!pcc_ss_data->platform_owns_pcc)
return 0;
@@ -258,7 +271,7 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
int ret = -EIO, i;
struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
struct acpi_pcct_shared_memory __iomem *generic_comm_base =
- pcc_ss_data->pcc_comm_addr;
+ pcc_ss_data->pcc_channel->shmem;
unsigned int time_delta;
/*
@@ -571,15 +584,6 @@ static int register_pcc_channel(int pcc_ss_idx)
pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;
- pcc_data[pcc_ss_idx]->pcc_comm_addr =
- acpi_os_ioremap(pcc_chan->shmem_base_addr,
- pcc_chan->shmem_size);
- if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
- pr_err("Failed to ioremap PCC comm region mem for %d\n",
- pcc_ss_idx);
- return -ENOMEM;
- }
-
/* Set flag so that we don't come here for each CPU. */
pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
}
@@ -1175,43 +1179,106 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
return ret_val;
}
-static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
+static int cppc_get_reg_val_in_pcc(int cpu, struct cpc_register_resource *reg, u64 *val)
{
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+ int ret;
+
+ if (pcc_ss_id < 0) {
+ pr_debug("Invalid pcc_ss_id\n");
+ return -ENODEV;
+ }
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+
+ if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
+ ret = cpc_read(cpu, reg, val);
+ else
+ ret = -EIO;
+
+ up_write(&pcc_ss_data->pcc_lock);
+
+ return ret;
+}
+
+static int cppc_get_reg_val(int cpu, enum cppc_regs reg_idx, u64 *val)
+{
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
struct cpc_register_resource *reg;
+ if (val == NULL)
+ return -EINVAL;
+
if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
+ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
return -ENODEV;
}
reg = &cpc_desc->cpc_regs[reg_idx];
- if (CPC_IN_PCC(reg)) {
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = 0;
+ if ((reg->type == ACPI_TYPE_INTEGER && IS_OPTIONAL_CPC_REG(reg_idx) &&
+ !reg->cpc_entry.int_value) || (reg->type != ACPI_TYPE_INTEGER &&
+ IS_NULL_REG(&reg->cpc_entry.reg))) {
+ pr_debug("CPC register is not supported\n");
+ return -EOPNOTSUPP;
+ }
- if (pcc_ss_id < 0)
- return -EIO;
+ if (CPC_IN_PCC(reg))
+ return cppc_get_reg_val_in_pcc(cpu, reg, val);
- pcc_ss_data = pcc_data[pcc_ss_id];
-
- down_write(&pcc_ss_data->pcc_lock);
+ return cpc_read(cpu, reg, val);
+}
- if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
- cpc_read(cpunum, reg, perf);
- else
- ret = -EIO;
+static int cppc_set_reg_val_in_pcc(int cpu, struct cpc_register_resource *reg, u64 val)
+{
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+ int ret;
- up_write(&pcc_ss_data->pcc_lock);
+ if (pcc_ss_id < 0) {
+ pr_debug("Invalid pcc_ss_id\n");
+ return -ENODEV;
+ }
+ ret = cpc_write(cpu, reg, val);
+ if (ret)
return ret;
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+ /* after writing CPC, transfer the ownership of PCC to platform */
+ ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
+ up_write(&pcc_ss_data->pcc_lock);
+
+ return ret;
+}
+
+static int cppc_set_reg_val(int cpu, enum cppc_regs reg_idx, u64 val)
+{
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
+ struct cpc_register_resource *reg;
+
+ if (!cpc_desc) {
+ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
+ return -ENODEV;
}
- cpc_read(cpunum, reg, perf);
+ reg = &cpc_desc->cpc_regs[reg_idx];
- return 0;
+ /* if a register is writeable, it must be a buffer and not null */
+ if ((reg->type != ACPI_TYPE_BUFFER) || IS_NULL_REG(&reg->cpc_entry.reg)) {
+ pr_debug("CPC register is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (CPC_IN_PCC(reg))
+ return cppc_set_reg_val_in_pcc(cpu, reg, val);
+
+ return cpc_write(cpu, reg, val);
}
/**
@@ -1223,7 +1290,7 @@ static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
*/
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
- return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
+ return cppc_get_reg_val(cpunum, DESIRED_PERF, desired_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
@@ -1236,7 +1303,7 @@ EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
*/
int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
- return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
+ return cppc_get_reg_val(cpunum, NOMINAL_PERF, nominal_perf);
}
/**
@@ -1248,7 +1315,7 @@ int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
*/
int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
{
- return cppc_get_perf(cpunum, HIGHEST_PERF, highest_perf);
+ return cppc_get_reg_val(cpunum, HIGHEST_PERF, highest_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_highest_perf);
@@ -1261,7 +1328,7 @@ EXPORT_SYMBOL_GPL(cppc_get_highest_perf);
*/
int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{
- return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
+ return cppc_get_reg_val(cpunum, ENERGY_PERF, epp_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_epp_perf);
@@ -1535,53 +1602,110 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
EXPORT_SYMBOL_GPL(cppc_set_epp_perf);
/**
- * cppc_get_auto_sel_caps - Read autonomous selection register.
- * @cpunum : CPU from which to read register.
- * @perf_caps : struct where autonomous selection register value is updated.
+ * cppc_set_epp() - Write the EPP register.
+ * @cpu: CPU on which to write register.
+ * @epp_val: Value to write to the EPP register.
*/
-int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
+int cppc_set_epp(int cpu, u64 epp_val)
{
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
- struct cpc_register_resource *auto_sel_reg;
- u64 auto_sel;
+ if (epp_val > CPPC_ENERGY_PERF_MAX)
+ return -EINVAL;
- if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
- return -ENODEV;
- }
+ return cppc_set_reg_val(cpu, ENERGY_PERF, epp_val);
+}
+EXPORT_SYMBOL_GPL(cppc_set_epp);
- auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
+/**
+ * cppc_get_auto_act_window() - Read autonomous activity window register.
+ * @cpu: CPU from which to read register.
+ * @auto_act_window: Where the decoded window, in microseconds, is returned.
+ *
+ * According to ACPI 6.5, s8.4.6.1.6, the value read from the autonomous
+ * activity window register consists of two parts: a 7-bit significand
+ * and a 3-bit exponent.
+ */
+int cppc_get_auto_act_window(int cpu, u64 *auto_act_window)
+{
+ unsigned int exp;
+ u64 val, sig;
+ int ret;
- if (!CPC_SUPPORTED(auto_sel_reg))
- pr_warn_once("Autonomous mode is not unsupported!\n");
+ if (auto_act_window == NULL)
+ return -EINVAL;
- if (CPC_IN_PCC(auto_sel_reg)) {
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = 0;
+ ret = cppc_get_reg_val(cpu, AUTO_ACT_WINDOW, &val);
+ if (ret)
+ return ret;
- if (pcc_ss_id < 0)
- return -ENODEV;
+ sig = val & CPPC_AUTO_ACT_WINDOW_MAX_SIG;
+ exp = (val >> CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE) & CPPC_AUTO_ACT_WINDOW_MAX_EXP;
+ *auto_act_window = sig * int_pow(10, exp);
- pcc_ss_data = pcc_data[pcc_ss_id];
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cppc_get_auto_act_window);
- down_write(&pcc_ss_data->pcc_lock);
+/**
+ * cppc_set_auto_act_window() - Write autonomous activity window register.
+ * @cpu: CPU on which to write register.
+ * @auto_act_window: usec value to write to the autonomous activity window register.
+ *
+ * According to ACPI 6.5, s8.4.6.1.6, the value to write to the autonomous
+ * activity window register consists of two parts: a 7-bit significand
+ * and a 3-bit exponent.
+ */
+int cppc_set_auto_act_window(int cpu, u64 auto_act_window)
+{
+ /* The max value to store is 1270000000 */
+ u64 max_val = CPPC_AUTO_ACT_WINDOW_MAX_SIG * int_pow(10, CPPC_AUTO_ACT_WINDOW_MAX_EXP);
+ int exp = 0;
+ u64 val;
- if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) {
- cpc_read(cpunum, auto_sel_reg, &auto_sel);
- perf_caps->auto_sel = (bool)auto_sel;
- } else {
- ret = -EIO;
- }
+ if (auto_act_window > max_val)
+ return -EINVAL;
- up_write(&pcc_ss_data->pcc_lock);
+ /*
+ * The max significand is 127; while auto_act_window is larger than
+ * 129, discard the precision of the last digit and increase the
+ * exponent by 1.
+ */
+ while (auto_act_window > CPPC_AUTO_ACT_WINDOW_SIG_CARRY_THRESH) {
+ auto_act_window /= 10;
+ exp += 1;
+ }
+
+ /* For 128 and 129, cut it to 127. */
+ if (auto_act_window > CPPC_AUTO_ACT_WINDOW_MAX_SIG)
+ auto_act_window = CPPC_AUTO_ACT_WINDOW_MAX_SIG;
+
+ val = (exp << CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE) + auto_act_window;
+ return cppc_set_reg_val(cpu, AUTO_ACT_WINDOW, val);
+}
+EXPORT_SYMBOL_GPL(cppc_set_auto_act_window);
+
+/**
+ * cppc_get_auto_sel() - Read autonomous selection register.
+ * @cpu: CPU from which to read register.
+ * @enable: Where the autonomous selection enable state is returned.
+ */
+int cppc_get_auto_sel(int cpu, bool *enable)
+{
+ u64 auto_sel;
+ int ret;
+
+ if (enable == NULL)
+ return -EINVAL;
+
+ ret = cppc_get_reg_val(cpu, AUTO_SEL_ENABLE, &auto_sel);
+ if (ret)
return ret;
- }
+
+ *enable = (bool)auto_sel;
return 0;
}
-EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
+EXPORT_SYMBOL_GPL(cppc_get_auto_sel);
/**
* cppc_set_auto_sel - Write autonomous selection register.
@@ -1590,43 +1714,7 @@ EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
*/
int cppc_set_auto_sel(int cpu, bool enable)
{
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
- struct cpc_register_resource *auto_sel_reg;
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = -EINVAL;
-
- if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpu);
- return -ENODEV;
- }
-
- auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
-
- if (CPC_IN_PCC(auto_sel_reg)) {
- if (pcc_ss_id < 0) {
- pr_debug("Invalid pcc_ss_id\n");
- return -ENODEV;
- }
-
- if (CPC_SUPPORTED(auto_sel_reg)) {
- ret = cpc_write(cpu, auto_sel_reg, enable);
- if (ret)
- return ret;
- }
-
- pcc_ss_data = pcc_data[pcc_ss_id];
-
- down_write(&pcc_ss_data->pcc_lock);
- /* after writing CPC, transfer the ownership of PCC to platform */
- ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
- up_write(&pcc_ss_data->pcc_lock);
- } else {
- ret = -ENOTSUPP;
- pr_debug("_CPC in PCC is not supported\n");
- }
-
- return ret;
+ return cppc_set_reg_val(cpu, AUTO_SEL_ENABLE, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
@@ -1640,38 +1728,7 @@ EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
*/
int cppc_set_enable(int cpu, bool enable)
{
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
- struct cpc_register_resource *enable_reg;
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = -EINVAL;
-
- if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpu);
- return -EINVAL;
- }
-
- enable_reg = &cpc_desc->cpc_regs[ENABLE];
-
- if (CPC_IN_PCC(enable_reg)) {
-
- if (pcc_ss_id < 0)
- return -EIO;
-
- ret = cpc_write(cpu, enable_reg, enable);
- if (ret)
- return ret;
-
- pcc_ss_data = pcc_data[pcc_ss_id];
-
- down_write(&pcc_ss_data->pcc_lock);
- /* after writing CPC, transfer the ownership of PCC to platfrom */
- ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
- up_write(&pcc_ss_data->pcc_lock);
- return ret;
- }
-
- return cpc_write(cpu, enable_reg, enable);
+ return cppc_set_reg_val(cpu, ENABLE, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_enable);
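The activity-window helpers above encode a 10-bit register field: bits 6:0 hold the significand, bits 9:7 the exponent, and the window is significand * 10^exponent microseconds (values of 128 and 129 round down to the max significand of 127). Encoding 150000 us, as a sketch of the loop in cppc_set_auto_act_window():

    u64 win = 150000, val;
    unsigned int exp = 0;

    while (win > 129) {     /* the significand carry threshold */
            win /= 10;      /* 150000 -> 15000 -> 1500 -> 150 -> 15 */
            exp++;          /* finishes at exp = 4 */
    }
    val = (exp << 7) | win; /* decodes back as 15 * 10^4 = 150000 us */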
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 3c5f34892734..6f4203716b53 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2329,6 +2329,12 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
}
},
+ {
+ // TUXEDO InfinityBook Pro AMD Gen9
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
+ },
+ },
{ },
};
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 0a725e46d017..53816dfab645 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
+#include <linux/memory.h>
#include <linux/numa.h>
#include <linux/nodemask.h>
#include <linux/topology.h>
@@ -429,13 +430,23 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
{
struct acpi_cedt_cfmws *cfmws;
int *fake_pxm = arg;
- u64 start, end;
+ u64 start, end, align;
int node;
+ int err;
cfmws = (struct acpi_cedt_cfmws *)header;
start = cfmws->base_hpa;
end = cfmws->base_hpa + cfmws->window_size;
+ /* Align memblock size to CFMW regions if possible */
+ align = 1UL << __ffs(start | end);
+ if (align >= SZ_256M) {
+ err = memory_block_advise_max_size(align);
+ if (err)
+ pr_warn("CFMWS: memblock size advise failed (%d)\n", err);
+ } else {
+ pr_err("CFMWS: [BIOS BUG] base/size alignment violates spec\n");
+ }
+
/*
* The SRAT may have already described NUMA details for all,
* or a portion of, this CFMWS HPA range. Extend the memblks
@@ -453,7 +464,7 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
return -EINVAL;
}
- if (numa_add_memblk(node, start, end) < 0) {
+ if (numa_add_reserved_memblk(node, start, end) < 0) {
/* CXL driver must handle the NUMA_NO_NODE case */
pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
node, start, end);
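The alignment probe is a one-liner: start and end are both multiples of 2^k exactly when the low k bits of (start | end) are clear, so the lowest set bit of the OR is the largest alignment they share. For instance:

    u64 start = 0x100000000ULL;             /* 4 GiB */
    u64 end   = 0x110000000ULL;             /* start + 256 MiB */
    u64 align = 1UL << __ffs(start | end);  /* bit 28 -> 256 MiB */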
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index df9328c850bd..f2c943b934be 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -42,7 +42,6 @@ static struct acpi_osi_entry
osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
{"Module Device", true},
{"Processor Device", true},
- {"3.0 _SCP Extensions", true},
{"Processor Aggregator Device", true},
};
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d0b6a024daae..74ade4160314 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -858,7 +858,7 @@ next:
}
}
-static void acpi_pci_root_remap_iospace(struct fwnode_handle *fwnode,
+static void acpi_pci_root_remap_iospace(const struct fwnode_handle *fwnode,
struct resource_entry *entry)
{
#ifdef PCI_IOBASE
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index ffbfd32f4cf1..b43f4459a4f6 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -688,6 +688,9 @@ static int __init platform_profile_init(void)
{
int err;
+ if (acpi_disabled)
+ return -EOPNOTSUPP;
+
err = class_register(&platform_profile_class);
if (err)
return err;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index b181f7fc2090..e2febca2ec13 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -461,10 +461,8 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
- unsigned int i;
int result;
-
/* NOTE: the idle thread may not be running while calling
* this function */
@@ -481,17 +479,7 @@ static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
acpi_processor_get_power_info_default(pr);
pr->power.count = acpi_processor_power_verify(pr);
-
- /*
- * if one state of type C2 or C3 is available, mark this
- * CPU as being "idle manageable"
- */
- for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
- if (pr->power.states[i].valid) {
- pr->power.count = i;
- pr->flags.power = 1;
- }
- }
+ pr->flags.power = 1;
return 0;
}
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 53996f1a2d80..64b8d1e19594 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -20,6 +20,7 @@
#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
+#include <asm/msr.h>
#endif
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 00d045e5f524..d1541a386fbc 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -18,9 +18,12 @@
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
+#include <linux/uaccess.h>
#include <acpi/processor.h>
#include <asm/io.h>
-#include <linux/uaccess.h>
+#ifdef CONFIG_X86
+#include <asm/msr.h>
+#endif
/* ignore_tpc:
* 0 -> acpi processor driver doesn't ignore _TPC values
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 14c7bac4100b..7d59c6c9185f 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -534,7 +534,7 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
*/
static const struct dmi_system_id irq1_edge_low_force_override[] = {
{
- /* MECHREV Jiaolong17KS Series GM7XG0M */
+ /* MECHREVO Jiaolong17KS Series GM7XG0M */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GM7XG0M"),
},
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 2295abbecd14..fa9bb8c8ce95 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -396,7 +396,7 @@ static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
}
/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
-static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = {
+static const char table_sigs[][ACPI_NAMESEG_SIZE] __nonstring_array __initconst = {
ACPI_SIG_BERT, ACPI_SIG_BGRT, ACPI_SIG_CPEP, ACPI_SIG_ECDT,
ACPI_SIG_EINJ, ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT,
ACPI_SIG_MSCT, ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT,
@@ -719,8 +719,12 @@ int __init acpi_locate_initial_tables(void)
}
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_warn("Failed to initialize tables, status=0x%x (%s)", status, msg);
return -EINVAL;
+ }
return 0;
}
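acpi_format_exception() translates an acpi_status into its symbolic name, so the new warning carries both the raw code and something greppable; the output would look roughly like this (the numeric value shown is illustrative, per acexcep.h):

    /* "Failed to initialize tables, status=0x4 (AE_NO_MEMORY)" */
    pr_warn("Failed to initialize tables, status=0x%x (%s)\n",
            status, acpi_format_exception(status));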
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 0c874186f8ae..5c2defe55898 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -803,6 +803,12 @@ static int acpi_thermal_add(struct acpi_device *device)
acpi_thermal_aml_dependency_fix(tz);
+ /*
+ * Set the cooling mode [_SCP] to active cooling. This needs to happen before
+ * we retrieve the trip point values.
+ */
+ acpi_execute_simple_method(tz->device->handle, "_SCP", ACPI_THERMAL_MODE_ACTIVE);
+
/* Get trip points [_ACi, _PSV, etc.] (required). */
acpi_thermal_get_trip_points(tz);
@@ -814,10 +820,6 @@ static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
- /* Set the cooling mode [_SCP] to active cooling. */
- acpi_execute_simple_method(tz->device->handle, "_SCP",
- ACPI_THERMAL_MODE_ACTIVE);
-
/* Determine the default polling frequency [_TZP]. */
if (tzp)
tz->polling_frequency = tzp;
diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c
index 2aa69a2fba73..c13a20365c2c 100644
--- a/drivers/acpi/viot.c
+++ b/drivers/acpi/viot.c
@@ -19,11 +19,11 @@
#define pr_fmt(fmt) "ACPI: VIOT: " fmt
#include <linux/acpi_viot.h>
-#include <linux/fwnode.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
struct viot_iommu {
/* Node offset within the table */
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 94c6446604fc..98da8c4eea59 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -187,7 +187,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
inode_lock(d_inode(root));
/* look it up */
- dentry = lookup_one_len(name, root, name_len);
+ dentry = lookup_noperm(&QSTR(name), root);
if (IS_ERR(dentry)) {
inode_unlock(d_inode(root));
ret = PTR_ERR(dentry);
@@ -487,7 +487,7 @@ static struct dentry *binderfs_create_dentry(struct dentry *parent,
{
struct dentry *dentry;
- dentry = lookup_one_len(name, parent, strlen(name));
+ dentry = lookup_noperm(&QSTR(name), parent);
if (IS_ERR(dentry))
return dentry;
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index af0029d30dbe..1037169abb45 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -154,14 +154,6 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
per_cpu(arch_freq_scale, i) = scale;
}
-DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
-
-void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
-{
- per_cpu(cpu_scale, cpu) = capacity;
-}
-
DEFINE_PER_CPU(unsigned long, hw_pressure);
/**
@@ -207,53 +199,9 @@ void topology_update_hw_pressure(const struct cpumask *cpus,
}
EXPORT_SYMBOL_GPL(topology_update_hw_pressure);
-static ssize_t cpu_capacity_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
-
- return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
-}
-
static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
-static DEVICE_ATTR_RO(cpu_capacity);
-
-static int cpu_capacity_sysctl_add(unsigned int cpu)
-{
- struct device *cpu_dev = get_cpu_device(cpu);
-
- if (!cpu_dev)
- return -ENOENT;
-
- device_create_file(cpu_dev, &dev_attr_cpu_capacity);
-
- return 0;
-}
-
-static int cpu_capacity_sysctl_remove(unsigned int cpu)
-{
- struct device *cpu_dev = get_cpu_device(cpu);
-
- if (!cpu_dev)
- return -ENOENT;
-
- device_remove_file(cpu_dev, &dev_attr_cpu_capacity);
-
- return 0;
-}
-
-static int register_cpu_capacity_sysctl(void)
-{
- cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
- cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);
-
- return 0;
-}
-subsys_initcall(register_cpu_capacity_sysctl);
-
static int update_topology;
int topology_update_cpu_topology(void)
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 95717d509ca9..dba7c8e13a53 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -395,6 +395,114 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv)
}
EXPORT_SYMBOL_GPL(auxiliary_driver_unregister);
+static void auxiliary_device_release(struct device *dev)
+{
+ struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+
+ kfree(auxdev);
+}
+
+/**
+ * auxiliary_device_create - create a device on the auxiliary bus
+ * @dev: parent device
+ * @modname: module name used to create the auxiliary driver name.
+ * @devname: auxiliary bus device name
+ * @platform_data: auxiliary bus device platform data
+ * @id: auxiliary bus device id
+ *
+ * Helper to create an auxiliary bus device.
+ * The device created matches driver 'modname.devname' on the auxiliary bus.
+ */
+struct auxiliary_device *auxiliary_device_create(struct device *dev,
+ const char *modname,
+ const char *devname,
+ void *platform_data,
+ int id)
+{
+ struct auxiliary_device *auxdev;
+ int ret;
+
+ auxdev = kzalloc(sizeof(*auxdev), GFP_KERNEL);
+ if (!auxdev)
+ return NULL;
+
+ auxdev->id = id;
+ auxdev->name = devname;
+ auxdev->dev.parent = dev;
+ auxdev->dev.platform_data = platform_data;
+ auxdev->dev.release = auxiliary_device_release;
+ device_set_of_node_from_dev(&auxdev->dev, dev);
+
+ ret = auxiliary_device_init(auxdev);
+ if (ret) {
+ kfree(auxdev);
+ return NULL;
+ }
+
+ ret = __auxiliary_device_add(auxdev, modname);
+ if (ret) {
+ /*
+ * It may look odd but auxdev should not be freed here.
+ * auxiliary_device_uninit() calls device_put(), which calls
+ * the device release function, freeing auxdev.
+ */
+ auxiliary_device_uninit(auxdev);
+ return NULL;
+ }
+
+ return auxdev;
+}
+EXPORT_SYMBOL_GPL(auxiliary_device_create);
+
+/**
+ * auxiliary_device_destroy - remove an auxiliary device
+ * @auxdev: pointer to the auxdev to be removed
+ *
+ * Helper to remove an auxiliary device created with
+ * auxiliary_device_create()
+ */
+void auxiliary_device_destroy(void *auxdev)
+{
+ struct auxiliary_device *_auxdev = auxdev;
+
+ auxiliary_device_delete(_auxdev);
+ auxiliary_device_uninit(_auxdev);
+}
+EXPORT_SYMBOL_GPL(auxiliary_device_destroy);
+
+/**
+ * __devm_auxiliary_device_create - create a managed device on the auxiliary bus
+ * @dev: parent device
+ * @modname: module name used to create the auxiliary driver name.
+ * @devname: auxiliary bus device name
+ * @platform_data: auxiliary bus device platform data
+ * @id: auxiliary bus device id
+ *
+ * Device managed helper to create an auxiliary bus device.
+ * The device created matches driver 'modname.devname' on the auxiliary bus.
+ */
+struct auxiliary_device *__devm_auxiliary_device_create(struct device *dev,
+ const char *modname,
+ const char *devname,
+ void *platform_data,
+ int id)
+{
+ struct auxiliary_device *auxdev;
+ int ret;
+
+ auxdev = auxiliary_device_create(dev, modname, devname, platform_data, id);
+ if (!auxdev)
+ return NULL;
+
+ ret = devm_add_action_or_reset(dev, auxiliary_device_destroy,
+ auxdev);
+ if (ret)
+ return NULL;
+
+ return auxdev;
+}
+EXPORT_SYMBOL_GPL(__devm_auxiliary_device_create);
+
void __init auxiliary_bus_init(void)
{
WARN_ON(bus_register(&auxiliary_bus_type));
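A sketch of a caller of the managed variant (parent, pdata, and the "mydev" name are hypothetical); the devm action registered via devm_add_action_or_reset() unwinds the device automatically when the parent unbinds:

    struct auxiliary_device *adev;

    adev = __devm_auxiliary_device_create(parent, KBUILD_MODNAME,
                                          "mydev", pdata, 0);
    if (!adev)
            return -ENODEV;
    /* a driver registered as "<modname>.mydev" now binds; no manual
     * auxiliary_device_destroy() call is needed on the unwind path */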
diff --git a/drivers/base/component.c b/drivers/base/component.c
index abe60eb45c55..024ad9471b8a 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -586,7 +586,8 @@ EXPORT_SYMBOL_GPL(component_master_is_bound);
static void component_unbind(struct component *component,
struct aggregate_device *adev, void *data)
{
- WARN_ON(!component->bound);
+ if (WARN_ON(!component->bound))
+ return;
dev_dbg(adev->parent, "unbinding %s component %p (ops %ps)\n",
dev_name(component->dev), component, component->ops);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 50651435577c..7779ab0ca7ce 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -600,6 +600,7 @@ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds);
CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
CPU_SHOW_VULN_FALLBACK(ghostwrite);
+CPU_SHOW_VULN_FALLBACK(old_microcode);
CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
@@ -617,6 +618,7 @@ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NU
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
+static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL);
static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
@@ -635,6 +637,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_gather_data_sampling.attr,
&dev_attr_reg_file_data_sampling.attr,
&dev_attr_ghostwrite.attr,
+ &dev_attr_old_microcode.attr,
&dev_attr_indirect_target_selection.attr,
NULL
};
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index d8a733ea5e1a..ff55e1bcfa30 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -759,6 +759,17 @@ int __devm_add_action(struct device *dev, void (*action)(void *), void *data, co
}
EXPORT_SYMBOL_GPL(__devm_add_action);
+bool devm_is_action_added(struct device *dev, void (*action)(void *), void *data)
+{
+ struct action_devres devres = {
+ .data = data,
+ .action = action,
+ };
+
+ return devres_find(dev, devm_action_release, devm_action_match, &devres);
+}
+EXPORT_SYMBOL_GPL(devm_is_action_added);
+
/**
* devm_remove_action_nowarn() - removes previously added custom action
* @dev: Device that owns the action
@@ -976,17 +987,10 @@ EXPORT_SYMBOL_GPL(devm_krealloc);
*/
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
- size_t size;
- char *buf;
-
if (!s)
return NULL;
- size = strlen(s) + 1;
- buf = devm_kmalloc(dev, size, gfp);
- if (buf)
- memcpy(buf, s, size);
- return buf;
+ return devm_kmemdup(dev, s, strlen(s) + 1, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup);
diff --git a/drivers/base/faux.c b/drivers/base/faux.c
index 407c1d1aad50..9054d346bd7f 100644
--- a/drivers/base/faux.c
+++ b/drivers/base/faux.c
@@ -25,6 +25,7 @@
struct faux_object {
struct faux_device faux_dev;
const struct faux_device_ops *faux_ops;
+ const struct attribute_group **groups;
};
#define to_faux_object(dev) container_of_const(dev, struct faux_object, faux_dev.dev)
@@ -43,10 +44,21 @@ static int faux_probe(struct device *dev)
struct faux_object *faux_obj = to_faux_object(dev);
struct faux_device *faux_dev = &faux_obj->faux_dev;
const struct faux_device_ops *faux_ops = faux_obj->faux_ops;
- int ret = 0;
+ int ret;
- if (faux_ops && faux_ops->probe)
+ if (faux_ops && faux_ops->probe) {
ret = faux_ops->probe(faux_dev);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Add groups after the probe succeeds to ensure resources are
+ * initialized correctly
+ */
+ ret = device_add_groups(dev, faux_obj->groups);
+ if (ret && faux_ops && faux_ops->remove)
+ faux_ops->remove(faux_dev);
return ret;
}
@@ -57,6 +69,8 @@ static void faux_remove(struct device *dev)
struct faux_device *faux_dev = &faux_obj->faux_dev;
const struct faux_device_ops *faux_ops = faux_obj->faux_ops;
+ device_remove_groups(dev, faux_obj->groups);
+
if (faux_ops && faux_ops->remove)
faux_ops->remove(faux_dev);
}
@@ -124,8 +138,9 @@ struct faux_device *faux_device_create_with_groups(const char *name,
if (!faux_obj)
return NULL;
- /* Save off the callbacks so we can use them in the future */
+ /* Save off the callbacks and groups so we can use them in the future */
faux_obj->faux_ops = faux_ops;
+ faux_obj->groups = groups;
/* Initialize the device portion and register it with the driver core */
faux_dev = &faux_obj->faux_dev;
@@ -138,7 +153,6 @@ struct faux_device *faux_device_create_with_groups(const char *name,
else
dev->parent = &faux_bus_root;
dev->bus = &faux_bus_type;
- dev->groups = groups;
dev_set_name(dev, "%s", name);
ret = device_add(dev);
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index a03701674265..752b9a9bea03 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -3,8 +3,7 @@ menu "Firmware loader"
config FW_LOADER
tristate "Firmware loading facility" if EXPERT
- select CRYPTO_HASH if FW_LOADER_DEBUG
- select CRYPTO_SHA256 if FW_LOADER_DEBUG
+ select CRYPTO_LIB_SHA256 if FW_LOADER_DEBUG
default y
help
This enables the firmware loading facility in the kernel. The kernel
@@ -28,7 +27,6 @@ config FW_LOADER
config FW_LOADER_DEBUG
bool "Log filenames and checksums for loaded firmware"
- depends on CRYPTO = FW_LOADER || CRYPTO=y
depends on DYNAMIC_DEBUG
depends on FW_LOADER
default FW_LOADER
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index cb0912ea3e62..44486b2c7172 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -806,41 +806,15 @@ static void fw_abort_batch_reqs(struct firmware *fw)
}
#if defined(CONFIG_FW_LOADER_DEBUG)
-#include <crypto/hash.h>
#include <crypto/sha2.h>
static void fw_log_firmware_info(const struct firmware *fw, const char *name, struct device *device)
{
- struct shash_desc *shash;
- struct crypto_shash *alg;
- u8 *sha256buf;
- char *outbuf;
+ u8 digest[SHA256_DIGEST_SIZE];
- alg = crypto_alloc_shash("sha256", 0, 0);
- if (IS_ERR(alg))
- return;
-
- sha256buf = kmalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
- outbuf = kmalloc(SHA256_BLOCK_SIZE + 1, GFP_KERNEL);
- shash = kmalloc(sizeof(*shash) + crypto_shash_descsize(alg), GFP_KERNEL);
- if (!sha256buf || !outbuf || !shash)
- goto out_free;
-
- shash->tfm = alg;
-
- if (crypto_shash_digest(shash, fw->data, fw->size, sha256buf) < 0)
- goto out_free;
-
- for (int i = 0; i < SHA256_DIGEST_SIZE; i++)
- sprintf(&outbuf[i * 2], "%02x", sha256buf[i]);
- outbuf[SHA256_BLOCK_SIZE] = 0;
- dev_dbg(device, "Loaded FW: %s, sha256: %s\n", name, outbuf);
-
-out_free:
- kfree(shash);
- kfree(outbuf);
- kfree(sha256buf);
- crypto_free_shash(alg);
+ sha256(fw->data, fw->size, digest);
+ dev_dbg(device, "Loaded FW: %s, sha256: %*phN\n",
+ name, SHA256_DIGEST_SIZE, digest);
}
#else
static void fw_log_firmware_info(const struct firmware *fw, const char *name,
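The rewrite leans on two helpers: the one-shot sha256() library function and the printk extension %*phN, which hex-dumps a buffer with no separators. The removed hand-rolled loop next to its replacement, roughly:

    u8 digest[SHA256_DIGEST_SIZE];
    char out[2 * SHA256_DIGEST_SIZE + 1];

    sha256(fw->data, fw->size, digest);

    for (int i = 0; i < SHA256_DIGEST_SIZE; i++)    /* old: manual hex */
            sprintf(&out[i * 2], "%02x", digest[i]);

    dev_dbg(device, "sha256: %*phN\n", SHA256_DIGEST_SIZE, digest); /* new */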
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 19469e7f88c2..ed3e69dc785c 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -110,6 +110,57 @@ static void memory_block_release(struct device *dev)
kfree(mem);
}
+
+/* Max block size to be set by memory_block_advise_max_size */
+static unsigned long memory_block_advised_size;
+static bool memory_block_advised_size_queried;
+
+/**
+ * memory_block_advise_max_size() - advise memory hotplug on the max suggested
+ * block size, usually for alignment.
+ * @size: suggestion for maximum block size; must be a power of 2.
+ *
+ * Early boot software (pre-allocator init) may advise archs on the max block
+ * size. Repeated calls may only decrease the value, as the intent is
+ * to identify the largest alignment supported by all sources.
+ *
+ * Use of this value is arch-defined, as is min/max block size.
+ *
+ * Return: 0 on success
+ * -EINVAL if size is 0 or not a power of 2
+ * -EBUSY if value has already been probed
+ */
+int __init memory_block_advise_max_size(unsigned long size)
+{
+ if (!size || !is_power_of_2(size))
+ return -EINVAL;
+
+ if (memory_block_advised_size_queried)
+ return -EBUSY;
+
+ if (memory_block_advised_size)
+ memory_block_advised_size = min(memory_block_advised_size, size);
+ else
+ memory_block_advised_size = size;
+
+ return 0;
+}
+
+/**
+ * memory_block_advised_max_size() - query advised max hotplug block size.
+ *
+ * After the first call, the value can never change. Callers looking for the
+ * actual block size should use memory_block_size_bytes(). This interface is
+ * intended for use by arch-init when initializing the hotplug block size.
+ *
+ * Return: advised size in bytes, or 0 if never set.
+ */
+unsigned long memory_block_advised_max_size(void)
+{
+ memory_block_advised_size_queried = true;
+ return memory_block_advised_size;
+}
+
unsigned long __weak memory_block_size_bytes(void)
{
return MIN_MEMORY_BLOCK_SIZE;
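The advise/query pair above is deliberately one-shot: the first memory_block_advised_max_size() call latches the value, after which further advice fails with -EBUSY. A sketch of an arch-side consumer, where MAX_BLOCK_SIZE stands in for a hypothetical arch maximum:

	#include <linux/memory.h>
	#include <linux/minmax.h>

	static unsigned long __init arch_pick_memory_block_size(void)
	{
		unsigned long advised = memory_block_advised_max_size();

		/* 0 means no early-boot software offered advice. */
		if (!advised)
			return MAX_BLOCK_SIZE;	/* hypothetical arch cap */

		return clamp(advised, MIN_MEMORY_BLOCK_SIZE, MAX_BLOCK_SIZE);
	}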
diff --git a/drivers/base/node.c b/drivers/base/node.c
index cd13ef287011..c19094481630 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
+#include <linux/mempolicy.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
@@ -214,6 +215,14 @@ void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
break;
}
}
+
+ /* When setting CPU access coordinates, update mempolicy */
+ if (access == ACCESS_COORDINATE_CPU) {
+ if (mempolicy_set_node_perf(nid, coord)) {
+ pr_info("failed to set mempolicy attrs for node %d\n",
+ nid);
+ }
+ }
}
EXPORT_SYMBOL_GPL(node_set_perf_attrs);
@@ -468,7 +477,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(pgdat, NR_PAGETABLE)),
nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
nid, 0UL,
- nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
+ nid, 0UL,
nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
nid, K(sreclaimable +
node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 0e60dd650b5e..70db08f3ac6f 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -95,5 +95,6 @@ EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs);
void platform_device_msi_free_irqs_all(struct device *dev)
{
msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
+ msi_remove_device_irq_domain(dev, MSI_DEFAULT_DOMAIN);
}
EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index cfccf3ff36e7..075ec1d1b73a 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -982,7 +982,7 @@ struct platform_device * __init_or_module __platform_create_bundle(
struct platform_device *pdev;
int error;
- pdev = platform_device_alloc(driver->driver.name, -1);
+ pdev = platform_device_alloc(driver->driver.name, PLATFORM_DEVID_NONE);
if (!pdev) {
error = -ENOMEM;
goto err_out;
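This is a readability-only change; the named platform-device id constants expand to the same values the code used literally, per include/linux/platform_device.h:

	#define PLATFORM_DEVID_NONE	(-1)	/* no numeric id suffix */
	#define PLATFORM_DEVID_AUTO	(-2)	/* core allocates a free id */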
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c8b0a9e29ed8..19fd55b8ac77 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -63,6 +63,7 @@ static LIST_HEAD(dpm_noirq_list);
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
+static DEFINE_MUTEX(async_wip_mtx);
static int async_error;
static const char *pm_verb(int event)
@@ -560,7 +561,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
struct timer_list *timer = &wd->timer;
timer_delete_sync(timer);
- destroy_timer_on_stack(timer);
+ timer_destroy_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
@@ -597,8 +598,11 @@ static bool is_async(struct device *dev)
&& !pm_trace_is_enabled();
}
-static bool dpm_async_fn(struct device *dev, async_func_t func)
+static bool __dpm_async(struct device *dev, async_func_t func)
{
+ if (dev->power.work_in_progress)
+ return true;
+
if (!is_async(dev))
return false;
@@ -611,14 +615,37 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
put_device(dev);
+ return false;
+}
+
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+ guard(mutex)(&async_wip_mtx);
+
+ return __dpm_async(dev, func);
+}
+
+static int dpm_async_with_cleanup(struct device *dev, void *fn)
+{
+ guard(mutex)(&async_wip_mtx);
+
+ if (!__dpm_async(dev, fn))
+ dev->power.work_in_progress = false;
+
+ return 0;
+}
+
+static void dpm_async_resume_children(struct device *dev, async_func_t func)
+{
/*
- * async_schedule_dev_nocall() above has returned false, so func() is
- * not running and it is safe to update power.work_in_progress without
- * extra synchronization.
+ * Start processing "async" children of the device unless it's been
+ * started already for them.
+ *
+ * This could have been done for the device's "async" consumers too, but
+ * they either need to wait for their parents or the processing has
+ * already started for them after their parents were processed.
*/
- dev->power.work_in_progress = false;
-
- return false;
+ device_for_each_child(dev, func, dpm_async_with_cleanup);
}
static void dpm_clear_async_state(struct device *dev)
@@ -627,6 +654,13 @@ static void dpm_clear_async_state(struct device *dev)
dev->power.work_in_progress = false;
}
+static bool dpm_root_device(struct device *dev)
+{
+ return !dev->parent;
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie);
+
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
@@ -710,6 +744,8 @@ Out:
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
}
+
+ dpm_async_resume_children(dev, async_resume_noirq);
}
static void async_resume_noirq(void *data, async_cookie_t cookie)
@@ -733,19 +769,20 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_lock(&dpm_list_mtx);
/*
- * Trigger the resume of "async" devices upfront so they don't have to
- * wait for the "non-async" ones they don't depend on.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
dpm_clear_async_state(dev);
- dpm_async_fn(dev, async_resume_noirq);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume_noirq);
}
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
- if (!dev->power.work_in_progress) {
+ if (!dpm_async_fn(dev, async_resume_noirq)) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -781,6 +818,8 @@ void dpm_resume_noirq(pm_message_t state)
device_wakeup_disarm_wake_irqs();
}
+static void async_resume_early(void *data, async_cookie_t cookie);
+
/**
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
@@ -848,6 +887,8 @@ Out:
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async early" : " early", error);
}
+
+ dpm_async_resume_children(dev, async_resume_early);
}
static void async_resume_early(void *data, async_cookie_t cookie)
@@ -875,19 +916,20 @@ void dpm_resume_early(pm_message_t state)
mutex_lock(&dpm_list_mtx);
/*
- * Trigger the resume of "async" devices upfront so they don't have to
- * wait for the "non-async" ones they don't depend on.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
dpm_clear_async_state(dev);
- dpm_async_fn(dev, async_resume_early);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume_early);
}
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
- if (!dev->power.work_in_progress) {
+ if (!dpm_async_fn(dev, async_resume_early)) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -919,6 +961,8 @@ void dpm_resume_start(pm_message_t state)
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
+static void async_resume(void *data, async_cookie_t cookie);
+
/**
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
@@ -1018,6 +1062,8 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
+
+ dpm_async_resume_children(dev, async_resume);
}
static void async_resume(void *data, async_cookie_t cookie)
@@ -1049,19 +1095,20 @@ void dpm_resume(pm_message_t state)
mutex_lock(&dpm_list_mtx);
/*
- * Trigger the resume of "async" devices upfront so they don't have to
- * wait for the "non-async" ones they don't depend on.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
dpm_clear_async_state(dev);
- dpm_async_fn(dev, async_resume);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume);
}
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
list_move_tail(&dev->power.entry, &dpm_prepared_list);
- if (!dev->power.work_in_progress) {
+ if (!dpm_async_fn(dev, async_resume)) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -1189,6 +1236,41 @@ EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/
+static bool dpm_leaf_device(struct device *dev)
+{
+ struct device *child;
+
+ lockdep_assert_held(&dpm_list_mtx);
+
+ child = device_find_any_child(dev);
+ if (child) {
+ put_device(child);
+
+ return false;
+ }
+
+ return true;
+}
+
+static void dpm_async_suspend_parent(struct device *dev, async_func_t func)
+{
+ guard(mutex)(&dpm_list_mtx);
+
+ /*
+ * If the device is suspended asynchronously and the parent's callback
+ * deletes both the device and the parent itself, the parent object may
+ * be freed while this function is running, so avoid that by checking
+ * if the device has been deleted already as the parent cannot be
+ * deleted before it.
+ */
+ if (!device_pm_initialized(dev))
+ return;
+
+ /* Start processing the device's parent if it is "async". */
+ if (dev->parent)
+ dpm_async_with_cleanup(dev->parent, func);
+}
+
/**
* resume_event - Return a "resume" message for given "suspend" sleep state.
* @sleep_state: PM message representing a sleep state.
@@ -1226,6 +1308,8 @@ static void dpm_superior_set_must_resume(struct device *dev)
device_links_read_unlock(idx);
}
+static void async_suspend_noirq(void *data, async_cookie_t cookie);
+
/**
* device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
@@ -1304,7 +1388,13 @@ Skip:
Complete:
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- return error;
+
+ if (error || async_error)
+ return error;
+
+ dpm_async_suspend_parent(dev, async_suspend_noirq);
+
+ return 0;
}
static void async_suspend_noirq(void *data, async_cookie_t cookie)
@@ -1318,6 +1408,7 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie)
static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
+ struct device *dev;
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
@@ -1327,12 +1418,21 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
mutex_lock(&dpm_list_mtx);
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend_noirq);
+ }
+
while (!list_empty(&dpm_late_early_list)) {
- struct device *dev = to_device(dpm_late_early_list.prev);
+ dev = to_device(dpm_late_early_list.prev);
list_move(&dev->power.entry, &dpm_noirq_list);
- dpm_clear_async_state(dev);
if (dpm_async_fn(dev, async_suspend_noirq))
continue;
@@ -1346,8 +1446,14 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
mutex_lock(&dpm_list_mtx);
- if (error || async_error)
+ if (error || async_error) {
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice(&dpm_late_early_list, &dpm_noirq_list);
break;
+ }
}
mutex_unlock(&dpm_list_mtx);
@@ -1400,6 +1506,8 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
spin_unlock_irq(&parent->power.lock);
}
+static void async_suspend_late(void *data, async_cookie_t cookie);
+
/**
* device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
@@ -1476,7 +1584,13 @@ Skip:
Complete:
TRACE_SUSPEND(error);
complete_all(&dev->power.completion);
- return error;
+
+ if (error || async_error)
+ return error;
+
+ dpm_async_suspend_parent(dev, async_suspend_late);
+
+ return 0;
}
static void async_suspend_late(void *data, async_cookie_t cookie)
@@ -1494,6 +1608,7 @@ static void async_suspend_late(void *data, async_cookie_t cookie)
int dpm_suspend_late(pm_message_t state)
{
ktime_t starttime = ktime_get();
+ struct device *dev;
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
@@ -1505,12 +1620,21 @@ int dpm_suspend_late(pm_message_t state)
mutex_lock(&dpm_list_mtx);
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend_late);
+ }
+
while (!list_empty(&dpm_suspended_list)) {
- struct device *dev = to_device(dpm_suspended_list.prev);
+ dev = to_device(dpm_suspended_list.prev);
list_move(&dev->power.entry, &dpm_late_early_list);
- dpm_clear_async_state(dev);
if (dpm_async_fn(dev, async_suspend_late))
continue;
@@ -1524,8 +1648,14 @@ int dpm_suspend_late(pm_message_t state)
mutex_lock(&dpm_list_mtx);
- if (error || async_error)
+ if (error || async_error) {
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice(&dpm_suspended_list, &dpm_late_early_list);
break;
+ }
}
mutex_unlock(&dpm_list_mtx);
@@ -1614,6 +1744,8 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
device_links_read_unlock(idx);
}
+static void async_suspend(void *data, async_cookie_t cookie);
+
/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
@@ -1743,7 +1875,13 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- return error;
+
+ if (error || async_error)
+ return error;
+
+ dpm_async_suspend_parent(dev, async_suspend);
+
+ return 0;
}
static void async_suspend(void *data, async_cookie_t cookie)
@@ -1761,6 +1899,7 @@ static void async_suspend(void *data, async_cookie_t cookie)
int dpm_suspend(pm_message_t state)
{
ktime_t starttime = ktime_get();
+ struct device *dev;
int error = 0;
trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
@@ -1774,12 +1913,21 @@ int dpm_suspend(pm_message_t state)
mutex_lock(&dpm_list_mtx);
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend);
+ }
+
while (!list_empty(&dpm_prepared_list)) {
- struct device *dev = to_device(dpm_prepared_list.prev);
+ dev = to_device(dpm_prepared_list.prev);
list_move(&dev->power.entry, &dpm_suspended_list);
- dpm_clear_async_state(dev);
if (dpm_async_fn(dev, async_suspend))
continue;
@@ -1793,8 +1941,14 @@ int dpm_suspend(pm_message_t state)
mutex_lock(&dpm_list_mtx);
- if (error || async_error)
+ if (error || async_error) {
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice(&dpm_prepared_list, &dpm_suspended_list);
break;
+ }
}
mutex_unlock(&dpm_list_mtx);
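Taken together, the resume paths above now start only "async" root devices upfront and let each finished device fan out to its children, while the suspend paths start leaves and fan in to parents. A userspace sketch of the resume-direction fan-out, with all names hypothetical:

	#include <stdbool.h>
	#include <stddef.h>

	struct node {
		struct node *parent;
		struct node **children;
		size_t nr_children;
		bool started;		/* mirrors power.work_in_progress */
	};

	/* Called when a node finishes resuming: schedule its children
	 * unless work for them has been started already. */
	static void node_resume_done(struct node *n,
				     void (*start)(struct node *))
	{
		for (size_t i = 0; i < n->nr_children; i++) {
			struct node *c = n->children[i];

			if (!c->started) {
				c->started = true;
				start(c);
			}
		}
	}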
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 0e127b0329c0..c55a7c70bc1a 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1011,7 +1011,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
* If 'expires' is after the current time, we've been called
* too early.
*/
- if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
+ if (expires > 0 && expires <= ktime_get_mono_fast_ns()) {
dev->power.timer_expires = 0;
rpm_suspend(dev, dev->power.timer_autosuspends ?
(RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
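The relational-operator change above covers the boundary case where the hrtimer fires exactly at the programmed expiration time:

	/* expires == now (timer fired exactly on time):
	 *   old: expires <  now  -> false, the suspend was silently skipped
	 *   new: expires <= now  -> true,  rpm_suspend() is queued as intended
	 */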
@@ -1568,6 +1568,32 @@ out:
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
+static void pm_runtime_set_suspended_action(void *data)
+{
+ pm_runtime_set_suspended(data);
+}
+
+/**
+ * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_set_active_enabled(struct device *dev)
+{
+ int err;
+
+ err = pm_runtime_set_active(dev);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
+ if (err)
+ return err;
+
+ return devm_pm_runtime_enable(dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);
+
static void pm_runtime_disable_action(void *data)
{
pm_runtime_dont_use_autosuspend(data);
@@ -1590,6 +1616,24 @@ int devm_pm_runtime_enable(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
+static void pm_runtime_put_noidle_action(void *data)
+{
+ pm_runtime_put_noidle(data);
+}
+
+/**
+ * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_get_noresume(struct device *dev)
+{
+ pm_runtime_get_noresume(dev);
+
+ return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
+
/**
* pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
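The two new devres helpers pair naturally in a probe path. A hypothetical sketch (foo_probe and the assumption that the device powers up active are not from the patch):

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		/* Mark the device active, enable runtime PM, and arrange
		 * pm_runtime_set_suspended() on driver removal. */
		ret = devm_pm_runtime_set_active_enabled(&pdev->dev);
		if (ret)
			return ret;

		/* Hold a usage count for the driver's lifetime; the
		 * matching pm_runtime_put_noidle() runs via devres. */
		return devm_pm_runtime_get_noresume(&pdev->dev);
	}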
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index f84018125b46..13b31a3adc77 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -611,15 +611,9 @@ static DEVICE_ATTR_RW(async);
#endif /* CONFIG_PM_ADVANCED_DEBUG */
static struct attribute *power_attrs[] = {
-#ifdef CONFIG_PM_ADVANCED_DEBUG
-#ifdef CONFIG_PM_SLEEP
+#if defined(CONFIG_PM_ADVANCED_DEBUG) && defined(CONFIG_PM_SLEEP)
&dev_attr_async.attr,
#endif
- &dev_attr_runtime_status.attr,
- &dev_attr_runtime_usage.attr,
- &dev_attr_runtime_active_kids.attr,
- &dev_attr_runtime_enabled.attr,
-#endif /* CONFIG_PM_ADVANCED_DEBUG */
NULL,
};
static const struct attribute_group pm_attr_group = {
@@ -650,13 +644,16 @@ static const struct attribute_group pm_wakeup_attr_group = {
};
static struct attribute *runtime_attrs[] = {
-#ifndef CONFIG_PM_ADVANCED_DEBUG
&dev_attr_runtime_status.attr,
-#endif
&dev_attr_control.attr,
&dev_attr_runtime_suspended_time.attr,
&dev_attr_runtime_active_time.attr,
&dev_attr_autosuspend_delay_ms.attr,
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+ &dev_attr_runtime_usage.attr,
+ &dev_attr_runtime_active_kids.attr,
+ &dev_attr_runtime_enabled.attr,
+#endif
NULL,
};
static const struct attribute_group pm_runtime_attr_group = {
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 63bf914a4d44..f7c96a3bf719 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -77,7 +77,7 @@ static DEFINE_IDA(wakeup_ida);
* wakeup_source_create - Create a struct wakeup_source object.
* @name: Name of the new wakeup source.
*/
-struct wakeup_source *wakeup_source_create(const char *name)
+static struct wakeup_source *wakeup_source_create(const char *name)
{
struct wakeup_source *ws;
const char *ws_name;
@@ -106,7 +106,6 @@ err_name:
err_ws:
return NULL;
}
-EXPORT_SYMBOL_GPL(wakeup_source_create);
/*
* Record wakeup_source statistics being deleted into a dummy wakeup_source.
@@ -149,7 +148,7 @@ static void wakeup_source_free(struct wakeup_source *ws)
*
* Use only for wakeup source objects created with wakeup_source_create().
*/
-void wakeup_source_destroy(struct wakeup_source *ws)
+static void wakeup_source_destroy(struct wakeup_source *ws)
{
if (!ws)
return;
@@ -158,13 +157,12 @@ void wakeup_source_destroy(struct wakeup_source *ws)
wakeup_source_record(ws);
wakeup_source_free(ws);
}
-EXPORT_SYMBOL_GPL(wakeup_source_destroy);
/**
* wakeup_source_add - Add given object to the list of wakeup sources.
* @ws: Wakeup source object to add to the list.
*/
-void wakeup_source_add(struct wakeup_source *ws)
+static void wakeup_source_add(struct wakeup_source *ws)
{
unsigned long flags;
@@ -179,13 +177,12 @@ void wakeup_source_add(struct wakeup_source *ws)
list_add_rcu(&ws->entry, &wakeup_sources);
raw_spin_unlock_irqrestore(&events_lock, flags);
}
-EXPORT_SYMBOL_GPL(wakeup_source_add);
/**
* wakeup_source_remove - Remove given object from the wakeup sources list.
* @ws: Wakeup source object to remove from the list.
*/
-void wakeup_source_remove(struct wakeup_source *ws)
+static void wakeup_source_remove(struct wakeup_source *ws)
{
unsigned long flags;
@@ -204,7 +201,6 @@ void wakeup_source_remove(struct wakeup_source *ws)
*/
ws->timer.function = NULL;
}
-EXPORT_SYMBOL_GPL(wakeup_source_remove);
/**
* wakeup_source_register - Create wakeup source and add it to the list.
@@ -337,7 +333,7 @@ int device_wakeup_enable(struct device *dev)
if (!dev || !dev->power.can_wakeup)
return -EINVAL;
- if (pm_suspend_target_state != PM_SUSPEND_ON)
+ if (pm_sleep_transition_in_progress())
dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
ws = wakeup_source_register(dev, dev_name(dev));
diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c
index 6732ed2869f9..3ffd427248e8 100644
--- a/drivers/base/power/wakeup_stats.c
+++ b/drivers/base/power/wakeup_stats.c
@@ -34,6 +34,7 @@ wakeup_attr(active_count);
wakeup_attr(event_count);
wakeup_attr(wakeup_count);
wakeup_attr(expire_count);
+wakeup_attr(relax_count);
static ssize_t active_time_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -119,6 +120,7 @@ static struct attribute *wakeup_source_attrs[] = {
&dev_attr_event_count.attr,
&dev_attr_wakeup_count.attr,
&dev_attr_expire_count.attr,
+ &dev_attr_relax_count.attr,
&dev_attr_active_time_ms.attr,
&dev_attr_total_time_ms.attr,
&dev_attr_max_time_ms.attr,
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index b1affac70d5d..ffb2ef488298 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -6,8 +6,6 @@
config REGMAP
bool
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO || REGMAP_FSI)
- select IRQ_DOMAIN if REGMAP_IRQ
- select MDIO_BUS if REGMAP_MDIO
help
Enable support for the Register Map (regmap) access API.
@@ -58,12 +56,14 @@ config REGMAP_W1
config REGMAP_MDIO
tristate
+ select MDIO_BUS
config REGMAP_MMIO
tristate
config REGMAP_IRQ
bool
+ select IRQ_DOMAIN
config REGMAP_RAM
tristate
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index f7fcf2de1301..c7650fa434ad 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -34,21 +34,10 @@ static int regcache_defaults_cmp(const void *a, const void *b)
return 0;
}
-static void regcache_defaults_swap(void *a, void *b, int size)
-{
- struct reg_default *x = a;
- struct reg_default *y = b;
- struct reg_default tmp;
-
- tmp = *x;
- *x = *y;
- *y = tmp;
-}
-
void regcache_sort_defaults(struct reg_default *defaults, unsigned int ndefaults)
{
sort(defaults, ndefaults, sizeof(*defaults),
- regcache_defaults_cmp, regcache_defaults_swap);
+ regcache_defaults_cmp, NULL);
}
EXPORT_SYMBOL_GPL(regcache_sort_defaults);
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 6c6869188c31..d1585f073776 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -6,11 +6,13 @@
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+#include <linux/array_size.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/overflow.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -33,6 +35,7 @@ struct regmap_irq_chip_data {
void *status_reg_buf;
unsigned int *main_status_buf;
unsigned int *status_buf;
+ unsigned int *prev_status_buf;
unsigned int *mask_buf;
unsigned int *mask_buf_def;
unsigned int *wake_buf;
@@ -193,10 +196,10 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
/* If we've changed our wakeup count propagate it to the parent */
if (d->wake_count < 0)
for (i = d->wake_count; i < 0; i++)
- irq_set_irq_wake(d->irq, 0);
+ disable_irq_wake(d->irq);
else if (d->wake_count > 0)
for (i = 0; i < d->wake_count; i++)
- irq_set_irq_wake(d->irq, 1);
+ enable_irq_wake(d->irq);
d->wake_count = 0;
@@ -332,27 +335,13 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
return ret;
}
-static irqreturn_t regmap_irq_thread(int irq, void *d)
+static int read_irq_data(struct regmap_irq_chip_data *data)
{
- struct regmap_irq_chip_data *data = d;
const struct regmap_irq_chip *chip = data->chip;
struct regmap *map = data->map;
int ret, i;
- bool handled = false;
u32 reg;
- if (chip->handle_pre_irq)
- chip->handle_pre_irq(chip->irq_drv_data);
-
- if (chip->runtime_pm) {
- ret = pm_runtime_get_sync(map->dev);
- if (ret < 0) {
- dev_err(map->dev, "IRQ thread failed to resume: %d\n",
- ret);
- goto exit;
- }
- }
-
/*
* Read only registers with active IRQs if the chip has 'main status
* register'. Else read in the statuses, using a single bulk read if
@@ -379,10 +368,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
reg = data->get_irq_reg(data, chip->main_status, i);
ret = regmap_read(map, reg, &data->main_status_buf[i]);
if (ret) {
- dev_err(map->dev,
- "Failed to read IRQ status %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status %d\n", ret);
+ return ret;
}
}
@@ -398,10 +385,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
ret = read_sub_irq_data(data, b);
if (ret != 0) {
- dev_err(map->dev,
- "Failed to read IRQ status %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status %d\n", ret);
+ return ret;
}
}
@@ -418,9 +403,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
data->status_reg_buf,
chip->num_regs);
if (ret != 0) {
- dev_err(map->dev, "Failed to read IRQ status: %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
+ return ret;
}
for (i = 0; i < data->chip->num_regs; i++) {
@@ -436,7 +420,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
break;
default:
BUG();
- goto exit;
+ return -EIO;
}
}
@@ -447,10 +431,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
ret = regmap_read(map, reg, &data->status_buf[i]);
if (ret != 0) {
- dev_err(map->dev,
- "Failed to read IRQ status: %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
+ return ret;
}
}
}
@@ -459,6 +441,42 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
for (i = 0; i < data->chip->num_regs; i++)
data->status_buf[i] = ~data->status_buf[i];
+ return 0;
+}
+
+static irqreturn_t regmap_irq_thread(int irq, void *d)
+{
+ struct regmap_irq_chip_data *data = d;
+ const struct regmap_irq_chip *chip = data->chip;
+ struct regmap *map = data->map;
+ int ret, i;
+ bool handled = false;
+ u32 reg;
+
+ if (chip->handle_pre_irq)
+ chip->handle_pre_irq(chip->irq_drv_data);
+
+ if (chip->runtime_pm) {
+ ret = pm_runtime_get_sync(map->dev);
+ if (ret < 0) {
+ dev_err(map->dev, "IRQ thread failed to resume: %d\n", ret);
+ goto exit;
+ }
+ }
+
+ ret = read_irq_data(data);
+ if (ret < 0)
+ goto exit;
+
+ if (chip->status_is_level) {
+ for (i = 0; i < data->chip->num_regs; i++) {
+ unsigned int val = data->status_buf[i];
+
+ data->status_buf[i] ^= data->prev_status_buf[i];
+ data->prev_status_buf[i] = val;
+ }
+ }
+
/*
* Ignore masked IRQs and ack if we need to; we ack early so
* there is no race between handling and acknowledging the
@@ -705,6 +723,13 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (!d->status_buf)
goto err_alloc;
+ if (chip->status_is_level) {
+ d->prev_status_buf = kcalloc(chip->num_regs, sizeof(*d->prev_status_buf),
+ GFP_KERNEL);
+ if (!d->prev_status_buf)
+ goto err_alloc;
+ }
+
d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf),
GFP_KERNEL);
if (!d->mask_buf)
@@ -881,6 +906,16 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
}
}
+ /* Store current levels */
+ if (chip->status_is_level) {
+ ret = read_irq_data(d);
+ if (ret < 0)
+ goto err_alloc;
+
+ memcpy(d->prev_status_buf, d->status_buf,
+ array_size(d->chip->num_regs, sizeof(d->prev_status_buf[0])));
+ }
+
ret = regmap_irq_create_domain(fwnode, irq_base, chip, d);
if (ret)
goto err_alloc;
@@ -908,6 +943,7 @@ err_alloc:
kfree(d->mask_buf);
kfree(d->main_status_buf);
kfree(d->status_buf);
+ kfree(d->prev_status_buf);
kfree(d->status_reg_buf);
if (d->config_buf) {
for (i = 0; i < chip->num_config_bases; i++)
@@ -985,6 +1021,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->main_status_buf);
kfree(d->status_reg_buf);
kfree(d->status_buf);
+ kfree(d->prev_status_buf);
if (d->config_buf) {
for (i = 0; i < d->chip->num_config_bases; i++)
kfree(d->config_buf[i]);
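For status_is_level chips the thread above turns level status into edge events by XOR-ing each read against the snapshot captured at registration time. A worked example with a single status register:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t prev = 0x3;		/* levels captured last pass */
		uint32_t now = 0x6;		/* levels read this pass */
		uint32_t changed = now ^ prev;	/* 0x5: lines that toggled */

		prev = now;			/* becomes prev_status_buf */
		printf("changed=%#x prev=%#x\n", changed, prev);
		return 0;
	}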
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 5c78fa6ae772..deda7f35a059 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -529,7 +529,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
if (prop->is_inline)
return -EINVAL;
- if (index * sizeof(*ref) >= prop->length)
+ if ((index + 1) * sizeof(*ref) > prop->length)
return -ENOENT;
ref_array = prop->pointer;
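The old bounds check only rejected an index whose start offset was already past the end of the property, letting a trailing partial entry through. With sizeof(*ref) = 16 and prop->length = 40:

	/* index = 2:
	 *   old: 2 * 16 >= 40 -> false -> accepted, reads bytes 32..47 (OOB)
	 *   new: 3 * 16 >  40 -> true  -> -ENOENT, access stays in bounds
	 */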
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index b962da263eee..8b42df05feff 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -208,3 +208,55 @@ static int __init topology_sysfs_init(void)
}
device_initcall(topology_sysfs_init);
+
+DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
+
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
+{
+ per_cpu(cpu_scale, cpu) = capacity;
+}
+
+static ssize_t cpu_capacity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+
+ return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
+}
+
+static DEVICE_ATTR_RO(cpu_capacity);
+
+static int cpu_capacity_sysctl_add(unsigned int cpu)
+{
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ if (!cpu_dev)
+ return -ENOENT;
+
+ device_create_file(cpu_dev, &dev_attr_cpu_capacity);
+
+ return 0;
+}
+
+static int cpu_capacity_sysctl_remove(unsigned int cpu)
+{
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ if (!cpu_dev)
+ return -ENOENT;
+
+ device_remove_file(cpu_dev, &dev_attr_cpu_capacity);
+
+ return 0;
+}
+
+static int register_cpu_capacity_sysctl(void)
+{
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
+ cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);
+
+ return 0;
+}
+subsys_initcall(register_cpu_capacity_sysctl);
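The new attribute lands in the usual per-CPU sysfs directory. A userspace spot check, assuming cpu0 is present on a kernel carrying this change:

	#include <stdio.h>

	int main(void)
	{
		unsigned long cap;
		FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpu_capacity",
				"r");

		if (!f)
			return 1;
		if (fscanf(f, "%lu", &cap) == 1)
			printf("cpu0 capacity: %lu of 1024\n", cap);
		fclose(f);
		return 0;
	}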
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 5f90bac6bb09..f021e27644e0 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -26,12 +26,14 @@ static int bcma_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
return !!bcma_chipco_gpio_in(cc, 1 << gpio);
}
-static void bcma_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
- int value)
+static int bcma_gpio_set_value(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
struct bcma_drv_cc *cc = gpiochip_get_data(chip);
bcma_chipco_gpio_out(cc, 1 << gpio, value ? 1 << gpio : 0);
+
+ return 0;
}
static int bcma_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
@@ -184,7 +186,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->request = bcma_gpio_request;
chip->free = bcma_gpio_free;
chip->get = bcma_gpio_get_value;
- chip->set = bcma_gpio_set_value;
+ chip->set_rv = bcma_gpio_set_value;
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
chip->parent = bus->dev;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index e48b24be45ee..0f70e2374e7f 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -407,4 +407,23 @@ config BLKDEV_UBLK_LEGACY_OPCODES
source "drivers/block/rnbd/Kconfig"
+config BLK_DEV_ZONED_LOOP
+ tristate "Zoned loopback device support"
+ depends on BLK_DEV_ZONED
+ help
+ Saying Y here will allow you to create a zoned block device using
+ regular files for zones (one file per zone). This is useful to test
+ file systems, device mapper and applications that support zoned block
+ devices. No user utility is needed to create a zoned loop device; it
+ can be created (or re-started) using a command like:
+
+ echo "add id=0,zone_size_mb=256,capacity_mb=16384,conv_zones=11" > \
+ /dev/zloop-control
+
+ See Documentation/admin-guide/blockdev/zoned_loop.rst for usage
+ details.
+
+ If unsure, say N.
+
endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 1105a2d4fdcb..097707aca725 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -41,5 +41,6 @@ obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/
obj-$(CONFIG_BLK_DEV_UBLK) += ublk_drv.o
+obj-$(CONFIG_BLK_DEV_ZONED_LOOP) += zloop.o
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 292f127cae0a..b1be6c510372 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -54,32 +54,33 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
/*
* Insert a new page for a given sector, if one does not already exist.
*/
-static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
+static struct page *brd_insert_page(struct brd_device *brd, sector_t sector,
+ blk_opf_t opf)
+ __releases(rcu)
+ __acquires(rcu)
{
- pgoff_t idx = sector >> PAGE_SECTORS_SHIFT;
- struct page *page;
- int ret = 0;
-
- page = brd_lookup_page(brd, sector);
- if (page)
- return 0;
+ gfp_t gfp = (opf & REQ_NOWAIT) ? GFP_NOWAIT : GFP_NOIO;
+ struct page *page, *ret;
+ rcu_read_unlock();
page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
+ rcu_read_lock();
if (!page)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
xa_lock(&brd->brd_pages);
- ret = __xa_insert(&brd->brd_pages, idx, page, gfp);
- if (!ret)
- brd->brd_nr_pages++;
- xa_unlock(&brd->brd_pages);
-
- if (ret < 0) {
+ ret = __xa_cmpxchg(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT, NULL,
+ page, gfp);
+ if (ret) {
+ xa_unlock(&brd->brd_pages);
__free_page(page);
- if (ret == -EBUSY)
- ret = 0;
+ if (xa_is_err(ret))
+ return ERR_PTR(xa_err(ret));
+ return ret;
}
- return ret;
+ brd->brd_nr_pages++;
+ xa_unlock(&brd->brd_pages);
+ return page;
}
/*
@@ -100,143 +101,77 @@ static void brd_free_pages(struct brd_device *brd)
}
/*
- * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
+ * Process a single segment. The segment is capped so that it does not cross
+ * page boundaries in either the bio or the brd backing memory.
*/
-static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n,
- gfp_t gfp)
-{
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
- int ret;
-
- copy = min_t(size_t, n, PAGE_SIZE - offset);
- ret = brd_insert_page(brd, sector, gfp);
- if (ret)
- return ret;
- if (copy < n) {
- sector += copy >> SECTOR_SHIFT;
- ret = brd_insert_page(brd, sector, gfp);
- }
- return ret;
-}
-
-/*
- * Copy n bytes from src to the brd starting at sector. Does not sleep.
- */
-static void copy_to_brd(struct brd_device *brd, const void *src,
- sector_t sector, size_t n)
+static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
{
+ struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
+ sector_t sector = bio->bi_iter.bi_sector;
+ u32 offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
+ blk_opf_t opf = bio->bi_opf;
struct page *page;
- void *dst;
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
+ void *kaddr;
- copy = min_t(size_t, n, PAGE_SIZE - offset);
- page = brd_lookup_page(brd, sector);
- BUG_ON(!page);
-
- dst = kmap_atomic(page);
- memcpy(dst + offset, src, copy);
- kunmap_atomic(dst);
-
- if (copy < n) {
- src += copy;
- sector += copy >> SECTOR_SHIFT;
- copy = n - copy;
- page = brd_lookup_page(brd, sector);
- BUG_ON(!page);
-
- dst = kmap_atomic(page);
- memcpy(dst, src, copy);
- kunmap_atomic(dst);
- }
-}
+ bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
-/*
- * Copy n bytes to dst from the brd starting at sector. Does not sleep.
- */
-static void copy_from_brd(void *dst, struct brd_device *brd,
- sector_t sector, size_t n)
-{
- struct page *page;
- void *src;
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
-
- copy = min_t(size_t, n, PAGE_SIZE - offset);
+ rcu_read_lock();
page = brd_lookup_page(brd, sector);
- if (page) {
- src = kmap_atomic(page);
- memcpy(dst, src + offset, copy);
- kunmap_atomic(src);
- } else
- memset(dst, 0, copy);
-
- if (copy < n) {
- dst += copy;
- sector += copy >> SECTOR_SHIFT;
- copy = n - copy;
- page = brd_lookup_page(brd, sector);
- if (page) {
- src = kmap_atomic(page);
- memcpy(dst, src, copy);
- kunmap_atomic(src);
- } else
- memset(dst, 0, copy);
+ if (!page && op_is_write(opf)) {
+ page = brd_insert_page(brd, sector, opf);
+ if (IS_ERR(page))
+ goto out_error;
}
-}
-
-/*
- * Process a single bvec of a bio.
- */
-static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, blk_opf_t opf,
- sector_t sector)
-{
- void *mem;
- int err = 0;
+ kaddr = bvec_kmap_local(&bv);
if (op_is_write(opf)) {
- /*
- * Must use NOIO because we don't want to recurse back into the
- * block or filesystem layers from page reclaim.
- */
- gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;
-
- err = copy_to_brd_setup(brd, sector, len, gfp);
- if (err)
- goto out;
- }
-
- mem = kmap_atomic(page);
- if (!op_is_write(opf)) {
- copy_from_brd(mem + off, brd, sector, len);
- flush_dcache_page(page);
+ memcpy_to_page(page, offset, kaddr, bv.bv_len);
} else {
- flush_dcache_page(page);
- copy_to_brd(brd, mem + off, sector, len);
+ if (page)
+ memcpy_from_page(kaddr, page, offset, bv.bv_len);
+ else
+ memset(kaddr, 0, bv.bv_len);
}
- kunmap_atomic(mem);
+ kunmap_local(kaddr);
+ rcu_read_unlock();
+
+ bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len);
+ return true;
+
+out_error:
+ rcu_read_unlock();
+ if (PTR_ERR(page) == -ENOMEM && (opf & REQ_NOWAIT))
+ bio_wouldblock_error(bio);
+ else
+ bio_io_error(bio);
+ return false;
+}
-out:
- return err;
+static void brd_free_one_page(struct rcu_head *head)
+{
+ struct page *page = container_of(head, struct page, rcu_head);
+
+ __free_page(page);
}
static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
{
- sector_t aligned_sector = (sector + PAGE_SECTORS) & ~PAGE_SECTORS;
+ sector_t aligned_sector = round_up(sector, PAGE_SECTORS);
+ sector_t aligned_end = round_down(
+ sector + (size >> SECTOR_SHIFT), PAGE_SECTORS);
struct page *page;
- size -= (aligned_sector - sector) * SECTOR_SIZE;
+ if (aligned_end <= aligned_sector)
+ return;
+
xa_lock(&brd->brd_pages);
- while (size >= PAGE_SIZE && aligned_sector < rd_size * 2) {
+ while (aligned_sector < aligned_end && aligned_sector < rd_size * 2) {
page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT);
if (page) {
- __free_page(page);
+ call_rcu(&page->rcu_head, brd_free_one_page);
brd->brd_nr_pages--;
}
aligned_sector += PAGE_SECTORS;
- size -= PAGE_SIZE;
}
xa_unlock(&brd->brd_pages);
}
@@ -244,36 +179,18 @@ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
static void brd_submit_bio(struct bio *bio)
{
struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
- sector_t sector = bio->bi_iter.bi_sector;
- struct bio_vec bvec;
- struct bvec_iter iter;
if (unlikely(op_is_discard(bio->bi_opf))) {
- brd_do_discard(brd, sector, bio->bi_iter.bi_size);
+ brd_do_discard(brd, bio->bi_iter.bi_sector,
+ bio->bi_iter.bi_size);
bio_endio(bio);
return;
}
- bio_for_each_segment(bvec, bio, iter) {
- unsigned int len = bvec.bv_len;
- int err;
-
- /* Don't support un-aligned buffer */
- WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) ||
- (len & (SECTOR_SIZE - 1)));
-
- err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
- bio->bi_opf, sector);
- if (err) {
- if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
- bio_wouldblock_error(bio);
- return;
- }
- bio_io_error(bio);
+ do {
+ if (!brd_rw_bvec(brd, bio))
return;
- }
- sector += len >> SECTOR_SHIFT;
- }
+ } while (bio->bi_iter.bi_size);
bio_endio(bio);
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 65b96c083b3c..d5cc7bd2875c 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -725,7 +725,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
scmd = blk_mq_rq_to_pdu(rq);
if (cgc->buflen) {
- ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
+ ret = blk_rq_map_kern(rq, cgc->buffer, cgc->buflen,
GFP_NOIO);
if (ret)
goto out;
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 2ee6e9bd4e28..2df8941a6b14 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -147,12 +147,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
bio = bio_alloc(file_bdev(sess_dev->bdev_file), 1,
rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
- if (bio_add_page(bio, virt_to_page(data), datalen,
- offset_in_page(data)) != datalen) {
- rnbd_srv_err_rl(sess_dev, "Failed to map data to bio\n");
- err = -EINVAL;
- goto bio_put;
- }
+ bio_add_virt_nofail(bio, data, datalen);
bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
if (bio_has_data(bio) &&
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index dc104c025cd5..6f51072776f1 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -50,6 +50,8 @@
/* private ioctl command mirror */
#define UBLK_CMD_DEL_DEV_ASYNC _IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC)
+#define UBLK_CMD_UPDATE_SIZE _IOC_NR(UBLK_U_CMD_UPDATE_SIZE)
+#define UBLK_CMD_QUIESCE_DEV _IOC_NR(UBLK_U_CMD_QUIESCE_DEV)
#define UBLK_IO_REGISTER_IO_BUF _IOC_NR(UBLK_U_IO_REGISTER_IO_BUF)
#define UBLK_IO_UNREGISTER_IO_BUF _IOC_NR(UBLK_U_IO_UNREGISTER_IO_BUF)
@@ -64,7 +66,10 @@
| UBLK_F_CMD_IOCTL_ENCODE \
| UBLK_F_USER_COPY \
| UBLK_F_ZONED \
- | UBLK_F_USER_RECOVERY_FAIL_IO)
+ | UBLK_F_USER_RECOVERY_FAIL_IO \
+ | UBLK_F_UPDATE_SIZE \
+ | UBLK_F_AUTO_BUF_REG \
+ | UBLK_F_QUIESCE)
#define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \
| UBLK_F_USER_RECOVERY_REISSUE \
@@ -77,7 +82,11 @@
UBLK_PARAM_TYPE_DMA_ALIGN | UBLK_PARAM_TYPE_SEGMENT)
struct ublk_rq_data {
- struct kref ref;
+ refcount_t ref;
+
+ /* for auto-unregister buffer in case of UBLK_F_AUTO_BUF_REG */
+ u16 buf_index;
+ void *buf_ctx_handle;
};
struct ublk_uring_cmd_pdu {
@@ -99,6 +108,9 @@ struct ublk_uring_cmd_pdu {
* setup in ublk uring_cmd handler
*/
struct ublk_queue *ubq;
+
+ struct ublk_auto_buf_reg buf;
+
u16 tag;
};
@@ -131,6 +143,14 @@ struct ublk_uring_cmd_pdu {
*/
#define UBLK_IO_FLAG_NEED_GET_DATA 0x08
+/*
+ * The request buffer is registered automatically, so we have to
+ * unregister it before completing this request.
+ *
+ * io_uring will unregister the buffer for us automatically on exit.
+ */
+#define UBLK_IO_FLAG_AUTO_BUF_REG 0x10
+
/* atomic RW with ubq->cancel_lock */
#define UBLK_IO_FLAG_CANCELED 0x80000000
@@ -140,7 +160,12 @@ struct ublk_io {
unsigned int flags;
int res;
- struct io_uring_cmd *cmd;
+ union {
+ /* valid if UBLK_IO_FLAG_ACTIVE is set */
+ struct io_uring_cmd *cmd;
+ /* valid if UBLK_IO_FLAG_OWNED_BY_SRV is set */
+ struct request *req;
+ };
};
struct ublk_queue {
@@ -198,13 +223,19 @@ struct ublk_params_header {
__u32 types;
};
+static void ublk_io_release(void *priv);
static void ublk_stop_dev_unlocked(struct ublk_device *ub);
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
const struct ublk_queue *ubq, int tag, size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);
-static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
- int tag);
+
+static inline struct ublksrv_io_desc *
+ublk_get_iod(const struct ublk_queue *ubq, unsigned tag)
+{
+ return &ubq->io_cmd_buf[tag];
+}
+
static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
{
return ub->dev_info.flags & UBLK_F_ZONED;
@@ -356,8 +387,7 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector,
if (ret)
goto free_req;
- ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
- GFP_KERNEL);
+ ret = blk_rq_map_kern(req, buffer, buffer_length, GFP_KERNEL);
if (ret)
goto erase_desc;
@@ -477,7 +507,6 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
#endif
static inline void __ublk_complete_rq(struct request *req);
-static void ublk_complete_rq(struct kref *ref);
static dev_t ublk_chr_devt;
static const struct class ublk_chr_class = {
@@ -609,6 +638,11 @@ static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq)
return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY;
}
+static inline bool ublk_support_auto_buf_reg(const struct ublk_queue *ubq)
+{
+ return ubq->flags & UBLK_F_AUTO_BUF_REG;
+}
+
static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_USER_COPY;
@@ -616,7 +650,8 @@ static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
{
- return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq);
+ return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq) &&
+ !ublk_support_auto_buf_reg(ubq);
}
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
@@ -627,8 +662,13 @@ static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
*
* for zero copy, request buffer need to be registered to io_uring
* buffer table, so reference is needed
+ *
+ * For auto buffer register, the ublk server may still issue
+ * UBLK_IO_COMMIT_AND_FETCH_REQ before a registered buffer is used up,
+ * so a reference is required too.
*/
- return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq);
+ return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq) ||
+ ublk_support_auto_buf_reg(ubq);
}
static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
@@ -637,7 +677,7 @@ static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
if (ublk_need_req_ref(ubq)) {
struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
- kref_init(&data->ref);
+ refcount_set(&data->ref, 1);
}
}
@@ -647,7 +687,7 @@ static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
if (ublk_need_req_ref(ubq)) {
struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
- return kref_get_unless_zero(&data->ref);
+ return refcount_inc_not_zero(&data->ref);
}
return true;
@@ -659,7 +699,8 @@ static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
if (ublk_need_req_ref(ubq)) {
struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
- kref_put(&data->ref, ublk_complete_rq);
+ if (refcount_dec_and_test(&data->ref))
+ __ublk_complete_rq(req);
} else {
__ublk_complete_rq(req);
}
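Replacing the kref with a bare refcount_t trades the release-callback indirection for an explicit test at the call site; the two idioms are equivalent (release() and obj are stand-in names):

	/* kref style: the helper invokes the release callback */
	kref_put(&obj->ref, release_cb);

	/* refcount_t style: the caller completes the object inline */
	if (refcount_dec_and_test(&obj->ref))
		release(obj);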
@@ -695,12 +736,6 @@ static inline bool ublk_rq_has_data(const struct request *rq)
return bio_has_data(rq->bio);
}
-static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
- int tag)
-{
- return &ubq->io_cmd_buf[tag];
-}
-
static inline struct ublksrv_io_desc *
ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
{
@@ -1117,18 +1152,12 @@ exit:
blk_mq_end_request(req, res);
}
-static void ublk_complete_rq(struct kref *ref)
+static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
+ int res, unsigned issue_flags)
{
- struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
- ref);
- struct request *req = blk_mq_rq_from_pdu(data);
+ /* read cmd first because req will overwrite it */
+ struct io_uring_cmd *cmd = io->cmd;
- __ublk_complete_rq(req);
-}
-
-static void ubq_complete_io_cmd(struct ublk_io *io, int res,
- unsigned issue_flags)
-{
/* mark this cmd owned by ublksrv */
io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
@@ -1138,8 +1167,10 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res,
*/
io->flags &= ~UBLK_IO_FLAG_ACTIVE;
+ io->req = req;
+
/* tell ublksrv one io request is coming */
- io_uring_cmd_done(io->cmd, res, 0, issue_flags);
+ io_uring_cmd_done(cmd, res, 0, issue_flags);
}
#define UBLK_REQUEUE_DELAY_MS 3
@@ -1154,16 +1185,91 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
blk_mq_end_request(rq, BLK_STS_IOERR);
}
+static void ublk_auto_buf_reg_fallback(struct request *req)
+{
+ const struct ublk_queue *ubq = req->mq_hctx->driver_data;
+ struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+ iod->op_flags |= UBLK_IO_F_NEED_REG_BUF;
+ refcount_set(&data->ref, 1);
+}
+
+static bool ublk_auto_buf_reg(struct request *req, struct ublk_io *io,
+ unsigned int issue_flags)
+{
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(io->cmd);
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+ int ret;
+
+ ret = io_buffer_register_bvec(io->cmd, req, ublk_io_release,
+ pdu->buf.index, issue_flags);
+ if (ret) {
+ if (pdu->buf.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
+ ublk_auto_buf_reg_fallback(req);
+ return true;
+ }
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ return false;
+ }
+ /* one extra reference is dropped by ublk_io_release */
+ refcount_set(&data->ref, 2);
+
+ data->buf_ctx_handle = io_uring_cmd_ctx_handle(io->cmd);
+ /* store buffer index in request payload */
+ data->buf_index = pdu->buf.index;
+ io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
+ return true;
+}
+
+static bool ublk_prep_auto_buf_reg(struct ublk_queue *ubq,
+ struct request *req, struct ublk_io *io,
+ unsigned int issue_flags)
+{
+ if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req))
+ return ublk_auto_buf_reg(req, io, issue_flags);
+
+ ublk_init_req_ref(ubq, req);
+ return true;
+}
+
+static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req,
+ struct ublk_io *io)
+{
+ unsigned mapped_bytes = ublk_map_io(ubq, req, io);
+
+ /* partially mapped, update io descriptor */
+ if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
+ /*
+ * Nothing mapped, retry until we succeed.
+ *
+ * We may never succeed in mapping any bytes here because
+ * of OOM. TODO: reserve one buffer with single page pinned
+ * for providing forward progress guarantee.
+ */
+ if (unlikely(!mapped_bytes)) {
+ blk_mq_requeue_request(req, false);
+ blk_mq_delay_kick_requeue_list(req->q,
+ UBLK_REQUEUE_DELAY_MS);
+ return false;
+ }
+
+ ublk_get_iod(ubq, req->tag)->nr_sectors =
+ mapped_bytes >> 9;
+ }
+
+ return true;
+}
+
static void ublk_dispatch_req(struct ublk_queue *ubq,
struct request *req,
unsigned int issue_flags)
{
int tag = req->tag;
struct ublk_io *io = &ubq->ios[tag];
- unsigned int mapped_bytes;
- pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
- __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
+ pr_devel("%s: complete: qid %d tag %d io_flags %x addr %llx\n",
+ __func__, ubq->q_id, req->tag, io->flags,
ublk_get_iod(ubq, req->tag)->addr);
/*
@@ -1183,54 +1289,22 @@ static void ublk_dispatch_req(struct ublk_queue *ubq,
if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
/*
* We have not handled UBLK_IO_NEED_GET_DATA command yet,
- * so immepdately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
+ * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
* and notify it.
*/
- if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
- io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
- pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
- __func__, io->cmd->cmd_op, ubq->q_id,
- req->tag, io->flags);
- ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
- return;
- }
- /*
- * We have handled UBLK_IO_NEED_GET_DATA command,
- * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
- * do the copy work.
- */
- io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
- /* update iod->addr because ublksrv may have passed a new io buffer */
- ublk_get_iod(ubq, req->tag)->addr = io->addr;
- pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
- __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
- ublk_get_iod(ubq, req->tag)->addr);
+ io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
+ pr_devel("%s: need get data. qid %d tag %d io_flags %x\n",
+ __func__, ubq->q_id, req->tag, io->flags);
+ ublk_complete_io_cmd(io, req, UBLK_IO_RES_NEED_GET_DATA,
+ issue_flags);
+ return;
}
- mapped_bytes = ublk_map_io(ubq, req, io);
-
- /* partially mapped, update io descriptor */
- if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
- /*
- * Nothing mapped, retry until we succeed.
- *
- * We may never succeed in mapping any bytes here because
- * of OOM. TODO: reserve one buffer with single page pinned
- * for providing forward progress guarantee.
- */
- if (unlikely(!mapped_bytes)) {
- blk_mq_requeue_request(req, false);
- blk_mq_delay_kick_requeue_list(req->q,
- UBLK_REQUEUE_DELAY_MS);
- return;
- }
-
- ublk_get_iod(ubq, req->tag)->nr_sectors =
- mapped_bytes >> 9;
- }
+ if (!ublk_start_io(ubq, req, io))
+ return;
- ublk_init_req_ref(ubq, req);
- ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
+ if (ublk_prep_auto_buf_reg(ubq, req, io, issue_flags))
+ ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags);
}
static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd,
@@ -1590,30 +1664,6 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}
-static void ublk_commit_completion(struct ublk_device *ub,
- const struct ublksrv_io_cmd *ub_cmd)
-{
- u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
- struct ublk_queue *ubq = ublk_get_queue(ub, qid);
- struct ublk_io *io = &ubq->ios[tag];
- struct request *req;
-
- /* now this cmd slot is owned by nbd driver */
- io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
- io->res = ub_cmd->result;
-
- /* find the io request and complete */
- req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
- if (WARN_ON_ONCE(unlikely(!req)))
- return;
-
- if (req_op(req) == REQ_OP_ZONE_APPEND)
- req->__sector = ub_cmd->zone_append_lba;
-
- if (likely(!blk_should_fake_timeout(req->q)))
- ublk_put_req_ref(ubq, req);
-}
-
static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
struct request *req)
{
@@ -1642,17 +1692,8 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
- if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
- struct request *rq;
-
- /*
- * Either we fail the request or ublk_rq_task_work_cb
- * will do it
- */
- rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
- if (rq && blk_mq_request_started(rq))
- __ublk_fail_req(ubq, io, rq);
- }
+ if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
+ __ublk_fail_req(ubq, io, io->req);
}
}
@@ -1940,6 +1981,20 @@ static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
io_uring_cmd_mark_cancelable(cmd, issue_flags);
}
+static inline int ublk_set_auto_buf_reg(struct io_uring_cmd *cmd)
+{
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+
+ pdu->buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
+
+ if (pdu->buf.reserved0 || pdu->buf.reserved1)
+ return -EINVAL;
+
+ if (pdu->buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
+ return -EINVAL;
+ return 0;
+}
+
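For context, the decoding above is the kernel-facing half of an 8-byte descriptor that the ublk server packs into sqe->addr. A minimal sketch of the matching userspace encoding, assuming the little-endian field order implied by the uapi struct ublk_auto_buf_reg (index, flags, reserved0, reserved1) and the BUILD_BUG_ON(sizeof(struct ublk_auto_buf_reg) != 8) added later in this patch; the helper name is illustrative:

    #include <linux/types.h>

    /* Pack an auto buffer registration descriptor for UBLK_IO_FETCH_REQ /
     * UBLK_IO_COMMIT_AND_FETCH_REQ: the low 16 bits carry the fixed buffer
     * table index, the next 8 bits the flags (checked against
     * UBLK_AUTO_BUF_REG_F_MASK above); reserved bits must stay zero.
     */
    static inline __u64 auto_buf_reg_to_sqe_addr(__u16 index, __u8 flags)
    {
    	return (__u64)index | ((__u64)flags << 16);
    }

The reserved0/reserved1 checks in ublk_set_auto_buf_reg() then reject any stray bits outside this encoding.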
static void ublk_io_release(void *priv)
{
struct request *rq = priv;
@@ -1953,16 +2008,12 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd,
unsigned int index, unsigned int issue_flags)
{
struct ublk_device *ub = cmd->file->private_data;
- const struct ublk_io *io = &ubq->ios[tag];
struct request *req;
int ret;
if (!ublk_support_zero_copy(ubq))
return -EINVAL;
- if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
- return -EINVAL;
-
req = __ublk_check_and_get_req(ub, ubq, tag, 0);
if (!req)
return -EINVAL;
@@ -1978,17 +2029,12 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd,
}
static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
- const struct ublk_queue *ubq, unsigned int tag,
+ const struct ublk_queue *ubq,
unsigned int index, unsigned int issue_flags)
{
- const struct ublk_io *io = &ubq->ios[tag];
-
if (!ublk_support_zero_copy(ubq))
return -EINVAL;
- if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
- return -EINVAL;
-
return io_buffer_unregister_bvec(cmd, index, issue_flags);
}
@@ -2031,6 +2077,12 @@ static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
goto out;
}
+ if (ublk_support_auto_buf_reg(ubq)) {
+ ret = ublk_set_auto_buf_reg(cmd);
+ if (ret)
+ goto out;
+ }
+
ublk_fill_io_cmd(io, cmd, buf_addr);
ublk_mark_io_ready(ub, ubq);
out:
@@ -2038,6 +2090,90 @@ out:
return ret;
}
+static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
+ struct ublk_io *io, struct io_uring_cmd *cmd,
+ const struct ublksrv_io_cmd *ub_cmd,
+ unsigned int issue_flags)
+{
+ struct request *req = io->req;
+
+ if (ublk_need_map_io(ubq)) {
+ /*
+		 * COMMIT_AND_FETCH_REQ has to provide the IO buffer if
+		 * NEED_GET_DATA is not enabled or if this is a READ request.
+ */
+ if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
+ req_op(req) == REQ_OP_READ))
+ return -EINVAL;
+ } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) {
+ /*
+		 * User copy requires addr to be unset when the command is
+		 * not a zone append
+ */
+ return -EINVAL;
+ }
+
+ if (ublk_support_auto_buf_reg(ubq)) {
+ int ret;
+
+ /*
+		 * `UBLK_F_AUTO_BUF_REG` only works if `UBLK_IO_FETCH_REQ`
+		 * and `UBLK_IO_COMMIT_AND_FETCH_REQ` are issued from the
+		 * same `io_ring_ctx`.
+		 *
+		 * If this uring_cmd's io_ring_ctx isn't the same as the
+		 * one used to register the buffer, it is the ublk server's
+		 * responsibility to unregister the buffer, otherwise this
+		 * ublk request gets stuck.
+ */
+ if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) {
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+ if (data->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
+ io_buffer_unregister_bvec(cmd, data->buf_index,
+ issue_flags);
+ io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG;
+ }
+
+ ret = ublk_set_auto_buf_reg(cmd);
+ if (ret)
+ return ret;
+ }
+
+ ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
+
+	/* now this cmd slot is owned by the ublk driver */
+ io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
+ io->res = ub_cmd->result;
+
+ if (req_op(req) == REQ_OP_ZONE_APPEND)
+ req->__sector = ub_cmd->zone_append_lba;
+
+ if (likely(!blk_should_fake_timeout(req->q)))
+ ublk_put_req_ref(ubq, req);
+
+ return 0;
+}
+
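For reference, the payload consumed here is the 16-byte ublksrv_io_cmd from the ublk uapi. A hedged sketch of how a server might fill it when committing one request and re-arming the slot (the helper is illustrative; placing it in the uring_cmd SQE and submitting it is elided):

    #include <linux/ublk_cmd.h>
    #include <string.h>

    /* Build the UBLK_IO_COMMIT_AND_FETCH_REQ payload for one tag.  res is
     * the committed byte count (or a negative errno); buf_addr is only
     * meaningful when the device maps IO through a server-provided buffer,
     * or carries the written LBA for zone append completions via the union.
     */
    static void fill_commit_and_fetch(struct ublksrv_io_cmd *io_cmd,
    				      __u16 q_id, __u16 tag, __s32 res,
    				      __u64 buf_addr)
    {
    	memset(io_cmd, 0, sizeof(*io_cmd));
    	io_cmd->q_id = q_id;
    	io_cmd->tag = tag;
    	io_cmd->result = res;
    	io_cmd->addr = buf_addr;
    }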
+static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io)
+{
+ struct request *req = io->req;
+
+ /*
+	 * We have handled the UBLK_IO_NEED_GET_DATA command, so clear
+	 * UBLK_IO_FLAG_NEED_GET_DATA now and just do the copy work.
+ */
+ io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
+ /* update iod->addr because ublksrv may have passed a new io buffer */
+ ublk_get_iod(ubq, req->tag)->addr = io->addr;
+ pr_devel("%s: update iod->addr: qid %d tag %d io_flags %x addr %llx\n",
+ __func__, ubq->q_id, req->tag, io->flags,
+ ublk_get_iod(ubq, req->tag)->addr);
+
+ return ublk_start_io(ubq, req, io);
+}
+
static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags,
const struct ublksrv_io_cmd *ub_cmd)
@@ -2048,7 +2184,6 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
u32 cmd_op = cmd->cmd_op;
unsigned tag = ub_cmd->tag;
int ret = -EINVAL;
- struct request *req;
pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
__func__, cmd->cmd_op, ub_cmd->q_id, tag,
@@ -2058,9 +2193,6 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
goto out;
ubq = ublk_get_queue(ub, ub_cmd->q_id);
- if (!ubq || ub_cmd->q_id != ubq->q_id)
- goto out;
-
if (ubq->ubq_daemon && ubq->ubq_daemon != current)
goto out;
@@ -2075,6 +2207,11 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
goto out;
}
+ /* only UBLK_IO_FETCH_REQ is allowed if io is not OWNED_BY_SRV */
+ if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV) &&
+ _IOC_NR(cmd_op) != UBLK_IO_FETCH_REQ)
+ goto out;
+
/*
* ensure that the user issues UBLK_IO_NEED_GET_DATA
	 * iff the driver has set the UBLK_IO_FLAG_NEED_GET_DATA.
@@ -2092,45 +2229,23 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
case UBLK_IO_REGISTER_IO_BUF:
return ublk_register_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags);
case UBLK_IO_UNREGISTER_IO_BUF:
- return ublk_unregister_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags);
+ return ublk_unregister_io_buf(cmd, ubq, ub_cmd->addr, issue_flags);
case UBLK_IO_FETCH_REQ:
ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr);
if (ret)
goto out;
break;
case UBLK_IO_COMMIT_AND_FETCH_REQ:
- req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
-
- if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
- goto out;
-
- if (ublk_need_map_io(ubq)) {
- /*
- * COMMIT_AND_FETCH_REQ has to provide IO buffer if
- * NEED GET DATA is not enabled or it is Read IO.
- */
- if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
- req_op(req) == REQ_OP_READ))
- goto out;
- } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) {
- /*
- * User copy requires addr to be unset when command is
- * not zone append
- */
- ret = -EINVAL;
+ ret = ublk_commit_and_fetch(ubq, io, cmd, ub_cmd, issue_flags);
+ if (ret)
goto out;
- }
-
- ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
- ublk_commit_completion(ub, ub_cmd);
break;
case UBLK_IO_NEED_GET_DATA:
- if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
- goto out;
- ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
- req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
- ublk_dispatch_req(ubq, req, issue_flags);
- return -EIOCBQUEUED;
+ io->addr = ub_cmd->addr;
+ if (!ublk_get_data(ubq, io))
+ return -EIOCBQUEUED;
+
+ return UBLK_IO_RES_OK;
default:
goto out;
}
@@ -2728,6 +2843,11 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
return -EINVAL;
}
+ if ((info.flags & UBLK_F_QUIESCE) && !(info.flags & UBLK_F_USER_RECOVERY)) {
+ pr_warn("UBLK_F_QUIESCE requires UBLK_F_USER_RECOVERY\n");
+ return -EINVAL;
+ }
+
/*
* unprivileged device can't be trusted, but RECOVERY and
* RECOVERY_REISSUE still may hang error handling, so can't
@@ -2744,8 +2864,11 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
	 * For USER_COPY, we depend on userspace to fill the request
	 * buffer by pwrite() to the ublk char device, which can't be
	 * used for an unprivileged device
+ *
+	 * The same applies to zero copy and auto buffer registration.
*/
- if (info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY))
+ if (info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
+ UBLK_F_AUTO_BUF_REG))
return -EINVAL;
}
@@ -2803,7 +2926,8 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
UBLK_F_URING_CMD_COMP_IN_TASK;
/* GET_DATA isn't needed any more with USER_COPY or ZERO COPY */
- if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY))
+ if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
+ UBLK_F_AUTO_BUF_REG))
ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
/*
@@ -3106,6 +3230,127 @@ static int ublk_ctrl_get_features(const struct ublksrv_ctrl_cmd *header)
return 0;
}
+static void ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header)
+{
+ struct ublk_param_basic *p = &ub->params.basic;
+ u64 new_size = header->data[0];
+
+ mutex_lock(&ub->mutex);
+ p->dev_sectors = new_size;
+ set_capacity_and_notify(ub->ub_disk, p->dev_sectors);
+ mutex_unlock(&ub->mutex);
+}
+
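Resizing is driven from userspace with the new capacity carried in header->data[0], in 512-byte sectors. A minimal sketch of preparing that control command (field usage inferred from ublk_ctrl_set_size() above; submitting it through the SQE128 command area of /dev/ublk-control is elided):

    #include <linux/ublk_cmd.h>
    #include <string.h>

    /* Prepare UBLK_CMD_UPDATE_SIZE for device dev_id; new_sectors is the
     * new capacity in 512-byte sectors, as consumed via header->data[0]. */
    static void fill_update_size(struct ublksrv_ctrl_cmd *cmd, __u32 dev_id,
    				 __u64 new_sectors)
    {
    	memset(cmd, 0, sizeof(*cmd));
    	cmd->dev_id = dev_id;
    	cmd->queue_id = (__u16)-1;	/* not a per-queue command */
    	cmd->data[0] = new_sectors;
    }

UBLK_CMD_QUIESCE_DEV below follows the same pattern, with data[0] carrying the timeout in milliseconds (zero meaning wait forever).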
+struct count_busy {
+ const struct ublk_queue *ubq;
+ unsigned int nr_busy;
+};
+
+static bool ublk_count_busy_req(struct request *rq, void *data)
+{
+ struct count_busy *idle = data;
+
+	if (blk_mq_request_started(rq) && rq->mq_hctx->driver_data == idle->ubq)
+ idle->nr_busy += 1;
+ return true;
+}
+
+/* uring_cmd is guaranteed to be active if the associated request is idle */
+static bool ubq_has_idle_io(const struct ublk_queue *ubq)
+{
+ struct count_busy data = {
+ .ubq = ubq,
+ };
+
+ blk_mq_tagset_busy_iter(&ubq->dev->tag_set, ublk_count_busy_req, &data);
+ return data.nr_busy < ubq->q_depth;
+}
+
+/* Wait until each hw queue has at least one idle IO */
+static int ublk_wait_for_idle_io(struct ublk_device *ub,
+ unsigned int timeout_ms)
+{
+ unsigned int elapsed = 0;
+ int ret;
+
+ while (elapsed < timeout_ms && !signal_pending(current)) {
+ unsigned int queues_cancelable = 0;
+ int i;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ queues_cancelable += !!ubq_has_idle_io(ubq);
+ }
+
+ /*
+ * Each queue needs at least one active command for
+ * notifying ublk server
+		 * notifying the ublk server
+ if (queues_cancelable == ub->dev_info.nr_hw_queues)
+ break;
+
+ msleep(UBLK_REQUEUE_DELAY_MS);
+ elapsed += UBLK_REQUEUE_DELAY_MS;
+ }
+
+ if (signal_pending(current))
+ ret = -EINTR;
+ else if (elapsed >= timeout_ms)
+ ret = -EBUSY;
+ else
+ ret = 0;
+
+ return ret;
+}
+
+static int ublk_ctrl_quiesce_dev(struct ublk_device *ub,
+ const struct ublksrv_ctrl_cmd *header)
+{
+ /* zero means wait forever */
+ u64 timeout_ms = header->data[0];
+ struct gendisk *disk;
+ int i, ret = -ENODEV;
+
+ if (!(ub->dev_info.flags & UBLK_F_QUIESCE))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ub->mutex);
+ disk = ublk_get_disk(ub);
+ if (!disk)
+ goto unlock;
+ if (ub->dev_info.state == UBLK_S_DEV_DEAD)
+ goto put_disk;
+
+ ret = 0;
+ /* already in expected state */
+ if (ub->dev_info.state != UBLK_S_DEV_LIVE)
+ goto put_disk;
+
+ /* Mark all queues as canceling */
+ blk_mq_quiesce_queue(disk->queue);
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ ubq->canceling = true;
+ }
+ blk_mq_unquiesce_queue(disk->queue);
+
+ if (!timeout_ms)
+ timeout_ms = UINT_MAX;
+ ret = ublk_wait_for_idle_io(ub, timeout_ms);
+
+put_disk:
+ ublk_put_disk(disk);
+unlock:
+ mutex_unlock(&ub->mutex);
+
+ /* Cancel pending uring_cmd */
+ if (!ret)
+ ublk_cancel_dev(ub);
+ return ret;
+}
+
/*
* All control commands are sent via /dev/ublk-control, so we have to check
* the destination device's permission
@@ -3191,6 +3436,8 @@ static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
case UBLK_CMD_SET_PARAMS:
case UBLK_CMD_START_USER_RECOVERY:
case UBLK_CMD_END_USER_RECOVERY:
+ case UBLK_CMD_UPDATE_SIZE:
+ case UBLK_CMD_QUIESCE_DEV:
mask = MAY_READ | MAY_WRITE;
break;
default:
@@ -3282,6 +3529,13 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
case UBLK_CMD_END_USER_RECOVERY:
ret = ublk_ctrl_end_recovery(ub, header);
break;
+ case UBLK_CMD_UPDATE_SIZE:
+ ublk_ctrl_set_size(ub, header);
+ ret = 0;
+ break;
+ case UBLK_CMD_QUIESCE_DEV:
+ ret = ublk_ctrl_quiesce_dev(ub, header);
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -3315,6 +3569,7 @@ static int __init ublk_init(void)
BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET +
UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET);
+ BUILD_BUG_ON(sizeof(struct ublk_auto_buf_reg) != 8);
init_waitqueue_head(&ublk_idr_wq);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 7cffea01d868..30bca8cb7106 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -571,7 +571,7 @@ static int virtblk_submit_zone_report(struct virtio_blk *vblk,
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);
- err = blk_rq_map_kern(q, req, report_buf, report_len, GFP_KERNEL);
+ err = blk_rq_map_kern(req, report_buf, report_len, GFP_KERNEL);
if (err)
goto out;
@@ -817,7 +817,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
vbr->out_hdr.sector = 0;
- err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
+ err = blk_rq_map_kern(req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
if (err)
goto out;
diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c
new file mode 100644
index 000000000000..553b1a713ab9
--- /dev/null
+++ b/drivers/block/zloop.c
@@ -0,0 +1,1385 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Christoph Hellwig.
+ * Copyright (c) 2025, Western Digital Corporation or its affiliates.
+ *
+ * Zoned Loop Device driver - exports a zoned block device using one file per
+ * zone as backing storage.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/blk-mq.h>
+#include <linux/blkzoned.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/falloc.h>
+#include <linux/mutex.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+
+/*
+ * Options for adding (and removing) a device.
+ */
+enum {
+ ZLOOP_OPT_ERR = 0,
+ ZLOOP_OPT_ID = (1 << 0),
+ ZLOOP_OPT_CAPACITY = (1 << 1),
+ ZLOOP_OPT_ZONE_SIZE = (1 << 2),
+ ZLOOP_OPT_ZONE_CAPACITY = (1 << 3),
+ ZLOOP_OPT_NR_CONV_ZONES = (1 << 4),
+ ZLOOP_OPT_BASE_DIR = (1 << 5),
+ ZLOOP_OPT_NR_QUEUES = (1 << 6),
+ ZLOOP_OPT_QUEUE_DEPTH = (1 << 7),
+ ZLOOP_OPT_BUFFERED_IO = (1 << 8),
+};
+
+static const match_table_t zloop_opt_tokens = {
+ { ZLOOP_OPT_ID, "id=%d" },
+ { ZLOOP_OPT_CAPACITY, "capacity_mb=%u" },
+ { ZLOOP_OPT_ZONE_SIZE, "zone_size_mb=%u" },
+ { ZLOOP_OPT_ZONE_CAPACITY, "zone_capacity_mb=%u" },
+ { ZLOOP_OPT_NR_CONV_ZONES, "conv_zones=%u" },
+ { ZLOOP_OPT_BASE_DIR, "base_dir=%s" },
+ { ZLOOP_OPT_NR_QUEUES, "nr_queues=%u" },
+ { ZLOOP_OPT_QUEUE_DEPTH, "queue_depth=%u" },
+ { ZLOOP_OPT_BUFFERED_IO, "buffered_io" },
+ { ZLOOP_OPT_ERR, NULL }
+};
+
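These tokens define the grammar accepted by writes to /dev/zloop-control, parsed by zloop_parse_options() below. A minimal usage sketch in C (all values are examples); note that the data directory base_dir/<id> must already exist, since zloop_ctl_add() only opens it:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
    	/* Create zloop0: 2 GB capacity, 64 MB zones, 4 conventional zones. */
    	const char *cmd = "add id=0 capacity_mb=2048 zone_size_mb=64 "
    			  "conv_zones=4 base_dir=/var/local/zloop";
    	int fd = open("/dev/zloop-control", O_WRONLY);

    	if (fd < 0 || write(fd, cmd, strlen(cmd)) < 0)
    		perror("zloop add");
    	if (fd >= 0)
    		close(fd);
    	return 0;
    }

The matching teardown is a write of "remove id=0" to the same node.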
+/* Default values for the "add" operation. */
+#define ZLOOP_DEF_ID -1
+#define ZLOOP_DEF_ZONE_SIZE ((256ULL * SZ_1M) >> SECTOR_SHIFT)
+#define ZLOOP_DEF_NR_ZONES 64
+#define ZLOOP_DEF_NR_CONV_ZONES 8
+#define ZLOOP_DEF_BASE_DIR "/var/local/zloop"
+#define ZLOOP_DEF_NR_QUEUES 1
+#define ZLOOP_DEF_QUEUE_DEPTH 128
+#define ZLOOP_DEF_BUFFERED_IO false
+
+/* Arbitrary limit on the zone size (16GB). */
+#define ZLOOP_MAX_ZONE_SIZE_MB 16384
+
+struct zloop_options {
+ unsigned int mask;
+ int id;
+ sector_t capacity;
+ sector_t zone_size;
+ sector_t zone_capacity;
+ unsigned int nr_conv_zones;
+ char *base_dir;
+ unsigned int nr_queues;
+ unsigned int queue_depth;
+ bool buffered_io;
+};
+
+/*
+ * Device states.
+ */
+enum {
+ Zlo_creating = 0,
+ Zlo_live,
+ Zlo_deleting,
+};
+
+enum zloop_zone_flags {
+ ZLOOP_ZONE_CONV = 0,
+ ZLOOP_ZONE_SEQ_ERROR,
+};
+
+struct zloop_zone {
+ struct file *file;
+
+ unsigned long flags;
+ struct mutex lock;
+ enum blk_zone_cond cond;
+ sector_t start;
+ sector_t wp;
+
+ gfp_t old_gfp_mask;
+};
+
+struct zloop_device {
+ unsigned int id;
+ unsigned int state;
+
+ struct blk_mq_tag_set tag_set;
+ struct gendisk *disk;
+
+ struct workqueue_struct *workqueue;
+ bool buffered_io;
+
+ const char *base_dir;
+ struct file *data_dir;
+
+ unsigned int zone_shift;
+ sector_t zone_size;
+ sector_t zone_capacity;
+ unsigned int nr_zones;
+ unsigned int nr_conv_zones;
+ unsigned int block_size;
+
+ struct zloop_zone zones[] __counted_by(nr_zones);
+};
+
+struct zloop_cmd {
+ struct work_struct work;
+ atomic_t ref;
+ sector_t sector;
+ sector_t nr_sectors;
+ long ret;
+ struct kiocb iocb;
+ struct bio_vec *bvec;
+};
+
+static DEFINE_IDR(zloop_index_idr);
+static DEFINE_MUTEX(zloop_ctl_mutex);
+
+static unsigned int rq_zone_no(struct request *rq)
+{
+ struct zloop_device *zlo = rq->q->queuedata;
+
+ return blk_rq_pos(rq) >> zlo->zone_shift;
+}
+
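As a worked example of the shift arithmetic used here and in zloop_init_zone(): with the default 256 MB zone size a zone spans 524288 sectors, so zone_shift = ilog2(524288) = 19; a request starting at sector 1572864 then belongs to zone 1572864 >> 19 = 3, whose start is (sector_t)3 << 19 = 1572864.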
+static int zloop_update_seq_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ struct kstat stat;
+ sector_t file_sectors;
+ int ret;
+
+ lockdep_assert_held(&zone->lock);
+
+ ret = vfs_getattr(&zone->file->f_path, &stat, STATX_SIZE, 0);
+ if (ret < 0) {
+ pr_err("Failed to get zone %u file stat (err=%d)\n",
+ zone_no, ret);
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ return ret;
+ }
+
+ file_sectors = stat.size >> SECTOR_SHIFT;
+ if (file_sectors > zlo->zone_capacity) {
+ pr_err("Zone %u file too large (%llu sectors > %llu)\n",
+ zone_no, file_sectors, zlo->zone_capacity);
+ return -EINVAL;
+ }
+
+ if (file_sectors & ((zlo->block_size >> SECTOR_SHIFT) - 1)) {
+ pr_err("Zone %u file size not aligned to block size %u\n",
+ zone_no, zlo->block_size);
+ return -EINVAL;
+ }
+
+ if (!file_sectors) {
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ zone->wp = zone->start;
+ } else if (file_sectors == zlo->zone_capacity) {
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = zone->start + zlo->zone_size;
+ } else {
+ zone->cond = BLK_ZONE_COND_CLOSED;
+ zone->wp = zone->start + file_sectors;
+ }
+
+ return 0;
+}
+
+static int zloop_open_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ if (ret)
+ goto unlock;
+ }
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_EXP_OPEN:
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_CLOSED:
+ case BLK_ZONE_COND_IMP_OPEN:
+ zone->cond = BLK_ZONE_COND_EXP_OPEN;
+ break;
+ case BLK_ZONE_COND_FULL:
+ default:
+ ret = -EIO;
+ break;
+ }
+
+unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static int zloop_close_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ if (ret)
+ goto unlock;
+ }
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_CLOSED:
+ break;
+ case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ if (zone->wp == zone->start)
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ else
+ zone->cond = BLK_ZONE_COND_CLOSED;
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_FULL:
+ default:
+ ret = -EIO;
+ break;
+ }
+
+unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static int zloop_reset_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (!test_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags) &&
+ zone->cond == BLK_ZONE_COND_EMPTY)
+ goto unlock;
+
+ if (vfs_truncate(&zone->file->f_path, 0)) {
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ zone->wp = zone->start;
+ clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+
+unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static int zloop_reset_all_zones(struct zloop_device *zlo)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = zlo->nr_conv_zones; i < zlo->nr_zones; i++) {
+ ret = zloop_reset_zone(zlo, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zloop_finish_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (!test_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags) &&
+ zone->cond == BLK_ZONE_COND_FULL)
+ goto unlock;
+
+ if (vfs_truncate(&zone->file->f_path, zlo->zone_size << SECTOR_SHIFT)) {
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = zone->start + zlo->zone_size;
+ clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+
+ unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static void zloop_put_cmd(struct zloop_cmd *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+
+ if (!atomic_dec_and_test(&cmd->ref))
+ return;
+ kfree(cmd->bvec);
+ cmd->bvec = NULL;
+ if (likely(!blk_should_fake_timeout(rq->q)))
+ blk_mq_complete_request(rq);
+}
+
+static void zloop_rw_complete(struct kiocb *iocb, long ret)
+{
+ struct zloop_cmd *cmd = container_of(iocb, struct zloop_cmd, iocb);
+
+ cmd->ret = ret;
+ zloop_put_cmd(cmd);
+}
+
+static void zloop_rw(struct zloop_cmd *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+ struct zloop_device *zlo = rq->q->queuedata;
+ unsigned int zone_no = rq_zone_no(rq);
+ sector_t sector = blk_rq_pos(rq);
+ sector_t nr_sectors = blk_rq_sectors(rq);
+ bool is_append = req_op(rq) == REQ_OP_ZONE_APPEND;
+ bool is_write = req_op(rq) == REQ_OP_WRITE || is_append;
+ int rw = is_write ? ITER_SOURCE : ITER_DEST;
+ struct req_iterator rq_iter;
+ struct zloop_zone *zone;
+ struct iov_iter iter;
+ struct bio_vec tmp;
+ sector_t zone_end;
+ int nr_bvec = 0;
+ int ret;
+
+ atomic_set(&cmd->ref, 2);
+ cmd->sector = sector;
+ cmd->nr_sectors = nr_sectors;
+ cmd->ret = 0;
+
+ /* We should never get an I/O beyond the device capacity. */
+ if (WARN_ON_ONCE(zone_no >= zlo->nr_zones)) {
+ ret = -EIO;
+ goto out;
+ }
+ zone = &zlo->zones[zone_no];
+ zone_end = zone->start + zlo->zone_capacity;
+
+ /*
+ * The block layer should never send requests that are not fully
+ * contained within the zone.
+ */
+ if (WARN_ON_ONCE(sector + nr_sectors > zone->start + zlo->zone_size)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ mutex_lock(&zone->lock);
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ mutex_unlock(&zone->lock);
+ if (ret)
+ goto out;
+ }
+
+ if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write) {
+ mutex_lock(&zone->lock);
+
+ if (is_append) {
+ sector = zone->wp;
+ cmd->sector = sector;
+ }
+
+ /*
+ * Write operations must be aligned to the write pointer and
+ * fully contained within the zone capacity.
+ */
+ if (sector != zone->wp || zone->wp + nr_sectors > zone_end) {
+ pr_err("Zone %u: unaligned write: sect %llu, wp %llu\n",
+ zone_no, sector, zone->wp);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /* Implicitly open the target zone. */
+ if (zone->cond == BLK_ZONE_COND_CLOSED ||
+ zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->cond = BLK_ZONE_COND_IMP_OPEN;
+
+ /*
+ * Advance the write pointer of sequential zones. If the write
+ * fails, the wp position will be corrected when the next I/O
+		 * completes.
+ */
+ zone->wp += nr_sectors;
+ if (zone->wp == zone_end)
+ zone->cond = BLK_ZONE_COND_FULL;
+ }
+
+ rq_for_each_bvec(tmp, rq, rq_iter)
+ nr_bvec++;
+
+ if (rq->bio != rq->biotail) {
+ struct bio_vec *bvec;
+
+ cmd->bvec = kmalloc_array(nr_bvec, sizeof(*cmd->bvec), GFP_NOIO);
+ if (!cmd->bvec) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /*
+		 * The bios of the request may start from the middle of a
+		 * 'bvec' because of bio splitting, so we can't directly
+		 * copy bio->bi_io_vec to the new bvec array. The
+		 * rq_for_each_bvec API takes care of those details for us.
+ */
+ bvec = cmd->bvec;
+ rq_for_each_bvec(tmp, rq, rq_iter) {
+ *bvec = tmp;
+ bvec++;
+ }
+ iov_iter_bvec(&iter, rw, cmd->bvec, nr_bvec, blk_rq_bytes(rq));
+ } else {
+ /*
+		 * Same here: this bio may start from the middle of the
+		 * 'bvec' because of bio splitting, so the offset within the
+		 * bvec must be passed to the iov iterator.
+ */
+ iov_iter_bvec(&iter, rw,
+ __bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter),
+ nr_bvec, blk_rq_bytes(rq));
+ iter.iov_offset = rq->bio->bi_iter.bi_bvec_done;
+ }
+
+ cmd->iocb.ki_pos = (sector - zone->start) << SECTOR_SHIFT;
+ cmd->iocb.ki_filp = zone->file;
+ cmd->iocb.ki_complete = zloop_rw_complete;
+ if (!zlo->buffered_io)
+ cmd->iocb.ki_flags = IOCB_DIRECT;
+ cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+
+ if (rw == ITER_SOURCE)
+ ret = zone->file->f_op->write_iter(&cmd->iocb, &iter);
+ else
+ ret = zone->file->f_op->read_iter(&cmd->iocb, &iter);
+unlock:
+ if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write)
+ mutex_unlock(&zone->lock);
+out:
+ if (ret != -EIOCBQUEUED)
+ zloop_rw_complete(&cmd->iocb, ret);
+ zloop_put_cmd(cmd);
+}
+
+static void zloop_handle_cmd(struct zloop_cmd *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+ struct zloop_device *zlo = rq->q->queuedata;
+
+ switch (req_op(rq)) {
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ case REQ_OP_ZONE_APPEND:
+ /*
+		 * zloop_rw() either executes asynchronously or completes
+		 * the request directly.
+ */
+ zloop_rw(cmd);
+ return;
+ case REQ_OP_FLUSH:
+ /*
+ * Sync the entire FS containing the zone files instead of
+ * walking all files
+ */
+ cmd->ret = sync_filesystem(file_inode(zlo->data_dir)->i_sb);
+ break;
+ case REQ_OP_ZONE_RESET:
+ cmd->ret = zloop_reset_zone(zlo, rq_zone_no(rq));
+ break;
+ case REQ_OP_ZONE_RESET_ALL:
+ cmd->ret = zloop_reset_all_zones(zlo);
+ break;
+ case REQ_OP_ZONE_FINISH:
+ cmd->ret = zloop_finish_zone(zlo, rq_zone_no(rq));
+ break;
+ case REQ_OP_ZONE_OPEN:
+ cmd->ret = zloop_open_zone(zlo, rq_zone_no(rq));
+ break;
+ case REQ_OP_ZONE_CLOSE:
+ cmd->ret = zloop_close_zone(zlo, rq_zone_no(rq));
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ pr_err("Unsupported operation %d\n", req_op(rq));
+ cmd->ret = -EOPNOTSUPP;
+ break;
+ }
+
+ blk_mq_complete_request(rq);
+}
+
+static void zloop_cmd_workfn(struct work_struct *work)
+{
+ struct zloop_cmd *cmd = container_of(work, struct zloop_cmd, work);
+ int orig_flags = current->flags;
+
+ current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
+ zloop_handle_cmd(cmd);
+ current->flags = orig_flags;
+}
+
+static void zloop_complete_rq(struct request *rq)
+{
+ struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct zloop_device *zlo = rq->q->queuedata;
+ unsigned int zone_no = cmd->sector >> zlo->zone_shift;
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ blk_status_t sts = BLK_STS_OK;
+
+ switch (req_op(rq)) {
+ case REQ_OP_READ:
+ if (cmd->ret < 0)
+ pr_err("Zone %u: failed read sector %llu, %llu sectors\n",
+ zone_no, cmd->sector, cmd->nr_sectors);
+
+ if (cmd->ret >= 0 && cmd->ret != blk_rq_bytes(rq)) {
+ /* short read */
+ struct bio *bio;
+
+ __rq_for_each_bio(bio, rq)
+ zero_fill_bio(bio);
+ }
+ break;
+ case REQ_OP_WRITE:
+ case REQ_OP_ZONE_APPEND:
+ if (cmd->ret < 0)
+ pr_err("Zone %u: failed %swrite sector %llu, %llu sectors\n",
+ zone_no,
+ req_op(rq) == REQ_OP_WRITE ? "" : "append ",
+ cmd->sector, cmd->nr_sectors);
+
+ if (cmd->ret >= 0 && cmd->ret != blk_rq_bytes(rq)) {
+ pr_err("Zone %u: partial write %ld/%u B\n",
+ zone_no, cmd->ret, blk_rq_bytes(rq));
+ cmd->ret = -EIO;
+ }
+
+ if (cmd->ret < 0 && !test_bit(ZLOOP_ZONE_CONV, &zone->flags)) {
+ /*
+ * A write to a sequential zone file failed: mark the
+ * zone as having an error. This will be corrected and
+ * cleared when the next IO is submitted.
+ */
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ break;
+ }
+ if (req_op(rq) == REQ_OP_ZONE_APPEND)
+ rq->__sector = cmd->sector;
+
+ break;
+ default:
+ break;
+ }
+
+ if (cmd->ret < 0)
+ sts = errno_to_blk_status(cmd->ret);
+ blk_mq_end_request(rq, sts);
+}
+
+static blk_status_t zloop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *rq = bd->rq;
+ struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct zloop_device *zlo = rq->q->queuedata;
+
+ if (zlo->state == Zlo_deleting)
+ return BLK_STS_IOERR;
+
+ blk_mq_start_request(rq);
+
+ INIT_WORK(&cmd->work, zloop_cmd_workfn);
+ queue_work(zlo->workqueue, &cmd->work);
+
+ return BLK_STS_OK;
+}
+
+static const struct blk_mq_ops zloop_mq_ops = {
+ .queue_rq = zloop_queue_rq,
+ .complete = zloop_complete_rq,
+};
+
+static int zloop_open(struct gendisk *disk, blk_mode_t mode)
+{
+ struct zloop_device *zlo = disk->private_data;
+ int ret;
+
+ ret = mutex_lock_killable(&zloop_ctl_mutex);
+ if (ret)
+ return ret;
+
+ if (zlo->state != Zlo_live)
+ ret = -ENXIO;
+ mutex_unlock(&zloop_ctl_mutex);
+ return ret;
+}
+
+static int zloop_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+ struct zloop_device *zlo = disk->private_data;
+ struct blk_zone blkz = {};
+ unsigned int first, i;
+ int ret;
+
+ first = disk_zone_no(disk, sector);
+ if (first >= zlo->nr_zones)
+ return 0;
+ nr_zones = min(nr_zones, zlo->nr_zones - first);
+
+ for (i = 0; i < nr_zones; i++) {
+ unsigned int zone_no = first + i;
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+
+ mutex_lock(&zone->lock);
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ if (ret) {
+ mutex_unlock(&zone->lock);
+ return ret;
+ }
+ }
+
+ blkz.start = zone->start;
+ blkz.len = zlo->zone_size;
+ blkz.wp = zone->wp;
+ blkz.cond = zone->cond;
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) {
+ blkz.type = BLK_ZONE_TYPE_CONVENTIONAL;
+ blkz.capacity = zlo->zone_size;
+ } else {
+ blkz.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
+ blkz.capacity = zlo->zone_capacity;
+ }
+
+ mutex_unlock(&zone->lock);
+
+ ret = cb(&blkz, i, data);
+ if (ret)
+ return ret;
+ }
+
+ return nr_zones;
+}
+
+static void zloop_free_disk(struct gendisk *disk)
+{
+ struct zloop_device *zlo = disk->private_data;
+ unsigned int i;
+
+ for (i = 0; i < zlo->nr_zones; i++) {
+ struct zloop_zone *zone = &zlo->zones[i];
+
+ mapping_set_gfp_mask(zone->file->f_mapping,
+ zone->old_gfp_mask);
+ fput(zone->file);
+ }
+
+ fput(zlo->data_dir);
+ destroy_workqueue(zlo->workqueue);
+ kfree(zlo->base_dir);
+ kvfree(zlo);
+}
+
+static const struct block_device_operations zloop_fops = {
+ .owner = THIS_MODULE,
+ .open = zloop_open,
+ .report_zones = zloop_report_zones,
+ .free_disk = zloop_free_disk,
+};
+
+__printf(3, 4)
+static struct file *zloop_filp_open_fmt(int oflags, umode_t mode,
+ const char *fmt, ...)
+{
+ struct file *file;
+ va_list ap;
+ char *p;
+
+ va_start(ap, fmt);
+ p = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ file = filp_open(p, oflags, mode);
+ kfree(p);
+ return file;
+}
+
+static int zloop_get_block_size(struct zloop_device *zlo,
+ struct zloop_zone *zone)
+{
+ struct block_device *sb_bdev = zone->file->f_mapping->host->i_sb->s_bdev;
+ struct kstat st;
+
+ /*
+	 * If the FS block size is 4K or less, use it as the device block
+	 * size. Otherwise, fall back to the FS direct IO alignment constraint
+	 * if one is provided, or to the physical block size of the FS's
+	 * underlying device if the direct IO alignment is unknown.
+ */
+ if (file_inode(zone->file)->i_sb->s_blocksize <= SZ_4K)
+ zlo->block_size = file_inode(zone->file)->i_sb->s_blocksize;
+ else if (!vfs_getattr(&zone->file->f_path, &st, STATX_DIOALIGN, 0) &&
+ (st.result_mask & STATX_DIOALIGN))
+ zlo->block_size = st.dio_offset_align;
+ else if (sb_bdev)
+ zlo->block_size = bdev_physical_block_size(sb_bdev);
+ else
+ zlo->block_size = SECTOR_SIZE;
+
+ if (zlo->zone_capacity & ((zlo->block_size >> SECTOR_SHIFT) - 1)) {
+ pr_err("Zone capacity is not aligned to block size %u\n",
+ zlo->block_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int zloop_init_zone(struct zloop_device *zlo, struct zloop_options *opts,
+ unsigned int zone_no, bool restore)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int oflags = O_RDWR;
+ struct kstat stat;
+ sector_t file_sectors;
+ int ret;
+
+ mutex_init(&zone->lock);
+ zone->start = (sector_t)zone_no << zlo->zone_shift;
+
+ if (!restore)
+ oflags |= O_CREAT;
+
+ if (!opts->buffered_io)
+ oflags |= O_DIRECT;
+
+ if (zone_no < zlo->nr_conv_zones) {
+ /* Conventional zone file. */
+ set_bit(ZLOOP_ZONE_CONV, &zone->flags);
+ zone->cond = BLK_ZONE_COND_NOT_WP;
+ zone->wp = U64_MAX;
+
+ zone->file = zloop_filp_open_fmt(oflags, 0600, "%s/%u/cnv-%06u",
+ zlo->base_dir, zlo->id, zone_no);
+ if (IS_ERR(zone->file)) {
+ pr_err("Failed to open zone %u file %s/%u/cnv-%06u (err=%ld)",
+ zone_no, zlo->base_dir, zlo->id, zone_no,
+ PTR_ERR(zone->file));
+ return PTR_ERR(zone->file);
+ }
+
+ if (!zlo->block_size) {
+ ret = zloop_get_block_size(zlo, zone);
+ if (ret)
+ return ret;
+ }
+
+ ret = vfs_getattr(&zone->file->f_path, &stat, STATX_SIZE, 0);
+ if (ret < 0) {
+ pr_err("Failed to get zone %u file stat\n", zone_no);
+ return ret;
+ }
+ file_sectors = stat.size >> SECTOR_SHIFT;
+
+ if (restore && file_sectors != zlo->zone_size) {
+ pr_err("Invalid conventional zone %u file size (%llu sectors != %llu)\n",
+			       zone_no, file_sectors, zlo->zone_size);
+			return -EINVAL;
+ }
+
+ ret = vfs_truncate(&zone->file->f_path,
+ zlo->zone_size << SECTOR_SHIFT);
+ if (ret < 0) {
+ pr_err("Failed to truncate zone %u file (err=%d)\n",
+ zone_no, ret);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ /* Sequential zone file. */
+ zone->file = zloop_filp_open_fmt(oflags, 0600, "%s/%u/seq-%06u",
+ zlo->base_dir, zlo->id, zone_no);
+ if (IS_ERR(zone->file)) {
+ pr_err("Failed to open zone %u file %s/%u/seq-%06u (err=%ld)",
+ zone_no, zlo->base_dir, zlo->id, zone_no,
+ PTR_ERR(zone->file));
+ return PTR_ERR(zone->file);
+ }
+
+ if (!zlo->block_size) {
+ ret = zloop_get_block_size(zlo, zone);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&zone->lock);
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static bool zloop_dev_exists(struct zloop_device *zlo)
+{
+ struct file *cnv, *seq;
+ bool exists;
+
+ cnv = zloop_filp_open_fmt(O_RDONLY, 0600, "%s/%u/cnv-%06u",
+ zlo->base_dir, zlo->id, 0);
+ seq = zloop_filp_open_fmt(O_RDONLY, 0600, "%s/%u/seq-%06u",
+ zlo->base_dir, zlo->id, 0);
+ exists = !IS_ERR(cnv) || !IS_ERR(seq);
+
+ if (!IS_ERR(cnv))
+ fput(cnv);
+ if (!IS_ERR(seq))
+ fput(seq);
+
+ return exists;
+}
+
+static int zloop_ctl_add(struct zloop_options *opts)
+{
+ struct queue_limits lim = {
+ .max_hw_sectors = SZ_1M >> SECTOR_SHIFT,
+ .max_hw_zone_append_sectors = SZ_1M >> SECTOR_SHIFT,
+ .chunk_sectors = opts->zone_size,
+ .features = BLK_FEAT_ZONED,
+ };
+ unsigned int nr_zones, i, j;
+ struct zloop_device *zlo;
+ int ret = -EINVAL;
+ bool restore;
+
+ __module_get(THIS_MODULE);
+
+ nr_zones = opts->capacity >> ilog2(opts->zone_size);
+ if (opts->nr_conv_zones >= nr_zones) {
+ pr_err("Invalid number of conventional zones %u\n",
+ opts->nr_conv_zones);
+ goto out;
+ }
+
+ zlo = kvzalloc(struct_size(zlo, zones, nr_zones), GFP_KERNEL);
+ if (!zlo) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ zlo->state = Zlo_creating;
+
+ ret = mutex_lock_killable(&zloop_ctl_mutex);
+ if (ret)
+ goto out_free_dev;
+
+ /* Allocate id, if @opts->id >= 0, we're requesting that specific id */
+ if (opts->id >= 0) {
+ ret = idr_alloc(&zloop_index_idr, zlo,
+ opts->id, opts->id + 1, GFP_KERNEL);
+ if (ret == -ENOSPC)
+ ret = -EEXIST;
+ } else {
+ ret = idr_alloc(&zloop_index_idr, zlo, 0, 0, GFP_KERNEL);
+ }
+ mutex_unlock(&zloop_ctl_mutex);
+ if (ret < 0)
+ goto out_free_dev;
+
+ zlo->id = ret;
+ zlo->zone_shift = ilog2(opts->zone_size);
+ zlo->zone_size = opts->zone_size;
+ if (opts->zone_capacity)
+ zlo->zone_capacity = opts->zone_capacity;
+ else
+ zlo->zone_capacity = zlo->zone_size;
+ zlo->nr_zones = nr_zones;
+ zlo->nr_conv_zones = opts->nr_conv_zones;
+ zlo->buffered_io = opts->buffered_io;
+
+ zlo->workqueue = alloc_workqueue("zloop%d", WQ_UNBOUND | WQ_FREEZABLE,
+ opts->nr_queues * opts->queue_depth, zlo->id);
+ if (!zlo->workqueue) {
+ ret = -ENOMEM;
+ goto out_free_idr;
+ }
+
+ if (opts->base_dir)
+ zlo->base_dir = kstrdup(opts->base_dir, GFP_KERNEL);
+ else
+ zlo->base_dir = kstrdup(ZLOOP_DEF_BASE_DIR, GFP_KERNEL);
+ if (!zlo->base_dir) {
+ ret = -ENOMEM;
+ goto out_destroy_workqueue;
+ }
+
+ zlo->data_dir = zloop_filp_open_fmt(O_RDONLY | O_DIRECTORY, 0, "%s/%u",
+ zlo->base_dir, zlo->id);
+ if (IS_ERR(zlo->data_dir)) {
+ ret = PTR_ERR(zlo->data_dir);
+ pr_warn("Failed to open directory %s/%u (err=%d)\n",
+ zlo->base_dir, zlo->id, ret);
+ goto out_free_base_dir;
+ }
+
+ /*
+ * If we already have zone files, we are restoring a device created by a
+ * previous add operation. In this case, zloop_init_zone() will check
+ * that the zone files are consistent with the zone configuration given.
+ */
+ restore = zloop_dev_exists(zlo);
+ for (i = 0; i < nr_zones; i++) {
+ ret = zloop_init_zone(zlo, opts, i, restore);
+ if (ret)
+ goto out_close_files;
+ }
+
+ lim.physical_block_size = zlo->block_size;
+ lim.logical_block_size = zlo->block_size;
+
+ zlo->tag_set.ops = &zloop_mq_ops;
+ zlo->tag_set.nr_hw_queues = opts->nr_queues;
+ zlo->tag_set.queue_depth = opts->queue_depth;
+ zlo->tag_set.numa_node = NUMA_NO_NODE;
+ zlo->tag_set.cmd_size = sizeof(struct zloop_cmd);
+ zlo->tag_set.driver_data = zlo;
+
+ ret = blk_mq_alloc_tag_set(&zlo->tag_set);
+ if (ret) {
+ pr_err("blk_mq_alloc_tag_set failed (err=%d)\n", ret);
+ goto out_close_files;
+ }
+
+ zlo->disk = blk_mq_alloc_disk(&zlo->tag_set, &lim, zlo);
+ if (IS_ERR(zlo->disk)) {
+		ret = PTR_ERR(zlo->disk);
+		pr_err("blk_mq_alloc_disk failed (err=%d)\n", ret);
+ goto out_cleanup_tags;
+ }
+ zlo->disk->flags = GENHD_FL_NO_PART;
+ zlo->disk->fops = &zloop_fops;
+ zlo->disk->private_data = zlo;
+ sprintf(zlo->disk->disk_name, "zloop%d", zlo->id);
+ set_capacity(zlo->disk, (u64)lim.chunk_sectors * zlo->nr_zones);
+
+ ret = blk_revalidate_disk_zones(zlo->disk);
+ if (ret)
+ goto out_cleanup_disk;
+
+ ret = add_disk(zlo->disk);
+ if (ret) {
+ pr_err("add_disk failed (err=%d)\n", ret);
+ goto out_cleanup_disk;
+ }
+
+ mutex_lock(&zloop_ctl_mutex);
+ zlo->state = Zlo_live;
+ mutex_unlock(&zloop_ctl_mutex);
+
+ pr_info("Added device %d: %u zones of %llu MB, %u B block size\n",
+ zlo->id, zlo->nr_zones,
+ ((sector_t)zlo->zone_size << SECTOR_SHIFT) >> 20,
+ zlo->block_size);
+
+ return 0;
+
+out_cleanup_disk:
+ put_disk(zlo->disk);
+out_cleanup_tags:
+ blk_mq_free_tag_set(&zlo->tag_set);
+out_close_files:
+ for (j = 0; j < i; j++) {
+ struct zloop_zone *zone = &zlo->zones[j];
+
+ if (!IS_ERR_OR_NULL(zone->file))
+ fput(zone->file);
+ }
+ fput(zlo->data_dir);
+out_free_base_dir:
+ kfree(zlo->base_dir);
+out_destroy_workqueue:
+ destroy_workqueue(zlo->workqueue);
+out_free_idr:
+ mutex_lock(&zloop_ctl_mutex);
+ idr_remove(&zloop_index_idr, zlo->id);
+ mutex_unlock(&zloop_ctl_mutex);
+out_free_dev:
+ kvfree(zlo);
+out:
+ module_put(THIS_MODULE);
+ if (ret == -ENOENT)
+ ret = -EINVAL;
+ return ret;
+}
+
+static int zloop_ctl_remove(struct zloop_options *opts)
+{
+ struct zloop_device *zlo;
+ int ret;
+
+ if (!(opts->mask & ZLOOP_OPT_ID)) {
+ pr_err("No ID specified\n");
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_killable(&zloop_ctl_mutex);
+ if (ret)
+ return ret;
+
+ zlo = idr_find(&zloop_index_idr, opts->id);
+ if (!zlo || zlo->state == Zlo_creating) {
+ ret = -ENODEV;
+ } else if (zlo->state == Zlo_deleting) {
+ ret = -EINVAL;
+ } else {
+ idr_remove(&zloop_index_idr, zlo->id);
+ zlo->state = Zlo_deleting;
+ }
+
+ mutex_unlock(&zloop_ctl_mutex);
+ if (ret)
+ return ret;
+
+ del_gendisk(zlo->disk);
+ put_disk(zlo->disk);
+ blk_mq_free_tag_set(&zlo->tag_set);
+
+ pr_info("Removed device %d\n", opts->id);
+
+ module_put(THIS_MODULE);
+
+ return 0;
+}
+
+static int zloop_parse_options(struct zloop_options *opts, const char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ unsigned int token;
+ int ret = 0;
+
+ /* Set defaults. */
+ opts->mask = 0;
+ opts->id = ZLOOP_DEF_ID;
+ opts->capacity = ZLOOP_DEF_ZONE_SIZE * ZLOOP_DEF_NR_ZONES;
+ opts->zone_size = ZLOOP_DEF_ZONE_SIZE;
+ opts->nr_conv_zones = ZLOOP_DEF_NR_CONV_ZONES;
+ opts->nr_queues = ZLOOP_DEF_NR_QUEUES;
+ opts->queue_depth = ZLOOP_DEF_QUEUE_DEPTH;
+ opts->buffered_io = ZLOOP_DEF_BUFFERED_IO;
+
+ if (!buf)
+ return 0;
+
+ /* Skip leading spaces before the options. */
+ while (isspace(*buf))
+ buf++;
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ /* Parse the options, doing only some light invalid value checks. */
+ while ((p = strsep(&o, ",\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, zloop_opt_tokens, args);
+ opts->mask |= token;
+ switch (token) {
+ case ZLOOP_OPT_ID:
+ if (match_int(args, &opts->id)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ case ZLOOP_OPT_CAPACITY:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid capacity\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->capacity =
+ ((sector_t)token * SZ_1M) >> SECTOR_SHIFT;
+ break;
+ case ZLOOP_OPT_ZONE_SIZE:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token || token > ZLOOP_MAX_ZONE_SIZE_MB ||
+ !is_power_of_2(token)) {
+ pr_err("Invalid zone size %u\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->zone_size =
+ ((sector_t)token * SZ_1M) >> SECTOR_SHIFT;
+ break;
+ case ZLOOP_OPT_ZONE_CAPACITY:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid zone capacity\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->zone_capacity =
+ ((sector_t)token * SZ_1M) >> SECTOR_SHIFT;
+ break;
+ case ZLOOP_OPT_NR_CONV_ZONES:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->nr_conv_zones = token;
+ break;
+ case ZLOOP_OPT_BASE_DIR:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(opts->base_dir);
+ opts->base_dir = p;
+ break;
+ case ZLOOP_OPT_NR_QUEUES:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid number of queues\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->nr_queues = min(token, num_online_cpus());
+ break;
+ case ZLOOP_OPT_QUEUE_DEPTH:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid queue depth\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->queue_depth = token;
+ break;
+ case ZLOOP_OPT_BUFFERED_IO:
+ opts->buffered_io = true;
+ break;
+ case ZLOOP_OPT_ERR:
+ default:
+ pr_warn("unknown parameter or missing value '%s'\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ ret = -EINVAL;
+ if (opts->capacity <= opts->zone_size) {
+ pr_err("Invalid capacity\n");
+ goto out;
+ }
+
+ if (opts->zone_capacity > opts->zone_size) {
+ pr_err("Invalid zone capacity\n");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ kfree(options);
+ return ret;
+}
+
+enum {
+ ZLOOP_CTL_ADD,
+ ZLOOP_CTL_REMOVE,
+};
+
+static struct zloop_ctl_op {
+ int code;
+ const char *name;
+} zloop_ctl_ops[] = {
+ { ZLOOP_CTL_ADD, "add" },
+ { ZLOOP_CTL_REMOVE, "remove" },
+ { -1, NULL },
+};
+
+static ssize_t zloop_ctl_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *pos)
+{
+ struct zloop_options opts = { };
+ struct zloop_ctl_op *op;
+ const char *buf, *opts_buf;
+ int i, ret;
+
+ if (count > PAGE_SIZE)
+ return -ENOMEM;
+
+ buf = memdup_user_nul(ubuf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ for (i = 0; i < ARRAY_SIZE(zloop_ctl_ops); i++) {
+ op = &zloop_ctl_ops[i];
+ if (!op->name) {
+ pr_err("Invalid operation\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!strncmp(buf, op->name, strlen(op->name)))
+ break;
+ }
+
+ if (count <= strlen(op->name))
+ opts_buf = NULL;
+ else
+ opts_buf = buf + strlen(op->name);
+
+ ret = zloop_parse_options(&opts, opts_buf);
+ if (ret) {
+ pr_err("Failed to parse options\n");
+ goto out;
+ }
+
+ switch (op->code) {
+ case ZLOOP_CTL_ADD:
+ ret = zloop_ctl_add(&opts);
+ break;
+ case ZLOOP_CTL_REMOVE:
+ ret = zloop_ctl_remove(&opts);
+ break;
+ default:
+ pr_err("Invalid operation\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(opts.base_dir);
+ kfree(buf);
+ return ret ? ret : count;
+}
+
+static int zloop_ctl_show(struct seq_file *seq_file, void *private)
+{
+ const struct match_token *tok;
+ int i;
+
+ /* Add operation */
+ seq_printf(seq_file, "%s ", zloop_ctl_ops[0].name);
+ for (i = 0; i < ARRAY_SIZE(zloop_opt_tokens); i++) {
+ tok = &zloop_opt_tokens[i];
+ if (!tok->pattern)
+ break;
+ if (i)
+ seq_putc(seq_file, ',');
+ seq_puts(seq_file, tok->pattern);
+ }
+ seq_putc(seq_file, '\n');
+
+ /* Remove operation */
+ seq_puts(seq_file, zloop_ctl_ops[1].name);
+ seq_puts(seq_file, " id=%d\n");
+
+ return 0;
+}
+
+static int zloop_ctl_open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+ return single_open(file, zloop_ctl_show, NULL);
+}
+
+static int zloop_ctl_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations zloop_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = zloop_ctl_open,
+ .release = zloop_ctl_release,
+ .write = zloop_ctl_write,
+ .read = seq_read,
+};
+
+static struct miscdevice zloop_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "zloop-control",
+ .fops = &zloop_ctl_fops,
+};
+
+static int __init zloop_init(void)
+{
+ int ret;
+
+ ret = misc_register(&zloop_misc);
+ if (ret) {
+ pr_err("Failed to register misc device: %d\n", ret);
+ return ret;
+ }
+ pr_info("Module loaded\n");
+
+ return 0;
+}
+
+static void __exit zloop_exit(void)
+{
+ misc_deregister(&zloop_misc);
+ idr_destroy(&zloop_index_idr);
+}
+
+module_init(zloop_init);
+module_exit(zloop_exit);
+
+MODULE_DESCRIPTION("Zoned loopback device");
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index fda7d8624889..94e6e9b80bf0 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -734,114 +734,19 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
submit_bio(bio);
}
-#define PAGE_WB_SIG "page_index="
-
-#define PAGE_WRITEBACK 0
-#define HUGE_WRITEBACK (1<<0)
-#define IDLE_WRITEBACK (1<<1)
-#define INCOMPRESSIBLE_WRITEBACK (1<<2)
-
-static int scan_slots_for_writeback(struct zram *zram, u32 mode,
- unsigned long nr_pages,
- unsigned long index,
- struct zram_pp_ctl *ctl)
+static int zram_writeback_slots(struct zram *zram, struct zram_pp_ctl *ctl)
{
- for (; nr_pages != 0; index++, nr_pages--) {
- bool ok = true;
-
- zram_slot_lock(zram, index);
- if (!zram_allocated(zram, index))
- goto next;
-
- if (zram_test_flag(zram, index, ZRAM_WB) ||
- zram_test_flag(zram, index, ZRAM_SAME))
- goto next;
-
- if (mode & IDLE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_IDLE))
- goto next;
- if (mode & HUGE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_HUGE))
- goto next;
- if (mode & INCOMPRESSIBLE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
- goto next;
-
- ok = place_pp_slot(zram, ctl, index);
-next:
- zram_slot_unlock(zram, index);
- if (!ok)
- break;
- }
-
- return 0;
-}
-
-static ssize_t writeback_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct zram *zram = dev_to_zram(dev);
- unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
- struct zram_pp_ctl *ctl = NULL;
+ unsigned long blk_idx = 0;
+ struct page *page = NULL;
struct zram_pp_slot *pps;
- unsigned long index = 0;
- struct bio bio;
struct bio_vec bio_vec;
- struct page *page = NULL;
- ssize_t ret = len;
- int mode, err;
- unsigned long blk_idx = 0;
-
- if (sysfs_streq(buf, "idle"))
- mode = IDLE_WRITEBACK;
- else if (sysfs_streq(buf, "huge"))
- mode = HUGE_WRITEBACK;
- else if (sysfs_streq(buf, "huge_idle"))
- mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
- else if (sysfs_streq(buf, "incompressible"))
- mode = INCOMPRESSIBLE_WRITEBACK;
- else {
- if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
- return -EINVAL;
-
- if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
- index >= nr_pages)
- return -EINVAL;
-
- nr_pages = 1;
- mode = PAGE_WRITEBACK;
- }
-
- down_read(&zram->init_lock);
- if (!init_done(zram)) {
- ret = -EINVAL;
- goto release_init_lock;
- }
-
- /* Do not permit concurrent post-processing actions. */
- if (atomic_xchg(&zram->pp_in_progress, 1)) {
- up_read(&zram->init_lock);
- return -EAGAIN;
- }
-
- if (!zram->backing_dev) {
- ret = -ENODEV;
- goto release_init_lock;
- }
+ struct bio bio;
+ int ret = 0, err;
+ u32 index;
page = alloc_page(GFP_KERNEL);
- if (!page) {
- ret = -ENOMEM;
- goto release_init_lock;
- }
-
- ctl = init_pp_ctl();
- if (!ctl) {
- ret = -ENOMEM;
- goto release_init_lock;
- }
-
- scan_slots_for_writeback(zram, mode, nr_pages, index, ctl);
+ if (!page)
+ return -ENOMEM;
while ((pps = select_pp_slot(ctl))) {
spin_lock(&zram->wb_limit_lock);
@@ -929,10 +834,215 @@ next:
if (blk_idx)
free_block_bdev(zram, blk_idx);
-
-release_init_lock:
if (page)
__free_page(page);
+
+ return ret;
+}
+
+#define PAGE_WRITEBACK 0
+#define HUGE_WRITEBACK (1 << 0)
+#define IDLE_WRITEBACK (1 << 1)
+#define INCOMPRESSIBLE_WRITEBACK (1 << 2)
+
+static int parse_page_index(char *val, unsigned long nr_pages,
+ unsigned long *lo, unsigned long *hi)
+{
+ int ret;
+
+ ret = kstrtoul(val, 10, lo);
+ if (ret)
+ return ret;
+ if (*lo >= nr_pages)
+ return -ERANGE;
+ *hi = *lo + 1;
+ return 0;
+}
+
+static int parse_page_indexes(char *val, unsigned long nr_pages,
+ unsigned long *lo, unsigned long *hi)
+{
+ char *delim;
+ int ret;
+
+ delim = strchr(val, '-');
+ if (!delim)
+ return -EINVAL;
+
+ *delim = 0x00;
+ ret = kstrtoul(val, 10, lo);
+ if (ret)
+ return ret;
+ if (*lo >= nr_pages)
+ return -ERANGE;
+
+ ret = kstrtoul(delim + 1, 10, hi);
+ if (ret)
+ return ret;
+ if (*hi >= nr_pages || *lo > *hi)
+ return -ERANGE;
+ *hi += 1;
+ return 0;
+}
+
+static int parse_mode(char *val, u32 *mode)
+{
+ *mode = 0;
+
+ if (!strcmp(val, "idle"))
+ *mode = IDLE_WRITEBACK;
+ if (!strcmp(val, "huge"))
+ *mode = HUGE_WRITEBACK;
+ if (!strcmp(val, "huge_idle"))
+ *mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
+ if (!strcmp(val, "incompressible"))
+ *mode = INCOMPRESSIBLE_WRITEBACK;
+
+ if (*mode == 0)
+ return -EINVAL;
+ return 0;
+}
+
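The three parsers above define the reworked writeback grammar: type=<idle|huge|huge_idle|incompressible>, page_index=<n>, and page_indexes=<lo>-<hi> with both bounds inclusive; a bare mode word is still accepted for the legacy format. A minimal usage sketch (device name and ranges are examples):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
    	/* Write back two page ranges in a single request. */
    	const char *req = "page_indexes=1-20 page_indexes=40-60";
    	int fd = open("/sys/block/zram0/writeback", O_WRONLY);

    	if (fd < 0 || write(fd, req, strlen(req)) < 0)
    		perror("zram writeback");
    	if (fd >= 0)
    		close(fd);
    	return 0;
    }

Each page_indexes= parameter triggers its own scan, so several ranges can be combined in one write.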
+static int scan_slots_for_writeback(struct zram *zram, u32 mode,
+ unsigned long lo, unsigned long hi,
+ struct zram_pp_ctl *ctl)
+{
+ u32 index = lo;
+
+ while (index < hi) {
+ bool ok = true;
+
+ zram_slot_lock(zram, index);
+ if (!zram_allocated(zram, index))
+ goto next;
+
+ if (zram_test_flag(zram, index, ZRAM_WB) ||
+ zram_test_flag(zram, index, ZRAM_SAME))
+ goto next;
+
+ if (mode & IDLE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_IDLE))
+ goto next;
+ if (mode & HUGE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_HUGE))
+ goto next;
+ if (mode & INCOMPRESSIBLE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
+ goto next;
+
+ ok = place_pp_slot(zram, ctl, index);
+next:
+ zram_slot_unlock(zram, index);
+ if (!ok)
+ break;
+ index++;
+ }
+
+ return 0;
+}
+
+static ssize_t writeback_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ u64 nr_pages = zram->disksize >> PAGE_SHIFT;
+ unsigned long lo = 0, hi = nr_pages;
+ struct zram_pp_ctl *ctl = NULL;
+ char *args, *param, *val;
+ ssize_t ret = len;
+	u32 mode = 0;
+	int err;
+
+ down_read(&zram->init_lock);
+ if (!init_done(zram)) {
+ up_read(&zram->init_lock);
+ return -EINVAL;
+ }
+
+ /* Do not permit concurrent post-processing actions. */
+ if (atomic_xchg(&zram->pp_in_progress, 1)) {
+ up_read(&zram->init_lock);
+ return -EAGAIN;
+ }
+
+ if (!zram->backing_dev) {
+ ret = -ENODEV;
+ goto release_init_lock;
+ }
+
+ ctl = init_pp_ctl();
+ if (!ctl) {
+ ret = -ENOMEM;
+ goto release_init_lock;
+ }
+
+ args = skip_spaces(buf);
+ while (*args) {
+ args = next_arg(args, &param, &val);
+
+ /*
+ * Workaround to support the old writeback interface.
+ *
+		 * The old writeback interface is inconsistent: it requires
+		 * key=value only for the page_index parameter, while the
+		 * writeback mode is a valueless parameter.
+		 *
+		 * All parameters are now required to have values, but to keep
+		 * the legacy format working we try to recognize a valueless
+		 * parameter as the (legacy) writeback mode, so that e.g. both
+		 * "huge_idle" and "type=huge_idle" select the same mode.
+ */
+ if (!val || !*val) {
+ err = parse_mode(param, &mode);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ break;
+ }
+
+ if (!strcmp(param, "type")) {
+ err = parse_mode(val, &mode);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ break;
+ }
+
+ if (!strcmp(param, "page_index")) {
+ err = parse_page_index(val, nr_pages, &lo, &hi);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ continue;
+ }
+
+ if (!strcmp(param, "page_indexes")) {
+ err = parse_page_indexes(val, nr_pages, &lo, &hi);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ continue;
+ }
+ }
+
+ err = zram_writeback_slots(zram, ctl);
+ if (err)
+ ret = err;
+
+release_init_lock:
release_pp_ctl(zram, ctl);
atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
@@ -1694,7 +1804,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
*/
handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
GFP_NOIO | __GFP_NOWARN |
- __GFP_HIGHMEM | __GFP_MOVABLE);
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
if (IS_ERR_VALUE(handle))
return PTR_ERR((void *)handle);
@@ -1761,7 +1871,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
handle = zs_malloc(zram->mem_pool, comp_len,
GFP_NOIO | __GFP_NOWARN |
- __GFP_HIGHMEM | __GFP_MOVABLE);
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
if (IS_ERR_VALUE(handle)) {
zcomp_stream_put(zstrm);
return PTR_ERR((void *)handle);
@@ -1981,10 +2091,15 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
* We are holding per-CPU stream mutex and entry lock so better
* avoid direct reclaim. Allocation error is not fatal since
* we still have the old object in the mem_pool.
+ *
+ * XXX: technically, the node we really want here is the node that holds
+ * the original compressed data. But that would require us to modify
+	 * the original compressed data. But that would require us to modify the
+	 * zsmalloc API to return this information. For now, we make do with
*/
handle_new = zs_malloc(zram->mem_pool, comp_len_new,
GFP_NOIO | __GFP_NOWARN |
- __GFP_HIGHMEM | __GFP_MOVABLE);
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
if (IS_ERR_VALUE(handle_new)) {
zcomp_stream_put(zstrm);
return PTR_ERR((void *)handle_new);
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 7771edf54fb3..4ab32abf0f48 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -56,18 +56,6 @@ config BT_HCIBTUSB_POLL_SYNC
Say Y here to enable USB poll_sync for Bluetooth USB devices by
default.
-config BT_HCIBTUSB_AUTO_ISOC_ALT
- bool "Automatically adjust alternate setting for Isoc endpoints"
- depends on BT_HCIBTUSB
- default y if CHROME_PLATFORMS
- help
- Say Y here to automatically adjusting the alternate setting for
- HCI_USER_CHANNEL whenever a SCO link is established.
-
- When enabled, btusb intercepts the HCI_EV_SYNC_CONN_COMPLETE packets
- and configures isoc endpoint alternate setting automatically when
- HCI_USER_CHANNEL is in use.
-
config BT_HCIBTUSB_BCM
bool "Broadcom protocol support"
depends on BT_HCIBTUSB
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 48e2f400957b..55cc1652bfe4 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -2719,7 +2719,7 @@ static int btintel_uefi_get_dsbr(u32 *dsbr_var)
} __packed data;
efi_status_t status;
- unsigned long data_size = 0;
+ unsigned long data_size = sizeof(data);
efi_guid_t guid = EFI_GUID(0xe65d8884, 0xd4af, 0x4b20, 0x8d, 0x03,
0x77, 0x2e, 0xcc, 0x3d, 0xa5, 0x31);
@@ -2730,15 +2730,9 @@ static int btintel_uefi_get_dsbr(u32 *dsbr_var)
return -EOPNOTSUPP;
status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size,
- NULL);
-
- if (status != EFI_BUFFER_TOO_SMALL || !data_size)
- return -EIO;
-
- status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size,
&data);
- if (status != EFI_SUCCESS)
+ if (status != EFI_SUCCESS || data_size != sizeof(data))
return -ENXIO;
*dsbr_var = data.dsbr;
@@ -3688,7 +3682,7 @@ int btintel_configure_setup(struct hci_dev *hdev, const char *driver_name)
}
EXPORT_SYMBOL_GPL(btintel_configure_setup);
-int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
+static int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
{
struct intel_tlv *tlv = (void *)&skb->data[5];
@@ -3716,7 +3710,6 @@ int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
recv_frame:
return hci_recv_frame(hdev, skb);
}
-EXPORT_SYMBOL_GPL(btintel_diagnostics);
int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 2aece3effa4e..1d12c4113c66 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -277,7 +277,6 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
int btintel_shutdown_combined(struct hci_dev *hdev);
void btintel_hw_error(struct hci_dev *hdev, u8 code);
void btintel_print_fseq_info(struct hci_dev *hdev);
-int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -411,9 +410,4 @@ static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
static inline void btintel_print_fseq_info(struct hci_dev *hdev)
{
}
-
-static inline int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
-{
- return -EOPNOTSUPP;
-}
#endif
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 0a759ea26fd3..50fe17f1e1d1 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -208,6 +208,96 @@ static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index,
memcpy(buf->data, skb->data, tfd->size);
}
+static inline void btintel_pcie_dump_debug_registers(struct hci_dev *hdev)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+ u16 cr_hia, cr_tia;
+ u32 reg, mbox_reg;
+ struct sk_buff *skb;
+ u8 buf[80];
+
+ skb = alloc_skb(1024, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ snprintf(buf, sizeof(buf), "%s", "---- Dump of debug registers ---");
+ bt_dev_dbg(hdev, "%s", buf);
+ skb_put_data(skb, buf, strlen(buf));
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
+ snprintf(buf, sizeof(buf), "boot stage: 0x%8.8x", reg);
+ bt_dev_dbg(hdev, "%s", buf);
+ skb_put_data(skb, buf, strlen(buf));
+ data->boot_stage_cache = reg;
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_STATUS_REG);
+ snprintf(buf, sizeof(buf), "ipc status: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_CONTROL_REG);
+ snprintf(buf, sizeof(buf), "ipc control: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG);
+ snprintf(buf, sizeof(buf), "ipc sleep control: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+	/* Read the mailbox status and registers */
+	reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MBOX_STATUS_REG);
+	snprintf(buf, sizeof(buf), "mbox status: 0x%8.8x", reg);
+	skb_put_data(skb, buf, strlen(buf));
+	bt_dev_dbg(hdev, "%s", buf);
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_1_REG);
+ snprintf(buf, sizeof(buf), "mbox_1: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_2_REG);
+ snprintf(buf, sizeof(buf), "mbox_2: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_3_REG);
+ snprintf(buf, sizeof(buf), "mbox_3: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_4_REG);
+ snprintf(buf, sizeof(buf), "mbox_4: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
+ cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
+ snprintf(buf, sizeof(buf), "rxq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+ cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
+ cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
+ snprintf(buf, sizeof(buf), "txq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ snprintf(buf, sizeof(buf), "--------------------------------");
+ bt_dev_dbg(hdev, "%s", buf);
+
+ hci_recv_diag(hdev, skb);
+}
+
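btintel_pcie_dump_debug_registers() formats each register into a bounded stack buffer, logs it with bt_dev_dbg(), and appends the same text to an skb that is finally handed to hci_recv_diag(), so one pass feeds both the kernel log and the monitor channel. The repeated per-register step could be condensed into a hypothetical helper like this (a sketch, not part of the patch):

	static void dump_reg(struct hci_dev *hdev, struct sk_buff *skb,
			     const char *name, u32 val)
	{
		char buf[80];

		snprintf(buf, sizeof(buf), "%s: 0x%8.8x", name, val);
		bt_dev_dbg(hdev, "%s", buf);		/* to the kernel log */
		skb_put_data(skb, buf, strlen(buf));	/* to the diag skb */
	}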
static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
struct sk_buff *skb)
{
@@ -237,8 +327,11 @@ static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
/* Wait for the complete interrupt - URBD0 */
ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS));
- if (!ret)
+ if (!ret) {
+ bt_dev_err(data->hdev, "tx completion timeout");
+ btintel_pcie_dump_debug_registers(data->hdev);
return -ETIME;
+ }
return 0;
}
@@ -756,6 +849,26 @@ static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data,
return 0;
}
+static inline bool btintel_pcie_in_lockdown(struct btintel_pcie_data *data)
+{
+ return (data->boot_stage_cache &
+ BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN) ||
+ (data->boot_stage_cache &
+ BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN);
+}
+
+static inline bool btintel_pcie_in_error(struct btintel_pcie_data *data)
+{
+ return (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR) ||
+ (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER);
+}
+
+static void btintel_pcie_msix_gp1_handler(struct btintel_pcie_data *data)
+{
+ bt_dev_err(data->hdev, "Received gp1 mailbox interrupt");
+ btintel_pcie_dump_debug_registers(data->hdev);
+}
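Both helpers above test individual bits of the cached boot-stage word; each pair of tests is equivalent to one check against a combined mask. A sketch with the same semantics:

	/* Equivalent single-mask form of btintel_pcie_in_lockdown(). */
	static inline bool boot_stage_in_lockdown(u32 boot_stage)
	{
		return boot_stage & (BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN |
				     BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN);
	}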
+
/* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
* BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
*/
@@ -779,6 +892,18 @@ static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
if (reg != data->img_resp_cache)
data->img_resp_cache = reg;
+ if (btintel_pcie_in_error(data)) {
+ bt_dev_err(data->hdev, "Controller in error state");
+ btintel_pcie_dump_debug_registers(data->hdev);
+ return;
+ }
+
+ if (btintel_pcie_in_lockdown(data)) {
+ bt_dev_err(data->hdev, "Controller in lockdown state");
+ btintel_pcie_dump_debug_registers(data->hdev);
+ return;
+ }
+
data->gp0_received = true;
old_ctxt = data->alive_intr_ctxt;
@@ -889,7 +1014,6 @@ static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *)skb->data;
- const char diagnostics_hdr[] = { 0x87, 0x80, 0x03 };
struct btintel_pcie_data *data = hci_get_drvdata(hdev);
if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
@@ -945,15 +1069,6 @@ static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
}
}
- /* Handle all diagnostics events separately. May still call
- * hci_recv_frame.
- */
- if (len >= sizeof(diagnostics_hdr) &&
- memcmp(&skb->data[2], diagnostics_hdr,
- sizeof(diagnostics_hdr)) == 0) {
- return btintel_diagnostics(hdev, skb);
- }
-
/* This is a debug event that comes from IML and OP image when it
* starts execution. There is no need to pass this event to the stack.
*/
@@ -1343,6 +1458,9 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP)
btintel_pcie_msix_hw_exp_handler(data);
+ if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
+ btintel_pcie_msix_gp1_handler(data);
+
/* This interrupt is triggered by the firmware after updating
* boot_stage register and image_response register
*/
@@ -2028,6 +2146,7 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
while ((err = btintel_pcie_setup_internal(hdev)) && fw_dl_retry++ < 1) {
bt_dev_err(hdev, "Firmware download retry count: %d",
fw_dl_retry);
+ btintel_pcie_dump_debug_registers(hdev);
err = btintel_pcie_reset_bt(data);
if (err) {
bt_dev_err(hdev, "Failed to do shr reset: %d", err);
diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
index 873178019cad..21b964b15c1c 100644
--- a/drivers/bluetooth/btintel_pcie.h
+++ b/drivers/bluetooth/btintel_pcie.h
@@ -12,10 +12,17 @@
#define BTINTEL_PCIE_CSR_HW_REV_REG (BTINTEL_PCIE_CSR_BASE + 0x028)
#define BTINTEL_PCIE_CSR_RF_ID_REG (BTINTEL_PCIE_CSR_BASE + 0x09C)
#define BTINTEL_PCIE_CSR_BOOT_STAGE_REG (BTINTEL_PCIE_CSR_BASE + 0x108)
+#define BTINTEL_PCIE_CSR_IPC_CONTROL_REG (BTINTEL_PCIE_CSR_BASE + 0x10C)
+#define BTINTEL_PCIE_CSR_IPC_STATUS_REG (BTINTEL_PCIE_CSR_BASE + 0x110)
#define BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG (BTINTEL_PCIE_CSR_BASE + 0x114)
#define BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG (BTINTEL_PCIE_CSR_BASE + 0x118)
#define BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG (BTINTEL_PCIE_CSR_BASE + 0x11C)
#define BTINTEL_PCIE_CSR_IMG_RESPONSE_REG (BTINTEL_PCIE_CSR_BASE + 0x12C)
+#define BTINTEL_PCIE_CSR_MBOX_1_REG (BTINTEL_PCIE_CSR_BASE + 0x170)
+#define BTINTEL_PCIE_CSR_MBOX_2_REG (BTINTEL_PCIE_CSR_BASE + 0x174)
+#define BTINTEL_PCIE_CSR_MBOX_3_REG (BTINTEL_PCIE_CSR_BASE + 0x178)
+#define BTINTEL_PCIE_CSR_MBOX_4_REG (BTINTEL_PCIE_CSR_BASE + 0x17C)
+#define BTINTEL_PCIE_CSR_MBOX_STATUS_REG (BTINTEL_PCIE_CSR_BASE + 0x180)
#define BTINTEL_PCIE_PRPH_DEV_ADDR_REG (BTINTEL_PCIE_CSR_BASE + 0x440)
#define BTINTEL_PCIE_PRPH_DEV_RD_REG (BTINTEL_PCIE_CSR_BASE + 0x458)
#define BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR (BTINTEL_PCIE_CSR_BASE + 0x460)
@@ -41,6 +48,9 @@
#define BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW (BIT(2))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN (BIT(10))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN (BIT(11))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR (BIT(12))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER (BIT(13))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_HALTED (BIT(14))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_MAC_ACCESS_ON (BIT(16))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_ALIVE (BIT(23))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY (BIT(24))
@@ -89,6 +99,7 @@ enum msix_fh_int_causes {
/* Causes for the HW register interrupts */
enum msix_hw_int_causes {
BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0 = BIT(0), /* cause 32 */
+ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1 = BIT(1), /* cause 33 */
BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP = BIT(3), /* cause 35 */
};
@@ -121,6 +132,14 @@ enum btintel_pcie_tlv_type {
BTINTEL_FW_BUILD,
};
+/* causes for the MBOX interrupts */
+enum msix_mbox_int_causes {
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1 = BIT(0), /* cause MBOX1 */
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2 = BIT(1), /* cause MBOX2 */
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3 = BIT(2), /* cause MBOX3 */
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4 = BIT(3), /* cause MBOX4 */
+};
+
#define BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE BIT(7)
/* Minimum and Maximum number of MSI-X Vector
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 07cd308f7abf..93932a0d8625 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -100,7 +100,9 @@ static int btmrvl_sdio_probe_of(struct device *dev,
}
/* Configure wakeup (enabled by default) */
- device_init_wakeup(dev, true);
+ ret = devm_device_init_wakeup(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init wakeup\n");
}
}
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 1d26207b2ba7..c16a3518b8ff 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -1414,7 +1414,7 @@ static int btmtksdio_probe(struct sdio_func *func,
*/
pm_runtime_put_noidle(bdev->dev);
- err = device_init_wakeup(bdev->dev, true);
+ err = devm_device_init_wakeup(bdev->dev);
if (err)
bt_dev_err(hdev, "failed to initialize device wakeup");
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 604ab2bba231..b34623a69b8a 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -17,6 +17,7 @@
#include <linux/crc32.h>
#include <linux/string_helpers.h>
#include <linux/gpio/consumer.h>
+#include <linux/of_irq.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -143,7 +144,9 @@ struct ps_data {
bool driver_sent_cmd;
u16 h2c_ps_interval;
u16 c2h_ps_interval;
+ bool wakeup_source;
struct gpio_desc *h2c_ps_gpio;
+ s32 irq_handler;
struct hci_dev *hdev;
struct work_struct work;
struct timer_list ps_timer;
@@ -476,12 +479,21 @@ static void ps_timeout_func(struct timer_list *t)
}
}
+static irqreturn_t ps_host_wakeup_irq_handler(int irq, void *priv)
+{
+ struct btnxpuart_dev *nxpdev = (struct btnxpuart_dev *)priv;
+
+ bt_dev_dbg(nxpdev->hdev, "Host wakeup interrupt");
+ return IRQ_HANDLED;
+}
static int ps_setup(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct serdev_device *serdev = nxpdev->serdev;
struct ps_data *psdata = &nxpdev->psdata;
+ int ret;
+ /* Out-Of-Band Device Wakeup */
psdata->h2c_ps_gpio = devm_gpiod_get_optional(&serdev->dev, "device-wakeup",
GPIOD_OUT_LOW);
if (IS_ERR(psdata->h2c_ps_gpio)) {
@@ -493,11 +505,37 @@ static int ps_setup(struct hci_dev *hdev)
if (device_property_read_u8(&serdev->dev, "nxp,wakein-pin", &psdata->h2c_wakeup_gpio)) {
psdata->h2c_wakeup_gpio = 0xff; /* 0xff: use default pin/gpio */
} else if (!psdata->h2c_ps_gpio) {
- bt_dev_warn(hdev, "nxp,wakein-pin property without device-wakeup GPIO");
+ bt_dev_warn(hdev, "nxp,wakein-pin property without device-wakeup-gpios");
psdata->h2c_wakeup_gpio = 0xff;
}
- device_property_read_u8(&serdev->dev, "nxp,wakeout-pin", &psdata->c2h_wakeup_gpio);
+ /* Out-Of-Band Host Wakeup */
+ if (of_property_read_bool(serdev->dev.of_node, "wakeup-source")) {
+ psdata->irq_handler = of_irq_get_byname(serdev->dev.of_node, "wakeup");
+ bt_dev_info(nxpdev->hdev, "irq_handler: %d", psdata->irq_handler);
+ if (psdata->irq_handler > 0)
+ psdata->wakeup_source = true;
+ }
+
+ if (device_property_read_u8(&serdev->dev, "nxp,wakeout-pin", &psdata->c2h_wakeup_gpio)) {
+ psdata->c2h_wakeup_gpio = 0xff;
+ if (psdata->wakeup_source) {
+ bt_dev_warn(hdev, "host wakeup interrupt without nxp,wakeout-pin");
+ psdata->wakeup_source = false;
+ }
+ } else if (!psdata->wakeup_source) {
+ bt_dev_warn(hdev, "nxp,wakeout-pin property without host wakeup interrupt");
+ psdata->c2h_wakeup_gpio = 0xff;
+ }
+
+ if (psdata->wakeup_source) {
+ ret = devm_request_irq(&serdev->dev, psdata->irq_handler,
+ ps_host_wakeup_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ dev_name(&serdev->dev), nxpdev);
+ disable_irq(psdata->irq_handler);
+ device_init_wakeup(&serdev->dev, true);
+ }
psdata->hdev = hdev;
INIT_WORK(&psdata->work, ps_work_func);
@@ -637,12 +675,10 @@ static void ps_init(struct hci_dev *hdev)
psdata->ps_state = PS_STATE_AWAKE;
- if (psdata->c2h_wakeup_gpio) {
+ if (psdata->c2h_wakeup_gpio != 0xff)
psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_GPIO;
- } else {
+ else
psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE;
- psdata->c2h_wakeup_gpio = 0xff;
- }
psdata->cur_h2c_wakeupmode = WAKEUP_METHOD_INVALID;
if (psdata->h2c_ps_gpio)
@@ -1821,6 +1857,11 @@ static int nxp_serdev_suspend(struct device *dev)
struct ps_data *psdata = &nxpdev->psdata;
ps_control(psdata->hdev, PS_STATE_SLEEP);
+
+ if (psdata->wakeup_source) {
+ enable_irq_wake(psdata->irq_handler);
+ enable_irq(psdata->irq_handler);
+ }
return 0;
}
@@ -1829,6 +1870,11 @@ static int nxp_serdev_resume(struct device *dev)
struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev);
struct ps_data *psdata = &nxpdev->psdata;
+ if (psdata->wakeup_source) {
+ disable_irq(psdata->irq_handler);
+ disable_irq_wake(psdata->irq_handler);
+ }
+
ps_control(psdata->hdev, PS_STATE_AWAKE);
return 0;
}
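The suspend/resume hunks above arm the out-of-band host-wakeup IRQ only while the system sleeps: the IRQ is requested at probe time and immediately disabled, then enabled (and marked as a wake source) on suspend, and disarmed in the reverse order on resume. A sketch of the pairing, where `irq` stands in for psdata->irq_handler (hypothetical function names):

	static int my_suspend(struct device *dev)
	{
		enable_irq_wake(irq);	/* let this IRQ wake the system */
		enable_irq(irq);	/* start listening for the wake edge */
		return 0;
	}

	static int my_resume(struct device *dev)
	{
		disable_irq(irq);	/* stop listening while awake */
		disable_irq_wake(irq);
		return 0;
	}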
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 256b451bbe06..9ab661d2d1e6 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -21,6 +21,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci_drv.h>
#include "btintel.h"
#include "btbcm.h"
@@ -34,7 +35,6 @@ static bool force_scofix;
static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND);
static bool enable_poll_sync = IS_ENABLED(CONFIG_BT_HCIBTUSB_POLL_SYNC);
static bool reset = true;
-static bool auto_isoc_alt = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTO_ISOC_ALT);
static struct usb_driver btusb_driver;
@@ -513,6 +513,7 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
/* Realtek 8851BE Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0xb850), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3600), .driver_info = BTUSB_REALTEK },
/* Realtek 8852AE Bluetooth devices */
@@ -678,6 +679,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3584), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3605), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3607), .driver_info = BTUSB_MEDIATEK |
@@ -716,8 +719,12 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3608), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3613), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3628), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3630), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
@@ -1118,42 +1125,6 @@ static inline void btusb_free_frags(struct btusb_data *data)
spin_unlock_irqrestore(&data->rxlock, flags);
}
-static void btusb_sco_connected(struct btusb_data *data, struct sk_buff *skb)
-{
- struct hci_event_hdr *hdr = (void *) skb->data;
- struct hci_ev_sync_conn_complete *ev =
- (void *) skb->data + sizeof(*hdr);
- struct hci_dev *hdev = data->hdev;
- unsigned int notify_air_mode;
-
- if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
- return;
-
- if (skb->len < sizeof(*hdr) || hdr->evt != HCI_EV_SYNC_CONN_COMPLETE)
- return;
-
- if (skb->len != sizeof(*hdr) + sizeof(*ev) || ev->status)
- return;
-
- switch (ev->air_mode) {
- case BT_CODEC_CVSD:
- notify_air_mode = HCI_NOTIFY_ENABLE_SCO_CVSD;
- break;
-
- case BT_CODEC_TRANSPARENT:
- notify_air_mode = HCI_NOTIFY_ENABLE_SCO_TRANSP;
- break;
-
- default:
- return;
- }
-
- bt_dev_info(hdev, "enabling SCO with air mode %u", ev->air_mode);
- data->sco_num = 1;
- data->air_mode = notify_air_mode;
- schedule_work(&data->work);
-}
-
static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
{
if (data->intr_interval) {
@@ -1161,10 +1132,6 @@ static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
schedule_delayed_work(&data->rx_work, 0);
}
- /* Configure altsetting for HCI_USER_CHANNEL on SCO connected */
- if (auto_isoc_alt && hci_dev_test_flag(data->hdev, HCI_USER_CHANNEL))
- btusb_sco_connected(data, skb);
-
return data->recv_event(data->hdev, skb);
}
@@ -3753,31 +3720,133 @@ static const struct file_operations force_poll_sync_fops = {
.llseek = default_llseek,
};
-static ssize_t isoc_alt_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+#define BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS \
+ hci_opcode_pack(HCI_DRV_OGF_DRIVER_SPECIFIC, 0x0000)
+#define BTUSB_HCI_DRV_SUPPORTED_ALTSETTINGS_SIZE 0
+struct btusb_hci_drv_rp_supported_altsettings {
+ __u8 num;
+ __u8 altsettings[];
+} __packed;
+
+#define BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING \
+ hci_opcode_pack(HCI_DRV_OGF_DRIVER_SPECIFIC, 0x0001)
+#define BTUSB_HCI_DRV_SWITCH_ALTSETTING_SIZE 1
+struct btusb_hci_drv_cmd_switch_altsetting {
+ __u8 altsetting;
+} __packed;
+
+static const struct {
+ u16 opcode;
+ const char *desc;
+} btusb_hci_drv_supported_commands[] = {
+ /* Common commands */
+ { HCI_DRV_OP_READ_INFO, "Read Info" },
+
+ /* Driver specific commands */
+ { BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS, "Supported Altsettings" },
+ { BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING, "Switch Altsetting" },
+};
+static int btusb_hci_drv_read_info(struct hci_dev *hdev, void *data,
+ u16 data_len)
{
- struct btusb_data *data = dev_get_drvdata(dev);
+ struct hci_drv_rp_read_info *rp;
+ size_t rp_size;
+ int err, i;
+ u16 opcode, num_supported_commands =
+ ARRAY_SIZE(btusb_hci_drv_supported_commands);
+
+ rp_size = sizeof(*rp) + num_supported_commands * 2;
+
+ rp = kmalloc(rp_size, GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
+
+ strscpy_pad(rp->driver_name, btusb_driver.name);
- return sysfs_emit(buf, "%d\n", data->isoc_altsetting);
+ rp->num_supported_commands = cpu_to_le16(num_supported_commands);
+ for (i = 0; i < num_supported_commands; i++) {
+ opcode = btusb_hci_drv_supported_commands[i].opcode;
+ bt_dev_info(hdev,
+ "Supported HCI Drv command (0x%02x|0x%04x): %s",
+ hci_opcode_ogf(opcode),
+ hci_opcode_ocf(opcode),
+ btusb_hci_drv_supported_commands[i].desc);
+ rp->supported_commands[i] = cpu_to_le16(opcode);
+ }
+
+ err = hci_drv_cmd_complete(hdev, HCI_DRV_OP_READ_INFO,
+ HCI_DRV_STATUS_SUCCESS, rp, rp_size);
+
+ kfree(rp);
+ return err;
}
-static ssize_t isoc_alt_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int btusb_hci_drv_supported_altsettings(struct hci_dev *hdev, void *data,
+ u16 data_len)
{
- struct btusb_data *data = dev_get_drvdata(dev);
- int alt;
- int ret;
+ struct btusb_data *drvdata = hci_get_drvdata(hdev);
+ struct btusb_hci_drv_rp_supported_altsettings *rp;
+ size_t rp_size;
+ int err;
+ u8 i;
+
+	/* There are at most 7 altsettings (0 - 6) */
+	rp = kmalloc(sizeof(*rp) + 7, GFP_KERNEL);
+	if (!rp)
+		return -ENOMEM;
+
+ rp->num = 0;
+ if (!drvdata->isoc)
+ goto done;
+
+ for (i = 0; i <= 6; i++) {
+ if (btusb_find_altsetting(drvdata, i))
+ rp->altsettings[rp->num++] = i;
+ }
+
+done:
+ rp_size = sizeof(*rp) + rp->num;
+
+ err = hci_drv_cmd_complete(hdev, BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS,
+ HCI_DRV_STATUS_SUCCESS, rp, rp_size);
+ kfree(rp);
+ return err;
+}
- if (kstrtoint(buf, 10, &alt))
- return -EINVAL;
+static int btusb_hci_drv_switch_altsetting(struct hci_dev *hdev, void *data,
+ u16 data_len)
+{
+ struct btusb_hci_drv_cmd_switch_altsetting *cmd = data;
+ u8 status;
- ret = btusb_switch_alt_setting(data->hdev, alt);
- return ret < 0 ? ret : count;
+ if (cmd->altsetting > 6) {
+ status = HCI_DRV_STATUS_INVALID_PARAMETERS;
+ } else {
+ if (btusb_switch_alt_setting(hdev, cmd->altsetting))
+ status = HCI_DRV_STATUS_UNSPECIFIED_ERROR;
+ else
+ status = HCI_DRV_STATUS_SUCCESS;
+ }
+
+ return hci_drv_cmd_status(hdev, BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING,
+ status);
}
-static DEVICE_ATTR_RW(isoc_alt);
+static const struct hci_drv_handler btusb_hci_drv_common_handlers[] = {
+ { btusb_hci_drv_read_info, HCI_DRV_READ_INFO_SIZE },
+};
+
+static const struct hci_drv_handler btusb_hci_drv_specific_handlers[] = {
+ { btusb_hci_drv_supported_altsettings,
+ BTUSB_HCI_DRV_SUPPORTED_ALTSETTINGS_SIZE },
+ { btusb_hci_drv_switch_altsetting,
+ BTUSB_HCI_DRV_SWITCH_ALTSETTING_SIZE },
+};
+
+static struct hci_drv btusb_hci_drv = {
+ .common_handler_count = ARRAY_SIZE(btusb_hci_drv_common_handlers),
+ .common_handlers = btusb_hci_drv_common_handlers,
+ .specific_handler_count = ARRAY_SIZE(btusb_hci_drv_specific_handlers),
+ .specific_handlers = btusb_hci_drv_specific_handlers,
+};
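The new btusb_hci_drv above wires two handler tables into the hci_drv core: common opcodes (Read Info) and driver-specific ones (the altsetting commands), each entry pairing a handler with its expected parameter length. The dispatch itself lives in the hci_drv core rather than in this diff; a hypothetical sketch of how an opcode might be mapped to a handler entry:

	/* Hypothetical lookup; the real one is in the hci_drv core. */
	static const struct hci_drv_handler *
	hci_drv_lookup(const struct hci_drv *drv, u16 opcode)
	{
		u16 ocf = hci_opcode_ocf(opcode);

		if (hci_opcode_ogf(opcode) == HCI_DRV_OGF_DRIVER_SPECIFIC)
			return ocf < drv->specific_handler_count ?
				&drv->specific_handlers[ocf] : NULL;
		return ocf < drv->common_handler_count ?
			&drv->common_handlers[ocf] : NULL;
	}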
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
@@ -3918,12 +3987,13 @@ static int btusb_probe(struct usb_interface *intf,
data->reset_gpio = reset_gpio;
}
- hdev->open = btusb_open;
- hdev->close = btusb_close;
- hdev->flush = btusb_flush;
- hdev->send = btusb_send_frame;
- hdev->notify = btusb_notify;
- hdev->wakeup = btusb_wakeup;
+ hdev->open = btusb_open;
+ hdev->close = btusb_close;
+ hdev->flush = btusb_flush;
+ hdev->send = btusb_send_frame;
+ hdev->notify = btusb_notify;
+ hdev->wakeup = btusb_wakeup;
+ hdev->hci_drv = &btusb_hci_drv;
#ifdef CONFIG_PM
err = btusb_config_oob_wake(hdev);
@@ -4142,10 +4212,6 @@ static int btusb_probe(struct usb_interface *intf,
data->isoc, data);
if (err < 0)
goto out_free_dev;
-
- err = device_create_file(&intf->dev, &dev_attr_isoc_alt);
- if (err)
- goto out_free_dev;
}
if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) && data->diag) {
@@ -4192,10 +4258,8 @@ static void btusb_disconnect(struct usb_interface *intf)
hdev = data->hdev;
usb_set_intfdata(data->intf, NULL);
- if (data->isoc) {
- device_remove_file(&intf->dev, &dev_attr_isoc_alt);
+ if (data->isoc)
usb_set_intfdata(data->isoc, NULL);
- }
if (data->diag)
usb_set_intfdata(data->diag, NULL);
diff --git a/drivers/bluetooth/hci_aml.c b/drivers/bluetooth/hci_aml.c
index dc9541e76d81..1394c575aa6d 100644
--- a/drivers/bluetooth/hci_aml.c
+++ b/drivers/bluetooth/hci_aml.c
@@ -313,8 +313,7 @@ static int aml_download_firmware(struct hci_dev *hdev, const char *fw_name)
goto exit;
exit:
- if (firmware)
- release_firmware(firmware);
+ release_firmware(firmware);
return ret;
}
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index ee29162da4ee..91ef99c42344 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -395,10 +395,7 @@ static struct attribute *gisb_arb_sysfs_attrs[] = {
&dev_attr_gisb_arb_timeout.attr,
NULL,
};
-
-static struct attribute_group gisb_arb_sysfs_attr_group = {
- .attrs = gisb_arb_sysfs_attrs,
-};
+ATTRIBUTE_GROUPS(gisb_arb_sysfs);
static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
{ .compatible = "brcm,gisb-arb", .data = gisb_offsets_bcm7445 },
@@ -490,10 +487,6 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
}
}
- err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
- if (err)
- return err;
-
platform_set_drvdata(pdev, gdev);
list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
@@ -550,6 +543,7 @@ static struct platform_driver brcmstb_gisb_arb_driver = {
.name = "brcm-gisb-arb",
.of_match_table = brcmstb_gisb_arb_of_match,
.pm = &brcmstb_gisb_arb_pm_ops,
+ .dev_groups = gisb_arb_sysfs_groups,
},
};
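The brcmstb_gisb change above swaps a manual sysfs_create_group() call in probe (plus its error path and the matching remove-side call) for driver-core-managed attribute groups: ATTRIBUTE_GROUPS() generates the `_groups` array that .dev_groups points at, and the core creates and removes the files around probe/remove. The general shape, with a hypothetical attribute name:

	static struct attribute *my_attrs[] = {
		&dev_attr_example.attr,		/* hypothetical attribute */
		NULL,
	};
	ATTRIBUTE_GROUPS(my);			/* generates my_groups[] */

	static struct platform_driver my_driver = {
		.driver = {
			.name		= "my-driver",
			.dev_groups	= my_groups,
		},
	};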
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 52053f7c6d9a..c63a7e688db6 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -806,8 +806,6 @@ int dprc_cleanup(struct fsl_mc_device *mc_dev)
dev_set_msi_domain(&mc_dev->dev, NULL);
}
- fsl_mc_cleanup_all_resource_pools(mc_dev);
-
/* if this step fails we cannot go further with cleanup as there is no way of
* communicating with the firmware
*/
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
index dd1b5c0fb7e2..38d40c09b719 100644
--- a/drivers/bus/fsl-mc/dprc.c
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -489,7 +489,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
cmd_params->obj_id = cpu_to_le32(obj_id);
- strscpy_pad(cmd_params->obj_type, obj_type, 16);
+ strscpy(cmd_params->obj_type, obj_type);
/* send command to mc */
return mc_send_command(mc_io, &cmd);
@@ -561,7 +561,7 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
cmd_params->obj_id = cpu_to_le32(obj_id);
cmd_params->region_index = region_index;
- strscpy_pad(cmd_params->obj_type, obj_type, 16);
+ strscpy(cmd_params->obj_type, obj_type);
/* send command to mc */
err = mc_send_command(mc_io, &cmd);
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index 6c3beb82dd1b..d2ea59471323 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -555,27 +555,6 @@ void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
}
}
-static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
- enum fsl_mc_pool_type pool_type)
-{
- struct fsl_mc_resource *resource;
- struct fsl_mc_resource *next;
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
- struct fsl_mc_resource_pool *res_pool =
- &mc_bus->resource_pools[pool_type];
-
- list_for_each_entry_safe(resource, next, &res_pool->free_list, node)
- devm_kfree(&mc_bus_dev->dev, resource);
-}
-
-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
-{
- int pool_type;
-
- for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
- fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
-}
-
/*
* fsl_mc_allocator_probe - callback invoked when an allocatable device is
* being added to the system
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index a8be8cf246fb..7671bd158545 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -139,9 +139,9 @@ static int fsl_mc_bus_uevent(const struct device *dev, struct kobj_uevent_env *e
static int fsl_mc_dma_configure(struct device *dev)
{
+ const struct device_driver *drv = READ_ONCE(dev->driver);
struct device *dma_dev = dev;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
u32 input_id = mc_dev->icid;
int ret;
@@ -153,8 +153,8 @@ static int fsl_mc_dma_configure(struct device *dev)
else
ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
- /* @mc_drv may not be valid when we're called from the IOMMU layer */
- if (!ret && dev->driver && !mc_drv->driver_managed_dma) {
+ /* @drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && drv && !to_fsl_mc_driver(drv)->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
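fsl_mc_dma_configure() can be entered from the IOMMU layer while dev->driver is being set or cleared; the fix above snapshots the pointer once with READ_ONCE() and uses only that snapshot, so the NULL test and the later dereference cannot observe different values. The essential pattern:

	const struct device_driver *drv = READ_ONCE(dev->driver);

	/* Test and use the same snapshot, never read dev->driver twice. */
	if (drv && !to_fsl_mc_driver(drv)->driver_managed_dma)
		ret = iommu_device_use_default_domain(dev);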
@@ -906,8 +906,10 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
error_cleanup_dev:
kfree(mc_dev->regions);
- kfree(mc_bus);
- kfree(mc_dev);
+ if (mc_bus)
+ kfree(mc_bus);
+ else
+ kfree(mc_dev);
return error;
}
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index e1b7ec3ed1a7..beed4c53533d 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -629,8 +629,6 @@ int __init fsl_mc_allocator_driver_init(void);
void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-
int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
enum fsl_mc_pool_type pool_type,
struct fsl_mc_resource
diff --git a/drivers/bus/fsl-mc/fsl-mc-uapi.c b/drivers/bus/fsl-mc/fsl-mc-uapi.c
index 9c4c1395fcdb..823969e4159c 100644
--- a/drivers/bus/fsl-mc/fsl-mc-uapi.c
+++ b/drivers/bus/fsl-mc/fsl-mc-uapi.c
@@ -48,6 +48,7 @@ enum fsl_mc_cmd_index {
DPRC_GET_POOL,
DPRC_GET_POOL_COUNT,
DPRC_GET_CONNECTION,
+ DPRC_GET_MEM,
DPCI_GET_LINK_STATE,
DPCI_GET_PEER_ATTR,
DPAIOP_GET_SL_VERSION,
@@ -194,6 +195,12 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
.token = true,
.size = 32,
},
+ [DPRC_GET_MEM] = {
+ .cmdid_value = 0x16D0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ },
[DPCI_GET_LINK_STATE] = {
.cmdid_value = 0x0E10,
@@ -275,13 +282,13 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
.size = 8,
},
[DPSW_GET_TAILDROP] = {
- .cmdid_value = 0x0A80,
+ .cmdid_value = 0x0A90,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 14,
},
[DPSW_SET_TAILDROP] = {
- .cmdid_value = 0x0A90,
+ .cmdid_value = 0x0A80,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 24,
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index a0ad7866cbfc..cd8754763f40 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -214,12 +214,19 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
if (error < 0)
goto error_cleanup_resource;
- dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
- &dpmcp_dev->dev,
- DL_FLAG_AUTOREMOVE_CONSUMER);
- if (!dpmcp_dev->consumer_link) {
- error = -EINVAL;
- goto error_cleanup_mc_io;
+ /* If the DPRC device itself tries to allocate a portal (usually for
+ * UAPI interaction), don't add a device link between them since the
+ * DPMCP device is an actual child device of the DPRC and a reverse
+ * dependency is not allowed.
+ */
+ if (mc_dev != mc_bus_dev) {
+ dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
+ &dpmcp_dev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!dpmcp_dev->consumer_link) {
+ error = -EINVAL;
+ goto error_cleanup_mc_io;
+ }
}
*new_mc_io = mc_io;
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
index f2052cd0a051..b22c59d57c8f 100644
--- a/drivers/bus/fsl-mc/mc-sys.c
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -19,7 +19,7 @@
/*
* Timeout in milliseconds to wait for the completion of an MC command
*/
-#define MC_CMD_COMPLETION_TIMEOUT_MS 500
+#define MC_CMD_COMPLETION_TIMEOUT_MS 15000
/*
* usleep_range() min and max values used to throttle down polling
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 1e57ebfb7622..6c3e5c5dae10 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -737,9 +737,9 @@ static int moxtet_irq_setup(struct moxtet *moxtet)
{
int i, ret;
- moxtet->irq.domain = irq_domain_add_simple(moxtet->dev->of_node,
- MOXTET_NIRQS, 0,
- &moxtet_irq_domain, moxtet);
+ moxtet->irq.domain = irq_domain_create_simple(of_fwnode_handle(moxtet->dev->of_node),
+ MOXTET_NIRQS, 0,
+ &moxtet_irq_domain, moxtet);
if (moxtet->irq.domain == NULL) {
dev_err(moxtet->dev, "Could not add IRQ domain\n");
return -ENOMEM;
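The moxtet hunk above follows the irqdomain API migration: irq_domain_add_simple() took a struct device_node, while irq_domain_create_simple() takes the more generic struct fwnode_handle, with of_fwnode_handle() bridging from OF. A sketch of the converted call, with hypothetical ops/priv names:

	domain = irq_domain_create_simple(of_fwnode_handle(dev->of_node),
					  nr_irqs, 0,
					  &my_domain_ops,	/* hypothetical */
					  priv);
	if (!domain)
		return -ENOMEM;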
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index f67b927ae4ca..9f624e5da991 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -677,51 +677,6 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
return 0;
}
-/* Interconnect instances to probe before l4_per instances */
-static struct resource early_bus_ranges[] = {
- /* am3/4 l4_wkup */
- { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
- /* omap4/5 and dra7 l4_cfg */
- { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
- /* omap4 l4_wkup */
- { .start = 0x4a300000, .end = 0x4a300000 + 0x30000, },
- /* omap5 and dra7 l4_wkup without dra7 dcan segment */
- { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, },
-};
-
-static atomic_t sysc_defer = ATOMIC_INIT(10);
-
-/**
- * sysc_defer_non_critical - defer non_critical interconnect probing
- * @ddata: device driver data
- *
- * We want to probe l4_cfg and l4_wkup interconnect instances before any
- * l4_per instances as l4_per instances depend on resources on l4_cfg and
- * l4_wkup interconnects.
- */
-static int sysc_defer_non_critical(struct sysc *ddata)
-{
- struct resource *res;
- int i;
-
- if (!atomic_read(&sysc_defer))
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
- res = &early_bus_ranges[i];
- if (ddata->module_pa >= res->start &&
- ddata->module_pa <= res->end) {
- atomic_set(&sysc_defer, 0);
-
- return 0;
- }
- }
-
- atomic_dec_if_positive(&sysc_defer);
-
- return -EPROBE_DEFER;
-}
-
static struct device_node *stdout_path;
static void sysc_init_stdout_path(struct sysc *ddata)
@@ -947,10 +902,6 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
if (error)
return error;
- error = sysc_defer_non_critical(ddata);
- if (error)
- return error;
-
sysc_check_children(ddata);
if (!of_property_present(np, "reg"))
@@ -2036,6 +1987,21 @@ static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
}
+static void sysc_module_enable_quirk_pruss(struct sysc *ddata)
+{
+ u32 reg;
+
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /*
+	 * Clearing the SYSC_PRUSS_STANDBY_INIT bit updates the OCP master
+ * port configuration to enable memory access outside of the
+ * PRU-ICSS subsystem.
+ */
+ reg &= (~SYSC_PRUSS_STANDBY_INIT);
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+}
+
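The new enable quirk is the mirror image of the existing disable quirk: a read-modify-write of SYSCONFIG that clears SYSC_PRUSS_STANDBY_INIT on enable, so the PRU-ICSS OCP master port can reach memory outside the subsystem. The generic shape of such a quirk:

	u32 reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);

	reg &= ~SYSC_PRUSS_STANDBY_INIT;	/* clear on enable */
	sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);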
static void sysc_init_module_quirks(struct sysc *ddata)
{
if (ddata->legacy_mode || !ddata->name)
@@ -2088,8 +2054,10 @@ static void sysc_init_module_quirks(struct sysc *ddata)
ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
}
- if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS) {
+ ddata->module_enable_quirk = sysc_module_enable_quirk_pruss;
ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
+ }
}
static int sysc_clockdomain_init(struct sysc *ddata)
diff --git a/drivers/cache/sifive_ccache.c b/drivers/cache/sifive_ccache.c
index 6874b72ec59d..e1a283805ea7 100644
--- a/drivers/cache/sifive_ccache.c
+++ b/drivers/cache/sifive_ccache.c
@@ -118,6 +118,8 @@ static void ccache_config_read(void)
}
static const struct of_device_id sifive_ccache_ids[] = {
+ { .compatible = "eswin,eic7700-l3-cache",
+ .data = (void *)(QUIRK_NONSTANDARD_CACHE_OPS) },
{ .compatible = "sifive,fu540-c000-ccache" },
{ .compatible = "sifive,fu740-c000-ccache" },
{ .compatible = "starfive,jh7100-ccache",
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index b163e043c687..21a10552da61 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -3677,8 +3677,7 @@ static void cdrom_sysctl_register(void)
static void cdrom_sysctl_unregister(void)
{
- if (cdrom_sysctl_header)
- unregister_sysctl_table(cdrom_sysctl_header);
+ unregister_sysctl_table(cdrom_sysctl_header);
}
#else /* CONFIG_SYSCTL */
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 8e41731d3642..bf490967241a 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -16,7 +16,7 @@
#include <linux/mmzone.h>
#include <asm/page.h> /* PAGE_SIZE */
#include <asm/e820/api.h>
-#include <asm/amd_nb.h>
+#include <asm/amd/nb.h>
#include <asm/gart.h>
#include "agp.h"
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index ef30445527a2..bcc26785175d 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -53,6 +53,7 @@ struct intel_gtt_driver {
* of the mmio register file, that's done in the generic code. */
void (*cleanup)(void);
void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
+ dma_addr_t (*read_entry)(unsigned int entry, bool *is_present, bool *is_local);
/* Flags is a more or less chipset specific opaque value.
* For chipsets that need to support old ums (non-gem) code, this
* needs to be identical to the various supported agp memory types! */
@@ -336,6 +337,19 @@ static void i810_write_entry(dma_addr_t addr, unsigned int entry,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
+static dma_addr_t i810_read_entry(unsigned int entry,
+ bool *is_present, bool *is_local)
+{
+ u32 val;
+
+ val = readl(intel_private.gtt + entry);
+
+ *is_present = val & I810_PTE_VALID;
+ *is_local = val & I810_PTE_LOCAL;
+
+ return val & ~0xfff;
+}
+
static resource_size_t intel_gtt_stolen_size(void)
{
u16 gmch_ctrl;
@@ -741,6 +755,19 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
+static dma_addr_t i830_read_entry(unsigned int entry,
+ bool *is_present, bool *is_local)
+{
+ u32 val;
+
+ val = readl(intel_private.gtt + entry);
+
+ *is_present = val & I810_PTE_VALID;
+ *is_local = false;
+
+ return val & ~0xfff;
+}
+
bool intel_gmch_enable_gtt(void)
{
u8 __iomem *reg;
@@ -878,6 +905,13 @@ void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
}
EXPORT_SYMBOL(intel_gmch_gtt_insert_sg_entries);
+dma_addr_t intel_gmch_gtt_read_entry(unsigned int pg,
+ bool *is_present, bool *is_local)
+{
+ return intel_private.driver->read_entry(pg, is_present, is_local);
+}
+EXPORT_SYMBOL(intel_gmch_gtt_read_entry);
+
#if IS_ENABLED(CONFIG_AGP_INTEL)
static void intel_gmch_gtt_insert_pages(unsigned int first_entry,
unsigned int num_entries,
@@ -1126,6 +1160,19 @@ static void i965_write_entry(dma_addr_t addr,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
+static dma_addr_t i965_read_entry(unsigned int entry,
+ bool *is_present, bool *is_local)
+{
+ u64 val;
+
+ val = readl(intel_private.gtt + entry);
+
+ *is_present = val & I810_PTE_VALID;
+ *is_local = false;
+
+ return ((val & 0xf0) << 28) | (val & ~0xfff);
+}
+
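The i965 decode above is the inverse of the long-standing write path: with a 36-bit DMA mask, address bits 32-35 are stored in PTE bits 4-7, so reading an entry back means shifting those bits up by 28 and masking off the low flag bits. A sketch of the decode side:

	u64 pte = readl(intel_private.gtt + entry);	/* 32-bit PTE, widened */
	bool present = pte & I810_PTE_VALID;
	dma_addr_t addr = ((pte & 0xf0) << 28)	/* addr bits 32-35 from PTE bits 4-7 */
			| (pte & ~0xfff);	/* addr bits 12-31 */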
static int i9xx_setup(void)
{
phys_addr_t reg_addr;
@@ -1187,6 +1234,7 @@ static const struct intel_gtt_driver i81x_gtt_driver = {
.cleanup = i810_cleanup,
.check_flags = i830_check_flags,
.write_entry = i810_write_entry,
+ .read_entry = i810_read_entry,
};
static const struct intel_gtt_driver i8xx_gtt_driver = {
.gen = 2,
@@ -1194,6 +1242,7 @@ static const struct intel_gtt_driver i8xx_gtt_driver = {
.setup = i830_setup,
.cleanup = i830_cleanup,
.write_entry = i830_write_entry,
+ .read_entry = i830_read_entry,
.dma_mask_size = 32,
.check_flags = i830_check_flags,
.chipset_flush = i830_chipset_flush,
@@ -1205,6 +1254,7 @@ static const struct intel_gtt_driver i915_gtt_driver = {
.cleanup = i9xx_cleanup,
/* i945 is the last gpu to need phys mem (for overlay and cursors). */
.write_entry = i830_write_entry,
+ .read_entry = i830_read_entry,
.dma_mask_size = 32,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1215,6 +1265,7 @@ static const struct intel_gtt_driver g33_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1225,6 +1276,7 @@ static const struct intel_gtt_driver pineview_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1235,6 +1287,7 @@ static const struct intel_gtt_driver i965_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1244,6 +1297,7 @@ static const struct intel_gtt_driver g4x_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1254,6 +1308,7 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index e424360fb4a1..4787391bb6b4 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -11,6 +11,7 @@
#include <linux/page-flags.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
+#include <asm/msr.h>
#include "agp.h"
/* NVIDIA registers */
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 143406bc6939..d2b00458761e 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -37,6 +37,7 @@ struct atmel_trng {
struct clk *clk;
void __iomem *base;
struct hwrng rng;
+ struct device *dev;
bool has_half_rate;
};
@@ -59,9 +60,9 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
u32 *data = buf;
int ret;
- ret = pm_runtime_get_sync((struct device *)trng->rng.priv);
+ ret = pm_runtime_get_sync(trng->dev);
if (ret < 0) {
- pm_runtime_put_sync((struct device *)trng->rng.priv);
+ pm_runtime_put_sync(trng->dev);
return ret;
}
@@ -79,8 +80,8 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
ret = 4;
out:
- pm_runtime_mark_last_busy((struct device *)trng->rng.priv);
- pm_runtime_put_sync_autosuspend((struct device *)trng->rng.priv);
+ pm_runtime_mark_last_busy(trng->dev);
+ pm_runtime_put_sync_autosuspend(trng->dev);
return ret;
}
@@ -134,9 +135,9 @@ static int atmel_trng_probe(struct platform_device *pdev)
return -ENODEV;
trng->has_half_rate = data->has_half_rate;
+ trng->dev = &pdev->dev;
trng->rng.name = pdev->name;
trng->rng.read = atmel_trng_read;
- trng->rng.priv = (unsigned long)&pdev->dev;
platform_set_drvdata(pdev, trng);
#ifndef CONFIG_PM
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index 1e3048f2bb38..b7fa1bc1122b 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -36,6 +36,7 @@ struct mtk_rng {
void __iomem *base;
struct clk *clk;
struct hwrng rng;
+ struct device *dev;
};
static int mtk_rng_init(struct hwrng *rng)
@@ -85,7 +86,7 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
struct mtk_rng *priv = to_mtk_rng(rng);
int retval = 0;
- pm_runtime_get_sync((struct device *)priv->rng.priv);
+ pm_runtime_get_sync(priv->dev);
while (max >= sizeof(u32)) {
if (!mtk_rng_wait_ready(rng, wait))
@@ -97,8 +98,8 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
max -= sizeof(u32);
}
- pm_runtime_mark_last_busy((struct device *)priv->rng.priv);
- pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv);
+ pm_runtime_mark_last_busy(priv->dev);
+ pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
}
@@ -112,13 +113,13 @@ static int mtk_rng_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->dev = &pdev->dev;
priv->rng.name = pdev->name;
#ifndef CONFIG_PM
priv->rng.init = mtk_rng_init;
priv->rng.cleanup = mtk_rng_cleanup;
#endif
priv->rng.read = mtk_rng_read;
- priv->rng.priv = (unsigned long)&pdev->dev;
priv->rng.quality = 900;
priv->clk = devm_clk_get(&pdev->dev, "rng");
diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c
index 9ff00f096f38..3e308c890bd2 100644
--- a/drivers/char/hw_random/npcm-rng.c
+++ b/drivers/char/hw_random/npcm-rng.c
@@ -32,6 +32,7 @@
struct npcm_rng {
void __iomem *base;
struct hwrng rng;
+ struct device *dev;
u32 clkp;
};
@@ -57,7 +58,7 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
int retval = 0;
int ready;
- pm_runtime_get_sync((struct device *)priv->rng.priv);
+ pm_runtime_get_sync(priv->dev);
while (max) {
if (wait) {
@@ -79,8 +80,8 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
max--;
}
- pm_runtime_mark_last_busy((struct device *)priv->rng.priv);
- pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv);
+ pm_runtime_mark_last_busy(priv->dev);
+ pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
}
@@ -109,7 +110,7 @@ static int npcm_rng_probe(struct platform_device *pdev)
#endif
priv->rng.name = pdev->name;
priv->rng.read = npcm_rng_read;
- priv->rng.priv = (unsigned long)&pdev->dev;
+ priv->dev = &pdev->dev;
priv->clkp = (u32)(uintptr_t)of_device_get_match_data(&pdev->dev);
writel(NPCM_RNG_M1ROSEL, priv->base + NPCM_RNGMODE_REG);
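The atmel, mtk and npcm hunks above all retire the same anti-pattern: stashing a struct device * in the hwrng's unsigned long `priv` field and casting it back at every runtime-PM call. Each driver now keeps the pointer, properly typed, in its own state and reaches it via container_of(). The shape of the converted read path, with hypothetical driver names:

	struct my_rng {				/* hypothetical driver state */
		struct hwrng rng;
		struct device *dev;		/* replaces the rng.priv cast */
	};

	static int my_rng_read(struct hwrng *rng, void *buf, size_t max,
			       bool wait)
	{
		struct my_rng *priv = container_of(rng, struct my_rng, rng);

		pm_runtime_get_sync(priv->dev);
		/* ... read entropy into buf ... */
		pm_runtime_mark_last_busy(priv->dev);
		pm_runtime_put_sync_autosuspend(priv->dev);
		return 0;
	}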
diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c
index 161050591663..fb4a30b95507 100644
--- a/drivers/char/hw_random/rockchip-rng.c
+++ b/drivers/char/hw_random/rockchip-rng.c
@@ -93,6 +93,30 @@
#define TRNG_v1_VERSION_CODE 0x46bc
/* end of TRNG_V1 register definitions */
+/*
+ * RKRNG register definitions
+ * The RKRNG IP is a stand-alone TRNG implementation (not part of a crypto IP)
+ * and can be found in the Rockchip RK3576, Rockchip RK3562 and Rockchip RK3528
+ * SoCs. It can either output true randomness (TRNG) or "deterministic"
+ * randomness derived from hashing the true entropy (DRNG). This driver
+ * implementation uses just the true entropy, and leaves stretching the entropy
+ * up to Linux.
+ */
+#define RKRNG_CFG 0x0000
+#define RKRNG_CTRL 0x0010
+#define RKRNG_CTRL_REQ_TRNG BIT(4)
+#define RKRNG_STATE 0x0014
+#define RKRNG_STATE_TRNG_RDY BIT(4)
+#define RKRNG_TRNG_DATA0 0x0050
+#define RKRNG_TRNG_DATA1 0x0054
+#define RKRNG_TRNG_DATA2 0x0058
+#define RKRNG_TRNG_DATA3 0x005C
+#define RKRNG_TRNG_DATA4 0x0060
+#define RKRNG_TRNG_DATA5 0x0064
+#define RKRNG_TRNG_DATA6 0x0068
+#define RKRNG_TRNG_DATA7 0x006C
+#define RKRNG_READ_LEN 32
+
/* Before removing this assert, give rk3588_rng_read an upper bound of 32 */
static_assert(RK_RNG_MAX_BYTE <= (TRNG_V1_RAND7 + 4 - TRNG_V1_RAND0),
"You raised RK_RNG_MAX_BYTE and broke rk3588-rng, congrats.");
@@ -205,6 +229,46 @@ out:
return (ret < 0) ? ret : to_read;
}
+static int rk3576_rng_init(struct hwrng *rng)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+
+ return rk_rng_enable_clks(rk_rng);
+}
+
+static int rk3576_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+ size_t to_read = min_t(size_t, max, RKRNG_READ_LEN);
+ int ret = 0;
+ u32 val;
+
+ ret = pm_runtime_resume_and_get(rk_rng->dev);
+ if (ret < 0)
+ return ret;
+
+ rk_rng_writel(rk_rng, RKRNG_CTRL_REQ_TRNG | (RKRNG_CTRL_REQ_TRNG << 16),
+ RKRNG_CTRL);
+
+ if (readl_poll_timeout(rk_rng->base + RKRNG_STATE, val,
+ (val & RKRNG_STATE_TRNG_RDY), RK_RNG_POLL_PERIOD_US,
+ RK_RNG_POLL_TIMEOUT_US)) {
+ dev_err(rk_rng->dev, "timed out waiting for data\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ rk_rng_writel(rk_rng, RKRNG_STATE_TRNG_RDY, RKRNG_STATE);
+
+ memcpy_fromio(buf, rk_rng->base + RKRNG_TRNG_DATA0, to_read);
+
+out:
+ pm_runtime_mark_last_busy(rk_rng->dev);
+ pm_runtime_put_sync_autosuspend(rk_rng->dev);
+
+ return (ret < 0) ? ret : to_read;
+}
+
static int rk3588_rng_init(struct hwrng *rng)
{
struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
@@ -305,6 +369,14 @@ static const struct rk_rng_soc_data rk3568_soc_data = {
.reset_optional = false,
};
+static const struct rk_rng_soc_data rk3576_soc_data = {
+ .rk_rng_init = rk3576_rng_init,
+ .rk_rng_read = rk3576_rng_read,
+ .rk_rng_cleanup = rk3588_rng_cleanup,
+ .quality = 999, /* as determined by actual testing */
+ .reset_optional = true,
+};
+
static const struct rk_rng_soc_data rk3588_soc_data = {
.rk_rng_init = rk3588_rng_init,
.rk_rng_read = rk3588_rng_read,
@@ -397,6 +469,7 @@ static const struct dev_pm_ops rk_rng_pm_ops = {
static const struct of_device_id rk_rng_dt_match[] = {
{ .compatible = "rockchip,rk3568-rng", .data = (void *)&rk3568_soc_data },
+ { .compatible = "rockchip,rk3576-rng", .data = (void *)&rk3576_soc_data },
{ .compatible = "rockchip,rk3588-rng", .data = (void *)&rk3588_soc_data },
{ /* sentinel */ },
};
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 3ba9d7e9a6c7..064944ae9fdc 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -27,7 +27,6 @@
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
-#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
@@ -41,11 +40,12 @@
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
-static void smi_recv_work(struct work_struct *t);
+static void smi_work(struct work_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg);
+static void intf_free(struct kref *ref);
static bool initialized;
static bool drvregistered;
@@ -180,14 +180,8 @@ MODULE_PARM_DESC(max_msgs_per_user,
struct ipmi_user {
struct list_head link;
- /*
- * Set to NULL when the user is destroyed, a pointer to myself
- * so srcu_dereference can be used on it.
- */
- struct ipmi_user *self;
- struct srcu_struct release_barrier;
-
struct kref refcount;
+ refcount_t destroyed;
/* The upper layer that handles receive messages. */
const struct ipmi_user_hndl *handler;
@@ -200,30 +194,8 @@ struct ipmi_user {
bool gets_events;
atomic_t nr_msgs;
-
- /* Free must run in process context for RCU cleanup. */
- struct work_struct remove_work;
};
-static struct workqueue_struct *remove_work_wq;
-
-static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
- __acquires(user->release_barrier)
-{
- struct ipmi_user *ruser;
-
- *index = srcu_read_lock(&user->release_barrier);
- ruser = srcu_dereference(user->self, &user->release_barrier);
- if (!ruser)
- srcu_read_unlock(&user->release_barrier, *index);
- return ruser;
-}
-
-static void release_ipmi_user(struct ipmi_user *user, int index)
-{
- srcu_read_unlock(&user->release_barrier, index);
-}
-
struct cmd_rcvr {
struct list_head link;
@@ -327,6 +299,8 @@ struct bmc_device {
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
+static struct workqueue_struct *bmc_remove_work_wq;
+
static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
struct ipmi_device_id *id,
bool *guid_set, guid_t *guid);
@@ -451,11 +425,10 @@ struct ipmi_smi {
struct list_head link;
/*
- * The list of upper layers that are using me. seq_lock write
- * protects this. Read protection is with srcu.
+ * The list of upper layers that are using me.
*/
struct list_head users;
- struct srcu_struct users_srcu;
+ struct mutex users_mutex;
atomic_t nr_users;
struct device_attribute nr_users_devattr;
struct device_attribute nr_msgs_devattr;
@@ -496,15 +469,22 @@ struct ipmi_smi {
int curr_seq;
/*
- * Messages queued for delivery. If delivery fails (out of memory
- * for instance), They will stay in here to be processed later in a
- * periodic timer interrupt. The workqueue is for handling received
- * messages directly from the handler.
+	 * Messages queued for delivery to the user.
+ */
+ struct mutex user_msgs_mutex;
+ struct list_head user_msgs;
+
+ /*
+ * Messages queued for processing. If processing fails (out
+ * of memory for instance), They will stay in here to be
+ * processed later in a periodic timer interrupt. The
+ * workqueue is for handling received messages directly from
+ * the handler.
*/
spinlock_t waiting_rcv_msgs_lock;
struct list_head waiting_rcv_msgs;
atomic_t watchdog_pretimeouts_to_deliver;
- struct work_struct recv_work;
+ struct work_struct smi_work;
spinlock_t xmit_msgs_lock;
struct list_head xmit_msgs;
@@ -522,10 +502,9 @@ struct ipmi_smi {
* Events that were queued because no one was there to receive
* them.
*/
- spinlock_t events_lock; /* For dealing with event stuff. */
+ struct mutex events_mutex; /* For dealing with event stuff. */
struct list_head waiting_events;
unsigned int waiting_events_count; /* How many events in queue? */
- char delivering_events;
char event_msg_printed;
/* How many users are waiting for events? */
@@ -613,6 +592,28 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
+static void free_ipmi_user(struct kref *ref)
+{
+ struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
+ struct module *owner;
+
+ owner = user->intf->owner;
+ kref_put(&user->intf->refcount, intf_free);
+ module_put(owner);
+ vfree(user);
+}
+
+static void release_ipmi_user(struct ipmi_user *user)
+{
+ kref_put(&user->refcount, free_ipmi_user);
+}
+
+static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user)
+{
+ if (!kref_get_unless_zero(&user->refcount))
+ return NULL;
+ return user;
+}
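The SRCU-based user lifetime tracking is replaced above by a plain kref: acquire_ipmi_user() only succeeds while the refcount is nonzero (kref_get_unless_zero), and the final release_ipmi_user() drops the interface reference and module pin before freeing. Typical use of the pair (a sketch):

	struct ipmi_user *u = acquire_ipmi_user(user);

	if (!u)
		return -ENODEV;		/* user already being destroyed */
	/* ... safely use u->handler, u->intf, ... */
	release_ipmi_user(u);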
/*
* The driver model view of the IPMI messaging driver.
@@ -630,9 +631,6 @@ static DEFINE_MUTEX(ipmidriver_mutex);
static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
-#define ipmi_interfaces_mutex_held() \
- lockdep_is_held(&ipmi_interfaces_mutex)
-static struct srcu_struct ipmi_interfaces_srcu;
/*
* List of watchers that want to know when smi's are added and deleted.
@@ -698,27 +696,20 @@ static void free_smi_msg_list(struct list_head *q)
}
}
-static void clean_up_interface_data(struct ipmi_smi *intf)
+static void intf_free(struct kref *ref)
{
+ struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
int i;
struct cmd_rcvr *rcvr, *rcvr2;
- struct list_head list;
-
- cancel_work_sync(&intf->recv_work);
free_smi_msg_list(&intf->waiting_rcv_msgs);
free_recv_msg_list(&intf->waiting_events);
/*
* Wholesale remove all the entries from the list in the
- * interface and wait for RCU to know that none are in use.
+ * interface. No need for locks, this is single-threaded.
*/
- mutex_lock(&intf->cmd_rcvrs_mutex);
- INIT_LIST_HEAD(&list);
- list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
- mutex_unlock(&intf->cmd_rcvrs_mutex);
-
- list_for_each_entry_safe(rcvr, rcvr2, &list, link)
+ list_for_each_entry_safe(rcvr, rcvr2, &intf->cmd_rcvrs, link)
kfree(rcvr);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
@@ -726,20 +717,17 @@ static void clean_up_interface_data(struct ipmi_smi *intf)
&& (intf->seq_table[i].recv_msg))
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
-}
-
-static void intf_free(struct kref *ref)
-{
- struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
- clean_up_interface_data(intf);
kfree(intf);
}
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
struct ipmi_smi *intf;
- int index, rv;
+ unsigned int count = 0, i;
+ int *interfaces = NULL;
+ struct device **devices = NULL;
+ int rv = 0;
/*
* Make sure the driver is actually initialized, this handles
@@ -753,20 +741,53 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
list_add(&watcher->link, &smi_watchers);
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
- lockdep_is_held(&smi_watchers_mutex)) {
- int intf_num = READ_ONCE(intf->intf_num);
+ /*
+ * Build an array of ipmi interfaces and fill it in, and
+ * another array of the devices. We can't call the callback
+ * with ipmi_interfaces_mutex held. smi_watchers_mutex will
+ * keep things in order for the user.
+ */
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link)
+ count++;
+ if (count > 0) {
+ interfaces = kmalloc_array(count, sizeof(*interfaces),
+ GFP_KERNEL);
+ if (!interfaces) {
+ rv = -ENOMEM;
+ } else {
+ devices = kmalloc_array(count, sizeof(*devices),
+ GFP_KERNEL);
+ if (!devices) {
+ kfree(interfaces);
+ interfaces = NULL;
+ rv = -ENOMEM;
+ }
+ }
+ count = 0;
+ }
+ if (interfaces) {
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
+ int intf_num = READ_ONCE(intf->intf_num);
- if (intf_num == -1)
- continue;
- watcher->new_smi(intf_num, intf->si_dev);
+ if (intf_num == -1)
+ continue;
+ devices[count] = intf->si_dev;
+ interfaces[count++] = intf_num;
+ }
+ }
+ mutex_unlock(&ipmi_interfaces_mutex);
+
+ if (interfaces) {
+ for (i = 0; i < count; i++)
+ watcher->new_smi(interfaces[i], devices[i]);
+ kfree(interfaces);
+ kfree(devices);
}
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
mutex_unlock(&smi_watchers_mutex);
- return 0;
+ return rv;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);
@@ -779,22 +800,17 @@ int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
-/*
- * Must be called with smi_watchers_mutex held.
- */
static void
call_smi_watchers(int i, struct device *dev)
{
struct ipmi_smi_watcher *w;
- mutex_lock(&smi_watchers_mutex);
list_for_each_entry(w, &smi_watchers, link) {
if (try_module_get(w->owner)) {
w->new_smi(i, dev);
module_put(w->owner);
}
}
- mutex_unlock(&smi_watchers_mutex);
}
static int
@@ -941,18 +957,14 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
ipmi_free_recv_msg(msg);
atomic_dec(&msg->user->nr_msgs);
} else {
- int index;
- struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
-
- if (user) {
- atomic_dec(&user->nr_msgs);
- user->handler->ipmi_recv_hndl(msg, user->handler_data);
- release_ipmi_user(user, index);
- } else {
- /* User went away, give up. */
- ipmi_free_recv_msg(msg);
- rv = -EINVAL;
- }
+ /*
+ * Deliver it in smi_work. The message will hold a
+ * refcount to the user.
+ */
+ mutex_lock(&intf->user_msgs_mutex);
+ list_add_tail(&msg->link, &intf->user_msgs);
+ mutex_unlock(&intf->user_msgs_mutex);
+ queue_work(system_wq, &intf->smi_work);
}
return rv;
@@ -1192,23 +1204,14 @@ static int intf_err_seq(struct ipmi_smi *intf,
return rv;
}
-static void free_user_work(struct work_struct *work)
-{
- struct ipmi_user *user = container_of(work, struct ipmi_user,
- remove_work);
-
- cleanup_srcu_struct(&user->release_barrier);
- vfree(user);
-}
-
int ipmi_create_user(unsigned int if_num,
const struct ipmi_user_hndl *handler,
void *handler_data,
struct ipmi_user **user)
{
unsigned long flags;
- struct ipmi_user *new_user;
- int rv, index;
+ struct ipmi_user *new_user = NULL;
+ int rv = 0;
struct ipmi_smi *intf;
/*
@@ -1230,30 +1233,31 @@ int ipmi_create_user(unsigned int if_num,
if (rv)
return rv;
- new_user = vzalloc(sizeof(*new_user));
- if (!new_user)
- return -ENOMEM;
-
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
if (intf->intf_num == if_num)
goto found;
}
/* Not found, return an error */
rv = -EINVAL;
- goto out_kfree;
+ goto out_unlock;
found:
+ if (intf->in_shutdown) {
+ rv = -ENODEV;
+ goto out_unlock;
+ }
+
if (atomic_add_return(1, &intf->nr_users) > max_users) {
rv = -EBUSY;
goto out_kfree;
}
- INIT_WORK(&new_user->remove_work, free_user_work);
-
- rv = init_srcu_struct(&new_user->release_barrier);
- if (rv)
+ new_user = vzalloc(sizeof(*new_user));
+ if (!new_user) {
+ rv = -ENOMEM;
goto out_kfree;
+ }
if (!try_module_get(intf->owner)) {
rv = -ENODEV;
@@ -1265,64 +1269,58 @@ int ipmi_create_user(unsigned int if_num,
atomic_set(&new_user->nr_msgs, 0);
kref_init(&new_user->refcount);
+ refcount_set(&new_user->destroyed, 1);
+ kref_get(&new_user->refcount); /* Destroy owns a refcount. */
new_user->handler = handler;
new_user->handler_data = handler_data;
new_user->intf = intf;
new_user->gets_events = false;
- rcu_assign_pointer(new_user->self, new_user);
+ mutex_lock(&intf->users_mutex);
spin_lock_irqsave(&intf->seq_lock, flags);
- list_add_rcu(&new_user->link, &intf->users);
+ list_add(&new_user->link, &intf->users);
spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->users_mutex);
+
if (handler->ipmi_watchdog_pretimeout)
/* User wants pretimeouts, so make sure to watch for them. */
smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
- *user = new_user;
- return 0;
out_kfree:
- atomic_dec(&intf->nr_users);
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
- vfree(new_user);
+ if (rv) {
+ atomic_dec(&intf->nr_users);
+ vfree(new_user);
+ } else {
+ *user = new_user;
+ }
+out_unlock:
+ mutex_unlock(&ipmi_interfaces_mutex);
return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
- int rv, index;
+ int rv = -EINVAL;
struct ipmi_smi *intf;
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
- if (intf->intf_num == if_num)
- goto found;
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
+ if (intf->intf_num == if_num) {
+ if (!intf->handlers->get_smi_info)
+ rv = -ENOTTY;
+ else
+ rv = intf->handlers->get_smi_info(intf->send_info, data);
+ break;
+ }
}
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
-
- /* Not found, return an error */
- return -EINVAL;
-
-found:
- if (!intf->handlers->get_smi_info)
- rv = -ENOTTY;
- else
- rv = intf->handlers->get_smi_info(intf->send_info, data);
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
+ mutex_unlock(&ipmi_interfaces_mutex);
return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);
-static void free_user(struct kref *ref)
-{
- struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
-
- /* SRCU cleanup must happen in workqueue context. */
- queue_work(remove_work_wq, &user->remove_work);
-}
-
+/* Must be called with intf->users_mutex held. */
static void _ipmi_destroy_user(struct ipmi_user *user)
{
struct ipmi_smi *intf = user->intf;
@@ -1330,21 +1328,10 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
unsigned long flags;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
- struct module *owner;
+ struct ipmi_recv_msg *msg, *msg2;
- if (!acquire_ipmi_user(user, &i)) {
- /*
- * The user has already been cleaned up, just make sure
- * nothing is using it and return.
- */
- synchronize_srcu(&user->release_barrier);
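+	/* Only the first caller to destroy the user does the teardown. */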
+ if (!refcount_dec_if_one(&user->destroyed))
return;
- }
-
- rcu_assign_pointer(user->self, NULL);
- release_ipmi_user(user, i);
-
- synchronize_srcu(&user->release_barrier);
if (user->handler->shutdown)
user->handler->shutdown(user->handler_data);
@@ -1355,11 +1342,11 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
if (user->gets_events)
atomic_dec(&intf->event_waiters);
- /* Remove the user from the interface's sequence table. */
- spin_lock_irqsave(&intf->seq_lock, flags);
- list_del_rcu(&user->link);
+ /* Remove the user from the interface's list and sequence table. */
+ list_del(&user->link);
atomic_dec(&intf->nr_users);
+ spin_lock_irqsave(&intf->seq_lock, flags);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
if (intf->seq_table[i].inuse
&& (intf->seq_table[i].recv_msg->user == user)) {
@@ -1374,7 +1361,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
* Remove the user from the command receiver's table. First
* we build a list of everything (not using the standard link,
* since other things may be using it till we do
- * synchronize_srcu()) then free everything in that list.
+ * synchronize_rcu()) then free everything in that list.
*/
mutex_lock(&intf->cmd_rcvrs_mutex);
list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
@@ -1386,23 +1373,33 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
}
}
mutex_unlock(&intf->cmd_rcvrs_mutex);
- synchronize_rcu();
while (rcvrs) {
rcvr = rcvrs;
rcvrs = rcvr->next;
kfree(rcvr);
}
- owner = intf->owner;
- kref_put(&intf->refcount, intf_free);
- module_put(owner);
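+	/* Flush any queued but not yet delivered messages for this user. */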
+ mutex_lock(&intf->user_msgs_mutex);
+ list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
+ if (msg->user != user)
+ continue;
+ list_del(&msg->link);
+ ipmi_free_recv_msg(msg);
+ }
+ mutex_unlock(&intf->user_msgs_mutex);
+
+ release_ipmi_user(user);
}
void ipmi_destroy_user(struct ipmi_user *user)
{
+ struct ipmi_smi *intf = user->intf;
+
+ mutex_lock(&intf->users_mutex);
_ipmi_destroy_user(user);
+ mutex_unlock(&intf->users_mutex);
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
}
EXPORT_SYMBOL(ipmi_destroy_user);
@@ -1411,9 +1408,9 @@ int ipmi_get_version(struct ipmi_user *user,
unsigned char *minor)
{
struct ipmi_device_id id;
- int rv, index;
+ int rv;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1422,7 +1419,7 @@ int ipmi_get_version(struct ipmi_user *user,
*major = ipmi_version_major(&id);
*minor = ipmi_version_minor(&id);
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1432,9 +1429,9 @@ int ipmi_set_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char address)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1444,7 +1441,7 @@ int ipmi_set_my_address(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
user->intf->addrinfo[channel].address = address;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1454,9 +1451,9 @@ int ipmi_get_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char *address)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1466,7 +1463,7 @@ int ipmi_get_my_address(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
*address = user->intf->addrinfo[channel].address;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1476,9 +1473,9 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char LUN)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1488,7 +1485,7 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
user->intf->addrinfo[channel].lun = LUN & 0x3;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1498,9 +1495,9 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char *address)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1510,7 +1507,7 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
*address = user->intf->addrinfo[channel].lun;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1518,17 +1515,17 @@ EXPORT_SYMBOL(ipmi_get_my_LUN);
int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
- int mode, index;
+ int mode;
unsigned long flags;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
mode = user->intf->maintenance_mode;
spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return mode;
}
@@ -1543,11 +1540,11 @@ static void maintenance_mode_update(struct ipmi_smi *intf)
int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
- int rv = 0, index;
+ int rv = 0;
unsigned long flags;
struct ipmi_smi *intf = user->intf;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1577,7 +1574,7 @@ int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
}
out_unlock:
spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1585,19 +1582,17 @@ EXPORT_SYMBOL(ipmi_set_maintenance_mode);
int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
- unsigned long flags;
struct ipmi_smi *intf = user->intf;
struct ipmi_recv_msg *msg, *msg2;
struct list_head msgs;
- int index;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
INIT_LIST_HEAD(&msgs);
- spin_lock_irqsave(&intf->events_lock, flags);
+ mutex_lock(&intf->events_mutex);
if (user->gets_events == val)
goto out;
@@ -1610,13 +1605,6 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
atomic_dec(&intf->event_waiters);
}
- if (intf->delivering_events)
- /*
- * Another thread is delivering events for this, so
- * let it handle any new events.
- */
- goto out;
-
/* Deliver any queued events. */
while (user->gets_events && !list_empty(&intf->waiting_events)) {
list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
@@ -1627,22 +1615,16 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
intf->event_msg_printed = 0;
}
- intf->delivering_events = 1;
- spin_unlock_irqrestore(&intf->events_lock, flags);
-
list_for_each_entry_safe(msg, msg2, &msgs, link) {
msg->user = user;
kref_get(&user->refcount);
deliver_local_response(intf, msg);
}
-
- spin_lock_irqsave(&intf->events_lock, flags);
- intf->delivering_events = 0;
}
out:
- spin_unlock_irqrestore(&intf->events_lock, flags);
- release_ipmi_user(user, index);
+ mutex_unlock(&intf->events_mutex);
+ release_ipmi_user(user);
return 0;
}
@@ -1687,9 +1669,9 @@ int ipmi_register_for_cmd(struct ipmi_user *user,
{
struct ipmi_smi *intf = user->intf;
struct cmd_rcvr *rcvr;
- int rv = 0, index;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1719,7 +1701,7 @@ out_unlock:
if (rv)
kfree(rcvr);
out_release:
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1733,9 +1715,9 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
struct ipmi_smi *intf = user->intf;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
- int i, rv = -ENOENT, index;
+ int i, rv = -ENOENT;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1758,7 +1740,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
}
mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
while (rcvrs) {
smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
rcvr = rcvrs;
@@ -1882,13 +1864,12 @@ static void smi_send(struct ipmi_smi *intf,
const struct ipmi_smi_handlers *handlers,
struct ipmi_smi_msg *smi_msg, int priority)
{
- int run_to_completion = intf->run_to_completion;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
unsigned long flags = 0;
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
smi_msg = smi_add_send_msg(intf, smi_msg, priority);
-
if (!run_to_completion)
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
@@ -2304,6 +2285,7 @@ static int i_ipmi_request(struct ipmi_user *user,
{
struct ipmi_smi_msg *smi_msg;
struct ipmi_recv_msg *recv_msg;
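+	/* run_to_completion is set at panic time; sample it once up front. */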
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
int rv = 0;
if (user) {
@@ -2337,7 +2319,8 @@ static int i_ipmi_request(struct ipmi_user *user,
}
}
- rcu_read_lock();
+ if (!run_to_completion)
+ mutex_lock(&intf->users_mutex);
if (intf->in_shutdown) {
rv = -ENODEV;
goto out_err;
@@ -2383,7 +2366,8 @@ out_err:
smi_send(intf, intf->handlers, smi_msg, priority);
}
- rcu_read_unlock();
+ if (!run_to_completion)
+ mutex_unlock(&intf->users_mutex);
out:
if (rv && user)
@@ -2414,12 +2398,12 @@ int ipmi_request_settime(struct ipmi_user *user,
unsigned int retry_time_ms)
{
unsigned char saddr = 0, lun = 0;
- int rv, index;
+ int rv;
if (!user)
return -EINVAL;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -2438,7 +2422,7 @@ int ipmi_request_settime(struct ipmi_user *user,
retries,
retry_time_ms);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
EXPORT_SYMBOL(ipmi_request_settime);
@@ -2453,12 +2437,12 @@ int ipmi_request_supply_msgs(struct ipmi_user *user,
int priority)
{
unsigned char saddr = 0, lun = 0;
- int rv, index;
+ int rv;
if (!user)
return -EINVAL;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -2477,7 +2461,7 @@ int ipmi_request_supply_msgs(struct ipmi_user *user,
lun,
-1, 0);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
EXPORT_SYMBOL(ipmi_request_supply_msgs);
@@ -3064,7 +3048,7 @@ cleanup_bmc_device(struct kref *ref)
* with removing the device attributes while reading a device
* attribute.
*/
- queue_work(remove_work_wq, &bmc->remove_work);
+ queue_work(bmc_remove_work_wq, &bmc->remove_work);
}
/*
@@ -3520,15 +3504,14 @@ static ssize_t nr_msgs_show(struct device *dev,
char *buf)
{
struct ipmi_smi *intf = container_of(attr,
- struct ipmi_smi, nr_msgs_devattr);
+ struct ipmi_smi, nr_msgs_devattr);
struct ipmi_user *user;
- int index;
unsigned int count = 0;
- index = srcu_read_lock(&intf->users_srcu);
- list_for_each_entry_rcu(user, &intf->users, link)
+ mutex_lock(&intf->users_mutex);
+ list_for_each_entry(user, &intf->users, link)
count += atomic_read(&user->nr_msgs);
- srcu_read_unlock(&intf->users_srcu, index);
+ mutex_unlock(&intf->users_mutex);
return sysfs_emit(buf, "%u\n", count);
}
@@ -3569,12 +3552,6 @@ int ipmi_add_smi(struct module *owner,
if (!intf)
return -ENOMEM;
- rv = init_srcu_struct(&intf->users_srcu);
- if (rv) {
- kfree(intf);
- return rv;
- }
-
intf->owner = owner;
intf->bmc = &intf->tmp_bmc;
INIT_LIST_HEAD(&intf->bmc->intfs);
@@ -3591,7 +3568,10 @@ int ipmi_add_smi(struct module *owner,
}
if (slave_addr != 0)
intf->addrinfo[0].address = slave_addr;
+ INIT_LIST_HEAD(&intf->user_msgs);
+ mutex_init(&intf->user_msgs_mutex);
INIT_LIST_HEAD(&intf->users);
+ mutex_init(&intf->users_mutex);
atomic_set(&intf->nr_users, 0);
intf->handlers = handlers;
intf->send_info = send_info;
@@ -3603,12 +3583,12 @@ int ipmi_add_smi(struct module *owner,
intf->curr_seq = 0;
spin_lock_init(&intf->waiting_rcv_msgs_lock);
INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
- INIT_WORK(&intf->recv_work, smi_recv_work);
+ INIT_WORK(&intf->smi_work, smi_work);
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
spin_lock_init(&intf->xmit_msgs_lock);
INIT_LIST_HEAD(&intf->xmit_msgs);
INIT_LIST_HEAD(&intf->hp_xmit_msgs);
- spin_lock_init(&intf->events_lock);
+ mutex_init(&intf->events_mutex);
spin_lock_init(&intf->watch_lock);
atomic_set(&intf->event_waiters, 0);
intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
@@ -3621,12 +3601,16 @@ int ipmi_add_smi(struct module *owner,
for (i = 0; i < IPMI_NUM_STATS; i++)
atomic_set(&intf->stats[i], 0);
+ /*
+ * Grab the watchers mutex so we can deliver the new interface
+ * without races.
+ */
+ mutex_lock(&smi_watchers_mutex);
mutex_lock(&ipmi_interfaces_mutex);
/* Look for a hole in the numbers. */
i = 0;
link = &ipmi_interfaces;
- list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
- ipmi_interfaces_mutex_held()) {
+ list_for_each_entry(tintf, &ipmi_interfaces, link) {
if (tintf->intf_num != i) {
link = &tintf->link;
break;
@@ -3635,9 +3619,9 @@ int ipmi_add_smi(struct module *owner,
}
/* Add the new interface in numeric order. */
if (i == 0)
- list_add_rcu(&intf->link, &ipmi_interfaces);
+ list_add(&intf->link, &ipmi_interfaces);
else
- list_add_tail_rcu(&intf->link, link);
+ list_add_tail(&intf->link, link);
rv = handlers->start_processing(send_info, intf);
if (rv)
@@ -3669,18 +3653,14 @@ int ipmi_add_smi(struct module *owner,
goto out_err_bmc_reg;
}
- /*
- * Keep memory order straight for RCU readers. Make
- * sure everything else is committed to memory before
- * setting intf_num to mark the interface valid.
- */
- smp_wmb();
intf->intf_num = i;
mutex_unlock(&ipmi_interfaces_mutex);
/* After this point the interface is legal to use. */
call_smi_watchers(i, intf->si_dev);
+ mutex_unlock(&smi_watchers_mutex);
+
return 0;
out_err_bmc_reg:
@@ -3689,10 +3669,9 @@ int ipmi_add_smi(struct module *owner,
if (intf->handlers->shutdown)
intf->handlers->shutdown(intf->send_info);
out_err:
- list_del_rcu(&intf->link);
+ list_del(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
- synchronize_srcu(&ipmi_interfaces_srcu);
- cleanup_srcu_struct(&intf->users_srcu);
+ mutex_unlock(&smi_watchers_mutex);
kref_put(&intf->refcount, intf_free);
return rv;
@@ -3758,19 +3737,28 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf)
void ipmi_unregister_smi(struct ipmi_smi *intf)
{
struct ipmi_smi_watcher *w;
- int intf_num, index;
+ int intf_num;
if (!intf)
return;
+
intf_num = intf->intf_num;
mutex_lock(&ipmi_interfaces_mutex);
+ cancel_work_sync(&intf->smi_work);
+ /* smi_work() can no longer be in progress after this. */
+
intf->intf_num = -1;
intf->in_shutdown = true;
- list_del_rcu(&intf->link);
+ list_del(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
- synchronize_srcu(&ipmi_interfaces_srcu);
- /* At this point no users can be added to the interface. */
+ /*
+ * At this point no users can be added to the interface and no
+ * new messages can be sent.
+ */
+
+ if (intf->handlers->shutdown)
+ intf->handlers->shutdown(intf->send_info);
device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
device_remove_file(intf->si_dev, &intf->nr_users_devattr);
@@ -3784,24 +3772,19 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
w->smi_gone(intf_num);
mutex_unlock(&smi_watchers_mutex);
- index = srcu_read_lock(&intf->users_srcu);
+ mutex_lock(&intf->users_mutex);
while (!list_empty(&intf->users)) {
- struct ipmi_user *user =
- container_of(list_next_rcu(&intf->users),
- struct ipmi_user, link);
+ struct ipmi_user *user = list_first_entry(&intf->users,
+ struct ipmi_user, link);
_ipmi_destroy_user(user);
}
- srcu_read_unlock(&intf->users_srcu, index);
-
- if (intf->handlers->shutdown)
- intf->handlers->shutdown(intf->send_info);
+ mutex_unlock(&intf->users_mutex);
cleanup_smi_msgs(intf);
ipmi_bmc_unregister(intf);
- cleanup_srcu_struct(&intf->users_srcu);
kref_put(&intf->refcount, intf_free);
}
EXPORT_SYMBOL(ipmi_unregister_smi);
@@ -3926,17 +3909,12 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
msg->data_size, msg->data);
- rcu_read_lock();
- if (!intf->in_shutdown) {
- smi_send(intf, intf->handlers, msg, 0);
- /*
- * We used the message, so return the value
- * that causes it to not be freed or
- * queued.
- */
- rv = -1;
- }
- rcu_read_unlock();
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value that
+ * causes it to not be freed or queued.
+ */
+ rv = -1;
} else {
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
@@ -3946,7 +3924,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
* later.
*/
rv = 1;
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
} else {
/* Extract the source address from the data. */
ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
@@ -4017,17 +3995,12 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
msg->data_size = 5;
- rcu_read_lock();
- if (!intf->in_shutdown) {
- smi_send(intf, intf->handlers, msg, 0);
- /*
- * We used the message, so return the value
- * that causes it to not be freed or
- * queued.
- */
- rv = -1;
- }
- rcu_read_unlock();
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value that
+ * causes it to not be freed or queued.
+ */
+ rv = -1;
} else {
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
@@ -4037,7 +4010,7 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
* later.
*/
rv = 1;
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
} else {
/* Extract the source address from the data. */
daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
@@ -4206,14 +4179,33 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
rcu_read_unlock();
if (user == NULL) {
- /* We didn't find a user, just give up. */
+ /* We didn't find a user, just give up and return an error. */
ipmi_inc_stat(intf, unhandled_commands);
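+		/*
+		 * Build a response carrying an invalid-command completion
+		 * code back to the requester.
+		 */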
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg->data[1] = IPMI_SEND_MSG_CMD;
+ msg->data[2] = chan;
+ msg->data[3] = msg->rsp[4]; /* handle */
+ msg->data[4] = msg->rsp[8]; /* rsSWID */
+ msg->data[5] = ((netfn + 1) << 2) | (msg->rsp[9] & 0x3);
+ msg->data[6] = ipmb_checksum(&msg->data[3], 3);
+ msg->data[7] = msg->rsp[5]; /* rqSWID */
+ /* rqseq/lun */
+ msg->data[8] = (msg->rsp[9] & 0xfc) | (msg->rsp[6] & 0x3);
+ msg->data[9] = cmd;
+ msg->data[10] = IPMI_INVALID_CMD_COMPLETION_CODE;
+ msg->data[11] = ipmb_checksum(&msg->data[7], 4);
+ msg->data_size = 12;
+
+ dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
+ msg->data_size, msg->data);
+
+ smi_send(intf, intf->handlers, msg, 0);
/*
- * Don't do anything with these messages, just allow
- * them to be freed.
+ * We used the message, so return the value that
+ * causes it to not be freed or queued.
*/
- rv = 0;
+ rv = -1;
} else {
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
@@ -4222,7 +4214,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
* message, so requeue it for handling later.
*/
rv = 1;
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
} else {
/* Extract the source address from the data. */
lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
@@ -4331,7 +4323,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
* later.
*/
rv = 1;
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
} else {
/*
* OEM Messages are expected to be delivered via
@@ -4393,8 +4385,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg, *recv_msg2;
struct list_head msgs;
struct ipmi_user *user;
- int rv = 0, deliver_count = 0, index;
- unsigned long flags;
+ int rv = 0, deliver_count = 0;
if (msg->rsp_size < 19) {
/* Message is too small to be an IPMB event. */
@@ -4409,7 +4400,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
INIT_LIST_HEAD(&msgs);
- spin_lock_irqsave(&intf->events_lock, flags);
+ mutex_lock(&intf->events_mutex);
ipmi_inc_stat(intf, events);
@@ -4417,18 +4408,20 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
* Allocate and fill in one message for every user that is
* getting events.
*/
- index = srcu_read_lock(&intf->users_srcu);
- list_for_each_entry_rcu(user, &intf->users, link) {
+ mutex_lock(&intf->users_mutex);
+ list_for_each_entry(user, &intf->users, link) {
if (!user->gets_events)
continue;
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
- rcu_read_unlock();
+ mutex_unlock(&intf->users_mutex);
list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
link) {
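+				/* Drop the ref taken when the msg was queued. */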
+ user = recv_msg->user;
list_del(&recv_msg->link);
ipmi_free_recv_msg(recv_msg);
+ kref_put(&user->refcount, free_ipmi_user);
}
/*
* We couldn't allocate memory for the
@@ -4446,7 +4439,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
kref_get(&user->refcount);
list_add_tail(&recv_msg->link, &msgs);
}
- srcu_read_unlock(&intf->users_srcu, index);
+ mutex_unlock(&intf->users_mutex);
if (deliver_count) {
/* Now deliver all the messages. */
@@ -4484,7 +4477,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
}
out:
- spin_unlock_irqrestore(&intf->events_lock, flags);
+ mutex_unlock(&intf->events_mutex);
return rv;
}
@@ -4570,7 +4563,7 @@ return_unspecified:
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
&& (msg->user_data == NULL)) {
- if (intf->in_shutdown)
+ if (intf->in_shutdown || intf->run_to_completion)
goto out;
/*
@@ -4642,6 +4635,9 @@ return_unspecified:
*/
struct ipmi_recv_msg *recv_msg;
+ if (intf->run_to_completion)
+ goto out;
+
chan = msg->data[2] & 0x0f;
if (chan >= IPMI_MAX_CHANNELS)
/* Invalid channel number */
@@ -4664,6 +4660,9 @@ process_response_response:
&& (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
struct ipmi_channel *chans;
+ if (intf->run_to_completion)
+ goto out;
+
/* It's from the receive queue. */
chan = msg->rsp[3] & 0xf;
if (chan >= IPMI_MAX_CHANNELS) {
@@ -4738,6 +4737,9 @@ process_response_response:
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
/* It's an asynchronous event. */
+ if (intf->run_to_completion)
+ goto out;
+
requeue = handle_read_event_rsp(intf, msg);
} else {
/* It's a response from the local BMC. */
@@ -4753,10 +4755,10 @@ process_response_response:
*/
static void handle_new_recv_msgs(struct ipmi_smi *intf)
{
- struct ipmi_smi_msg *smi_msg;
- unsigned long flags = 0;
- int rv;
- int run_to_completion = intf->run_to_completion;
+ struct ipmi_smi_msg *smi_msg;
+ unsigned long flags = 0;
+ int rv;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
/* See if any waiting messages need to be processed. */
if (!run_to_completion)
@@ -4790,31 +4792,15 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
}
if (!run_to_completion)
spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
-
- /*
- * If the pretimout count is non-zero, decrement one from it and
- * deliver pretimeouts to all the users.
- */
- if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
- struct ipmi_user *user;
- int index;
-
- index = srcu_read_lock(&intf->users_srcu);
- list_for_each_entry_rcu(user, &intf->users, link) {
- if (user->handler->ipmi_watchdog_pretimeout)
- user->handler->ipmi_watchdog_pretimeout(
- user->handler_data);
- }
- srcu_read_unlock(&intf->users_srcu, index);
- }
}
-static void smi_recv_work(struct work_struct *t)
+static void smi_work(struct work_struct *t)
{
unsigned long flags = 0; /* keep us warning-free. */
- struct ipmi_smi *intf = from_work(intf, t, recv_work);
- int run_to_completion = intf->run_to_completion;
+ struct ipmi_smi *intf = from_work(intf, t, smi_work);
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
struct ipmi_smi_msg *newmsg = NULL;
+ struct ipmi_recv_msg *msg, *msg2;
/*
* Start the next message if available.
@@ -4824,8 +4810,6 @@ static void smi_recv_work(struct work_struct *t)
* message delivery.
*/
- rcu_read_lock();
-
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
if (intf->curr_msg == NULL && !intf->in_shutdown) {
@@ -4843,15 +4827,57 @@ static void smi_recv_work(struct work_struct *t)
intf->curr_msg = newmsg;
}
}
-
if (!run_to_completion)
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+
if (newmsg)
intf->handlers->sender(intf->send_info, newmsg);
- rcu_read_unlock();
-
handle_new_recv_msgs(intf);
+
+ /* Nothing below applies during panic time. */
+ if (run_to_completion)
+ return;
+
+ /*
+	 * If the pretimeout count is non-zero, decrement one from it and
+ * deliver pretimeouts to all the users.
+ */
+ if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
+ struct ipmi_user *user;
+
+ mutex_lock(&intf->users_mutex);
+ list_for_each_entry(user, &intf->users, link) {
+ if (user->handler->ipmi_watchdog_pretimeout)
+ user->handler->ipmi_watchdog_pretimeout(
+ user->handler_data);
+ }
+ mutex_unlock(&intf->users_mutex);
+ }
+
+ /*
+ * Freeing the message can cause a user to be released, which
+ * can then cause the interface to be freed. Make sure that
+ * doesn't happen until we are ready.
+ */
+ kref_get(&intf->refcount);
+
+ mutex_lock(&intf->user_msgs_mutex);
+ list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
+ struct ipmi_user *user = msg->user;
+
+ list_del(&msg->link);
+
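+		/*
+		 * A destroyed user's handler is gone, so just drop the
+		 * message; otherwise deliver it to the user's handler.
+		 */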
+ if (refcount_read(&user->destroyed) == 0) {
+ ipmi_free_recv_msg(msg);
+ } else {
+ atomic_dec(&user->nr_msgs);
+ user->handler->ipmi_recv_hndl(msg, user->handler_data);
+ }
+ }
+ mutex_unlock(&intf->user_msgs_mutex);
+
+ kref_put(&intf->refcount, intf_free);
}
/* Handle a new message from the lower layer. */
@@ -4859,7 +4885,7 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
unsigned long flags = 0; /* keep us warning-free. */
- int run_to_completion = intf->run_to_completion;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
/*
* To preserve message order, we keep a queue and deliver from
@@ -4884,9 +4910,9 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
if (run_to_completion)
- smi_recv_work(&intf->recv_work);
+ smi_work(&intf->smi_work);
else
- queue_work(system_bh_wq, &intf->recv_work);
+ queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
@@ -4896,7 +4922,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
return;
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
- queue_work(system_bh_wq, &intf->recv_work);
+ queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
@@ -5065,7 +5091,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
flags);
}
- queue_work(system_bh_wq, &intf->recv_work);
+ queue_work(system_wq, &intf->smi_work);
return need_timer;
}
@@ -5084,17 +5110,19 @@ static struct timer_list ipmi_timer;
static atomic_t stop_operation;
-static void ipmi_timeout(struct timer_list *unused)
+static void ipmi_timeout_work(struct work_struct *work)
{
struct ipmi_smi *intf;
bool need_timer = false;
- int index;
if (atomic_read(&stop_operation))
return;
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
if (atomic_read(&intf->event_waiters)) {
intf->ticks_to_req_ev--;
if (intf->ticks_to_req_ev == 0) {
@@ -5106,12 +5134,22 @@ static void ipmi_timeout(struct timer_list *unused)
need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
}
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
+ mutex_unlock(&ipmi_interfaces_mutex);
if (need_timer)
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
+static DECLARE_WORK(ipmi_timer_work, ipmi_timeout_work);
+
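+/*
+ * The timer runs in atomic context and cannot take
+ * ipmi_interfaces_mutex, so it just queues the work item above.
+ */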
+static void ipmi_timeout(struct timer_list *unused)
+{
+ if (atomic_read(&stop_operation))
+ return;
+
+ queue_work(system_wq, &ipmi_timer_work);
+}
+
static void need_waiter(struct ipmi_smi *intf)
{
/* Racy, but worst case we start the timer twice. */
@@ -5168,7 +5206,7 @@ static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
if (msg->user && !oops_in_progress)
- kref_put(&msg->user->refcount, free_user);
+ kref_put(&msg->user->refcount, free_ipmi_user);
msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
@@ -5188,9 +5226,9 @@ static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
/*
* Inside a panic, send a message and wait for a response.
*/
-static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
- struct ipmi_addr *addr,
- struct kernel_ipmi_msg *msg)
+static void _ipmi_panic_request_and_wait(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg)
{
struct ipmi_smi_msg smi_msg;
struct ipmi_recv_msg recv_msg;
@@ -5220,6 +5258,15 @@ static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
ipmi_poll(intf);
}
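+/*
+ * Panic-time request on behalf of a user: force the interface into
+ * run-to-completion mode, then send the message and poll until the
+ * response arrives.
+ */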
+void ipmi_panic_request_and_wait(struct ipmi_user *user,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg)
+{
+ user->intf->run_to_completion = 1;
+ _ipmi_panic_request_and_wait(user->intf, addr, msg);
+}
+EXPORT_SYMBOL(ipmi_panic_request_and_wait);
+
static void event_receiver_fetcher(struct ipmi_smi *intf,
struct ipmi_recv_msg *msg)
{
@@ -5288,7 +5335,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
}
/* Send the event announcing the panic. */
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
/*
* On every interface, dump a bunch of OEM event holding the
@@ -5324,7 +5371,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = device_id_fetcher;
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
if (intf->local_event_generator) {
/* Request the event receiver from the local MC. */
@@ -5333,7 +5380,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = event_receiver_fetcher;
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
}
intf->null_user_handler = NULL;
@@ -5385,7 +5432,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
memcpy_and_pad(data+5, 11, p, size, '\0');
p += size;
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
}
}
@@ -5403,7 +5450,7 @@ static int panic_event(struct notifier_block *this,
has_panicked = 1;
/* For every registered interface, set it to run to completion. */
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
if (!intf->handlers || intf->intf_num == -1)
/* Interface is not ready. */
continue;
@@ -5433,7 +5480,7 @@ static int panic_event(struct notifier_block *this,
intf->handlers->set_run_to_completion(intf->send_info,
1);
- list_for_each_entry_rcu(user, &intf->users, link) {
+ list_for_each_entry(user, &intf->users, link) {
if (user->handler->ipmi_panic_handler)
user->handler->ipmi_panic_handler(
user->handler_data);
@@ -5478,15 +5525,11 @@ static int ipmi_init_msghandler(void)
if (initialized)
goto out;
- rv = init_srcu_struct(&ipmi_interfaces_srcu);
- if (rv)
- goto out;
-
- remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
- if (!remove_work_wq) {
+ bmc_remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
+ if (!bmc_remove_work_wq) {
pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
rv = -ENOMEM;
- goto out_wq;
+ goto out;
}
timer_setup(&ipmi_timer, ipmi_timeout, 0);
@@ -5496,9 +5539,6 @@ static int ipmi_init_msghandler(void)
initialized = true;
-out_wq:
- if (rv)
- cleanup_srcu_struct(&ipmi_interfaces_srcu);
out:
mutex_unlock(&ipmi_interfaces_mutex);
return rv;
@@ -5522,7 +5562,7 @@ static void __exit cleanup_ipmi(void)
int count;
if (initialized) {
- destroy_workqueue(remove_work_wq);
+ destroy_workqueue(bmc_remove_work_wq);
atomic_notifier_chain_unregister(&panic_notifier_list,
&panic_block);
@@ -5539,6 +5579,7 @@ static void __exit cleanup_ipmi(void)
*/
atomic_set(&stop_operation, 1);
timer_delete_sync(&ipmi_timer);
+ cancel_work_sync(&ipmi_timer_work);
initialized = false;
@@ -5549,8 +5590,6 @@ static void __exit cleanup_ipmi(void)
count = atomic_read(&recv_msg_inuse_count);
if (count != 0)
pr_warn("recv message count %d at exit\n", count);
-
- cleanup_srcu_struct(&ipmi_interfaces_srcu);
}
if (drvregistered)
driver_unregister(&ipmidriver.driver);
diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
index a7ead2a4c753..508c3fd45877 100644
--- a/drivers/char/ipmi/ipmi_si.h
+++ b/drivers/char/ipmi/ipmi_si.h
@@ -26,6 +26,14 @@ enum si_type {
/* Array is defined in the ipmi_si_intf.c */
extern const char *const si_to_str[];
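+/*
+ * Wrap the interface type in a structure so probe code can hand
+ * around a const pointer (e.g. as OF/ACPI match data) instead of
+ * casting an enum.
+ */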
+struct ipmi_match_info {
+ enum si_type type;
+};
+
+extern const struct ipmi_match_info ipmi_kcs_si_info;
+extern const struct ipmi_match_info ipmi_smic_si_info;
+extern const struct ipmi_match_info ipmi_bt_si_info;
+
enum ipmi_addr_space {
IPMI_IO_ADDR_SPACE, IPMI_MEM_ADDR_SPACE
};
@@ -64,7 +72,7 @@ struct si_sm_io {
void (*irq_cleanup)(struct si_sm_io *io);
u8 slave_addr;
- enum si_type si_type;
+ const struct ipmi_match_info *si_info;
struct device *dev;
};
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 12b0b77eb1cc..7fe891783a37 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -73,6 +73,10 @@ enum si_intf_state {
/* 'invalid' to allow a firmware-specified interface to be disabled */
const char *const si_to_str[] = { "invalid", "kcs", "smic", "bt", NULL };
+const struct ipmi_match_info ipmi_kcs_si_info = { .type = SI_KCS };
+const struct ipmi_match_info ipmi_smic_si_info = { .type = SI_SMIC };
+const struct ipmi_match_info ipmi_bt_si_info = { .type = SI_BT };
+
static bool initialized;
/*
@@ -692,7 +696,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
break;
}
enables = current_global_enables(smi_info, 0, &irq_on);
- if (smi_info->io.si_type == SI_BT)
+ if (smi_info->io.si_info->type == SI_BT)
/* BT has its own interrupt enable bit. */
check_bt_irq(smi_info, irq_on);
if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
@@ -1119,7 +1123,7 @@ irqreturn_t ipmi_si_irq_handler(int irq, void *data)
struct smi_info *smi_info = data;
unsigned long flags;
- if (smi_info->io.si_type == SI_BT)
+ if (smi_info->io.si_info->type == SI_BT)
/* We need to clear the IRQ flag for the BT interface. */
smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_CLEAR_IRQ_BIT
@@ -1164,7 +1168,7 @@ static int smi_start_processing(void *send_info,
* The BT interface is efficient enough to not need a thread,
* and there is no need for a thread if we have interrupts.
*/
- else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
+ else if (new_smi->io.si_info->type != SI_BT && !new_smi->io.irq)
enable = 1;
if (enable) {
@@ -1235,7 +1239,7 @@ MODULE_PARM_DESC(kipmid_max_busy_us,
void ipmi_irq_finish_setup(struct si_sm_io *io)
{
- if (io->si_type == SI_BT)
+ if (io->si_info->type == SI_BT)
/* Enable the interrupt in the BT interface. */
io->outputb(io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
@@ -1243,7 +1247,7 @@ void ipmi_irq_finish_setup(struct si_sm_io *io)
void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
- if (io->si_type == SI_BT)
+ if (io->si_info->type == SI_BT)
/* Disable the interrupt in the BT interface. */
io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}
@@ -1614,7 +1618,7 @@ static ssize_t type_show(struct device *dev,
{
struct smi_info *smi_info = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_type]);
+ return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_info->type]);
}
static DEVICE_ATTR_RO(type);
@@ -1649,7 +1653,7 @@ static ssize_t params_show(struct device *dev,
return sysfs_emit(buf,
"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
- si_to_str[smi_info->io.si_type],
+ si_to_str[smi_info->io.si_info->type],
addr_space_to_str[smi_info->io.addr_space],
smi_info->io.addr_data,
smi_info->io.regspacing,
@@ -1803,7 +1807,7 @@ setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
struct ipmi_device_id *id = &smi_info->device_id;
if (id->manufacturer_id == DELL_IANA_MFR_ID &&
- smi_info->io.si_type == SI_BT)
+ smi_info->io.si_info->type == SI_BT)
register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}
@@ -1907,13 +1911,13 @@ int ipmi_si_add_smi(struct si_sm_io *io)
/* We prefer ACPI over SMBIOS. */
dev_info(dup->io.dev,
"Removing SMBIOS-specified %s state machine in favor of ACPI\n",
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
cleanup_one_si(dup);
} else {
dev_info(new_smi->io.dev,
"%s-specified %s state machine: duplicate\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
rv = -EBUSY;
kfree(new_smi);
goto out_err;
@@ -1922,7 +1926,7 @@ int ipmi_si_add_smi(struct si_sm_io *io)
pr_info("Adding %s-specified %s state machine\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
list_add_tail(&new_smi->link, &smi_infos);
@@ -1945,12 +1949,12 @@ static int try_smi_init(struct smi_info *new_smi)
pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
- si_to_str[new_smi->io.si_type],
+ si_to_str[new_smi->io.si_info->type],
addr_space_to_str[new_smi->io.addr_space],
new_smi->io.addr_data,
new_smi->io.slave_addr, new_smi->io.irq);
- switch (new_smi->io.si_type) {
+ switch (new_smi->io.si_info->type) {
case SI_KCS:
new_smi->handlers = &kcs_smi_handlers;
break;
@@ -2073,7 +2077,7 @@ static int try_smi_init(struct smi_info *new_smi)
smi_num++;
dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
WARN_ON(new_smi->io.dev->init_name != NULL);
@@ -2091,9 +2095,18 @@ static int try_smi_init(struct smi_info *new_smi)
return rv;
}
+/*
+ * Devices in the same address space at the same address are the same.
+ */
+static bool __init ipmi_smi_info_same(struct smi_info *e1, struct smi_info *e2)
+{
+ return (e1->io.addr_space == e2->io.addr_space &&
+ e1->io.addr_data == e2->io.addr_data);
+}
+
static int __init init_ipmi_si(void)
{
- struct smi_info *e;
+ struct smi_info *e, *e2;
enum ipmi_addr_src type = SI_INVALID;
if (initialized)
@@ -2109,37 +2122,70 @@ static int __init init_ipmi_si(void)
ipmi_si_parisc_init();
- /* We prefer devices with interrupts, but in the case of a machine
- with multiple BMCs we assume that there will be several instances
- of a given type so if we succeed in registering a type then also
- try to register everything else of the same type */
mutex_lock(&smi_infos_lock);
+
+ /*
+ * Scan through all the devices. We prefer devices with
+ * interrupts, so go through those first in case there are any
+ * duplicates that don't have the interrupt set.
+ */
list_for_each_entry(e, &smi_infos, link) {
- /* Try to register a device if it has an IRQ and we either
- haven't successfully registered a device yet or this
- device has the same type as one we successfully registered */
- if (e->io.irq && (!type || e->io.addr_source == type)) {
- if (!try_smi_init(e)) {
- type = e->io.addr_source;
+ bool dup = false;
+
+ /* Register ones with interrupts first. */
+ if (!e->io.irq)
+ continue;
+
+ /*
+ * Go through the ones we have already seen to see if this
+ * is a dup.
+ */
+ list_for_each_entry(e2, &smi_infos, link) {
+ if (e2 == e)
+ break;
+ if (e2->io.irq && ipmi_smi_info_same(e, e2)) {
+ dup = true;
+ break;
}
}
+ if (!dup)
+ try_smi_init(e);
}
- /* type will only have been set if we successfully registered an si */
- if (type)
- goto skip_fallback_noirq;
+ /*
+ * Now try devices without interrupts.
+ */
+ list_for_each_entry(e, &smi_infos, link) {
+ bool dup = false;
- /* Fall back to the preferred device */
+ if (e->io.irq)
+ continue;
- list_for_each_entry(e, &smi_infos, link) {
- if (!e->io.irq && (!type || e->io.addr_source == type)) {
- if (!try_smi_init(e)) {
- type = e->io.addr_source;
+ /*
+ * Go through the ones we have already seen to see if
+ * this is a dup. We have already looked at the ones
+ * with interrupts.
+ */
+ list_for_each_entry(e2, &smi_infos, link) {
+ if (!e2->io.irq)
+ continue;
+ if (ipmi_smi_info_same(e, e2)) {
+ dup = true;
+ break;
+ }
+ }
+ list_for_each_entry(e2, &smi_infos, link) {
+ if (e2 == e)
+ break;
+ if (ipmi_smi_info_same(e, e2)) {
+ dup = true;
+ break;
}
}
+ if (!dup)
+ try_smi_init(e);
}
-skip_fallback_noirq:
initialized = true;
mutex_unlock(&smi_infos_lock);
@@ -2267,7 +2313,7 @@ struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
if (e->io.addr_space != addr_space)
continue;
- if (e->io.si_type != si_type)
+ if (e->io.si_info->type != si_type)
continue;
if (e->io.addr_data == addr) {
dev = get_device(e->io.dev);
diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c
index 2be2967f6b5f..3b0a70d9adbb 100644
--- a/drivers/char/ipmi/ipmi_si_parisc.c
+++ b/drivers/char/ipmi/ipmi_si_parisc.c
@@ -13,7 +13,7 @@ static int __init ipmi_parisc_probe(struct parisc_device *dev)
memset(&io, 0, sizeof(io));
- io.si_type = SI_KCS;
+ io.si_info = &ipmi_kcs_si_info;
io.addr_source = SI_DEVICETREE;
io.addr_space = IPMI_MEM_ADDR_SPACE;
io.addr_data = dev->hpa.start;
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
index 8c0ea637aba0..17f72763322d 100644
--- a/drivers/char/ipmi/ipmi_si_pci.c
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -23,30 +23,32 @@ MODULE_PARM_DESC(trypci,
static int ipmi_pci_probe_regspacing(struct si_sm_io *io)
{
- if (io->si_type == SI_KCS) {
- unsigned char status;
- int regspacing;
-
- io->regsize = DEFAULT_REGSIZE;
- io->regshift = 0;
-
- /* detect 1, 4, 16byte spacing */
- for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
- io->regspacing = regspacing;
- if (io->io_setup(io)) {
- dev_err(io->dev, "Could not setup I/O space\n");
- return DEFAULT_REGSPACING;
- }
- /* write invalid cmd */
- io->outputb(io, 1, 0x10);
- /* read status back */
- status = io->inputb(io, 1);
- io->io_cleanup(io);
- if (status)
- return regspacing;
- regspacing *= 4;
+ unsigned char status;
+ int regspacing;
+
+ if (io->si_info->type != SI_KCS)
+ return DEFAULT_REGSPACING;
+
+ io->regsize = DEFAULT_REGSIZE;
+ io->regshift = 0;
+
+ /* detect 1, 4, 16byte spacing */
+ for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
+ io->regspacing = regspacing;
+ if (io->io_setup(io)) {
+ dev_err(io->dev, "Could not setup I/O space\n");
+ return DEFAULT_REGSPACING;
}
+ /* write invalid cmd */
+ io->outputb(io, 1, 0x10);
+ /* read status back */
+ status = io->inputb(io, 1);
+ io->io_cleanup(io);
+ if (status)
+ return regspacing;
+ regspacing *= 4;
}
+
return DEFAULT_REGSPACING;
}
@@ -74,15 +76,15 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
switch (pdev->class) {
case PCI_CLASS_SERIAL_IPMI_SMIC:
- io.si_type = SI_SMIC;
+ io.si_info = &ipmi_smic_si_info;
break;
case PCI_CLASS_SERIAL_IPMI_KCS:
- io.si_type = SI_KCS;
+ io.si_info = &ipmi_kcs_si_info;
break;
case PCI_CLASS_SERIAL_IPMI_BT:
- io.si_type = SI_BT;
+ io.si_info = &ipmi_bt_si_info;
break;
default:
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index 550cabd43ae6..fb6e359ae494 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -163,9 +163,13 @@ static int platform_ipmi_probe(struct platform_device *pdev)
switch (type) {
case SI_KCS:
+ io.si_info = &ipmi_kcs_si_info;
+ break;
case SI_SMIC:
+ io.si_info = &ipmi_smic_si_info;
+ break;
case SI_BT:
- io.si_type = type;
+ io.si_info = &ipmi_bt_si_info;
break;
case SI_TYPE_INVALID: /* User disabled this in hardcode. */
return -ENODEV;
@@ -213,13 +217,10 @@ static int platform_ipmi_probe(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id of_ipmi_match[] = {
- { .type = "ipmi", .compatible = "ipmi-kcs",
- .data = (void *)(unsigned long) SI_KCS },
- { .type = "ipmi", .compatible = "ipmi-smic",
- .data = (void *)(unsigned long) SI_SMIC },
- { .type = "ipmi", .compatible = "ipmi-bt",
- .data = (void *)(unsigned long) SI_BT },
- {},
+ { .type = "ipmi", .compatible = "ipmi-kcs", .data = &ipmi_kcs_si_info },
+ { .type = "ipmi", .compatible = "ipmi-smic", .data = &ipmi_smic_si_info },
+ { .type = "ipmi", .compatible = "ipmi-bt", .data = &ipmi_bt_si_info },
+ {}
};
MODULE_DEVICE_TABLE(of, of_ipmi_match);
@@ -265,7 +266,7 @@ static int of_ipmi_probe(struct platform_device *pdev)
}
memset(&io, 0, sizeof(io));
- io.si_type = (enum si_type)device_get_match_data(&pdev->dev);
+ io.si_info = device_get_match_data(&pdev->dev);
io.addr_source = SI_DEVICETREE;
io.irq_setup = ipmi_std_irq_setup;
@@ -296,7 +297,7 @@ static int find_slave_address(struct si_sm_io *io, int slave_addr)
{
#ifdef CONFIG_IPMI_DMI_DECODE
if (!slave_addr)
- slave_addr = ipmi_dmi_get_slave_addr(io->si_type,
+ slave_addr = ipmi_dmi_get_slave_addr(io->si_info->type,
io->addr_space,
io->addr_data);
#endif
@@ -335,13 +336,13 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
switch (tmp) {
case 1:
- io.si_type = SI_KCS;
+ io.si_info = &ipmi_kcs_si_info;
break;
case 2:
- io.si_type = SI_SMIC;
+ io.si_info = &ipmi_smic_si_info;
break;
case 3:
- io.si_type = SI_BT;
+ io.si_info = &ipmi_bt_si_info;
break;
case 4: /* SSIF, just ignore */
return -ENODEV;
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 0b45b07dec22..5bf038e620c7 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -481,8 +481,6 @@ static int ipmi_ssif_thread(void *data)
/* Wait for something to do */
result = wait_for_completion_interruptible(
&ssif_info->wake_thread);
- if (ssif_info->stopping)
- break;
if (result == -ERESTARTSYS)
continue;
init_completion(&ssif_info->wake_thread);
@@ -1270,10 +1268,8 @@ static void shutdown_ssif(void *send_info)
ssif_info->stopping = true;
timer_delete_sync(&ssif_info->watch_timer);
timer_delete_sync(&ssif_info->retry_timer);
- if (ssif_info->thread) {
- complete(&ssif_info->wake_thread);
+ if (ssif_info->thread)
kthread_stop(ssif_info->thread);
- }
}
static void ssif_remove(struct i2c_client *client)
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index f1875b2bebbc..ab759b492fdd 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -150,7 +150,7 @@ static char preaction[16] = "pre_none";
static unsigned char preop_val = WDOG_PREOP_NONE;
static char preop[16] = "preop_none";
-static DEFINE_SPINLOCK(ipmi_read_lock);
+static DEFINE_MUTEX(ipmi_read_mutex);
static char data_to_read;
static DECLARE_WAIT_QUEUE_HEAD(read_q);
static struct fasync_struct *fasync_q;
@@ -363,7 +363,7 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
{
struct kernel_ipmi_msg msg;
unsigned char data[6];
- int rv;
+ int rv = 0;
struct ipmi_system_interface_addr addr;
int hbnow = 0;
@@ -405,14 +405,18 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
msg.cmd = IPMI_WDOG_SET_TIMER;
msg.data = data;
msg.data_len = sizeof(data);
- rv = ipmi_request_supply_msgs(watchdog_user,
- (struct ipmi_addr *) &addr,
- 0,
- &msg,
- NULL,
- smi_msg,
- recv_msg,
- 1);
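+	/*
+	 * A NULL smi_msg means we are at panic/halt time; use the
+	 * polled panic request instead of queueing a message.
+	 */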
+ if (smi_msg)
+ rv = ipmi_request_supply_msgs(watchdog_user,
+ (struct ipmi_addr *) &addr,
+ 0,
+ &msg,
+ NULL,
+ smi_msg,
+ recv_msg,
+ 1);
+ else
+ ipmi_panic_request_and_wait(watchdog_user,
+ (struct ipmi_addr *) &addr, &msg);
if (rv)
pr_warn("set timeout error: %d\n", rv);
else if (send_heartbeat_now)
@@ -431,9 +435,7 @@ static int _ipmi_set_timeout(int do_heartbeat)
atomic_set(&msg_tofree, 2);
- rv = __ipmi_set_timeout(&smi_msg,
- &recv_msg,
- &send_heartbeat_now);
+ rv = __ipmi_set_timeout(&smi_msg, &recv_msg, &send_heartbeat_now);
if (rv) {
atomic_set(&msg_tofree, 0);
return rv;
@@ -460,27 +462,10 @@ static int ipmi_set_timeout(int do_heartbeat)
return rv;
}
-static atomic_t panic_done_count = ATOMIC_INIT(0);
-
-static void panic_smi_free(struct ipmi_smi_msg *msg)
-{
- atomic_dec(&panic_done_count);
-}
-static void panic_recv_free(struct ipmi_recv_msg *msg)
-{
- atomic_dec(&panic_done_count);
-}
-
-static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg =
- INIT_IPMI_SMI_MSG(panic_smi_free);
-static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg =
- INIT_IPMI_RECV_MSG(panic_recv_free);
-
static void panic_halt_ipmi_heartbeat(void)
{
struct kernel_ipmi_msg msg;
struct ipmi_system_interface_addr addr;
- int rv;
/*
* Don't reset the timer if we have the timer turned off, that
@@ -497,24 +482,10 @@ static void panic_halt_ipmi_heartbeat(void)
msg.cmd = IPMI_WDOG_RESET_TIMER;
msg.data = NULL;
msg.data_len = 0;
- atomic_add(2, &panic_done_count);
- rv = ipmi_request_supply_msgs(watchdog_user,
- (struct ipmi_addr *) &addr,
- 0,
- &msg,
- NULL,
- &panic_halt_heartbeat_smi_msg,
- &panic_halt_heartbeat_recv_msg,
- 1);
- if (rv)
- atomic_sub(2, &panic_done_count);
+ ipmi_panic_request_and_wait(watchdog_user, (struct ipmi_addr *) &addr,
+ &msg);
}
-static struct ipmi_smi_msg panic_halt_smi_msg =
- INIT_IPMI_SMI_MSG(panic_smi_free);
-static struct ipmi_recv_msg panic_halt_recv_msg =
- INIT_IPMI_RECV_MSG(panic_recv_free);
-
/*
* Special call, doesn't claim any locks. This is only to be called
* at panic or halt time, in run-to-completion mode, when the caller
@@ -526,22 +497,13 @@ static void panic_halt_ipmi_set_timeout(void)
int send_heartbeat_now;
int rv;
- /* Wait for the messages to be free. */
- while (atomic_read(&panic_done_count) != 0)
- ipmi_poll_interface(watchdog_user);
- atomic_add(2, &panic_done_count);
- rv = __ipmi_set_timeout(&panic_halt_smi_msg,
- &panic_halt_recv_msg,
- &send_heartbeat_now);
+ rv = __ipmi_set_timeout(NULL, NULL, &send_heartbeat_now);
if (rv) {
- atomic_sub(2, &panic_done_count);
pr_warn("Unable to extend the watchdog timeout\n");
} else {
if (send_heartbeat_now)
panic_halt_ipmi_heartbeat();
}
- while (atomic_read(&panic_done_count) != 0)
- ipmi_poll_interface(watchdog_user);
}
static int __ipmi_heartbeat(void)
@@ -793,7 +755,7 @@ static ssize_t ipmi_read(struct file *file,
* Reading returns if the pretimeout has gone off, and it only does
* it once per pretimeout.
*/
- spin_lock_irq(&ipmi_read_lock);
+ mutex_lock(&ipmi_read_mutex);
if (!data_to_read) {
if (file->f_flags & O_NONBLOCK) {
rv = -EAGAIN;
@@ -804,9 +766,9 @@ static ssize_t ipmi_read(struct file *file,
add_wait_queue(&read_q, &wait);
while (!data_to_read && !signal_pending(current)) {
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irq(&ipmi_read_lock);
+ mutex_unlock(&ipmi_read_mutex);
schedule();
- spin_lock_irq(&ipmi_read_lock);
+ mutex_lock(&ipmi_read_mutex);
}
remove_wait_queue(&read_q, &wait);
@@ -818,7 +780,7 @@ static ssize_t ipmi_read(struct file *file,
data_to_read = 0;
out:
- spin_unlock_irq(&ipmi_read_lock);
+ mutex_unlock(&ipmi_read_mutex);
if (rv == 0) {
if (copy_to_user(buf, &data_to_read, 1))
@@ -856,10 +818,10 @@ static __poll_t ipmi_poll(struct file *file, poll_table *wait)
poll_wait(file, &read_q, wait);
- spin_lock_irq(&ipmi_read_lock);
+ mutex_lock(&ipmi_read_mutex);
if (data_to_read)
mask |= (EPOLLIN | EPOLLRDNORM);
- spin_unlock_irq(&ipmi_read_lock);
+ mutex_unlock(&ipmi_read_mutex);
return mask;
}
@@ -932,13 +894,11 @@ static void ipmi_wdog_pretimeout_handler(void *handler_data)
if (atomic_inc_and_test(&preop_panic_excl))
panic("Watchdog pre-timeout");
} else if (preop_val == WDOG_PREOP_GIVE_DATA) {
- unsigned long flags;
-
- spin_lock_irqsave(&ipmi_read_lock, flags);
+ mutex_lock(&ipmi_read_mutex);
data_to_read = 1;
wake_up_interruptible(&read_q);
kill_fasync(&fasync_q, SIGIO, POLL_IN);
- spin_unlock_irqrestore(&ipmi_read_lock, flags);
+ mutex_unlock(&ipmi_read_mutex);
}
}
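
The spinlock-to-mutex conversion above is safe because every user of ipmi_read_mutex now runs in sleepable process context; the reader simply drops the mutex around schedule(). A minimal, self-contained sketch of that open-coded wait-queue pattern (illustrative names, not the driver's own):

/* Sketch of the mutex + wait-queue pattern used in ipmi_read() above.
 * All identifiers here are illustrative, not taken from the driver.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>

static DEFINE_MUTEX(demo_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_q);
static int demo_ready;

static int demo_wait(void)
{
	DECLARE_WAITQUEUE(wait, current);
	int rv = 0;

	mutex_lock(&demo_lock);
	add_wait_queue(&demo_q, &wait);
	while (!demo_ready && !signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&demo_lock);	/* let the producer in */
		schedule();
		mutex_lock(&demo_lock);
	}
	remove_wait_queue(&demo_q, &wait);
	__set_current_state(TASK_RUNNING);
	if (signal_pending(current))
		rv = -ERESTARTSYS;
	mutex_unlock(&demo_lock);
	return rv;
}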
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 169eed162a7f..48839958b0b1 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -61,29 +61,11 @@ static inline int page_is_allowed(unsigned long pfn)
{
return devmem_is_allowed(pfn);
}
-static inline int range_is_allowed(unsigned long pfn, unsigned long size)
-{
- u64 from = ((u64)pfn) << PAGE_SHIFT;
- u64 to = from + size;
- u64 cursor = from;
-
- while (cursor < to) {
- if (!devmem_is_allowed(pfn))
- return 0;
- cursor += PAGE_SIZE;
- pfn++;
- }
- return 1;
-}
#else
static inline int page_is_allowed(unsigned long pfn)
{
return 1;
}
-static inline int range_is_allowed(unsigned long pfn, unsigned long size)
-{
- return 1;
-}
#endif
static inline bool should_stop_iteration(void)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 38f2fab29c56..b8b24b6ed3fe 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -309,11 +309,11 @@ static void crng_reseed(struct work_struct *work)
* key value, at index 4, so the state should always be zeroed out
* immediately after using in order to maintain forward secrecy.
* If the state cannot be erased in a timely manner, then it is
- * safer to set the random_data parameter to &chacha_state[4] so
- * that this function overwrites it before returning.
+ * safer to set the random_data parameter to &chacha_state->x[4]
+ * so that this function overwrites it before returning.
*/
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
- u32 chacha_state[CHACHA_STATE_WORDS],
+ struct chacha_state *chacha_state,
u8 *random_data, size_t random_data_len)
{
u8 first_block[CHACHA_BLOCK_SIZE];
@@ -321,8 +321,8 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
BUG_ON(random_data_len > 32);
chacha_init_consts(chacha_state);
- memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
- memset(&chacha_state[12], 0, sizeof(u32) * 4);
+ memcpy(&chacha_state->x[4], key, CHACHA_KEY_SIZE);
+ memset(&chacha_state->x[12], 0, sizeof(u32) * 4);
chacha20_block(chacha_state, first_block);
memcpy(key, first_block, CHACHA_KEY_SIZE);
@@ -335,7 +335,7 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
* random data. It also returns up to 32 bytes on its own of random data
* that may be used; random_data_len may not be greater than 32.
*/
-static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
+static void crng_make_state(struct chacha_state *chacha_state,
u8 *random_data, size_t random_data_len)
{
unsigned long flags;
@@ -395,7 +395,7 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
static void _get_random_bytes(void *buf, size_t len)
{
- u32 chacha_state[CHACHA_STATE_WORDS];
+ struct chacha_state chacha_state;
u8 tmp[CHACHA_BLOCK_SIZE];
size_t first_block_len;
@@ -403,26 +403,26 @@ static void _get_random_bytes(void *buf, size_t len)
return;
first_block_len = min_t(size_t, 32, len);
- crng_make_state(chacha_state, buf, first_block_len);
+ crng_make_state(&chacha_state, buf, first_block_len);
len -= first_block_len;
buf += first_block_len;
while (len) {
if (len < CHACHA_BLOCK_SIZE) {
- chacha20_block(chacha_state, tmp);
+ chacha20_block(&chacha_state, tmp);
memcpy(buf, tmp, len);
memzero_explicit(tmp, sizeof(tmp));
break;
}
- chacha20_block(chacha_state, buf);
- if (unlikely(chacha_state[12] == 0))
- ++chacha_state[13];
+ chacha20_block(&chacha_state, buf);
+ if (unlikely(chacha_state.x[12] == 0))
+ ++chacha_state.x[13];
len -= CHACHA_BLOCK_SIZE;
buf += CHACHA_BLOCK_SIZE;
}
- memzero_explicit(chacha_state, sizeof(chacha_state));
+ chacha_zeroize_state(&chacha_state);
}
/*
@@ -441,7 +441,7 @@ EXPORT_SYMBOL(get_random_bytes);
static ssize_t get_random_bytes_user(struct iov_iter *iter)
{
- u32 chacha_state[CHACHA_STATE_WORDS];
+ struct chacha_state chacha_state;
u8 block[CHACHA_BLOCK_SIZE];
size_t ret = 0, copied;
@@ -453,21 +453,22 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter)
* bytes, in case userspace causes copy_to_iter() below to sleep
* forever, so that we still retain forward secrecy in that case.
*/
- crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
+ crng_make_state(&chacha_state, (u8 *)&chacha_state.x[4],
+ CHACHA_KEY_SIZE);
/*
* However, if we're doing a read of len <= 32, we don't need to
* use chacha_state after, so we can simply return those bytes to
* the user directly.
*/
if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
- ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
+ ret = copy_to_iter(&chacha_state.x[4], CHACHA_KEY_SIZE, iter);
goto out_zero_chacha;
}
for (;;) {
- chacha20_block(chacha_state, block);
- if (unlikely(chacha_state[12] == 0))
- ++chacha_state[13];
+ chacha20_block(&chacha_state, block);
+ if (unlikely(chacha_state.x[12] == 0))
+ ++chacha_state.x[13];
copied = copy_to_iter(block, sizeof(block), iter);
ret += copied;
@@ -484,7 +485,7 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter)
memzero_explicit(block, sizeof(block));
out_zero_chacha:
- memzero_explicit(chacha_state, sizeof(chacha_state));
+ chacha_zeroize_state(&chacha_state);
return ret ? ret : -EFAULT;
}
@@ -726,6 +727,7 @@ static void __cold _credit_init_bits(size_t bits)
static DECLARE_WORK(set_ready, crng_set_ready);
unsigned int new, orig, add;
unsigned long flags;
+ int m;
if (!bits)
return;
@@ -748,9 +750,9 @@ static void __cold _credit_init_bits(size_t bits)
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
pr_notice("crng init done\n");
- if (urandom_warning.missed)
- pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
- urandom_warning.missed);
+ m = ratelimit_state_get_miss(&urandom_warning);
+ if (m)
+ pr_notice("%d urandom warning(s) missed due to ratelimiting\n", m);
} else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
spin_lock_irqsave(&base_crng.lock, flags);
/* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
@@ -1311,9 +1313,9 @@ static void __cold try_to_generate_entropy(void)
while (!crng_ready() && !signal_pending(current)) {
/*
* Check !timer_pending() and then ensure that any previous callback has finished
- * executing by checking try_to_del_timer_sync(), before queueing the next one.
+ * executing by checking timer_delete_sync_try(), before queueing the next one.
*/
- if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) {
+ if (!timer_pending(&stack->timer) && timer_delete_sync_try(&stack->timer) >= 0) {
struct cpumask timer_cpus;
unsigned int num_cpus;
@@ -1353,7 +1355,7 @@ static void __cold try_to_generate_entropy(void)
mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
timer_delete_sync(&stack->timer);
- destroy_timer_on_stack(&stack->timer);
+ timer_destroy_on_stack(&stack->timer);
}
@@ -1466,7 +1468,7 @@ static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
if (!crng_ready()) {
if (!ratelimit_disable && maxwarn <= 0)
- ++urandom_warning.missed;
+ ratelimit_state_inc_miss(&urandom_warning);
else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
--maxwarn;
pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index fe4f3a609934..dddd702b2454 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -234,5 +234,15 @@ config TCG_FTPM_TEE
help
This driver proxies for firmware TPM running in TEE.
+config TCG_SVSM
+ tristate "SNP SVSM vTPM interface"
+ depends on AMD_MEM_ENCRYPT
+ help
+ This is a driver for the AMD SVSM vTPM protocol that a SEV-SNP guest
+ OS can use to discover and talk to a vTPM emulated by the Secure VM
+ Service Module (SVSM) in the guest context, but at a more privileged
+ level (usually VMPL0).
+
+ To compile this driver as a module, choose M here; the module will be
+ called tpm_svsm.
+
source "drivers/char/tpm/st33zp24/Kconfig"
endif # TCG_TPM
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 2b004df8c04b..9de1b3ea34a9 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -45,3 +45,4 @@ obj-$(CONFIG_TCG_CRB) += tpm_crb.o
obj-$(CONFIG_TCG_ARM_CRB_FFA) += tpm_crb_ffa.o
obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o
+obj-$(CONFIG_TCG_SVSM) += tpm_svsm.o
diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c
index 12ee42a31c71..e7913b2853d5 100644
--- a/drivers/char/tpm/eventlog/tpm1.c
+++ b/drivers/char/tpm/eventlog/tpm1.c
@@ -257,11 +257,8 @@ static int tpm1_ascii_bios_measurements_show(struct seq_file *m, void *v)
(unsigned char *)(v + sizeof(struct tcpa_event));
eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
- if (!eventname) {
- printk(KERN_ERR "%s: ERROR - No Memory for event name\n ",
- __func__);
- return -EFAULT;
- }
+ if (!eventname)
+ return -ENOMEM;
/* 1st: PCR */
seq_printf(m, "%2d ", do_endian_conversion(event->pcr_index));
diff --git a/drivers/char/tpm/tpm_crb_ffa.c b/drivers/char/tpm/tpm_crb_ffa.c
index 3169a87a56b6..4ead61f01299 100644
--- a/drivers/char/tpm/tpm_crb_ffa.c
+++ b/drivers/char/tpm/tpm_crb_ffa.c
@@ -38,9 +38,11 @@
* messages.
*
* All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP
- * are using the AArch32 SMC calling convention with register usage as
- * defined in FF-A specification:
- * w0: Function ID (0x8400006F or 0x84000070)
+ * are using the AArch32 or AArch64 SMC calling convention with register usage
+ * as defined in FF-A specification:
+ * w0: Function ID
+ * - for 32-bit: 0x8400006F or 0x84000070
+ * - for 64-bit: 0xC400006F or 0xC4000070
* w1: Source/Destination IDs
* w2: Reserved (MBZ)
* w3-w7: Implementation defined, free to be used below
@@ -68,7 +70,8 @@
#define CRB_FFA_GET_INTERFACE_VERSION 0x0f000001
/*
- * Return information on a given feature of the TPM service
+ * Notifies the TPM service that a TPM command or TPM locality request is
+ * ready to be processed, and allows the TPM service to process it.
* Call register usage:
* w3: Not used (MBZ)
* w4: TPM service function ID, CRB_FFA_START
@@ -105,7 +108,10 @@ struct tpm_crb_ffa {
u16 minor_version;
/* lock to protect sending of FF-A messages: */
struct mutex msg_data_lock;
- struct ffa_send_direct_data direct_msg_data;
+ union {
+ struct ffa_send_direct_data direct_msg_data;
+ struct ffa_send_direct_data2 direct_msg_data2;
+ };
};
static struct tpm_crb_ffa *tpm_crb_ffa;
@@ -185,18 +191,34 @@ static int __tpm_crb_ffa_send_recieve(unsigned long func_id,
msg_ops = tpm_crb_ffa->ffa_dev->ops->msg_ops;
- memset(&tpm_crb_ffa->direct_msg_data, 0x00,
- sizeof(struct ffa_send_direct_data));
-
- tpm_crb_ffa->direct_msg_data.data1 = func_id;
- tpm_crb_ffa->direct_msg_data.data2 = a0;
- tpm_crb_ffa->direct_msg_data.data3 = a1;
- tpm_crb_ffa->direct_msg_data.data4 = a2;
+ if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) {
+ memset(&tpm_crb_ffa->direct_msg_data2, 0x00,
+ sizeof(struct ffa_send_direct_data2));
+
+ tpm_crb_ffa->direct_msg_data2.data[0] = func_id;
+ tpm_crb_ffa->direct_msg_data2.data[1] = a0;
+ tpm_crb_ffa->direct_msg_data2.data[2] = a1;
+ tpm_crb_ffa->direct_msg_data2.data[3] = a2;
+
+ ret = msg_ops->sync_send_receive2(tpm_crb_ffa->ffa_dev,
+ &tpm_crb_ffa->direct_msg_data2);
+ if (!ret)
+ ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data2.data[0]);
+ } else {
+ memset(&tpm_crb_ffa->direct_msg_data, 0x00,
+ sizeof(struct ffa_send_direct_data));
+
+ tpm_crb_ffa->direct_msg_data.data1 = func_id;
+ tpm_crb_ffa->direct_msg_data.data2 = a0;
+ tpm_crb_ffa->direct_msg_data.data3 = a1;
+ tpm_crb_ffa->direct_msg_data.data4 = a2;
+
+ ret = msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev,
+ &tpm_crb_ffa->direct_msg_data);
+ if (!ret)
+ ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1);
+ }
- ret = msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev,
- &tpm_crb_ffa->direct_msg_data);
- if (!ret)
- ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1);
return ret;
}
@@ -231,8 +253,13 @@ int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor)
rc = __tpm_crb_ffa_send_recieve(CRB_FFA_GET_INTERFACE_VERSION, 0x00, 0x00, 0x00);
if (!rc) {
- *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
- *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+ if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) {
+ *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]);
+ *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]);
+ } else {
+ *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+ *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+ }
}
return rc;
@@ -277,8 +304,9 @@ static int tpm_crb_ffa_probe(struct ffa_device *ffa_dev)
tpm_crb_ffa = ERR_PTR(-ENODEV); // set tpm_crb_ffa so we can detect probe failure
- if (!ffa_partition_supports_direct_recv(ffa_dev)) {
- pr_err("TPM partition doesn't support direct message receive.\n");
+ if (!ffa_partition_supports_direct_recv(ffa_dev) &&
+ !ffa_partition_supports_direct_req2_recv(ffa_dev)) {
+ dev_warn(&ffa_dev->dev, "partition doesn't support direct message receive.\n");
return -EINVAL;
}
@@ -299,17 +327,17 @@ static int tpm_crb_ffa_probe(struct ffa_device *ffa_dev)
rc = tpm_crb_ffa_get_interface_version(&tpm_crb_ffa->major_version,
&tpm_crb_ffa->minor_version);
if (rc) {
- pr_err("failed to get crb interface version. rc:%d", rc);
+ dev_err(&ffa_dev->dev, "failed to get crb interface version. rc:%d\n", rc);
goto out;
}
- pr_info("ABI version %u.%u", tpm_crb_ffa->major_version,
+ dev_info(&ffa_dev->dev, "ABI version %u.%u\n", tpm_crb_ffa->major_version,
tpm_crb_ffa->minor_version);
if (tpm_crb_ffa->major_version != CRB_FFA_VERSION_MAJOR ||
(tpm_crb_ffa->minor_version > 0 &&
tpm_crb_ffa->minor_version < CRB_FFA_VERSION_MINOR)) {
- pr_err("Incompatible ABI version");
+ dev_warn(&ffa_dev->dev, "Incompatible ABI version\n");
goto out;
}
diff --git a/drivers/char/tpm/tpm_svsm.c b/drivers/char/tpm/tpm_svsm.c
new file mode 100644
index 000000000000..4280edf427d6
--- /dev/null
+++ b/drivers/char/tpm/tpm_svsm.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ *
+ * Driver for the vTPM defined by the AMD SVSM spec [1].
+ *
+ * The specification defines a protocol that a SEV-SNP guest OS can use to
+ * discover and talk to a vTPM emulated by the Secure VM Service Module (SVSM)
+ * in the guest context, but at a more privileged level (usually VMPL0).
+ *
+ * [1] "Secure VM Service Module for SEV-SNP Guests"
+ * Publication # 58019 Revision: 1.00
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/tpm_svsm.h>
+
+#include <asm/sev.h>
+
+#include "tpm.h"
+
+struct tpm_svsm_priv {
+ void *buffer;
+};
+
+static int tpm_svsm_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct tpm_svsm_priv *priv = dev_get_drvdata(&chip->dev);
+ int ret;
+
+ ret = svsm_vtpm_cmd_request_fill(priv->buffer, 0, buf, len);
+ if (ret)
+ return ret;
+
+ /*
+ * The SVSM call uses the same buffer for the command and for the
+ * response, so after this call, the buffer will contain the response
+ * that can be used by the .recv() op.
+ */
+ return snp_svsm_vtpm_send_command(priv->buffer);
+}
+
+static int tpm_svsm_recv(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct tpm_svsm_priv *priv = dev_get_drvdata(&chip->dev);
+
+ /*
+ * The internal buffer contains the response after we send the command
+ * to SVSM.
+ */
+ return svsm_vtpm_cmd_response_parse(priv->buffer, buf, len);
+}
+
+static struct tpm_class_ops tpm_chip_ops = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .recv = tpm_svsm_recv,
+ .send = tpm_svsm_send,
+};
+
+static int __init tpm_svsm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tpm_svsm_priv *priv;
+ struct tpm_chip *chip;
+ int err;
+
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /*
+ * The maximum buffer supported is one page (see SVSM_VTPM_MAX_BUFFER
+ * in tpm_svsm.h).
+ */
+ priv->buffer = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
+ if (!priv->buffer)
+ return -ENOMEM;
+
+ chip = tpmm_chip_alloc(dev, &tpm_chip_ops);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ dev_set_drvdata(&chip->dev, priv);
+
+ err = tpm2_probe(chip);
+ if (err)
+ return err;
+
+ err = tpm_chip_register(chip);
+ if (err)
+ return err;
+
+ dev_info(dev, "SNP SVSM vTPM %s device\n",
+ (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2");
+
+ return 0;
+}
+
+static void __exit tpm_svsm_remove(struct platform_device *pdev)
+{
+ struct tpm_chip *chip = platform_get_drvdata(pdev);
+
+ tpm_chip_unregister(chip);
+}
+
+/*
+ * tpm_svsm_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound
+ * at runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver tpm_svsm_driver __refdata = {
+ .remove = __exit_p(tpm_svsm_remove),
+ .driver = {
+ .name = "tpm-svsm",
+ },
+};
+
+module_platform_driver_probe(tpm_svsm_driver, tpm_svsm_probe);
+
+MODULE_DESCRIPTION("SNP SVSM vTPM Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:tpm-svsm");
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 713573b6c86c..19c1ed280fd7 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -517,6 +517,7 @@ source "drivers/clk/samsung/Kconfig"
source "drivers/clk/sifive/Kconfig"
source "drivers/clk/socfpga/Kconfig"
source "drivers/clk/sophgo/Kconfig"
+source "drivers/clk/spacemit/Kconfig"
source "drivers/clk/sprd/Kconfig"
source "drivers/clk/starfive/Kconfig"
source "drivers/clk/sunxi/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index bf4bd45adc3a..42867cd37c33 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -145,6 +145,7 @@ obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
obj-$(CONFIG_CLK_SIFIVE) += sifive/
obj-y += socfpga/
obj-y += sophgo/
+obj-y += spacemit/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-y += sprd/
obj-$(CONFIG_ARCH_STI) += st/
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index 2b0ea882f1e4..0171e6b2bfca 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -53,24 +53,6 @@ static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
}
-/*
- * Build a scaled divider value as close as possible to the
- * given whole part (div_value) and fractional part (expressed
- * in billionths).
- */
-u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
-{
- u64 combined;
-
- BUG_ON(!div_value);
- BUG_ON(billionths >= BILLION);
-
- combined = (u64)div_value * BILLION + billionths;
- combined <<= div->u.s.frac_width;
-
- return DIV_ROUND_CLOSEST_ULL(combined, BILLION);
-}
-
/* The scaled minimum divisor representable by a divider */
static inline u64
scaled_div_min(struct bcm_clk_div *div)
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h
index e09655024ac2..348a3454ce40 100644
--- a/drivers/clk/bcm/clk-kona.h
+++ b/drivers/clk/bcm/clk-kona.h
@@ -492,8 +492,6 @@ extern struct clk_ops kona_peri_clk_ops;
/* Externally visible functions */
extern u64 scaled_div_max(struct bcm_clk_div *div);
-extern u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value,
- u32 billionths);
extern void __init kona_dt_ccu_setup(struct ccu_data *ccu,
struct device_node *node);
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 0e1fe3759530..8e4fde03ed23 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -286,6 +286,8 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
init.name = devm_kasprintf(rpi->dev, GFP_KERNEL,
"fw-clk-%s",
rpi_firmware_clk_names[id]);
+ if (!init.name)
+ return ERR_PTR(-ENOMEM);
init.ops = &raspberrypi_firmware_clk_ops;
init.flags = CLK_GET_RATE_NOCACHE;
@@ -480,4 +482,3 @@ module_platform_driver(raspberrypi_clk_driver);
MODULE_AUTHOR("Nicolas Saenz Julienne <nsaenzjulienne@suse.de>");
MODULE_DESCRIPTION("Raspberry Pi firmware clock driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:raspberrypi-clk");
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 6807a2efa93b..bfb6bbdc036c 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -763,13 +763,14 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
return PTR_ERR(clk);
}
- child = of_get_child_by_name(node, "pllout");
- if (of_device_is_available(child))
+ child = of_get_available_child_by_name(node, "pllout");
+ if (child) {
of_clk_add_provider(child, of_clk_src_simple_get, clk);
- of_node_put(child);
+ of_node_put(child);
+ }
- child = of_get_child_by_name(node, "sysclk");
- if (of_device_is_available(child)) {
+ child = of_get_available_child_by_name(node, "sysclk");
+ if (child) {
struct clk_onecell_data *clk_data;
struct clk **clks;
int n_clks = max_sysclk_id + 1;
@@ -803,11 +804,11 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
clks[(*div_info)->id] = clk;
}
of_clk_add_provider(child, of_clk_src_onecell_get, clk_data);
+ of_node_put(child);
}
- of_node_put(child);
- child = of_get_child_by_name(node, "auxclk");
- if (of_device_is_available(child)) {
+ child = of_get_available_child_by_name(node, "auxclk");
+ if (child) {
char child_name[MAX_NAME_SIZE];
snprintf(child_name, MAX_NAME_SIZE, "%s_auxclk", info->name);
@@ -818,11 +819,12 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
child_name, PTR_ERR(clk));
else
of_clk_add_provider(child, of_clk_src_simple_get, clk);
+
+ of_node_put(child);
}
- of_node_put(child);
- child = of_get_child_by_name(node, "obsclk");
- if (of_device_is_available(child)) {
+ child = of_get_available_child_by_name(node, "obsclk");
+ if (child) {
if (obsclk_info)
clk = davinci_pll_obsclk_register(dev, obsclk_info, base);
else
@@ -833,8 +835,8 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
PTR_ERR(clk));
else
of_clk_add_provider(child, of_clk_src_simple_get, clk);
+ of_node_put(child);
}
- of_node_put(child);
return 0;
}
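
The conversion above relies on one property of the new helper: it returns a node (with a reference held) only when the child both exists and is status "okay", so of_node_put() moves inside the if-block. A rough equivalent, as a sketch rather than the OF core's actual implementation:

/* Rough equivalent of of_get_available_child_by_name() (sketch; the
 * OF core implementation may differ in detail).
 */
static struct device_node *
demo_get_available_child_by_name(const struct device_node *node,
				 const char *name)
{
	struct device_node *child;

	child = of_get_child_by_name(node, name);
	if (child && !of_device_is_available(child)) {
		of_node_put(child);
		child = NULL;
	}
	return child;	/* caller puts the node only when non-NULL */
}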
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index be2e3a5f8336..ff003dc5ab20 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -55,7 +55,7 @@ config COMMON_CLK_MESON_CPU_DYNDIV
config COMMON_CLK_MESON8B
bool "Meson8 SoC Clock controller support"
depends on ARM
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_MPLL
@@ -70,7 +70,7 @@ config COMMON_CLK_MESON8B
config COMMON_CLK_GXBB
tristate "GXBB and GXL SoC clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_VID_PLL_DIV
@@ -86,7 +86,7 @@ config COMMON_CLK_GXBB
config COMMON_CLK_AXG
tristate "AXG SoC clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_MPLL
@@ -136,7 +136,7 @@ config COMMON_CLK_A1_PERIPHERALS
config COMMON_CLK_C3_PLL
tristate "Amlogic C3 PLL clock controller"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_PLL
select COMMON_CLK_MESON_CLKC_UTILS
@@ -149,7 +149,7 @@ config COMMON_CLK_C3_PLL
config COMMON_CLK_C3_PERIPHERALS
tristate "Amlogic C3 peripherals clock controller"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_CLKC_UTILS
@@ -163,7 +163,7 @@ config COMMON_CLK_C3_PERIPHERALS
config COMMON_CLK_G12A
tristate "G12 and SM1 SoC clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_MPLL
@@ -181,7 +181,7 @@ config COMMON_CLK_G12A
config COMMON_CLK_S4_PLL
tristate "S4 SoC PLL clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_MPLL
select COMMON_CLK_MESON_PLL
@@ -194,7 +194,7 @@ config COMMON_CLK_S4_PLL
config COMMON_CLK_S4_PERIPHERALS
tristate "S4 SoC peripherals clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index ceabebb1863d..d9e546e006d7 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -4093,6 +4093,7 @@ static const struct clk_parent_data spicc_sclk_parent_data[] = {
{ .hw = &g12a_clk81.hw },
{ .hw = &g12a_fclk_div4.hw },
{ .hw = &g12a_fclk_div3.hw },
+ { .hw = &g12a_fclk_div2.hw },
{ .hw = &g12a_fclk_div5.hw },
{ .hw = &g12a_fclk_div7.hw },
};
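
The one-line addition matters because clk_parent_data[] is positional: the array index is the mux selector value, so a missing entry shifts every later parent by one. A sketch of the rationale (the selector encoding is inferred from the table order, not stated in this hunk):

/* Positional mapping assumed by the framework (sketch):
 *   selector 0 -> clk81, 1 -> fclk_div4, 2 -> fclk_div3,
 *   3 -> fclk_div2 (the missing entry), 4 -> fclk_div5,
 *   5 -> fclk_div7.
 * Without the fclk_div2 entry, selectors 3-5 resolved to the wrong
 * parents.
 */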
diff --git a/drivers/clk/qcom/apcs-sdx55.c b/drivers/clk/qcom/apcs-sdx55.c
index 76ece6c4a969..3ba01622d8f0 100644
--- a/drivers/clk/qcom/apcs-sdx55.c
+++ b/drivers/clk/qcom/apcs-sdx55.c
@@ -111,7 +111,11 @@ static int qcom_apcs_sdx55_clk_probe(struct platform_device *pdev)
* driver, there seems to be no better place to do this. So do it here!
*/
cpu_dev = get_cpu_device(0);
- dev_pm_domain_attach(cpu_dev, true);
+ ret = dev_pm_domain_attach(cpu_dev, true);
+ if (ret) {
+ dev_err_probe(dev, ret, "can't get PM domain: %d\n", ret);
+ goto err;
+ }
return 0;
diff --git a/drivers/clk/qcom/camcc-sa8775p.c b/drivers/clk/qcom/camcc-sa8775p.c
index 11bd2e234811..50e5a131261b 100644
--- a/drivers/clk/qcom/camcc-sa8775p.c
+++ b/drivers/clk/qcom/camcc-sa8775p.c
@@ -10,7 +10,7 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <dt-bindings/clock/qcom,sa8775p-camcc.h>
+#include <dt-bindings/clock/qcom,qcs8300-camcc.h>
#include "clk-alpha-pll.h"
#include "clk-branch.h"
@@ -1681,6 +1681,24 @@ static struct clk_branch cam_cc_sm_obs_clk = {
},
};
+static struct clk_branch cam_cc_titan_top_accu_shift_clk = {
+ .halt_reg = 0x131f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x131f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_titan_top_accu_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc cam_cc_titan_top_gdsc = {
.gdscr = 0x131bc,
.en_rest_wait_val = 0x2,
@@ -1775,6 +1793,7 @@ static struct clk_regmap *cam_cc_sa8775p_clocks[] = {
[CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
[CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
[CAM_CC_SM_OBS_CLK] = &cam_cc_sm_obs_clk.clkr,
+ [CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK] = NULL,
[CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
[CAM_CC_QDSS_DEBUG_XO_CLK] = &cam_cc_qdss_debug_xo_clk.clkr,
};
@@ -1811,6 +1830,7 @@ static const struct qcom_cc_desc cam_cc_sa8775p_desc = {
};
static const struct of_device_id cam_cc_sa8775p_match_table[] = {
+ { .compatible = "qcom,qcs8300-camcc" },
{ .compatible = "qcom,sa8775p-camcc" },
{ }
};
@@ -1841,10 +1861,83 @@ static int cam_cc_sa8775p_probe(struct platform_device *pdev)
clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
- /* Keep some clocks always enabled */
- qcom_branch_set_clk_en(regmap, 0x13194); /* CAM_CC_CAMNOC_XO_CLK */
- qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_GDSC_CLK */
- qcom_branch_set_clk_en(regmap, 0x13208); /* CAM_CC_SLEEP_CLK */
+ if (device_is_compatible(&pdev->dev, "qcom,qcs8300-camcc")) {
+ cam_cc_camnoc_axi_clk_src.cmd_rcgr = 0x13154;
+ cam_cc_camnoc_axi_clk.halt_reg = 0x1316c;
+ cam_cc_camnoc_axi_clk.clkr.enable_reg = 0x1316c;
+ cam_cc_camnoc_dcd_xo_clk.halt_reg = 0x13174;
+ cam_cc_camnoc_dcd_xo_clk.clkr.enable_reg = 0x13174;
+
+ cam_cc_csi0phytimer_clk_src.cmd_rcgr = 0x15054;
+ cam_cc_csi1phytimer_clk_src.cmd_rcgr = 0x15078;
+ cam_cc_csi2phytimer_clk_src.cmd_rcgr = 0x15098;
+ cam_cc_csid_clk_src.cmd_rcgr = 0x13134;
+
+ cam_cc_mclk0_clk_src.cmd_rcgr = 0x15000;
+ cam_cc_mclk1_clk_src.cmd_rcgr = 0x1501c;
+ cam_cc_mclk2_clk_src.cmd_rcgr = 0x15038;
+
+ cam_cc_fast_ahb_clk_src.cmd_rcgr = 0x13104;
+ cam_cc_slow_ahb_clk_src.cmd_rcgr = 0x1311c;
+ cam_cc_xo_clk_src.cmd_rcgr = 0x131b8;
+ cam_cc_sleep_clk_src.cmd_rcgr = 0x131d4;
+
+ cam_cc_core_ahb_clk.halt_reg = 0x131b4;
+ cam_cc_core_ahb_clk.clkr.enable_reg = 0x131b4;
+
+ cam_cc_cpas_ahb_clk.halt_reg = 0x130f4;
+ cam_cc_cpas_ahb_clk.clkr.enable_reg = 0x130f4;
+ cam_cc_cpas_fast_ahb_clk.halt_reg = 0x130fc;
+ cam_cc_cpas_fast_ahb_clk.clkr.enable_reg = 0x130fc;
+
+ cam_cc_csi0phytimer_clk.halt_reg = 0x1506c;
+ cam_cc_csi0phytimer_clk.clkr.enable_reg = 0x1506c;
+ cam_cc_csi1phytimer_clk.halt_reg = 0x15090;
+ cam_cc_csi1phytimer_clk.clkr.enable_reg = 0x15090;
+ cam_cc_csi2phytimer_clk.halt_reg = 0x150b0;
+ cam_cc_csi2phytimer_clk.clkr.enable_reg = 0x150b0;
+ cam_cc_csid_clk.halt_reg = 0x1314c;
+ cam_cc_csid_clk.clkr.enable_reg = 0x1314c;
+ cam_cc_csid_csiphy_rx_clk.halt_reg = 0x15074;
+ cam_cc_csid_csiphy_rx_clk.clkr.enable_reg = 0x15074;
+ cam_cc_csiphy0_clk.halt_reg = 0x15070;
+ cam_cc_csiphy0_clk.clkr.enable_reg = 0x15070;
+ cam_cc_csiphy1_clk.halt_reg = 0x15094;
+ cam_cc_csiphy1_clk.clkr.enable_reg = 0x15094;
+ cam_cc_csiphy2_clk.halt_reg = 0x150b4;
+ cam_cc_csiphy2_clk.clkr.enable_reg = 0x150b4;
+
+ cam_cc_mclk0_clk.halt_reg = 0x15018;
+ cam_cc_mclk0_clk.clkr.enable_reg = 0x15018;
+ cam_cc_mclk1_clk.halt_reg = 0x15034;
+ cam_cc_mclk1_clk.clkr.enable_reg = 0x15034;
+ cam_cc_mclk2_clk.halt_reg = 0x15050;
+ cam_cc_mclk2_clk.clkr.enable_reg = 0x15050;
+ cam_cc_qdss_debug_xo_clk.halt_reg = 0x1319c;
+ cam_cc_qdss_debug_xo_clk.clkr.enable_reg = 0x1319c;
+
+ cam_cc_titan_top_gdsc.gdscr = 0x131a0;
+
+ cam_cc_sa8775p_clocks[CAM_CC_CCI_3_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CCI_3_CLK_SRC] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CSI3PHYTIMER_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CSI3PHYTIMER_CLK_SRC] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CSIPHY3_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_MCLK3_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_MCLK3_CLK_SRC] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK] =
+ &cam_cc_titan_top_accu_shift_clk.clkr;
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x13178); /* CAM_CC_CAMNOC_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131d0); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_SLEEP_CLK */
+ } else {
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x13194); /* CAM_CC_CAMNOC_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x13208); /* CAM_CC_SLEEP_CLK */
+ }
ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sa8775p_desc, regmap);
diff --git a/drivers/clk/qcom/camcc-sm6350.c b/drivers/clk/qcom/camcc-sm6350.c
index 1871970fb046..8aac97d29ce3 100644
--- a/drivers/clk/qcom/camcc-sm6350.c
+++ b/drivers/clk/qcom/camcc-sm6350.c
@@ -1695,6 +1695,9 @@ static struct clk_branch camcc_sys_tmr_clk = {
static struct gdsc bps_gdsc = {
.gdscr = 0x6004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "bps_gdsc",
},
@@ -1704,6 +1707,9 @@ static struct gdsc bps_gdsc = {
static struct gdsc ipe_0_gdsc = {
.gdscr = 0x7004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ipe_0_gdsc",
},
@@ -1713,6 +1719,9 @@ static struct gdsc ipe_0_gdsc = {
static struct gdsc ife_0_gdsc = {
.gdscr = 0x9004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ife_0_gdsc",
},
@@ -1721,6 +1730,9 @@ static struct gdsc ife_0_gdsc = {
static struct gdsc ife_1_gdsc = {
.gdscr = 0xa004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ife_1_gdsc",
},
@@ -1729,6 +1741,9 @@ static struct gdsc ife_1_gdsc = {
static struct gdsc ife_2_gdsc = {
.gdscr = 0xb004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ife_2_gdsc",
},
@@ -1737,6 +1752,9 @@ static struct gdsc ife_2_gdsc = {
static struct gdsc titan_top_gdsc = {
.gdscr = 0x14004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "titan_top_gdsc",
},
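
The three values added to each GDSC correspond to wait-state fields in the GDSCR; the summary below is a sketch of their meaning (field semantics assumed from the common qcom gdsc code, not spelled out in these hunks):

/* GDSCR wait-state fields programmed by the values above (sketch):
 *   en_rest_wait_val - cycles to wait after enabling the "rest" of
 *                      the power switches
 *   en_few_wait_val  - cycles to wait after enabling the first "few"
 *                      power switches
 *   clk_dis_wait_val - cycles to wait before disabling clocks when
 *                      collapsing the domain
 */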
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index c7675930fde1..00fb3e53a388 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -66,6 +66,8 @@ struct clk_rpmh {
struct clk_rpmh_desc {
struct clk_hw **clks;
size_t num_clks;
+ /* RPMh clocks clkaN are optional for this platform */
+ bool clka_optional;
};
static DEFINE_MUTEX(rpmh_clk_lock);
@@ -648,6 +650,7 @@ static struct clk_hw *sm8550_rpmh_clocks[] = {
static const struct clk_rpmh_desc clk_rpmh_sm8550 = {
.clks = sm8550_rpmh_clocks,
.num_clks = ARRAY_SIZE(sm8550_rpmh_clocks),
+ .clka_optional = true,
};
static struct clk_hw *sm8650_rpmh_clocks[] = {
@@ -679,6 +682,7 @@ static struct clk_hw *sm8650_rpmh_clocks[] = {
static const struct clk_rpmh_desc clk_rpmh_sm8650 = {
.clks = sm8650_rpmh_clocks,
.num_clks = ARRAY_SIZE(sm8650_rpmh_clocks),
+ .clka_optional = true,
};
static struct clk_hw *sc7280_rpmh_clocks[] = {
@@ -847,6 +851,7 @@ static struct clk_hw *sm8750_rpmh_clocks[] = {
static const struct clk_rpmh_desc clk_rpmh_sm8750 = {
.clks = sm8750_rpmh_clocks,
.num_clks = ARRAY_SIZE(sm8750_rpmh_clocks),
+ .clka_optional = true,
};
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
@@ -890,6 +895,12 @@ static int clk_rpmh_probe(struct platform_device *pdev)
rpmh_clk = to_clk_rpmh(hw_clks[i]);
res_addr = cmd_db_read_addr(rpmh_clk->res_name);
if (!res_addr) {
+ hw_clks[i] = NULL;
+
+ if (desc->clka_optional &&
+ !strncmp(rpmh_clk->res_name, "clka", sizeof("clka") - 1))
+ continue;
+
dev_err(&pdev->dev, "missing RPMh resource address for %s\n",
rpmh_clk->res_name);
return -ENODEV;
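
Because the probe loop now leaves NULL holes in hw_clks[] for missing optional clkaN resources, the provider's get() callback must treat a NULL slot as absent. A sketch of the expected lookup (pattern assumed from the usual qcom provider callbacks, not shown in this hunk):

/* Sketch: provider lookup tolerating the NULL holes left above. */
static struct clk_hw *demo_rpmh_hw_get(struct of_phandle_args *clkspec,
				       void *data)
{
	const struct clk_rpmh_desc *desc = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= desc->num_clks || !desc->clks[idx])
		return ERR_PTR(-ENOENT);

	return desc->clks[idx];
}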
diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
index e703ecf00e44..b0bd163a449c 100644
--- a/drivers/clk/qcom/dispcc-sm6350.c
+++ b/drivers/clk/qcom/dispcc-sm6350.c
@@ -681,6 +681,9 @@ static struct clk_branch disp_cc_xo_clk = {
static struct gdsc mdss_gdsc = {
.gdscr = 0x1004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "mdss_gdsc",
},
diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c
index 7431c9a65044..45193b3d714b 100644
--- a/drivers/clk/qcom/gcc-msm8939.c
+++ b/drivers/clk/qcom/gcc-msm8939.c
@@ -432,7 +432,7 @@ static const struct parent_map gcc_xo_gpll0_gpll1a_gpll6_sleep_map[] = {
{ P_XO, 0 },
{ P_GPLL0, 1 },
{ P_GPLL1_AUX, 2 },
- { P_GPLL6, 2 },
+ { P_GPLL6, 3 },
{ P_SLEEP_CLK, 6 },
};
@@ -1113,7 +1113,7 @@ static struct clk_rcg2 jpeg0_clk_src = {
};
static const struct freq_tbl ftbl_gcc_camss_mclk0_1_clk[] = {
- F(24000000, P_GPLL0, 1, 1, 45),
+ F(24000000, P_GPLL6, 1, 1, 45),
F(66670000, P_GPLL0, 12, 0, 0),
{ }
};
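
The frequency-table fix above is easy to verify arithmetically. With the RCG M/N counter, f_out = f_parent * (m/n) / pre_div; assuming gpll0 = 800 MHz and gpll6 = 1080 MHz on this SoC family (an assumption, not stated in the hunk):

/* Worked check of F(24000000, P_GPLL6, 1, 1, 45), under the assumed
 * parent rates above:
 *   from gpll0: 800 MHz  * (1/45) / 1 = 17.78 MHz  (not 24 MHz)
 *   from gpll6: 1080 MHz * (1/45) / 1 = 24.00 MHz  (matches)
 */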
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index 74346dc02606..a4d6dff9d0f7 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -2320,6 +2320,9 @@ static struct clk_branch gcc_video_xo_clk = {
static struct gdsc usb30_prim_gdsc = {
.gdscr = 0x1a004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "usb30_prim_gdsc",
},
@@ -2328,6 +2331,9 @@ static struct gdsc usb30_prim_gdsc = {
static struct gdsc ufs_phy_gdsc = {
.gdscr = 0x3a004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ufs_phy_gdsc",
},
diff --git a/drivers/clk/qcom/gcc-sm8650.c b/drivers/clk/qcom/gcc-sm8650.c
index fa1672c4e7d8..24f98062b9dd 100644
--- a/drivers/clk/qcom/gcc-sm8650.c
+++ b/drivers/clk/qcom/gcc-sm8650.c
@@ -3817,7 +3817,9 @@ static int gcc_sm8650_probe(struct platform_device *pdev)
qcom_branch_set_clk_en(regmap, 0x32004); /* GCC_VIDEO_AHB_CLK */
qcom_branch_set_clk_en(regmap, 0x32030); /* GCC_VIDEO_XO_CLK */
+ /* FORCE_MEM_CORE_ON for ufs phy ice core and gcc ufs phy axi clocks */
qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true);
/* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */
regmap_write(regmap, 0x52150, 0x0);
diff --git a/drivers/clk/qcom/gcc-sm8750.c b/drivers/clk/qcom/gcc-sm8750.c
index b36d70976095..8092dd6b37b5 100644
--- a/drivers/clk/qcom/gcc-sm8750.c
+++ b/drivers/clk/qcom/gcc-sm8750.c
@@ -3244,8 +3244,9 @@ static int gcc_sm8750_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0x52010, BIT(20), BIT(20));
regmap_update_bits(regmap, 0x52010, BIT(21), BIT(21));
- /* FORCE_MEM_CORE_ON for ufs phy ice core clocks */
+ /* FORCE_MEM_CORE_ON for ufs phy ice core and gcc ufs phy axi clocks */
qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true);
return qcom_cc_really_probe(&pdev->dev, &gcc_sm8750_desc, regmap);
}
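
qcom_branch_set_force_mem_core() is not defined in these hunks; as a sketch of its effect (assumed from the common qcom branch-clock layout), it forces the memory core behind the clock to stay powered even while the branch is gated, here to retain UFS ICE state across clock gating:

/* Assumed effect (sketch, not the helper's actual source): set the
 * FORCE_MEM_CORE_ON bit in the branch's CBCR so the associated memory
 * core is kept on independently of the branch enable state.
 */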
diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
index 009f39139b64..3e44757e25d3 100644
--- a/drivers/clk/qcom/gcc-x1e80100.c
+++ b/drivers/clk/qcom/gcc-x1e80100.c
@@ -6753,6 +6753,10 @@ static int gcc_x1e80100_probe(struct platform_device *pdev)
/* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */
regmap_write(regmap, 0x52224, 0x0);
+ /* FORCE_MEM_CORE_ON for ufs phy ice core and gcc ufs phy axi clocks */
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true);
+
return qcom_cc_really_probe(&pdev->dev, &gcc_x1e80100_desc, regmap);
}
diff --git a/drivers/clk/qcom/gpucc-sm6350.c b/drivers/clk/qcom/gpucc-sm6350.c
index 35ed0500bc59..ee89c42413f8 100644
--- a/drivers/clk/qcom/gpucc-sm6350.c
+++ b/drivers/clk/qcom/gpucc-sm6350.c
@@ -413,6 +413,9 @@ static struct clk_branch gpu_cc_gx_vsense_clk = {
static struct gdsc gpu_cx_gdsc = {
.gdscr = 0x106c,
.gds_hw_ctrl = 0x1540,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x8,
.pd = {
.name = "gpu_cx_gdsc",
},
@@ -423,6 +426,9 @@ static struct gdsc gpu_cx_gdsc = {
static struct gdsc gpu_gx_gdsc = {
.gdscr = 0x100c,
.clamp_io_ctrl = 0x1508,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
.pd = {
.name = "gpu_gx_gdsc",
.power_on = gdsc_gx_do_nothing_enable,
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 5a4bc3f94d49..50c20119d12a 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -41,6 +41,7 @@ config CLK_RENESAS
select CLK_R9A08G045 if ARCH_R9A08G045
select CLK_R9A09G011 if ARCH_R9A09G011
select CLK_R9A09G047 if ARCH_R9A09G047
+ select CLK_R9A09G056 if ARCH_R9A09G056
select CLK_R9A09G057 if ARCH_R9A09G057
select CLK_SH73A0 if ARCH_SH73A0
@@ -199,6 +200,10 @@ config CLK_R9A09G047
bool "RZ/G3E clock support" if COMPILE_TEST
select CLK_RZV2H
+config CLK_R9A09G056
+ bool "RZ/V2N clock support" if COMPILE_TEST
+ select CLK_RZV2H
+
config CLK_R9A09G057
bool "RZ/V2H(P) clock support" if COMPILE_TEST
select CLK_RZV2H
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 2d6e746939c4..f9075bca6e95 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_CLK_R9A07G054) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_R9A08G045) += r9a08g045-cpg.o
obj-$(CONFIG_CLK_R9A09G011) += r9a09g011-cpg.o
obj-$(CONFIG_CLK_R9A09G047) += r9a09g047-cpg.o
+obj-$(CONFIG_CLK_R9A09G056) += r9a09g056-cpg.o
obj-$(CONFIG_CLK_R9A09G057) += r9a09g057-cpg.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
diff --git a/drivers/clk/renesas/r9a09g047-cpg.c b/drivers/clk/renesas/r9a09g047-cpg.c
index e9cf4342d0cf..21699999cedd 100644
--- a/drivers/clk/renesas/r9a09g047-cpg.c
+++ b/drivers/clk/renesas/r9a09g047-cpg.c
@@ -16,7 +16,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G047_IOTOP_0_SHCLK,
+ LAST_DT_CORE_CLK = R9A09G047_GBETH_1_CLK_PTP_REF_I,
/* External Input Clocks */
CLK_AUDIO_EXTAL,
@@ -31,7 +31,14 @@ enum clk_ids {
CLK_PLLVDO,
/* Internal Core Clocks */
+ CLK_PLLCM33_DIV3,
+ CLK_PLLCM33_DIV4,
+ CLK_PLLCM33_DIV5,
CLK_PLLCM33_DIV16,
+ CLK_PLLCM33_GEAR,
+ CLK_SMUX2_XSPI_CLK0,
+ CLK_SMUX2_XSPI_CLK1,
+ CLK_PLLCM33_XSPI,
CLK_PLLCLN_DIV2,
CLK_PLLCLN_DIV8,
CLK_PLLCLN_DIV16,
@@ -41,6 +48,7 @@ enum clk_ids {
CLK_PLLDTY_ACPU_DIV4,
CLK_PLLDTY_DIV16,
CLK_PLLVDO_CRU0,
+ CLK_PLLVDO_GPU,
/* Module Clocks */
MOD_CLK_BASE,
@@ -60,6 +68,14 @@ static const struct clk_div_table dtable_2_4[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_16[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_2_64[] = {
{0, 2},
{1, 4},
@@ -69,6 +85,10 @@ static const struct clk_div_table dtable_2_64[] = {
{0, 0},
};
+/* Mux clock tables */
+static const char * const smux2_xspi_clk0[] = { ".pllcm33_div3", ".pllcm33_div4" };
+static const char * const smux2_xspi_clk1[] = { ".smux2_xspi_clk0", ".pllcm33_div5" };
+
static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
@@ -79,12 +99,21 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
- DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
/* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div3", CLK_PLLCM33_DIV3, CLK_PLLCM33, 1, 3),
+ DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
+ DEF_FIXED(".pllcm33_div5", CLK_PLLCM33_DIV5, CLK_PLLCM33, 1, 5),
DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+ DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR, CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
+
+ DEF_SMUX(".smux2_xspi_clk0", CLK_SMUX2_XSPI_CLK0, SSEL1_SELCTL2, smux2_xspi_clk0),
+ DEF_SMUX(".smux2_xspi_clk1", CLK_SMUX2_XSPI_CLK1, SSEL1_SELCTL3, smux2_xspi_clk1),
+ DEF_CSDIV(".pllcm33_xspi", CLK_PLLCM33_XSPI, CLK_SMUX2_XSPI_CLK1, CSDIV0_DIVCTL3,
+ dtable_2_16),
DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
@@ -96,6 +125,7 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
DEF_DDIV(".pllvdo_cru0", CLK_PLLVDO_CRU0, CLK_PLLVDO, CDDIV3_DIVCTL3, dtable_2_4),
+ DEF_DDIV(".pllvdo_gpu", CLK_PLLVDO_GPU, CLK_PLLVDO, CDDIV3_DIVCTL1, dtable_2_64),
/* Core Clocks */
DEF_FIXED("sys_0_pclk", R9A09G047_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
@@ -108,6 +138,7 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_DDIV("ca55_0_coreclk3", R9A09G047_CA55_0_CORECLK3, CLK_PLLCA55,
CDDIV1_DIVCTL3, dtable_1_8),
DEF_FIXED("iotop_0_shclk", R9A09G047_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+ DEF_FIXED("spi_clk_spi", R9A09G047_SPI_CLK_SPI, CLK_PLLCM33_XSPI, 1, 2),
};
static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
@@ -153,6 +184,12 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
BUS_MSTOP(10, BIT(14))),
DEF_MOD("canfd_0_clkc", CLK_PLLCLN_DIV20, 9, 14, 4, 30,
BUS_MSTOP(10, BIT(14))),
+ DEF_MOD("spi_hclk", CLK_PLLCM33_GEAR, 9, 15, 4, 31,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("spi_aclk", CLK_PLLCM33_GEAR, 10, 0, 5, 0,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD_NO_PM("spi_clk_spix2", CLK_PLLCM33_XSPI, 10, 1, 5, 2,
+ BUS_MSTOP(4, BIT(5))),
DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
BUS_MSTOP(8, BIT(2))),
DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
@@ -183,6 +220,12 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
BUS_MSTOP(9, BIT(4))),
DEF_MOD("cru_0_pclk", CLK_PLLDTY_DIV16, 13, 4, 6, 20,
BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("ge3d_clk", CLK_PLLVDO_GPU, 15, 0, 7, 16,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("ge3d_axi_clk", CLK_PLLDTY_ACPU_DIV2, 15, 1, 7, 17,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("ge3d_ace_clk", CLK_PLLDTY_ACPU_DIV2, 15, 2, 7, 18,
+ BUS_MSTOP(3, BIT(4))),
DEF_MOD("tsu_1_pclk", CLK_QEXTAL, 16, 10, 8, 10,
BUS_MSTOP(2, BIT(15))),
};
@@ -207,12 +250,17 @@ static const struct rzv2h_reset r9a09g047_resets[] __initconst = {
DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
DEF_RST(10, 1, 4, 18), /* CANFD_0_RSTP_N */
DEF_RST(10, 2, 4, 19), /* CANFD_0_RSTC_N */
+ DEF_RST(10, 3, 4, 20), /* SPI_HRESETN */
+ DEF_RST(10, 4, 4, 21), /* SPI_ARESETN */
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
+ DEF_RST(13, 13, 6, 14), /* GE3D_RESETN */
+ DEF_RST(13, 14, 6, 15), /* GE3D_AXI_RESETN */
+ DEF_RST(13, 15, 6, 16), /* GE3D_ACE_RESETN */
DEF_RST(15, 8, 7, 9), /* TSU_1_PRESETN */
};
diff --git a/drivers/clk/renesas/r9a09g056-cpg.c b/drivers/clk/renesas/r9a09g056-cpg.c
new file mode 100644
index 000000000000..e2712a25c43a
--- /dev/null
+++ b/drivers/clk/renesas/r9a09g056-cpg.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2N CPG driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/clock/renesas,r9a09g056-cpg.h>
+
+#include "rzv2h-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R9A09G056_GBETH_1_CLK_PTP_REF_I,
+
+ /* External Input Clocks */
+ CLK_AUDIO_EXTAL,
+ CLK_RTXIN,
+ CLK_QEXTAL,
+
+ /* PLL Clocks */
+ CLK_PLLCM33,
+ CLK_PLLCLN,
+ CLK_PLLDTY,
+ CLK_PLLCA55,
+
+ /* Internal Core Clocks */
+ CLK_PLLCM33_DIV16,
+ CLK_PLLCLN_DIV2,
+ CLK_PLLCLN_DIV8,
+ CLK_PLLDTY_ACPU,
+ CLK_PLLDTY_ACPU_DIV4,
+
+ /* Module Clocks */
+ MOD_CLK_BASE,
+};
+
+static const struct clk_div_table dtable_1_8[] = {
+ {0, 1},
+ {1, 2},
+ {2, 4},
+ {3, 8},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_64[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {4, 64},
+ {0, 0},
+};
+
+static const struct cpg_core_clk r9a09g056_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
+ DEF_INPUT("rtxin", CLK_RTXIN),
+ DEF_INPUT("qextal", CLK_QEXTAL),
+
+ /* PLL Clocks */
+ DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
+
+ /* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+
+ DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
+ DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
+
+ DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+
+ /* Core Clocks */
+ DEF_FIXED("sys_0_pclk", R9A09G056_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
+ DEF_DDIV("ca55_0_coreclk0", R9A09G056_CA55_0_CORE_CLK0, CLK_PLLCA55,
+ CDDIV1_DIVCTL0, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk1", R9A09G056_CA55_0_CORE_CLK1, CLK_PLLCA55,
+ CDDIV1_DIVCTL1, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk2", R9A09G056_CA55_0_CORE_CLK2, CLK_PLLCA55,
+ CDDIV1_DIVCTL2, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk3", R9A09G056_CA55_0_CORE_CLK3, CLK_PLLCA55,
+ CDDIV1_DIVCTL3, dtable_1_8),
+ DEF_FIXED("iotop_0_shclk", R9A09G056_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+};
+
+static const struct rzv2h_mod_clk r9a09g056_mod_clks[] __initconst = {
+ DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
+ BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
+ BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
+ BUS_MSTOP(8, BIT(4))),
+};
+
+static const struct rzv2h_reset r9a09g056_resets[] __initconst = {
+ DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
+ DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
+ DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
+ DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
+ DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
+ DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+};
+
+const struct rzv2h_cpg_info r9a09g056_cpg_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r9a09g056_core_clks,
+ .num_core_clks = ARRAY_SIZE(r9a09g056_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r9a09g056_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r9a09g056_mod_clks),
+ .num_hw_mod_clks = 25 * 16,
+
+ /* Resets */
+ .resets = r9a09g056_resets,
+ .num_resets = ARRAY_SIZE(r9a09g056_resets),
+
+ .num_mstop_bits = 192,
+};
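
One sizing note on the table above: the rzv2h CPG core addresses module clocks as (register, bit) pairs, so num_hw_mod_clks = 25 * 16 reserves a full 16-bit slot range for each of 25 CLK_ON registers, independent of how many DEF_MOD() entries exist yet (a reading of the field assumed from the r9a09g057 sibling driver, not stated here):

/* Sketch of the indexing assumption behind 25 * 16:
 *   hw-module-clock id = (CLK_ON register index) * 16 + bit
 * so the table can stay sparse while DT clock ids remain stable.
 */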
diff --git a/drivers/clk/renesas/r9a09g057-cpg.c b/drivers/clk/renesas/r9a09g057-cpg.c
index d63eafbca780..3c40e36259fe 100644
--- a/drivers/clk/renesas/r9a09g057-cpg.c
+++ b/drivers/clk/renesas/r9a09g057-cpg.c
@@ -16,7 +16,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G057_IOTOP_0_SHCLK,
+ LAST_DT_CORE_CLK = R9A09G057_GBETH_1_CLK_PTP_REF_I,
/* External Input Clocks */
CLK_AUDIO_EXTAL,
@@ -29,6 +29,7 @@ enum clk_ids {
CLK_PLLDTY,
CLK_PLLCA55,
CLK_PLLVDO,
+ CLK_PLLGPU,
/* Internal Core Clocks */
CLK_PLLCM33_DIV4,
@@ -40,6 +41,7 @@ enum clk_ids {
CLK_PLLDTY_ACPU,
CLK_PLLDTY_ACPU_DIV2,
CLK_PLLDTY_ACPU_DIV4,
+ CLK_PLLDTY_DIV8,
CLK_PLLDTY_DIV16,
CLK_PLLDTY_RCPU,
CLK_PLLDTY_RCPU_DIV4,
@@ -47,6 +49,7 @@ enum clk_ids {
CLK_PLLVDO_CRU1,
CLK_PLLVDO_CRU2,
CLK_PLLVDO_CRU3,
+ CLK_PLLGPU_GEAR,
/* Module Clocks */
MOD_CLK_BASE,
@@ -85,8 +88,9 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
- DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
+ DEF_PLL(".pllgpu", CLK_PLLGPU, CLK_QEXTAL, PLLGPU),
/* Internal Core Clocks */
DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
@@ -101,6 +105,7 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2),
DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+ DEF_FIXED(".plldty_div8", CLK_PLLDTY_DIV8, CLK_PLLDTY, 1, 8),
DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
DEF_DDIV(".plldty_rcpu", CLK_PLLDTY_RCPU, CLK_PLLDTY, CDDIV3_DIVCTL2, dtable_2_64),
DEF_FIXED(".plldty_rcpu_div4", CLK_PLLDTY_RCPU_DIV4, CLK_PLLDTY_RCPU, 1, 4),
@@ -110,6 +115,8 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_DDIV(".pllvdo_cru2", CLK_PLLVDO_CRU2, CLK_PLLVDO, CDDIV4_DIVCTL1, dtable_2_4),
DEF_DDIV(".pllvdo_cru3", CLK_PLLVDO_CRU3, CLK_PLLVDO, CDDIV4_DIVCTL2, dtable_2_4),
+ DEF_DDIV(".pllgpu_gear", CLK_PLLGPU_GEAR, CLK_PLLGPU, CDDIV3_DIVCTL1, dtable_2_64),
+
/* Core Clocks */
DEF_FIXED("sys_0_pclk", R9A09G057_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
DEF_DDIV("ca55_0_coreclk0", R9A09G057_CA55_0_CORE_CLK0, CLK_PLLCA55,
@@ -121,6 +128,8 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_DDIV("ca55_0_coreclk3", R9A09G057_CA55_0_CORE_CLK3, CLK_PLLCA55,
CDDIV1_DIVCTL3, dtable_1_8),
DEF_FIXED("iotop_0_shclk", R9A09G057_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+ DEF_FIXED("usb2_0_clk_core0", R9A09G057_USB2_0_CLK_CORE0, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb2_0_clk_core1", R9A09G057_USB2_0_CLK_CORE1, CLK_QEXTAL, 1, 1),
};
static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
@@ -214,6 +223,16 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(8, BIT(4))),
DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("usb2_0_u2h0_hclk", CLK_PLLDTY_DIV8, 11, 3, 5, 19,
+ BUS_MSTOP(7, BIT(7))),
+ DEF_MOD("usb2_0_u2h1_hclk", CLK_PLLDTY_DIV8, 11, 4, 5, 20,
+ BUS_MSTOP(7, BIT(8))),
+ DEF_MOD("usb2_0_u2p_exr_cpuclk", CLK_PLLDTY_ACPU_DIV4, 11, 5, 5, 21,
+ BUS_MSTOP(7, BIT(9))),
+ DEF_MOD("usb2_0_pclk_usbtst0", CLK_PLLDTY_ACPU_DIV4, 11, 6, 5, 22,
+ BUS_MSTOP(7, BIT(10))),
+ DEF_MOD("usb2_0_pclk_usbtst1", CLK_PLLDTY_ACPU_DIV4, 11, 7, 5, 23,
+ BUS_MSTOP(7, BIT(11))),
DEF_MOD("cru_0_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 2, 6, 18,
BUS_MSTOP(9, BIT(4))),
DEF_MOD_NO_PM("cru_0_vclk", CLK_PLLVDO_CRU0, 13, 3, 6, 19,
@@ -238,6 +257,12 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(9, BIT(7))),
DEF_MOD("cru_3_pclk", CLK_PLLDTY_DIV16, 13, 13, 6, 29,
BUS_MSTOP(9, BIT(7))),
+ DEF_MOD("gpu_0_clk", CLK_PLLGPU_GEAR, 15, 0, 7, 16,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("gpu_0_axi_clk", CLK_PLLDTY_ACPU_DIV2, 15, 1, 7, 17,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("gpu_0_ace_clk", CLK_PLLDTY_ACPU_DIV2, 15, 2, 7, 18,
+ BUS_MSTOP(3, BIT(4))),
};
static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
@@ -275,6 +300,10 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(10, 12, 4, 29), /* USB2_0_U2H0_HRESETN */
+ DEF_RST(10, 13, 4, 30), /* USB2_0_U2H1_HRESETN */
+ DEF_RST(10, 14, 4, 31), /* USB2_0_U2P_EXL_SYSRST */
+ DEF_RST(10, 15, 5, 0), /* USB2_0_PRESETN */
DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
@@ -287,6 +316,9 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(12, 14, 5, 31), /* CRU_3_PRESETN */
DEF_RST(12, 15, 6, 0), /* CRU_3_ARESETN */
DEF_RST(13, 0, 6, 1), /* CRU_3_S_RESETN */
+ DEF_RST(13, 13, 6, 14), /* GPU_0_RESETN */
+ DEF_RST(13, 14, 6, 15), /* GPU_0_AXI_RESETN */
+ DEF_RST(13, 15, 6, 16), /* GPU_0_ACE_RESETN */
};
const struct rzv2h_cpg_info r9a09g057_cpg_info __initconst = {
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index da021ee446ec..71431970d6e6 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -27,6 +27,7 @@
#include <linux/psci.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -204,7 +205,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
int error;
dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk,
- enable ? "ON" : "OFF");
+ str_on_off(enable));
spin_lock_irqsave(&priv->rmw_lock, flags);
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index b91dfbfb01e3..a8628f64a03b 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -27,6 +27,7 @@
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/units.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -1217,7 +1218,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
}
dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg), hw->clk,
- enable ? "ON" : "OFF");
+ str_on_off(enable));
value = bitmask << 16;
if (enable)
diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
index 2b9771ab2b3f..bcc496e8cbcd 100644
--- a/drivers/clk/renesas/rzv2h-cpg.c
+++ b/drivers/clk/renesas/rzv2h-cpg.c
@@ -25,6 +25,7 @@
#include <linux/pm_domain.h>
#include <linux/refcount.h>
#include <linux/reset-controller.h>
+#include <linux/string_choices.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -44,10 +45,18 @@
#define CPG_BUS_1_MSTOP (0xd00)
#define CPG_BUS_MSTOP(m) (CPG_BUS_1_MSTOP + ((m) - 1) * 4)
-#define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), (val)))
-#define MDIV(val) FIELD_GET(GENMASK(15, 6), (val))
-#define PDIV(val) FIELD_GET(GENMASK(5, 0), (val))
-#define SDIV(val) FIELD_GET(GENMASK(2, 0), (val))
+#define CPG_PLL_STBY(x) ((x))
+#define CPG_PLL_STBY_RESETB BIT(0)
+#define CPG_PLL_STBY_RESETB_WEN BIT(16)
+#define CPG_PLL_CLK1(x) ((x) + 0x004)
+#define CPG_PLL_CLK1_KDIV(x) ((s16)FIELD_GET(GENMASK(31, 16), (x)))
+#define CPG_PLL_CLK1_MDIV(x) FIELD_GET(GENMASK(15, 6), (x))
+#define CPG_PLL_CLK1_PDIV(x) FIELD_GET(GENMASK(5, 0), (x))
+#define CPG_PLL_CLK2(x) ((x) + 0x008)
+#define CPG_PLL_CLK2_SDIV(x) FIELD_GET(GENMASK(2, 0), (x))
+#define CPG_PLL_MON(x) ((x) + 0x010)
+#define CPG_PLL_MON_RESETB BIT(0)
+#define CPG_PLL_MON_LOCK BIT(4)
#define DDIV_DIVCTL_WEN(shift) BIT((shift) + 16)
@@ -94,8 +103,7 @@ struct pll_clk {
struct rzv2h_cpg_priv *priv;
void __iomem *base;
struct clk_hw hw;
- unsigned int conf;
- unsigned int type;
+ struct pll pll;
};
#define to_pll(_hw) container_of(_hw, struct pll_clk, hw)
@@ -110,7 +118,7 @@ struct pll_clk {
* @on_index: register offset
* @on_bit: ON/MON bit
* @mon_index: monitor register offset
- * @mon_bit: montor bit
+ * @mon_bit: monitor bit
*/
struct mod_clock {
struct rzv2h_cpg_priv *priv;
@@ -140,27 +148,78 @@ struct ddiv_clk {
#define to_ddiv_clock(_div) container_of(_div, struct ddiv_clk, div)
+static int rzv2h_cpg_pll_clk_is_enabled(struct clk_hw *hw)
+{
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ u32 val = readl(priv->base + CPG_PLL_MON(pll_clk->pll.offset));
+
+ /* Ensure both RESETB and LOCK bits are set */
+ return (val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
+ (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK);
+}
+
+static int rzv2h_cpg_pll_clk_enable(struct clk_hw *hw)
+{
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ struct pll pll = pll_clk->pll;
+ u32 stby_offset;
+ u32 mon_offset;
+ u32 val;
+ int ret;
+
+ if (rzv2h_cpg_pll_clk_is_enabled(hw))
+ return 0;
+
+ stby_offset = CPG_PLL_STBY(pll.offset);
+ mon_offset = CPG_PLL_MON(pll.offset);
+
+ writel(CPG_PLL_STBY_RESETB_WEN | CPG_PLL_STBY_RESETB,
+ priv->base + stby_offset);
+
+ /*
+ * Ensure the PLL enters normal mode.
+ *
+ * Note: There is no HW information about the worst-case latency.
+ *
+ * Since this latency might depend on the external crystal or the PLL
+ * rate, use a generously safe timeout value.
+ */
+ ret = readl_poll_timeout_atomic(priv->base + mon_offset, val,
+ (val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
+ (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK), 200, 2000);
+ if (ret)
+ dev_err(priv->dev, "Failed to enable PLL 0x%x/%pC\n",
+ stby_offset, hw->clk);
+
+ return ret;
+}
+
static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct pll_clk *pll_clk = to_pll(hw);
struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ struct pll pll = pll_clk->pll;
unsigned int clk1, clk2;
u64 rate;
- if (!PLL_CLK_ACCESS(pll_clk->conf))
+ if (!pll.has_clkn)
return 0;
- clk1 = readl(priv->base + PLL_CLK1_OFFSET(pll_clk->conf));
- clk2 = readl(priv->base + PLL_CLK2_OFFSET(pll_clk->conf));
+ clk1 = readl(priv->base + CPG_PLL_CLK1(pll.offset));
+ clk2 = readl(priv->base + CPG_PLL_CLK2(pll.offset));
- rate = mul_u64_u32_shr(parent_rate, (MDIV(clk1) << 16) + KDIV(clk1),
- 16 + SDIV(clk2));
+ rate = mul_u64_u32_shr(parent_rate, (CPG_PLL_CLK1_MDIV(clk1) << 16) +
+ CPG_PLL_CLK1_KDIV(clk1), 16 + CPG_PLL_CLK2_SDIV(clk2));
- return DIV_ROUND_CLOSEST_ULL(rate, PDIV(clk1));
+ return DIV_ROUND_CLOSEST_ULL(rate, CPG_PLL_CLK1_PDIV(clk1));
}
static const struct clk_ops rzv2h_cpg_pll_ops = {
+ .is_enabled = rzv2h_cpg_pll_clk_is_enabled,
+ .enable = rzv2h_cpg_pll_clk_enable,
.recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
};
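The recalc callback above evaluates the PLL's fixed-point feedback setting: rate = parent * (MDIV + KDIV / 2^16) / (PDIV * 2^SDIV). A minimal userspace sketch of the same arithmetic, with invented field values (not taken from any real PLL configuration):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t parent = 24000000;	/* QEXTAL: 24 MHz */
		unsigned int mdiv = 100, pdiv = 1, sdiv = 1;
		int16_t kdiv = 0;		/* signed fractional part */

		/* parent * (MDIV + KDIV / 2^16), shifted down by SDIV ... */
		uint64_t rate = parent * (((int64_t)mdiv << 16) + kdiv) >> (16 + sdiv);

		/* ... then DIV_ROUND_CLOSEST over PDIV */
		rate = (rate + pdiv / 2) / pdiv;

		printf("%llu Hz\n", (unsigned long long)rate);	/* 1200000000 Hz */
		return 0;
	}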
@@ -193,10 +252,9 @@ rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
init.num_parents = 1;
pll_clk->hw.init = &init;
- pll_clk->conf = core->cfg.conf;
+ pll_clk->pll = core->cfg.pll;
pll_clk->base = base;
pll_clk->priv = priv;
- pll_clk->type = core->type;
ret = devm_clk_hw_register(dev, &pll_clk->hw);
if (ret)
@@ -241,6 +299,9 @@ static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon
u32 bitmask = BIT(mon);
u32 val;
+ if (mon == CSDIV_NO_MON)
+ return 0;
+
return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200);
}
@@ -272,12 +333,6 @@ static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
writel(val, divider->reg);
ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
- if (ret)
- goto ddiv_timeout;
-
- spin_unlock_irqrestore(divider->lock, flags);
-
- return 0;
ddiv_timeout:
spin_unlock_irqrestore(divider->lock, flags);
@@ -320,7 +375,10 @@ rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
return ERR_PTR(-ENOMEM);
init.name = core->name;
- init.ops = &rzv2h_ddiv_clk_divider_ops;
+ if (cfg_ddiv.no_rmw)
+ init.ops = &clk_divider_ops;
+ else
+ init.ops = &rzv2h_ddiv_clk_divider_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -342,6 +400,24 @@ rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
return div->hw.clk;
}
+static struct clk * __init
+rzv2h_cpg_mux_clk_register(const struct cpg_core_clk *core,
+ struct rzv2h_cpg_priv *priv)
+{
+ struct smuxed mux = core->cfg.smux;
+ const struct clk_hw *clk_hw;
+
+ clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
+ core->parent_names, core->num_parents,
+ core->flag, priv->base + mux.offset,
+ mux.shift, mux.width,
+ core->mux_flags, &priv->rmw_lock);
+ if (IS_ERR(clk_hw))
+ return ERR_CAST(clk_hw);
+
+ return clk_hw->clk;
+}
+
static struct clk
*rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
void *data)
@@ -426,6 +502,9 @@ rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
case CLK_TYPE_DDIV:
clk = rzv2h_cpg_ddiv_clk_register(core, priv);
break;
+ case CLK_TYPE_SMUX:
+ clk = rzv2h_cpg_mux_clk_register(core, priv);
+ break;
default:
goto fail;
}
@@ -494,11 +573,14 @@ static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
if (clock->mon_index >= 0) {
offset = GET_CLK_MON_OFFSET(clock->mon_index);
bitmask = BIT(clock->mon_bit);
- } else {
- offset = GET_CLK_ON_OFFSET(clock->on_index);
- bitmask = BIT(clock->on_bit);
+
+ if (!(readl(priv->base + offset) & bitmask))
+ return 0;
}
+ offset = GET_CLK_ON_OFFSET(clock->on_index);
+ bitmask = BIT(clock->on_bit);
+
return readl(priv->base + offset) & bitmask;
}
@@ -514,7 +596,7 @@ static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
int error;
dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk,
- enable ? "ON" : "OFF");
+ str_on_off(enable));
if (enabled == enable)
return 0;
@@ -658,8 +740,8 @@ fail:
mod->name, PTR_ERR(clk));
}
-static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int __rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
{
struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
@@ -667,35 +749,31 @@ static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
u8 monbit = priv->resets[id].mon_bit;
u32 value = mask << 16;
- dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, reg);
+ dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n",
+ assert ? "assert" : "deassert", id, reg);
+ if (!assert)
+ value |= mask;
writel(value, priv->base + reg);
reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
mask = BIT(monbit);
return readl_poll_timeout_atomic(priv->base + reg, value,
- value & mask, 10, 200);
+ assert ? (value & mask) : !(value & mask),
+ 10, 200);
+}
+
+static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return __rzv2h_cpg_assert(rcdev, id, true);
}
static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
- unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
- u32 mask = BIT(priv->resets[id].reset_bit);
- u8 monbit = priv->resets[id].mon_bit;
- u32 value = (mask << 16) | mask;
-
- dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, reg);
-
- writel(value, priv->base + reg);
-
- reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
- mask = BIT(monbit);
-
- return readl_poll_timeout_atomic(priv->base + reg, value,
- !(value & mask), 10, 200);
+ return __rzv2h_cpg_assert(rcdev, id, false);
}
static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev,
@@ -967,18 +1045,24 @@ static int __init rzv2h_cpg_probe(struct platform_device *pdev)
}
static const struct of_device_id rzv2h_cpg_match[] = {
-#ifdef CONFIG_CLK_R9A09G057
- {
- .compatible = "renesas,r9a09g057-cpg",
- .data = &r9a09g057_cpg_info,
- },
-#endif
#ifdef CONFIG_CLK_R9A09G047
{
.compatible = "renesas,r9a09g047-cpg",
.data = &r9a09g047_cpg_info,
},
#endif
+#ifdef CONFIG_CLK_R9A09G056
+ {
+ .compatible = "renesas,r9a09g056-cpg",
+ .data = &r9a09g056_cpg_info,
+ },
+#endif
+#ifdef CONFIG_CLK_R9A09G057
+ {
+ .compatible = "renesas,r9a09g057-cpg",
+ .data = &r9a09g057_cpg_info,
+ },
+#endif
{ /* sentinel */ }
};
diff --git a/drivers/clk/renesas/rzv2h-cpg.h b/drivers/clk/renesas/rzv2h-cpg.h
index 576a070763cb..9104b1cd276c 100644
--- a/drivers/clk/renesas/rzv2h-cpg.h
+++ b/drivers/clk/renesas/rzv2h-cpg.h
@@ -11,20 +11,51 @@
#include <linux/bitfield.h>
/**
+ * struct pll - Structure for PLL configuration
+ *
+ * @offset: STBY register offset
+ * @has_clkn: Flag to indicate if CLK1/2 are accessible or not
+ */
+struct pll {
+ unsigned int offset:9;
+ unsigned int has_clkn:1;
+};
+
+#define PLL_PACK(_offset, _has_clkn) \
+ ((struct pll){ \
+ .offset = _offset, \
+ .has_clkn = _has_clkn \
+ })
+
+#define PLLCA55 PLL_PACK(0x60, 1)
+#define PLLGPU PLL_PACK(0x120, 1)
+
+/**
* struct ddiv - Structure for dynamic switching divider
*
* @offset: register offset
* @shift: position of the divider bit
* @width: width of the divider
* @monbit: monitor bit in CPG_CLKSTATUS0 register
+ * @no_rmw: flag to indicate whether the register must be written without
+ * read-modify-write (1: no RMW, 0: RMW)
*/
struct ddiv {
unsigned int offset:11;
unsigned int shift:4;
unsigned int width:4;
unsigned int monbit:5;
+ unsigned int no_rmw:1;
};
+/*
+ * On RZ/V2H(P), the dynamic divider clock supports up to 19 monitor bits,
+ * while on RZ/G3E, it supports up to 16 monitor bits. Use the maximum value
+ * `0x1f` to indicate that monitor bits are not supported for static divider
+ * clocks.
+ */
+#define CSDIV_NO_MON (0x1f)
+
#define DDIV_PACK(_offset, _shift, _width, _monbit) \
((struct ddiv){ \
.offset = _offset, \
@@ -33,10 +64,41 @@ struct ddiv {
.monbit = _monbit \
})
+#define DDIV_PACK_NO_RMW(_offset, _shift, _width, _monbit) \
+ ((struct ddiv){ \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ .monbit = (_monbit), \
+ .no_rmw = 1 \
+ })
+
+/**
+ * struct smuxed - Structure for static muxed clocks
+ *
+ * @offset: register offset
+ * @shift: position of the mux field
+ * @width: width of the mux field
+ */
+struct smuxed {
+ unsigned int offset:11;
+ unsigned int shift:4;
+ unsigned int width:4;
+};
+
+#define SMUX_PACK(_offset, _shift, _width) \
+ ((struct smuxed){ \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ })
+
+#define CPG_SSEL1 (0x304)
#define CPG_CDDIV0 (0x400)
#define CPG_CDDIV1 (0x404)
#define CPG_CDDIV3 (0x40C)
#define CPG_CDDIV4 (0x410)
+#define CPG_CSDIV0 (0x500)
#define CDDIV0_DIVCTL1 DDIV_PACK(CPG_CDDIV0, 4, 3, 1)
#define CDDIV0_DIVCTL2 DDIV_PACK(CPG_CDDIV0, 8, 3, 2)
@@ -44,12 +106,18 @@ struct ddiv {
#define CDDIV1_DIVCTL1 DDIV_PACK(CPG_CDDIV1, 4, 2, 5)
#define CDDIV1_DIVCTL2 DDIV_PACK(CPG_CDDIV1, 8, 2, 6)
#define CDDIV1_DIVCTL3 DDIV_PACK(CPG_CDDIV1, 12, 2, 7)
+#define CDDIV3_DIVCTL1 DDIV_PACK(CPG_CDDIV3, 4, 3, 13)
#define CDDIV3_DIVCTL2 DDIV_PACK(CPG_CDDIV3, 8, 3, 14)
#define CDDIV3_DIVCTL3 DDIV_PACK(CPG_CDDIV3, 12, 1, 15)
#define CDDIV4_DIVCTL0 DDIV_PACK(CPG_CDDIV4, 0, 1, 16)
#define CDDIV4_DIVCTL1 DDIV_PACK(CPG_CDDIV4, 4, 1, 17)
#define CDDIV4_DIVCTL2 DDIV_PACK(CPG_CDDIV4, 8, 1, 18)
+#define CSDIV0_DIVCTL3 DDIV_PACK_NO_RMW(CPG_CSDIV0, 12, 2, CSDIV_NO_MON)
+
+#define SSEL1_SELCTL2 SMUX_PACK(CPG_SSEL1, 8, 1)
+#define SSEL1_SELCTL3 SMUX_PACK(CPG_SSEL1, 12, 1)
+
#define BUS_MSTOP_IDX_MASK GENMASK(31, 16)
#define BUS_MSTOP_BITS_MASK GENMASK(15, 0)
#define BUS_MSTOP(idx, mask) (FIELD_PREP_CONST(BUS_MSTOP_IDX_MASK, (idx)) | \
@@ -74,8 +142,13 @@ struct cpg_core_clk {
union {
unsigned int conf;
struct ddiv ddiv;
+ struct pll pll;
+ struct smuxed smux;
} cfg;
const struct clk_div_table *dtable;
+ const char * const *parent_names;
+ unsigned int num_parents;
+ u8 mux_flags;
u32 flag;
};
@@ -85,20 +158,15 @@ enum clk_types {
CLK_TYPE_FF, /* Fixed Factor Clock */
CLK_TYPE_PLL,
CLK_TYPE_DDIV, /* Dynamic Switching Divider */
+ CLK_TYPE_SMUX, /* Static Mux */
};
-/* BIT(31) indicates if CLK1/2 are accessible or not */
-#define PLL_CONF(n) (BIT(31) | ((n) & ~GENMASK(31, 16)))
-#define PLL_CLK_ACCESS(n) ((n) & BIT(31) ? 1 : 0)
-#define PLL_CLK1_OFFSET(n) ((n) & ~GENMASK(31, 16))
-#define PLL_CLK2_OFFSET(n) (((n) & ~GENMASK(31, 16)) + (0x4))
-
#define DEF_TYPE(_name, _id, _type...) \
{ .name = _name, .id = _id, .type = _type }
#define DEF_BASE(_name, _id, _type, _parent...) \
DEF_TYPE(_name, _id, _type, .parent = _parent)
-#define DEF_PLL(_name, _id, _parent, _conf) \
- DEF_TYPE(_name, _id, CLK_TYPE_PLL, .parent = _parent, .cfg.conf = _conf)
+#define DEF_PLL(_name, _id, _parent, _pll_packed) \
+ DEF_TYPE(_name, _id, CLK_TYPE_PLL, .parent = _parent, .cfg.pll = _pll_packed)
#define DEF_INPUT(_name, _id) \
DEF_TYPE(_name, _id, CLK_TYPE_IN)
#define DEF_FIXED(_name, _id, _parent, _mult, _div) \
@@ -109,6 +177,15 @@ enum clk_types {
.parent = _parent, \
.dtable = _dtable, \
.flag = CLK_DIVIDER_HIWORD_MASK)
+#define DEF_CSDIV(_name, _id, _parent, _ddiv_packed, _dtable) \
+ DEF_DDIV(_name, _id, _parent, _ddiv_packed, _dtable)
+#define DEF_SMUX(_name, _id, _smux_packed, _parent_names) \
+ DEF_TYPE(_name, _id, CLK_TYPE_SMUX, \
+ .cfg.smux = _smux_packed, \
+ .parent_names = _parent_names, \
+ .num_parents = ARRAY_SIZE(_parent_names), \
+ .flag = CLK_SET_RATE_PARENT, \
+ .mux_flags = CLK_MUX_HIWORD_MASK)
/**
* struct rzv2h_mod_clk - Module Clocks definitions
@@ -221,6 +298,7 @@ struct rzv2h_cpg_info {
};
extern const struct rzv2h_cpg_info r9a09g047_cpg_info;
+extern const struct rzv2h_cpg_info r9a09g056_cpg_info;
extern const struct rzv2h_cpg_info r9a09g057_cpg_info;
#endif /* __RENESAS_RZV2H_CPG_H__ */
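Since DEF_SMUX derives num_parents with ARRAY_SIZE(), its last argument must be a real array rather than a pointer. A hypothetical core-clock entry (the clock names and the CLK_EXAMPLE_SMUX index are invented for illustration):

	static const char * const example_smux_parents[] = {
		".plldty_div8", ".plldty_div16"
	};

	/* one SSEL1.SELCTL2 bit selects between the two parents */
	DEF_SMUX(".example_smux", CLK_EXAMPLE_SMUX, SSEL1_SELCTL2,
		 example_smux_parents),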
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index e8ece20aebfd..c281a9738d9f 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_COMMON_CLK_ROCKCHIP) += clk-rockchip.o
clk-rockchip-y += clk.o
clk-rockchip-y += clk-pll.o
clk-rockchip-y += clk-cpu.o
+clk-rockchip-y += clk-gate-grf.o
clk-rockchip-y += clk-half-divider.o
clk-rockchip-y += clk-inverter.o
clk-rockchip-y += clk-mmc-phase.o
diff --git a/drivers/clk/rockchip/clk-gate-grf.c b/drivers/clk/rockchip/clk-gate-grf.c
new file mode 100644
index 000000000000..8122f471f391
--- /dev/null
+++ b/drivers/clk/rockchip/clk-gate-grf.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2025 Collabora Ltd.
+ * Author: Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
+ *
+ * Certain clocks on Rockchip SoCs are "gated" behind an additional register bit
+ * write in a GRF register, such as the SAI MCLKs on RK3576. This code
+ * implements a clock driver for these types of gates, based on regmaps.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+struct rockchip_gate_grf {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ unsigned int reg;
+ unsigned int shift;
+ u8 flags;
+};
+
+#define to_gate_grf(_hw) container_of(_hw, struct rockchip_gate_grf, hw)
+
+static int rockchip_gate_grf_enable(struct clk_hw *hw)
+{
+ struct rockchip_gate_grf *gate = to_gate_grf(hw);
+ u32 val = !(gate->flags & CLK_GATE_SET_TO_DISABLE) ? BIT(gate->shift) : 0;
+ u32 hiword = ((gate->flags & CLK_GATE_HIWORD_MASK) ? 1 : 0) << (gate->shift + 16);
+ int ret;
+
+ ret = regmap_update_bits(gate->regmap, gate->reg,
+ hiword | BIT(gate->shift), hiword | val);
+
+ return ret;
+}
+
+static void rockchip_gate_grf_disable(struct clk_hw *hw)
+{
+ struct rockchip_gate_grf *gate = to_gate_grf(hw);
+ u32 val = !(gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : BIT(gate->shift);
+ u32 hiword = ((gate->flags & CLK_GATE_HIWORD_MASK) ? 1 : 0) << (gate->shift + 16);
+
+ regmap_update_bits(gate->regmap, gate->reg,
+ hiword | BIT(gate->shift), hiword | val);
+}
+
+static int rockchip_gate_grf_is_enabled(struct clk_hw *hw)
+{
+ struct rockchip_gate_grf *gate = to_gate_grf(hw);
+ bool invert = !!(gate->flags & CLK_GATE_SET_TO_DISABLE);
+ int ret;
+
+ ret = regmap_test_bits(gate->regmap, gate->reg, BIT(gate->shift));
+ if (ret < 0)
+ ret = 0;
+
+ return invert ? 1 - ret : ret;
+}
+
+static const struct clk_ops rockchip_gate_grf_ops = {
+ .enable = rockchip_gate_grf_enable,
+ .disable = rockchip_gate_grf_disable,
+ .is_enabled = rockchip_gate_grf_is_enabled,
+};
+
+struct clk *rockchip_clk_register_gate_grf(const char *name,
+ const char *parent_name, unsigned long flags,
+ struct regmap *regmap, unsigned int reg, unsigned int shift,
+ u8 gate_flags)
+{
+ struct rockchip_gate_grf *gate;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ if (IS_ERR(regmap)) {
+ pr_err("%s: regmap not available\n", __func__);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.flags = flags;
+ init.num_parents = parent_name ? 1 : 0;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.ops = &rockchip_gate_grf_ops;
+
+ gate->hw.init = &init;
+ gate->regmap = regmap;
+ gate->reg = reg;
+ gate->shift = shift;
+ gate->flags = gate_flags;
+
+ clk = clk_register(NULL, &gate->hw);
+ if (IS_ERR(clk))
+ kfree(gate);
+
+ return clk;
+}
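With CLK_GATE_HIWORD_MASK set, the hiword value computed above is BIT(shift + 16): in Rockchip GRF registers the upper 16 bits of a write act as a write-enable mask for the corresponding lower bits, so a gate bit can be flipped without a read-modify-write cycle. A standalone model of that register semantic (illustrative only, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t grf_reg;	/* stands in for one GRF register */

	static void grf_write(uint32_t val)
	{
		uint32_t mask = val >> 16;	/* upper half: which bits to touch */

		grf_reg = (grf_reg & ~mask) | (val & mask);
	}

	int main(void)
	{
		unsigned int shift = 2;

		grf_write(1u << (shift + 16) | 1u << shift);	/* enable */
		printf("0x%04x\n", grf_reg);			/* 0x0004 */
		grf_write(1u << (shift + 16));			/* disable */
		printf("0x%04x\n", grf_reg);			/* 0x0000 */
		return 0;
	}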
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 91012078681b..b3ed8e7523e5 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -9,11 +9,14 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/regmap.h>
#include "clk.h"
struct rockchip_mmc_clock {
struct clk_hw hw;
void __iomem *reg;
+ struct regmap *grf;
+ int grf_reg;
int shift;
int cached_phase;
struct notifier_block clk_rate_change_nb;
@@ -54,7 +57,12 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
if (!rate)
return 0;
- raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
+ if (mmc_clock->grf)
+ regmap_read(mmc_clock->grf, mmc_clock->grf_reg, &raw_value);
+ else
+ raw_value = readl(mmc_clock->reg);
+
+ raw_value >>= mmc_clock->shift;
degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
@@ -134,8 +142,12 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
raw_value |= nineties;
- writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift),
- mmc_clock->reg);
+ raw_value = HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift);
+
+ if (mmc_clock->grf)
+ regmap_write(mmc_clock->grf, mmc_clock->grf_reg, raw_value);
+ else
+ writel(raw_value, mmc_clock->reg);
pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
clk_hw_get_name(hw), degrees, delay_num,
@@ -189,7 +201,9 @@ static int rockchip_mmc_clk_rate_notify(struct notifier_block *nb,
struct clk *rockchip_clk_register_mmc(const char *name,
const char *const *parent_names, u8 num_parents,
- void __iomem *reg, int shift)
+ void __iomem *reg,
+ struct regmap *grf, int grf_reg,
+ int shift)
{
struct clk_init_data init;
struct rockchip_mmc_clock *mmc_clock;
@@ -208,6 +222,8 @@ struct clk *rockchip_clk_register_mmc(const char *name,
mmc_clock->hw.init = &init;
mmc_clock->reg = reg;
+ mmc_clock->grf = grf;
+ mmc_clock->grf_reg = grf_reg;
mmc_clock->shift = shift;
clk = clk_register(NULL, &mmc_clock->hw);
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index 2c2abb3b4210..af74439a7457 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -1027,16 +1027,6 @@ static int rockchip_rk3588_pll_is_enabled(struct clk_hw *hw)
return !(pllcon & RK3588_PLLCON1_PWRDOWN);
}
-static int rockchip_rk3588_pll_init(struct clk_hw *hw)
-{
- struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
-
- if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
- return 0;
-
- return 0;
-}
-
static const struct clk_ops rockchip_rk3588_pll_clk_norate_ops = {
.recalc_rate = rockchip_rk3588_pll_recalc_rate,
.enable = rockchip_rk3588_pll_enable,
@@ -1051,7 +1041,6 @@ static const struct clk_ops rockchip_rk3588_pll_clk_ops = {
.enable = rockchip_rk3588_pll_enable,
.disable = rockchip_rk3588_pll_disable,
.is_enabled = rockchip_rk3588_pll_is_enabled,
- .init = rockchip_rk3588_pll_init,
};
/*
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index d341ce0708aa..df9330958c83 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -123,6 +123,7 @@ PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" };
PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll", "usb480m" };
PNAME(mux_pll_src_dmyapll_dpll_gpll_xin24_p) = { "dummy_apll", "dpll", "gpll", "xin24m" };
+PNAME(mux_usb480m_p) = { "usb480m_phy", "xin24m" };
PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" };
PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
PNAME(mux_i2s_clkout_p) = { "i2s_pre", "xin12m" };
@@ -423,6 +424,9 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 9, GFLAGS),
GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 10, GFLAGS),
GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 11, GFLAGS),
+
+ MUX(SCLK_USB480M, "usb480m", mux_usb480m_p, CLK_SET_RATE_PARENT,
+ RK2928_MISC_CON, 15, 1, MFLAGS),
};
static const char *const rk3036_critical_clocks[] __initconst = {
@@ -431,6 +435,7 @@ static const char *const rk3036_critical_clocks[] __initconst = {
"hclk_peri",
"pclk_peri",
"pclk_ddrupctl",
+ "ddrphy",
};
static void __init rk3036_clk_init(struct device_node *np)
@@ -438,7 +443,6 @@ static void __init rk3036_clk_init(struct device_node *np)
struct rockchip_clk_provider *ctx;
unsigned long clk_nr_clks;
void __iomem *reg_base;
- struct clk *clk;
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -462,11 +466,6 @@ static void __init rk3036_clk_init(struct device_node *np)
return;
}
- clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock usb480m: %ld\n",
- __func__, PTR_ERR(clk));
-
rockchip_clk_register_plls(ctx, rk3036_pll_clks,
ARRAY_SIZE(rk3036_pll_clks),
RK3036_GRF_SOC_STATUS0);
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 90d329216064..0a1e017df7c6 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -418,7 +418,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK3288_CLKGATE_CON(3), 11, GFLAGS),
MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, CLK_SET_RATE_PARENT,
- RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS),
+ RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS, grf_type_sys),
GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0,
RK3288_CLKGATE_CON(9), 0, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index cf60fcf2fa5c..cd5f65b6cdf5 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -677,9 +677,9 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKSEL_CON(27), 15, 1, MFLAGS, 8, 5, DFLAGS,
RK3328_CLKGATE_CON(3), 5, GFLAGS),
MUXGRF(SCLK_MAC2IO, "clk_mac2io", mux_mac2io_src_p, CLK_SET_RATE_NO_REPARENT,
- RK3328_GRF_MAC_CON1, 10, 1, MFLAGS),
+ RK3328_GRF_MAC_CON1, 10, 1, MFLAGS, grf_type_sys),
MUXGRF(SCLK_MAC2IO_EXT, "clk_mac2io_ext", mux_mac2io_ext_p, CLK_SET_RATE_NO_REPARENT,
- RK3328_GRF_SOC_CON4, 14, 1, MFLAGS),
+ RK3328_GRF_SOC_CON4, 14, 1, MFLAGS, grf_type_sys),
COMPOSITE(SCLK_MAC2PHY_SRC, "clk_mac2phy_src", mux_2plls_p, 0,
RK3328_CLKSEL_CON(26), 7, 1, MFLAGS, 0, 5, DFLAGS,
@@ -692,7 +692,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKSEL_CON(26), 8, 2, DFLAGS,
RK3328_CLKGATE_CON(9), 2, GFLAGS),
MUXGRF(SCLK_MAC2PHY, "clk_mac2phy", mux_mac2phy_src_p, CLK_SET_RATE_NO_REPARENT,
- RK3328_GRF_MAC_CON2, 10, 1, MFLAGS),
+ RK3328_GRF_MAC_CON2, 10, 1, MFLAGS, grf_type_sys),
FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
diff --git a/drivers/clk/rockchip/clk-rk3528.c b/drivers/clk/rockchip/clk-rk3528.c
index b8b577b902a0..a5ff64b93f8f 100644
--- a/drivers/clk/rockchip/clk-rk3528.c
+++ b/drivers/clk/rockchip/clk-rk3528.c
@@ -10,6 +10,9 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/minmax.h>
+#include <linux/slab.h>
#include <dt-bindings/clock/rockchip,rk3528-cru.h>
@@ -1061,23 +1064,65 @@ static struct rockchip_clk_branch rk3528_clk_branches[] __initdata = {
0, 1, 1),
};
+static struct rockchip_clk_branch rk3528_vo_clk_branches[] __initdata = {
+ MMC_GRF(SCLK_SDMMC_DRV, "sdmmc_drv", "cclk_src_sdmmc0",
+ RK3528_SDMMC_CON(0), 1, grf_type_vo),
+ MMC_GRF(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "cclk_src_sdmmc0",
+ RK3528_SDMMC_CON(1), 1, grf_type_vo),
+};
+
+static struct rockchip_clk_branch rk3528_vpu_clk_branches[] __initdata = {
+ MMC_GRF(SCLK_SDIO0_DRV, "sdio0_drv", "cclk_src_sdio0",
+ RK3528_SDIO0_CON(0), 1, grf_type_vpu),
+ MMC_GRF(SCLK_SDIO0_SAMPLE, "sdio0_sample", "cclk_src_sdio0",
+ RK3528_SDIO0_CON(1), 1, grf_type_vpu),
+ MMC_GRF(SCLK_SDIO1_DRV, "sdio1_drv", "cclk_src_sdio1",
+ RK3528_SDIO1_CON(0), 1, grf_type_vpu),
+ MMC_GRF(SCLK_SDIO1_SAMPLE, "sdio1_sample", "cclk_src_sdio1",
+ RK3528_SDIO1_CON(1), 1, grf_type_vpu),
+};
+
static int __init clk_rk3528_probe(struct platform_device *pdev)
{
- struct rockchip_clk_provider *ctx;
+ unsigned long nr_vpu_branches = ARRAY_SIZE(rk3528_vpu_clk_branches);
+ unsigned long nr_vo_branches = ARRAY_SIZE(rk3528_vo_clk_branches);
+ unsigned long nr_branches = ARRAY_SIZE(rk3528_clk_branches);
+ unsigned long nr_clks, nr_vo_clks, nr_vpu_clks;
+ struct rockchip_aux_grf *vo_grf_e, *vpu_grf_e;
+ struct regmap *vo_grf, *vpu_grf;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- unsigned long nr_branches = ARRAY_SIZE(rk3528_clk_branches);
- unsigned long nr_clks;
+ struct rockchip_clk_provider *ctx;
void __iomem *reg_base;
- nr_clks = rockchip_clk_find_max_clk_id(rk3528_clk_branches,
- nr_branches) + 1;
-
reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return dev_err_probe(dev, PTR_ERR(reg_base),
"could not map cru region");
+ nr_clks = rockchip_clk_find_max_clk_id(rk3528_clk_branches,
+ nr_branches) + 1;
+
+ vo_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3528-vo-grf");
+ if (!IS_ERR(vo_grf)) {
+ nr_vo_clks = rockchip_clk_find_max_clk_id(rk3528_vo_clk_branches,
+ nr_vo_branches) + 1;
+ nr_clks = max(nr_clks, nr_vo_clks);
+ } else if (PTR_ERR(vo_grf) != -ENODEV) {
+ return dev_err_probe(dev, PTR_ERR(vo_grf),
+ "failed to look up VO GRF\n");
+ }
+
+ vpu_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3528-vpu-grf");
+ if (!IS_ERR(vpu_grf)) {
+ nr_vpu_clks = rockchip_clk_find_max_clk_id(rk3528_vpu_clk_branches,
+ nr_vpu_branches) + 1;
+ nr_clks = max(nr_clks, nr_vpu_clks);
+ } else if (PTR_ERR(vpu_grf) != -ENODEV) {
+ return dev_err_probe(dev, PTR_ERR(vpu_grf),
+ "failed to look up VPU GRF\n");
+ }
+
ctx = rockchip_clk_init(np, reg_base, nr_clks);
if (IS_ERR(ctx))
return dev_err_probe(dev, PTR_ERR(ctx),
@@ -1092,6 +1137,32 @@ static int __init clk_rk3528_probe(struct platform_device *pdev)
ARRAY_SIZE(rk3528_cpuclk_rates));
rockchip_clk_register_branches(ctx, rk3528_clk_branches, nr_branches);
+ if (!IS_ERR(vo_grf)) {
+ vo_grf_e = devm_kzalloc(dev, sizeof(*vo_grf_e), GFP_KERNEL);
+ if (!vo_grf_e)
+ return -ENOMEM;
+
+ vo_grf_e->grf = vo_grf;
+ vo_grf_e->type = grf_type_vo;
+ hash_add(ctx->aux_grf_table, &vo_grf_e->node, grf_type_vo);
+
+ rockchip_clk_register_branches(ctx, rk3528_vo_clk_branches,
+ nr_vo_branches);
+ }
+
+ if (!IS_ERR(vpu_grf)) {
+ vpu_grf_e = devm_kzalloc(dev, sizeof(*vpu_grf_e), GFP_KERNEL);
+ if (!vpu_grf_e)
+ return -ENOMEM;
+
+ vpu_grf_e->grf = vpu_grf;
+ vpu_grf_e->type = grf_type_vpu;
+ hash_add(ctx->aux_grf_table, &vpu_grf_e->node, grf_type_vpu);
+
+ rockchip_clk_register_branches(ctx, rk3528_vpu_clk_branches,
+ nr_vpu_branches);
+ }
+
rk3528_rst_init(np, reg_base);
rockchip_register_restart_notifier(ctx, RK3528_GLB_SRST_FST, NULL);
diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
index 7d9279291e76..d48ab9d6c064 100644
--- a/drivers/clk/rockchip/clk-rk3568.c
+++ b/drivers/clk/rockchip/clk-rk3568.c
@@ -89,6 +89,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
RK3036_PLL_RATE(96000000, 1, 96, 6, 4, 1, 0),
RK3036_PLL_RATE(78750000, 4, 315, 6, 4, 1, 0),
RK3036_PLL_RATE(74250000, 2, 99, 4, 4, 1, 0),
+ RK3036_PLL_RATE(33300000, 4, 111, 5, 4, 1, 0),
{ /* sentinel */ },
};
@@ -590,7 +591,7 @@ static struct rockchip_clk_branch rk3568_clk_branches[] __initdata = {
RK3568_CLKSEL_CON(9), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3568_CLKGATE_CON(4), 0, GFLAGS),
MUXGRF(CLK_DDR1X, "clk_ddr1x", clk_ddr1x_p, CLK_SET_RATE_PARENT,
- RK3568_CLKSEL_CON(9), 15, 1, MFLAGS),
+ RK3568_CLKSEL_CON(9), 15, 1, MFLAGS, grf_type_sys),
COMPOSITE_NOMUX(CLK_MSCH, "clk_msch", "clk_ddr1x", CLK_IGNORE_UNUSED,
RK3568_CLKSEL_CON(10), 0, 2, DFLAGS,
diff --git a/drivers/clk/rockchip/clk-rk3576.c b/drivers/clk/rockchip/clk-rk3576.c
index be703f250197..9bc0ef51ef68 100644
--- a/drivers/clk/rockchip/clk-rk3576.c
+++ b/drivers/clk/rockchip/clk-rk3576.c
@@ -10,11 +10,13 @@
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
#include <dt-bindings/clock/rockchip,rk3576-cru.h>
#include "clk.h"
#define RK3576_GRF_SOC_STATUS0 0x600
#define RK3576_PMU0_GRF_OSC_CON6 0x18
+#define RK3576_VCCIO_IOC_MISC_CON0 0x6400
enum rk3576_plls {
bpll, lpll, vpll, aupll, cpll, gpll, ppll,
@@ -1481,6 +1483,14 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = {
RK3576_CLKGATE_CON(10), 0, GFLAGS),
GATE(CLK_SAI0_MCLKOUT, "clk_sai0_mclkout", "mclk_sai0_8ch", 0,
RK3576_CLKGATE_CON(10), 1, GFLAGS),
+ GATE_GRF(CLK_SAI0_MCLKOUT_TO_IO, "mclk_sai0_to_io", "clk_sai0_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 0, GFLAGS, grf_type_ioc),
+ GATE_GRF(CLK_SAI1_MCLKOUT_TO_IO, "mclk_sai1_to_io", "clk_sai1_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 1, GFLAGS, grf_type_ioc),
+ GATE_GRF(CLK_SAI2_MCLKOUT_TO_IO, "mclk_sai2_to_io", "clk_sai2_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 2, GFLAGS, grf_type_ioc),
+ GATE_GRF(CLK_SAI3_MCLKOUT_TO_IO, "mclk_sai3_to_io", "clk_sai3_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 3, GFLAGS, grf_type_ioc),
/* sdgmac */
COMPOSITE_NODIV(HCLK_SDGMAC_ROOT, "hclk_sdgmac_root", mux_200m_100m_50m_24m_p, 0,
@@ -1678,13 +1688,13 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = {
/* phy ref */
MUXGRF(CLK_PHY_REF_SRC, "clk_phy_ref_src", clk_phy_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 4, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 4, 1, MFLAGS, grf_type_pmu0),
MUXGRF(CLK_USBPHY_REF_SRC, "clk_usbphy_ref_src", clk_usbphy_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 2, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 2, 1, MFLAGS, grf_type_pmu0),
MUXGRF(CLK_CPLL_REF_SRC, "clk_cpll_ref_src", clk_cpll_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 1, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 1, 1, MFLAGS, grf_type_pmu0),
MUXGRF(CLK_AUPLL_REF_SRC, "clk_aupll_ref_src", clk_aupll_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 0, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 0, 1, MFLAGS, grf_type_pmu0),
/* secure ns */
COMPOSITE_NODIV(ACLK_SECURE_NS, "aclk_secure_ns", mux_350m_175m_116m_24m_p, CLK_IS_CRITICAL,
@@ -1727,17 +1737,26 @@ static void __init rk3576_clk_init(struct device_node *np)
struct rockchip_clk_provider *ctx;
unsigned long clk_nr_clks;
void __iomem *reg_base;
- struct regmap *grf;
+ struct rockchip_aux_grf *ioc_grf_e;
+ struct rockchip_aux_grf *pmu0_grf_e;
+ struct regmap *ioc_grf;
+ struct regmap *pmu0_grf;
clk_nr_clks = rockchip_clk_find_max_clk_id(rk3576_clk_branches,
ARRAY_SIZE(rk3576_clk_branches)) + 1;
- grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-pmu0-grf");
- if (IS_ERR(grf)) {
+ pmu0_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-pmu0-grf");
+ if (IS_ERR(pmu0_grf)) {
pr_err("%s: could not get PMU0 GRF syscon\n", __func__);
return;
}
+ ioc_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-ioc-grf");
+ if (IS_ERR(ioc_grf)) {
+ pr_err("%s: could not get IOC GRF syscon\n", __func__);
+ return;
+ }
+
reg_base = of_iomap(np, 0);
if (!reg_base) {
pr_err("%s: could not map cru region\n", __func__);
@@ -1747,11 +1766,24 @@ static void __init rk3576_clk_init(struct device_node *np)
ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
- iounmap(reg_base);
- return;
+ goto err_unmap;
}
- ctx->grf = grf;
+ pmu0_grf_e = kzalloc(sizeof(*pmu0_grf_e), GFP_KERNEL);
+ if (!pmu0_grf_e)
+ goto err_unmap;
+
+ pmu0_grf_e->grf = pmu0_grf;
+ pmu0_grf_e->type = grf_type_pmu0;
+ hash_add(ctx->aux_grf_table, &pmu0_grf_e->node, grf_type_pmu0);
+
+ ioc_grf_e = kzalloc(sizeof(*ioc_grf_e), GFP_KERNEL);
+ if (!ioc_grf_e)
+ goto err_free_pmu0;
+
+ ioc_grf_e->grf = ioc_grf;
+ ioc_grf_e->type = grf_type_ioc;
+ hash_add(ctx->aux_grf_table, &ioc_grf_e->node, grf_type_ioc);
rockchip_clk_register_plls(ctx, rk3576_pll_clks,
ARRAY_SIZE(rk3576_pll_clks),
@@ -1774,6 +1806,14 @@ static void __init rk3576_clk_init(struct device_node *np)
rockchip_register_restart_notifier(ctx, RK3576_GLB_SRST_FST, NULL);
rockchip_clk_of_add_provider(np, ctx);
+
+ return;
+
+err_free_pmu0:
+ kfree(pmu0_grf_e);
+err_unmap:
+ iounmap(reg_base);
+ return;
}
CLK_OF_DECLARE(rk3576_cru, "rockchip,rk3576-cru", rk3576_clk_init);
diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c
index 4031733def4e..1694223f4f84 100644
--- a/drivers/clk/rockchip/clk-rk3588.c
+++ b/drivers/clk/rockchip/clk-rk3588.c
@@ -64,6 +64,7 @@ static struct rockchip_pll_rate_table rk3588_pll_rates[] = {
RK3588_PLL_RATE(1560000000, 2, 260, 1, 0),
RK3588_PLL_RATE(1536000000, 2, 256, 1, 0),
RK3588_PLL_RATE(1512000000, 2, 252, 1, 0),
+ RK3588_PLL_RATE(1500000000, 2, 250, 1, 0),
RK3588_PLL_RATE(1488000000, 2, 248, 1, 0),
RK3588_PLL_RATE(1464000000, 2, 244, 1, 0),
RK3588_PLL_RATE(1440000000, 2, 240, 1, 0),
diff --git a/drivers/clk/rockchip/clk-rv1126.c b/drivers/clk/rockchip/clk-rv1126.c
index fc19c5522490..15e7bfe84506 100644
--- a/drivers/clk/rockchip/clk-rv1126.c
+++ b/drivers/clk/rockchip/clk-rv1126.c
@@ -857,7 +857,7 @@ static struct rockchip_clk_branch rv1126_clk_branches[] __initdata = {
RV1126_GMAC_CON, 5, 1, MFLAGS),
MUXGRF(CLK_GMAC_SRC, "clk_gmac_src", mux_clk_gmac_src_p, CLK_SET_RATE_PARENT |
CLK_SET_RATE_NO_REPARENT,
- RV1126_GRF_IOFUNC_CON1, 12, 1, MFLAGS),
+ RV1126_GRF_IOFUNC_CON1, 12, 1, MFLAGS, grf_type_sys),
GATE(CLK_GMAC_REF, "clk_gmac_ref", "clk_gmac_src", 0,
RV1126_CLKGATE_CON(20), 7, GFLAGS),
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index cbf93ea119a9..19caf26c991b 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -382,6 +382,8 @@ static struct rockchip_clk_provider *rockchip_clk_init_base(
ctx->cru_node = np;
spin_lock_init(&ctx->lock);
+ hash_init(ctx->aux_grf_table);
+
ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
"rockchip,grf");
@@ -496,6 +498,8 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk)
{
+ struct regmap *grf = ctx->grf;
+ struct rockchip_aux_grf *agrf;
struct clk *clk;
unsigned int idx;
unsigned long flags;
@@ -504,6 +508,19 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
flags = list->flags;
clk = NULL;
+ /* for GRF-dependent branches, choose the right grf first */
+ if ((list->branch_type == branch_grf_mux ||
+ list->branch_type == branch_grf_gate ||
+ list->branch_type == branch_grf_mmc) &&
+ list->grf_type != grf_type_sys) {
+ hash_for_each_possible(ctx->aux_grf_table, agrf, node, list->grf_type) {
+ if (agrf->type == list->grf_type) {
+ grf = agrf->grf;
+ break;
+ }
+ }
+ }
+
/* catch simple muxes */
switch (list->branch_type) {
case branch_mux:
@@ -523,10 +540,10 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
list->mux_shift, list->mux_width,
list->mux_flags, &ctx->lock);
break;
- case branch_muxgrf:
+ case branch_grf_mux:
clk = rockchip_clk_register_muxgrf(list->name,
list->parent_names, list->num_parents,
- flags, ctx->grf, list->muxdiv_offset,
+ flags, grf, list->muxdiv_offset,
list->mux_shift, list->mux_width,
list->mux_flags);
break;
@@ -573,6 +590,13 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
ctx->reg_base + list->gate_offset,
list->gate_shift, list->gate_flags, &ctx->lock);
break;
+ case branch_grf_gate:
+ flags |= CLK_SET_RATE_PARENT;
+ clk = rockchip_clk_register_gate_grf(list->name,
+ list->parent_names[0], flags, grf,
+ list->gate_offset, list->gate_shift,
+ list->gate_flags);
+ break;
case branch_composite:
clk = rockchip_clk_register_branch(list->name,
list->parent_names, list->num_parents,
@@ -590,6 +614,16 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
list->name,
list->parent_names, list->num_parents,
ctx->reg_base + list->muxdiv_offset,
+ NULL, 0,
+ list->div_shift
+ );
+ break;
+ case branch_grf_mmc:
+ clk = rockchip_clk_register_mmc(
+ list->name,
+ list->parent_names, list->num_parents,
+ NULL,
+ grf, list->muxdiv_offset,
list->div_shift
);
break;
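The lookup above uses the kernel's fixed-size hashtable helpers with the grf_type enum itself as the key. A condensed sketch of the add/lookup pairing (reusing struct rockchip_aux_grf from this series; error handling elided):

	#include <linux/hashtable.h>

	static DEFINE_HASHTABLE(aux_grf_table, 2);		/* 2^2 buckets */

	static void add_aux_grf(struct rockchip_aux_grf *e)
	{
		hash_add(aux_grf_table, &e->node, e->type);	/* key is the type */
	}

	static struct regmap *find_aux_grf(enum rockchip_grf_type type,
					   struct regmap *sys_grf)
	{
		struct rockchip_aux_grf *e;

		hash_for_each_possible(aux_grf_table, e, node, type)
			if (e->type == type)
				return e->grf;

		return sys_grf;	/* grf_type_sys and misses use the main GRF */
	}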
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index df2b2d706450..1e9c3c0d31e3 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -19,6 +19,7 @@
#include <linux/io.h>
#include <linux/clk-provider.h>
+#include <linux/hashtable.h>
struct clk;
@@ -217,6 +218,9 @@ struct clk;
#define RK3528_CLKSEL_CON(x) ((x) * 0x4 + 0x300)
#define RK3528_CLKGATE_CON(x) ((x) * 0x4 + 0x800)
#define RK3528_SOFTRST_CON(x) ((x) * 0x4 + 0xa00)
+#define RK3528_SDMMC_CON(x) ((x) * 0x4 + 0x24)
+#define RK3528_SDIO0_CON(x) ((x) * 0x4 + 0x4)
+#define RK3528_SDIO1_CON(x) ((x) * 0x4 + 0xc)
#define RK3528_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RK3528_PMU_CRU_BASE)
#define RK3528_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x800 + RK3528_PMU_CRU_BASE)
#define RK3528_PCIE_CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RK3528_PCIE_CRU_BASE)
@@ -440,12 +444,37 @@ enum rockchip_pll_type {
.k = _k, \
}
+enum rockchip_grf_type {
+ grf_type_sys = 0,
+ grf_type_pmu0,
+ grf_type_pmu1,
+ grf_type_ioc,
+ grf_type_vo,
+ grf_type_vpu,
+};
+
+/* ceil(sqrt(enums in rockchip_grf_type - 1)) */
+#define GRF_HASH_ORDER 2
+
+/**
+ * struct rockchip_aux_grf - entry for the aux_grf_table hashtable
+ * @grf: pointer to the grf this entry references
+ * @type: what type of GRF this is
+ * @node: hlist node
+ */
+struct rockchip_aux_grf {
+ struct regmap *grf;
+ enum rockchip_grf_type type;
+ struct hlist_node node;
+};
+
/**
* struct rockchip_clk_provider - information about clock provider
* @reg_base: virtual address for the register base.
* @clk_data: holds clock related data like clk* and number of clocks.
* @cru_node: device-node of the clock-provider
* @grf: regmap of the general-register-files syscon
+ * @aux_grf_table: hashtable of auxiliary GRF regmaps, indexed by grf_type
* @lock: maintains exclusion between callbacks for a given clock-provider.
*/
struct rockchip_clk_provider {
@@ -453,6 +482,7 @@ struct rockchip_clk_provider {
struct clk_onecell_data clk_data;
struct device_node *cru_node;
struct regmap *grf;
+ DECLARE_HASHTABLE(aux_grf_table, GRF_HASH_ORDER);
spinlock_t lock;
};
@@ -594,7 +624,9 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
struct clk *rockchip_clk_register_mmc(const char *name,
const char *const *parent_names, u8 num_parents,
- void __iomem *reg, int shift);
+ void __iomem *reg,
+ struct regmap *grf, int grf_reg,
+ int shift);
/*
* DDRCLK flags, including method of setting the rate
@@ -622,17 +654,24 @@ struct clk *rockchip_clk_register_muxgrf(const char *name,
int flags, struct regmap *grf, int reg,
int shift, int width, int mux_flags);
+struct clk *rockchip_clk_register_gate_grf(const char *name,
+ const char *parent_name, unsigned long flags,
+ struct regmap *regmap, unsigned int reg,
+ unsigned int shift, u8 gate_flags);
+
#define PNAME(x) static const char *const x[] __initconst
enum rockchip_clk_branch_type {
branch_composite,
branch_mux,
- branch_muxgrf,
+ branch_grf_mux,
branch_divider,
branch_fraction_divider,
branch_gate,
+ branch_grf_gate,
branch_linked_gate,
branch_mmc,
+ branch_grf_mmc,
branch_inverter,
branch_factor,
branch_ddrclk,
@@ -660,6 +699,7 @@ struct rockchip_clk_branch {
u8 gate_shift;
u8 gate_flags;
unsigned int linked_clk_id;
+ enum rockchip_grf_type grf_type;
struct rockchip_clk_branch *child;
};
@@ -900,10 +940,10 @@ struct rockchip_clk_branch {
.mux_table = mt, \
}
-#define MUXGRF(_id, cname, pnames, f, o, s, w, mf) \
+#define MUXGRF(_id, cname, pnames, f, o, s, w, mf, gt) \
{ \
.id = _id, \
- .branch_type = branch_muxgrf, \
+ .branch_type = branch_grf_mux, \
.name = cname, \
.parent_names = pnames, \
.num_parents = ARRAY_SIZE(pnames), \
@@ -913,6 +953,7 @@ struct rockchip_clk_branch {
.mux_width = w, \
.mux_flags = mf, \
.gate_offset = -1, \
+ .grf_type = gt, \
}
#define DIV(_id, cname, pname, f, o, s, w, df) \
@@ -958,6 +999,20 @@ struct rockchip_clk_branch {
.gate_flags = gf, \
}
+#define GATE_GRF(_id, cname, pname, f, o, b, gf, gt) \
+ { \
+ .id = _id, \
+ .branch_type = branch_grf_gate, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .flags = f, \
+ .gate_offset = o, \
+ .gate_shift = b, \
+ .gate_flags = gf, \
+ .grf_type = gt, \
+ }
+
#define GATE_LINK(_id, cname, pname, linkedclk, f, o, b, gf) \
{ \
.id = _id, \
@@ -983,6 +1038,18 @@ struct rockchip_clk_branch {
.div_shift = shift, \
}
+#define MMC_GRF(_id, cname, pname, offset, shift, grftype) \
+ { \
+ .id = _id, \
+ .branch_type = branch_grf_mmc, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .muxdiv_offset = offset, \
+ .div_shift = shift, \
+ .grf_type = grftype, \
+ }
+
#define INVERTER(_id, cname, pname, io, is, if) \
{ \
.id = _id, \
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 374c26e5d9fd..cc5c1644c41c 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -1269,6 +1269,45 @@ static const struct samsung_cpu_clock exynos4412_cpu_clks[] __initconst = {
CPUCLK_LAYOUT_E4210, e4412_armclk_d),
};
+static const struct samsung_cmu_info cmu_info_exynos4 __initconst = {
+ .mux_clks = exynos4_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(exynos4_mux_clks),
+ .div_clks = exynos4_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos4_div_clks),
+ .gate_clks = exynos4_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos4_gate_clks),
+ .fixed_factor_clks = exynos4_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(exynos4_fixed_factor_clks),
+ .fixed_clks = exynos4_fixed_rate_clks,
+ .nr_fixed_clks = ARRAY_SIZE(exynos4_fixed_rate_clks),
+};
+
+static const struct samsung_cmu_info cmu_info_exynos4210 __initconst = {
+ .mux_clks = exynos4210_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(exynos4210_mux_clks),
+ .div_clks = exynos4210_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos4210_div_clks),
+ .gate_clks = exynos4210_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos4210_gate_clks),
+ .fixed_factor_clks = exynos4210_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(exynos4210_fixed_factor_clks),
+ .fixed_clks = exynos4210_fixed_rate_clks,
+ .nr_fixed_clks = ARRAY_SIZE(exynos4210_fixed_rate_clks),
+ .cpu_clks = exynos4210_cpu_clks,
+ .nr_cpu_clks = ARRAY_SIZE(exynos4210_cpu_clks),
+};
+
+static const struct samsung_cmu_info cmu_info_exynos4x12 __initconst = {
+ .mux_clks = exynos4x12_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(exynos4x12_mux_clks),
+ .div_clks = exynos4x12_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos4x12_div_clks),
+ .gate_clks = exynos4x12_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos4x12_gate_clks),
+ .fixed_factor_clks = exynos4x12_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(exynos4x12_fixed_factor_clks),
+};
+
/* register exynos4 clocks */
static void __init exynos4_clk_init(struct device_node *np,
enum exynos4_soc soc)
@@ -1322,41 +1361,12 @@ static void __init exynos4_clk_init(struct device_node *np,
ARRAY_SIZE(exynos4x12_plls));
}
- samsung_clk_register_fixed_rate(ctx, exynos4_fixed_rate_clks,
- ARRAY_SIZE(exynos4_fixed_rate_clks));
- samsung_clk_register_mux(ctx, exynos4_mux_clks,
- ARRAY_SIZE(exynos4_mux_clks));
- samsung_clk_register_div(ctx, exynos4_div_clks,
- ARRAY_SIZE(exynos4_div_clks));
- samsung_clk_register_gate(ctx, exynos4_gate_clks,
- ARRAY_SIZE(exynos4_gate_clks));
- samsung_clk_register_fixed_factor(ctx, exynos4_fixed_factor_clks,
- ARRAY_SIZE(exynos4_fixed_factor_clks));
+ samsung_cmu_register_clocks(ctx, &cmu_info_exynos4);
if (exynos4_soc == EXYNOS4210) {
- samsung_clk_register_fixed_rate(ctx, exynos4210_fixed_rate_clks,
- ARRAY_SIZE(exynos4210_fixed_rate_clks));
- samsung_clk_register_mux(ctx, exynos4210_mux_clks,
- ARRAY_SIZE(exynos4210_mux_clks));
- samsung_clk_register_div(ctx, exynos4210_div_clks,
- ARRAY_SIZE(exynos4210_div_clks));
- samsung_clk_register_gate(ctx, exynos4210_gate_clks,
- ARRAY_SIZE(exynos4210_gate_clks));
- samsung_clk_register_fixed_factor(ctx,
- exynos4210_fixed_factor_clks,
- ARRAY_SIZE(exynos4210_fixed_factor_clks));
- samsung_clk_register_cpu(ctx, exynos4210_cpu_clks,
- ARRAY_SIZE(exynos4210_cpu_clks));
+ samsung_cmu_register_clocks(ctx, &cmu_info_exynos4210);
} else {
- samsung_clk_register_mux(ctx, exynos4x12_mux_clks,
- ARRAY_SIZE(exynos4x12_mux_clks));
- samsung_clk_register_div(ctx, exynos4x12_div_clks,
- ARRAY_SIZE(exynos4x12_div_clks));
- samsung_clk_register_gate(ctx, exynos4x12_gate_clks,
- ARRAY_SIZE(exynos4x12_gate_clks));
- samsung_clk_register_fixed_factor(ctx,
- exynos4x12_fixed_factor_clks,
- ARRAY_SIZE(exynos4x12_fixed_factor_clks));
+ samsung_cmu_register_clocks(ctx, &cmu_info_exynos4x12);
if (soc == EXYNOS4412)
samsung_clk_register_cpu(ctx, exynos4412_cpu_clks,
ARRAY_SIZE(exynos4412_cpu_clks));
diff --git a/drivers/clk/samsung/clk-exynosautov920.c b/drivers/clk/samsung/clk-exynosautov920.c
index dc8d4240f6de..da4afe8ac2ab 100644
--- a/drivers/clk/samsung/clk-exynosautov920.c
+++ b/drivers/clk/samsung/clk-exynosautov920.c
@@ -18,6 +18,9 @@
/* NOTE: Must be equal to the last clock ID increased by one */
#define CLKS_NR_TOP (DOUT_CLKCMU_TAA_NOC + 1)
+#define CLKS_NR_CPUCL0 (CLK_DOUT_CPUCL0_NOCP + 1)
+#define CLKS_NR_CPUCL1 (CLK_DOUT_CPUCL1_NOCP + 1)
+#define CLKS_NR_CPUCL2 (CLK_DOUT_CPUCL2_NOCP + 1)
#define CLKS_NR_PERIC0 (CLK_DOUT_PERIC0_I3C + 1)
#define CLKS_NR_PERIC1 (CLK_DOUT_PERIC1_I3C + 1)
#define CLKS_NR_MISC (CLK_DOUT_MISC_OSC_DIV2 + 1)
@@ -1005,6 +1008,339 @@ static void __init exynosautov920_cmu_top_init(struct device_node *np)
CLK_OF_DECLARE(exynosautov920_cmu_top, "samsung,exynosautov920-cmu-top",
exynosautov920_cmu_top_init);
+/* ---- CMU_CPUCL0 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CPUCL0 (0x1EC00000) */
+#define PLL_LOCKTIME_PLL_CPUCL0 0x0000
+#define PLL_CON0_PLL_CPUCL0 0x0100
+#define PLL_CON1_PLL_CPUCL0 0x0104
+#define PLL_CON3_PLL_CPUCL0 0x010c
+#define PLL_CON0_MUX_CLKCMU_CPUCL0_CLUSTER_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER 0x0610
+#define PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER 0x0620
+
+#define CLK_CON_MUX_MUX_CLK_CPUCL0_CLUSTER 0x1000
+#define CLK_CON_MUX_MUX_CLK_CPUCL0_CORE 0x1004
+
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK 0x1804
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_MPCLK 0x1808
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLK 0x180c
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK 0x1810
+#define CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_NOC 0x181c
+#define CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_PCLKDBG 0x1820
+#define CLK_CON_DIV_DIV_CLK_CPUCL0_NOCP 0x1824
+
+static const unsigned long cpucl0_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL0,
+ PLL_CON0_PLL_CPUCL0,
+ PLL_CON1_PLL_CPUCL0,
+ PLL_CON3_PLL_CPUCL0,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_CLUSTER_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CLUSTER,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CORE,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_MPCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK,
+ CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_NOC,
+ CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_PCLKDBG,
+ CLK_CON_DIV_DIV_CLK_CPUCL0_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_CPUCL0 */
+PNAME(mout_pll_cpucl0_p) = { "oscclk", "fout_cpucl0_pll" };
+PNAME(mout_cpucl0_cluster_user_p) = { "oscclk", "dout_clkcmu_cpucl0_cluster" };
+PNAME(mout_cpucl0_dbg_user_p) = { "oscclk", "dout_clkcmu_cpucl0_dbg" };
+PNAME(mout_cpucl0_switch_user_p) = { "oscclk", "dout_clkcmu_cpucl0_switch" };
+PNAME(mout_cpucl0_cluster_p) = { "oscclk", "mout_cpucl0_cluster_user",
+ "mout_cpucl0_switch_user"};
+PNAME(mout_cpucl0_core_p) = { "oscclk", "mout_pll_cpucl0",
+ "mout_cpucl0_switch_user"};
+
+static const struct samsung_pll_rate_table cpu_pll_rates[] __initconst = {
+ PLL_35XX_RATE(38400000U, 2400000000U, 250, 4, 0),
+ PLL_35XX_RATE(38400000U, 2304000000U, 240, 4, 0),
+ PLL_35XX_RATE(38400000U, 2208000000U, 230, 4, 0),
+ PLL_35XX_RATE(38400000U, 2112000000U, 220, 4, 0),
+ PLL_35XX_RATE(38400000U, 2016000000U, 210, 4, 0),
+ PLL_35XX_RATE(38400000U, 1824000000U, 190, 4, 0),
+ PLL_35XX_RATE(38400000U, 1680000000U, 175, 4, 0),
+ PLL_35XX_RATE(38400000U, 1344000000U, 140, 4, 0),
+ PLL_35XX_RATE(38400000U, 1152000000U, 120, 4, 0),
+ PLL_35XX_RATE(38400000U, 576000000U, 120, 4, 1),
+ PLL_35XX_RATE(38400000U, 288000000U, 120, 4, 2),
+};
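Each PLL_35XX_RATE(fin, fout, m, p, s) row in the table above should satisfy fout = fin * m / (p * 2^s); a quick standalone check of the first and last rows (a sketch under that assumption, not driver code):

	#include <assert.h>

	static unsigned long long pll35xx(unsigned long long fin, unsigned long long m,
					  unsigned long long p, unsigned long long s)
	{
		return fin * m / (p << s);
	}

	int main(void)
	{
		assert(pll35xx(38400000ull, 250, 4, 0) == 2400000000ull);
		assert(pll35xx(38400000ull, 120, 4, 2) ==  288000000ull);
		return 0;
	}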
+
+static const struct samsung_pll_clock cpucl0_pll_clks[] __initconst = {
+ /* CMU_CPUCL0_PURECLKCOMP */
+ PLL(pll_531x, CLK_FOUT_CPUCL0_PLL, "fout_cpucl0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_CPUCL0, PLL_CON3_PLL_CPUCL0, cpu_pll_rates),
+};
+
+static const struct samsung_mux_clock cpucl0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_CPUCL0, "mout_pll_cpucl0", mout_pll_cpucl0_p,
+ PLL_CON0_PLL_CPUCL0, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_CLUSTER_USER, "mout_cpucl0_cluster_user", mout_cpucl0_cluster_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_CLUSTER_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_DBG_USER, "mout_cpucl0_dbg_user", mout_cpucl0_dbg_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_SWITCH_USER, "mout_cpucl0_switch_user", mout_cpucl0_switch_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_CLUSTER, "mout_cpucl0_cluster", mout_cpucl0_cluster_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CLUSTER, 0, 2),
+ MUX(CLK_MOUT_CPUCL0_CORE, "mout_cpucl0_core", mout_cpucl0_core_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cpucl0_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLUSTER0_ACLK, "dout_cluster0_aclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_ATCLK, "dout_cluster0_atclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_MPCLK, "dout_cluster0_mpclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_MPCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_PCLK, "dout_cluster0_pclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_PERIPHCLK, "dout_cluster0_periphclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL0_DBG_NOC, "dout_cpucl0_dbg_noc",
+ "mout_cpucl0_dbg_user", CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_NOC, 0, 3),
+ DIV(CLK_DOUT_CPUCL0_DBG_PCLKDBG, "dout_cpucl0_dbg_pclkdbg",
+ "mout_cpucl0_dbg_user", CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_PCLKDBG, 0, 3),
+ DIV(CLK_DOUT_CPUCL0_NOCP, "dout_cpucl0_nocp",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CPUCL0_NOCP, 0, 4),
+};
+
+static const struct samsung_cmu_info cpucl0_cmu_info __initconst = {
+ .pll_clks = cpucl0_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cpucl0_pll_clks),
+ .mux_clks = cpucl0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cpucl0_mux_clks),
+ .div_clks = cpucl0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cpucl0_div_clks),
+ .nr_clk_ids = CLKS_NR_CPUCL0,
+ .clk_regs = cpucl0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cpucl0_clk_regs),
+ .clk_name = "cpucl0",
+};
+
+static void __init exynosautov920_cmu_cpucl0_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &cpucl0_cmu_info);
+}
+
+/* Register CMU_CPUCL0 early, as CPU clocks should be available ASAP */
+CLK_OF_DECLARE(exynosautov920_cmu_cpucl0, "samsung,exynosautov920-cmu-cpucl0",
+ exynosautov920_cmu_cpucl0_init);
+
+/* ---- CMU_CPUCL1 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CPUCL1 (0x1ED00000) */
+#define PLL_LOCKTIME_PLL_CPUCL1 0x0000
+#define PLL_CON0_PLL_CPUCL1 0x0100
+#define PLL_CON1_PLL_CPUCL1 0x0104
+#define PLL_CON3_PLL_CPUCL1 0x010c
+#define PLL_CON0_MUX_CLKCMU_CPUCL1_CLUSTER_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER 0x0610
+
+#define CLK_CON_MUX_MUX_CLK_CPUCL1_CLUSTER 0x1000
+#define CLK_CON_MUX_MUX_CLK_CPUCL1_CORE 0x1004
+
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK 0x1804
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_MPCLK 0x1808
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLK 0x180c
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK 0x1810
+#define CLK_CON_DIV_DIV_CLK_CPUCL1_NOCP 0x181c
+
+static const unsigned long cpucl1_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL1,
+ PLL_CON0_PLL_CPUCL1,
+ PLL_CON1_PLL_CPUCL1,
+ PLL_CON3_PLL_CPUCL1,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_CLUSTER_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CLUSTER,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CORE,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_MPCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK,
+ CLK_CON_DIV_DIV_CLK_CPUCL1_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_CPUCL1 */
+PNAME(mout_pll_cpucl1_p) = { "oscclk", "fout_cpucl1_pll" };
+PNAME(mout_cpucl1_cluster_user_p) = { "oscclk", "dout_clkcmu_cpucl1_cluster" };
+PNAME(mout_cpucl1_switch_user_p) = { "oscclk", "dout_clkcmu_cpucl1_switch" };
+PNAME(mout_cpucl1_cluster_p) = { "oscclk", "mout_cpucl1_cluster_user",
+ "mout_cpucl1_switch_user"};
+PNAME(mout_cpucl1_core_p) = { "oscclk", "mout_pll_cpucl1",
+ "mout_cpucl1_switch_user"};
+
+static const struct samsung_pll_clock cpucl1_pll_clks[] __initconst = {
+ /* CMU_CPUCL1_PURECLKCOMP */
+ PLL(pll_531x, CLK_FOUT_CPUCL1_PLL, "fout_cpucl1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_CPUCL1, PLL_CON3_PLL_CPUCL1, cpu_pll_rates),
+};
+
+static const struct samsung_mux_clock cpucl1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_CPUCL1, "mout_pll_cpucl1", mout_pll_cpucl1_p,
+ PLL_CON0_PLL_CPUCL1, 4, 1),
+ MUX(CLK_MOUT_CPUCL1_CLUSTER_USER, "mout_cpucl1_cluster_user", mout_cpucl1_cluster_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_CLUSTER_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL1_SWITCH_USER, "mout_cpucl1_switch_user", mout_cpucl1_switch_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL1_CLUSTER, "mout_cpucl1_cluster", mout_cpucl1_cluster_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CLUSTER, 0, 2),
+ MUX(CLK_MOUT_CPUCL1_CORE, "mout_cpucl1_core", mout_cpucl1_core_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cpucl1_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLUSTER1_ACLK, "dout_cluster1_aclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_ATCLK, "dout_cluster1_atclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_MPCLK, "dout_cluster1_mpclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_MPCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_PCLK, "dout_cluster1_pclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_PERIPHCLK, "dout_cluster1_periphclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL1_NOCP, "dout_cpucl1_nocp",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CPUCL1_NOCP, 0, 4),
+};
+
+static const struct samsung_cmu_info cpucl1_cmu_info __initconst = {
+ .pll_clks = cpucl1_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cpucl1_pll_clks),
+ .mux_clks = cpucl1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cpucl1_mux_clks),
+ .div_clks = cpucl1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cpucl1_div_clks),
+ .nr_clk_ids = CLKS_NR_CPUCL1,
+ .clk_regs = cpucl1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cpucl1_clk_regs),
+ .clk_name = "cpucl1",
+};
+
+static void __init exynosautov920_cmu_cpucl1_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &cpucl1_cmu_info);
+}
+
+/* Register CMU_CPUCL1 early, as CPU clocks should be available ASAP */
+CLK_OF_DECLARE(exynosautov920_cmu_cpucl1, "samsung,exynosautov920-cmu-cpucl1",
+ exynosautov920_cmu_cpucl1_init);
+
+/* ---- CMU_CPUCL2 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CPUCL2 (0x1EE00000) */
+#define PLL_LOCKTIME_PLL_CPUCL2 0x0000
+#define PLL_CON0_PLL_CPUCL2 0x0100
+#define PLL_CON1_PLL_CPUCL2 0x0104
+#define PLL_CON3_PLL_CPUCL2 0x010c
+#define PLL_CON0_MUX_CLKCMU_CPUCL2_CLUSTER_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CPUCL2_SWITCH_USER 0x0610
+
+#define CLK_CON_MUX_MUX_CLK_CPUCL2_CLUSTER 0x1000
+#define CLK_CON_MUX_MUX_CLK_CPUCL2_CORE 0x1004
+
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_ACLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_ATCLK 0x1804
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_MPCLK 0x1808
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_PCLK 0x180c
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_PERIPHCLK 0x1810
+#define CLK_CON_DIV_DIV_CLK_CPUCL2_NOCP 0x181c
+
+static const unsigned long cpucl2_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL2,
+ PLL_CON0_PLL_CPUCL2,
+ PLL_CON1_PLL_CPUCL2,
+ PLL_CON3_PLL_CPUCL2,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_CLUSTER_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_SWITCH_USER,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CLUSTER,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CORE,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_ACLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_ATCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_MPCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_PCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_PERIPHCLK,
+ CLK_CON_DIV_DIV_CLK_CPUCL2_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_CPUCL2 */
+PNAME(mout_pll_cpucl2_p) = { "oscclk", "fout_cpucl2_pll" };
+PNAME(mout_cpucl2_cluster_user_p) = { "oscclk", "dout_clkcmu_cpucl2_cluster" };
+PNAME(mout_cpucl2_switch_user_p) = { "oscclk", "dout_clkcmu_cpucl2_switch" };
+PNAME(mout_cpucl2_cluster_p) = { "oscclk", "mout_cpucl2_cluster_user",
+ "mout_cpucl2_switch_user"};
+PNAME(mout_cpucl2_core_p) = { "oscclk", "mout_pll_cpucl2",
+ "mout_cpucl2_switch_user"};
+
+static const struct samsung_pll_clock cpucl2_pll_clks[] __initconst = {
+ /* CMU_CPUCL2_PURECLKCOMP */
+ PLL(pll_531x, CLK_FOUT_CPUCL2_PLL, "fout_cpucl2_pll", "oscclk",
+ PLL_LOCKTIME_PLL_CPUCL2, PLL_CON3_PLL_CPUCL2, cpu_pll_rates),
+};
+
+static const struct samsung_mux_clock cpucl2_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_CPUCL2, "mout_pll_cpucl2", mout_pll_cpucl2_p,
+ PLL_CON0_PLL_CPUCL2, 4, 1),
+ MUX(CLK_MOUT_CPUCL2_CLUSTER_USER, "mout_cpucl2_cluster_user", mout_cpucl2_cluster_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_CLUSTER_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL2_SWITCH_USER, "mout_cpucl2_switch_user", mout_cpucl2_switch_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_SWITCH_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL2_CLUSTER, "mout_cpucl2_cluster", mout_cpucl2_cluster_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CLUSTER, 0, 2),
+ MUX(CLK_MOUT_CPUCL2_CORE, "mout_cpucl2_core", mout_cpucl2_core_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cpucl2_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLUSTER2_ACLK, "dout_cluster2_aclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_ACLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_ATCLK, "dout_cluster2_atclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_MPCLK, "dout_cluster2_mpclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_MPCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_PCLK, "dout_cluster2_pclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_PCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_PERIPHCLK, "dout_cluster2_periphclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_PERIPHCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL2_NOCP, "dout_cpucl2_nocp",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CPUCL2_NOCP, 0, 4),
+};
+
+static const struct samsung_cmu_info cpucl2_cmu_info __initconst = {
+ .pll_clks = cpucl2_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cpucl2_pll_clks),
+ .mux_clks = cpucl2_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cpucl2_mux_clks),
+ .div_clks = cpucl2_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cpucl2_div_clks),
+ .nr_clk_ids = CLKS_NR_CPUCL2,
+ .clk_regs = cpucl2_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cpucl2_clk_regs),
+ .clk_name = "cpucl2",
+};
+
+static void __init exynosautov920_cmu_cpucl2_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &cpucl2_cmu_info);
+}
+
+/* Register CMU_CPUCL2 early, as CPU clocks should be available ASAP */
+CLK_OF_DECLARE(exynosautov920_cmu_cpucl2, "samsung,exynosautov920-cmu-cpucl2",
+ exynosautov920_cmu_cpucl2_init);
+
/* ---- CMU_PERIC0 --------------------------------------------------------- */
/* Register Offset definitions for CMU_PERIC0 (0x10800000) */
@@ -1393,7 +1729,7 @@ static const unsigned long hsi1_clk_regs[] __initconst = {
/* List of parent clocks for Muxes in CMU_HSI1 */
PNAME(mout_hsi1_mmc_card_user_p) = {"oscclk", "dout_clkcmu_hsi1_mmc_card"};
PNAME(mout_hsi1_noc_user_p) = { "oscclk", "dout_clkcmu_hsi1_noc" };
-PNAME(mout_hsi1_usbdrd_user_p) = { "oscclk", "mout_clkcmu_hsi1_usbdrd" };
+PNAME(mout_hsi1_usbdrd_user_p) = { "oscclk", "dout_clkcmu_hsi1_usbdrd" };
PNAME(mout_hsi1_usbdrd_p) = { "dout_tcxo_div2", "mout_hsi1_usbdrd_user" };
static const struct samsung_mux_clock hsi1_mux_clks[] __initconst = {
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index 1d82737befd3..a88c212bda12 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -83,9 +83,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
- unsigned long mdiv;
- unsigned long refdiv;
- unsigned long reg;
+ u32 mdiv;
+ u32 refdiv;
+ u32 reg;
unsigned long long vco_freq;
/* read VCO1 reg for numerator and denominator */
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index 9dcc1b2d2cc0..03a96139a576 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -39,9 +39,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
- unsigned long divf, divq, reg;
+ u32 divf, divq, reg;
unsigned long long vco_freq;
- unsigned long bypass;
+ u32 bypass;
reg = readl(socfpgaclk->hw.reg);
bypass = readl(clk_mgr_base_addr + CLKMGR_BYPASS);
diff --git a/drivers/clk/sophgo/Kconfig b/drivers/clk/sophgo/Kconfig
index 8b1367e3a95e..e14e802f28bf 100644
--- a/drivers/clk/sophgo/Kconfig
+++ b/drivers/clk/sophgo/Kconfig
@@ -37,3 +37,22 @@ config CLK_SOPHGO_SG2042_RPGATE
This clock IP depends on SG2042 Clock Generator because it uses
clock from Clock Generator IP as input.
This driver provides Gate function for RP.
+
+config CLK_SOPHGO_SG2044
+ tristate "Sophgo SG2044 clock controller support"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ This driver supports the clock controller on the Sophgo SG2044
+ SoC. This controller requires multiple PLL clocks as input.
+ This clock controller provides PLL clocks and common clock
+ functions for various IPs on the SoC.
+
+config CLK_SOPHGO_SG2044_PLL
+ tristate "Sophgo SG2044 PLL clock controller support"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ select MFD_SYSCON
+ select REGMAP_MMIO
+ help
+ This driver supports the PLL clock controller on the Sophgo
+ SG2044 SoC. This controller requires a 25 MHz oscillator as input.
+ This clock controller provides the PLL clocks on the SoC.
diff --git a/drivers/clk/sophgo/Makefile b/drivers/clk/sophgo/Makefile
index 53506845a044..26b2fd121582 100644
--- a/drivers/clk/sophgo/Makefile
+++ b/drivers/clk/sophgo/Makefile
@@ -9,3 +9,5 @@ clk-sophgo-cv1800-y += clk-cv18xx-pll.o
obj-$(CONFIG_CLK_SOPHGO_SG2042_CLKGEN) += clk-sg2042-clkgen.o
obj-$(CONFIG_CLK_SOPHGO_SG2042_PLL) += clk-sg2042-pll.o
obj-$(CONFIG_CLK_SOPHGO_SG2042_RPGATE) += clk-sg2042-rpgate.o
+obj-$(CONFIG_CLK_SOPHGO_SG2044) += clk-sg2044.o
+obj-$(CONFIG_CLK_SOPHGO_SG2044_PLL) += clk-sg2044-pll.o
diff --git a/drivers/clk/sophgo/clk-cv1800.c b/drivers/clk/sophgo/clk-cv1800.c
index e0c4dc347579..a4116ac1adcb 100644
--- a/drivers/clk/sophgo/clk-cv1800.c
+++ b/drivers/clk/sophgo/clk-cv1800.c
@@ -1519,7 +1519,9 @@ static int cv1800_clk_probe(struct platform_device *pdev)
static const struct of_device_id cv1800_clk_ids[] = {
{ .compatible = "sophgo,cv1800-clk", .data = &cv1800_desc },
+ { .compatible = "sophgo,cv1800b-clk", .data = &cv1800_desc },
{ .compatible = "sophgo,cv1810-clk", .data = &cv1810_desc },
+ { .compatible = "sophgo,cv1812h-clk", .data = &cv1810_desc },
{ .compatible = "sophgo,sg2000-clk", .data = &sg2000_desc },
{ }
};
diff --git a/drivers/clk/sophgo/clk-sg2044-pll.c b/drivers/clk/sophgo/clk-sg2044-pll.c
new file mode 100644
index 000000000000..94c0f519ba6d
--- /dev/null
+++ b/drivers/clk/sophgo/clk-sg2044-pll.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2044 PLL clock controller driver
+ *
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/sophgo,sg2044-pll.h>
+
+/* Low Control part */
+#define PLL_VCOSEL_MASK GENMASK(17, 16)
+
+/* High Control part */
+#define PLL_FBDIV_MASK GENMASK(11, 0)
+#define PLL_REFDIV_MASK GENMASK(17, 12)
+#define PLL_POSTDIV1_MASK GENMASK(20, 18)
+#define PLL_POSTDIV2_MASK GENMASK(23, 21)
+
+#define PLL_CALIBRATE_EN BIT(24)
+#define PLL_CALIBRATE_MASK GENMASK(29, 27)
+#define PLL_CALIBRATE_DEFAULT FIELD_PREP(PLL_CALIBRATE_MASK, 2)
+#define PLL_UPDATE_EN BIT(30)
+
+#define PLL_HIGH_CTRL_MASK \
+ (PLL_FBDIV_MASK | PLL_REFDIV_MASK | \
+ PLL_POSTDIV1_MASK | PLL_POSTDIV2_MASK | \
+ PLL_CALIBRATE_EN | PLL_CALIBRATE_MASK | \
+ PLL_UPDATE_EN)
+
+#define PLL_HIGH_CTRL_OFFSET 4
+
+#define PLL_VCOSEL_1G6 0x2
+#define PLL_VCOSEL_2G4 0x3
+
+#define PLL_LIMIT_FOUTVCO 0
+#define PLL_LIMIT_FOUT 1
+#define PLL_LIMIT_REFDIV 2
+#define PLL_LIMIT_FBDIV 3
+#define PLL_LIMIT_POSTDIV1 4
+#define PLL_LIMIT_POSTDIV2 5
+
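+/* Iterate a divider variable over its inclusive [min, max] limit range. */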
+#define for_each_pll_limit_range(_var, _limit) \
+ for (_var = (_limit)->min; _var <= (_limit)->max; _var++)
+
+struct sg2044_pll_limit {
+ u64 min;
+ u64 max;
+};
+
+struct sg2044_pll_internal {
+ u32 ctrl_offset;
+ u32 status_offset;
+ u32 enable_offset;
+
+ u8 status_lock_bit;
+ u8 status_updating_bit;
+ u8 enable_bit;
+
+ const struct sg2044_pll_limit *limits;
+};
+
+struct sg2044_clk_common {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ spinlock_t *lock;
+ unsigned int id;
+};
+
+struct sg2044_pll {
+ struct sg2044_clk_common common;
+ struct sg2044_pll_internal pll;
+ unsigned int syscon_offset;
+};
+
+struct sg2044_pll_desc_data {
+ struct sg2044_clk_common * const *pll;
+ u16 num_pll;
+};
+
+#define SG2044_SYSCON_PLL_OFFSET 0x98
+
+struct sg2044_pll_ctrl {
+ spinlock_t lock;
+ struct clk_hw_onecell_data data;
+};
+
+#define hw_to_sg2044_clk_common(_hw) \
+ container_of((_hw), struct sg2044_clk_common, hw)
+
+static inline bool sg2044_clk_fit_limit(u64 value,
+ const struct sg2044_pll_limit *limit)
+{
+ return value >= limit->min && value <= limit->max;
+}
+
+static inline struct sg2044_pll *hw_to_sg2044_pll(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_pll, common);
+}
+
+static unsigned long sg2044_pll_calc_vco_rate(unsigned long parent_rate,
+ unsigned long refdiv,
+ unsigned long fbdiv)
+{
+ u64 numerator = parent_rate * fbdiv;
+
+ return div64_ul(numerator, refdiv);
+}
+
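+/*
+ * Output rate of the PLL:
+ *
+ *   fout = parent_rate * fbdiv / (refdiv * (postdiv1 + 1) * (postdiv2 + 1))
+ *
+ * e.g. with the 25 MHz reference, fbdiv = 96, refdiv = 1 and both
+ * post-dividers at 0: fout = 25 MHz * 96 / (1 * 1 * 1) = 2.4 GHz,
+ * with the VCO at 2.4 GHz, inside the 1.6 GHz to 3.2 GHz FOUTVCO window.
+ */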
+static unsigned long sg2044_pll_calc_rate(unsigned long parent_rate,
+ unsigned long refdiv,
+ unsigned long fbdiv,
+ unsigned long postdiv1,
+ unsigned long postdiv2)
+{
+ u64 numerator, denominator;
+
+ numerator = parent_rate * fbdiv;
+ denominator = refdiv * (postdiv1 + 1) * (postdiv2 + 1);
+
+ return div64_u64(numerator, denominator);
+}
+
+static unsigned long sg2044_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sg2044_pll *pll = hw_to_sg2044_pll(hw);
+ u32 value;
+ int ret;
+
+ ret = regmap_read(pll->common.regmap,
+ pll->syscon_offset + pll->pll.ctrl_offset + PLL_HIGH_CTRL_OFFSET,
+ &value);
+ if (ret < 0)
+ return 0;
+
+ return sg2044_pll_calc_rate(parent_rate,
+ FIELD_GET(PLL_REFDIV_MASK, value),
+ FIELD_GET(PLL_FBDIV_MASK, value),
+ FIELD_GET(PLL_POSTDIV1_MASK, value),
+ FIELD_GET(PLL_POSTDIV2_MASK, value));
+}
+
+static bool pll_is_better_rate(unsigned long target, unsigned long now,
+ unsigned long best)
+{
+ return abs_diff(target, now) < abs_diff(target, best);
+}
+
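+/*
+ * Scan both post-divider ranges for the pair whose output is closest
+ * to @target without exceeding it; bail out early on an exact match.
+ * Returns -EINVAL if no pair produces a rate at or below @target.
+ */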
+static int sg2044_pll_compute_postdiv(const struct sg2044_pll_limit *limits,
+ unsigned long target,
+ unsigned long parent_rate,
+ unsigned int refdiv,
+ unsigned int fbdiv,
+ unsigned int *postdiv1,
+ unsigned int *postdiv2)
+{
+ unsigned int div1, div2;
+ unsigned long tmp, best_rate = 0;
+ unsigned int best_div1 = 0, best_div2 = 0;
+
+ for_each_pll_limit_range(div2, &limits[PLL_LIMIT_POSTDIV2]) {
+ for_each_pll_limit_range(div1, &limits[PLL_LIMIT_POSTDIV1]) {
+ tmp = sg2044_pll_calc_rate(parent_rate,
+ refdiv, fbdiv,
+ div1, div2);
+
+ if (tmp > target)
+ continue;
+
+ if (pll_is_better_rate(target, tmp, best_rate)) {
+ best_div1 = div1;
+ best_div2 = div2;
+ best_rate = tmp;
+
+ if (tmp == target)
+ goto find;
+ }
+ }
+ }
+
+find:
+ if (best_rate) {
+ *postdiv1 = best_div1;
+ *postdiv2 = best_div2;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
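+/*
+ * Walk every fbdiv/refdiv pair, skip those whose VCO rate falls outside
+ * the FOUTVCO limit, and combine each survivor with its best post-divider
+ * pair to find the setting closest to @req_rate. The result is returned
+ * pre-packed into the FBDIV/REFDIV/POSTDIV fields of the high control word.
+ */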
+static int sg2044_compute_pll_setting(const struct sg2044_pll_limit *limits,
+ unsigned long req_rate,
+ unsigned long parent_rate,
+ unsigned int *value)
+{
+ unsigned int refdiv, fbdiv, postdiv1, postdiv2;
+ unsigned int best_refdiv, best_fbdiv, best_postdiv1, best_postdiv2;
+ unsigned long tmp, best_rate = 0;
+ int ret;
+
+ for_each_pll_limit_range(fbdiv, &limits[PLL_LIMIT_FBDIV]) {
+ for_each_pll_limit_range(refdiv, &limits[PLL_LIMIT_REFDIV]) {
+ u64 vco = sg2044_pll_calc_vco_rate(parent_rate,
+ refdiv, fbdiv);
+ if (!sg2044_clk_fit_limit(vco, &limits[PLL_LIMIT_FOUTVCO]))
+ continue;
+
+ ret = sg2044_pll_compute_postdiv(limits,
+ req_rate, parent_rate,
+ refdiv, fbdiv,
+ &postdiv1, &postdiv2);
+ if (ret)
+ continue;
+
+ tmp = sg2044_pll_calc_rate(parent_rate,
+ refdiv, fbdiv,
+ postdiv1, postdiv2);
+
+ if (pll_is_better_rate(req_rate, tmp, best_rate)) {
+ best_refdiv = refdiv;
+ best_fbdiv = fbdiv;
+ best_postdiv1 = postdiv1;
+ best_postdiv2 = postdiv2;
+ best_rate = tmp;
+
+ if (tmp == req_rate)
+ goto find;
+ }
+ }
+ }
+
+find:
+ if (best_rate) {
+ *value = FIELD_PREP(PLL_REFDIV_MASK, best_refdiv) |
+ FIELD_PREP(PLL_FBDIV_MASK, best_fbdiv) |
+ FIELD_PREP(PLL_POSTDIV1_MASK, best_postdiv1) |
+ FIELD_PREP(PLL_POSTDIV2_MASK, best_postdiv2);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int sg2044_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct sg2044_pll *pll = hw_to_sg2044_pll(hw);
+ unsigned int value;
+ u64 target;
+ int ret;
+
+ target = clamp(req->rate, pll->pll.limits[PLL_LIMIT_FOUT].min,
+ pll->pll.limits[PLL_LIMIT_FOUT].max);
+
+ ret = sg2044_compute_pll_setting(pll->pll.limits, target,
+ req->best_parent_rate, &value);
+ if (ret < 0)
+ return ret;
+
+ req->rate = sg2044_pll_calc_rate(req->best_parent_rate,
+ FIELD_GET(PLL_REFDIV_MASK, value),
+ FIELD_GET(PLL_FBDIV_MASK, value),
+ FIELD_GET(PLL_POSTDIV1_MASK, value),
+ FIELD_GET(PLL_POSTDIV2_MASK, value));
+
+ return 0;
+}
+
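+/*
+ * Wait (up to 100 ms each) for the PLL to report lock and for its
+ * "updating" status flag to clear.
+ */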
+static int sg2044_pll_poll_update(struct sg2044_pll *pll)
+{
+ int ret;
+ unsigned int value;
+
+ ret = regmap_read_poll_timeout_atomic(pll->common.regmap,
+ pll->syscon_offset + pll->pll.status_offset,
+ value,
+ (value & BIT(pll->pll.status_lock_bit)),
+ 1, 100000);
+ if (ret)
+ return ret;
+
+ return regmap_read_poll_timeout_atomic(pll->common.regmap,
+ pll->syscon_offset + pll->pll.status_offset,
+ value,
+ (!(value & BIT(pll->pll.status_updating_bit))),
+ 1, 100000);
+}
+
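+/* On enable, let lock/update settle before setting the enable bit. */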
+static int sg2044_pll_enable(struct sg2044_pll *pll, bool en)
+{
+ if (en) {
+ if (sg2044_pll_poll_update(pll) < 0)
+ pr_warn("%s: fail to lock pll\n", clk_hw_get_name(&pll->common.hw));
+
+ return regmap_set_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.enable_offset,
+ BIT(pll->pll.enable_bit));
+ }
+
+ return regmap_clear_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.enable_offset,
+ BIT(pll->pll.enable_bit));
+}
+
+static int sg2044_pll_update_vcosel(struct sg2044_pll *pll, u64 rate)
+{
+ unsigned int sel;
+
+ if (rate < U64_C(2400000000))
+ sel = PLL_VCOSEL_1G6;
+ else
+ sel = PLL_VCOSEL_2G4;
+
+ return regmap_write_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.ctrl_offset,
+ PLL_VCOSEL_MASK,
+ FIELD_PREP(PLL_VCOSEL_MASK, sel));
+}
+
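+/*
+ * Reprogramming sequence: compute the divider word, gate the PLL output,
+ * select the VCO band for the new VCO rate, latch the new dividers (with
+ * calibration and PLL_UPDATE_EN set) into the high control word, then
+ * re-enable once the PLL reports lock again.
+ */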
+static int sg2044_pll_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct sg2044_pll *pll = hw_to_sg2044_pll(hw);
+ unsigned int value;
+ u64 vco;
+ int ret;
+
+ ret = sg2044_compute_pll_setting(pll->pll.limits, rate,
+ parent_rate, &value);
+ if (ret < 0)
+ return ret;
+
+ vco = sg2044_pll_calc_vco_rate(parent_rate,
+ FIELD_GET(PLL_REFDIV_MASK, value),
+ FIELD_GET(PLL_FBDIV_MASK, value));
+
+ value |= PLL_CALIBRATE_EN;
+ value |= PLL_CALIBRATE_DEFAULT;
+ value |= PLL_UPDATE_EN;
+
+ guard(spinlock_irqsave)(pll->common.lock);
+
+ ret = sg2044_pll_enable(pll, false);
+ if (ret)
+ return ret;
+
+ sg2044_pll_update_vcosel(pll, vco);
+
+ regmap_write_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.ctrl_offset +
+ PLL_HIGH_CTRL_OFFSET,
+ PLL_HIGH_CTRL_MASK, value);
+
+ sg2044_pll_enable(pll, true);
+
+ return ret;
+}
+
+static const struct clk_ops sg2044_pll_ops = {
+ .recalc_rate = sg2044_pll_recalc_rate,
+ .determine_rate = sg2044_pll_determine_rate,
+ .set_rate = sg2044_pll_set_rate,
+};
+
+static const struct clk_ops sg2044_pll_ro_ops = {
+ .recalc_rate = sg2044_pll_recalc_rate,
+};
+
+#define SG2044_CLK_COMMON_PDATA(_id, _name, _parents, _op, _flags) \
+ { \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parents, \
+ _op, (_flags)), \
+ .id = (_id), \
+ }
+
+#define DEFINE_SG2044_PLL(_id, _name, _parent, _flags, \
+ _ctrl_offset, \
+ _status_offset, _status_lock_bit, \
+ _status_updating_bit, \
+ _enable_offset, _enable_bit, \
+ _limits) \
+ struct sg2044_pll _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_pll_ops, \
+ (_flags)), \
+ .pll = { \
+ .ctrl_offset = (_ctrl_offset), \
+ .status_offset = (_status_offset), \
+ .enable_offset = (_enable_offset), \
+ .status_lock_bit = (_status_lock_bit), \
+ .status_updating_bit = (_status_updating_bit), \
+ .enable_bit = (_enable_bit), \
+ .limits = (_limits), \
+ }, \
+ }
+
+#define DEFINE_SG2044_PLL_RO(_id, _name, _parent, _flags, \
+ _ctrl_offset, \
+ _status_offset, _status_lock_bit, \
+ _status_updating_bit, \
+ _enable_offset, _enable_bit, \
+ _limits) \
+ struct sg2044_pll _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_pll_ro_ops, \
+ (_flags)), \
+ .pll = { \
+ .ctrl_offset = (_ctrl_offset), \
+ .status_offset = (_status_offset), \
+ .enable_offset = (_enable_offset), \
+ .status_lock_bit = (_status_lock_bit), \
+ .status_updating_bit = (_status_updating_bit), \
+ .enable_bit = (_enable_bit), \
+ .limits = (_limits), \
+ }, \
+ }
+
+static const struct clk_parent_data osc_parents[] = {
+ { .index = 0 },
+};
+
+static const struct sg2044_pll_limit pll_limits[] = {
+ [PLL_LIMIT_FOUTVCO] = {
+ .min = U64_C(1600000000),
+ .max = U64_C(3200000000),
+ },
+ [PLL_LIMIT_FOUT] = {
+ .min = U64_C(25000),
+ .max = U64_C(3200000000),
+ },
+ [PLL_LIMIT_REFDIV] = {
+ .min = U64_C(1),
+ .max = U64_C(63),
+ },
+ [PLL_LIMIT_FBDIV] = {
+ .min = U64_C(8),
+ .max = U64_C(1066),
+ },
+ [PLL_LIMIT_POSTDIV1] = {
+ .min = U64_C(0),
+ .max = U64_C(7),
+ },
+ [PLL_LIMIT_POSTDIV2] = {
+ .min = U64_C(0),
+ .max = U64_C(7),
+ },
+};
+
+static DEFINE_SG2044_PLL_RO(CLK_FPLL0, clk_fpll0, osc_parents, CLK_IS_CRITICAL,
+ 0x58, 0x00, 22, 6,
+ 0x04, 6, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_FPLL1, clk_fpll1, osc_parents, CLK_IS_CRITICAL,
+ 0x60, 0x00, 23, 7,
+ 0x04, 7, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_FPLL2, clk_fpll2, osc_parents, CLK_IS_CRITICAL,
+ 0x20, 0x08, 16, 0,
+ 0x0c, 0, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL0, clk_dpll0, osc_parents, CLK_IS_CRITICAL,
+ 0x68, 0x00, 24, 8,
+ 0x04, 8, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL1, clk_dpll1, osc_parents, CLK_IS_CRITICAL,
+ 0x70, 0x00, 25, 9,
+ 0x04, 9, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL2, clk_dpll2, osc_parents, CLK_IS_CRITICAL,
+ 0x78, 0x00, 26, 10,
+ 0x04, 10, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL3, clk_dpll3, osc_parents, CLK_IS_CRITICAL,
+ 0x80, 0x00, 27, 11,
+ 0x04, 11, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL4, clk_dpll4, osc_parents, CLK_IS_CRITICAL,
+ 0x88, 0x00, 28, 12,
+ 0x04, 12, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL5, clk_dpll5, osc_parents, CLK_IS_CRITICAL,
+ 0x90, 0x00, 29, 13,
+ 0x04, 13, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL6, clk_dpll6, osc_parents, CLK_IS_CRITICAL,
+ 0x98, 0x00, 30, 14,
+ 0x04, 14, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL7, clk_dpll7, osc_parents, CLK_IS_CRITICAL,
+ 0xa0, 0x00, 31, 15,
+ 0x04, 15, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL0, clk_mpll0, osc_parents, CLK_IS_CRITICAL,
+ 0x28, 0x00, 16, 0,
+ 0x04, 0, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL1, clk_mpll1, osc_parents, CLK_IS_CRITICAL,
+ 0x30, 0x00, 17, 1,
+ 0x04, 1, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL2, clk_mpll2, osc_parents, CLK_IS_CRITICAL,
+ 0x38, 0x00, 18, 2,
+ 0x04, 2, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL3, clk_mpll3, osc_parents, CLK_IS_CRITICAL,
+ 0x40, 0x00, 19, 3,
+ 0x04, 3, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL4, clk_mpll4, osc_parents, CLK_IS_CRITICAL,
+ 0x48, 0x00, 20, 4,
+ 0x04, 4, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL5, clk_mpll5, osc_parents, CLK_IS_CRITICAL,
+ 0x50, 0x00, 21, 5,
+ 0x04, 5, pll_limits);
+
+static struct sg2044_clk_common * const sg2044_pll_commons[] = {
+ &clk_fpll0.common,
+ &clk_fpll1.common,
+ &clk_fpll2.common,
+ &clk_dpll0.common,
+ &clk_dpll1.common,
+ &clk_dpll2.common,
+ &clk_dpll3.common,
+ &clk_dpll4.common,
+ &clk_dpll5.common,
+ &clk_dpll6.common,
+ &clk_dpll7.common,
+ &clk_mpll0.common,
+ &clk_mpll1.common,
+ &clk_mpll2.common,
+ &clk_mpll3.common,
+ &clk_mpll4.common,
+ &clk_mpll5.common,
+};
+
+static int sg2044_pll_init_ctrl(struct device *dev, struct regmap *regmap,
+ struct sg2044_pll_ctrl *ctrl,
+ const struct sg2044_pll_desc_data *desc)
+{
+ int ret, i;
+
+ spin_lock_init(&ctrl->lock);
+
+ for (i = 0; i < desc->num_pll; i++) {
+ struct sg2044_clk_common *common = desc->pll[i];
+ struct sg2044_pll *pll = hw_to_sg2044_pll(&common->hw);
+
+ common->lock = &ctrl->lock;
+ common->regmap = regmap;
+ pll->syscon_offset = SG2044_SYSCON_PLL_OFFSET;
+
+ ret = devm_clk_hw_register(dev, &common->hw);
+ if (ret)
+ return ret;
+
+ ctrl->data.hws[common->id] = &common->hw;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &ctrl->data);
+}
+
+static int sg2044_pll_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sg2044_pll_ctrl *ctrl;
+ const struct sg2044_pll_desc_data *desc;
+ struct regmap *regmap;
+
+ regmap = device_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "fail to get the regmap for PLL\n");
+
+ desc = (const struct sg2044_pll_desc_data *)platform_get_device_id(pdev)->driver_data;
+ if (!desc)
+ return dev_err_probe(dev, -EINVAL, "no matching platform data\n");
+
+ ctrl = devm_kzalloc(dev, struct_size(ctrl, data.hws, desc->num_pll), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->data.num = desc->num_pll;
+
+ return sg2044_pll_init_ctrl(dev, regmap, ctrl, desc);
+}
+
+static const struct sg2044_pll_desc_data sg2044_pll_desc_data = {
+ .pll = sg2044_pll_commons,
+ .num_pll = ARRAY_SIZE(sg2044_pll_commons),
+};
+
+static const struct platform_device_id sg2044_pll_match[] = {
+ { .name = "sg2044-pll",
+ .driver_data = (unsigned long)&sg2044_pll_desc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, sg2044_pll_match);
+
+static struct platform_driver sg2044_clk_driver = {
+ .probe = sg2044_pll_probe,
+ .driver = {
+ .name = "sg2044-pll",
+ },
+ .id_table = sg2044_pll_match,
+};
+module_platform_driver(sg2044_clk_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo SG2044 PLL clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sophgo/clk-sg2044.c b/drivers/clk/sophgo/clk-sg2044.c
new file mode 100644
index 000000000000..f67f99c926b6
--- /dev/null
+++ b/drivers/clk/sophgo/clk-sg2044.c
@@ -0,0 +1,1812 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2044 clock controller driver
+ *
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/sophgo,sg2044-clk.h>
+
+#define DIV_ASSERT BIT(0)
+#define DIV_FACTOR_REG_SOURCE BIT(3)
+#define DIV_BRANCH_EN BIT(4)
+
+#define DIV_ASSERT_TIME 2
+
+struct sg2044_div_internal {
+ u32 offset;
+ u32 initval;
+ u8 shift;
+ u8 width;
+ u16 flags;
+};
+
+struct sg2044_mux_internal {
+ const u32 *table;
+ u32 offset;
+ u16 shift;
+ u16 flags;
+};
+
+struct sg2044_gate_internal {
+ u32 offset;
+ u16 shift;
+ u16 flags;
+};
+
+struct sg2044_clk_common {
+ struct clk_hw hw;
+ void __iomem *base;
+ spinlock_t *lock;
+ unsigned int id;
+};
+
+struct sg2044_div {
+ struct sg2044_clk_common common;
+ struct sg2044_div_internal div;
+};
+
+struct sg2044_mux {
+ struct sg2044_clk_common common;
+ struct sg2044_mux_internal mux;
+ struct notifier_block nb;
+ u8 saved_parent;
+};
+
+struct sg2044_gate {
+ struct sg2044_clk_common common;
+ struct sg2044_gate_internal gate;
+};
+
+struct sg2044_clk_ctrl {
+ spinlock_t lock;
+ struct clk_hw_onecell_data data;
+};
+
+struct sg2044_clk_desc_data {
+ struct sg2044_clk_common * const *pll;
+ struct sg2044_clk_common * const *div;
+ struct sg2044_clk_common * const *mux;
+ struct sg2044_clk_common * const *gate;
+ u16 num_pll;
+ u16 num_div;
+ u16 num_mux;
+ u16 num_gate;
+};
+
+#define hw_to_sg2044_clk_common(_hw) \
+ container_of((_hw), struct sg2044_clk_common, hw)
+
+static inline struct sg2044_div *hw_to_sg2044_div(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_div, common);
+}
+
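+/*
+ * The divider only honours the register field once DIV_FACTOR_REG_SOURCE
+ * is set; until then the hardware runs off its reset default, mirrored
+ * in ->initval (0 meaning divide-by-1).
+ */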
+static u32 sg2044_div_get_reg_div(u32 reg, struct sg2044_div_internal *div)
+{
+ if ((reg & DIV_FACTOR_REG_SOURCE))
+ return (reg >> div->shift) & clk_div_mask(div->width);
+
+ return div->initval == 0 ? 1 : div->initval;
+}
+
+static unsigned long _sg2044_div_recalc_rate(struct sg2044_clk_common *common,
+ struct sg2044_div_internal *div,
+ unsigned long parent_rate)
+{
+ u32 reg = readl(common->base + div->offset);
+ u32 val = sg2044_div_get_reg_div(reg, div);
+
+ return divider_recalc_rate(&common->hw, parent_rate, val, NULL,
+ div->flags, div->width);
+}
+
+static unsigned long sg2044_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+
+ return _sg2044_div_recalc_rate(&div->common, &div->div,
+ parent_rate);
+}
+
+static int _sg2044_div_determine_rate(struct sg2044_clk_common *common,
+ struct sg2044_div_internal *div,
+ struct clk_rate_request *req)
+{
+ if (div->flags & CLK_DIVIDER_READ_ONLY) {
+ u32 reg = readl(common->base + div->offset);
+ u32 val = sg2044_div_get_reg_div(reg, div);
+
+ return divider_ro_determine_rate(&common->hw, req, NULL,
+ div->width, div->flags,
+ val);
+ }
+
+ return divider_determine_rate(&common->hw, req, NULL,
+ div->width, div->flags);
+}
+
+static int sg2044_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+
+ return _sg2044_div_determine_rate(&div->common, &div->div, req);
+}
+
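+/*
+ * Divider update protocol: clear DIV_ASSERT to hold the divider in reset,
+ * write the new factor (switching the divider to the register field via
+ * DIV_FACTOR_REG_SOURCE), then set DIV_ASSERT to release it.
+ */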
+static void sg2044_div_set_reg_div(struct sg2044_clk_common *common,
+ struct sg2044_div_internal *div,
+ u32 value)
+{
+ void __iomem *addr = common->base + div->offset;
+ u32 reg;
+
+ reg = readl(addr);
+
+ /* assert */
+ reg &= ~DIV_ASSERT;
+ writel(reg, addr);
+
+ /* set value */
+ reg = readl(addr);
+ reg &= ~(clk_div_mask(div->width) << div->shift);
+ reg |= (value << div->shift) | DIV_FACTOR_REG_SOURCE;
+ writel(reg, addr);
+
+ /* de-assert */
+ reg |= DIV_ASSERT;
+ writel(reg, addr);
+}
+
+static int sg2044_div_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+ u32 value;
+
+ value = divider_get_val(rate, parent_rate, NULL,
+ div->div.width, div->div.flags);
+
+ guard(spinlock_irqsave)(div->common.lock);
+
+ sg2044_div_set_reg_div(&div->common, &div->div, value);
+
+ return 0;
+}
+
+static int sg2044_div_enable(struct clk_hw *hw)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+ void __iomem *addr = div->common.base + div->div.offset;
+ u32 value;
+
+ guard(spinlock_irqsave)(div->common.lock);
+
+ value = readl(addr);
+ value |= DIV_BRANCH_EN;
+ writel(value, addr);
+
+ return 0;
+}
+
+static void sg2044_div_disable(struct clk_hw *hw)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+ void __iomem *addr = div->common.base + div->div.offset;
+ u32 value;
+
+ guard(spinlock_irqsave)(div->common.lock);
+
+ value = readl(addr);
+ value &= ~DIV_BRANCH_EN;
+ writel(value, addr);
+}
+
+static int sg2044_div_is_enabled(struct clk_hw *hw)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+
+ return readl(div->common.base + div->div.offset) & DIV_BRANCH_EN;
+}
+
+static const struct clk_ops sg2044_gateable_div_ops = {
+ .enable = sg2044_div_enable,
+ .disable = sg2044_div_disable,
+ .is_enabled = sg2044_div_is_enabled,
+ .recalc_rate = sg2044_div_recalc_rate,
+ .determine_rate = sg2044_div_determine_rate,
+ .set_rate = sg2044_div_set_rate,
+};
+
+static const struct clk_ops sg2044_div_ops = {
+ .recalc_rate = sg2044_div_recalc_rate,
+ .determine_rate = sg2044_div_determine_rate,
+ .set_rate = sg2044_div_set_rate,
+};
+
+static const struct clk_ops sg2044_div_ro_ops = {
+ .recalc_rate = sg2044_div_recalc_rate,
+ .determine_rate = sg2044_div_determine_rate,
+};
+
+static inline struct sg2044_mux *hw_to_sg2044_mux(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_mux, common);
+}
+
+static inline struct sg2044_mux *nb_to_sg2044_mux(struct notifier_block *nb)
+{
+ return container_of(nb, struct sg2044_mux, nb);
+}
+
+static const u32 sg2044_mux_table[] = {0, 1};
+
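+/*
+ * Around a rate change of the main source, park the mux on parent 0 (the
+ * fixed-PLL fed divider) and restore the saved parent afterwards, so
+ * consumers keep a stable clock while the main PLL is retuned.
+ */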
+static int sg2044_mux_notifier_cb(struct notifier_block *nb,
+ unsigned long event,
+ void *data)
+{
+ struct sg2044_mux *mux = nb_to_sg2044_mux(nb);
+ const struct clk_ops *ops = &clk_mux_ops;
+ struct clk_notifier_data *ndata = data;
+ struct clk_hw *hw = __clk_get_hw(ndata->clk);
+ int ret = 0;
+
+ if (event == PRE_RATE_CHANGE) {
+ mux->saved_parent = ops->get_parent(hw);
+ if (mux->saved_parent)
+ ret = ops->set_parent(hw, 0);
+ } else if (event == POST_RATE_CHANGE) {
+ ret = ops->set_parent(hw, mux->saved_parent);
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static inline struct sg2044_gate *hw_to_sg2044_gate(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_gate, common);
+}
+
+#define SG2044_CLK_COMMON_PDATA(_id, _name, _parents, _op, _flags) \
+ { \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parents, \
+ _op, (_flags)), \
+ .id = (_id), \
+ }
+
+#define SG2044_CLK_COMMON_PHWS(_id, _name, _parents, _op, _flags) \
+ { \
+ .hw.init = CLK_HW_INIT_PARENTS_HW(_name, _parents, \
+ _op, (_flags)), \
+ .id = (_id), \
+ }
+
+#define DEFINE_SG2044_GATEABLE_DIV(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_gateable_div_ops,\
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_DIV(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PHWS(_id, #_name, _parent, \
+ &sg2044_div_ops, \
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_DIV_PDATA(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_div_ops, \
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_DIV_RO(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_div_ro_ops, \
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags) | CLK_DIVIDER_READ_ONLY,\
+ }, \
+ }
+
+#define DEFINE_SG2044_MUX(_id, _name, _parent, _flags, \
+ _mux_offset, _mux_shift, \
+ _mux_table, _mux_flags) \
+ struct sg2044_mux _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &clk_mux_ops, (_flags)),\
+ .mux = { \
+ .table = (_mux_table), \
+ .offset = (_mux_offset), \
+ .shift = (_mux_shift), \
+ .flags = (_mux_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_GATE(_id, _name, _parent, _flags, \
+ _gate_offset, _gate_shift, _gate_flags) \
+ struct sg2044_gate _name = { \
+ .common = SG2044_CLK_COMMON_PHWS(_id, #_name, _parent, \
+ &clk_gate_ops, (_flags)),\
+ .gate = { \
+ .offset = (_gate_offset), \
+ .shift = (_gate_shift), \
+ .flags = (_gate_flags), \
+ }, \
+ }
+
+static const struct clk_parent_data clk_fpll0_parent[] = {
+ { .fw_name = "fpll0" },
+};
+
+static const struct clk_parent_data clk_fpll1_parent[] = {
+ { .fw_name = "fpll1" },
+};
+
+static const struct clk_parent_data clk_fpll2_parent[] = {
+ { .fw_name = "fpll2" },
+};
+
+static const struct clk_parent_data clk_dpll0_parent[] = {
+ { .fw_name = "dpll0" },
+};
+
+static const struct clk_parent_data clk_dpll1_parent[] = {
+ { .fw_name = "dpll1" },
+};
+
+static const struct clk_parent_data clk_dpll2_parent[] = {
+ { .fw_name = "dpll2" },
+};
+
+static const struct clk_parent_data clk_dpll3_parent[] = {
+ { .fw_name = "dpll3" },
+};
+
+static const struct clk_parent_data clk_dpll4_parent[] = {
+ { .fw_name = "dpll4" },
+};
+
+static const struct clk_parent_data clk_dpll5_parent[] = {
+ { .fw_name = "dpll5" },
+};
+
+static const struct clk_parent_data clk_dpll6_parent[] = {
+ { .fw_name = "dpll6" },
+};
+
+static const struct clk_parent_data clk_dpll7_parent[] = {
+ { .fw_name = "dpll7" },
+};
+
+static const struct clk_parent_data clk_mpll0_parent[] = {
+ { .fw_name = "mpll0" },
+};
+
+static const struct clk_parent_data clk_mpll1_parent[] = {
+ { .fw_name = "mpll1" },
+};
+
+static const struct clk_parent_data clk_mpll2_parent[] = {
+ { .fw_name = "mpll2" },
+};
+
+static const struct clk_parent_data clk_mpll3_parent[] = {
+ { .fw_name = "mpll3" },
+};
+
+static const struct clk_parent_data clk_mpll4_parent[] = {
+ { .fw_name = "mpll4" },
+};
+
+static const struct clk_parent_data clk_mpll5_parent[] = {
+ { .fw_name = "mpll5" },
+};
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_AP_SYS_FIXED, clk_div_ap_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x044, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_AP_SYS_MAIN, clk_div_ap_sys_main,
+ clk_mpll0_parent, 0,
+ 0x040, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_RP_SYS_FIXED, clk_div_rp_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x050, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_RP_SYS_MAIN, clk_div_rp_sys_main,
+ clk_mpll1_parent, 0,
+ 0x04c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_TPU_SYS_FIXED, clk_div_tpu_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x058, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 2);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_TPU_SYS_MAIN, clk_div_tpu_sys_main,
+ clk_mpll2_parent, 0,
+ 0x054, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_NOC_SYS_FIXED, clk_div_noc_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x070, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_NOC_SYS_MAIN, clk_div_noc_sys_main,
+ clk_mpll3_parent, 0,
+ 0x06c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC0_FIXED, clk_div_vc_src0_fixed,
+ clk_fpll0_parent, 0,
+ 0x078, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 2);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC0_MAIN, clk_div_vc_src0_main,
+ clk_mpll4_parent, 0,
+ 0x074, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC1_FIXED, clk_div_vc_src1_fixed,
+ clk_fpll0_parent, 0,
+ 0x080, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 3);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC1_MAIN, clk_div_vc_src1_main,
+ clk_mpll5_parent, 0,
+ 0x07c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_CXP_MAC_FIXED, clk_div_cxp_mac_fixed,
+ clk_fpll0_parent, 0,
+ 0x088, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 2);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_CXP_MAC_MAIN, clk_div_cxp_mac_main,
+ clk_fpll1_parent, 0,
+ 0x084, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR0_FIXED, clk_div_ddr0_fixed,
+ clk_fpll0_parent, 0,
+ 0x124, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR0_MAIN, clk_div_ddr0_main,
+ clk_dpll0_parent, 0,
+ 0x120, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR1_FIXED, clk_div_ddr1_fixed,
+ clk_fpll0_parent, 0,
+ 0x12c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR1_MAIN, clk_div_ddr1_main,
+ clk_dpll1_parent, 0,
+ 0x128, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR2_FIXED, clk_div_ddr2_fixed,
+ clk_fpll0_parent, 0,
+ 0x134, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR2_MAIN, clk_div_ddr2_main,
+ clk_dpll2_parent, 0,
+ 0x130, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR3_FIXED, clk_div_ddr3_fixed,
+ clk_fpll0_parent, 0,
+ 0x13c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR3_MAIN, clk_div_ddr3_main,
+ clk_dpll3_parent, 0,
+ 0x138, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR4_FIXED, clk_div_ddr4_fixed,
+ clk_fpll0_parent, 0,
+ 0x144, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR4_MAIN, clk_div_ddr4_main,
+ clk_dpll4_parent, 0,
+ 0x140, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR5_FIXED, clk_div_ddr5_fixed,
+ clk_fpll0_parent, 0,
+ 0x14c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR5_MAIN, clk_div_ddr5_main,
+ clk_dpll5_parent, 0,
+ 0x148, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR6_FIXED, clk_div_ddr6_fixed,
+ clk_fpll0_parent, 0,
+ 0x154, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR6_MAIN, clk_div_ddr6_main,
+ clk_dpll6_parent, 0,
+ 0x150, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR7_FIXED, clk_div_ddr7_fixed,
+ clk_fpll0_parent, 0,
+ 0x15c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR7_MAIN, clk_div_ddr7_main,
+ clk_dpll7_parent, 0,
+ 0x158, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TOP_50M, clk_div_top_50m,
+ clk_fpll0_parent, 0,
+ 0x048, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 40);
+
+static const struct clk_hw *clk_div_top_50m_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_TOP_AXI0, clk_div_top_axi0,
+ clk_fpll0_parent, 0,
+ 0x118, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 20);
+
+static const struct clk_hw *clk_div_top_axi0_parent[] = {
+ &clk_div_top_axi0.common.hw,
+};
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TOP_AXI_HSPERI, clk_div_top_axi_hsperi,
+ clk_fpll0_parent, 0,
+ 0x11c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 8);
+
+static const struct clk_hw *clk_div_top_axi_hsperi_parent[] = {
+ &clk_div_top_axi_hsperi.common.hw,
+};
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER0, clk_div_timer0,
+ clk_div_top_50m_parent, 0,
+ 0x0d0, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER1, clk_div_timer1,
+ clk_div_top_50m_parent, 0,
+ 0x0d4, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER2, clk_div_timer2,
+ clk_div_top_50m_parent, 0,
+ 0x0d8, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER3, clk_div_timer3,
+ clk_div_top_50m_parent, 0,
+ 0x0dc, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER4, clk_div_timer4,
+ clk_div_top_50m_parent, 0,
+ 0x0e0, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER5, clk_div_timer5,
+ clk_div_top_50m_parent, 0,
+ 0x0e4, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER6, clk_div_timer6,
+ clk_div_top_50m_parent, 0,
+ 0x0e8, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER7, clk_div_timer7,
+ clk_div_top_50m_parent, 0,
+ 0x0ec, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_CXP_TEST_PHY, clk_div_cxp_test_phy,
+ clk_fpll0_parent, 0,
+ 0x064, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_CXP_TEST_ETH_PHY, clk_div_cxp_test_eth_phy,
+ clk_fpll2_parent, 0,
+ 0x068, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_C2C0_TEST_PHY, clk_div_c2c0_test_phy,
+ clk_fpll0_parent, 0,
+ 0x05c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_C2C1_TEST_PHY, clk_div_c2c1_test_phy,
+ clk_fpll0_parent, 0,
+ 0x060, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_PCIE_1G, clk_div_pcie_1g,
+ clk_fpll1_parent, 0,
+ 0x160, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_UART_500M, clk_div_uart_500m,
+ clk_fpll0_parent, 0,
+ 0x0cc, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 4);
+
+static DEFINE_SG2044_DIV(CLK_DIV_GPIO_DB, clk_div_gpio_db,
+ clk_div_top_axi0_parent, 0,
+ 0x0f8, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1000);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_SD, clk_div_sd,
+ clk_fpll0_parent, 0,
+ 0x110, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 5);
+
+static DEFINE_SG2044_DIV(CLK_DIV_SD_100K, clk_div_sd_100k,
+ clk_div_top_axi0_parent, 0,
+ 0x114, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1000);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_EMMC, clk_div_emmc,
+ clk_fpll0_parent, 0,
+ 0x108, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 5);
+
+static DEFINE_SG2044_DIV(CLK_DIV_EMMC_100K, clk_div_emmc_100k,
+ clk_div_top_axi0_parent, 0,
+ 0x10c, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1000);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_EFUSE, clk_div_efuse,
+ clk_fpll0_parent, 0,
+ 0x0f4, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 80);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TX_ETH0, clk_div_tx_eth0,
+ clk_fpll0_parent, 0,
+ 0x0fc, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 16);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_PTP_REF_I_ETH0, clk_div_ptp_ref_i_eth0,
+ clk_fpll0_parent, 0,
+ 0x100, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 40);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_REF_ETH0, clk_div_ref_eth0,
+ clk_fpll0_parent, 0,
+ 0x104, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 80);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_PKA, clk_div_pka,
+ clk_fpll0_parent, 0,
+ 0x0f0, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static const struct clk_parent_data clk_mux_ddr0_parents[] = {
+ { .hw = &clk_div_ddr0_fixed.common.hw },
+ { .hw = &clk_div_ddr0_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR0, clk_mux_ddr0,
+ clk_mux_ddr0_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 7, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr1_parents[] = {
+ { .hw = &clk_div_ddr1_fixed.common.hw },
+ { .hw = &clk_div_ddr1_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR1, clk_mux_ddr1,
+ clk_mux_ddr1_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 8, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr2_parents[] = {
+ { .hw = &clk_div_ddr2_fixed.common.hw },
+ { .hw = &clk_div_ddr2_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR2, clk_mux_ddr2,
+ clk_mux_ddr2_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 9, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr3_parents[] = {
+ { .hw = &clk_div_ddr3_fixed.common.hw },
+ { .hw = &clk_div_ddr3_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR3, clk_mux_ddr3,
+ clk_mux_ddr3_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 10, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr4_parents[] = {
+ { .hw = &clk_div_ddr4_fixed.common.hw },
+ { .hw = &clk_div_ddr4_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR4, clk_mux_ddr4,
+ clk_mux_ddr4_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 11, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr5_parents[] = {
+ { .hw = &clk_div_ddr5_fixed.common.hw },
+ { .hw = &clk_div_ddr5_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR5, clk_mux_ddr5,
+ clk_mux_ddr5_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 12, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr6_parents[] = {
+ { .hw = &clk_div_ddr6_fixed.common.hw },
+ { .hw = &clk_div_ddr6_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR6, clk_mux_ddr6,
+ clk_mux_ddr6_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 13, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr7_parents[] = {
+ { .hw = &clk_div_ddr7_fixed.common.hw },
+ { .hw = &clk_div_ddr7_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR7, clk_mux_ddr7,
+ clk_mux_ddr7_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 14, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_noc_sys_parents[] = {
+ { .hw = &clk_div_noc_sys_fixed.common.hw },
+ { .hw = &clk_div_noc_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_NOC_SYS, clk_mux_noc_sys,
+ clk_mux_noc_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 3, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_tpu_sys_parents[] = {
+ { .hw = &clk_div_tpu_sys_fixed.common.hw },
+ { .hw = &clk_div_tpu_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_TPU_SYS, clk_mux_tpu_sys,
+ clk_mux_tpu_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 2, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_rp_sys_parents[] = {
+ { .hw = &clk_div_rp_sys_fixed.common.hw },
+ { .hw = &clk_div_rp_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_RP_SYS, clk_mux_rp_sys,
+ clk_mux_rp_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 1, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_ap_sys_parents[] = {
+ { .hw = &clk_div_ap_sys_fixed.common.hw },
+ { .hw = &clk_div_ap_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_AP_SYS, clk_mux_ap_sys,
+ clk_mux_ap_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 0, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_vc_src0_parents[] = {
+ { .hw = &clk_div_vc_src0_fixed.common.hw },
+ { .hw = &clk_div_vc_src0_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_VC_SRC0, clk_mux_vc_src0,
+ clk_mux_vc_src0_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 4, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_vc_src1_parents[] = {
+ { .hw = &clk_div_vc_src1_fixed.common.hw },
+ { .hw = &clk_div_vc_src1_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_VC_SRC1, clk_mux_vc_src1,
+ clk_mux_vc_src1_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 5, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_cxp_mac_parents[] = {
+ { .hw = &clk_div_cxp_mac_fixed.common.hw },
+ { .hw = &clk_div_cxp_mac_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_CXP_MAC, clk_mux_cxp_mac,
+ clk_mux_cxp_mac_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 6, sg2044_mux_table, 0);
+
+static const struct clk_hw *clk_gate_ap_sys_parent[] = {
+ &clk_mux_ap_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_AP_SYS, clk_gate_ap_sys,
+ clk_gate_ap_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 0, 0);
+
+static const struct clk_hw *clk_gate_rp_sys_parent[] = {
+ &clk_mux_rp_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_RP_SYS, clk_gate_rp_sys,
+ clk_gate_rp_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 2, 0);
+
+static const struct clk_hw *clk_gate_tpu_sys_parent[] = {
+ &clk_mux_tpu_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TPU_SYS, clk_gate_tpu_sys,
+ clk_gate_tpu_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 3, 0);
+
+static const struct clk_hw *clk_gate_noc_sys_parent[] = {
+ &clk_mux_noc_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_NOC_SYS, clk_gate_noc_sys,
+ clk_gate_noc_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 8, 0);
+
+static const struct clk_hw *clk_gate_vc_src0_parent[] = {
+ &clk_mux_vc_src0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_VC_SRC0, clk_gate_vc_src0,
+ clk_gate_vc_src0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 9, 0);
+
+static const struct clk_hw *clk_gate_vc_src1_parent[] = {
+ &clk_mux_vc_src1.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_VC_SRC1, clk_gate_vc_src1,
+ clk_gate_vc_src1_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 10, 0);
+
+static const struct clk_hw *clk_gate_ddr0_parent[] = {
+ &clk_mux_ddr0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR0, clk_gate_ddr0,
+ clk_gate_ddr0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 7, 0);
+
+static const struct clk_hw *clk_gate_ddr1_parent[] = {
+ &clk_mux_ddr1.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR1, clk_gate_ddr1,
+ clk_gate_ddr1_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 8, 0);
+
+static const struct clk_hw *clk_gate_ddr2_parent[] = {
+ &clk_mux_ddr2.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR2, clk_gate_ddr2,
+ clk_gate_ddr2_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 9, 0);
+
+static const struct clk_hw *clk_gate_ddr3_parent[] = {
+ &clk_mux_ddr3.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR3, clk_gate_ddr3,
+ clk_gate_ddr3_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 10, 0);
+
+static const struct clk_hw *clk_gate_ddr4_parent[] = {
+ &clk_mux_ddr4.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR4, clk_gate_ddr4,
+ clk_gate_ddr4_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 11, 0);
+
+static const struct clk_hw *clk_gate_ddr5_parent[] = {
+ &clk_mux_ddr5.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR5, clk_gate_ddr5,
+ clk_gate_ddr5_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 12, 0);
+
+static const struct clk_hw *clk_gate_ddr6_parent[] = {
+ &clk_mux_ddr6.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR6, clk_gate_ddr6,
+ clk_gate_ddr6_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 13, 0);
+
+static const struct clk_hw *clk_gate_ddr7_parent[] = {
+ &clk_mux_ddr7.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR7, clk_gate_ddr7,
+ clk_gate_ddr7_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 14, 0);
+
+static const struct clk_hw *clk_gate_top_50m_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TOP_50M, clk_gate_top_50m,
+ clk_gate_top_50m_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 1, 0);
+
+static const struct clk_hw *clk_gate_sc_rx_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SC_RX, clk_gate_sc_rx,
+ clk_gate_sc_rx_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 12, 0);
+
+static const struct clk_hw *clk_gate_sc_rx_x0y1_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SC_RX_X0Y1, clk_gate_sc_rx_x0y1,
+ clk_gate_sc_rx_x0y1_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 13, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_TOP_AXI0, clk_gate_top_axi0,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 5, 0);
+
+static const struct clk_hw *clk_gate_mailbox_intc_parent[] = {
+ &clk_gate_top_axi0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC0, clk_gate_intc0,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 20, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC1, clk_gate_intc1,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 21, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC2, clk_gate_intc2,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 22, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC3, clk_gate_intc3,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 23, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX0, clk_gate_mailbox0,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 16, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX1, clk_gate_mailbox1,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 17, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX2, clk_gate_mailbox2,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 18, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX3, clk_gate_mailbox3,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 19, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_TOP_AXI_HSPERI, clk_gate_top_axi_hsperi,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 6, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_TIMER, clk_gate_apb_timer,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 7, 0);
+
+static const struct clk_hw *clk_gate_timer0_parent[] = {
+ &clk_div_timer0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER0, clk_gate_timer0,
+ clk_gate_timer0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 8, 0);
+
+static const struct clk_hw *clk_gate_timer1_parent[] = {
+ &clk_div_timer1.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER1, clk_gate_timer1,
+ clk_gate_timer1_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 9, 0);
+
+static const struct clk_hw *clk_gate_timer2_parent[] = {
+ &clk_div_timer2.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER2, clk_gate_timer2,
+ clk_gate_timer2_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 10, 0);
+
+static const struct clk_hw *clk_gate_timer3_parent[] = {
+ &clk_div_timer3.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER3, clk_gate_timer3,
+ clk_gate_timer3_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 11, 0);
+
+static const struct clk_hw *clk_gate_timer4_parent[] = {
+ &clk_div_timer4.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER4, clk_gate_timer4,
+ clk_gate_timer4_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 12, 0);
+
+static const struct clk_hw *clk_gate_timer5_parent[] = {
+ &clk_div_timer5.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER5, clk_gate_timer5,
+ clk_gate_timer5_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 13, 0);
+
+static const struct clk_hw *clk_gate_timer6_parent[] = {
+ &clk_div_timer6.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER6, clk_gate_timer6,
+ clk_gate_timer6_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 14, 0);
+
+static const struct clk_hw *clk_gate_timer7_parent[] = {
+ &clk_div_timer7.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER7, clk_gate_timer7,
+ clk_gate_timer7_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 15, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_CFG, clk_gate_cxp_cfg,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 15, 0);
+
+static const struct clk_hw *clk_gate_cxp_mac_parent[] = {
+ &clk_mux_cxp_mac.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_MAC, clk_gate_cxp_mac,
+ clk_gate_cxp_mac_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 14, 0);
+
+static const struct clk_hw *clk_gate_cxp_test_phy_parent[] = {
+ &clk_div_cxp_test_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_TEST_PHY, clk_gate_cxp_test_phy,
+ clk_gate_cxp_test_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 6, 0);
+
+static const struct clk_hw *clk_gate_cxp_test_eth_phy_parent[] = {
+ &clk_div_cxp_test_eth_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_TEST_ETH_PHY, clk_gate_cxp_test_eth_phy,
+ clk_gate_cxp_test_eth_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 7, 0);
+
+static const struct clk_hw *clk_gate_pcie_1g_parent[] = {
+ &clk_div_pcie_1g.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_PCIE_1G, clk_gate_pcie_1g,
+ clk_gate_pcie_1g_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 15, 0);
+
+static const struct clk_hw *clk_gate_c2c0_test_phy_parent[] = {
+ &clk_div_c2c0_test_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_C2C0_TEST_PHY, clk_gate_c2c0_test_phy,
+ clk_gate_c2c0_test_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 4, 0);
+
+static const struct clk_hw *clk_gate_c2c1_test_phy_parent[] = {
+ &clk_div_c2c1_test_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_C2C1_TEST_PHY, clk_gate_c2c1_test_phy,
+ clk_gate_c2c1_test_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 5, 0);
+
+static const struct clk_hw *clk_gate_uart_500m_parent[] = {
+ &clk_div_uart_500m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_UART_500M, clk_gate_uart_500m,
+ clk_gate_uart_500m_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 1, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_UART, clk_gate_apb_uart,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 2, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_SPI, clk_gate_apb_spi,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 22, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AHB_SPIFMC, clk_gate_ahb_spifmc,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 5, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_I2C, clk_gate_apb_i2c,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 23, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_DBG_I2C, clk_gate_axi_dbg_i2c,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 3, 0);
+
+static const struct clk_hw *clk_gate_gpio_db_parent[] = {
+ &clk_div_gpio_db.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_GPIO_DB, clk_gate_gpio_db,
+ clk_gate_gpio_db_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 21, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_GPIO_INTR, clk_gate_apb_gpio_intr,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 20, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_GPIO, clk_gate_apb_gpio,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 19, 0);
+
+static const struct clk_hw *clk_gate_sd_parent[] = {
+ &clk_div_sd.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SD, clk_gate_sd,
+ clk_gate_sd_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 3, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_SD, clk_gate_axi_sd,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 2, 0);
+
+static const struct clk_hw *clk_gate_sd_100k_parent[] = {
+ &clk_div_sd_100k.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SD_100K, clk_gate_sd_100k,
+ clk_gate_sd_100k_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 4, 0);
+
+static const struct clk_hw *clk_gate_emmc_parent[] = {
+ &clk_div_emmc.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_EMMC, clk_gate_emmc,
+ clk_gate_emmc_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 0, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_EMMC, clk_gate_axi_emmc,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 31, 0);
+
+static const struct clk_hw *clk_gate_emmc_100k_parent[] = {
+ &clk_div_emmc_100k.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_EMMC_100K, clk_gate_emmc_100k,
+ clk_gate_emmc_100k_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 1, 0);
+
+static const struct clk_hw *clk_gate_efuse_parent[] = {
+ &clk_div_efuse.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_EFUSE, clk_gate_efuse,
+ clk_gate_efuse_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 17, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_EFUSE, clk_gate_apb_efuse,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 18, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_SYSDMA_AXI, clk_gate_sysdma_axi,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 0, 0);
+
+static const struct clk_hw *clk_gate_tx_eth0_parent[] = {
+ &clk_div_tx_eth0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TX_ETH0, clk_gate_tx_eth0,
+ clk_gate_tx_eth0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 27, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_ETH0, clk_gate_axi_eth0,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 28, 0);
+
+static const struct clk_hw *clk_gate_ptp_ref_i_eth0_parent[] = {
+ &clk_div_ptp_ref_i_eth0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_PTP_REF_I_ETH0, clk_gate_ptp_ref_i_eth0,
+ clk_gate_ptp_ref_i_eth0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 29, 0);
+
+static const struct clk_hw *clk_gate_ref_eth0_parent[] = {
+ &clk_div_ref_eth0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_REF_ETH0, clk_gate_ref_eth0,
+ clk_gate_ref_eth0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 30, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_RTC, clk_gate_apb_rtc,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 26, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_PWM, clk_gate_apb_pwm,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 25, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_WDT, clk_gate_apb_wdt,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 24, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_SRAM, clk_gate_axi_sram,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 6, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AHB_ROM, clk_gate_ahb_rom,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 4, 0);
+
+static const struct clk_hw *clk_gate_pka_parent[] = {
+ &clk_div_pka.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_PKA, clk_gate_pka,
+ clk_gate_pka_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 16, 0);
+
+static struct sg2044_clk_common * const sg2044_div_commons[] = {
+ &clk_div_ap_sys_fixed.common,
+ &clk_div_ap_sys_main.common,
+ &clk_div_rp_sys_fixed.common,
+ &clk_div_rp_sys_main.common,
+ &clk_div_tpu_sys_fixed.common,
+ &clk_div_tpu_sys_main.common,
+ &clk_div_noc_sys_fixed.common,
+ &clk_div_noc_sys_main.common,
+ &clk_div_vc_src0_fixed.common,
+ &clk_div_vc_src0_main.common,
+ &clk_div_vc_src1_fixed.common,
+ &clk_div_vc_src1_main.common,
+ &clk_div_cxp_mac_fixed.common,
+ &clk_div_cxp_mac_main.common,
+ &clk_div_ddr0_fixed.common,
+ &clk_div_ddr0_main.common,
+ &clk_div_ddr1_fixed.common,
+ &clk_div_ddr1_main.common,
+ &clk_div_ddr2_fixed.common,
+ &clk_div_ddr2_main.common,
+ &clk_div_ddr3_fixed.common,
+ &clk_div_ddr3_main.common,
+ &clk_div_ddr4_fixed.common,
+ &clk_div_ddr4_main.common,
+ &clk_div_ddr5_fixed.common,
+ &clk_div_ddr5_main.common,
+ &clk_div_ddr6_fixed.common,
+ &clk_div_ddr6_main.common,
+ &clk_div_ddr7_fixed.common,
+ &clk_div_ddr7_main.common,
+ &clk_div_top_50m.common,
+ &clk_div_top_axi0.common,
+ &clk_div_top_axi_hsperi.common,
+ &clk_div_timer0.common,
+ &clk_div_timer1.common,
+ &clk_div_timer2.common,
+ &clk_div_timer3.common,
+ &clk_div_timer4.common,
+ &clk_div_timer5.common,
+ &clk_div_timer6.common,
+ &clk_div_timer7.common,
+ &clk_div_cxp_test_phy.common,
+ &clk_div_cxp_test_eth_phy.common,
+ &clk_div_c2c0_test_phy.common,
+ &clk_div_c2c1_test_phy.common,
+ &clk_div_pcie_1g.common,
+ &clk_div_uart_500m.common,
+ &clk_div_gpio_db.common,
+ &clk_div_sd.common,
+ &clk_div_sd_100k.common,
+ &clk_div_emmc.common,
+ &clk_div_emmc_100k.common,
+ &clk_div_efuse.common,
+ &clk_div_tx_eth0.common,
+ &clk_div_ptp_ref_i_eth0.common,
+ &clk_div_ref_eth0.common,
+ &clk_div_pka.common,
+};
+
+static struct sg2044_clk_common * const sg2044_mux_commons[] = {
+ &clk_mux_ddr0.common,
+ &clk_mux_ddr1.common,
+ &clk_mux_ddr2.common,
+ &clk_mux_ddr3.common,
+ &clk_mux_ddr4.common,
+ &clk_mux_ddr5.common,
+ &clk_mux_ddr6.common,
+ &clk_mux_ddr7.common,
+ &clk_mux_noc_sys.common,
+ &clk_mux_tpu_sys.common,
+ &clk_mux_rp_sys.common,
+ &clk_mux_ap_sys.common,
+ &clk_mux_vc_src0.common,
+ &clk_mux_vc_src1.common,
+ &clk_mux_cxp_mac.common,
+};
+
+static struct sg2044_clk_common * const sg2044_gate_commons[] = {
+ &clk_gate_ap_sys.common,
+ &clk_gate_rp_sys.common,
+ &clk_gate_tpu_sys.common,
+ &clk_gate_noc_sys.common,
+ &clk_gate_vc_src0.common,
+ &clk_gate_vc_src1.common,
+ &clk_gate_ddr0.common,
+ &clk_gate_ddr1.common,
+ &clk_gate_ddr2.common,
+ &clk_gate_ddr3.common,
+ &clk_gate_ddr4.common,
+ &clk_gate_ddr5.common,
+ &clk_gate_ddr6.common,
+ &clk_gate_ddr7.common,
+ &clk_gate_top_50m.common,
+ &clk_gate_sc_rx.common,
+ &clk_gate_sc_rx_x0y1.common,
+ &clk_gate_top_axi0.common,
+ &clk_gate_intc0.common,
+ &clk_gate_intc1.common,
+ &clk_gate_intc2.common,
+ &clk_gate_intc3.common,
+ &clk_gate_mailbox0.common,
+ &clk_gate_mailbox1.common,
+ &clk_gate_mailbox2.common,
+ &clk_gate_mailbox3.common,
+ &clk_gate_top_axi_hsperi.common,
+ &clk_gate_apb_timer.common,
+ &clk_gate_timer0.common,
+ &clk_gate_timer1.common,
+ &clk_gate_timer2.common,
+ &clk_gate_timer3.common,
+ &clk_gate_timer4.common,
+ &clk_gate_timer5.common,
+ &clk_gate_timer6.common,
+ &clk_gate_timer7.common,
+ &clk_gate_cxp_cfg.common,
+ &clk_gate_cxp_mac.common,
+ &clk_gate_cxp_test_phy.common,
+ &clk_gate_cxp_test_eth_phy.common,
+ &clk_gate_pcie_1g.common,
+ &clk_gate_c2c0_test_phy.common,
+ &clk_gate_c2c1_test_phy.common,
+ &clk_gate_uart_500m.common,
+ &clk_gate_apb_uart.common,
+ &clk_gate_apb_spi.common,
+ &clk_gate_ahb_spifmc.common,
+ &clk_gate_apb_i2c.common,
+ &clk_gate_axi_dbg_i2c.common,
+ &clk_gate_gpio_db.common,
+ &clk_gate_apb_gpio_intr.common,
+ &clk_gate_apb_gpio.common,
+ &clk_gate_sd.common,
+ &clk_gate_axi_sd.common,
+ &clk_gate_sd_100k.common,
+ &clk_gate_emmc.common,
+ &clk_gate_axi_emmc.common,
+ &clk_gate_emmc_100k.common,
+ &clk_gate_efuse.common,
+ &clk_gate_apb_efuse.common,
+ &clk_gate_sysdma_axi.common,
+ &clk_gate_tx_eth0.common,
+ &clk_gate_axi_eth0.common,
+ &clk_gate_ptp_ref_i_eth0.common,
+ &clk_gate_ref_eth0.common,
+ &clk_gate_apb_rtc.common,
+ &clk_gate_apb_pwm.common,
+ &clk_gate_apb_wdt.common,
+ &clk_gate_axi_sram.common,
+ &clk_gate_ahb_rom.common,
+ &clk_gate_pka.common,
+};
+
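+/*
+ * The static gate definitions point at the clk_hw embedded in their
+ * parent's sg2044_clk_common; replace each pointer with the clk_hw
+ * instance actually registered in @data, looked up by common id.
+ */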
+static void sg2044_clk_fix_init_parent(struct clk_hw **pdata,
+ const struct clk_init_data *init,
+ struct clk_hw_onecell_data *data)
+{
+ u8 i;
+ const struct clk_hw *hw;
+ const struct sg2044_clk_common *common;
+
+ for (i = 0; i < init->num_parents; i++) {
+ hw = init->parent_hws[i];
+ common = hw_to_sg2044_clk_common(hw);
+
+ WARN(!data->hws[common->id], "clk %u is not registered\n",
+ common->id);
+ pdata[i] = data->hws[common->id];
+ }
+}
+
+static int sg2044_clk_init_ctrl(struct device *dev, void __iomem *reg,
+ struct sg2044_clk_ctrl *ctrl,
+ const struct sg2044_clk_desc_data *desc)
+{
+ int ret, i;
+ struct clk_hw *hw;
+
+ spin_lock_init(&ctrl->lock);
+
+ for (i = 0; i < desc->num_div; i++) {
+ struct sg2044_clk_common *common = desc->div[i];
+
+ common->lock = &ctrl->lock;
+ common->base = reg;
+
+ ret = devm_clk_hw_register(dev, &common->hw);
+ if (ret)
+ return ret;
+
+ ctrl->data.hws[common->id] = &common->hw;
+ }
+
+ for (i = 0; i < desc->num_mux; i++) {
+ struct sg2044_clk_common *common = desc->mux[i];
+ struct sg2044_mux *mux = hw_to_sg2044_mux(&common->hw);
+ const struct clk_init_data *init = common->hw.init;
+
+ common->lock = &ctrl->lock;
+ common->base = reg;
+
+ hw = devm_clk_hw_register_mux_parent_data_table(dev,
+ init->name,
+ init->parent_data,
+ init->num_parents,
+ init->flags,
+ reg + mux->mux.offset,
+ mux->mux.shift,
+ 1,
+ mux->mux.flags,
+ mux->mux.table,
+ &ctrl->lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
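+ /*
+ * Only writable muxes can change parent at runtime; a mux marked
+ * CLK_MUX_READ_ONLY never reparents, so it needs no notifier.
+ */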
+ if (!(mux->mux.flags & CLK_MUX_READ_ONLY)) {
+ mux->nb.notifier_call = sg2044_mux_notifier_cb;
+ ret = devm_clk_notifier_register(dev, hw->clk,
+ &mux->nb);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "%s: failed to register notifier\n",
+ clk_hw_get_name(hw));
+ }
+
+ ctrl->data.hws[common->id] = hw;
+ }
+
+ for (i = 0; i < desc->num_gate; i++) {
+ struct sg2044_clk_common *common = desc->gate[i];
+ struct sg2044_gate *gate = hw_to_sg2044_gate(&common->hw);
+ const struct clk_init_data *init = common->hw.init;
+ struct clk_hw *parent_hws[1] = { };
+
+ sg2044_clk_fix_init_parent(parent_hws, init, &ctrl->data);
+ common->lock = &ctrl->lock;
+ common->base = reg;
+
+ hw = devm_clk_hw_register_gate_parent_hw(dev, init->name,
+ parent_hws[0],
+ init->flags,
+ reg + gate->gate.offset,
+ gate->gate.shift,
+ gate->gate.flags,
+ &ctrl->lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ ctrl->data.hws[common->id] = hw;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &ctrl->data);
+}
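As an illustration of how the onecell provider registered above is
consumed, a minimal consumer could look like the sketch below; the
driver and its use of a single unnamed clock are hypothetical, not part
of this patch:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *clk;

		/*
		 * Resolved through of_clk_hw_onecell_get() from a
		 * <&clk CLK_GATE_UART_500M>-style phandle in DT.
		 */
		clk = devm_clk_get_enabled(&pdev->dev, NULL);

		return PTR_ERR_OR_ZERO(clk);
	}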
+
+static int sg2044_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sg2044_clk_ctrl *ctrl;
+ const struct sg2044_clk_desc_data *desc;
+ void __iomem *reg;
+ u32 num_clks;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ desc = device_get_match_data(dev);
+ if (!desc)
+ return dev_err_probe(dev, -EINVAL, "no match data for platform\n");
+
+ num_clks = desc->num_div + desc->num_gate + desc->num_mux;
+
+ ctrl = devm_kzalloc(dev, struct_size(ctrl, data.hws, num_clks), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->data.num = num_clks;
+
+ return sg2044_clk_init_ctrl(dev, reg, ctrl, desc);
+}
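For reference, struct_size(ctrl, data.hws, num_clks) evaluates, with
integer-overflow checking, to

	sizeof(*ctrl) + num_clks * sizeof(ctrl->data.hws[0])

so the flexible hws[] tail of clk_hw_onecell_data is carved out of the
same devm allocation as the rest of the controller state.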
+
+static const struct sg2044_clk_desc_data sg2044_clk_desc_data = {
+ .div = sg2044_div_commons,
+ .mux = sg2044_mux_commons,
+ .gate = sg2044_gate_commons,
+ .num_div = ARRAY_SIZE(sg2044_div_commons),
+ .num_mux = ARRAY_SIZE(sg2044_mux_commons),
+ .num_gate = ARRAY_SIZE(sg2044_gate_commons),
+};
+
+static const struct of_device_id sg2044_clk_match[] = {
+ { .compatible = "sophgo,sg2044-clk", .data = &sg2044_clk_desc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sg2044_clk_match);
+
+static struct platform_driver sg2044_clk_driver = {
+ .probe = sg2044_clk_probe,
+ .driver = {
+ .name = "sg2044-clk",
+ .of_match_table = sg2044_clk_match,
+ },
+};
+module_platform_driver(sg2044_clk_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo SG2044 clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/spacemit/Kconfig b/drivers/clk/spacemit/Kconfig
new file mode 100644
index 000000000000..4c4df845b3cb
--- /dev/null
+++ b/drivers/clk/spacemit/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config SPACEMIT_CCU
+ tristate "Clock support for SpacemiT SoCs"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ Say Y to enable clock controller unit support for SpacemiT SoCs.
+
+if SPACEMIT_CCU
+
+config SPACEMIT_K1_CCU
+ tristate "Support for SpacemiT K1 SoC"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ help
+ Support for the clock controller unit in the SpacemiT K1 SoC.
+
+endif
diff --git a/drivers/clk/spacemit/Makefile b/drivers/clk/spacemit/Makefile
new file mode 100644
index 000000000000..5ec6da61db98
--- /dev/null
+++ b/drivers/clk/spacemit/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SPACEMIT_K1_CCU) += spacemit-ccu-k1.o
+spacemit-ccu-k1-y = ccu_pll.o ccu_mix.o ccu_ddn.o
+spacemit-ccu-k1-y += ccu-k1.o
diff --git a/drivers/clk/spacemit/ccu-k1.c b/drivers/clk/spacemit/ccu-k1.c
new file mode 100644
index 000000000000..cdde37a05235
--- /dev/null
+++ b/drivers/clk/spacemit/ccu-k1.c
@@ -0,0 +1,1164 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#include <linux/array_size.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "ccu_common.h"
+#include "ccu_pll.h"
+#include "ccu_mix.h"
+#include "ccu_ddn.h"
+
+#include <dt-bindings/clock/spacemit,k1-syscon.h>
+
+/* APBS register offset */
+#define APBS_PLL1_SWCR1 0x100
+#define APBS_PLL1_SWCR2 0x104
+#define APBS_PLL1_SWCR3 0x108
+#define APBS_PLL2_SWCR1 0x118
+#define APBS_PLL2_SWCR2 0x11c
+#define APBS_PLL2_SWCR3 0x120
+#define APBS_PLL3_SWCR1 0x124
+#define APBS_PLL3_SWCR2 0x128
+#define APBS_PLL3_SWCR3 0x12c
+
+/* MPMU register offset */
+#define MPMU_POSR 0x0010
+#define POSR_PLL1_LOCK BIT(27)
+#define POSR_PLL2_LOCK BIT(28)
+#define POSR_PLL3_LOCK BIT(29)
+#define MPMU_SUCCR 0x0014
+#define MPMU_ISCCR 0x0044
+#define MPMU_WDTPCR 0x0200
+#define MPMU_RIPCCR 0x0210
+#define MPMU_ACGR 0x1024
+#define MPMU_APBCSCR 0x1050
+#define MPMU_SUCCR_1 0x10b0
+
+/* APBC register offset */
+#define APBC_UART1_CLK_RST 0x00
+#define APBC_UART2_CLK_RST 0x04
+#define APBC_GPIO_CLK_RST 0x08
+#define APBC_PWM0_CLK_RST 0x0c
+#define APBC_PWM1_CLK_RST 0x10
+#define APBC_PWM2_CLK_RST 0x14
+#define APBC_PWM3_CLK_RST 0x18
+#define APBC_TWSI8_CLK_RST 0x20
+#define APBC_UART3_CLK_RST 0x24
+#define APBC_RTC_CLK_RST 0x28
+#define APBC_TWSI0_CLK_RST 0x2c
+#define APBC_TWSI1_CLK_RST 0x30
+#define APBC_TIMERS1_CLK_RST 0x34
+#define APBC_TWSI2_CLK_RST 0x38
+#define APBC_AIB_CLK_RST 0x3c
+#define APBC_TWSI4_CLK_RST 0x40
+#define APBC_TIMERS2_CLK_RST 0x44
+#define APBC_ONEWIRE_CLK_RST 0x48
+#define APBC_TWSI5_CLK_RST 0x4c
+#define APBC_DRO_CLK_RST 0x58
+#define APBC_IR_CLK_RST 0x5c
+#define APBC_TWSI6_CLK_RST 0x60
+#define APBC_COUNTER_CLK_SEL 0x64
+#define APBC_TWSI7_CLK_RST 0x68
+#define APBC_TSEN_CLK_RST 0x6c
+#define APBC_UART4_CLK_RST 0x70
+#define APBC_UART5_CLK_RST 0x74
+#define APBC_UART6_CLK_RST 0x78
+#define APBC_SSP3_CLK_RST 0x7c
+#define APBC_SSPA0_CLK_RST 0x80
+#define APBC_SSPA1_CLK_RST 0x84
+#define APBC_IPC_AP2AUD_CLK_RST 0x90
+#define APBC_UART7_CLK_RST 0x94
+#define APBC_UART8_CLK_RST 0x98
+#define APBC_UART9_CLK_RST 0x9c
+#define APBC_CAN0_CLK_RST 0xa0
+#define APBC_PWM4_CLK_RST 0xa8
+#define APBC_PWM5_CLK_RST 0xac
+#define APBC_PWM6_CLK_RST 0xb0
+#define APBC_PWM7_CLK_RST 0xb4
+#define APBC_PWM8_CLK_RST 0xb8
+#define APBC_PWM9_CLK_RST 0xbc
+#define APBC_PWM10_CLK_RST 0xc0
+#define APBC_PWM11_CLK_RST 0xc4
+#define APBC_PWM12_CLK_RST 0xc8
+#define APBC_PWM13_CLK_RST 0xcc
+#define APBC_PWM14_CLK_RST 0xd0
+#define APBC_PWM15_CLK_RST 0xd4
+#define APBC_PWM16_CLK_RST 0xd8
+#define APBC_PWM17_CLK_RST 0xdc
+#define APBC_PWM18_CLK_RST 0xe0
+#define APBC_PWM19_CLK_RST 0xe4
+
+/* APMU register offset */
+#define APMU_JPG_CLK_RES_CTRL 0x020
+#define APMU_CSI_CCIC2_CLK_RES_CTRL 0x024
+#define APMU_ISP_CLK_RES_CTRL 0x038
+#define APMU_LCD_CLK_RES_CTRL1 0x044
+#define APMU_LCD_SPI_CLK_RES_CTRL 0x048
+#define APMU_LCD_CLK_RES_CTRL2 0x04c
+#define APMU_CCIC_CLK_RES_CTRL 0x050
+#define APMU_SDH0_CLK_RES_CTRL 0x054
+#define APMU_SDH1_CLK_RES_CTRL 0x058
+#define APMU_USB_CLK_RES_CTRL 0x05c
+#define APMU_QSPI_CLK_RES_CTRL 0x060
+#define APMU_DMA_CLK_RES_CTRL 0x064
+#define APMU_AES_CLK_RES_CTRL 0x068
+#define APMU_VPU_CLK_RES_CTRL 0x0a4
+#define APMU_GPU_CLK_RES_CTRL 0x0cc
+#define APMU_SDH2_CLK_RES_CTRL 0x0e0
+#define APMU_PMUA_MC_CTRL 0x0e8
+#define APMU_PMU_CC2_AP 0x100
+#define APMU_PMUA_EM_CLK_RES_CTRL 0x104
+#define APMU_AUDIO_CLK_RES_CTRL 0x14c
+#define APMU_HDMI_CLK_RES_CTRL 0x1b8
+#define APMU_CCI550_CLK_CTRL 0x300
+#define APMU_ACLK_CLK_CTRL 0x388
+#define APMU_CPU_C0_CLK_CTRL 0x38c
+#define APMU_CPU_C1_CLK_CTRL 0x390
+#define APMU_PCIE_CLK_RES_CTRL_0 0x3cc
+#define APMU_PCIE_CLK_RES_CTRL_1 0x3d4
+#define APMU_PCIE_CLK_RES_CTRL_2 0x3dc
+#define APMU_EMAC0_CLK_RES_CTRL 0x3e4
+#define APMU_EMAC1_CLK_RES_CTRL 0x3ec
+
+struct spacemit_ccu_data {
+ struct clk_hw **hws;
+ size_t num;
+};
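A sketch of the registration loop one would expect to consume this
table; the driver's actual probe code appears later in the file, and
the helper name here is illustrative. NULL slots must be skipped
because the hw arrays below use designated initializers:

	static int example_register(struct device *dev,
				    const struct spacemit_ccu_data *data)
	{
		size_t i;
		int ret;

		for (i = 0; i < data->num; i++) {
			struct clk_hw *hw = data->hws[i];

			if (!hw)	/* id with no clock stays NULL */
				continue;

			ret = devm_clk_hw_register(dev, hw);
			if (ret)
				return ret;
		}

		return 0;
	}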
+
+/* APBS clocks start; the APBS region contains all of the PLL clocks and nothing else */
+
+/*
+ * PLL{1,2} must run at fixed frequencies to provide clocks at the correct
+ * rates for peripherals.
+ */
+static const struct ccu_pll_rate_tbl pll1_rate_tbl[] = {
+ CCU_PLL_RATE(2457600000UL, 0x0050dd64, 0x330ccccd),
+};
+
+static const struct ccu_pll_rate_tbl pll2_rate_tbl[] = {
+ CCU_PLL_RATE(3000000000UL, 0x0050dd66, 0x3fe00000),
+};
+
+static const struct ccu_pll_rate_tbl pll3_rate_tbl[] = {
+ CCU_PLL_RATE(1600000000UL, 0x0050cd61, 0x43eaaaab),
+ CCU_PLL_RATE(1800000000UL, 0x0050cd61, 0x4b000000),
+ CCU_PLL_RATE(2000000000UL, 0x0050dd62, 0x2aeaaaab),
+ CCU_PLL_RATE(2457600000UL, 0x0050dd64, 0x330ccccd),
+ CCU_PLL_RATE(3000000000UL, 0x0050dd66, 0x3fe00000),
+ CCU_PLL_RATE(3200000000UL, 0x0050dd67, 0x43eaaaab),
+};
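Each CCU_PLL_RATE entry pairs a target rate with precomputed SWCR
register images, so rate selection reduces to a table scan. A sketch,
assuming the first macro argument is stored in a "rate" member of
struct ccu_pll_rate_tbl:

	static const struct ccu_pll_rate_tbl *
	example_find_rate(const struct ccu_pll_rate_tbl *tbl, size_t num,
			  unsigned long rate)
	{
		size_t i;

		for (i = 0; i < num; i++)
			if (tbl[i].rate == rate)
				return &tbl[i];

		return NULL;	/* rate not supported by this PLL */
	}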
+
+CCU_PLL_DEFINE(pll1, pll1_rate_tbl, APBS_PLL1_SWCR1, APBS_PLL1_SWCR3, MPMU_POSR, POSR_PLL1_LOCK,
+ CLK_SET_RATE_GATE);
+CCU_PLL_DEFINE(pll2, pll2_rate_tbl, APBS_PLL2_SWCR1, APBS_PLL2_SWCR3, MPMU_POSR, POSR_PLL2_LOCK,
+ CLK_SET_RATE_GATE);
+CCU_PLL_DEFINE(pll3, pll3_rate_tbl, APBS_PLL3_SWCR1, APBS_PLL3_SWCR3, MPMU_POSR, POSR_PLL3_LOCK,
+ CLK_SET_RATE_GATE);
+
+CCU_FACTOR_GATE_DEFINE(pll1_d2, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(1), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d3, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(2), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(3), 4, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d5, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(4), 5, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d6, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(5), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d7, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(6), 7, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d8, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(7), 8, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d11_223p4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(15), 11, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d13_189, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(16), 13, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d23_106p8, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(20), 23, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d64_38p4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(0), 64, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_aud_245p7, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(10), 10, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_aud_24p5, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(11), 100, 1);
+
+CCU_FACTOR_GATE_DEFINE(pll2_d1, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(0), 1, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d2, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(1), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d3, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(2), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d4, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(3), 4, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d5, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(4), 5, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d6, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(5), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d7, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(6), 7, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d8, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(7), 8, 1);
+
+CCU_FACTOR_GATE_DEFINE(pll3_d1, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(0), 1, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d2, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(1), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d3, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(2), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d4, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(3), 4, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d5, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(4), 5, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d6, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(5), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d7, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(6), 7, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d8, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(7), 8, 1);
+
+CCU_FACTOR_DEFINE(pll3_20, CCU_PARENT_HW(pll3_d8), 20, 1);
+CCU_FACTOR_DEFINE(pll3_40, CCU_PARENT_HW(pll3_d8), 10, 1);
+CCU_FACTOR_DEFINE(pll3_80, CCU_PARENT_HW(pll3_d8), 5, 1);
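(Worked example: with pll3 programmed to the 3.2 GHz entry of
pll3_rate_tbl, pll3_d8 runs at 400 MHz, so the factors 20, 10 and 5
above yield the 20 MHz, 40 MHz and 80 MHz implied by the clock names.)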
+
+/* APBS clocks end */
+
+/* MPMU clocks start */
+CCU_GATE_DEFINE(pll1_d8_307p2, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(13), 0);
+
+CCU_FACTOR_DEFINE(pll1_d32_76p8, CCU_PARENT_HW(pll1_d8_307p2), 4, 1);
+
+CCU_FACTOR_DEFINE(pll1_d40_61p44, CCU_PARENT_HW(pll1_d8_307p2), 5, 1);
+
+CCU_FACTOR_DEFINE(pll1_d16_153p6, CCU_PARENT_HW(pll1_d8), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d24_102p4, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(12), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d48_51p2, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(7), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d48_51p2_ap, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(11), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_m3d128_57p6, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(8), 16, 3);
+CCU_FACTOR_GATE_DEFINE(pll1_d96_25p6, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(4), 12, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d192_12p8, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(3), 24, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d192_12p8_wdt, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(19), 24, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d384_6p4, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(2), 48, 1);
+
+CCU_FACTOR_DEFINE(pll1_d768_3p2, CCU_PARENT_HW(pll1_d384_6p4), 2, 1);
+CCU_FACTOR_DEFINE(pll1_d1536_1p6, CCU_PARENT_HW(pll1_d384_6p4), 4, 1);
+CCU_FACTOR_DEFINE(pll1_d3072_0p8, CCU_PARENT_HW(pll1_d384_6p4), 8, 1);
+
+CCU_GATE_DEFINE(pll1_d6_409p6, CCU_PARENT_HW(pll1_d6), MPMU_ACGR, BIT(0), 0);
+CCU_FACTOR_GATE_DEFINE(pll1_d12_204p8, CCU_PARENT_HW(pll1_d6), MPMU_ACGR, BIT(5), 2, 1);
+
+CCU_GATE_DEFINE(pll1_d5_491p52, CCU_PARENT_HW(pll1_d5), MPMU_ACGR, BIT(21), 0);
+CCU_FACTOR_GATE_DEFINE(pll1_d10_245p76, CCU_PARENT_HW(pll1_d5), MPMU_ACGR, BIT(18), 2, 1);
+
+CCU_GATE_DEFINE(pll1_d4_614p4, CCU_PARENT_HW(pll1_d4), MPMU_ACGR, BIT(15), 0);
+CCU_FACTOR_GATE_DEFINE(pll1_d52_47p26, CCU_PARENT_HW(pll1_d4), MPMU_ACGR, BIT(10), 13, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d78_31p5, CCU_PARENT_HW(pll1_d4), MPMU_ACGR, BIT(6), 39, 2);
+
+CCU_GATE_DEFINE(pll1_d3_819p2, CCU_PARENT_HW(pll1_d3), MPMU_ACGR, BIT(14), 0);
+
+CCU_GATE_DEFINE(pll1_d2_1228p8, CCU_PARENT_HW(pll1_d2), MPMU_ACGR, BIT(16), 0);
+
+CCU_GATE_DEFINE(slow_uart, CCU_PARENT_NAME(osc), MPMU_ACGR, BIT(1), CLK_IGNORE_UNUSED);
+CCU_DDN_DEFINE(slow_uart1_14p74, pll1_d16_153p6, MPMU_SUCCR, 16, 13, 0, 13, 0);
+CCU_DDN_DEFINE(slow_uart2_48, pll1_d4_614p4, MPMU_SUCCR_1, 16, 13, 0, 13, 0);
+
+CCU_GATE_DEFINE(wdt_clk, CCU_PARENT_HW(pll1_d96_25p6), MPMU_WDTPCR, BIT(1), 0);
+
+CCU_FACTOR_GATE_DEFINE(i2s_sysclk, CCU_PARENT_HW(pll1_d16_153p6), MPMU_ISCCR, BIT(31), 50, 1);
+CCU_FACTOR_GATE_DEFINE(i2s_bclk, CCU_PARENT_HW(i2s_sysclk), MPMU_ISCCR, BIT(29), 1, 1);
+
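+/*
+ * The 2-bit selector encodes pll1_d96_25p6 twice (values 0 and 2);
+ * both slots are listed so every hardware encoding maps to a parent.
+ */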
+static const struct clk_parent_data apb_parents[] = {
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d24_102p4),
+};
+CCU_MUX_DEFINE(apb_clk, apb_parents, MPMU_APBCSCR, 0, 2, 0);
+
+CCU_GATE_DEFINE(wdt_bus_clk, CCU_PARENT_HW(apb_clk), MPMU_WDTPCR, BIT(0), 0);
+
+CCU_GATE_DEFINE(ripc_clk, CCU_PARENT_HW(apb_clk), MPMU_RIPCCR, BIT(0), 0);
+/* MPMU clocks end */
+
+/* APBC clocks start */
+static const struct clk_parent_data uart_clk_parents[] = {
+ CCU_PARENT_HW(pll1_m3d128_57p6),
+ CCU_PARENT_HW(slow_uart1_14p74),
+ CCU_PARENT_HW(slow_uart2_48),
+};
+CCU_MUX_GATE_DEFINE(uart0_clk, uart_clk_parents, APBC_UART1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart2_clk, uart_clk_parents, APBC_UART2_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart3_clk, uart_clk_parents, APBC_UART3_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart4_clk, uart_clk_parents, APBC_UART4_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart5_clk, uart_clk_parents, APBC_UART5_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart6_clk, uart_clk_parents, APBC_UART6_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart7_clk, uart_clk_parents, APBC_UART7_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart8_clk, uart_clk_parents, APBC_UART8_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart9_clk, uart_clk_parents, APBC_UART9_CLK_RST, 4, 3, BIT(1), 0);
+
+CCU_GATE_DEFINE(gpio_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_GPIO_CLK_RST, BIT(1), 0);
+
+static const struct clk_parent_data pwm_parents[] = {
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_NAME(osc),
+};
+CCU_MUX_GATE_DEFINE(pwm0_clk, pwm_parents, APBC_PWM0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm1_clk, pwm_parents, APBC_PWM1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm2_clk, pwm_parents, APBC_PWM2_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm3_clk, pwm_parents, APBC_PWM3_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm4_clk, pwm_parents, APBC_PWM4_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm5_clk, pwm_parents, APBC_PWM5_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm6_clk, pwm_parents, APBC_PWM6_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm7_clk, pwm_parents, APBC_PWM7_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm8_clk, pwm_parents, APBC_PWM8_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm9_clk, pwm_parents, APBC_PWM9_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm10_clk, pwm_parents, APBC_PWM10_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm11_clk, pwm_parents, APBC_PWM11_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm12_clk, pwm_parents, APBC_PWM12_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm13_clk, pwm_parents, APBC_PWM13_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm14_clk, pwm_parents, APBC_PWM14_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm15_clk, pwm_parents, APBC_PWM15_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm16_clk, pwm_parents, APBC_PWM16_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm17_clk, pwm_parents, APBC_PWM17_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm18_clk, pwm_parents, APBC_PWM18_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm19_clk, pwm_parents, APBC_PWM19_CLK_RST, 4, 3, BIT(1), 0);
+
+static const struct clk_parent_data ssp_parents[] = {
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d768_3p2),
+ CCU_PARENT_HW(pll1_d1536_1p6),
+ CCU_PARENT_HW(pll1_d3072_0p8),
+};
+CCU_MUX_GATE_DEFINE(ssp3_clk, ssp_parents, APBC_SSP3_CLK_RST, 4, 3, BIT(1), 0);
+
+CCU_GATE_DEFINE(rtc_clk, CCU_PARENT_NAME(osc), APBC_RTC_CLK_RST,
+ BIT(7) | BIT(1), 0);
+
+static const struct clk_parent_data twsi_parents[] = {
+ CCU_PARENT_HW(pll1_d78_31p5),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d40_61p44),
+};
+CCU_MUX_GATE_DEFINE(twsi0_clk, twsi_parents, APBC_TWSI0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi1_clk, twsi_parents, APBC_TWSI1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi2_clk, twsi_parents, APBC_TWSI2_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi4_clk, twsi_parents, APBC_TWSI4_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi5_clk, twsi_parents, APBC_TWSI5_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi6_clk, twsi_parents, APBC_TWSI6_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi7_clk, twsi_parents, APBC_TWSI7_CLK_RST, 4, 3, BIT(1), 0);
+/*
+ * APBC_TWSI8_CLK_RST has a quirk: reads always return zero. Combine the
+ * functional and bus bits into a single gate so the effectively write-only
+ * register is never shared between different clk_hw instances.
+ */
+CCU_GATE_DEFINE(twsi8_clk, CCU_PARENT_HW(pll1_d78_31p5), APBC_TWSI8_CLK_RST, BIT(1) | BIT(0), 0);
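The hazard being avoided is the read-modify-write in the generic gate
code: with a register that always reads back zero, two independent
gates sharing it would silently drop each other's enable bits, roughly:

	u32 val = readl(base + APBC_TWSI8_CLK_RST);	/* reads as 0 */
	val |= BIT(1);			/* enable the functional clock */
	writel(val, base + APBC_TWSI8_CLK_RST);		/* BIT(0) is lost */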
+
+static const struct clk_parent_data timer_parents[] = {
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_NAME(osc),
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_NAME(vctcxo_3m),
+ CCU_PARENT_NAME(vctcxo_1m),
+};
+CCU_MUX_GATE_DEFINE(timers1_clk, timer_parents, APBC_TIMERS1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(timers2_clk, timer_parents, APBC_TIMERS2_CLK_RST, 4, 3, BIT(1), 0);
+
+CCU_GATE_DEFINE(aib_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_AIB_CLK_RST, BIT(1), 0);
+
+CCU_GATE_DEFINE(onewire_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_ONEWIRE_CLK_RST, BIT(1), 0);
+
+static const struct clk_parent_data sspa_parents[] = {
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d768_3p2),
+ CCU_PARENT_HW(pll1_d1536_1p6),
+ CCU_PARENT_HW(pll1_d3072_0p8),
+ CCU_PARENT_HW(i2s_bclk),
+};
+CCU_MUX_GATE_DEFINE(sspa0_clk, sspa_parents, APBC_SSPA0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(sspa1_clk, sspa_parents, APBC_SSPA1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_GATE_DEFINE(dro_clk, CCU_PARENT_HW(apb_clk), APBC_DRO_CLK_RST, BIT(1), 0);
+CCU_GATE_DEFINE(ir_clk, CCU_PARENT_HW(apb_clk), APBC_IR_CLK_RST, BIT(1), 0);
+CCU_GATE_DEFINE(tsen_clk, CCU_PARENT_HW(apb_clk), APBC_TSEN_CLK_RST, BIT(1), 0);
+CCU_GATE_DEFINE(ipc_ap2aud_clk, CCU_PARENT_HW(apb_clk), APBC_IPC_AP2AUD_CLK_RST, BIT(1), 0);
+
+static const struct clk_parent_data can_parents[] = {
+ CCU_PARENT_HW(pll3_20),
+ CCU_PARENT_HW(pll3_40),
+ CCU_PARENT_HW(pll3_80),
+};
+CCU_MUX_GATE_DEFINE(can0_clk, can_parents, APBC_CAN0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_GATE_DEFINE(can0_bus_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_CAN0_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(uart0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART2_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart3_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART3_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart4_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART4_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart5_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART5_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart6_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART6_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart7_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART7_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart8_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART8_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart9_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART9_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(gpio_bus_clk, CCU_PARENT_HW(apb_clk), APBC_GPIO_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(pwm0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM0_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM2_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm3_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM3_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm4_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM4_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm5_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM5_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm6_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM6_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm7_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM7_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm8_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM8_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm9_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM9_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm10_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM10_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm11_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM11_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm12_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM12_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm13_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM13_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm14_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM14_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm15_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM15_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm16_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM16_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm17_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM17_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm18_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM18_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm19_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM19_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(ssp3_bus_clk, CCU_PARENT_HW(apb_clk), APBC_SSP3_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(rtc_bus_clk, CCU_PARENT_HW(apb_clk), APBC_RTC_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(twsi0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI0_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI2_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi4_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI4_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi5_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI5_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi6_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI6_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi7_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI7_CLK_RST, BIT(0), 0);
+/*
+ * Placeholder to work around the register quirk above: the real bus
+ * enable bit is folded into twsi8_clk, so expose a 1:1 pass-through
+ * to keep the bus-clock index usable.
+ */
+CCU_FACTOR_DEFINE(twsi8_bus_clk, CCU_PARENT_HW(apb_clk), 1, 1);
+
+CCU_GATE_DEFINE(timers1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TIMERS1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(timers2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TIMERS2_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(aib_bus_clk, CCU_PARENT_HW(apb_clk), APBC_AIB_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(onewire_bus_clk, CCU_PARENT_HW(apb_clk), APBC_ONEWIRE_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(sspa0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_SSPA0_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(sspa1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_SSPA1_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(tsen_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TSEN_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(ipc_ap2aud_bus_clk, CCU_PARENT_HW(apb_clk), APBC_IPC_AP2AUD_CLK_RST, BIT(0), 0);
+/* APBC clocks end */
+
+/* APMU clocks start */
+static const struct clk_parent_data pmua_aclk_parents[] = {
+ CCU_PARENT_HW(pll1_d10_245p76),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_FC_DEFINE(pmua_aclk, pmua_aclk_parents, APMU_ACLK_CLK_CTRL, 1, 2, BIT(4), 0, 1, 0);
+
+static const struct clk_parent_data cci550_clk_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d3),
+};
+CCU_MUX_DIV_FC_DEFINE(cci550_clk, cci550_clk_parents, APMU_CCI550_CLK_CTRL, 8, 3, BIT(12), 0, 2,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data cpu_c0_hi_clk_parents[] = {
+ CCU_PARENT_HW(pll3_d2),
+ CCU_PARENT_HW(pll3_d1),
+};
+CCU_MUX_DEFINE(cpu_c0_hi_clk, cpu_c0_hi_clk_parents, APMU_CPU_C0_CLK_CTRL, 13, 1, 0);
+static const struct clk_parent_data cpu_c0_clk_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll3_d3),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(cpu_c0_hi_clk),
+};
+CCU_MUX_FC_DEFINE(cpu_c0_core_clk, cpu_c0_clk_parents, APMU_CPU_C0_CLK_CTRL, BIT(12), 0, 3,
+ CLK_IS_CRITICAL);
+CCU_DIV_DEFINE(cpu_c0_ace_clk, CCU_PARENT_HW(cpu_c0_core_clk), APMU_CPU_C0_CLK_CTRL, 6, 3,
+ CLK_IS_CRITICAL);
+CCU_DIV_DEFINE(cpu_c0_tcm_clk, CCU_PARENT_HW(cpu_c0_core_clk), APMU_CPU_C0_CLK_CTRL, 9, 3,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data cpu_c1_hi_clk_parents[] = {
+ CCU_PARENT_HW(pll3_d2),
+ CCU_PARENT_HW(pll3_d1),
+};
+CCU_MUX_DEFINE(cpu_c1_hi_clk, cpu_c1_hi_clk_parents, APMU_CPU_C1_CLK_CTRL, 13, 1, 0);
+static const struct clk_parent_data cpu_c1_clk_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll3_d3),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(cpu_c1_hi_clk),
+};
+CCU_MUX_FC_DEFINE(cpu_c1_core_clk, cpu_c1_clk_parents, APMU_CPU_C1_CLK_CTRL, BIT(12), 0, 3,
+ CLK_IS_CRITICAL);
+CCU_DIV_DEFINE(cpu_c1_ace_clk, CCU_PARENT_HW(cpu_c1_core_clk), APMU_CPU_C1_CLK_CTRL, 6, 3,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data jpg_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d3),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(jpg_clk, jpg_parents, APMU_JPG_CLK_RES_CTRL, 5, 3, BIT(15), 2, 3,
+ BIT(1), 0);
+
+static const struct clk_parent_data ccic2phy_parents[] = {
+ CCU_PARENT_HW(pll1_d24_102p4),
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+};
+CCU_MUX_GATE_DEFINE(ccic2phy_clk, ccic2phy_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 7, 1, BIT(5), 0);
+
+static const struct clk_parent_data ccic3phy_parents[] = {
+ CCU_PARENT_HW(pll1_d24_102p4),
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+};
+CCU_MUX_GATE_DEFINE(ccic3phy_clk, ccic3phy_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 31, 1, BIT(30), 0);
+
+static const struct clk_parent_data csi_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d2),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(csi_clk, csi_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 20, 3, BIT(15),
+ 16, 3, BIT(4), 0);
+
+static const struct clk_parent_data camm_parents[] = {
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll2_d5),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_NAME(vctcxo_24m),
+};
+CCU_MUX_DIV_GATE_DEFINE(camm0_clk, camm_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 23, 4, 8, 2,
+ BIT(28), 0);
+CCU_MUX_DIV_GATE_DEFINE(camm1_clk, camm_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 23, 4, 8, 2,
+ BIT(6), 0);
+CCU_MUX_DIV_GATE_DEFINE(camm2_clk, camm_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 23, 4, 8, 2,
+ BIT(3), 0);
+
+static const struct clk_parent_data isp_cpp_parents[] = {
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+};
+CCU_MUX_DIV_GATE_DEFINE(isp_cpp_clk, isp_cpp_parents, APMU_ISP_CLK_RES_CTRL, 24, 2, 26, 1,
+ BIT(28), 0);
+static const struct clk_parent_data isp_bus_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d10_245p76),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(isp_bus_clk, isp_bus_parents, APMU_ISP_CLK_RES_CTRL, 18, 3, BIT(23),
+ 21, 2, BIT(17), 0);
+static const struct clk_parent_data isp_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(isp_clk, isp_parents, APMU_ISP_CLK_RES_CTRL, 4, 3, BIT(7), 8, 2,
+ BIT(1), 0);
+
+static const struct clk_parent_data dpumclk_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(dpu_mclk, dpumclk_parents, APMU_LCD_CLK_RES_CTRL2,
+ APMU_LCD_CLK_RES_CTRL1, 1, 4, BIT(29), 5, 3, BIT(0), 0);
+
+static const struct clk_parent_data dpuesc_parents[] = {
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+ CCU_PARENT_HW(pll1_d52_47p26),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d32_76p8),
+};
+CCU_MUX_GATE_DEFINE(dpu_esc_clk, dpuesc_parents, APMU_LCD_CLK_RES_CTRL1, 0, 2, BIT(2), 0);
+
+static const struct clk_parent_data dpubit_parents[] = {
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d2),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d5),
+ CCU_PARENT_HW(pll2_d7),
+ CCU_PARENT_HW(pll2_d8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(dpu_bit_clk, dpubit_parents, APMU_LCD_CLK_RES_CTRL1, 17, 3, BIT(31),
+ 20, 3, BIT(16), 0);
+
+static const struct clk_parent_data dpupx_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll2_d7),
+ CCU_PARENT_HW(pll2_d8),
+};
+CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(dpu_pxclk, dpupx_parents, APMU_LCD_CLK_RES_CTRL2,
+ APMU_LCD_CLK_RES_CTRL1, 17, 4, BIT(30), 21, 3, BIT(16), 0);
+
+CCU_GATE_DEFINE(dpu_hclk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_CLK_RES_CTRL1,
+ BIT(5), 0);
+
+static const struct clk_parent_data dpu_spi_parents[] = {
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d10_245p76),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d13_189),
+ CCU_PARENT_HW(pll1_d23_106p8),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d5),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(dpu_spi_clk, dpu_spi_parents, APMU_LCD_SPI_CLK_RES_CTRL, 8, 3,
+ BIT(7), 12, 3, BIT(1), 0);
+CCU_GATE_DEFINE(dpu_spi_hbus_clk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_SPI_CLK_RES_CTRL, BIT(3), 0);
+CCU_GATE_DEFINE(dpu_spi_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_SPI_CLK_RES_CTRL, BIT(5), 0);
+CCU_GATE_DEFINE(dpu_spi_aclk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_SPI_CLK_RES_CTRL, BIT(6), 0);
+
+static const struct clk_parent_data v2d_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d4_614p4),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(v2d_clk, v2d_parents, APMU_LCD_CLK_RES_CTRL1, 9, 3, BIT(28), 12, 2,
+ BIT(8), 0);
+
+static const struct clk_parent_data ccic_4x_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d2),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(ccic_4x_clk, ccic_4x_parents, APMU_CCIC_CLK_RES_CTRL, 18, 3,
+ BIT(15), 23, 2, BIT(4), 0);
+
+static const struct clk_parent_data ccic1phy_parents[] = {
+ CCU_PARENT_HW(pll1_d24_102p4),
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+};
+CCU_MUX_GATE_DEFINE(ccic1phy_clk, ccic1phy_parents, APMU_CCIC_CLK_RES_CTRL, 7, 1, BIT(5), 0);
+
+CCU_GATE_DEFINE(sdh_axi_aclk, CCU_PARENT_HW(pmua_aclk), APMU_SDH0_CLK_RES_CTRL, BIT(3), 0);
+static const struct clk_parent_data sdh01_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll2_d8),
+ CCU_PARENT_HW(pll2_d5),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d13_189),
+ CCU_PARENT_HW(pll1_d23_106p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(sdh0_clk, sdh01_parents, APMU_SDH0_CLK_RES_CTRL, 8, 3, BIT(11), 5, 3,
+ BIT(4), 0);
+CCU_MUX_DIV_GATE_FC_DEFINE(sdh1_clk, sdh01_parents, APMU_SDH1_CLK_RES_CTRL, 8, 3, BIT(11), 5, 3,
+ BIT(4), 0);
+static const struct clk_parent_data sdh2_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll2_d8),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d13_189),
+ CCU_PARENT_HW(pll1_d23_106p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(sdh2_clk, sdh2_parents, APMU_SDH2_CLK_RES_CTRL, 8, 3, BIT(11), 5, 3,
+ BIT(4), 0);
+
+CCU_GATE_DEFINE(usb_axi_clk, CCU_PARENT_HW(pmua_aclk), APMU_USB_CLK_RES_CTRL, BIT(1), 0);
+CCU_GATE_DEFINE(usb_p1_aclk, CCU_PARENT_HW(pmua_aclk), APMU_USB_CLK_RES_CTRL, BIT(5), 0);
+CCU_GATE_DEFINE(usb30_clk, CCU_PARENT_HW(pmua_aclk), APMU_USB_CLK_RES_CTRL, BIT(8), 0);
+
+static const struct clk_parent_data qspi_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll2_d8),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d10_245p76),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d23_106p8),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d13_189),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(qspi_clk, qspi_parents, APMU_QSPI_CLK_RES_CTRL, 9, 3, BIT(12), 6, 3,
+ BIT(4), 0);
+CCU_GATE_DEFINE(qspi_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_QSPI_CLK_RES_CTRL, BIT(3), 0);
+CCU_GATE_DEFINE(dma_clk, CCU_PARENT_HW(pmua_aclk), APMU_DMA_CLK_RES_CTRL, BIT(3), 0);
+
+static const struct clk_parent_data aes_parents[] = {
+ CCU_PARENT_HW(pll1_d12_204p8),
+ CCU_PARENT_HW(pll1_d24_102p4),
+};
+CCU_MUX_GATE_DEFINE(aes_clk, aes_parents, APMU_AES_CLK_RES_CTRL, 6, 1, BIT(5), 0);
+
+static const struct clk_parent_data vpu_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll3_d6),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d5),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(vpu_clk, vpu_parents, APMU_VPU_CLK_RES_CTRL, 13, 3, BIT(21), 10, 3,
+ BIT(3), 0);
+
+static const struct clk_parent_data gpu_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll3_d6),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d5),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(gpu_clk, gpu_parents, APMU_GPU_CLK_RES_CTRL, 12, 3, BIT(15), 18, 3,
+ BIT(4), 0);
+
+static const struct clk_parent_data emmc_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d52_47p26),
+ CCU_PARENT_HW(pll1_d3_819p2),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(emmc_clk, emmc_parents, APMU_PMUA_EM_CLK_RES_CTRL, 8, 3, BIT(11),
+ 6, 2, BIT(4), 0);
+CCU_DIV_GATE_DEFINE(emmc_x_clk, CCU_PARENT_HW(pll1_d2_1228p8), APMU_PMUA_EM_CLK_RES_CTRL, 12,
+ 3, BIT(15), 0);
+
+static const struct clk_parent_data audio_parents[] = {
+ CCU_PARENT_HW(pll1_aud_245p7),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(audio_clk, audio_parents, APMU_AUDIO_CLK_RES_CTRL, 4, 3, BIT(15),
+ 7, 3, BIT(12), 0);
+
+static const struct clk_parent_data hdmi_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(hdmi_mclk, hdmi_parents, APMU_HDMI_CLK_RES_CTRL, 1, 4, BIT(29), 5,
+ 3, BIT(0), 0);
+
+CCU_GATE_DEFINE(pcie0_master_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_0, BIT(2), 0);
+CCU_GATE_DEFINE(pcie0_slave_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_0, BIT(1), 0);
+CCU_GATE_DEFINE(pcie0_dbi_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_0, BIT(0), 0);
+
+CCU_GATE_DEFINE(pcie1_master_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_1, BIT(2), 0);
+CCU_GATE_DEFINE(pcie1_slave_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_1, BIT(1), 0);
+CCU_GATE_DEFINE(pcie1_dbi_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_1, BIT(0), 0);
+
+CCU_GATE_DEFINE(pcie2_master_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_2, BIT(2), 0);
+CCU_GATE_DEFINE(pcie2_slave_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_2, BIT(1), 0);
+CCU_GATE_DEFINE(pcie2_dbi_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_2, BIT(0), 0);
+
+CCU_GATE_DEFINE(emac0_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_EMAC0_CLK_RES_CTRL, BIT(0), 0);
+CCU_GATE_DEFINE(emac0_ptp_clk, CCU_PARENT_HW(pll2_d6), APMU_EMAC0_CLK_RES_CTRL, BIT(15), 0);
+CCU_GATE_DEFINE(emac1_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_EMAC1_CLK_RES_CTRL, BIT(0), 0);
+CCU_GATE_DEFINE(emac1_ptp_clk, CCU_PARENT_HW(pll2_d6), APMU_EMAC1_CLK_RES_CTRL, BIT(15), 0);
+
+CCU_GATE_DEFINE(emmc_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_PMUA_EM_CLK_RES_CTRL, BIT(3), 0);
+/* APMU clocks end */
+
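+/*
+ * The hw tables below are indexed by dt-binding clock ids through
+ * designated initializers; any id without an entry remains NULL.
+ */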
+static struct clk_hw *k1_ccu_pll_hws[] = {
+ [CLK_PLL1] = &pll1.common.hw,
+ [CLK_PLL2] = &pll2.common.hw,
+ [CLK_PLL3] = &pll3.common.hw,
+ [CLK_PLL1_D2] = &pll1_d2.common.hw,
+ [CLK_PLL1_D3] = &pll1_d3.common.hw,
+ [CLK_PLL1_D4] = &pll1_d4.common.hw,
+ [CLK_PLL1_D5] = &pll1_d5.common.hw,
+ [CLK_PLL1_D6] = &pll1_d6.common.hw,
+ [CLK_PLL1_D7] = &pll1_d7.common.hw,
+ [CLK_PLL1_D8] = &pll1_d8.common.hw,
+ [CLK_PLL1_D11] = &pll1_d11_223p4.common.hw,
+ [CLK_PLL1_D13] = &pll1_d13_189.common.hw,
+ [CLK_PLL1_D23] = &pll1_d23_106p8.common.hw,
+ [CLK_PLL1_D64] = &pll1_d64_38p4.common.hw,
+ [CLK_PLL1_D10_AUD] = &pll1_aud_245p7.common.hw,
+ [CLK_PLL1_D100_AUD] = &pll1_aud_24p5.common.hw,
+ [CLK_PLL2_D1] = &pll2_d1.common.hw,
+ [CLK_PLL2_D2] = &pll2_d2.common.hw,
+ [CLK_PLL2_D3] = &pll2_d3.common.hw,
+ [CLK_PLL2_D4] = &pll2_d4.common.hw,
+ [CLK_PLL2_D5] = &pll2_d5.common.hw,
+ [CLK_PLL2_D6] = &pll2_d6.common.hw,
+ [CLK_PLL2_D7] = &pll2_d7.common.hw,
+ [CLK_PLL2_D8] = &pll2_d8.common.hw,
+ [CLK_PLL3_D1] = &pll3_d1.common.hw,
+ [CLK_PLL3_D2] = &pll3_d2.common.hw,
+ [CLK_PLL3_D3] = &pll3_d3.common.hw,
+ [CLK_PLL3_D4] = &pll3_d4.common.hw,
+ [CLK_PLL3_D5] = &pll3_d5.common.hw,
+ [CLK_PLL3_D6] = &pll3_d6.common.hw,
+ [CLK_PLL3_D7] = &pll3_d7.common.hw,
+ [CLK_PLL3_D8] = &pll3_d8.common.hw,
+ [CLK_PLL3_80] = &pll3_80.common.hw,
+ [CLK_PLL3_40] = &pll3_40.common.hw,
+ [CLK_PLL3_20] = &pll3_20.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_pll_data = {
+ .hws = k1_ccu_pll_hws,
+ .num = ARRAY_SIZE(k1_ccu_pll_hws),
+};
+
+static struct clk_hw *k1_ccu_mpmu_hws[] = {
+ [CLK_PLL1_307P2] = &pll1_d8_307p2.common.hw,
+ [CLK_PLL1_76P8] = &pll1_d32_76p8.common.hw,
+ [CLK_PLL1_61P44] = &pll1_d40_61p44.common.hw,
+ [CLK_PLL1_153P6] = &pll1_d16_153p6.common.hw,
+ [CLK_PLL1_102P4] = &pll1_d24_102p4.common.hw,
+ [CLK_PLL1_51P2] = &pll1_d48_51p2.common.hw,
+ [CLK_PLL1_51P2_AP] = &pll1_d48_51p2_ap.common.hw,
+ [CLK_PLL1_57P6] = &pll1_m3d128_57p6.common.hw,
+ [CLK_PLL1_25P6] = &pll1_d96_25p6.common.hw,
+ [CLK_PLL1_12P8] = &pll1_d192_12p8.common.hw,
+ [CLK_PLL1_12P8_WDT] = &pll1_d192_12p8_wdt.common.hw,
+ [CLK_PLL1_6P4] = &pll1_d384_6p4.common.hw,
+ [CLK_PLL1_3P2] = &pll1_d768_3p2.common.hw,
+ [CLK_PLL1_1P6] = &pll1_d1536_1p6.common.hw,
+ [CLK_PLL1_0P8] = &pll1_d3072_0p8.common.hw,
+ [CLK_PLL1_409P6] = &pll1_d6_409p6.common.hw,
+ [CLK_PLL1_204P8] = &pll1_d12_204p8.common.hw,
+ [CLK_PLL1_491] = &pll1_d5_491p52.common.hw,
+ [CLK_PLL1_245P76] = &pll1_d10_245p76.common.hw,
+ [CLK_PLL1_614] = &pll1_d4_614p4.common.hw,
+ [CLK_PLL1_47P26] = &pll1_d52_47p26.common.hw,
+ [CLK_PLL1_31P5] = &pll1_d78_31p5.common.hw,
+ [CLK_PLL1_819] = &pll1_d3_819p2.common.hw,
+ [CLK_PLL1_1228] = &pll1_d2_1228p8.common.hw,
+ [CLK_SLOW_UART] = &slow_uart.common.hw,
+ [CLK_SLOW_UART1] = &slow_uart1_14p74.common.hw,
+ [CLK_SLOW_UART2] = &slow_uart2_48.common.hw,
+ [CLK_WDT] = &wdt_clk.common.hw,
+ [CLK_RIPC] = &ripc_clk.common.hw,
+ [CLK_I2S_SYSCLK] = &i2s_sysclk.common.hw,
+ [CLK_I2S_BCLK] = &i2s_bclk.common.hw,
+ [CLK_APB] = &apb_clk.common.hw,
+ [CLK_WDT_BUS] = &wdt_bus_clk.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_mpmu_data = {
+ .hws = k1_ccu_mpmu_hws,
+ .num = ARRAY_SIZE(k1_ccu_mpmu_hws),
+};
+
+static struct clk_hw *k1_ccu_apbc_hws[] = {
+ [CLK_UART0] = &uart0_clk.common.hw,
+ [CLK_UART2] = &uart2_clk.common.hw,
+ [CLK_UART3] = &uart3_clk.common.hw,
+ [CLK_UART4] = &uart4_clk.common.hw,
+ [CLK_UART5] = &uart5_clk.common.hw,
+ [CLK_UART6] = &uart6_clk.common.hw,
+ [CLK_UART7] = &uart7_clk.common.hw,
+ [CLK_UART8] = &uart8_clk.common.hw,
+ [CLK_UART9] = &uart9_clk.common.hw,
+ [CLK_GPIO] = &gpio_clk.common.hw,
+ [CLK_PWM0] = &pwm0_clk.common.hw,
+ [CLK_PWM1] = &pwm1_clk.common.hw,
+ [CLK_PWM2] = &pwm2_clk.common.hw,
+ [CLK_PWM3] = &pwm3_clk.common.hw,
+ [CLK_PWM4] = &pwm4_clk.common.hw,
+ [CLK_PWM5] = &pwm5_clk.common.hw,
+ [CLK_PWM6] = &pwm6_clk.common.hw,
+ [CLK_PWM7] = &pwm7_clk.common.hw,
+ [CLK_PWM8] = &pwm8_clk.common.hw,
+ [CLK_PWM9] = &pwm9_clk.common.hw,
+ [CLK_PWM10] = &pwm10_clk.common.hw,
+ [CLK_PWM11] = &pwm11_clk.common.hw,
+ [CLK_PWM12] = &pwm12_clk.common.hw,
+ [CLK_PWM13] = &pwm13_clk.common.hw,
+ [CLK_PWM14] = &pwm14_clk.common.hw,
+ [CLK_PWM15] = &pwm15_clk.common.hw,
+ [CLK_PWM16] = &pwm16_clk.common.hw,
+ [CLK_PWM17] = &pwm17_clk.common.hw,
+ [CLK_PWM18] = &pwm18_clk.common.hw,
+ [CLK_PWM19] = &pwm19_clk.common.hw,
+ [CLK_SSP3] = &ssp3_clk.common.hw,
+ [CLK_RTC] = &rtc_clk.common.hw,
+ [CLK_TWSI0] = &twsi0_clk.common.hw,
+ [CLK_TWSI1] = &twsi1_clk.common.hw,
+ [CLK_TWSI2] = &twsi2_clk.common.hw,
+ [CLK_TWSI4] = &twsi4_clk.common.hw,
+ [CLK_TWSI5] = &twsi5_clk.common.hw,
+ [CLK_TWSI6] = &twsi6_clk.common.hw,
+ [CLK_TWSI7] = &twsi7_clk.common.hw,
+ [CLK_TWSI8] = &twsi8_clk.common.hw,
+ [CLK_TIMERS1] = &timers1_clk.common.hw,
+ [CLK_TIMERS2] = &timers2_clk.common.hw,
+ [CLK_AIB] = &aib_clk.common.hw,
+ [CLK_ONEWIRE] = &onewire_clk.common.hw,
+ [CLK_SSPA0] = &sspa0_clk.common.hw,
+ [CLK_SSPA1] = &sspa1_clk.common.hw,
+ [CLK_DRO] = &dro_clk.common.hw,
+ [CLK_IR] = &ir_clk.common.hw,
+ [CLK_TSEN] = &tsen_clk.common.hw,
+ [CLK_IPC_AP2AUD] = &ipc_ap2aud_clk.common.hw,
+ [CLK_CAN0] = &can0_clk.common.hw,
+ [CLK_CAN0_BUS] = &can0_bus_clk.common.hw,
+ [CLK_UART0_BUS] = &uart0_bus_clk.common.hw,
+ [CLK_UART2_BUS] = &uart2_bus_clk.common.hw,
+ [CLK_UART3_BUS] = &uart3_bus_clk.common.hw,
+ [CLK_UART4_BUS] = &uart4_bus_clk.common.hw,
+ [CLK_UART5_BUS] = &uart5_bus_clk.common.hw,
+ [CLK_UART6_BUS] = &uart6_bus_clk.common.hw,
+ [CLK_UART7_BUS] = &uart7_bus_clk.common.hw,
+ [CLK_UART8_BUS] = &uart8_bus_clk.common.hw,
+ [CLK_UART9_BUS] = &uart9_bus_clk.common.hw,
+ [CLK_GPIO_BUS] = &gpio_bus_clk.common.hw,
+ [CLK_PWM0_BUS] = &pwm0_bus_clk.common.hw,
+ [CLK_PWM1_BUS] = &pwm1_bus_clk.common.hw,
+ [CLK_PWM2_BUS] = &pwm2_bus_clk.common.hw,
+ [CLK_PWM3_BUS] = &pwm3_bus_clk.common.hw,
+ [CLK_PWM4_BUS] = &pwm4_bus_clk.common.hw,
+ [CLK_PWM5_BUS] = &pwm5_bus_clk.common.hw,
+ [CLK_PWM6_BUS] = &pwm6_bus_clk.common.hw,
+ [CLK_PWM7_BUS] = &pwm7_bus_clk.common.hw,
+ [CLK_PWM8_BUS] = &pwm8_bus_clk.common.hw,
+ [CLK_PWM9_BUS] = &pwm9_bus_clk.common.hw,
+ [CLK_PWM10_BUS] = &pwm10_bus_clk.common.hw,
+ [CLK_PWM11_BUS] = &pwm11_bus_clk.common.hw,
+ [CLK_PWM12_BUS] = &pwm12_bus_clk.common.hw,
+ [CLK_PWM13_BUS] = &pwm13_bus_clk.common.hw,
+ [CLK_PWM14_BUS] = &pwm14_bus_clk.common.hw,
+ [CLK_PWM15_BUS] = &pwm15_bus_clk.common.hw,
+ [CLK_PWM16_BUS] = &pwm16_bus_clk.common.hw,
+ [CLK_PWM17_BUS] = &pwm17_bus_clk.common.hw,
+ [CLK_PWM18_BUS] = &pwm18_bus_clk.common.hw,
+ [CLK_PWM19_BUS] = &pwm19_bus_clk.common.hw,
+ [CLK_SSP3_BUS] = &ssp3_bus_clk.common.hw,
+ [CLK_RTC_BUS] = &rtc_bus_clk.common.hw,
+ [CLK_TWSI0_BUS] = &twsi0_bus_clk.common.hw,
+ [CLK_TWSI1_BUS] = &twsi1_bus_clk.common.hw,
+ [CLK_TWSI2_BUS] = &twsi2_bus_clk.common.hw,
+ [CLK_TWSI4_BUS] = &twsi4_bus_clk.common.hw,
+ [CLK_TWSI5_BUS] = &twsi5_bus_clk.common.hw,
+ [CLK_TWSI6_BUS] = &twsi6_bus_clk.common.hw,
+ [CLK_TWSI7_BUS] = &twsi7_bus_clk.common.hw,
+ [CLK_TWSI8_BUS] = &twsi8_bus_clk.common.hw,
+ [CLK_TIMERS1_BUS] = &timers1_bus_clk.common.hw,
+ [CLK_TIMERS2_BUS] = &timers2_bus_clk.common.hw,
+ [CLK_AIB_BUS] = &aib_bus_clk.common.hw,
+ [CLK_ONEWIRE_BUS] = &onewire_bus_clk.common.hw,
+ [CLK_SSPA0_BUS] = &sspa0_bus_clk.common.hw,
+ [CLK_SSPA1_BUS] = &sspa1_bus_clk.common.hw,
+ [CLK_TSEN_BUS] = &tsen_bus_clk.common.hw,
+ [CLK_IPC_AP2AUD_BUS] = &ipc_ap2aud_bus_clk.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_apbc_data = {
+ .hws = k1_ccu_apbc_hws,
+ .num = ARRAY_SIZE(k1_ccu_apbc_hws),
+};
+
+static struct clk_hw *k1_ccu_apmu_hws[] = {
+ [CLK_CCI550] = &cci550_clk.common.hw,
+ [CLK_CPU_C0_HI] = &cpu_c0_hi_clk.common.hw,
+ [CLK_CPU_C0_CORE] = &cpu_c0_core_clk.common.hw,
+ [CLK_CPU_C0_ACE] = &cpu_c0_ace_clk.common.hw,
+ [CLK_CPU_C0_TCM] = &cpu_c0_tcm_clk.common.hw,
+ [CLK_CPU_C1_HI] = &cpu_c1_hi_clk.common.hw,
+ [CLK_CPU_C1_CORE] = &cpu_c1_core_clk.common.hw,
+ [CLK_CPU_C1_ACE] = &cpu_c1_ace_clk.common.hw,
+ [CLK_CCIC_4X] = &ccic_4x_clk.common.hw,
+ [CLK_CCIC1PHY] = &ccic1phy_clk.common.hw,
+ [CLK_SDH_AXI] = &sdh_axi_aclk.common.hw,
+ [CLK_SDH0] = &sdh0_clk.common.hw,
+ [CLK_SDH1] = &sdh1_clk.common.hw,
+ [CLK_SDH2] = &sdh2_clk.common.hw,
+ [CLK_USB_P1] = &usb_p1_aclk.common.hw,
+ [CLK_USB_AXI] = &usb_axi_clk.common.hw,
+ [CLK_USB30] = &usb30_clk.common.hw,
+ [CLK_QSPI] = &qspi_clk.common.hw,
+ [CLK_QSPI_BUS] = &qspi_bus_clk.common.hw,
+ [CLK_DMA] = &dma_clk.common.hw,
+ [CLK_AES] = &aes_clk.common.hw,
+ [CLK_VPU] = &vpu_clk.common.hw,
+ [CLK_GPU] = &gpu_clk.common.hw,
+ [CLK_EMMC] = &emmc_clk.common.hw,
+ [CLK_EMMC_X] = &emmc_x_clk.common.hw,
+ [CLK_AUDIO] = &audio_clk.common.hw,
+ [CLK_HDMI] = &hdmi_mclk.common.hw,
+ [CLK_PMUA_ACLK] = &pmua_aclk.common.hw,
+ [CLK_PCIE0_MASTER] = &pcie0_master_clk.common.hw,
+ [CLK_PCIE0_SLAVE] = &pcie0_slave_clk.common.hw,
+ [CLK_PCIE0_DBI] = &pcie0_dbi_clk.common.hw,
+ [CLK_PCIE1_MASTER] = &pcie1_master_clk.common.hw,
+ [CLK_PCIE1_SLAVE] = &pcie1_slave_clk.common.hw,
+ [CLK_PCIE1_DBI] = &pcie1_dbi_clk.common.hw,
+ [CLK_PCIE2_MASTER] = &pcie2_master_clk.common.hw,
+ [CLK_PCIE2_SLAVE] = &pcie2_slave_clk.common.hw,
+ [CLK_PCIE2_DBI] = &pcie2_dbi_clk.common.hw,
+ [CLK_EMAC0_BUS] = &emac0_bus_clk.common.hw,
+ [CLK_EMAC0_PTP] = &emac0_ptp_clk.common.hw,
+ [CLK_EMAC1_BUS] = &emac1_bus_clk.common.hw,
+ [CLK_EMAC1_PTP] = &emac1_ptp_clk.common.hw,
+ [CLK_JPG] = &jpg_clk.common.hw,
+ [CLK_CCIC2PHY] = &ccic2phy_clk.common.hw,
+ [CLK_CCIC3PHY] = &ccic3phy_clk.common.hw,
+ [CLK_CSI] = &csi_clk.common.hw,
+ [CLK_CAMM0] = &camm0_clk.common.hw,
+ [CLK_CAMM1] = &camm1_clk.common.hw,
+ [CLK_CAMM2] = &camm2_clk.common.hw,
+ [CLK_ISP_CPP] = &isp_cpp_clk.common.hw,
+ [CLK_ISP_BUS] = &isp_bus_clk.common.hw,
+ [CLK_ISP] = &isp_clk.common.hw,
+ [CLK_DPU_MCLK] = &dpu_mclk.common.hw,
+ [CLK_DPU_ESC] = &dpu_esc_clk.common.hw,
+ [CLK_DPU_BIT] = &dpu_bit_clk.common.hw,
+ [CLK_DPU_PXCLK] = &dpu_pxclk.common.hw,
+ [CLK_DPU_HCLK] = &dpu_hclk.common.hw,
+ [CLK_DPU_SPI] = &dpu_spi_clk.common.hw,
+ [CLK_DPU_SPI_HBUS] = &dpu_spi_hbus_clk.common.hw,
+ [CLK_DPU_SPIBUS] = &dpu_spi_bus_clk.common.hw,
+ [CLK_DPU_SPI_ACLK] = &dpu_spi_aclk.common.hw,
+ [CLK_V2D] = &v2d_clk.common.hw,
+ [CLK_EMMC_BUS] = &emmc_bus_clk.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_apmu_data = {
+ .hws = k1_ccu_apmu_hws,
+ .num = ARRAY_SIZE(k1_ccu_apmu_hws),
+};
+
+static int spacemit_ccu_register(struct device *dev,
+ struct regmap *regmap,
+ struct regmap *lock_regmap,
+ const struct spacemit_ccu_data *data)
+{
+ struct clk_hw_onecell_data *clk_data;
+ int i, ret;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, data->num),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ for (i = 0; i < data->num; i++) {
+ struct clk_hw *hw = data->hws[i];
+ struct ccu_common *common;
+ const char *name;
+
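+		/* Keep holes in the clock ID space as -ENOENT placeholders. */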
+ if (!hw) {
+ clk_data->hws[i] = ERR_PTR(-ENOENT);
+ continue;
+ }
+
+ name = hw->init->name;
+
+ common = hw_to_ccu_common(hw);
+ common->regmap = regmap;
+ common->lock_regmap = lock_regmap;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "Cannot register clock %d - %s\n",
+ i, name);
+ return ret;
+ }
+
+ clk_data->hws[i] = hw;
+ }
+
+ clk_data->num = data->num;
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ dev_err(dev, "failed to add clock hardware provider (%d)\n", ret);
+
+ return ret;
+}
+
+static int k1_ccu_probe(struct platform_device *pdev)
+{
+ struct regmap *base_regmap, *lock_regmap = NULL;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ base_regmap = device_node_to_regmap(dev->of_node);
+ if (IS_ERR(base_regmap))
+ return dev_err_probe(dev, PTR_ERR(base_regmap),
+ "failed to get regmap\n");
+
+	/*
+	 * The lock status bits of the PLLs live in the MPMU region, while the
+	 * PLLs themselves are programmed through the APBS region, so a
+	 * reference to the MPMU syscon is required to check whether a PLL has
+	 * locked.
+	 */
+ if (of_device_is_compatible(dev->of_node, "spacemit,k1-pll")) {
+ struct device_node *mpmu = of_parse_phandle(dev->of_node,
+ "spacemit,mpmu", 0);
+ if (!mpmu)
+ return dev_err_probe(dev, -ENODEV,
+ "Cannot parse MPMU region\n");
+
+ lock_regmap = device_node_to_regmap(mpmu);
+ of_node_put(mpmu);
+
+ if (IS_ERR(lock_regmap))
+ return dev_err_probe(dev, PTR_ERR(lock_regmap),
+ "failed to get lock regmap\n");
+ }
+
+ ret = spacemit_ccu_register(dev, base_regmap, lock_regmap,
+ of_device_get_match_data(dev));
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register clocks\n");
+
+ return 0;
+}
+
+static const struct of_device_id of_k1_ccu_match[] = {
+ {
+ .compatible = "spacemit,k1-pll",
+ .data = &k1_ccu_pll_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-mpmu",
+ .data = &k1_ccu_mpmu_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-apbc",
+ .data = &k1_ccu_apbc_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-apmu",
+ .data = &k1_ccu_apmu_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_k1_ccu_match);
+
+static struct platform_driver k1_ccu_driver = {
+ .driver = {
+ .name = "spacemit,k1-ccu",
+ .of_match_table = of_k1_ccu_match,
+ },
+ .probe = k1_ccu_probe,
+};
+module_platform_driver(k1_ccu_driver);
+
+MODULE_DESCRIPTION("SpacemiT K1 CCU driver");
+MODULE_AUTHOR("Haylen Chu <heylenay@4d2.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/spacemit/ccu_common.h b/drivers/clk/spacemit/ccu_common.h
new file mode 100644
index 000000000000..da72f3836e0b
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_common.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_COMMON_H_
+#define _CCU_COMMON_H_
+
+#include <linux/regmap.h>
+
+struct ccu_common {
+ struct regmap *regmap;
+ struct regmap *lock_regmap;
+
+ union {
+ /* For DDN and MIX */
+ struct {
+ u32 reg_ctrl;
+ u32 reg_fc;
+ u32 mask_fc;
+ };
+
+ /* For PLL */
+ struct {
+ u32 reg_swcr1;
+ u32 reg_swcr3;
+ };
+ };
+
+ struct clk_hw hw;
+};
+
+static inline struct ccu_common *hw_to_ccu_common(struct clk_hw *hw)
+{
+ return container_of(hw, struct ccu_common, hw);
+}
+
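+/*
+ * The "reg" argument is token-pasted onto "reg_", so ccu_read(c, ctrl)
+ * reads c->reg_ctrl and ccu_update(c, fc, mask, val) updates c->reg_fc.
+ */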
+#define ccu_read(c, reg) \
+ ({ \
+ u32 tmp; \
+ regmap_read((c)->regmap, (c)->reg_##reg, &tmp); \
+ tmp; \
+ })
+#define ccu_update(c, reg, mask, val) \
+ regmap_update_bits((c)->regmap, (c)->reg_##reg, mask, val)
+
+#endif /* _CCU_COMMON_H_ */
diff --git a/drivers/clk/spacemit/ccu_ddn.c b/drivers/clk/spacemit/ccu_ddn.c
new file mode 100644
index 000000000000..be311b045698
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_ddn.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ *
+ * DDN stands for "Divider Denominator Numerator"; it is an M/N clock with
+ * a constant x2 factor. This clock hardware follows the equation below,
+ *
+ * numerator Fin
+ * 2 * ------------- = -------
+ * denominator Fout
+ *
+ * Thus, Fout could be calculated with,
+ *
+ * Fin denominator
+ * Fout = ----- * -------------
+ * 2 numerator
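+ *
+ * For example (illustrative numbers only): with Fin = 2400 MHz,
+ * numerator = 25 and denominator = 1, Fout = 2400 / 2 * 1 / 25 = 48 MHz.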
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/rational.h>
+
+#include "ccu_ddn.h"
+
+static unsigned long ccu_ddn_calc_rate(unsigned long prate,
+ unsigned long num, unsigned long den)
+{
+ return prate * den / 2 / num;
+}
+
+static unsigned long ccu_ddn_calc_best_rate(struct ccu_ddn *ddn,
+ unsigned long rate, unsigned long prate,
+ unsigned long *num, unsigned long *den)
+{
+ rational_best_approximation(rate, prate / 2,
+ ddn->den_mask >> ddn->den_shift,
+ ddn->num_mask >> ddn->num_shift,
+ den, num);
+ return ccu_ddn_calc_rate(prate, *num, *den);
+}
+
+static long ccu_ddn_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
+ unsigned long num, den;
+
+ return ccu_ddn_calc_best_rate(ddn, rate, *prate, &num, &den);
+}
+
+static unsigned long ccu_ddn_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
+ unsigned int val, num, den;
+
+ val = ccu_read(&ddn->common, ctrl);
+
+ num = (val & ddn->num_mask) >> ddn->num_shift;
+ den = (val & ddn->den_mask) >> ddn->den_shift;
+
+ return ccu_ddn_calc_rate(prate, num, den);
+}
+
+static int ccu_ddn_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
+ unsigned long num, den;
+
+ ccu_ddn_calc_best_rate(ddn, rate, prate, &num, &den);
+
+ ccu_update(&ddn->common, ctrl,
+ ddn->num_mask | ddn->den_mask,
+ (num << ddn->num_shift) | (den << ddn->den_shift));
+
+ return 0;
+}
+
+const struct clk_ops spacemit_ccu_ddn_ops = {
+ .recalc_rate = ccu_ddn_recalc_rate,
+ .round_rate = ccu_ddn_round_rate,
+ .set_rate = ccu_ddn_set_rate,
+};
diff --git a/drivers/clk/spacemit/ccu_ddn.h b/drivers/clk/spacemit/ccu_ddn.h
new file mode 100644
index 000000000000..a52fabe77d62
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_ddn.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_DDN_H_
+#define _CCU_DDN_H_
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+struct ccu_ddn {
+ struct ccu_common common;
+ unsigned int num_mask;
+ unsigned int num_shift;
+ unsigned int den_mask;
+ unsigned int den_shift;
+};
+
+#define CCU_DDN_INIT(_name, _parent, _flags) \
+ CLK_HW_INIT_HW(#_name, &_parent.common.hw, &spacemit_ccu_ddn_ops, _flags)
+
+#define CCU_DDN_DEFINE(_name, _parent, _reg_ctrl, _num_shift, _num_width, \
+ _den_shift, _den_width, _flags) \
+static struct ccu_ddn _name = { \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .hw.init = CCU_DDN_INIT(_name, _parent, _flags), \
+ }, \
+ .num_mask = GENMASK(_num_shift + _num_width - 1, _num_shift), \
+ .num_shift = _num_shift, \
+ .den_mask = GENMASK(_den_shift + _den_width - 1, _den_shift), \
+ .den_shift = _den_shift, \
+}
+
+static inline struct ccu_ddn *hw_to_ccu_ddn(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_ddn, common);
+}
+
+extern const struct clk_ops spacemit_ccu_ddn_ops;
+
+#endif
diff --git a/drivers/clk/spacemit/ccu_mix.c b/drivers/clk/spacemit/ccu_mix.c
new file mode 100644
index 000000000000..9b852aa61f78
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_mix.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ *
+ * The MIX clock type combines a mux, a factor or divider, and a gate.
+ */
+
+#include <linux/clk-provider.h>
+
+#include "ccu_mix.h"
+
+#define MIX_FC_TIMEOUT_US 10000
+#define MIX_FC_DELAY_US 5
+
+static void ccu_gate_disable(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+
+ ccu_update(&mix->common, ctrl, mix->gate.mask, 0);
+}
+
+static int ccu_gate_enable(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_gate_config *gate = &mix->gate;
+
+ ccu_update(&mix->common, ctrl, gate->mask, gate->mask);
+
+ return 0;
+}
+
+static int ccu_gate_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_gate_config *gate = &mix->gate;
+
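+	/* Multi-bit gates only count as enabled with every bit set. */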
+ return (ccu_read(&mix->common, ctrl) & gate->mask) == gate->mask;
+}
+
+static unsigned long ccu_factor_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+
+ return parent_rate * mix->factor.mul / mix->factor.div;
+}
+
+static unsigned long ccu_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_div_config *div = &mix->div;
+ unsigned long val;
+
+ val = ccu_read(&mix->common, ctrl) >> div->shift;
+ val &= (1 << div->width) - 1;
+
+ return divider_recalc_rate(hw, parent_rate, val, NULL, 0, div->width);
+}
+
+/*
+ * Some clocks require an "FC" (frequency change) bit to be set after changing
+ * their rate or reparenting. The hardware clears the bit once the operation
+ * has completed, so poll it for up to MIX_FC_TIMEOUT_US.
+ */
+static int ccu_mix_trigger_fc(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+ unsigned int val;
+
+	/* Nothing to do for clocks without an FC register. */
+	if (!common->reg_fc)
+		return 0;
+
+ ccu_update(common, fc, common->mask_fc, common->mask_fc);
+
+ return regmap_read_poll_timeout_atomic(common->regmap, common->reg_fc,
+ val, !(val & common->mask_fc),
+ MIX_FC_DELAY_US,
+ MIX_FC_TIMEOUT_US);
+}
+
+static long ccu_factor_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return ccu_factor_recalc_rate(hw, *prate);
+}
+
+static int ccu_factor_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+static unsigned long
+ccu_mix_calc_best_rate(struct clk_hw *hw, unsigned long rate,
+ struct clk_hw **best_parent,
+ unsigned long *best_parent_rate,
+ u32 *div_val)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ unsigned int parent_num = clk_hw_get_num_parents(hw);
+ struct ccu_div_config *div = &mix->div;
+ u32 div_max = 1 << div->width;
+ unsigned long best_rate = 0;
+
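+	/*
+	 * Try every parent and divider combination. The register field
+	 * encodes the divisor as (value + 1), hence the "j - 1" stored
+	 * into *div_val below.
+	 */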
+ for (int i = 0; i < parent_num; i++) {
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
+ unsigned long parent_rate;
+
+ if (!parent)
+ continue;
+
+ parent_rate = clk_hw_get_rate(parent);
+
+ for (int j = 1; j <= div_max; j++) {
+ unsigned long tmp = DIV_ROUND_CLOSEST_ULL(parent_rate, j);
+
+ if (abs(tmp - rate) < abs(best_rate - rate)) {
+ best_rate = tmp;
+
+ if (div_val)
+ *div_val = j - 1;
+
+ if (best_parent) {
+ *best_parent = parent;
+ *best_parent_rate = parent_rate;
+ }
+ }
+ }
+ }
+
+ return best_rate;
+}
+
+static int ccu_mix_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ req->rate = ccu_mix_calc_best_rate(hw, req->rate,
+ &req->best_parent_hw,
+ &req->best_parent_rate,
+ NULL);
+ return 0;
+}
+
+static int ccu_mix_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_common *common = &mix->common;
+ struct ccu_div_config *div = &mix->div;
+ u32 current_div, target_div, mask;
+
+ ccu_mix_calc_best_rate(hw, rate, NULL, NULL, &target_div);
+
+ current_div = ccu_read(common, ctrl) >> div->shift;
+ current_div &= (1 << div->width) - 1;
+
+ if (current_div == target_div)
+ return 0;
+
+ mask = GENMASK(div->width + div->shift - 1, div->shift);
+
+ ccu_update(common, ctrl, mask, target_div << div->shift);
+
+ return ccu_mix_trigger_fc(hw);
+}
+
+static u8 ccu_mux_get_parent(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_mux_config *mux = &mix->mux;
+ u8 parent;
+
+ parent = ccu_read(&mix->common, ctrl) >> mux->shift;
+ parent &= (1 << mux->width) - 1;
+
+ return parent;
+}
+
+static int ccu_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_mux_config *mux = &mix->mux;
+ u32 mask;
+
+ mask = GENMASK(mux->width + mux->shift - 1, mux->shift);
+
+ ccu_update(&mix->common, ctrl, mask, index << mux->shift);
+
+ return ccu_mix_trigger_fc(hw);
+}
+
+const struct clk_ops spacemit_ccu_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+};
+
+const struct clk_ops spacemit_ccu_factor_ops = {
+ .round_rate = ccu_factor_round_rate,
+ .recalc_rate = ccu_factor_recalc_rate,
+ .set_rate = ccu_factor_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_ops = {
+ .determine_rate = ccu_mix_determine_rate,
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+};
+
+const struct clk_ops spacemit_ccu_div_ops = {
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_factor_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .round_rate = ccu_factor_round_rate,
+ .recalc_rate = ccu_factor_recalc_rate,
+ .set_rate = ccu_factor_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+};
+
+const struct clk_ops spacemit_ccu_div_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_div_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_div_ops = {
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
diff --git a/drivers/clk/spacemit/ccu_mix.h b/drivers/clk/spacemit/ccu_mix.h
new file mode 100644
index 000000000000..51d19f5d6aac
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_mix.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_MIX_H_
+#define _CCU_MIX_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+/**
+ * struct ccu_gate_config - Gate configuration
+ *
+ * @mask: Mask to enable the gate. Some clocks may have more than one bit
+ * set in this field.
+ */
+struct ccu_gate_config {
+ u32 mask;
+};
+
+struct ccu_factor_config {
+ u32 div;
+ u32 mul;
+};
+
+struct ccu_mux_config {
+ u8 shift;
+ u8 width;
+};
+
+struct ccu_div_config {
+ u8 shift;
+ u8 width;
+};
+
+struct ccu_mix {
+ struct ccu_factor_config factor;
+ struct ccu_gate_config gate;
+ struct ccu_div_config div;
+ struct ccu_mux_config mux;
+ struct ccu_common common;
+};
+
+#define CCU_GATE_INIT(_mask) { .mask = _mask }
+#define CCU_FACTOR_INIT(_div, _mul) { .div = _div, .mul = _mul }
+#define CCU_MUX_INIT(_shift, _width) { .shift = _shift, .width = _width }
+#define CCU_DIV_INIT(_shift, _width) { .shift = _shift, .width = _width }
+
+#define CCU_PARENT_HW(_parent) { .hw = &_parent.common.hw }
+#define CCU_PARENT_NAME(_name) { .fw_name = #_name }
+
+#define CCU_MIX_INITHW(_name, _parent, _ops, _flags) \
+ .hw.init = &(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = #_name, \
+ .parent_data = (const struct clk_parent_data[]) \
+ { _parent }, \
+ .num_parents = 1, \
+ .ops = &_ops, \
+ }
+
+#define CCU_MIX_INITHW_PARENTS(_name, _parents, _ops, _flags) \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(#_name, _parents, &_ops, _flags)
+
+#define CCU_GATE_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_gate_ops, _flags), \
+ } \
+}
+
+#define CCU_FACTOR_DEFINE(_name, _parent, _div, _mul) \
+static struct ccu_mix _name = { \
+ .factor = CCU_FACTOR_INIT(_div, _mul), \
+ .common = { \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_factor_ops, 0), \
+ } \
+}
+
+#define CCU_MUX_DEFINE(_name, _parents, _reg_ctrl, _shift, _width, _flags) \
+static struct ccu_mix _name = { \
+ .mux = CCU_MUX_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, spacemit_ccu_mux_ops, \
+ _flags), \
+ } \
+}
+
+#define CCU_DIV_DEFINE(_name, _parent, _reg_ctrl, _shift, _width, _flags) \
+static struct ccu_mix _name = { \
+ .div = CCU_DIV_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_div_ops, _flags) \
+ } \
+}
+
+#define CCU_FACTOR_GATE_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _div, \
+ _mul) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .factor = CCU_FACTOR_INIT(_div, _mul), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_factor_gate_ops, 0) \
+ } \
+}
+
+#define CCU_MUX_GATE_DEFINE(_name, _parents, _reg_ctrl, _shift, _width, \
+ _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .mux = CCU_MUX_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_gate_ops, _flags), \
+ } \
+}
+
+#define CCU_DIV_GATE_DEFINE(_name, _parent, _reg_ctrl, _shift, _width, \
+ _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .div = CCU_DIV_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_div_gate_ops, \
+ _flags), \
+ } \
+}
+
+#define CCU_MUX_DIV_GATE_DEFINE(_name, _parents, _reg_ctrl, _mshift, _mwidth, \
+ _muxshift, _muxwidth, _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .div = CCU_DIV_INIT(_mshift, _mwidth), \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_div_gate_ops, _flags), \
+ }, \
+}
+
+#define CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(_name, _parents, _reg_ctrl, _reg_fc, \
+ _mshift, _mwidth, _mask_fc, _muxshift, \
+ _muxwidth, _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .div = CCU_DIV_INIT(_mshift, _mwidth), \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .reg_fc = _reg_fc, \
+ .mask_fc = _mask_fc, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_div_gate_ops, _flags), \
+ }, \
+}
+
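+/*
+ * The FC variants below reuse the control register for the frequency-change
+ * bit (reg_fc == reg_ctrl); the SPLIT_FC variant above takes a separate
+ * register for it.
+ */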
+#define CCU_MUX_DIV_GATE_FC_DEFINE(_name, _parents, _reg_ctrl, _mshift, _mwidth,\
+ _mask_fc, _muxshift, _muxwidth, _mask_gate, \
+ _flags) \
+CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(_name, _parents, _reg_ctrl, _reg_ctrl, _mshift,\
+ _mwidth, _mask_fc, _muxshift, _muxwidth, \
+ _mask_gate, _flags)
+
+#define CCU_MUX_DIV_FC_DEFINE(_name, _parents, _reg_ctrl, _mshift, _mwidth, \
+ _mask_fc, _muxshift, _muxwidth, _flags) \
+static struct ccu_mix _name = { \
+ .div = CCU_DIV_INIT(_mshift, _mwidth), \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .reg_fc = _reg_ctrl, \
+ .mask_fc = _mask_fc, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_div_ops, _flags), \
+ }, \
+}
+
+#define CCU_MUX_FC_DEFINE(_name, _parents, _reg_ctrl, _mask_fc, _muxshift, \
+ _muxwidth, _flags) \
+static struct ccu_mix _name = { \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .reg_fc = _reg_ctrl, \
+ .mask_fc = _mask_fc, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, spacemit_ccu_mux_ops, \
+ _flags) \
+ }, \
+}
+
+static inline struct ccu_mix *hw_to_ccu_mix(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_mix, common);
+}
+
+extern const struct clk_ops spacemit_ccu_gate_ops;
+extern const struct clk_ops spacemit_ccu_factor_ops;
+extern const struct clk_ops spacemit_ccu_mux_ops;
+extern const struct clk_ops spacemit_ccu_div_ops;
+extern const struct clk_ops spacemit_ccu_factor_gate_ops;
+extern const struct clk_ops spacemit_ccu_div_gate_ops;
+extern const struct clk_ops spacemit_ccu_mux_gate_ops;
+extern const struct clk_ops spacemit_ccu_mux_div_ops;
+extern const struct clk_ops spacemit_ccu_mux_div_gate_ops;
+#endif /* _CCU_MIX_H_ */
diff --git a/drivers/clk/spacemit/ccu_pll.c b/drivers/clk/spacemit/ccu_pll.c
new file mode 100644
index 000000000000..4427dcfbbb97
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_pll.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/math.h>
+#include <linux/regmap.h>
+
+#include "ccu_common.h"
+#include "ccu_pll.h"
+
+#define PLL_TIMEOUT_US 3000
+#define PLL_DELAY_US 5
+
+#define PLL_SWCR3_EN ((u32)BIT(31))
+#define PLL_SWCR3_MASK GENMASK(30, 0)
+
+static const struct ccu_pll_rate_tbl *ccu_pll_lookup_best_rate(struct ccu_pll *pll,
+ unsigned long rate)
+{
+ struct ccu_pll_config *config = &pll->config;
+ const struct ccu_pll_rate_tbl *best_entry;
+ unsigned long best_delta = ULONG_MAX;
+ int i;
+
+ for (i = 0; i < config->tbl_num; i++) {
+ const struct ccu_pll_rate_tbl *entry = &config->rate_tbl[i];
+ unsigned long delta = abs_diff(entry->rate, rate);
+
+ if (delta < best_delta) {
+ best_delta = delta;
+ best_entry = entry;
+ }
+ }
+
+ return best_entry;
+}
+
+static const struct ccu_pll_rate_tbl *ccu_pll_lookup_matched_entry(struct ccu_pll *pll)
+{
+ struct ccu_pll_config *config = &pll->config;
+ u32 swcr1, swcr3;
+ int i;
+
+ swcr1 = ccu_read(&pll->common, swcr1);
+ swcr3 = ccu_read(&pll->common, swcr3);
+ swcr3 &= PLL_SWCR3_MASK;
+
+ for (i = 0; i < config->tbl_num; i++) {
+ const struct ccu_pll_rate_tbl *entry = &config->rate_tbl[i];
+
+ if (swcr1 == entry->swcr1 && swcr3 == entry->swcr3)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static void ccu_pll_update_param(struct ccu_pll *pll, const struct ccu_pll_rate_tbl *entry)
+{
+ struct ccu_common *common = &pll->common;
+
+ regmap_write(common->regmap, common->reg_swcr1, entry->swcr1);
+ ccu_update(common, swcr3, PLL_SWCR3_MASK, entry->swcr3);
+}
+
+static int ccu_pll_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return ccu_read(common, swcr3) & PLL_SWCR3_EN;
+}
+
+static int ccu_pll_enable(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+ struct ccu_common *common = &pll->common;
+ unsigned int tmp;
+
+ ccu_update(common, swcr3, PLL_SWCR3_EN, PLL_SWCR3_EN);
+
+	/* Wait for the PLL to lock */
+ return regmap_read_poll_timeout_atomic(common->lock_regmap,
+ pll->config.reg_lock,
+ tmp,
+ tmp & pll->config.mask_lock,
+ PLL_DELAY_US, PLL_TIMEOUT_US);
+}
+
+static void ccu_pll_disable(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ ccu_update(common, swcr3, PLL_SWCR3_EN, 0);
+}
+
+/*
+ * PLLs must be gated before changing their rate, which is ensured by the
+ * CLK_SET_RATE_GATE flag.
+ */
+static int ccu_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+ const struct ccu_pll_rate_tbl *entry;
+
+ entry = ccu_pll_lookup_best_rate(pll, rate);
+ ccu_pll_update_param(pll, entry);
+
+ return 0;
+}
+
+static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+ const struct ccu_pll_rate_tbl *entry;
+
+ entry = ccu_pll_lookup_matched_entry(pll);
+
+ WARN_ON_ONCE(!entry);
+
+ return entry ? entry->rate : -EINVAL;
+}
+
+static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ return ccu_pll_lookup_best_rate(pll, rate)->rate;
+}
+
+static int ccu_pll_init(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ if (ccu_pll_lookup_matched_entry(pll))
+ return 0;
+
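+	/*
+	 * The bootloader left the PLL in a configuration the driver does not
+	 * know about; gate it and reprogram the first known-good entry.
+	 */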
+ ccu_pll_disable(hw);
+ ccu_pll_update_param(pll, &pll->config.rate_tbl[0]);
+
+ return 0;
+}
+
+const struct clk_ops spacemit_ccu_pll_ops = {
+ .init = ccu_pll_init,
+ .enable = ccu_pll_enable,
+ .disable = ccu_pll_disable,
+ .set_rate = ccu_pll_set_rate,
+ .recalc_rate = ccu_pll_recalc_rate,
+ .round_rate = ccu_pll_round_rate,
+ .is_enabled = ccu_pll_is_enabled,
+};
diff --git a/drivers/clk/spacemit/ccu_pll.h b/drivers/clk/spacemit/ccu_pll.h
new file mode 100644
index 000000000000..0592f4c3068c
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_pll.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_PLL_H_
+#define _CCU_PLL_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+/**
+ * struct ccu_pll_rate_tbl - Structure mapping between PLL rate and register
+ * configuration.
+ *
+ * @rate: PLL rate
+ * @swcr1: Register value of PLLx_SW1_CTRL (PLLx_SWCR1).
+ * @swcr3: Register value of the lowest 31 bits of PLLx_SW3_CTRL
+ *	(PLLx_SWCR3). The highest bit enables the PLL and is not
+ *	contained in this field.
+ */
+struct ccu_pll_rate_tbl {
+ unsigned long rate;
+ u32 swcr1;
+ u32 swcr3;
+};
+
+struct ccu_pll_config {
+ const struct ccu_pll_rate_tbl *rate_tbl;
+ u32 tbl_num;
+ u32 reg_lock;
+ u32 mask_lock;
+};
+
+#define CCU_PLL_RATE(_rate, _swcr1, _swcr3) \
+ { \
+ .rate = _rate, \
+ .swcr1 = _swcr1, \
+ .swcr3 = _swcr3, \
+ }
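+
+/*
+ * Each table entry maps one supported rate to the pair of register values
+ * that produce it, e.g. (hypothetical values, for illustration only):
+ *
+ *	CCU_PLL_RATE(2457600000UL, 0x0050dd64, 0x330ccccd)
+ */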
+
+struct ccu_pll {
+ struct ccu_common common;
+ struct ccu_pll_config config;
+};
+
+#define CCU_PLL_CONFIG(_table, _reg_lock, _mask_lock) \
+ { \
+ .rate_tbl = _table, \
+ .tbl_num = ARRAY_SIZE(_table), \
+ .reg_lock = (_reg_lock), \
+ .mask_lock = (_mask_lock), \
+ }
+
+#define CCU_PLL_HWINIT(_name, _flags) \
+ (&(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = &spacemit_ccu_pll_ops, \
+ .parent_data = &(struct clk_parent_data) { .index = 0 }, \
+ .num_parents = 1, \
+ .flags = _flags, \
+ })
+
+#define CCU_PLL_DEFINE(_name, _table, _reg_swcr1, _reg_swcr3, _reg_lock, \
+ _mask_lock, _flags) \
+static struct ccu_pll _name = { \
+ .config = CCU_PLL_CONFIG(_table, _reg_lock, _mask_lock), \
+ .common = { \
+ .reg_swcr1 = _reg_swcr1, \
+ .reg_swcr3 = _reg_swcr3, \
+ .hw.init = CCU_PLL_HWINIT(_name, _flags) \
+ } \
+}
+
+static inline struct ccu_pll *hw_to_ccu_pll(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_pll, common);
+}
+
+extern const struct clk_ops spacemit_ccu_pll_ops;
+
+#endif
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 5830a9d87bf2..8896fd052ef1 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -9,123 +9,123 @@ if SUNXI_CCU
config SUNIV_F1C100S_CCU
tristate "Support for the Allwinner newer F1C100s CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUNIV || COMPILE_TEST
config SUN20I_D1_CCU
tristate "Support for the Allwinner D1/R528/T113 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || RISCV || COMPILE_TEST
config SUN20I_D1_R_CCU
tristate "Support for the Allwinner D1/R528/T113 PRCM CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || RISCV || COMPILE_TEST
config SUN50I_A64_CCU
tristate "Support for the Allwinner A64 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_A100_CCU
tristate "Support for the Allwinner A100 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_A100_R_CCU
tristate "Support for the Allwinner A100 PRCM CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_H6_CCU
tristate "Support for the Allwinner H6 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_H616_CCU
tristate "Support for the Allwinner H616 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_H6_R_CCU
tristate "Support for the Allwinner H6 and H616 PRCM CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN55I_A523_CCU
tristate "Support for the Allwinner A523/T527 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN55I_A523_R_CCU
tristate "Support for the Allwinner A523/T527 PRCM CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN4I_A10_CCU
tristate "Support for the Allwinner A10/A20 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
config SUN5I_CCU
bool "Support for the Allwinner sun5i family CCM"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN5I || COMPILE_TEST
depends on SUNXI_CCU=y
config SUN6I_A31_CCU
tristate "Support for the Allwinner A31/A31s CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN6I || COMPILE_TEST
config SUN6I_RTC_CCU
tristate "Support for the Allwinner H616/R329 RTC CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || RISCV || COMPILE_TEST
config SUN8I_A23_CCU
tristate "Support for the Allwinner A23 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_A33_CCU
tristate "Support for the Allwinner A33 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_A83T_CCU
tristate "Support for the Allwinner A83T CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_H3_CCU
tristate "Support for the Allwinner H3 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || COMPILE_TEST
config SUN8I_V3S_CCU
tristate "Support for the Allwinner V3s CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_DE2_CCU
tristate "Support for the Allwinner SoCs DE2 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || RISCV || COMPILE_TEST
config SUN8I_R40_CCU
tristate "Support for the Allwinner R40 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN9I_A80_CCU
tristate "Support for the Allwinner A80 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN9I || COMPILE_TEST
config SUN8I_R_CCU
tristate "Support for Allwinner SoCs' PRCM CCUs"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || COMPILE_TEST
endif
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
index daa462c7d477..955c614830fa 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
@@ -1094,6 +1094,7 @@ static const struct ccu_reset_map sun50i_h616_ccu_resets[] = {
[RST_BUS_TCON_LCD1] = { 0xb7c, BIT(17) },
[RST_BUS_TCON_TV0] = { 0xb9c, BIT(16) },
[RST_BUS_TCON_TV1] = { 0xb9c, BIT(17) },
+ [RST_BUS_LVDS] = { 0xbac, BIT(16) },
[RST_BUS_TVE_TOP] = { 0xbbc, BIT(16) },
[RST_BUS_TVE0] = { 0xbbc, BIT(17) },
[RST_BUS_HDCP] = { 0xc4c, BIT(16) },
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index f2aa71206bc2..a6cd0f988859 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -239,6 +240,16 @@ static const struct sunxi_ccu_desc sun50i_h5_de2_clk_desc = {
.num_resets = ARRAY_SIZE(sun50i_h5_de2_resets),
};
+static const struct sunxi_ccu_desc sun50i_h616_de33_clk_desc = {
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
+
+ .hw_clks = &sun8i_h3_de2_hw_clks,
+
+ .resets = sun50i_h5_de2_resets,
+ .num_resets = ARRAY_SIZE(sun50i_h5_de2_resets),
+};
+
static int sunxi_de2_clk_probe(struct platform_device *pdev)
{
struct clk *bus_clk, *mod_clk;
@@ -291,6 +302,16 @@ static int sunxi_de2_clk_probe(struct platform_device *pdev)
goto err_disable_mod_clk;
}
+	/*
+	 * The DE33 requires these additional (unknown) registers to be set
+	 * during initialisation.
+	 */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "allwinner,sun50i-h616-de33-clk")) {
+ writel(0, reg + 0x24);
+ writel(0x0000a980, reg + 0x28);
+ }
+
ret = devm_sunxi_ccu_probe(&pdev->dev, reg, ccu_desc);
if (ret)
goto err_assert_reset;
@@ -335,6 +356,10 @@ static const struct of_device_id sunxi_de2_clk_ids[] = {
.compatible = "allwinner,sun50i-h6-de3-clk",
.data = &sun50i_h5_de2_clk_desc,
},
+ {
+ .compatible = "allwinner,sun50i-h616-de33-clk",
+ .data = &sun50i_h616_de33_clk_desc,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sunxi_de2_clk_ids);
diff --git a/drivers/clk/sunxi/Kconfig b/drivers/clk/sunxi/Kconfig
index 1c4e543366dd..5e2f92bfe412 100644
--- a/drivers/clk/sunxi/Kconfig
+++ b/drivers/clk/sunxi/Kconfig
@@ -2,13 +2,13 @@
menuconfig CLK_SUNXI
bool "Legacy clock support for Allwinner SoCs"
depends on (ARM && ARCH_SUNXI) || COMPILE_TEST
- default y
+ default (ARM && ARCH_SUNXI)
if CLK_SUNXI
config CLK_SUNXI_CLOCKS
bool "Legacy clock drivers"
- default y
+ default ARCH_SUNXI
help
Legacy clock drivers being used on older (A10, A13, A20,
A23, A31, A80) SoCs. These drivers are kept around for
@@ -19,14 +19,14 @@ config CLK_SUNXI_CLOCKS
config CLK_SUNXI_PRCM_SUN6I
bool "Legacy A31 PRCM driver"
- default y
+ default ARCH_SUNXI
help
Legacy clock driver for the A31 PRCM clocks. Those are
usually needed for the PMIC communication, mostly.
config CLK_SUNXI_PRCM_SUN8I
bool "Legacy sun8i PRCM driver"
- default y
+ default ARCH_SUNXI
help
Legacy clock driver for the sun8i family PRCM clocks.
Those are usually needed for the PMIC communication,
@@ -34,7 +34,7 @@ config CLK_SUNXI_PRCM_SUN8I
config CLK_SUNXI_PRCM_SUN9I
bool "Legacy A80 PRCM driver"
- default y
+ default ARCH_SUNXI
help
Legacy clock driver for the A80 PRCM clocks. Those are
usually needed for the PMIC communication, mostly.
diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
index 4c9555fc6184..ebfb1d59401d 100644
--- a/drivers/clk/thead/clk-th1520-ap.c
+++ b/drivers/clk/thead/clk-th1520-ap.c
@@ -847,6 +847,67 @@ static CCU_GATE(CLK_SRAM1, sram1_clk, "sram1", axi_aclk_pd, 0x20c, BIT(3), 0);
static CCU_GATE(CLK_SRAM2, sram2_clk, "sram2", axi_aclk_pd, 0x20c, BIT(2), 0);
static CCU_GATE(CLK_SRAM3, sram3_clk, "sram3", axi_aclk_pd, 0x20c, BIT(1), 0);
+static CCU_GATE(CLK_AXI4_VO_ACLK, axi4_vo_aclk, "axi4-vo-aclk",
+ video_pll_clk_pd, 0x0, BIT(0), 0);
+static CCU_GATE(CLK_GPU_CORE, gpu_core_clk, "gpu-core-clk", video_pll_clk_pd,
+ 0x0, BIT(3), 0);
+static CCU_GATE(CLK_GPU_CFG_ACLK, gpu_cfg_aclk, "gpu-cfg-aclk",
+ video_pll_clk_pd, 0x0, BIT(4), 0);
+static CCU_GATE(CLK_DPU_PIXELCLK0, dpu0_pixelclk, "dpu0-pixelclk",
+ video_pll_clk_pd, 0x0, BIT(5), 0);
+static CCU_GATE(CLK_DPU_PIXELCLK1, dpu1_pixelclk, "dpu1-pixelclk",
+ video_pll_clk_pd, 0x0, BIT(6), 0);
+static CCU_GATE(CLK_DPU_HCLK, dpu_hclk, "dpu-hclk", video_pll_clk_pd, 0x0,
+ BIT(7), 0);
+static CCU_GATE(CLK_DPU_ACLK, dpu_aclk, "dpu-aclk", video_pll_clk_pd, 0x0,
+ BIT(8), 0);
+static CCU_GATE(CLK_DPU_CCLK, dpu_cclk, "dpu-cclk", video_pll_clk_pd, 0x0,
+ BIT(9), 0);
+static CCU_GATE(CLK_HDMI_SFR, hdmi_sfr_clk, "hdmi-sfr-clk", video_pll_clk_pd,
+ 0x0, BIT(10), 0);
+static CCU_GATE(CLK_HDMI_PCLK, hdmi_pclk, "hdmi-pclk", video_pll_clk_pd, 0x0,
+ BIT(11), 0);
+static CCU_GATE(CLK_HDMI_CEC, hdmi_cec_clk, "hdmi-cec-clk", video_pll_clk_pd,
+ 0x0, BIT(12), 0);
+static CCU_GATE(CLK_MIPI_DSI0_PCLK, mipi_dsi0_pclk, "mipi-dsi0-pclk",
+ video_pll_clk_pd, 0x0, BIT(13), 0);
+static CCU_GATE(CLK_MIPI_DSI1_PCLK, mipi_dsi1_pclk, "mipi-dsi1-pclk",
+ video_pll_clk_pd, 0x0, BIT(14), 0);
+static CCU_GATE(CLK_MIPI_DSI0_CFG, mipi_dsi0_cfg_clk, "mipi-dsi0-cfg-clk",
+ video_pll_clk_pd, 0x0, BIT(15), 0);
+static CCU_GATE(CLK_MIPI_DSI1_CFG, mipi_dsi1_cfg_clk, "mipi-dsi1-cfg-clk",
+ video_pll_clk_pd, 0x0, BIT(16), 0);
+static CCU_GATE(CLK_MIPI_DSI0_REFCLK, mipi_dsi0_refclk, "mipi-dsi0-refclk",
+ video_pll_clk_pd, 0x0, BIT(17), 0);
+static CCU_GATE(CLK_MIPI_DSI1_REFCLK, mipi_dsi1_refclk, "mipi-dsi1-refclk",
+ video_pll_clk_pd, 0x0, BIT(18), 0);
+static CCU_GATE(CLK_HDMI_I2S, hdmi_i2s_clk, "hdmi-i2s-clk", video_pll_clk_pd,
+ 0x0, BIT(19), 0);
+static CCU_GATE(CLK_X2H_DPU1_ACLK, x2h_dpu1_aclk, "x2h-dpu1-aclk",
+ video_pll_clk_pd, 0x0, BIT(20), 0);
+static CCU_GATE(CLK_X2H_DPU_ACLK, x2h_dpu_aclk, "x2h-dpu-aclk",
+ video_pll_clk_pd, 0x0, BIT(21), 0);
+static CCU_GATE(CLK_AXI4_VO_PCLK, axi4_vo_pclk, "axi4-vo-pclk",
+ video_pll_clk_pd, 0x0, BIT(22), 0);
+static CCU_GATE(CLK_IOPMP_VOSYS_DPU_PCLK, iopmp_vosys_dpu_pclk,
+ "iopmp-vosys-dpu-pclk", video_pll_clk_pd, 0x0, BIT(23), 0);
+static CCU_GATE(CLK_IOPMP_VOSYS_DPU1_PCLK, iopmp_vosys_dpu1_pclk,
+ "iopmp-vosys-dpu1-pclk", video_pll_clk_pd, 0x0, BIT(24), 0);
+static CCU_GATE(CLK_IOPMP_VOSYS_GPU_PCLK, iopmp_vosys_gpu_pclk,
+ "iopmp-vosys-gpu-pclk", video_pll_clk_pd, 0x0, BIT(25), 0);
+static CCU_GATE(CLK_IOPMP_DPU1_ACLK, iopmp_dpu1_aclk, "iopmp-dpu1-aclk",
+ video_pll_clk_pd, 0x0, BIT(27), 0);
+static CCU_GATE(CLK_IOPMP_DPU_ACLK, iopmp_dpu_aclk, "iopmp-dpu-aclk",
+ video_pll_clk_pd, 0x0, BIT(28), 0);
+static CCU_GATE(CLK_IOPMP_GPU_ACLK, iopmp_gpu_aclk, "iopmp-gpu-aclk",
+ video_pll_clk_pd, 0x0, BIT(29), 0);
+static CCU_GATE(CLK_MIPIDSI0_PIXCLK, mipi_dsi0_pixclk, "mipi-dsi0-pixclk",
+ video_pll_clk_pd, 0x0, BIT(30), 0);
+static CCU_GATE(CLK_MIPIDSI1_PIXCLK, mipi_dsi1_pixclk, "mipi-dsi1-pixclk",
+ video_pll_clk_pd, 0x0, BIT(31), 0);
+static CCU_GATE(CLK_HDMI_PIXCLK, hdmi_pixclk, "hdmi-pixclk", video_pll_clk_pd,
+ 0x4, BIT(0), 0);
+
static CLK_FIXED_FACTOR_HW(gmac_pll_clk_100m, "gmac-pll-clk-100m",
&gmac_pll_clk.common.hw, 10, 1, 0);
@@ -963,7 +1024,38 @@ static struct ccu_common *th1520_gate_clks[] = {
&sram3_clk.common,
};
-#define NR_CLKS (CLK_UART_SCLK + 1)
+static struct ccu_common *th1520_vo_gate_clks[] = {
+ &axi4_vo_aclk.common,
+ &gpu_core_clk.common,
+ &gpu_cfg_aclk.common,
+ &dpu0_pixelclk.common,
+ &dpu1_pixelclk.common,
+ &dpu_hclk.common,
+ &dpu_aclk.common,
+ &dpu_cclk.common,
+ &hdmi_sfr_clk.common,
+ &hdmi_pclk.common,
+ &hdmi_cec_clk.common,
+ &mipi_dsi0_pclk.common,
+ &mipi_dsi1_pclk.common,
+ &mipi_dsi0_cfg_clk.common,
+ &mipi_dsi1_cfg_clk.common,
+ &mipi_dsi0_refclk.common,
+ &mipi_dsi1_refclk.common,
+ &hdmi_i2s_clk.common,
+ &x2h_dpu1_aclk.common,
+ &x2h_dpu_aclk.common,
+ &axi4_vo_pclk.common,
+ &iopmp_vosys_dpu_pclk.common,
+ &iopmp_vosys_dpu1_pclk.common,
+ &iopmp_vosys_gpu_pclk.common,
+ &iopmp_dpu1_aclk.common,
+ &iopmp_dpu_aclk.common,
+ &iopmp_gpu_aclk.common,
+ &mipi_dsi0_pixclk.common,
+ &mipi_dsi1_pixclk.common,
+ &hdmi_pixclk.common
+};
static const struct regmap_config th1520_clk_regmap_config = {
.reg_bits = 32,
@@ -972,8 +1064,44 @@ static const struct regmap_config th1520_clk_regmap_config = {
.fast_io = true,
};
+struct th1520_plat_data {
+ struct ccu_common **th1520_pll_clks;
+ struct ccu_common **th1520_div_clks;
+ struct ccu_common **th1520_mux_clks;
+ struct ccu_common **th1520_gate_clks;
+
+ int nr_clks;
+ int nr_pll_clks;
+ int nr_div_clks;
+ int nr_mux_clks;
+ int nr_gate_clks;
+};
+
+static const struct th1520_plat_data th1520_ap_platdata = {
+ .th1520_pll_clks = th1520_pll_clks,
+ .th1520_div_clks = th1520_div_clks,
+ .th1520_mux_clks = th1520_mux_clks,
+ .th1520_gate_clks = th1520_gate_clks,
+
+ .nr_clks = CLK_UART_SCLK + 1,
+
+ .nr_pll_clks = ARRAY_SIZE(th1520_pll_clks),
+ .nr_div_clks = ARRAY_SIZE(th1520_div_clks),
+ .nr_mux_clks = ARRAY_SIZE(th1520_mux_clks),
+ .nr_gate_clks = ARRAY_SIZE(th1520_gate_clks),
+};
+
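+/* The VO (video output) CCU exposes gate clocks only. */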
+static const struct th1520_plat_data th1520_vo_platdata = {
+ .th1520_gate_clks = th1520_vo_gate_clks,
+
+ .nr_clks = CLK_HDMI_PIXCLK + 1,
+
+ .nr_gate_clks = ARRAY_SIZE(th1520_vo_gate_clks),
+};
+
static int th1520_clk_probe(struct platform_device *pdev)
{
+ const struct th1520_plat_data *plat_data;
struct device *dev = &pdev->dev;
struct clk_hw_onecell_data *priv;
@@ -982,11 +1110,16 @@ static int th1520_clk_probe(struct platform_device *pdev)
struct clk_hw *hw;
int ret, i;
- priv = devm_kzalloc(dev, struct_size(priv, hws, NR_CLKS), GFP_KERNEL);
+ plat_data = device_get_match_data(&pdev->dev);
+ if (!plat_data)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "No device match data found\n");
+
+ priv = devm_kzalloc(dev, struct_size(priv, hws, plat_data->nr_clks), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->num = NR_CLKS;
+ priv->num = plat_data->nr_clks;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -996,35 +1129,35 @@ static int th1520_clk_probe(struct platform_device *pdev)
if (IS_ERR(map))
return PTR_ERR(map);
- for (i = 0; i < ARRAY_SIZE(th1520_pll_clks); i++) {
- struct ccu_pll *cp = hw_to_ccu_pll(&th1520_pll_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_pll_clks; i++) {
+ struct ccu_pll *cp = hw_to_ccu_pll(&plat_data->th1520_pll_clks[i]->hw);
- th1520_pll_clks[i]->map = map;
+ plat_data->th1520_pll_clks[i]->map = map;
- ret = devm_clk_hw_register(dev, &th1520_pll_clks[i]->hw);
+ ret = devm_clk_hw_register(dev, &plat_data->th1520_pll_clks[i]->hw);
if (ret)
return ret;
priv->hws[cp->common.clkid] = &cp->common.hw;
}
- for (i = 0; i < ARRAY_SIZE(th1520_div_clks); i++) {
- struct ccu_div *cd = hw_to_ccu_div(&th1520_div_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_div_clks; i++) {
+ struct ccu_div *cd = hw_to_ccu_div(&plat_data->th1520_div_clks[i]->hw);
- th1520_div_clks[i]->map = map;
+ plat_data->th1520_div_clks[i]->map = map;
- ret = devm_clk_hw_register(dev, &th1520_div_clks[i]->hw);
+ ret = devm_clk_hw_register(dev, &plat_data->th1520_div_clks[i]->hw);
if (ret)
return ret;
priv->hws[cd->common.clkid] = &cd->common.hw;
}
- for (i = 0; i < ARRAY_SIZE(th1520_mux_clks); i++) {
- struct ccu_mux *cm = hw_to_ccu_mux(&th1520_mux_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_mux_clks; i++) {
+ struct ccu_mux *cm = hw_to_ccu_mux(&plat_data->th1520_mux_clks[i]->hw);
const struct clk_init_data *init = cm->common.hw.init;
- th1520_mux_clks[i]->map = map;
+ plat_data->th1520_mux_clks[i]->map = map;
hw = devm_clk_hw_register_mux_parent_data_table(dev,
init->name,
init->parent_data,
@@ -1040,10 +1173,10 @@ static int th1520_clk_probe(struct platform_device *pdev)
priv->hws[cm->common.clkid] = hw;
}
- for (i = 0; i < ARRAY_SIZE(th1520_gate_clks); i++) {
- struct ccu_gate *cg = hw_to_ccu_gate(&th1520_gate_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_gate_clks; i++) {
+ struct ccu_gate *cg = hw_to_ccu_gate(&plat_data->th1520_gate_clks[i]->hw);
- th1520_gate_clks[i]->map = map;
+ plat_data->th1520_gate_clks[i]->map = map;
hw = devm_clk_hw_register_gate_parent_data(dev,
cg->common.hw.init->name,
@@ -1057,19 +1190,21 @@ static int th1520_clk_probe(struct platform_device *pdev)
priv->hws[cg->common.clkid] = hw;
}
- ret = devm_clk_hw_register(dev, &osc12m_clk.hw);
- if (ret)
- return ret;
- priv->hws[CLK_OSC12M] = &osc12m_clk.hw;
+ if (plat_data == &th1520_ap_platdata) {
+ ret = devm_clk_hw_register(dev, &osc12m_clk.hw);
+ if (ret)
+ return ret;
+ priv->hws[CLK_OSC12M] = &osc12m_clk.hw;
- ret = devm_clk_hw_register(dev, &gmac_pll_clk_100m.hw);
- if (ret)
- return ret;
- priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw;
+ ret = devm_clk_hw_register(dev, &gmac_pll_clk_100m.hw);
+ if (ret)
+ return ret;
+ priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw;
- ret = devm_clk_hw_register(dev, &emmc_sdio_ref_clk.hw);
- if (ret)
- return ret;
+ ret = devm_clk_hw_register(dev, &emmc_sdio_ref_clk.hw);
+ if (ret)
+ return ret;
+ }
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, priv);
if (ret)
@@ -1081,6 +1216,11 @@ static int th1520_clk_probe(struct platform_device *pdev)
static const struct of_device_id th1520_clk_match[] = {
{
.compatible = "thead,th1520-clk-ap",
+ .data = &th1520_ap_platdata,
+ },
+ {
+ .compatible = "thead,th1520-clk-vo",
+ .data = &th1520_vo_platdata,
},
{ /* sentinel */ },
};
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 487c85259967..645f517a1ac2 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -73,6 +73,14 @@ config DW_APB_TIMER_OF
select DW_APB_TIMER
select TIMER_OF
+config ECONET_EN751221_TIMER
+ bool "EcoNet EN751221 High Precision Timer" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
+ select TIMER_OF
+ help
+	  Support for the CPU timer found on EcoNet MIPS-based SoCs.
+
config FTTMR010_TIMER
bool "Faraday Technology timer driver" if COMPILE_TEST
depends on HAS_IOMEM
@@ -437,8 +445,8 @@ config ATMEL_ST
config ATMEL_TCB_CLKSRC
bool "Atmel TC Block timer driver" if COMPILE_TEST
- depends on ARM && HAS_IOMEM
- select TIMER_OF if OF
+ depends on ARM && OF && HAS_IOMEM
+ select TIMER_OF
help
Support for Timer Counter Blocks on Atmel SoCs.
@@ -763,4 +771,12 @@ config RALINK_TIMER
Enables support for system tick counter present on
Ralink SoCs RT3352 and MT7620.
+config NXP_STM_TIMER
+ bool "NXP System Timer Module driver"
+ depends on ARCH_S32 || COMPILE_TEST
+ select CLKSRC_MMIO
+ help
+	  Enables support for the NXP System Timer Module found in the
+	  NXP S32G platform series.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 43ef16a4efa6..205bf3b0a8f3 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DAVINCI_TIMER) += timer-davinci.o
obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o
+obj-$(CONFIG_ECONET_EN751221_TIMER) += timer-econet-en751221.o
obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
obj-$(CONFIG_OMAP_DM_SYSTIMER) += timer-ti-dm-systimer.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
@@ -92,3 +93,4 @@ obj-$(CONFIG_GXP_TIMER) += timer-gxp.o
obj-$(CONFIG_CLKSRC_LOONGSON1_PWM) += timer-loongson1-pwm.o
obj-$(CONFIG_EP93XX_TIMER) += timer-ep93xx.o
obj-$(CONFIG_RALINK_TIMER) += timer-ralink.o
+obj-$(CONFIG_NXP_STM_TIMER) += timer-nxp-stm.o
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 3fcbd02b2483..2089aeaae225 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -225,7 +225,6 @@ err_free:
TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
-#if defined(CONFIG_ARCH_RZG2L) || defined(CONFIG_ARCH_R9A09G057)
static int __init ostm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -233,7 +232,7 @@ static int __init ostm_probe(struct platform_device *pdev)
return ostm_init(dev->of_node);
}
-static const struct of_device_id ostm_of_table[] = {
+static const struct of_device_id __maybe_unused ostm_of_table[] = {
{ .compatible = "renesas,ostm", },
{ /* sentinel */ }
};
@@ -246,4 +245,3 @@ static struct platform_driver ostm_device_driver = {
},
};
builtin_platform_driver_probe(ostm_device_driver, ostm_probe);
-#endif
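Replacing the #ifdef guard with __maybe_unused keeps ostm_of_table compiled and type-checked in every configuration, while silencing -Wunused-const-variable when nothing references it. The idiom in isolation (a sketch with a hypothetical compatible string):

    /* Kept in all configs; the attribute suppresses the unused warning. */
    static const struct of_device_id __maybe_unused demo_of_table[] = {
            { .compatible = "vendor,demo-ip" },
            { /* sentinel */ }
    };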
diff --git a/drivers/clocksource/timer-econet-en751221.c b/drivers/clocksource/timer-econet-en751221.c
new file mode 100644
index 000000000000..3b449fdaafee
--- /dev/null
+++ b/drivers/clocksource/timer-econet-en751221.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Timer present on EcoNet EN75xx MIPS based SoCs.
+ *
+ * Copyright (C) 2025 by Caleb James DeLisle <cjd@cjdns.fr>
+ */
+
+#include <linux/io.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/sched_clock.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/cpuhotplug.h>
+#include <linux/clk.h>
+
+#define ECONET_BITS 32
+#define ECONET_MIN_DELTA 0x00001000
+#define ECONET_MAX_DELTA GENMASK(ECONET_BITS - 2, 0)
+/* 34Kc hardware has 1 block and 1004Kc has 2. */
+#define ECONET_NUM_BLOCKS DIV_ROUND_UP(NR_CPUS, 2)
+
+static struct {
+ void __iomem *membase[ECONET_NUM_BLOCKS];
+ u32 freq_hz;
+} econet_timer __ro_after_init;
+
+static DEFINE_PER_CPU(struct clock_event_device, econet_timer_pcpu);
+
+/*
+ * Each memory block has two timers; the register order is:
+ * CTL, CMR0, CNT0, CMR1, CNT1
+ */
+static inline void __iomem *reg_ctl(u32 timer_n)
+{
+ return econet_timer.membase[timer_n >> 1];
+}
+
+static inline void __iomem *reg_compare(u32 timer_n)
+{
+ return econet_timer.membase[timer_n >> 1] + (timer_n & 1) * 0x08 + 0x04;
+}
+
+static inline void __iomem *reg_count(u32 timer_n)
+{
+ return econet_timer.membase[timer_n >> 1] + (timer_n & 1) * 0x08 + 0x08;
+}
+
+static inline u32 ctl_bit_enabled(u32 timer_n)
+{
+ return 1U << (timer_n & 1);
+}
+
+static inline u32 ctl_bit_pending(u32 timer_n)
+{
+ return 1U << ((timer_n & 1) + 16);
+}
+
+static bool cevt_is_pending(int cpu_id)
+{
+ return ioread32(reg_ctl(cpu_id)) & ctl_bit_pending(cpu_id);
+}
+
+static irqreturn_t cevt_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *dev = this_cpu_ptr(&econet_timer_pcpu);
+ int cpu = cpumask_first(dev->cpumask);
+
+ /*
+ * Each VPE has its own events, so this will only happen
+ * on a spurious interrupt.
+ */
+ if (!cevt_is_pending(cpu))
+ return IRQ_NONE;
+
+ iowrite32(ioread32(reg_count(cpu)), reg_compare(cpu));
+ dev->event_handler(dev);
+ return IRQ_HANDLED;
+}
+
+static int cevt_set_next_event(ulong delta, struct clock_event_device *dev)
+{
+ u32 next;
+ int cpu;
+
+ cpu = cpumask_first(dev->cpumask);
+ next = ioread32(reg_count(cpu)) + delta;
+ iowrite32(next, reg_compare(cpu));
+
+ if ((s32)(next - ioread32(reg_count(cpu))) < ECONET_MIN_DELTA / 2)
+ return -ETIME;
+
+ return 0;
+}
+
+static int cevt_init_cpu(uint cpu)
+{
+ struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, cpu);
+ u32 reg;
+
+ pr_debug("%s: Setting up clockevent for CPU %d\n", cd->name, cpu);
+
+ reg = ioread32(reg_ctl(cpu)) | ctl_bit_enabled(cpu);
+ iowrite32(reg, reg_ctl(cpu));
+
+ enable_percpu_irq(cd->irq, IRQ_TYPE_NONE);
+
+ /* Do this last because it synchronously configures the timer */
+ clockevents_config_and_register(cd, econet_timer.freq_hz,
+ ECONET_MIN_DELTA, ECONET_MAX_DELTA);
+
+ return 0;
+}
+
+static u64 notrace sched_clock_read(void)
+{
+ /* Always read from clock zero no matter the CPU */
+ return (u64)ioread32(reg_count(0));
+}
+
+/* Init */
+
+static void __init cevt_dev_init(uint cpu)
+{
+ iowrite32(0, reg_count(cpu));
+ iowrite32(U32_MAX, reg_compare(cpu));
+}
+
+static int __init cevt_init(struct device_node *np)
+{
+ int i, irq, ret;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+ pr_err("%pOFn: irq_of_parse_and_map failed", np);
+ return -EINVAL;
+ }
+
+ ret = request_percpu_irq(irq, cevt_interrupt, np->name, &econet_timer_pcpu);
+ if (ret < 0) {
+ pr_err("%pOFn: IRQ %d setup failed (%d)\n", np, irq, ret);
+ goto err_unmap_irq;
+ }
+
+ for_each_possible_cpu(i) {
+ struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, i);
+
+ cd->rating = 310;
+ cd->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_C3STOP |
+ CLOCK_EVT_FEAT_PERCPU;
+ cd->set_next_event = cevt_set_next_event;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(i);
+ cd->name = np->name;
+
+ cevt_dev_init(i);
+ }
+
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "clockevents/econet/timer:starting",
+ cevt_init_cpu, NULL);
+ return 0;
+
+err_unmap_irq:
+ irq_dispose_mapping(irq);
+ return ret;
+}
+
+static int __init timer_init(struct device_node *np)
+{
+ int num_blocks = DIV_ROUND_UP(num_possible_cpus(), 2);
+ struct clk *clk;
+ int ret;
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("%pOFn: Failed to get CPU clock from DT %ld\n", np, PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+
+ econet_timer.freq_hz = clk_get_rate(clk);
+
+ for (int i = 0; i < num_blocks; i++) {
+ econet_timer.membase[i] = of_iomap(np, i);
+ if (!econet_timer.membase[i]) {
+ pr_err("%pOFn: failed to map register [%d]\n", np, i);
+ return -ENXIO;
+ }
+ }
+
+ /* For clocksource purposes always read clock zero, whatever the CPU */
+ ret = clocksource_mmio_init(reg_count(0), np->name,
+ econet_timer.freq_hz, 301, ECONET_BITS,
+ clocksource_mmio_readl_up);
+ if (ret) {
+ pr_err("%pOFn: clocksource_mmio_init failed: %d", np, ret);
+ return ret;
+ }
+
+ ret = cevt_init(np);
+ if (ret < 0)
+ return ret;
+
+ sched_clock_register(sched_clock_read, ECONET_BITS,
+ econet_timer.freq_hz);
+
+ pr_info("%pOFn: using %u.%03u MHz high precision timer\n", np,
+ econet_timer.freq_hz / 1000000,
+ (econet_timer.freq_hz / 1000) % 1000);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(econet_timer_hpt, "econet,en751221-timer", timer_init);
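The register helpers above encode one shared CTL register per block followed by the CMR/CNT pair of each of the two channels (offsets 0x00, 0x04, 0x08, 0x0C, 0x10). A worked example of the address math, purely illustrative:

    /*
     * For timer_n = 3: block = 3 >> 1 = 1, channel = 3 & 1 = 1, so
     *   reg_ctl(3)     = membase[1]           shared CTL
     *   reg_compare(3) = membase[1] + 0x0C    CMR1
     *   reg_count(3)   = membase[1] + 0x10    CNT1
     * and ctl_bit_enabled(3) = BIT(1), ctl_bit_pending(3) = BIT(17).
     */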
diff --git a/drivers/clocksource/timer-nxp-stm.c b/drivers/clocksource/timer-nxp-stm.c
new file mode 100644
index 000000000000..d7ccf9001729
--- /dev/null
+++ b/drivers/clocksource/timer-nxp-stm.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2018,2021-2025 NXP
+ *
+ * NXP System Timer Module:
+ *
+ * STM supports commonly required system and application software
+ * timing functions. STM includes a 32-bit count-up timer and four
+ * 32-bit compare channels with a separate interrupt source for each
+ * channel. The timer is driven by the STM module clock divided by an
+ * 8-bit prescale value (1 to 256). It has ability to stop the timer
+ * in Debug mode
+ */
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/sched_clock.h>
+#include <linux/units.h>
+
+#define STM_CR(__base) (__base)
+
+#define STM_CR_TEN BIT(0)
+#define STM_CR_FRZ BIT(1)
+#define STM_CR_CPS_OFFSET 8u
+#define STM_CR_CPS_MASK GENMASK(15, STM_CR_CPS_OFFSET)
+
+#define STM_CNT(__base) ((__base) + 0x04)
+
+#define STM_CCR0(__base) ((__base) + 0x10)
+#define STM_CCR1(__base) ((__base) + 0x20)
+#define STM_CCR2(__base) ((__base) + 0x30)
+#define STM_CCR3(__base) ((__base) + 0x40)
+
+#define STM_CCR_CEN BIT(0)
+
+#define STM_CIR0(__base) ((__base) + 0x14)
+#define STM_CIR1(__base) ((__base) + 0x24)
+#define STM_CIR2(__base) ((__base) + 0x34)
+#define STM_CIR3(__base) ((__base) + 0x44)
+
+#define STM_CIR_CIF BIT(0)
+
+#define STM_CMP0(__base) ((__base) + 0x18)
+#define STM_CMP1(__base) ((__base) + 0x28)
+#define STM_CMP2(__base) ((__base) + 0x38)
+#define STM_CMP3(__base) ((__base) + 0x48)
+
+#define STM_ENABLE_MASK (STM_CR_FRZ | STM_CR_TEN)
+
+struct stm_timer {
+ void __iomem *base;
+ unsigned long rate;
+ unsigned long delta;
+ unsigned long counter;
+ struct clock_event_device ced;
+ struct clocksource cs;
+ atomic_t refcnt;
+};
+
+static DEFINE_PER_CPU(struct stm_timer *, stm_timers);
+
+static struct stm_timer *stm_sched_clock;
+
+/*
+ * Global counter tracking how many STM instances have been initialized
+ */
+static int stm_instances;
+
+/*
+ * This global lock prevents races on stm_instances when the
+ * driver is probed asynchronously (the ASYNC probe option)
+ */
+static DEFINE_MUTEX(stm_instances_lock);
+
+DEFINE_GUARD(stm_instances, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
+
+static struct stm_timer *cs_to_stm(struct clocksource *cs)
+{
+ return container_of(cs, struct stm_timer, cs);
+}
+
+static struct stm_timer *ced_to_stm(struct clock_event_device *ced)
+{
+ return container_of(ced, struct stm_timer, ced);
+}
+
+static u64 notrace nxp_stm_read_sched_clock(void)
+{
+ return readl(STM_CNT(stm_sched_clock->base));
+}
+
+static u32 nxp_stm_clocksource_getcnt(struct stm_timer *stm_timer)
+{
+ return readl(STM_CNT(stm_timer->base));
+}
+
+static void nxp_stm_clocksource_setcnt(struct stm_timer *stm_timer, u32 cnt)
+{
+ writel(cnt, STM_CNT(stm_timer->base));
+}
+
+static u64 nxp_stm_clocksource_read(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ return (u64)nxp_stm_clocksource_getcnt(stm_timer);
+}
+
+static void nxp_stm_module_enable(struct stm_timer *stm_timer)
+{
+ u32 reg;
+
+ reg = readl(STM_CR(stm_timer->base));
+
+ reg |= STM_ENABLE_MASK;
+
+ writel(reg, STM_CR(stm_timer->base));
+}
+
+static void nxp_stm_module_disable(struct stm_timer *stm_timer)
+{
+ u32 reg;
+
+ reg = readl(STM_CR(stm_timer->base));
+
+ reg &= ~STM_ENABLE_MASK;
+
+ writel(reg, STM_CR(stm_timer->base));
+}
+
+static void nxp_stm_module_put(struct stm_timer *stm_timer)
+{
+ if (atomic_dec_and_test(&stm_timer->refcnt))
+ nxp_stm_module_disable(stm_timer);
+}
+
+static void nxp_stm_module_get(struct stm_timer *stm_timer)
+{
+ if (atomic_inc_return(&stm_timer->refcnt) == 1)
+ nxp_stm_module_enable(stm_timer);
+}
+
+static int nxp_stm_clocksource_enable(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ nxp_stm_module_get(stm_timer);
+
+ return 0;
+}
+
+static void nxp_stm_clocksource_disable(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ nxp_stm_module_put(stm_timer);
+}
+
+static void nxp_stm_clocksource_suspend(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ nxp_stm_clocksource_disable(cs);
+ stm_timer->counter = nxp_stm_clocksource_getcnt(stm_timer);
+}
+
+static void nxp_stm_clocksource_resume(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ nxp_stm_clocksource_setcnt(stm_timer, stm_timer->counter);
+ nxp_stm_clocksource_enable(cs);
+}
+
+static void __init devm_clocksource_unregister(void *data)
+{
+ struct stm_timer *stm_timer = data;
+
+ clocksource_unregister(&stm_timer->cs);
+}
+
+static int __init nxp_stm_clocksource_init(struct device *dev, struct stm_timer *stm_timer,
+ const char *name, void __iomem *base, struct clk *clk)
+{
+ int ret;
+
+ stm_timer->base = base;
+ stm_timer->rate = clk_get_rate(clk);
+
+ stm_timer->cs.name = name;
+ stm_timer->cs.rating = 460;
+ stm_timer->cs.read = nxp_stm_clocksource_read;
+ stm_timer->cs.enable = nxp_stm_clocksource_enable;
+ stm_timer->cs.disable = nxp_stm_clocksource_disable;
+ stm_timer->cs.suspend = nxp_stm_clocksource_suspend;
+ stm_timer->cs.resume = nxp_stm_clocksource_resume;
+ stm_timer->cs.mask = CLOCKSOURCE_MASK(32);
+ stm_timer->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ ret = clocksource_register_hz(&stm_timer->cs, stm_timer->rate);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, devm_clocksource_unregister, stm_timer);
+ if (ret) {
+ clocksource_unregister(&stm_timer->cs);
+ return ret;
+ }
+
+ stm_sched_clock = stm_timer;
+
+ sched_clock_register(nxp_stm_read_sched_clock, 32, stm_timer->rate);
+
+ dev_dbg(dev, "Registered clocksource %s\n", name);
+
+ return 0;
+}
+
+static int nxp_stm_clockevent_read_counter(struct stm_timer *stm_timer)
+{
+ return readl(STM_CNT(stm_timer->base));
+}
+
+static void nxp_stm_clockevent_disable(struct stm_timer *stm_timer)
+{
+ writel(0, STM_CCR0(stm_timer->base));
+}
+
+static void nxp_stm_clockevent_enable(struct stm_timer *stm_timer)
+{
+ writel(STM_CCR_CEN, STM_CCR0(stm_timer->base));
+}
+
+static int nxp_stm_clockevent_shutdown(struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+
+ nxp_stm_clockevent_disable(stm_timer);
+
+ return 0;
+}
+
+static int nxp_stm_clockevent_set_next_event(unsigned long delta, struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+ u32 val;
+
+ nxp_stm_clockevent_disable(stm_timer);
+
+ stm_timer->delta = delta;
+
+ val = nxp_stm_clockevent_read_counter(stm_timer) + delta;
+
+ writel(val, STM_CMP0(stm_timer->base));
+
+ /*
+ * The counter is shared across the channels and cannot be
+ * stopped while we are setting the next event. If the delta
+ * is very small, the counter may pass the computed 'val'
+ * before we program it; the min_delta value specified when
+ * registering the clockevent prevents that. The second case
+ * is the counter wrapping while we compute 'val' and before
+ * we write the comparator register: we re-read the counter,
+ * check whether we went back in time, and abort with -ETIME.
+ */
+ if (val > nxp_stm_clockevent_read_counter(stm_timer) + delta)
+ return -ETIME;
+
+ nxp_stm_clockevent_enable(stm_timer);
+
+ return 0;
+}
+
+static int nxp_stm_clockevent_set_periodic(struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+
+ return nxp_stm_clockevent_set_next_event(stm_timer->rate, ced);
+}
+
+static void nxp_stm_clockevent_suspend(struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+
+ nxp_stm_module_put(stm_timer);
+}
+
+static void nxp_stm_clockevent_resume(struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+
+ nxp_stm_module_get(stm_timer);
+}
+
+static int __init nxp_stm_clockevent_per_cpu_init(struct device *dev, struct stm_timer *stm_timer,
+ const char *name, void __iomem *base, int irq,
+ struct clk *clk, int cpu)
+{
+ stm_timer->base = base;
+ stm_timer->rate = clk_get_rate(clk);
+
+ stm_timer->ced.name = name;
+ stm_timer->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ stm_timer->ced.set_state_shutdown = nxp_stm_clockevent_shutdown;
+ stm_timer->ced.set_state_periodic = nxp_stm_clockevent_set_periodic;
+ stm_timer->ced.set_next_event = nxp_stm_clockevent_set_next_event;
+ stm_timer->ced.suspend = nxp_stm_clockevent_suspend;
+ stm_timer->ced.resume = nxp_stm_clockevent_resume;
+ stm_timer->ced.cpumask = cpumask_of(cpu);
+ stm_timer->ced.rating = 460;
+ stm_timer->ced.irq = irq;
+
+ per_cpu(stm_timers, cpu) = stm_timer;
+
+ nxp_stm_module_get(stm_timer);
+
+ dev_dbg(dev, "Initialized per cpu clockevent name=%s, irq=%d, cpu=%d\n", name, irq, cpu);
+
+ return 0;
+}
+
+static int nxp_stm_clockevent_starting_cpu(unsigned int cpu)
+{
+ struct stm_timer *stm_timer = per_cpu(stm_timers, cpu);
+ int ret;
+
+ if (WARN_ON(!stm_timer))
+ return -EFAULT;
+
+ ret = irq_force_affinity(stm_timer->ced.irq, cpumask_of(cpu));
+ if (ret)
+ return ret;
+
+ /*
+ * Timing measurements show that reading the counter register
+ * and writing the comparator register take at most 1100 ns at
+ * a 133 MHz clock rate. The timer must be set above this
+ * value; to be safe we set the minimum value to 2000 ns,
+ * i.e. 2 us.
+ *
+ * minimum ticks = (rate / MICRO) * 2
+ */
+ clockevents_config_and_register(&stm_timer->ced, stm_timer->rate,
+ (stm_timer->rate / MICRO) * 2, ULONG_MAX);
+
+ return 0;
+}
+
+static irqreturn_t nxp_stm_module_interrupt(int irq, void *dev_id)
+{
+ struct stm_timer *stm_timer = dev_id;
+ struct clock_event_device *ced = &stm_timer->ced;
+ u32 val;
+
+ /*
+ * The interrupt is shared across the channels in the module,
+ * but this driver configures only one channel, so there is no
+ * point in testing the interrupt flags first; we can directly
+ * clear the channel 0 irq flag register.
+ */
+ writel(STM_CIR_CIF, STM_CIR0(stm_timer->base));
+
+ /*
+ * Update STM_CMP value using the counter value
+ */
+ val = nxp_stm_clockevent_read_counter(stm_timer) + stm_timer->delta;
+
+ writel(val, STM_CMP0(stm_timer->base));
+
+ /*
+ * The STM hardware has no one-shot mode: it raises the
+ * interrupt and keeps counting, so in ONESHOT mode software
+ * must disable the channel to stop the compare loop.
+ */
+ if (likely(clockevent_state_oneshot(ced)))
+ nxp_stm_clockevent_disable(stm_timer);
+
+ ced->event_handler(ced);
+
+ return IRQ_HANDLED;
+}
+
+static int __init nxp_stm_timer_probe(struct platform_device *pdev)
+{
+ struct stm_timer *stm_timer;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const char *name = of_node_full_name(np);
+ struct clk *clk;
+ void __iomem *base;
+ int irq, ret;
+
+ /*
+ * The device tree may describe multiple STM nodes, which makes
+ * this driver a good candidate for asynchronous probing. It is
+ * still unclear whether the time framework correctly handles
+ * parallel loading of the timers, but at least this driver is
+ * ready to support the option.
+ */
+ guard(stm_instances)(&stm_instances_lock);
+
+ /*
+ * The S32Gx are SoCs featuring a diverse set of cores. Linux
+ * is expected to run on Cortex-A53 cores, while other
+ * software stacks will operate on Cortex-M cores. The number
+ * of STM instances has been sized to include at most one
+ * instance per core.
+ *
+ * As we need a clocksource and a clockevent per CPU, we simply
+ * initialize one of each per CPU along the way, which keeps
+ * the resulting code simpler.
+ *
+ * However, if the device tree describes more STM instances
+ * than there are cores, the extra instances are ignored.
+ */
+ if (stm_instances >= num_possible_cpus())
+ return 0;
+
+ base = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base), "Failed to iomap %pOFn\n", np);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Failed to get IRQ\n");
+
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Clock not found\n");
+
+ stm_timer = devm_kzalloc(dev, sizeof(*stm_timer), GFP_KERNEL);
+ if (!stm_timer)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, irq, nxp_stm_module_interrupt,
+ IRQF_TIMER | IRQF_NOBALANCING, name, stm_timer);
+ if (ret)
+ return dev_err_probe(dev, ret, "Unable to allocate interrupt line\n");
+
+ ret = nxp_stm_clocksource_init(dev, stm_timer, name, base, clk);
+ if (ret)
+ return ret;
+
+ /*
+ * Each probed STM becomes a per-CPU clockevent; until we have
+ * probed as many instances as there are CPUs on the system,
+ * we only do this partial initialization.
+ */
+ ret = nxp_stm_clockevent_per_cpu_init(dev, stm_timer, name,
+ base, irq, clk,
+ stm_instances);
+ if (ret)
+ return ret;
+
+ stm_instances++;
+
+ /*
+ * Once the number of probed STMs equals the number of
+ * available CPUs on the system, install the CPU hotplug
+ * callback to finish the initialization by registering
+ * the clockevents.
+ */
+ if (stm_instances == num_possible_cpus()) {
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "STM timer:starting",
+ nxp_stm_clockevent_starting_cpu, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id nxp_stm_of_match[] = {
+ { .compatible = "nxp,s32g2-stm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nxp_stm_of_match);
+
+static struct platform_driver nxp_stm_probe = {
+ .probe = nxp_stm_timer_probe,
+ .driver = {
+ .name = "nxp-stm",
+ .of_match_table = nxp_stm_of_match,
+ },
+};
+module_platform_driver(nxp_stm_probe);
+
+MODULE_DESCRIPTION("NXP System Timer Module driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clocksource/timer-tegra186.c b/drivers/clocksource/timer-tegra186.c
index 5d4cf5237a11..e5394f98a02e 100644
--- a/drivers/clocksource/timer-tegra186.c
+++ b/drivers/clocksource/timer-tegra186.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2019-2020 NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2019-2025 NVIDIA Corporation. All rights reserved.
*/
+#include <linux/bitfield.h>
#include <linux/clocksource.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -29,6 +30,7 @@
#define TMRSR 0x004
#define TMRSR_INTR_CLR BIT(30)
+#define TMRSR_PCV GENMASK(28, 0)
#define TMRCSSR 0x008
#define TMRCSSR_SRC_USEC (0 << 0)
@@ -45,6 +47,9 @@
#define WDTCR_TIMER_SOURCE_MASK 0xf
#define WDTCR_TIMER_SOURCE(x) ((x) & 0xf)
+#define WDTSR 0x004
+#define WDTSR_CURRENT_EXPIRATION_COUNT GENMASK(14, 12)
+
#define WDTCMDR 0x008
#define WDTCMDR_DISABLE_COUNTER BIT(1)
#define WDTCMDR_START_COUNTER BIT(0)
@@ -169,18 +174,6 @@ static void tegra186_wdt_enable(struct tegra186_wdt *wdt)
value &= ~WDTCR_PERIOD_MASK;
value |= WDTCR_PERIOD(1);
- /* enable local interrupt for WDT petting */
- value |= WDTCR_LOCAL_INT_ENABLE;
-
- /* enable local FIQ and remote interrupt for debug dump */
- if (0)
- value |= WDTCR_REMOTE_INT_ENABLE |
- WDTCR_LOCAL_FIQ_ENABLE;
-
- /* enable system debug reset (doesn't properly reboot) */
- if (0)
- value |= WDTCR_SYSTEM_DEBUG_RESET_ENABLE;
-
/* enable system POR reset */
value |= WDTCR_SYSTEM_POR_RESET_ENABLE;
@@ -234,12 +227,69 @@ static int tegra186_wdt_set_timeout(struct watchdog_device *wdd,
return 0;
}
+static unsigned int tegra186_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+ struct tegra186_wdt *wdt = to_tegra186_wdt(wdd);
+ u32 expiration, val;
+ u64 timeleft;
+
+ if (!watchdog_active(&wdt->base)) {
+ /* return zero if the watchdog timer is not activated. */
+ return 0;
+ }
+
+ /*
+ * Reset occurs on the fifth expiration of the
+ * watchdog timer and so when the watchdog timer is configured,
+ * the actual value programmed into the counter is 1/5 of the
+ * timeout value. Once the counter reaches 0, expiration count
+ * will be increased by 1 and the down counter restarts.
+ * Hence to get the time left before system reset we must
+ * combine 2 parts:
+ * 1. value of the current down counter
+ * 2. (number of counter expirations remaining) * (timeout/5)
+ */
+
+ /*
+ * Get the current number of counter expirations; it should be
+ * a value between 0 and 4.
+ */
+ val = readl_relaxed(wdt->regs + WDTSR);
+ expiration = FIELD_GET(WDTSR_CURRENT_EXPIRATION_COUNT, val);
+ if (WARN_ON_ONCE(expiration > 4))
+ return 0;
+
+ /* Get the current counter value in microsecond. */
+ val = readl_relaxed(wdt->tmr->regs + TMRSR);
+ timeleft = FIELD_GET(TMRSR_PCV, val);
+
+ /*
+ * Calculate the time remaining by adding the time for the
+ * counter value to the time of the counter expirations that
+ * remain.
+ */
+ timeleft += (((u64)wdt->base.timeout * USEC_PER_SEC) / 5) * (4 - expiration);
+
+ /*
+ * Convert the remaining time to seconds, rounding to the
+ * nearest second. Cast u64 to u32 under the assumption
+ * that no overflow happens when converting to seconds.
+ */
+ timeleft = DIV_ROUND_CLOSEST_ULL(timeleft, USEC_PER_SEC);
+
+ if (WARN_ON_ONCE(timeleft > U32_MAX))
+ return U32_MAX;
+
+ return lower_32_bits(timeleft);
+}
+
static const struct watchdog_ops tegra186_wdt_ops = {
.owner = THIS_MODULE,
.start = tegra186_wdt_start,
.stop = tegra186_wdt_stop,
.ping = tegra186_wdt_ping,
.set_timeout = tegra186_wdt_set_timeout,
+ .get_timeleft = tegra186_wdt_get_timeleft,
};
static struct tegra186_wdt *tegra186_wdt_create(struct tegra186_timer *tegra,
@@ -365,23 +415,10 @@ static int tegra186_timer_usec_init(struct tegra186_timer *tegra)
return clocksource_register_hz(&tegra->usec, USEC_PER_SEC);
}
-static irqreturn_t tegra186_timer_irq(int irq, void *data)
-{
- struct tegra186_timer *tegra = data;
-
- if (watchdog_active(&tegra->wdt->base)) {
- tegra186_wdt_disable(tegra->wdt);
- tegra186_wdt_enable(tegra->wdt);
- }
-
- return IRQ_HANDLED;
-}
-
static int tegra186_timer_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tegra186_timer *tegra;
- unsigned int irq;
int err;
tegra = devm_kzalloc(dev, sizeof(*tegra), GFP_KERNEL);
@@ -400,8 +437,6 @@ static int tegra186_timer_probe(struct platform_device *pdev)
if (err < 0)
return err;
- irq = err;
-
/* create a watchdog using a preconfigured timer */
tegra->wdt = tegra186_wdt_create(tegra, 0);
if (IS_ERR(tegra->wdt)) {
@@ -428,17 +463,8 @@ static int tegra186_timer_probe(struct platform_device *pdev)
goto unregister_osc;
}
- err = devm_request_irq(dev, irq, tegra186_timer_irq, 0,
- "tegra186-timer", tegra);
- if (err < 0) {
- dev_err(dev, "failed to request IRQ#%u: %d\n", irq, err);
- goto unregister_usec;
- }
-
return 0;
-unregister_usec:
- clocksource_unregister(&tegra->usec);
unregister_osc:
clocksource_unregister(&tegra->osc);
unregister_tsc:
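A numeric check of the tegra186_wdt_get_timeleft() arithmetic: with a 40 s watchdog timeout the counter is programmed with timeout/5 = 8 s. If WDTSR reports expiration = 2 and TMRSR_PCV reads 3,000,000 us:

    /*
     * timeleft = 3,000,000                          current counter (us)
     *          + (40 * USEC_PER_SEC / 5) * (4 - 2)  remaining expirations
     *          = 3,000,000 + 16,000,000 = 19,000,000 us
     * DIV_ROUND_CLOSEST_ULL(19,000,000, USEC_PER_SEC) = 19 seconds left.
     */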
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index d64b07ec48e5..78702a08364f 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -217,6 +217,18 @@ config CPUFREQ_DT
If in doubt, say N.
+config CPUFREQ_DT_RUST
+ tristate "Rust based Generic DT based cpufreq driver"
+ depends on HAVE_CLK && OF && RUST
+ select CPUFREQ_DT_PLATDEV
+ select PM_OPP
+ help
+ This adds a generic DT-based cpufreq driver, written in Rust, for
+ frequency management. It supports both uniprocessor (UP) and symmetric
+ multiprocessor (SMP) systems.
+
+ If in doubt, say N.
+
config CPUFREQ_VIRT
tristate "Virtual cpufreq driver"
depends on GENERIC_ARCH_TOPOLOGY
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 22ab45209f9b..d38526b8e063 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o
obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
+obj-$(CONFIG_CPUFREQ_DT_RUST) += rcpufreq_dt.o
obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o
obj-$(CONFIG_CPUFREQ_VIRT) += virtual-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index d26b610e4f24..4f7f9201598d 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -79,11 +79,11 @@ static bool boost_state(unsigned int cpu)
case X86_VENDOR_INTEL:
case X86_VENDOR_CENTAUR:
case X86_VENDOR_ZHAOXIN:
- rdmsrl_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
+ rdmsrq_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
- rdmsrl_on_cpu(cpu, MSR_K7_HWCR, &msr);
+ rdmsrq_on_cpu(cpu, MSR_K7_HWCR, &msr);
return !(msr & MSR_K7_HWCR_CPB_DIS);
}
return false;
@@ -110,14 +110,14 @@ static int boost_set_msr(bool enable)
return -EINVAL;
}
- rdmsrl(msr_addr, val);
+ rdmsrq(msr_addr, val);
if (enable)
val &= ~msr_mask;
else
val |= msr_mask;
- wrmsrl(msr_addr, val);
+ wrmsrq(msr_addr, val);
return 0;
}
@@ -660,7 +660,7 @@ static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
nominal_perf = perf_caps.nominal_perf;
if (nominal_freq)
- *nominal_freq = perf_caps.nominal_freq;
+ *nominal_freq = perf_caps.nominal_freq * 1000;
if (!highest_perf || !nominal_perf) {
pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index e671bc7d1550..447b9aa5ce40 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -31,6 +31,8 @@
#include <acpi/cppc_acpi.h>
+#include <asm/msr.h>
+
#include "amd-pstate.h"
@@ -90,9 +92,9 @@ static int amd_pstate_ut_check_enabled(u32 index)
if (get_shared_mem())
return 0;
- ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
+ ret = rdmsrq_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
if (ret) {
- pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
+ pr_err("%s rdmsrq_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
return ret;
}
@@ -137,7 +139,7 @@ static int amd_pstate_ut_check_perf(u32 index)
lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
lowest_perf = cppc_perf.lowest_perf;
} else {
- ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
+ ret = rdmsrq_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
if (ret) {
pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
return ret;
@@ -242,25 +244,30 @@ static int amd_pstate_set_mode(enum amd_pstate_mode mode)
static int amd_pstate_ut_check_driver(u32 index)
{
enum amd_pstate_mode mode1, mode2 = AMD_PSTATE_DISABLE;
+ enum amd_pstate_mode orig_mode = amd_pstate_get_status();
+ int ret;
for (mode1 = AMD_PSTATE_DISABLE; mode1 < AMD_PSTATE_MAX; mode1++) {
- int ret = amd_pstate_set_mode(mode1);
+ ret = amd_pstate_set_mode(mode1);
if (ret)
return ret;
for (mode2 = AMD_PSTATE_DISABLE; mode2 < AMD_PSTATE_MAX; mode2++) {
if (mode1 == mode2)
continue;
ret = amd_pstate_set_mode(mode2);
- if (ret) {
- pr_err("%s: failed to update status for %s->%s\n", __func__,
- amd_pstate_get_mode_string(mode1),
- amd_pstate_get_mode_string(mode2));
- return ret;
- }
+ if (ret)
+ goto out;
}
}
- return 0;
+out:
+ if (ret)
+ pr_warn("%s: failed to update status for %s->%s: %d\n", __func__,
+ amd_pstate_get_mode_string(mode1),
+ amd_pstate_get_mode_string(mode2), ret);
+
+ amd_pstate_set_mode(orig_mode);
+ return ret;
}
static int __init amd_pstate_ut_init(void)
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index b961f3a3b580..f3477ab37742 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -197,7 +197,7 @@ static u8 msr_get_epp(struct amd_cpudata *cpudata)
u64 value;
int ret;
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
+ ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
if (ret < 0) {
pr_debug("Could not retrieve energy perf value (%d)\n", ret);
return ret;
@@ -258,10 +258,10 @@ static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
return 0;
if (fast_switch) {
- wrmsrl(MSR_AMD_CPPC_REQ, value);
+ wrmsrq(MSR_AMD_CPPC_REQ, value);
return 0;
} else {
- int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ int ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
if (ret)
return ret;
@@ -309,7 +309,7 @@ static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
if (value == prev)
return 0;
- ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
if (ret) {
pr_err("failed to set energy perf value (%d)\n", ret);
return ret;
@@ -371,7 +371,7 @@ static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
static inline int msr_cppc_enable(struct cpufreq_policy *policy)
{
- return wrmsrl_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1);
+ return wrmsrq_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1);
}
static int shmem_cppc_enable(struct cpufreq_policy *policy)
@@ -389,9 +389,10 @@ static inline int amd_pstate_cppc_enable(struct cpufreq_policy *policy)
static int msr_init_perf(struct amd_cpudata *cpudata)
{
union perf_cached perf = READ_ONCE(cpudata->perf);
- u64 cap1, numerator;
+ u64 cap1, numerator, cppc_req;
+ u8 min_perf;
- int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
+ int ret = rdmsrq_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
&cap1);
if (ret)
return ret;
@@ -400,6 +401,22 @@ static int msr_init_perf(struct amd_cpudata *cpudata)
if (ret)
return ret;
+ ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req);
+ if (ret)
+ return ret;
+
+ WRITE_ONCE(cpudata->cppc_req_cached, cppc_req);
+ min_perf = FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cppc_req);
+
+ /*
+ * Clear out the min_perf part to check whether the rest of the MSR is 0; if so,
+ * the min_perf value is the one specified through the BIOS option
+ */
+ cppc_req &= ~(AMD_CPPC_MIN_PERF_MASK);
+
+ if (!cppc_req)
+ perf.bios_min_perf = min_perf;
+
perf.highest_perf = numerator;
perf.max_limit_perf = numerator;
perf.min_limit_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
@@ -417,6 +434,7 @@ static int shmem_init_perf(struct amd_cpudata *cpudata)
struct cppc_perf_caps cppc_perf;
union perf_cached perf = READ_ONCE(cpudata->perf);
u64 numerator;
+ bool auto_sel;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
@@ -438,7 +456,7 @@ static int shmem_init_perf(struct amd_cpudata *cpudata)
if (cppc_state == AMD_PSTATE_ACTIVE)
return 0;
- ret = cppc_get_auto_sel_caps(cpudata->cpu, &cppc_perf);
+ ret = cppc_get_auto_sel(cpudata->cpu, &auto_sel);
if (ret) {
pr_warn("failed to get auto_sel, ret: %d\n", ret);
return 0;
@@ -518,8 +536,8 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
unsigned long flags;
local_irq_save(flags);
- rdmsrl(MSR_IA32_APERF, aperf);
- rdmsrl(MSR_IA32_MPERF, mperf);
+ rdmsrq(MSR_IA32_APERF, aperf);
+ rdmsrq(MSR_IA32_MPERF, mperf);
tsc = rdtsc();
if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
@@ -554,6 +572,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
if (!policy)
return;
+ /* limit the max perf when core performance boost feature is disabled */
+ if (!cpudata->boost_supported)
+ max_perf = min_t(u8, perf.nominal_perf, max_perf);
+
des_perf = clamp_t(u8, des_perf, min_perf, max_perf);
policy->cur = perf_to_freq(perf, cpudata->nominal_freq, des_perf);
@@ -563,10 +585,6 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
des_perf = 0;
}
- /* limit the max perf when core performance boost feature is disabled */
- if (!cpudata->boost_supported)
- max_perf = min_t(u8, perf.nominal_perf, max_perf);
-
if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
@@ -580,20 +598,26 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
{
/*
* Initialize lower frequency limit (i.e.policy->min) with
- * lowest_nonlinear_frequency which is the most energy efficient
- * frequency. Override the initial value set by cpufreq core and
- * amd-pstate qos_requests.
+ * lowest_nonlinear_frequency or, if specified, the min frequency from BIOS.
+ * Override the initial value set by cpufreq core and amd-pstate qos_requests.
*/
if (policy_data->min == FREQ_QOS_MIN_DEFAULT_VALUE) {
struct cpufreq_policy *policy __free(put_cpufreq_policy) =
cpufreq_cpu_get(policy_data->cpu);
struct amd_cpudata *cpudata;
+ union perf_cached perf;
if (!policy)
return -EINVAL;
cpudata = policy->driver_data;
- policy_data->min = cpudata->lowest_nonlinear_freq;
+ perf = READ_ONCE(cpudata->perf);
+
+ if (perf.bios_min_perf)
+ policy_data->min = perf_to_freq(perf, cpudata->nominal_freq,
+ perf.bios_min_perf);
+ else
+ policy_data->min = cpudata->lowest_nonlinear_freq;
}
cpufreq_verify_within_cpu_limits(policy_data);
@@ -772,7 +796,7 @@ static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
goto exit_err;
}
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
+ ret = rdmsrq_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
if (ret) {
pr_err_once("failed to read initial CPU boost state!\n");
ret = -EIO;
@@ -791,7 +815,7 @@ exit_err:
static void amd_perf_ctl_reset(unsigned int cpu)
{
- wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
+ wrmsrq_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
}
#define CPPC_MAX_PERF U8_MAX
@@ -808,19 +832,16 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu);
}
-static void amd_pstate_update_limits(unsigned int cpu)
+static void amd_pstate_update_limits(struct cpufreq_policy *policy)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata;
u32 prev_high = 0, cur_high = 0;
bool highest_perf_changed = false;
+ unsigned int cpu = policy->cpu;
if (!amd_pstate_prefcore)
return;
- if (!policy)
- return;
-
if (amd_get_highest_perf(cpu, &cur_high))
return;
@@ -831,8 +852,10 @@ static void amd_pstate_update_limits(unsigned int cpu)
if (highest_perf_changed) {
WRITE_ONCE(cpudata->prefcore_ranking, cur_high);
- if (cur_high < CPPC_MAX_PERF)
+ if (cur_high < CPPC_MAX_PERF) {
sched_set_itmt_core_prio((int)cur_high, cpu);
+ sched_update_asym_prefer_cpu(cpu, prev_high, cur_high);
+ }
}
}
@@ -1024,6 +1047,10 @@ free_cpudata1:
static void amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ /* Reset CPPC_REQ MSR to the BIOS value */
+ amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
freq_qos_remove_request(&cpudata->req[1]);
freq_qos_remove_request(&cpudata->req[0]);
@@ -1305,6 +1332,12 @@ static ssize_t amd_pstate_show_status(char *buf)
return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
}
+int amd_pstate_get_status(void)
+{
+ return cppc_state;
+}
+EXPORT_SYMBOL_GPL(amd_pstate_get_status);
+
int amd_pstate_update_status(const char *buf, size_t size)
{
int mode_idx;
@@ -1419,7 +1452,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
struct amd_cpudata *cpudata;
union perf_cached perf;
struct device *dev;
- u64 value;
int ret;
/*
@@ -1484,12 +1516,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
cpudata->epp_default = AMD_CPPC_EPP_BALANCE_PERFORMANCE;
}
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
- if (ret)
- return ret;
- WRITE_ONCE(cpudata->cppc_req_cached, value);
- }
ret = amd_pstate_set_epp(policy, cpudata->epp_default);
if (ret)
return ret;
@@ -1509,6 +1535,11 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ /* Reset CPPC_REQ MSR to the BIOS value */
+ amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+
kfree(cpudata);
policy->driver_data = NULL;
}
@@ -1559,21 +1590,38 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
return 0;
}
-static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
+static int amd_pstate_cpu_online(struct cpufreq_policy *policy)
{
- pr_debug("AMD CPU Core %d going online\n", policy->cpu);
-
return amd_pstate_cppc_enable(policy);
}
-static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
+static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
{
- return 0;
+ struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ /*
+ * Reset the CPPC_REQ MSR to the BIOS value; this allows us to retain the BIOS-specified
+ * min_perf value across kexec reboots. If this CPU is simply onlined again afterwards,
+ * the limits, epp and desired perf will be reset to the values cached in the cpudata struct
+ */
+ return amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
}
-static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
+static int amd_pstate_suspend(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ int ret;
+
+ /*
+ * Reset the CPPC_REQ MSR to the BIOS value; this allows us to retain the BIOS-specified
+ * min_perf value across kexec reboots. If this CPU is simply resumed without kexec,
+ * the limits, epp and desired perf will be reset to the values cached in the cpudata struct
+ */
+ ret = amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+ if (ret)
+ return ret;
/* invalidate to ensure it's rewritten during resume */
cpudata->cppc_req_cached = 0;
@@ -1584,6 +1632,17 @@ static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
return 0;
}
+static int amd_pstate_resume(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ int cur_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->cur);
+
+ /* Set CPPC_REQ to last sane value until the governor updates it */
+ return amd_pstate_update_perf(policy, perf.min_limit_perf, cur_perf, perf.max_limit_perf,
+ 0U, false);
+}
+
static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
@@ -1609,6 +1668,10 @@ static struct cpufreq_driver amd_pstate_driver = {
.fast_switch = amd_pstate_fast_switch,
.init = amd_pstate_cpu_init,
.exit = amd_pstate_cpu_exit,
+ .online = amd_pstate_cpu_online,
+ .offline = amd_pstate_cpu_offline,
+ .suspend = amd_pstate_suspend,
+ .resume = amd_pstate_resume,
.set_boost = amd_pstate_set_boost,
.update_limits = amd_pstate_update_limits,
.name = "amd-pstate",
@@ -1621,9 +1684,9 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
.setpolicy = amd_pstate_epp_set_policy,
.init = amd_pstate_epp_cpu_init,
.exit = amd_pstate_epp_cpu_exit,
- .offline = amd_pstate_epp_cpu_offline,
- .online = amd_pstate_epp_cpu_online,
- .suspend = amd_pstate_epp_suspend,
+ .offline = amd_pstate_cpu_offline,
+ .online = amd_pstate_cpu_online,
+ .suspend = amd_pstate_suspend,
.resume = amd_pstate_epp_resume,
.update_limits = amd_pstate_update_limits,
.set_boost = amd_pstate_set_boost,
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index fbe1c08d3f06..cb45fdca27a6 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -30,6 +30,7 @@
* @lowest_perf: the absolute lowest performance level of the processor
* @min_limit_perf: Cached value of the performance corresponding to policy->min
* @max_limit_perf: Cached value of the performance corresponding to policy->max
+ * @bios_min_perf: Cached perf value corresponding to the "Requested CPU Min Frequency" BIOS option
*/
union perf_cached {
struct {
@@ -39,6 +40,7 @@ union perf_cached {
u8 lowest_perf;
u8 min_limit_perf;
u8 max_limit_perf;
+ u8 bios_min_perf;
};
u64 val;
};
@@ -119,6 +121,7 @@ enum amd_pstate_mode {
AMD_PSTATE_MAX,
};
const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode);
+int amd_pstate_get_status(void);
int amd_pstate_update_status(const char *buf, size_t size);
#endif /* _LINUX_AMD_PSTATE_H */
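With bios_min_perf added, union perf_cached still packs seven u8 fields into one u64, so a whole snapshot moves with a single load or store. The access pattern used throughout the driver, sketched:

    union perf_cached perf = READ_ONCE(cpudata->perf); /* one 64-bit load */

    perf.bios_min_perf = min_perf;
    WRITE_ONCE(cpudata->perf, perf); /* one 64-bit store, no torn update */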
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index 59b19b9975e8..13fed4b9e02b 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -129,7 +129,7 @@ static int __init amd_freq_sensitivity_init(void)
pci_dev_put(pcidev);
}
- if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
+ if (rdmsrq_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
return -ENODEV;
if (!(val >> CLASS_CODE_SHIFT))
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index cb93f00bafdb..b7c688a5659c 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -808,10 +808,119 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
}
+
+static ssize_t show_auto_select(struct cpufreq_policy *policy, char *buf)
+{
+ bool val;
+ int ret;
+
+ ret = cppc_get_auto_sel(policy->cpu, &val);
+
+ /* show "<unsupported>" when this register is not supported by cpc */
+ if (ret == -EOPNOTSUPP)
+ return sysfs_emit(buf, "<unsupported>\n");
+
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", val);
+}
+
+static ssize_t store_auto_select(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ bool val;
+ int ret;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ ret = cppc_set_auto_sel(policy->cpu, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t show_auto_act_window(struct cpufreq_policy *policy, char *buf)
+{
+ u64 val;
+ int ret;
+
+ ret = cppc_get_auto_act_window(policy->cpu, &val);
+
+ /* show "<unsupported>" when this register is not supported by cpc */
+ if (ret == -EOPNOTSUPP)
+ return sysfs_emit(buf, "<unsupported>\n");
+
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu\n", val);
+}
+
+static ssize_t store_auto_act_window(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ u64 usec;
+ int ret;
+
+ ret = kstrtou64(buf, 0, &usec);
+ if (ret)
+ return ret;
+
+ ret = cppc_set_auto_act_window(policy->cpu, usec);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t show_energy_performance_preference_val(struct cpufreq_policy *policy, char *buf)
+{
+ u64 val;
+ int ret;
+
+ ret = cppc_get_epp_perf(policy->cpu, &val);
+
+ /* show "<unsupported>" when this register is not supported by cpc */
+ if (ret == -EOPNOTSUPP)
+ return sysfs_emit(buf, "<unsupported>\n");
+
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu\n", val);
+}
+
+static ssize_t store_energy_performance_preference_val(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ u64 val;
+ int ret;
+
+ ret = kstrtou64(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ ret = cppc_set_epp(policy->cpu, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
cpufreq_freq_attr_ro(freqdomain_cpus);
+cpufreq_freq_attr_rw(auto_select);
+cpufreq_freq_attr_rw(auto_act_window);
+cpufreq_freq_attr_rw(energy_performance_preference_val);
static struct freq_attr *cppc_cpufreq_attr[] = {
&freqdomain_cpus,
+ &auto_select,
+ &auto_act_window,
+ &energy_performance_preference_val,
NULL,
};
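cpufreq_freq_attr_rw(name) wires a show_<name>/store_<name> pair into a struct freq_attr, which the cppc_cpufreq_attr[] table then exposes per policy in sysfs. Its effect, roughly (a sketch of the expansion, not the literal header definition):

    /* cpufreq_freq_attr_rw(auto_select) roughly expands to: */
    static struct freq_attr auto_select =
            __ATTR(auto_select, 0644, show_auto_select, store_auto_select);

so reading /sys/devices/system/cpu/cpufreq/policyN/auto_select ends up in show_auto_select().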
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index f45ded62b0e0..d7426e1d8bdd 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -255,51 +255,6 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
-/**
- * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
- * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
- */
-void cpufreq_cpu_release(struct cpufreq_policy *policy)
-{
- if (WARN_ON(!policy))
- return;
-
- lockdep_assert_held(&policy->rwsem);
-
- up_write(&policy->rwsem);
-
- cpufreq_cpu_put(policy);
-}
-
-/**
- * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
- * @cpu: CPU to find the policy for.
- *
- * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
- * if the policy returned by it is not NULL, acquire its rwsem for writing.
- * Return the policy if it is active or release it and return NULL otherwise.
- *
- * The policy returned by this function has to be released with the help of
- * cpufreq_cpu_release() in order to release its rwsem and balance its usage
- * counter properly.
- */
-struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
-{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-
- if (!policy)
- return NULL;
-
- down_write(&policy->rwsem);
-
- if (policy_is_inactive(policy)) {
- cpufreq_cpu_release(policy);
- return NULL;
- }
-
- return policy;
-}
-
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
@@ -636,6 +591,22 @@ static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
return sysfs_emit(buf, "%d\n", policy->boost_enabled);
}
+static int policy_set_boost(struct cpufreq_policy *policy, bool enable)
+{
+ int ret;
+
+ if (policy->boost_enabled == enable)
+ return 0;
+
+ policy->boost_enabled = enable;
+
+ ret = cpufreq_driver->set_boost(policy, enable);
+ if (ret)
+ policy->boost_enabled = !policy->boost_enabled;
+
+ return ret;
+}
+
static ssize_t store_local_boost(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
@@ -651,21 +622,11 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
if (!policy->boost_supported)
return -EINVAL;
- if (policy->boost_enabled == enable)
+ ret = policy_set_boost(policy, enable);
+ if (!ret)
return count;
- policy->boost_enabled = enable;
-
- cpus_read_lock();
- ret = cpufreq_driver->set_boost(policy, enable);
- cpus_read_unlock();
-
- if (ret) {
- policy->boost_enabled = !policy->boost_enabled;
- return ret;
- }
-
- return count;
+ return ret;
}
static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost);
@@ -845,7 +806,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
- char str_governor[16];
+ char str_governor[CPUFREQ_NAME_LEN];
int ret;
ret = sscanf(buf, "%15s", str_governor);
@@ -956,9 +917,9 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
if (!policy->governor || !policy->governor->store_setspeed)
return -EINVAL;
- ret = sscanf(buf, "%u", &freq);
- if (ret != 1)
- return -EINVAL;
+ ret = kstrtouint(buf, 0, &freq);
+ if (ret)
+ return ret;
policy->governor->store_setspeed(policy, freq);
@@ -1025,17 +986,16 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
- ssize_t ret = -EBUSY;
if (!fattr->show)
return -EIO;
- down_read(&policy->rwsem);
+ guard(cpufreq_policy_read)(policy);
+
if (likely(!policy_is_inactive(policy)))
- ret = fattr->show(policy, buf);
- up_read(&policy->rwsem);
+ return fattr->show(policy, buf);
- return ret;
+ return -EBUSY;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
@@ -1043,17 +1003,16 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
- ssize_t ret = -EBUSY;
if (!fattr->store)
return -EIO;
- down_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
+
if (likely(!policy_is_inactive(policy)))
- ret = fattr->store(policy, buf, count);
- up_write(&policy->rwsem);
+ return fattr->store(policy, buf, count);
- return ret;
+ return -EBUSY;
}
static void cpufreq_sysfs_release(struct kobject *kobj)
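The guard(cpufreq_policy_read) / guard(cpufreq_policy_write) conversions rely on guard classes for policy->rwsem being defined elsewhere in this series (not visible in these hunks); presumably something along these lines:

    /* Assumed definitions, mirroring the DEFINE_GUARD() pattern: */
    DEFINE_GUARD(cpufreq_policy_read, struct cpufreq_policy *,
                 down_read(&_T->rwsem), up_read(&_T->rwsem))
    DEFINE_GUARD(cpufreq_policy_write, struct cpufreq_policy *,
                 down_write(&_T->rwsem), up_write(&_T->rwsem))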
@@ -1211,7 +1170,8 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
if (cpumask_test_cpu(cpu, policy->cpus))
return 0;
- down_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
+
if (has_target())
cpufreq_stop_governor(policy);
@@ -1222,7 +1182,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
if (ret)
pr_err("%s: Failed to start governor\n", __func__);
}
- up_write(&policy->rwsem);
+
return ret;
}
@@ -1242,9 +1202,10 @@ static void handle_update(struct work_struct *work)
container_of(work, struct cpufreq_policy, update);
pr_debug("handle_update for cpu %u called\n", policy->cpu);
- down_write(&policy->rwsem);
+
+ guard(cpufreq_policy_write)(policy);
+
refresh_frequency_limits(policy);
- up_write(&policy->rwsem);
}
static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
@@ -1270,11 +1231,11 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
struct kobject *kobj;
struct completion *cmp;
- down_write(&policy->rwsem);
- cpufreq_stats_free_table(policy);
- kobj = &policy->kobj;
- cmp = &policy->kobj_unregister;
- up_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ cpufreq_stats_free_table(policy);
+ kobj = &policy->kobj;
+ cmp = &policy->kobj_unregister;
+ }
kobject_put(kobj);
/*
@@ -1350,7 +1311,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
init_waitqueue_head(&policy->transition_wait);
INIT_WORK(&policy->update, handle_update);
- policy->cpu = cpu;
return policy;
err_min_qos_notifier:
@@ -1419,35 +1379,17 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
kfree(policy);
}
-static int cpufreq_online(unsigned int cpu)
+static int cpufreq_policy_online(struct cpufreq_policy *policy,
+ unsigned int cpu, bool new_policy)
{
- struct cpufreq_policy *policy;
- bool new_policy;
unsigned long flags;
unsigned int j;
int ret;
- pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
-
- /* Check if this CPU already has a policy to manage it */
- policy = per_cpu(cpufreq_cpu_data, cpu);
- if (policy) {
- WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
- if (!policy_is_inactive(policy))
- return cpufreq_add_policy_cpu(policy, cpu);
+ guard(cpufreq_policy_write)(policy);
- /* This is the only online CPU for the policy. Start over. */
- new_policy = false;
- down_write(&policy->rwsem);
- policy->cpu = cpu;
- policy->governor = NULL;
- } else {
- new_policy = true;
- policy = cpufreq_policy_alloc(cpu);
- if (!policy)
- return -ENOMEM;
- down_write(&policy->rwsem);
- }
+ policy->cpu = cpu;
+ policy->governor = NULL;
if (!new_policy && cpufreq_driver->online) {
/* Recover policy->cpus using related_cpus */
@@ -1470,7 +1412,7 @@ static int cpufreq_online(unsigned int cpu)
if (ret) {
pr_debug("%s: %d: initialization failed\n", __func__,
__LINE__);
- goto out_free_policy;
+ goto out_clear_policy;
}
/*
@@ -1621,7 +1563,55 @@ static int cpufreq_online(unsigned int cpu)
goto out_destroy_policy;
}
- up_write(&policy->rwsem);
+ return 0;
+
+out_destroy_policy:
+ for_each_cpu(j, policy->real_cpus)
+ remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
+
+out_offline_policy:
+ if (cpufreq_driver->offline)
+ cpufreq_driver->offline(policy);
+
+out_exit_policy:
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+
+out_clear_policy:
+ cpumask_clear(policy->cpus);
+
+ return ret;
+}
+
+static int cpufreq_online(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ bool new_policy;
+ int ret;
+
+ pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
+
+ /* Check if this CPU already has a policy to manage it */
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ if (policy) {
+ WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
+ if (!policy_is_inactive(policy))
+ return cpufreq_add_policy_cpu(policy, cpu);
+
+ /* This is the only online CPU for the policy. Start over. */
+ new_policy = false;
+ } else {
+ new_policy = true;
+ policy = cpufreq_policy_alloc(cpu);
+ if (!policy)
+ return -ENOMEM;
+ }
+
+ ret = cpufreq_policy_online(policy, cpu, new_policy);
+ if (ret) {
+ cpufreq_policy_free(policy);
+ return ret;
+ }
kobject_uevent(&policy->kobj, KOBJ_ADD);
@@ -1633,41 +1623,24 @@ static int cpufreq_online(unsigned int cpu)
if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
- /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
+ /*
+ * Let the per-policy boost flag mirror the cpufreq_driver boost during
+ * initialization for a new policy. For an existing policy, maintain the
+ * previous boost value unless global boost is disabled.
+ */
if (cpufreq_driver->set_boost && policy->boost_supported &&
- policy->boost_enabled != cpufreq_boost_enabled()) {
- policy->boost_enabled = cpufreq_boost_enabled();
- ret = cpufreq_driver->set_boost(policy, policy->boost_enabled);
+ (new_policy || !cpufreq_boost_enabled())) {
+ ret = policy_set_boost(policy, cpufreq_boost_enabled());
if (ret) {
/* If the set_boost fails, the online operation is not affected */
pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
- str_enable_disable(policy->boost_enabled));
- policy->boost_enabled = !policy->boost_enabled;
+ str_enable_disable(cpufreq_boost_enabled()));
}
}
pr_debug("initialization complete\n");
return 0;
-
-out_destroy_policy:
- for_each_cpu(j, policy->real_cpus)
- remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
-
-out_offline_policy:
- if (cpufreq_driver->offline)
- cpufreq_driver->offline(policy);
-
-out_exit_policy:
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
-
-out_free_policy:
- cpumask_clear(policy->cpus);
- up_write(&policy->rwsem);
-
- cpufreq_policy_free(policy);
- return ret;
}
/**
@@ -1757,11 +1730,10 @@ static int cpufreq_offline(unsigned int cpu)
return 0;
}
- down_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
__cpufreq_offline(cpu, policy);
- up_write(&policy->rwsem);
return 0;
}
@@ -1778,33 +1750,29 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (!policy)
return;
- down_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ if (cpu_online(cpu))
+ __cpufreq_offline(cpu, policy);
- if (cpu_online(cpu))
- __cpufreq_offline(cpu, policy);
+ remove_cpu_dev_symlink(policy, cpu, dev);
- remove_cpu_dev_symlink(policy, cpu, dev);
+ if (!cpumask_empty(policy->real_cpus))
+ return;
- if (!cpumask_empty(policy->real_cpus)) {
- up_write(&policy->rwsem);
- return;
- }
+ /*
+ * Unregister cpufreq cooling once all the CPUs of the policy
+ * are removed.
+ */
+ if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
+ cpufreq_cooling_unregister(policy->cdev);
+ policy->cdev = NULL;
+ }
- /*
- * Unregister cpufreq cooling once all the CPUs of the policy are
- * removed.
- */
- if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
- cpufreq_cooling_unregister(policy->cdev);
- policy->cdev = NULL;
+ /* We did light-weight exit earlier, do full tear down now */
+ if (cpufreq_driver->offline && cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
}
- /* We did light-weight exit earlier, do full tear down now */
- if (cpufreq_driver->offline && cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
-
- up_write(&policy->rwsem);
-
cpufreq_policy_free(policy);
}
@@ -1874,27 +1842,26 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
*/
unsigned int cpufreq_quick_get(unsigned int cpu)
{
- struct cpufreq_policy *policy;
- unsigned int ret_freq = 0;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
unsigned long flags;
read_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
- ret_freq = cpufreq_driver->get(cpu);
+ unsigned int ret_freq = cpufreq_driver->get(cpu);
+
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
return ret_freq;
}
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
policy = cpufreq_cpu_get(cpu);
- if (policy) {
- ret_freq = policy->cur;
- cpufreq_cpu_put(policy);
- }
+ if (policy)
+ return policy->cur;
- return ret_freq;
+ return 0;
}
EXPORT_SYMBOL(cpufreq_quick_get);
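
The __free(put_cpufreq_policy) annotation ties the policy reference to the variable's scope, so every return path drops the reference without an explicit cpufreq_cpu_put(). A sketch of the presumed cleanup-class definition and the resulting pattern (the DEFINE_FREE() itself is added elsewhere in this series):

	DEFINE_FREE(put_cpufreq_policy, struct cpufreq_policy *,
		    if (_T) cpufreq_cpu_put(_T))

	unsigned int quick_get_example(unsigned int cpu)
	{
		struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;

		policy = cpufreq_cpu_get(cpu);	/* takes a reference */
		if (!policy)
			return 0;

		return policy->cur;		/* reference dropped automatically */
	}

The explicit NULL initialization matters: the cleanup runs on every scope exit, including any return taken before cpufreq_cpu_get() assigns the variable.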
@@ -1906,15 +1873,13 @@ EXPORT_SYMBOL(cpufreq_quick_get);
*/
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- unsigned int ret_freq = 0;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
- if (policy) {
- ret_freq = policy->max;
- cpufreq_cpu_put(policy);
- }
+ policy = cpufreq_cpu_get(cpu);
+ if (policy)
+ return policy->max;
- return ret_freq;
+ return 0;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
@@ -1926,15 +1891,13 @@ EXPORT_SYMBOL(cpufreq_quick_get_max);
*/
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- unsigned int ret_freq = 0;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
- if (policy) {
- ret_freq = policy->cpuinfo.max_freq;
- cpufreq_cpu_put(policy);
- }
+ policy = cpufreq_cpu_get(cpu);
+ if (policy)
+ return policy->cpuinfo.max_freq;
- return ret_freq;
+ return 0;
}
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
@@ -1954,19 +1917,18 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
*/
unsigned int cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- unsigned int ret_freq = 0;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
- if (policy) {
- down_read(&policy->rwsem);
- if (cpufreq_driver->get)
- ret_freq = __cpufreq_get(policy);
- up_read(&policy->rwsem);
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return 0;
- cpufreq_cpu_put(policy);
- }
+ guard(cpufreq_policy_read)(policy);
+
+ if (cpufreq_driver->get)
+ return __cpufreq_get(policy);
- return ret_freq;
+ return 0;
}
EXPORT_SYMBOL(cpufreq_get);
@@ -2025,9 +1987,9 @@ void cpufreq_suspend(void)
for_each_active_policy(policy) {
if (has_target()) {
- down_write(&policy->rwsem);
- cpufreq_stop_governor(policy);
- up_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ cpufreq_stop_governor(policy);
+ }
}
if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
@@ -2068,9 +2030,9 @@ void cpufreq_resume(void)
pr_err("%s: Failed to resume driver: %s\n", __func__,
cpufreq_driver->name);
} else if (has_target()) {
- down_write(&policy->rwsem);
- ret = cpufreq_start_governor(policy);
- up_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ ret = cpufreq_start_governor(policy);
+ }
if (ret)
pr_err("%s: Failed to start governor for CPU%u's policy\n",
@@ -2438,15 +2400,9 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
- int ret;
-
- down_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
- ret = __cpufreq_driver_target(policy, target_freq, relation);
-
- up_write(&policy->rwsem);
-
- return ret;
+ return __cpufreq_driver_target(policy, target_freq, relation);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
@@ -2618,31 +2574,6 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
* POLICY INTERFACE *
*********************************************************************/
-/**
- * cpufreq_get_policy - get the current cpufreq_policy
- * @policy: struct cpufreq_policy into which the current cpufreq_policy
- * is written
- * @cpu: CPU to find the policy for
- *
- * Reads the current cpufreq policy.
- */
-int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
-{
- struct cpufreq_policy *cpu_policy;
- if (!policy)
- return -EINVAL;
-
- cpu_policy = cpufreq_cpu_get(cpu);
- if (!cpu_policy)
- return -EINVAL;
-
- memcpy(policy, cpu_policy, sizeof(*policy));
-
- cpufreq_cpu_put(cpu_policy);
- return 0;
-}
-EXPORT_SYMBOL(cpufreq_get_policy);
-
DEFINE_PER_CPU(unsigned long, cpufreq_pressure);
/**
@@ -2793,6 +2724,21 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
return ret;
}
+static void cpufreq_policy_refresh(struct cpufreq_policy *policy)
+{
+ guard(cpufreq_policy_write)(policy);
+
+ /*
+ * BIOS might change freq behind our back
+ * -> ask driver for current freq and notify governors about a change
+ */
+ if (cpufreq_driver->get && has_target() &&
+ (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
+ return;
+
+ refresh_frequency_limits(policy);
+}
+
/**
* cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
* @cpu: CPU to re-evaluate the policy for.
@@ -2804,23 +2750,13 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
*/
void cpufreq_update_policy(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
+ policy = cpufreq_cpu_get(cpu);
if (!policy)
return;
- /*
- * BIOS might change freq behind our back
- * -> ask driver for current freq and notify governors about a change
- */
- if (cpufreq_driver->get && has_target() &&
- (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
- goto unlock;
-
- refresh_frequency_limits(policy);
-
-unlock:
- cpufreq_cpu_release(policy);
+ cpufreq_policy_refresh(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
@@ -2829,7 +2765,7 @@ EXPORT_SYMBOL(cpufreq_update_policy);
* @cpu: CPU to update the policy limits for.
*
* Invoke the driver's ->update_limits callback if present or call
- * cpufreq_update_policy() for @cpu.
+ * cpufreq_policy_refresh() for @cpu.
*/
void cpufreq_update_limits(unsigned int cpu)
{
@@ -2840,9 +2776,9 @@ void cpufreq_update_limits(unsigned int cpu)
return;
if (cpufreq_driver->update_limits)
- cpufreq_driver->update_limits(cpu);
+ cpufreq_driver->update_limits(policy);
else
- cpufreq_update_policy(cpu);
+ cpufreq_policy_refresh(policy);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
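
Passing 'policy' to ->update_limits() implies the callback's signature changes from taking a CPU number to taking the policy pointer, consistent with intel_pstate_update_limits() further down. Sketch of the prototype change assumed here (the cpufreq.h hunk is not shown):

	/* before */
	void (*update_limits)(unsigned int cpu);
	/* after */
	void (*update_limits)(struct cpufreq_policy *policy);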
@@ -2876,8 +2812,10 @@ static int cpufreq_boost_trigger_state(int state)
unsigned long flags;
int ret = 0;
- if (cpufreq_driver->boost_enabled == state)
- return 0;
+ /*
+ * Don't compare 'cpufreq_driver->boost_enabled' with 'state' here, to
+ * make sure all policies stay in sync with the global boost flag.
+ */
write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver->boost_enabled = state;
@@ -2888,12 +2826,9 @@ static int cpufreq_boost_trigger_state(int state)
if (!policy->boost_supported)
continue;
- policy->boost_enabled = state;
- ret = cpufreq_driver->set_boost(policy, state);
- if (ret) {
- policy->boost_enabled = !policy->boost_enabled;
+ ret = policy_set_boost(policy, state);
+ if (ret)
goto err_reset_state;
- }
}
cpus_read_unlock();
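
policy_set_boost() is not shown in this hunk; judging by the set-and-revert-on-failure pattern it replaces above, its presumed shape is roughly:

	static int policy_set_boost(struct cpufreq_policy *policy, bool enable)
	{
		int ret;

		if (policy->boost_enabled == enable)
			return 0;

		policy->boost_enabled = enable;

		ret = cpufreq_driver->set_boost(policy, enable);
		if (ret)
			policy->boost_enabled = !enable;

		return ret;
	}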
@@ -3118,6 +3053,36 @@ static int __init cpufreq_core_init(void)
return 0;
}
+
+static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
+{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy) {
+ pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
+ return false;
+ }
+
+ return sugov_is_governor(policy);
+}
+
+bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask)
+{
+ unsigned int cpu;
+
+ /* Do not attempt EAS if schedutil is not being used. */
+ for_each_cpu(cpu, cpu_mask) {
+ if (!cpufreq_policy_is_good_for_eas(cpu)) {
+ pr_debug("rd %*pbl: schedutil is mandatory for EAS\n",
+ cpumask_pr_args(cpu_mask));
+ return false;
+ }
+ }
+
+ return true;
+}
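
sugov_is_governor() is a helper assumed to be introduced alongside this change; a plausible minimal shape, checking that the policy runs schedutil:

	/* Sketch, in kernel/sched/cpufreq_schedutil.c terms: */
	bool sugov_is_governor(struct cpufreq_policy *policy)
	{
		return policy->governor == &schedutil_gov;
	}

This gives the scheduler a single predicate, cpufreq_ready_for_eas(), for the "schedutil on every CPU of the root domain" requirement instead of open-coding the governor check.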
+
module_param(off, int, 0444);
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
core_initcall(cpufreq_core_init);
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index d23a97ba6478..320a0af2266a 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -225,12 +225,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
}
/* Enable Enhanced PowerSaver */
- rdmsrl(MSR_IA32_MISC_ENABLE, val);
+ rdmsrq(MSR_IA32_MISC_ENABLE, val);
if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
- wrmsrl(MSR_IA32_MISC_ENABLE, val);
+ wrmsrq(MSR_IA32_MISC_ENABLE, val);
/* Can be locked at 0 */
- rdmsrl(MSR_IA32_MISC_ENABLE, val);
+ rdmsrq(MSR_IA32_MISC_ENABLE, val);
if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
pr_info("Can't enable Enhanced PowerSaver\n");
return -ENODEV;
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 36494b855e41..fc5a58088b35 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -21,7 +21,6 @@
#include <linux/cpufreq.h>
#include <asm/cpu_device_id.h>
-#include <asm/msr.h>
#include <linux/timex.h>
#include <linux/io.h>
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index ba9bf06f1c77..64587d318267 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -221,6 +221,7 @@ struct global_params {
* @sched_flags: Store scheduler flags for possible cross CPU update
* @hwp_boost_min: Last HWP boosted min performance
* @suspended: Whether or not the driver has been suspended.
+ * @pd_registered: Set when a perf domain is registered for this CPU.
* @hwp_notify_work: workqueue for HWP notifications.
*
* This structure stores per CPU instance data for all CPUs.
@@ -260,6 +261,9 @@ struct cpudata {
unsigned int sched_flags;
u32 hwp_boost_min;
bool suspended;
+#ifdef CONFIG_ENERGY_MODEL
+ bool pd_registered;
+#endif
struct delayed_work hwp_notify_work;
};
@@ -303,6 +307,7 @@ static bool hwp_is_hybrid;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
+#define INTEL_PSTATE_CORE_SCALING 100000
#define HYBRID_SCALING_FACTOR_ADL 78741
#define HYBRID_SCALING_FACTOR_MTL 80000
#define HYBRID_SCALING_FACTOR_LNL 86957
@@ -311,7 +316,7 @@ static int hybrid_scaling_factor;
static inline int core_get_scaling(void)
{
- return 100000;
+ return INTEL_PSTATE_CORE_SCALING;
}
#ifdef CONFIG_ACPI
@@ -601,7 +606,7 @@ static bool turbo_is_disabled(void)
if (!cpu_feature_enabled(X86_FEATURE_IDA))
return true;
- rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
+ rdmsrq(MSR_IA32_MISC_ENABLE, misc_en);
return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
}
@@ -623,7 +628,7 @@ static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
if (!boot_cpu_has(X86_FEATURE_EPB))
return -ENXIO;
- ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ ret = rdmsrq_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
if (ret)
return (s16)ret;
@@ -640,7 +645,7 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
* MSR_HWP_REQUEST, so need to read and get EPP.
*/
if (!hwp_req_data) {
- epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
+ epp = rdmsrq_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
&hwp_req_data);
if (epp)
return epp;
@@ -662,12 +667,12 @@ static int intel_pstate_set_epb(int cpu, s16 pref)
if (!boot_cpu_has(X86_FEATURE_EPB))
return -ENXIO;
- ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
if (ret)
return ret;
epb = (epb & ~0x0f) | pref;
- wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
+ wrmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
return 0;
}
@@ -765,7 +770,7 @@ static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
* function, so it cannot run in parallel with the update below.
*/
WRITE_ONCE(cpu->hwp_req_cached, value);
- ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ ret = wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
if (!ret)
cpu->epp_cached = epp;
@@ -919,7 +924,7 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
if (ratio <= 0) {
u64 cap;
- rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
+ rdmsrq_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
ratio = HWP_GUARANTEED_PERF(cap);
}
@@ -948,12 +953,124 @@ static struct cpudata *hybrid_max_perf_cpu __read_mostly;
*/
static DEFINE_MUTEX(hybrid_capacity_lock);
+#ifdef CONFIG_ENERGY_MODEL
+#define HYBRID_EM_STATE_COUNT 4
+
+static int hybrid_active_power(struct device *dev, unsigned long *power,
+ unsigned long *freq)
+{
+ /*
+ * Create "utilization bins" of 0-40%, 40%-60%, 60%-80%, and 80%-100%
+ * of the maximum capacity such that two CPUs of the same type will be
+ * regarded as equally attractive if the utilization of each of them
+ * falls into the same bin, which should prevent tasks from being
+ * migrated between them too often.
+ *
+ * For this purpose, return the "frequency" of 2 for the first
+ * performance level and otherwise leave the value set by the caller.
+ */
+ if (!*freq)
+ *freq = 2;
+
+ /* No power information. */
+ *power = EM_MAX_POWER;
+
+ return 0;
+}
+
+static int hybrid_get_cost(struct device *dev, unsigned long freq,
+ unsigned long *cost)
+{
+ struct pstate_data *pstate = &all_cpu_data[dev->id]->pstate;
+ struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(dev->id);
+
+ /*
+ * The smaller the perf-to-frequency scaling factor, the larger the IPC
+ * ratio between the given CPU and the least capable CPU in the system.
+ * Regard that IPC ratio as the primary cost component and assume that
+ * the scaling factors for different CPU types will differ by at least
+ * 5% and they will not be above INTEL_PSTATE_CORE_SCALING.
+ *
+ * Add the freq value to the cost, so that the cost of running on CPUs
+ * of the same type in different "utilization bins" is different.
+ */
+ *cost = div_u64(100ULL * INTEL_PSTATE_CORE_SCALING, pstate->scaling) + freq;
+ /*
+ * Increase the cost slightly for CPUs able to access L3 to avoid
+ * touching it in case some other CPUs of the same type can do the work
+ * without it.
+ */
+ if (cacheinfo) {
+ unsigned int i;
+
+ /* Check if L3 cache is there. */
+ for (i = 0; i < cacheinfo->num_leaves; i++) {
+ if (cacheinfo->info_list[i].level == 3) {
+ *cost += 2;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
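
A worked example of the cost formula, assuming (as the scaling factors earlier in this file suggest) that E-cores use INTEL_PSTATE_CORE_SCALING and P-cores use one of the HYBRID_SCALING_FACTOR_* values; only the relative magnitudes matter:

	/*
	 * Illustrative base costs, in abstract units:
	 *   E-core:      100 * 100000 / 100000 = 100
	 *   LNL P-core:  100 * 100000 / 86957  = 115
	 *   ADL P-core:  100 * 100000 / 78741  = 127
	 *
	 * The +freq term (the per-bin "frequency") and the +2 L3 term then
	 * break ties between CPUs of the same type, so the energy model
	 * steers work to lower-IPC and less-utilized CPUs unless the extra
	 * capacity is actually needed.
	 */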
+
+static bool hybrid_register_perf_domain(unsigned int cpu)
+{
+ static const struct em_data_callback cb
+ = EM_ADV_DATA_CB(hybrid_active_power, hybrid_get_cost);
+ struct cpudata *cpudata = all_cpu_data[cpu];
+ struct device *cpu_dev;
+
+ /*
+ * Registering EM perf domains without enabling asymmetric CPU capacity
+ * support is not really useful and one domain should not be registered
+ * more than once.
+ */
+ if (!hybrid_max_perf_cpu || cpudata->pd_registered)
+ return false;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return false;
+
+ if (em_dev_register_perf_domain(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
+ cpumask_of(cpu), false))
+ return false;
+
+ cpudata->pd_registered = true;
+
+ return true;
+}
+
+static void hybrid_register_all_perf_domains(void)
+{
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu)
+ hybrid_register_perf_domain(cpu);
+}
+
+static void hybrid_update_perf_domain(struct cpudata *cpu)
+{
+ if (cpu->pd_registered)
+ em_adjust_cpu_capacity(cpu->cpu);
+}
+#else /* !CONFIG_ENERGY_MODEL */
+static inline bool hybrid_register_perf_domain(unsigned int cpu) { return false; }
+static inline void hybrid_register_all_perf_domains(void) {}
+static inline void hybrid_update_perf_domain(struct cpudata *cpu) {}
+#endif /* CONFIG_ENERGY_MODEL */
+
static void hybrid_set_cpu_capacity(struct cpudata *cpu)
{
arch_set_cpu_capacity(cpu->cpu, cpu->capacity_perf,
hybrid_max_perf_cpu->capacity_perf,
cpu->capacity_perf,
cpu->pstate.max_pstate_physical);
+ hybrid_update_perf_domain(cpu);
+
+ topology_set_cpu_scale(cpu->cpu, arch_scale_cpu_capacity(cpu->cpu));
pr_debug("CPU%d: perf = %u, max. perf = %u, base perf = %d\n", cpu->cpu,
cpu->capacity_perf, hybrid_max_perf_cpu->capacity_perf,
@@ -1042,6 +1159,11 @@ static void hybrid_refresh_cpu_capacity_scaling(void)
guard(mutex)(&hybrid_capacity_lock);
__hybrid_refresh_cpu_capacity_scaling();
+ /*
+ * Perf domains are not registered before setting hybrid_max_perf_cpu,
+ * so register them all after setting up CPU capacity scaling.
+ */
+ hybrid_register_all_perf_domains();
}
static void hybrid_init_cpu_capacity_scaling(bool refresh)
@@ -1069,7 +1191,7 @@ static void hybrid_init_cpu_capacity_scaling(bool refresh)
hybrid_refresh_cpu_capacity_scaling();
/*
* Disabling ITMT causes sched domains to be rebuilt to disable asym
- * packing and enable asym capacity.
+ * packing and enable asym capacity and EAS.
*/
sched_clear_itmt_support();
}
@@ -1091,7 +1213,7 @@ static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
u64 cap;
- rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
+ rdmsrq_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
WRITE_ONCE(cpu->hwp_cap_cached, cap);
cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
@@ -1147,6 +1269,14 @@ static void hybrid_update_capacity(struct cpudata *cpu)
}
hybrid_set_cpu_capacity(cpu);
+ /*
+ * If the CPU was offline to start with and it is going online for the
+ * first time, a perf domain needs to be registered for it if hybrid
+ * capacity scaling has been enabled already. In that case, sched
+ * domains need to be rebuilt to take the new perf domain into account.
+ */
+ if (hybrid_register_perf_domain(cpu->cpu))
+ em_rebuild_sched_domains();
unlock:
mutex_unlock(&hybrid_capacity_lock);
@@ -1165,7 +1295,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
min = max;
- rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
+ rdmsrq_on_cpu(cpu, MSR_HWP_REQUEST, &value);
value &= ~HWP_MIN_PERF(~0L);
value |= HWP_MIN_PERF(min);
@@ -1212,7 +1342,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
}
skip_epp:
WRITE_ONCE(cpu_data->hwp_req_cached, value);
- wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata);
@@ -1259,7 +1389,7 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
if (boot_cpu_has(X86_FEATURE_HWP_EPP))
value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
mutex_lock(&hybrid_capacity_lock);
@@ -1288,7 +1418,7 @@ static void set_power_ctl_ee_state(bool input)
u64 power_ctl;
mutex_lock(&intel_pstate_driver_lock);
- rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
if (input) {
power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
power_ctl_ee_state = POWER_CTL_EE_ENABLE;
@@ -1296,7 +1426,7 @@ static void set_power_ctl_ee_state(bool input)
power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
power_ctl_ee_state = POWER_CTL_EE_DISABLE;
}
- wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ wrmsrq(MSR_IA32_POWER_CTL, power_ctl);
mutex_unlock(&intel_pstate_driver_lock);
}
@@ -1305,7 +1435,7 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata);
static void intel_pstate_hwp_reenable(struct cpudata *cpu)
{
intel_pstate_hwp_enable(cpu);
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
}
static int intel_pstate_suspend(struct cpufreq_policy *policy)
@@ -1356,9 +1486,11 @@ static void intel_pstate_update_policies(void)
cpufreq_update_policy(cpu);
}
-static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
- struct cpufreq_policy *policy)
+static void __intel_pstate_update_max_freq(struct cpufreq_policy *policy,
+ struct cpudata *cpudata)
{
+ guard(cpufreq_policy_write)(policy);
+
if (hwp_active)
intel_pstate_get_hwp_cap(cpudata);
@@ -1368,42 +1500,34 @@ static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
refresh_frequency_limits(policy);
}
-static void intel_pstate_update_limits(unsigned int cpu)
+static bool intel_pstate_update_max_freq(struct cpudata *cpudata)
{
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
- struct cpudata *cpudata;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
+ policy = cpufreq_cpu_get(cpudata->cpu);
if (!policy)
- return;
+ return false;
- cpudata = all_cpu_data[cpu];
+ __intel_pstate_update_max_freq(policy, cpudata);
- __intel_pstate_update_max_freq(cpudata, policy);
+ return true;
+}
- /* Prevent the driver from being unregistered now. */
- mutex_lock(&intel_pstate_driver_lock);
+static void intel_pstate_update_limits(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpudata = all_cpu_data[policy->cpu];
- cpufreq_cpu_release(policy);
+ __intel_pstate_update_max_freq(policy, cpudata);
hybrid_update_capacity(cpudata);
-
- mutex_unlock(&intel_pstate_driver_lock);
}
static void intel_pstate_update_limits_for_all(void)
{
int cpu;
- for_each_possible_cpu(cpu) {
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
-
- if (!policy)
- continue;
-
- __intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
-
- cpufreq_cpu_release(policy);
- }
+ for_each_possible_cpu(cpu)
+ intel_pstate_update_max_freq(all_cpu_data[cpu]);
mutex_lock(&hybrid_capacity_lock);
@@ -1706,7 +1830,7 @@ static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribut
u64 power_ctl;
int enable;
- rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
return sprintf(buf, "%d\n", !enable);
}
@@ -1843,13 +1967,8 @@ static void intel_pstate_notify_work(struct work_struct *work)
{
struct cpudata *cpudata =
container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
-
- if (policy) {
- __intel_pstate_update_max_freq(cpudata, policy);
-
- cpufreq_cpu_release(policy);
+ if (intel_pstate_update_max_freq(cpudata)) {
/*
* The driver will not be unregistered while this function is
* running, so update the capacity without acquiring the driver
@@ -1858,7 +1977,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
hybrid_update_capacity(cpudata);
}
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
@@ -1880,7 +1999,7 @@ void notify_hwp_interrupt(void)
if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
status_mask |= HWP_HIGHEST_PERF_CHANGE_STATUS;
- rdmsrl_safe(MSR_HWP_STATUS, &value);
+ rdmsrq_safe(MSR_HWP_STATUS, &value);
if (!(value & status_mask))
return;
@@ -1897,7 +2016,7 @@ void notify_hwp_interrupt(void)
return;
ack_intr:
- wrmsrl_safe(MSR_HWP_STATUS, 0);
+ wrmsrq_safe(MSR_HWP_STATUS, 0);
raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
}
@@ -1908,8 +2027,8 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
if (!cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
return;
- /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+ /* wrmsrq_on_cpu has to be outside spinlock as this can result in IPC */
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
raw_spin_lock_irq(&hwp_notify_lock);
cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
@@ -1936,9 +2055,9 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;
- /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask);
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+ /* wrmsrq_on_cpu has to be outside spinlock as this can result in IPC */
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
}
@@ -1977,9 +2096,9 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
/* First disable HWP notification interrupt till we activate again */
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
- wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
intel_pstate_enable_hwp_interrupt(cpudata);
@@ -1993,7 +2112,7 @@ static int atom_get_min_pstate(int not_used)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_RATIOS, value);
+ rdmsrq(MSR_ATOM_CORE_RATIOS, value);
return (value >> 8) & 0x7F;
}
@@ -2001,7 +2120,7 @@ static int atom_get_max_pstate(int not_used)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_RATIOS, value);
+ rdmsrq(MSR_ATOM_CORE_RATIOS, value);
return (value >> 16) & 0x7F;
}
@@ -2009,7 +2128,7 @@ static int atom_get_turbo_pstate(int not_used)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
+ rdmsrq(MSR_ATOM_CORE_TURBO_RATIOS, value);
return value & 0x7F;
}
@@ -2044,7 +2163,7 @@ static int silvermont_get_scaling(void)
static int silvermont_freq_table[] = {
83300, 100000, 133300, 116700, 80000};
- rdmsrl(MSR_FSB_FREQ, value);
+ rdmsrq(MSR_FSB_FREQ, value);
i = value & 0x7;
WARN_ON(i > 4);
@@ -2060,7 +2179,7 @@ static int airmont_get_scaling(void)
83300, 100000, 133300, 116700, 80000,
93300, 90000, 88900, 87500};
- rdmsrl(MSR_FSB_FREQ, value);
+ rdmsrq(MSR_FSB_FREQ, value);
i = value & 0xF;
WARN_ON(i > 8);
@@ -2071,7 +2190,7 @@ static void atom_get_vid(struct cpudata *cpudata)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_VIDS, value);
+ rdmsrq(MSR_ATOM_CORE_VIDS, value);
cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
cpudata->vid.ratio = div_fp(
@@ -2079,7 +2198,7 @@ static void atom_get_vid(struct cpudata *cpudata)
int_tofp(cpudata->pstate.max_pstate -
cpudata->pstate.min_pstate));
- rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
+ rdmsrq(MSR_ATOM_CORE_TURBO_VIDS, value);
cpudata->vid.turbo = value & 0x7f;
}
@@ -2087,7 +2206,7 @@ static int core_get_min_pstate(int cpu)
{
u64 value;
- rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
+ rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
return (value >> 40) & 0xFF;
}
@@ -2095,7 +2214,7 @@ static int core_get_max_pstate_physical(int cpu)
{
u64 value;
- rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
+ rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
return (value >> 8) & 0xFF;
}
@@ -2109,13 +2228,13 @@ static int core_get_tdp_ratio(int cpu, u64 plat_info)
int err;
/* Get the TDP level (0, 1, 2) to get ratios */
- err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
+ err = rdmsrq_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
if (err)
return err;
/* TDP MSR are continuous starting at 0x648 */
tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
- err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
+ err = rdmsrq_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
if (err)
return err;
@@ -2140,7 +2259,7 @@ static int core_get_max_pstate(int cpu)
int tdp_ratio;
int err;
- rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
+ rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
max_pstate = (plat_info >> 8) & 0xFF;
tdp_ratio = core_get_tdp_ratio(cpu, plat_info);
@@ -2152,7 +2271,7 @@ static int core_get_max_pstate(int cpu)
return tdp_ratio;
}
- err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
+ err = rdmsrq_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
if (!err) {
int tar_levels;
@@ -2172,7 +2291,7 @@ static int core_get_turbo_pstate(int cpu)
u64 value;
int nont, ret;
- rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
+ rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
nont = core_get_max_pstate(cpu);
ret = (value) & 255;
if (ret <= nont)
@@ -2201,7 +2320,7 @@ static int knl_get_turbo_pstate(int cpu)
u64 value;
int nont, ret;
- rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
+ rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
nont = core_get_max_pstate(cpu);
ret = (((value) >> 8) & 0xFF);
if (ret <= nont)
@@ -2247,7 +2366,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
* the CPU being updated, so force the register update to run on the
* right CPU.
*/
- wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+ wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, pstate));
}
@@ -2354,7 +2473,7 @@ static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
return;
hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
- wrmsrl(MSR_HWP_REQUEST, hwp_req);
+ wrmsrq(MSR_HWP_REQUEST, hwp_req);
cpu->last_update = cpu->sample.time;
}
@@ -2367,7 +2486,7 @@ static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
expired = time_after64(cpu->sample.time, cpu->last_update +
hwp_boost_hold_time_ns);
if (expired) {
- wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
+ wrmsrq(MSR_HWP_REQUEST, cpu->hwp_req_cached);
cpu->hwp_boost_min = 0;
}
}
@@ -2428,8 +2547,8 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
u64 tsc;
local_irq_save(flags);
- rdmsrl(MSR_IA32_APERF, aperf);
- rdmsrl(MSR_IA32_MPERF, mperf);
+ rdmsrq(MSR_IA32_APERF, aperf);
+ rdmsrq(MSR_IA32_MPERF, mperf);
tsc = rdtsc();
if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
local_irq_restore(flags);
@@ -2523,7 +2642,7 @@ static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
return;
cpu->pstate.current_pstate = pstate;
- wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
+ wrmsrq(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}
static void intel_pstate_adjust_pstate(struct cpudata *cpu)
@@ -3103,19 +3222,19 @@ static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
WRITE_ONCE(cpu->hwp_req_cached, value);
if (fast_switch)
- wrmsrl(MSR_HWP_REQUEST, value);
+ wrmsrq(MSR_HWP_REQUEST, value);
else
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}
static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
u32 target_pstate, bool fast_switch)
{
if (fast_switch)
- wrmsrl(MSR_IA32_PERF_CTL,
+ wrmsrq(MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
else
- wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+ wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
}
@@ -3259,7 +3378,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
intel_pstate_get_hwp_cap(cpu);
- rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
+ rdmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
WRITE_ONCE(cpu->hwp_req_cached, value);
cpu->epp_cached = intel_pstate_get_epp(cpu, value);
@@ -3326,7 +3445,7 @@ static int intel_cpufreq_suspend(struct cpufreq_policy *policy)
* written by it may not be suitable.
*/
value &= ~HWP_DESIRED_PERF(~0L);
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
WRITE_ONCE(cpu->hwp_req_cached, value);
}
@@ -3576,7 +3695,7 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
id = x86_match_cpu(intel_pstate_cpu_oob_ids);
if (id) {
- rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
+ rdmsrq(MSR_MISC_PWR_MGMT, misc_pwr);
if (misc_pwr & BITMASK_OOB) {
pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
@@ -3632,7 +3751,7 @@ static bool intel_pstate_hwp_is_enabled(void)
{
u64 value;
- rdmsrl(MSR_PM_ENABLE, value);
+ rdmsrq(MSR_PM_ENABLE, value);
return !!(value & 0x1);
}
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 68ccd73c8129..ba0e08c8486a 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -136,7 +136,7 @@ static void do_longhaul1(unsigned int mults_index)
{
union msr_bcr2 bcr2;
- rdmsrl(MSR_VIA_BCR2, bcr2.val);
+ rdmsrq(MSR_VIA_BCR2, bcr2.val);
/* Enable software clock multiplier */
bcr2.bits.ESOFTBF = 1;
bcr2.bits.CLOCKMUL = mults_index & 0xff;
@@ -144,16 +144,16 @@ static void do_longhaul1(unsigned int mults_index)
/* Sync to timer tick */
safe_halt();
/* Change frequency on next halt or sleep */
- wrmsrl(MSR_VIA_BCR2, bcr2.val);
+ wrmsrq(MSR_VIA_BCR2, bcr2.val);
/* Invoke transition */
ACPI_FLUSH_CPU_CACHE();
halt();
/* Disable software clock multiplier */
local_irq_disable();
- rdmsrl(MSR_VIA_BCR2, bcr2.val);
+ rdmsrq(MSR_VIA_BCR2, bcr2.val);
bcr2.bits.ESOFTBF = 0;
- wrmsrl(MSR_VIA_BCR2, bcr2.val);
+ wrmsrq(MSR_VIA_BCR2, bcr2.val);
}
/* For processor with Longhaul MSR */
@@ -164,7 +164,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
union msr_longhaul longhaul;
u32 t;
- rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ rdmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Setup new frequency */
if (!revid_errata)
longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
@@ -180,7 +180,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
/* Raise voltage if necessary */
if (can_scale_voltage && dir) {
longhaul.bits.EnableSoftVID = 1;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Change voltage */
if (!cx_address) {
ACPI_FLUSH_CPU_CACHE();
@@ -194,12 +194,12 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
t = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
longhaul.bits.EnableSoftVID = 0;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
}
/* Change frequency on next halt or sleep */
longhaul.bits.EnableSoftBusRatio = 1;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
if (!cx_address) {
ACPI_FLUSH_CPU_CACHE();
halt();
@@ -212,12 +212,12 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
}
/* Disable bus ratio bit */
longhaul.bits.EnableSoftBusRatio = 0;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Reduce voltage if necessary */
if (can_scale_voltage && !dir) {
longhaul.bits.EnableSoftVID = 1;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Change voltage */
if (!cx_address) {
ACPI_FLUSH_CPU_CACHE();
@@ -231,7 +231,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
t = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
longhaul.bits.EnableSoftVID = 0;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
}
}
@@ -534,7 +534,7 @@ static void longhaul_setup_voltagescaling(void)
unsigned int j, speed, pos, kHz_step, numvscales;
int min_vid_speed;
- rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ rdmsrq(MSR_VIA_LONGHAUL, longhaul.val);
if (!(longhaul.bits.RevisionID & 1)) {
pr_info("Voltage scaling not supported by CPU\n");
return;
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index fb2197dc170f..31039330a3ba 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -219,13 +219,13 @@ static void change_FID(int fid)
{
union msr_fidvidctl fidvidctl;
- rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
if (fidvidctl.bits.FID != fid) {
fidvidctl.bits.SGTC = latency;
fidvidctl.bits.FID = fid;
fidvidctl.bits.VIDC = 0;
fidvidctl.bits.FIDC = 1;
- wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ wrmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
}
}
@@ -234,13 +234,13 @@ static void change_VID(int vid)
{
union msr_fidvidctl fidvidctl;
- rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
if (fidvidctl.bits.VID != vid) {
fidvidctl.bits.SGTC = latency;
fidvidctl.bits.VID = vid;
fidvidctl.bits.FIDC = 0;
fidvidctl.bits.VIDC = 1;
- wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ wrmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
}
}
@@ -260,7 +260,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
fid = powernow_table[index].driver_data & 0xFF;
vid = (powernow_table[index].driver_data & 0xFF00) >> 8;
- rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
cfid = fidvidstatus.bits.CFID;
freqs.old = fsb * fid_codes[cfid] / 10;
@@ -557,7 +557,7 @@ static unsigned int powernow_get(unsigned int cpu)
if (cpu)
return 0;
- rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
cfid = fidvidstatus.bits.CFID;
return fsb * fid_codes[cfid] / 10;
@@ -598,7 +598,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
if (policy->cpu != 0)
return -ENODEV;
- rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
recalibrate_cpu_khz();
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 4e3ba6e68c32..f7512b4e923e 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -482,7 +482,7 @@ static void check_supported_cpu(void *_rc)
cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
if ((edx & P_STATE_TRANSITION_CAPABLE)
!= P_STATE_TRANSITION_CAPABLE) {
- pr_info("Power state transitions not supported\n");
+ pr_info_once("Power state transitions not supported\n");
return;
}
*rc = 0;
diff --git a/drivers/cpufreq/rcpufreq_dt.rs b/drivers/cpufreq/rcpufreq_dt.rs
new file mode 100644
index 000000000000..94ed81644fe1
--- /dev/null
+++ b/drivers/cpufreq/rcpufreq_dt.rs
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust based implementation of the cpufreq-dt driver.
+
+use kernel::{
+ c_str,
+ clk::Clk,
+ cpu, cpufreq,
+ cpumask::CpumaskVar,
+ device::{Core, Device},
+ error::code::*,
+ fmt,
+ macros::vtable,
+ module_platform_driver, of, opp, platform,
+ prelude::*,
+ str::CString,
+ sync::Arc,
+};
+
+/// Finds exact supply name from the OF node.
+fn find_supply_name_exact(dev: &Device, name: &str) -> Option<CString> {
+ let prop_name = CString::try_from_fmt(fmt!("{}-supply", name)).ok()?;
+ dev.property_present(&prop_name)
+ .then(|| CString::try_from_fmt(fmt!("{name}")).ok())
+ .flatten()
+}
+
+/// Finds the supply names for the CPU from DT.
+fn find_supply_names(dev: &Device, cpu: u32) -> Option<KVec<CString>> {
+ // Try "cpu0" for older DTs, fallback to "cpu".
+ let name = (cpu == 0)
+ .then(|| find_supply_name_exact(dev, "cpu0"))
+ .flatten()
+ .or_else(|| find_supply_name_exact(dev, "cpu"))?;
+
+ let mut list = KVec::with_capacity(1, GFP_KERNEL).ok()?;
+ list.push(name, GFP_KERNEL).ok()?;
+
+ Some(list)
+}
+
+/// Represents the cpufreq dt device.
+struct CPUFreqDTDevice {
+ opp_table: opp::Table,
+ freq_table: opp::FreqTable,
+ _mask: CpumaskVar,
+ _token: Option<opp::ConfigToken>,
+ _clk: Clk,
+}
+
+#[derive(Default)]
+struct CPUFreqDTDriver;
+
+#[vtable]
+impl opp::ConfigOps for CPUFreqDTDriver {}
+
+#[vtable]
+impl cpufreq::Driver for CPUFreqDTDriver {
+ const NAME: &'static CStr = c_str!("cpufreq-dt");
+ const FLAGS: u16 = cpufreq::flags::NEED_INITIAL_FREQ_CHECK | cpufreq::flags::IS_COOLING_DEV;
+ const BOOST_ENABLED: bool = true;
+
+ type PData = Arc<CPUFreqDTDevice>;
+
+ fn init(policy: &mut cpufreq::Policy) -> Result<Self::PData> {
+ let cpu = policy.cpu();
+ // SAFETY: The CPU device is only used during init; it won't get hot-unplugged. The cpufreq
+ // core registers with CPU notifiers and the cpufreq core/driver won't use the CPU device
+ // once the CPU is hot-unplugged.
+ let dev = unsafe { cpu::from_cpu(cpu)? };
+ let mut mask = CpumaskVar::new_zero(GFP_KERNEL)?;
+
+ mask.set(cpu);
+
+ let token = find_supply_names(dev, cpu)
+ .map(|names| {
+ opp::Config::<Self>::new()
+ .set_regulator_names(names)?
+ .set(dev)
+ })
+ .transpose()?;
+
+ // Get OPP-sharing information from "operating-points-v2" bindings.
+ let fallback = match opp::Table::of_sharing_cpus(dev, &mut mask) {
+ Ok(()) => false,
+ Err(e) if e == ENOENT => {
+ // "operating-points-v2" not supported. If the platform hasn't
+ // set sharing CPUs, fall back to all CPUs sharing the `Policy`
+ // for backward compatibility.
+ opp::Table::sharing_cpus(dev, &mut mask).is_err()
+ }
+ Err(e) => return Err(e),
+ };
+
+ // Initialize OPP tables for all policy cpus.
+ //
+ // For platforms not using "operating-points-v2" bindings, we do this
+ // before updating policy cpus. Otherwise, we will end up creating
+ // duplicate OPPs for the CPUs.
+ //
+ // OPPs might be populated at runtime, so don't fail on errors here unless
+ // it is -EPROBE_DEFER.
+ let mut opp_table = match opp::Table::from_of_cpumask(dev, &mut mask) {
+ Ok(table) => table,
+ Err(e) => {
+ if e == EPROBE_DEFER {
+ return Err(e);
+ }
+
+ // The table might have been added dynamically at runtime.
+ opp::Table::from_dev(dev)?
+ }
+ };
+
+ // The OPP table must be initialized, statically or dynamically, by this point.
+ opp_table.opp_count()?;
+
+ // Set sharing cpus for fallback scenario.
+ if fallback {
+ mask.setall();
+ opp_table.set_sharing_cpus(&mut mask)?;
+ }
+
+ let mut transition_latency = opp_table.max_transition_latency_ns() as u32;
+ if transition_latency == 0 {
+ transition_latency = cpufreq::ETERNAL_LATENCY_NS;
+ }
+
+ policy
+ .set_dvfs_possible_from_any_cpu(true)
+ .set_suspend_freq(opp_table.suspend_freq())
+ .set_transition_latency_ns(transition_latency);
+
+ let freq_table = opp_table.cpufreq_table()?;
+ // SAFETY: The `freq_table` is not dropped while it is getting used by the C code.
+ unsafe { policy.set_freq_table(&freq_table) };
+
+ // SAFETY: The returned `clk` is not dropped while it is getting used by the C code.
+ let clk = unsafe { policy.set_clk(dev, None)? };
+
+ mask.copy(policy.cpus());
+
+ Ok(Arc::new(
+ CPUFreqDTDevice {
+ opp_table,
+ freq_table,
+ _mask: mask,
+ _token: token,
+ _clk: clk,
+ },
+ GFP_KERNEL,
+ )?)
+ }
+
+ fn exit(_policy: &mut cpufreq::Policy, _data: Option<Self::PData>) -> Result {
+ Ok(())
+ }
+
+ fn online(_policy: &mut cpufreq::Policy) -> Result {
+ // We did light-weight tear down earlier, nothing to do here.
+ Ok(())
+ }
+
+ fn offline(_policy: &mut cpufreq::Policy) -> Result {
+ // Preserve policy->data and don't free resources on light-weight
+ // tear down.
+ Ok(())
+ }
+
+ fn suspend(policy: &mut cpufreq::Policy) -> Result {
+ policy.generic_suspend()
+ }
+
+ fn verify(data: &mut cpufreq::PolicyData) -> Result {
+ data.generic_verify()
+ }
+
+ fn target_index(policy: &mut cpufreq::Policy, index: cpufreq::TableIndex) -> Result {
+ let Some(data) = policy.data::<Self::PData>() else {
+ return Err(ENOENT);
+ };
+
+ let freq = data.freq_table.freq(index)?;
+ data.opp_table.set_rate(freq)
+ }
+
+ fn get(policy: &mut cpufreq::Policy) -> Result<u32> {
+ policy.generic_get()
+ }
+
+ fn set_boost(_policy: &mut cpufreq::Policy, _state: i32) -> Result {
+ Ok(())
+ }
+
+ fn register_em(policy: &mut cpufreq::Policy) {
+ policy.register_em_opp()
+ }
+}
+
+kernel::of_device_table!(
+ OF_TABLE,
+ MODULE_OF_TABLE,
+ <CPUFreqDTDriver as platform::Driver>::IdInfo,
+ [(of::DeviceId::new(c_str!("operating-points-v2")), ())]
+);
+
+impl platform::Driver for CPUFreqDTDriver {
+ type IdInfo = ();
+ const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+
+ fn probe(
+ pdev: &platform::Device<Core>,
+ _id_info: Option<&Self::IdInfo>,
+ ) -> Result<Pin<KBox<Self>>> {
+ cpufreq::Registration::<CPUFreqDTDriver>::new_foreign_owned(pdev.as_ref())?;
+ Ok(KBox::new(Self {}, GFP_KERNEL)?.into())
+ }
+}
+
+module_platform_driver! {
+ type: CPUFreqDTDriver,
+ name: "cpufreq-dt",
+ author: "Viresh Kumar <viresh.kumar@linaro.org>",
+ description: "Generic CPUFreq DT driver",
+ license: "GPL v2",
+}
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index 103d2519dff7..b360f03a116f 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -21,7 +21,6 @@
#include <linux/io.h>
#include <asm/cpu_device_id.h>
-#include <asm/msr.h>
#define MMCR_BASE 0xfffef000 /* The default base address */
#define OFFS_CPUCTL 0x2 /* CPU Control Register */
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 944e899eb1be..ef078426bfd5 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -393,6 +393,40 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
.set_boost = cpufreq_boost_set_sw,
};
+static bool scmi_dev_used_by_cpus(struct device *scmi_dev)
+{
+ struct device_node *scmi_np = dev_of_node(scmi_dev);
+ struct device_node *cpu_np, *np;
+ struct device *cpu_dev;
+ int cpu, idx;
+
+ if (!scmi_np)
+ return false;
+
+ for_each_possible_cpu(cpu) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ continue;
+
+ cpu_np = dev_of_node(cpu_dev);
+
+ np = of_parse_phandle(cpu_np, "clocks", 0);
+ of_node_put(np);
+
+ if (np == scmi_np)
+ return true;
+
+ idx = of_property_match_string(cpu_np, "power-domain-names", "perf");
+ np = of_parse_phandle(cpu_np, "power-domains", idx);
+ of_node_put(np);
+
+ if (np == scmi_np)
+ return true;
+ }
+
+ return false;
+}
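
Calling of_node_put() before the comparison looks backwards but is deliberate: the reference is only needed while the node would be dereferenced, and here 'np' is used purely as a pointer identity token against scmi_np. Condensed, the idiom is:

	np = of_parse_phandle(cpu_np, "clocks", 0);	/* takes a reference */
	of_node_put(np);	/* safe to drop: the pointer is only compared */
	if (np == scmi_np)	/* no dereference happens after the put */
		return true;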
+
static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
int ret;
@@ -401,7 +435,7 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
handle = sdev->handle;
- if (!handle)
+ if (!handle || !scmi_dev_used_by_cpus(dev))
return -ENODEV;
scmi_cpufreq_driver.driver_data = sdev;
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index 5fb5228f6bf1..2041f59116ce 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -43,7 +43,7 @@ static int psci_pd_power_off(struct generic_pm_domain *pd)
/* OSI mode is enabled, set the corresponding domain state. */
pd_state = state->data;
- psci_set_domain_state(*pd_state);
+ psci_set_domain_state(pd, pd->state_idx, *pd_state);
return 0;
}
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index b46a83f5ffe4..4e1ba35deda9 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -16,7 +16,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/psci.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -36,19 +36,30 @@ struct psci_cpuidle_data {
struct device *dev;
};
+struct psci_cpuidle_domain_state {
+ struct generic_pm_domain *pd;
+ unsigned int state_idx;
+ u32 state;
+};
+
static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
-static DEFINE_PER_CPU(u32, domain_state);
+static DEFINE_PER_CPU(struct psci_cpuidle_domain_state, psci_domain_state);
static bool psci_cpuidle_use_syscore;
static bool psci_cpuidle_use_cpuhp;
-void psci_set_domain_state(u32 state)
+void psci_set_domain_state(struct generic_pm_domain *pd, unsigned int state_idx,
+ u32 state)
{
- __this_cpu_write(domain_state, state);
+ struct psci_cpuidle_domain_state *ds = this_cpu_ptr(&psci_domain_state);
+
+ ds->pd = pd;
+ ds->state_idx = state_idx;
+ ds->state = state;
}
-static inline u32 psci_get_domain_state(void)
+static inline void psci_clear_domain_state(void)
{
- return __this_cpu_read(domain_state);
+ __this_cpu_write(psci_domain_state.state, 0);
}
static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
@@ -58,7 +69,8 @@ static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data);
u32 *states = data->psci_states;
struct device *pd_dev = data->dev;
- u32 state;
+ struct psci_cpuidle_domain_state *ds;
+ u32 state = states[idx];
int ret;
ret = cpu_pm_enter();
@@ -71,9 +83,9 @@ static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
else
pm_runtime_put_sync_suspend(pd_dev);
- state = psci_get_domain_state();
- if (!state)
- state = states[idx];
+ ds = this_cpu_ptr(&psci_domain_state);
+ if (ds->state)
+ state = ds->state;
trace_psci_domain_idle_enter(dev->cpu, state, s2idle);
ret = psci_cpu_suspend_enter(state) ? -1 : idx;
@@ -86,8 +98,12 @@ static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
cpu_pm_exit();
+ /* Correct the domain idle-state statistics if we failed to enter. */
+ if (ret == -1 && ds->state)
+ pm_genpd_inc_rejected(ds->pd, ds->state_idx);
+
/* Clear the domain state to start fresh when back from idle. */
- psci_set_domain_state(0);
+ psci_clear_domain_state();
return ret;
}
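
pm_genpd_inc_rejected() is presumably added to the genpd core alongside this patch; the signature implied by the call above is:

	/* Sketch: bump the "rejected" counter for one domain idle state. */
	void pm_genpd_inc_rejected(struct generic_pm_domain *genpd,
				   unsigned int state_idx);

Without it, a failed psci_cpu_suspend_enter() would leave the selected domain idle state counted as successfully entered in the genpd statistics.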
@@ -121,7 +137,7 @@ static int psci_idle_cpuhp_down(unsigned int cpu)
if (pd_dev) {
pm_runtime_put_sync(pd_dev);
/* Clear domain state to start fresh at next online. */
- psci_set_domain_state(0);
+ psci_clear_domain_state();
}
return 0;
@@ -147,7 +163,7 @@ static void psci_idle_syscore_switch(bool suspend)
/* Clear domain state to re-start fresh. */
if (!cleared) {
- psci_set_domain_state(0);
+ psci_clear_domain_state();
cleared = true;
}
}
@@ -407,14 +423,14 @@ deinit:
* to register cpuidle driver then rollback to cancel all CPUs
* registration.
*/
-static int psci_cpuidle_probe(struct platform_device *pdev)
+static int psci_cpuidle_probe(struct faux_device *fdev)
{
int cpu, ret;
struct cpuidle_driver *drv;
struct cpuidle_device *dev;
for_each_present_cpu(cpu) {
- ret = psci_idle_init_cpu(&pdev->dev, cpu);
+ ret = psci_idle_init_cpu(&fdev->dev, cpu);
if (ret)
goto out_fail;
}
@@ -434,26 +450,36 @@ out_fail:
return ret;
}
-static struct platform_driver psci_cpuidle_driver = {
+static struct faux_device_ops psci_cpuidle_ops = {
.probe = psci_cpuidle_probe,
- .driver = {
- .name = "psci-cpuidle",
- },
};
+static bool __init dt_idle_state_present(void)
+{
+ struct device_node *cpu_node __free(device_node) =
+ of_cpu_device_node_get(cpumask_first(cpu_possible_mask));
+ if (!cpu_node)
+ return false;
+
+ struct device_node *state_node __free(device_node) =
+ of_get_cpu_state_node(cpu_node, 0);
+ if (!state_node)
+ return false;
+
+ return !!of_match_node(psci_idle_state_match, state_node);
+}
+
static int __init psci_idle_init(void)
{
- struct platform_device *pdev;
- int ret;
+ struct faux_device *fdev;
- ret = platform_driver_register(&psci_cpuidle_driver);
- if (ret)
- return ret;
+ if (!dt_idle_state_present())
+ return 0;
- pdev = platform_device_register_simple("psci-cpuidle", -1, NULL, 0);
- if (IS_ERR(pdev)) {
- platform_driver_unregister(&psci_cpuidle_driver);
- return PTR_ERR(pdev);
+ fdev = faux_device_create("psci-cpuidle", NULL, &psci_cpuidle_ops);
+ if (!fdev) {
+ pr_err("Failed to create psci-cpuidle device\n");
+ return -ENODEV;
}
return 0;
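
faux_device_create() removes the platform-device boilerplate for a driver that has no physical or firmware backing: the faux bus probes the device synchronously, so failure surfaces as a NULL return rather than needing a separate unregister path. A minimal usage sketch, assuming the <linux/device/faux.h> API as used above:

	static int example_probe(struct faux_device *fdev)
	{
		dev_info(&fdev->dev, "probed\n");
		return 0;	/* non-zero makes faux_device_create() fail */
	}

	static struct faux_device_ops example_ops = {
		.probe = example_probe,
	};

	/* name, parent (NULL = default parent), ops */
	struct faux_device *fdev =
		faux_device_create("example", NULL, &example_ops);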
diff --git a/drivers/cpuidle/cpuidle-psci.h b/drivers/cpuidle/cpuidle-psci.h
index ef004ec7a7c5..d29cbd796cd5 100644
--- a/drivers/cpuidle/cpuidle-psci.h
+++ b/drivers/cpuidle/cpuidle-psci.h
@@ -4,8 +4,10 @@
#define __CPUIDLE_PSCI_H
struct device_node;
+struct generic_pm_domain;
-void psci_set_domain_state(u32 state);
+void psci_set_domain_state(struct generic_pm_domain *pd, unsigned int state_idx,
+ u32 state);
int psci_dt_parse_state_node(struct device_node *np, u32 *state);
#endif /* __CPUIDLE_PSCI_H */
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 39aa0aea61c6..52d5d26fc7c6 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -255,7 +255,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
*/
data->next_timer_ns = KTIME_MAX;
delta_tick = TICK_NSEC / 2;
- data->bucket = which_bucket(KTIME_MAX);
+ data->bucket = BUCKETS - 1;
}
if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index 8fe5e1b47ef9..bfa55c1eab5b 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -19,7 +19,7 @@
*
* Of course, non-timer wakeup sources are more important in some use cases,
* but even then it is generally unnecessary to consider idle duration values
- * greater than the time time till the next timer event, referred as the sleep
+ * greater than the time till the next timer event, referred as the sleep
* length in what follows, because the closest timer will ultimately wake up the
* CPU anyway unless it is woken up earlier.
*
@@ -311,7 +311,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
struct cpuidle_state *s = &drv->states[i];
/*
- * Update the sums of idle state mertics for all of the states
+ * Update the sums of idle state metrics for all of the states
* shallower than the current one.
*/
intercept_sum += prev_bin->intercepts;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 47082782008a..5686369779be 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -530,13 +530,6 @@ source "drivers/crypto/cavium/nitrox/Kconfig"
source "drivers/crypto/marvell/Kconfig"
source "drivers/crypto/intel/Kconfig"
-config CRYPTO_DEV_CAVIUM_ZIP
- tristate "Cavium ZIP driver"
- depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
- help
- Select this option if you want to enable compression/decompression
- acceleration on Cavium's ARM based SoCs
-
config CRYPTO_DEV_QCE
tristate "Qualcomm crypto engine accelerator"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c97f0ebc55ec..22eadcc8f4a2 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -8,12 +8,9 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_I2C) += atmel-i2c.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA204A) += atmel-sha204a.o
-obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
-obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
-obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
@@ -50,3 +47,4 @@ obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
obj-y += intel/
obj-y += starfive/
+obj-y += cavium/
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 19b7fb4a93e8..f9cf00d690e2 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -33,22 +33,30 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG ||
sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) {
- algt->stat_fb_maxsg++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_maxsg++;
+
return true;
}
if (areq->cryptlen < crypto_skcipher_ivsize(tfm)) {
- algt->stat_fb_leniv++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_leniv++;
+
return true;
}
if (areq->cryptlen == 0) {
- algt->stat_fb_len0++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_len0++;
+
return true;
}
if (areq->cryptlen % 16) {
- algt->stat_fb_mod16++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_mod16++;
+
return true;
}
@@ -56,12 +64,16 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
sg = areq->src;
while (sg) {
if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
- algt->stat_fb_srcali++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_srcali++;
+
return true;
}
todo = min(len, sg->length);
if (todo % 4) {
- algt->stat_fb_srclen++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_srclen++;
+
return true;
}
len -= todo;
@@ -72,12 +84,16 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
sg = areq->dst;
while (sg) {
if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
- algt->stat_fb_dstali++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_dstali++;
+
return true;
}
todo = min(len, sg->length);
if (todo % 4) {
- algt->stat_fb_dstlen++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_dstlen++;
+
return true;
}
len -= todo;
@@ -100,9 +116,7 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
algt = container_of(alg, struct sun8i_ce_alg_template,
alg.skcipher.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
algt->stat_fb++;
-#endif
}
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
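
The conversion from #ifdef blocks to IS_ENABLED() keeps the statistics updates visible to the compiler in every configuration: they are parsed and type-checked, then eliminated as dead code when the option is off. This assumes the stat_* fields are declared unconditionally in the struct. The two styles side by side:

	/* before: invisible to the compiler when the option is off */
	#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
		algt->stat_fb++;
	#endif

	/* after: always compiled, discarded as dead code when disabled */
	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
		algt->stat_fb++;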
@@ -146,9 +160,8 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
op->keylen);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_req++;
-#endif
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_req++;
flow = rctx->flow;
@@ -275,13 +288,16 @@ theend_sgs:
} else {
if (nr_sgs > 0)
dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
+
+ if (nr_sgd > 0)
+ dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
}
theend_iv:
if (areq->iv && ivsize > 0) {
- if (rctx->addr_iv)
+ if (!dma_mapping_error(ce->dev, rctx->addr_iv))
dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
+
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
memcpy(areq->iv, chan->backup_iv, ivsize);
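One detail worth spelling out in the theend_iv change: a dma_addr_t of zero can be a perfectly valid bus address, so "was the IV mapped?" has to be answered by dma_mapping_error() rather than a truthiness test on rctx->addr_iv. A minimal sketch of the pattern; the demo_ helper is illustrative, not driver code:

#include <linux/dma-mapping.h>

static void demo_unmap_iv(struct device *dev, dma_addr_t addr, size_t len)
{
	/* dma_mapping_error() is the only reliable "was it mapped" test */
	if (!dma_mapping_error(dev, addr))
		dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
}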
@@ -434,17 +450,17 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
crypto_skcipher_set_reqsize(sktfm, sizeof(struct sun8i_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm));
- memcpy(algt->fbname,
- crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
- CRYPTO_MAX_ALG_NAME);
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ memcpy(algt->fbname,
+ crypto_skcipher_driver_name(op->fallback_tfm),
+ CRYPTO_MAX_ALG_NAME);
- err = pm_runtime_get_sync(op->ce->dev);
+ err = pm_runtime_resume_and_get(op->ce->dev);
if (err < 0)
goto error_pm;
return 0;
error_pm:
- pm_runtime_put_noidle(op->ce->dev);
crypto_free_skcipher(op->fallback_tfm);
return err;
}
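Every stat_fb_* hunk above (and the matching sun8i-ss hunks further down) makes the same trade: an IS_ENABLED() guard instead of an #ifdef block. A minimal sketch of the idiom, with a hypothetical stats struct standing in for sun8i_ce_alg_template:

#include <linux/kconfig.h>
#include <linux/types.h>

/* hypothetical stand-in for the driver's per-algorithm stats */
struct demo_alg_stats {
	unsigned long stat_fb_maxsg;
};

static bool demo_need_fallback(struct demo_alg_stats *st, int nents, int max_sg)
{
	if (nents > max_sg) {
		/*
		 * IS_ENABLED() evaluates to a constant 0 or 1, so the branch
		 * is always parsed and type-checked, then discarded as dead
		 * code when the debug option is off; an #ifdef block hides
		 * the statement from the compiler entirely.
		 */
		if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
			st->stat_fb_maxsg++;

		return true;
	}

	return false;
}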
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index ec1ffda9ea32..658f520cee0c 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -832,13 +832,12 @@ static int sun8i_ce_pm_init(struct sun8i_ce_dev *ce)
err = pm_runtime_set_suspended(ce->dev);
if (err)
return err;
- pm_runtime_enable(ce->dev);
- return err;
-}
-static void sun8i_ce_pm_exit(struct sun8i_ce_dev *ce)
-{
- pm_runtime_disable(ce->dev);
+ err = devm_pm_runtime_enable(ce->dev);
+ if (err)
+ return err;
+
+ return 0;
}
static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce)
@@ -1041,7 +1040,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
"sun8i-ce-ns", ce);
if (err) {
dev_err(ce->dev, "Cannot request CryptoEngine Non-secure IRQ (err=%d)\n", err);
- goto error_irq;
+ goto error_pm;
}
err = sun8i_ce_register_algs(ce);
@@ -1082,8 +1081,6 @@ static int sun8i_ce_probe(struct platform_device *pdev)
return 0;
error_alg:
sun8i_ce_unregister_algs(ce);
-error_irq:
- sun8i_ce_pm_exit(ce);
error_pm:
sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
return err;
@@ -1104,8 +1101,6 @@ static void sun8i_ce_remove(struct platform_device *pdev)
#endif
sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
-
- sun8i_ce_pm_exit(ce);
}
static const struct of_device_id sun8i_ce_crypto_of_match_table[] = {
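Two runtime-PM idioms drive this file's changes and the init-path hunks in the previous one. devm_pm_runtime_enable() registers a devres action that calls pm_runtime_disable() on driver detach, which is why sun8i_ce_pm_exit() and the error_irq label can go away; pm_runtime_resume_and_get() drops the usage count itself on failure, which is why the error paths lose pm_runtime_put_noidle(). A sketch, assuming dev stands in for ce->dev:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int demo_pm_init(struct device *dev)
{
	int err;

	err = pm_runtime_set_suspended(dev);
	if (err)
		return err;

	/* undone automatically via devres when the driver unbinds */
	return devm_pm_runtime_enable(dev);
}

static int demo_pm_get(struct device *dev)
{
	/* on failure, the usage counter has already been dropped for us */
	return pm_runtime_resume_and_get(dev);
}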
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 6072dd9f390b..bef44f350167 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -23,6 +23,18 @@
#include <linux/string.h>
#include "sun8i-ce.h"
+static void sun8i_ce_hash_stat_fb_inc(struct crypto_ahash *tfm)
+{
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+ struct sun8i_ce_alg_template *algt __maybe_unused;
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+
+ algt = container_of(alg, struct sun8i_ce_alg_template,
+ alg.hash.base);
+ algt->stat_fb++;
+ }
+}
+
int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
{
struct sun8i_ce_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
@@ -48,15 +60,16 @@ int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
sizeof(struct sun8i_ce_hash_reqctx) +
crypto_ahash_reqsize(op->fallback_tfm));
- memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
- CRYPTO_MAX_ALG_NAME);
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ memcpy(algt->fbname,
+ crypto_ahash_driver_name(op->fallback_tfm),
+ CRYPTO_MAX_ALG_NAME);
- err = pm_runtime_get_sync(op->ce->dev);
+ err = pm_runtime_resume_and_get(op->ce->dev);
if (err < 0)
goto error_pm;
return 0;
error_pm:
- pm_runtime_put_noidle(op->ce->dev);
crypto_free_ahash(op->fallback_tfm);
return err;
}
@@ -78,7 +91,9 @@ int sun8i_ce_hash_init(struct ahash_request *areq)
memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx));
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -90,7 +105,9 @@ int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
@@ -102,7 +119,9 @@ int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
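These conversions all share one shape: writing fallback_req.base.flags directly left the completion callback unset, whereas ahash_request_set_callback() forwards both the MAY_SLEEP flag and the original callback, and ahash_request_set_crypt() replaces the separate src/nbytes/result field writes. Condensed into one hypothetical helper:

#include <crypto/hash.h>

static int demo_fallback_digest(struct ahash_request *areq,
				struct ahash_request *fb_req,
				struct crypto_ahash *fb_tfm)
{
	ahash_request_set_tfm(fb_req, fb_tfm);
	/* forward MAY_SLEEP plus the original completion callback */
	ahash_request_set_callback(fb_req,
				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq->base.complete, areq->base.data);
	/* src, result and nbytes in one call instead of three field writes */
	ahash_request_set_crypt(fb_req, areq->src, areq->result, areq->nbytes);

	return crypto_ahash_digest(fb_req);
}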
@@ -113,21 +132,13 @@ int sun8i_ce_hash_final(struct ahash_request *areq)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = areq->result;
-
- if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
- struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ sun8i_ce_hash_stat_fb_inc(tfm);
- algt = container_of(alg, struct sun8i_ce_alg_template,
- alg.hash.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_fb++;
-#endif
- }
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, areq->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -139,10 +150,10 @@ int sun8i_ce_hash_update(struct ahash_request *areq)
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, NULL, areq->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -153,24 +164,14 @@ int sun8i_ce_hash_finup(struct ahash_request *areq)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
+ sun8i_ce_hash_stat_fb_inc(tfm);
- if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
- struct ahash_alg *alg = crypto_ahash_alg(tfm);
-
- algt = container_of(alg, struct sun8i_ce_alg_template,
- alg.hash.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_fb++;
-#endif
- }
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -181,24 +182,14 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
-
- if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
- struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ sun8i_ce_hash_stat_fb_inc(tfm);
- algt = container_of(alg, struct sun8i_ce_alg_template,
- alg.hash.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_fb++;
-#endif
- }
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
return crypto_ahash_digest(&rctx->fallback_req);
}
@@ -213,22 +204,30 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
if (areq->nbytes == 0) {
- algt->stat_fb_len0++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_len0++;
+
return true;
}
/* we need to reserve one SG for the padding */
if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) {
- algt->stat_fb_maxsg++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_maxsg++;
+
return true;
}
sg = areq->src;
while (sg) {
if (sg->length % 4) {
- algt->stat_fb_srclen++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_srclen++;
+
return true;
}
if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
- algt->stat_fb_srcali++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_srcali++;
+
return true;
}
sg = sg_next(sg);
@@ -244,21 +243,11 @@ int sun8i_ce_hash_digest(struct ahash_request *areq)
struct sun8i_ce_alg_template *algt;
struct sun8i_ce_dev *ce;
struct crypto_engine *engine;
- struct scatterlist *sg;
- int nr_sgs, e, i;
+ int e;
if (sun8i_ce_hash_need_fallback(areq))
return sun8i_ce_hash_digest_fb(areq);
- nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
- if (nr_sgs > MAX_SG - 1)
- return sun8i_ce_hash_digest_fb(areq);
-
- for_each_sg(areq->src, sg, nr_sgs, i) {
- if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
- return sun8i_ce_hash_digest_fb(areq);
- }
-
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
ce = algt->ce;
@@ -343,9 +332,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
u32 common;
u64 byte_count;
__le32 *bf;
- void *buf = NULL;
+ void *buf, *result;
int j, i, todo;
- void *result = NULL;
u64 bs;
int digestsize;
dma_addr_t addr_res, addr_pad;
@@ -365,22 +353,22 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA);
if (!buf) {
err = -ENOMEM;
- goto theend;
+ goto err_out;
}
bf = (__le32 *)buf;
result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
if (!result) {
err = -ENOMEM;
- goto theend;
+ goto err_free_buf;
}
flow = rctx->flow;
chan = &ce->chanlist[flow];
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_req++;
-#endif
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_req++;
+
dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);
cet = chan->tl;
@@ -398,7 +386,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
- goto theend;
+ goto err_free_result;
}
len = areq->nbytes;
@@ -411,7 +399,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (len > 0) {
dev_err(ce->dev, "remaining len %d\n", len);
err = -EINVAL;
- goto theend;
+ goto err_unmap_src;
}
addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res);
@@ -419,7 +407,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (dma_mapping_error(ce->dev, addr_res)) {
dev_err(ce->dev, "DMA map dest\n");
err = -EINVAL;
- goto theend;
+ goto err_unmap_src;
}
byte_count = areq->nbytes;
@@ -441,7 +429,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
}
if (!j) {
err = -EINVAL;
- goto theend;
+ goto err_unmap_result;
}
addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
@@ -450,7 +438,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (dma_mapping_error(ce->dev, addr_pad)) {
dev_err(ce->dev, "DMA error on padding SG\n");
err = -EINVAL;
- goto theend;
+ goto err_unmap_result;
}
if (ce->variant->hash_t_dlen_in_bits)
@@ -463,16 +451,25 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));
dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+
+err_unmap_result:
dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
+ if (!err)
+ memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
+err_unmap_src:
+ dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
- memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
-theend:
- kfree(buf);
+err_free_result:
kfree(result);
+
+err_free_buf:
+ kfree(buf);
+
+err_out:
local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
local_bh_enable();
+
return 0;
}
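The relabelled exits in sun8i_ce_hash_run() above follow the kernel's usual unwind ladder: one label per acquired resource, released in reverse order, with the success path falling through the same labels; a failure after a given step therefore no longer unmaps or frees anything that was never set up. The generic shape, as a self-contained sketch with hypothetical allocations:

#include <linux/slab.h>
#include <linux/types.h>

static int demo_unwind(bool fail_mid)
{
	void *buf, *result;
	int err = 0;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	result = kzalloc(32, GFP_KERNEL);
	if (!result) {
		err = -ENOMEM;
		goto err_free_buf;	/* only 'buf' exists so far */
	}

	if (fail_mid) {
		err = -EINVAL;		/* imagine a DMA mapping failing here */
		goto err_free_result;
	}

	/* ... real work; success also falls through the labels ... */

err_free_result:
	kfree(result);
err_free_buf:
	kfree(buf);
	return err;
}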
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 3b5c2af013d0..83df4d719053 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -308,8 +308,8 @@ struct sun8i_ce_hash_tfm_ctx {
* @flow: the flow to use for this request
*/
struct sun8i_ce_hash_reqctx {
- struct ahash_request fallback_req;
int flow;
+ struct ahash_request fallback_req; // keep at the end
};
/*
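The reorder is not cosmetic: crypto_ahash_set_reqsize() (see the init-path hunk earlier) reserves sizeof(struct sun8i_ce_hash_reqctx) plus the fallback tfm's own request size, and that extra space is addressed off the end of the embedded fallback_req, hence the "keep at the end" comment. A sketch of the sizing, assuming the driver's sun8i-ce.h is in scope:

#include <crypto/internal/hash.h>

static void demo_reqsize(struct crypto_ahash *tfm,
			 struct crypto_ahash *fallback_tfm)
{
	/*
	 * fallback_req must stay the last member of sun8i_ce_hash_reqctx:
	 * the fallback's private context occupies the bytes that follow it.
	 */
	crypto_ahash_set_reqsize(tfm,
				 sizeof(struct sun8i_ce_hash_reqctx) +
				 crypto_ahash_reqsize(fallback_tfm));
}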
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 9b9605ce8ee6..8831bcb230c2 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -141,7 +141,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
/* we need to copy all IVs from source in case DMA is bi-directional */
while (sg && len) {
- if (sg_dma_len(sg) == 0) {
+ if (sg->length == 0) {
sg = sg_next(sg);
continue;
}
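The one-liner above is subtler than it looks: sun8i_ss_setup_ivs() walks the scatterlist before dma_map_sg() has run, and sg_dma_len() is only meaningful after a successful mapping (an IOMMU may also coalesce segments, making the two lengths differ). Pre-mapping walks should test the CPU-side sg->length, as in this sketch:

#include <linux/scatterlist.h>

/* pre-DMA walk: count non-empty segments using the CPU-side length */
static unsigned int demo_count_nonempty(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	unsigned int n = 0;

	for (sg = sgl; sg; sg = sg_next(sg))
		if (sg->length)
			n++;

	return n;
}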
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 753f67a36dc5..8bc08089f044 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -150,7 +150,9 @@ int sun8i_ss_hash_init(struct ahash_request *areq)
memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx));
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -162,7 +164,9 @@ int sun8i_ss_hash_export(struct ahash_request *areq, void *out)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
@@ -174,7 +178,9 @@ int sun8i_ss_hash_import(struct ahash_request *areq, const void *in)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -186,9 +192,10 @@ int sun8i_ss_hash_final(struct ahash_request *areq)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = areq->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, areq->result, 0);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
struct ahash_alg *alg = crypto_ahash_alg(tfm);
@@ -212,10 +219,10 @@ int sun8i_ss_hash_update(struct ahash_request *areq)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, NULL, areq->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -227,12 +234,11 @@ int sun8i_ss_hash_finup(struct ahash_request *areq)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
struct ahash_alg *alg = crypto_ahash_alg(tfm);
@@ -256,12 +262,11 @@ static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
struct ahash_alg *alg = crypto_ahash_alg(tfm);
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index e0af611a95d8..38e8a61e9166 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -12,9 +12,6 @@
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <linux/hash.h>
-#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
@@ -72,7 +69,7 @@ static inline int crypto4xx_crypt(struct skcipher_request *req,
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
- __le32 iv[AES_IV_SIZE];
+ __le32 iv[AES_IV_SIZE / 4];
if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
return -EINVAL;
@@ -429,7 +426,7 @@ static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- __le32 iv[16];
+ __le32 iv[4];
u32 tmp_sa[SA_AES128_CCM_LEN + 4];
struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;
unsigned int len = req->cryptlen;
@@ -602,106 +599,3 @@ int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
{
return crypto4xx_crypt_aes_gcm(req, true);
}
-
-/*
- * HASH SHA1 Functions
- */
-static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
- unsigned int sa_len,
- unsigned char ha,
- unsigned char hm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct crypto4xx_alg *my_alg;
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
- struct dynamic_sa_hash160 *sa;
- int rc;
-
- my_alg = container_of(__crypto_ahash_alg(alg), struct crypto4xx_alg,
- alg.u.hash);
- ctx->dev = my_alg->dev;
-
- /* Create SA */
- if (ctx->sa_in || ctx->sa_out)
- crypto4xx_free_sa(ctx);
-
- rc = crypto4xx_alloc_sa(ctx, sa_len);
- if (rc)
- return rc;
-
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct crypto4xx_ctx));
- sa = (struct dynamic_sa_hash160 *)ctx->sa_in;
- set_dynamic_sa_command_0(&sa->ctrl, SA_SAVE_HASH, SA_NOT_SAVE_IV,
- SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
- SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
- SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
- SA_OPCODE_HASH, DIR_INBOUND);
- set_dynamic_sa_command_1(&sa->ctrl, 0, SA_HASH_MODE_HASH,
- CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
- SA_SEQ_MASK_OFF, SA_MC_ENABLE,
- SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
- SA_NOT_COPY_HDR);
- /* Need to zero hash digest in SA */
- memset(sa->inner_digest, 0, sizeof(sa->inner_digest));
- memset(sa->outer_digest, 0, sizeof(sa->outer_digest));
-
- return 0;
-}
-
-int crypto4xx_hash_init(struct ahash_request *req)
-{
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- int ds;
- struct dynamic_sa_ctl *sa;
-
- sa = ctx->sa_in;
- ds = crypto_ahash_digestsize(
- __crypto_ahash_cast(req->base.tfm));
- sa->sa_command_0.bf.digest_len = ds >> 2;
- sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA;
-
- return 0;
-}
-
-int crypto4xx_hash_update(struct ahash_request *req)
-{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct scatterlist dst;
- unsigned int ds = crypto_ahash_digestsize(ahash);
-
- sg_init_one(&dst, req->result, ds);
-
- return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
- req->nbytes, NULL, 0, ctx->sa_in,
- ctx->sa_len, 0, NULL);
-}
-
-int crypto4xx_hash_final(struct ahash_request *req)
-{
- return 0;
-}
-
-int crypto4xx_hash_digest(struct ahash_request *req)
-{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct scatterlist dst;
- unsigned int ds = crypto_ahash_digestsize(ahash);
-
- sg_init_one(&dst, req->result, ds);
-
- return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
- req->nbytes, NULL, 0, ctx->sa_in,
- ctx->sa_len, 0, NULL);
-}
-
-/*
- * SHA1 Algorithm
- */
-int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
-{
- return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
- SA_HASH_MODE_HASH);
-}
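Both stack-buffer fixes in this file are unit corrections: AES_IV_SIZE and the CCM nonce length count bytes, while the arrays are declared as 32-bit words, so __le32 iv[AES_IV_SIZE] reserved 64 bytes where 16 were intended. A compile-time check makes the arithmetic explicit; DEMO_AES_IV_SIZE is a stand-in for the driver's constant:

#include <linux/build_bug.h>
#include <linux/types.h>

#define DEMO_AES_IV_SIZE 16	/* bytes, like the driver's AES_IV_SIZE */

static void demo_iv_sizing(void)
{
	__le32 iv[DEMO_AES_IV_SIZE / 4];	/* 4 words == 16 bytes */

	/* the old iv[DEMO_AES_IV_SIZE] would have occupied 64 bytes */
	BUILD_BUG_ON(sizeof(iv) != DEMO_AES_IV_SIZE);
}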
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index ec3ccfa60445..8cdc66d520c9 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -485,18 +485,6 @@ static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
}
}
-static void crypto4xx_copy_digest_to_dst(void *dst,
- struct pd_uinfo *pd_uinfo,
- struct crypto4xx_ctx *ctx)
-{
- struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
-
- if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
- memcpy(dst, pd_uinfo->sr_va->save_digest,
- SA_HASH_ALG_SHA1_DIGEST_SIZE);
- }
-}
-
static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo)
{
@@ -549,23 +537,6 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
skcipher_request_complete(req, 0);
}
-static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
- struct pd_uinfo *pd_uinfo)
-{
- struct crypto4xx_ctx *ctx;
- struct ahash_request *ahash_req;
-
- ahash_req = ahash_request_cast(pd_uinfo->async_req);
- ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req));
-
- crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
- crypto4xx_ret_sg_desc(dev, pd_uinfo);
-
- if (pd_uinfo->state & PD_ENTRY_BUSY)
- ahash_request_complete(ahash_req, -EINPROGRESS);
- ahash_request_complete(ahash_req, 0);
-}
-
static void crypto4xx_aead_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
@@ -642,9 +613,6 @@ static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
case CRYPTO_ALG_TYPE_AEAD:
crypto4xx_aead_done(dev, pd_uinfo, pd);
break;
- case CRYPTO_ALG_TYPE_AHASH:
- crypto4xx_ahash_done(dev, pd_uinfo);
- break;
}
}
@@ -676,7 +644,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
struct scatterlist *src,
struct scatterlist *dst,
const unsigned int datalen,
- const __le32 *iv, const u32 iv_len,
+ const void *iv, const u32 iv_len,
const struct dynamic_sa_ctl *req_sa,
const unsigned int sa_len,
const unsigned int assoclen,
@@ -912,8 +880,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
}
pd->pd_ctl.w = PD_CTL_HOST_READY |
- ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) ||
- (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
+ ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
PD_CTL_HASH_FINAL : 0);
pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
@@ -1019,10 +986,6 @@ static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
rc = crypto_register_aead(&alg->alg.u.aead);
break;
- case CRYPTO_ALG_TYPE_AHASH:
- rc = crypto_register_ahash(&alg->alg.u.hash);
- break;
-
case CRYPTO_ALG_TYPE_RNG:
rc = crypto_register_rng(&alg->alg.u.rng);
break;
@@ -1048,10 +1011,6 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
list_del(&alg->entry);
switch (alg->alg.type) {
- case CRYPTO_ALG_TYPE_AHASH:
- crypto_unregister_ahash(&alg->alg.u.hash);
- break;
-
case CRYPTO_ALG_TYPE_AEAD:
crypto_unregister_aead(&alg->alg.u.aead);
break;
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 3adcc5e65694..ee36630c670f 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -16,7 +16,6 @@
#include <linux/ratelimit.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
-#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
@@ -135,7 +134,6 @@ struct crypto4xx_alg_common {
u32 type;
union {
struct skcipher_alg cipher;
- struct ahash_alg hash;
struct aead_alg aead;
struct rng_alg rng;
} u;
@@ -147,6 +145,12 @@ struct crypto4xx_alg {
struct crypto4xx_device *dev;
};
+#if IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION >= 120000
+#define BUILD_PD_ACCESS __attribute__((access(read_only, 6, 7)))
+#else
+#define BUILD_PD_ACCESS
+#endif
+
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
int crypto4xx_build_pd(struct crypto_async_request *req,
@@ -154,11 +158,11 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
struct scatterlist *src,
struct scatterlist *dst,
const unsigned int datalen,
- const __le32 *iv, const u32 iv_len,
+ const void *iv, const u32 iv_len,
const struct dynamic_sa_ctl *sa,
const unsigned int sa_len,
const unsigned int assoclen,
- struct scatterlist *dst_tmp);
+ struct scatterlist *dst_tmp) BUILD_PD_ACCESS;
int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
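The BUILD_PD_ACCESS annotation added above uses GCC's access attribute to declare that crypto4xx_build_pd() only reads through its sixth argument (iv), with the extent given by the seventh (iv_len); together with the const __le32 * to const void * relaxation, this lets callers pass differently typed IV buffers while the compiler can still reason about their size. A standalone sketch of the attribute, version-guarded like the hunk; all demo_ names are illustrative:

/* read_only: the callee reads at most 'len' bytes through 'src' */
#if defined(__GNUC__) && __GNUC__ >= 12
#define DEMO_READS(ptr_idx, len_idx) \
	__attribute__((access(read_only, ptr_idx, len_idx)))
#else
#define DEMO_READS(ptr_idx, len_idx)
#endif

unsigned int demo_sum(const void *src, unsigned int len) DEMO_READS(1, 2);

unsigned int demo_sum(const void *src, unsigned int len)
{
	const unsigned char *p = src;
	unsigned int i, sum = 0;

	for (i = 0; i < len; i++)
		sum += p[i];

	return sum;
}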
@@ -177,11 +181,6 @@ int crypto4xx_encrypt_noiv_block(struct skcipher_request *req);
int crypto4xx_decrypt_noiv_block(struct skcipher_request *req);
int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
-int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
-int crypto4xx_hash_digest(struct ahash_request *req);
-int crypto4xx_hash_final(struct ahash_request *req);
-int crypto4xx_hash_update(struct ahash_request *req);
-int crypto4xx_hash_init(struct ahash_request *req);
/*
* Note: Only use this function to copy items that is word aligned.
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 14bf86957d31..27c5d000b4b2 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -1743,7 +1743,8 @@ static struct skcipher_alg aes_xts_alg = {
.base.cra_driver_name = "atmel-xts-aes",
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
- .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
@@ -2220,7 +2221,7 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
{
- alg->cra_flags |= CRYPTO_ALG_ASYNC;
+ alg->cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->cra_alignmask = 0xf;
alg->cra_priority = ATMEL_AES_PRIORITY;
alg->cra_module = THIS_MODULE;
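The flag added here (and in the SHA and TDES hunks below) is informational: CRYPTO_ALG_KERN_DRIVER_ONLY marks an implementation as available only through a kernel driver (here, offload hardware) rather than reproducible in userspace, which consumers querying via AF_ALG can use when choosing between implementations. A sketch:

#include <linux/crypto.h>

static void demo_alg_init(struct crypto_alg *alg)
{
	/* async, and backed solely by a kernel (hardware) driver */
	alg->cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
}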
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 67a170608566..2cc36da163e8 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1254,7 +1254,8 @@ static int atmel_sha_cra_init(struct crypto_tfm *tfm)
static void atmel_sha_alg_init(struct ahash_alg *alg)
{
alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
- alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx);
alg->halg.base.cra_module = THIS_MODULE;
alg->halg.base.cra_init = atmel_sha_cra_init;
@@ -2041,7 +2042,8 @@ static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm)
static void atmel_sha_hmac_alg_init(struct ahash_alg *alg)
{
alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
- alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx);
alg->halg.base.cra_module = THIS_MODULE;
alg->halg.base.cra_init = atmel_sha_hmac_cra_init;
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index de9717e221e4..098f5532f389 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -785,7 +785,7 @@ static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
{
alg->base.cra_priority = ATMEL_TDES_PRIORITY;
- alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
alg->base.cra_module = THIS_MODULE;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index d4b39184dbdb..38ff931059b4 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -573,6 +573,7 @@ static const struct soc_device_attribute caam_imx_soc_table[] = {
{ .soc_id = "i.MX7*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8M*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8ULP", .data = &caam_imx8ulp_data },
+ { .soc_id = "i.MX8QM", .data = &caam_imx8ulp_data },
{ .soc_id = "VF*", .data = &caam_vf610_data },
{ .family = "Freescale i.MX" },
{ /* sentinel */ }
diff --git a/drivers/crypto/cavium/Makefile b/drivers/crypto/cavium/Makefile
index 4679c06b611f..75227c587ed0 100644
--- a/drivers/crypto/cavium/Makefile
+++ b/drivers/crypto/cavium/Makefile
@@ -2,4 +2,5 @@
#
# Makefile for Cavium crypto device drivers
#
-obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += zip/
+obj-$(CONFIG_CRYPTO_DEV_CPT) += cpt/
+obj-$(CONFIG_CRYPTO_DEV_NITROX) += nitrox/
diff --git a/drivers/crypto/cavium/zip/Makefile b/drivers/crypto/cavium/zip/Makefile
deleted file mode 100644
index 020d189d793d..000000000000
--- a/drivers/crypto/cavium/zip/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Cavium's ZIP Driver.
-#
-
-obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += thunderx_zip.o
-thunderx_zip-y := zip_main.o \
- zip_device.o \
- zip_crypto.o \
- zip_mem.o \
- zip_deflate.o \
- zip_inflate.o
diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h
deleted file mode 100644
index 54f6fb054119..000000000000
--- a/drivers/crypto/cavium/zip/common.h
+++ /dev/null
@@ -1,222 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __COMMON_H__
-#define __COMMON_H__
-
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/seq_file.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-/* Device specific zlib function definitions */
-#include "zip_device.h"
-
-/* ZIP device definitions */
-#include "zip_main.h"
-
-/* ZIP memory allocation/deallocation related definitions */
-#include "zip_mem.h"
-
-/* Device specific structure definitions */
-#include "zip_regs.h"
-
-#define ZIP_ERROR -1
-
-#define ZIP_FLUSH_FINISH 4
-
-#define RAW_FORMAT 0 /* for rawpipe */
-#define ZLIB_FORMAT 1 /* for zpipe */
-#define GZIP_FORMAT 2 /* for gzpipe */
-#define LZS_FORMAT 3 /* for lzspipe */
-
-/* Max number of ZIP devices supported */
-#define MAX_ZIP_DEVICES 2
-
-/* Configures the number of zip queues to be used */
-#define ZIP_NUM_QUEUES 2
-
-#define DYNAMIC_STOP_EXCESS 1024
-
-/* Maximum buffer sizes in direct mode */
-#define MAX_INPUT_BUFFER_SIZE (64 * 1024)
-#define MAX_OUTPUT_BUFFER_SIZE (64 * 1024)
-
-/**
- * struct zip_operation - common data structure for comp and decomp operations
- * @input: Next input byte is read from here
- * @output: Next output byte written here
- * @ctx_addr: Inflate context buffer address
- * @history: Pointer to the history buffer
- * @input_len: Number of bytes available at next_in
- * @input_total_len: Total number of input bytes read
- * @output_len: Remaining free space at next_out
- * @output_total_len: Total number of bytes output so far
- * @csum: Checksum value of the uncompressed data
- * @flush: Flush flag
- * @format: Format (depends on stream's wrap)
- * @speed: Speed depends on stream's level
- * @ccode: Compression code ( stream's strategy)
- * @lzs_flag: Flag for LZS support
- * @begin_file: Beginning of file indication for inflate
- * @history_len: Size of the history data
- * @end_file: Ending of the file indication for inflate
- * @compcode: Completion status of the ZIP invocation
- * @bytes_read: Input bytes read in current instruction
- * @bits_processed: Total bits processed for entire file
- * @sizeofptr: To distinguish between ILP32 and LP64
- * @sizeofzops: Optional just for padding
- *
- * This structure is used to maintain the required meta data for the
- * comp and decomp operations.
- */
-struct zip_operation {
- u8 *input;
- u8 *output;
- u64 ctx_addr;
- u64 history;
-
- u32 input_len;
- u32 input_total_len;
-
- u32 output_len;
- u32 output_total_len;
-
- u32 csum;
- u32 flush;
-
- u32 format;
- u32 speed;
- u32 ccode;
- u32 lzs_flag;
-
- u32 begin_file;
- u32 history_len;
-
- u32 end_file;
- u32 compcode;
- u32 bytes_read;
- u32 bits_processed;
-
- u32 sizeofptr;
- u32 sizeofzops;
-};
-
-static inline int zip_poll_result(union zip_zres_s *result)
-{
- int retries = 1000;
-
- while (!result->s.compcode) {
- if (!--retries) {
- pr_err("ZIP ERR: request timed out");
- return -ETIMEDOUT;
- }
- udelay(10);
- /*
- * Force re-reading of compcode which is updated
- * by the ZIP coprocessor.
- */
- rmb();
- }
- return 0;
-}
-
-/* error messages */
-#define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \
- fmt "\n", __func__, __LINE__, ## args)
-
-#ifdef MSG_ENABLE
-/* Enable all messages */
-#define zip_msg(fmt, args...) pr_info("ZIP_MSG:" fmt "\n", ## args)
-#else
-#define zip_msg(fmt, args...)
-#endif
-
-#if defined(ZIP_DEBUG_ENABLE) && defined(MSG_ENABLE)
-
-#ifdef DEBUG_LEVEL
-
-#define FILE_NAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : \
- strrchr(__FILE__, '\\') ? strrchr(__FILE__, '\\') + 1 : __FILE__)
-
-#if DEBUG_LEVEL >= 4
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
- fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
-
-#elif DEBUG_LEVEL >= 3
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
- fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
-
-#elif DEBUG_LEVEL >= 2
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s() : %d: " \
- fmt "\n", __func__, __LINE__, ## args)
-
-#else
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
-
-#endif /* DEBUG LEVEL >=4 */
-
-#else
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
-
-#endif /* DEBUG_LEVEL */
-#else
-
-#define zip_dbg(fmt, args...)
-
-#endif /* ZIP_DEBUG_ENABLE && MSG_ENABLE*/
-
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
deleted file mode 100644
index 02e87f2d50db..000000000000
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ /dev/null
@@ -1,261 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include "zip_crypto.h"
-
-static void zip_static_init_zip_ops(struct zip_operation *zip_ops,
- int lzs_flag)
-{
- zip_ops->flush = ZIP_FLUSH_FINISH;
-
- /* equivalent to level 6 of opensource zlib */
- zip_ops->speed = 1;
-
- if (!lzs_flag) {
- zip_ops->ccode = 0; /* Auto Huffman */
- zip_ops->lzs_flag = 0;
- zip_ops->format = ZLIB_FORMAT;
- } else {
- zip_ops->ccode = 3; /* LZS Encoding */
- zip_ops->lzs_flag = 1;
- zip_ops->format = LZS_FORMAT;
- }
- zip_ops->begin_file = 1;
- zip_ops->history_len = 0;
- zip_ops->end_file = 1;
- zip_ops->compcode = 0;
- zip_ops->csum = 1; /* Adler checksum desired */
-}
-
-static int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
-{
- struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
- struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp;
-
- zip_static_init_zip_ops(comp_ctx, lzs_flag);
- zip_static_init_zip_ops(decomp_ctx, lzs_flag);
-
- comp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
- if (!comp_ctx->input)
- return -ENOMEM;
-
- comp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
- if (!comp_ctx->output)
- goto err_comp_input;
-
- decomp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
- if (!decomp_ctx->input)
- goto err_comp_output;
-
- decomp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
- if (!decomp_ctx->output)
- goto err_decomp_input;
-
- return 0;
-
-err_decomp_input:
- zip_data_buf_free(decomp_ctx->input, MAX_INPUT_BUFFER_SIZE);
-
-err_comp_output:
- zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
-
-err_comp_input:
- zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
-
- return -ENOMEM;
-}
-
-static void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
-{
- struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
- struct zip_operation *dec_ctx = &zip_ctx->zip_decomp;
-
- zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
- zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
-
- zip_data_buf_free(dec_ctx->input, MAX_INPUT_BUFFER_SIZE);
- zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
-}
-
-static int zip_compress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen,
- struct zip_kernel_ctx *zip_ctx)
-{
- struct zip_operation *zip_ops = NULL;
- struct zip_state *zip_state;
- struct zip_device *zip = NULL;
- int ret;
-
- if (!zip_ctx || !src || !dst || !dlen)
- return -ENOMEM;
-
- zip = zip_get_device(zip_get_node_id());
- if (!zip)
- return -ENODEV;
-
- zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
- if (!zip_state)
- return -ENOMEM;
-
- zip_ops = &zip_ctx->zip_comp;
-
- zip_ops->input_len = slen;
- zip_ops->output_len = *dlen;
- memcpy(zip_ops->input, src, slen);
-
- ret = zip_deflate(zip_ops, zip_state, zip);
-
- if (!ret) {
- *dlen = zip_ops->output_len;
- memcpy(dst, zip_ops->output, *dlen);
- }
- kfree(zip_state);
- return ret;
-}
-
-static int zip_decompress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen,
- struct zip_kernel_ctx *zip_ctx)
-{
- struct zip_operation *zip_ops = NULL;
- struct zip_state *zip_state;
- struct zip_device *zip = NULL;
- int ret;
-
- if (!zip_ctx || !src || !dst || !dlen)
- return -ENOMEM;
-
- zip = zip_get_device(zip_get_node_id());
- if (!zip)
- return -ENODEV;
-
- zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
- if (!zip_state)
- return -ENOMEM;
-
- zip_ops = &zip_ctx->zip_decomp;
- memcpy(zip_ops->input, src, slen);
-
- /* Work around for a bug in zlib which needs an extra bytes sometimes */
- if (zip_ops->ccode != 3) /* Not LZS Encoding */
- zip_ops->input[slen++] = 0;
-
- zip_ops->input_len = slen;
- zip_ops->output_len = *dlen;
-
- ret = zip_inflate(zip_ops, zip_state, zip);
-
- if (!ret) {
- *dlen = zip_ops->output_len;
- memcpy(dst, zip_ops->output, *dlen);
- }
- kfree(zip_state);
- return ret;
-}
-
-/* SCOMP framework start */
-void *zip_alloc_scomp_ctx_deflate(void)
-{
- int ret;
- struct zip_kernel_ctx *zip_ctx;
-
- zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
- if (!zip_ctx)
- return ERR_PTR(-ENOMEM);
-
- ret = zip_ctx_init(zip_ctx, 0);
-
- if (ret) {
- kfree_sensitive(zip_ctx);
- return ERR_PTR(ret);
- }
-
- return zip_ctx;
-}
-
-void *zip_alloc_scomp_ctx_lzs(void)
-{
- int ret;
- struct zip_kernel_ctx *zip_ctx;
-
- zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
- if (!zip_ctx)
- return ERR_PTR(-ENOMEM);
-
- ret = zip_ctx_init(zip_ctx, 1);
-
- if (ret) {
- kfree_sensitive(zip_ctx);
- return ERR_PTR(ret);
- }
-
- return zip_ctx;
-}
-
-void zip_free_scomp_ctx(void *ctx)
-{
- struct zip_kernel_ctx *zip_ctx = ctx;
-
- zip_ctx_exit(zip_ctx);
- kfree_sensitive(zip_ctx);
-}
-
-int zip_scomp_compress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
-{
- struct zip_kernel_ctx *zip_ctx = ctx;
-
- return zip_compress(src, slen, dst, dlen, zip_ctx);
-}
-
-int zip_scomp_decompress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
-{
- struct zip_kernel_ctx *zip_ctx = ctx;
-
- return zip_decompress(src, slen, dst, dlen, zip_ctx);
-} /* SCOMP framework end */
diff --git a/drivers/crypto/cavium/zip/zip_crypto.h b/drivers/crypto/cavium/zip/zip_crypto.h
deleted file mode 100644
index 10899ece2d1f..000000000000
--- a/drivers/crypto/cavium/zip/zip_crypto.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_CRYPTO_H__
-#define __ZIP_CRYPTO_H__
-
-#include <crypto/internal/scompress.h>
-#include "common.h"
-#include "zip_deflate.h"
-#include "zip_inflate.h"
-
-struct zip_kernel_ctx {
- struct zip_operation zip_comp;
- struct zip_operation zip_decomp;
-};
-
-void *zip_alloc_scomp_ctx_deflate(void);
-void *zip_alloc_scomp_ctx_lzs(void);
-void zip_free_scomp_ctx(void *zip_ctx);
-int zip_scomp_compress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx);
-int zip_scomp_decompress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx);
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c
deleted file mode 100644
index d7133f857d67..000000000000
--- a/drivers/crypto/cavium/zip/zip_deflate.c
+++ /dev/null
@@ -1,200 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include "common.h"
-#include "zip_deflate.h"
-
-/* Prepares the deflate zip command */
-static int prepare_zip_command(struct zip_operation *zip_ops,
- struct zip_state *s, union zip_inst_s *zip_cmd)
-{
- union zip_zres_s *result_ptr = &s->result;
-
- memset(zip_cmd, 0, sizeof(s->zip_cmd));
- memset(result_ptr, 0, sizeof(s->result));
-
- /* IWORD #0 */
- /* History gather */
- zip_cmd->s.hg = 0;
- /* compression enable = 1 for deflate */
- zip_cmd->s.ce = 1;
- /* sf (sync flush) */
- zip_cmd->s.sf = 1;
- /* ef (end of file) */
- if (zip_ops->flush == ZIP_FLUSH_FINISH) {
- zip_cmd->s.ef = 1;
- zip_cmd->s.sf = 0;
- }
-
- zip_cmd->s.cc = zip_ops->ccode;
- /* ss (compression speed/storage) */
- zip_cmd->s.ss = zip_ops->speed;
-
- /* IWORD #1 */
- /* adler checksum */
- zip_cmd->s.adlercrc32 = zip_ops->csum;
- zip_cmd->s.historylength = zip_ops->history_len;
- zip_cmd->s.dg = 0;
-
- /* IWORD # 6 and 7 - compression input/history pointer */
- zip_cmd->s.inp_ptr_addr.s.addr = __pa(zip_ops->input);
- zip_cmd->s.inp_ptr_ctl.s.length = (zip_ops->input_len +
- zip_ops->history_len);
- zip_cmd->s.ds = 0;
-
- /* IWORD # 8 and 9 - Output pointer */
- zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output);
- zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
- /* maximum number of output-stream bytes that can be written */
- zip_cmd->s.totaloutputlength = zip_ops->output_len;
-
- /* IWORD # 10 and 11 - Result pointer */
- zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
- /* Clearing completion code */
- result_ptr->s.compcode = 0;
-
- return 0;
-}
-
-/**
- * zip_deflate - API to offload deflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to zip device structure
- *
- * This function prepares the zip deflate command and submits it to the zip
- * engine for processing.
- *
- * Return: 0 if successful or error code
- */
-int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev)
-{
- union zip_inst_s *zip_cmd = &s->zip_cmd;
- union zip_zres_s *result_ptr = &s->result;
- u32 queue;
-
- /* Prepares zip command based on the input parameters */
- prepare_zip_command(zip_ops, s, zip_cmd);
-
- atomic64_add(zip_ops->input_len, &zip_dev->stats.comp_in_bytes);
- /* Loads zip command into command queues and rings door bell */
- queue = zip_load_instr(zip_cmd, zip_dev);
-
- /* Stats update for compression requests submitted */
- atomic64_inc(&zip_dev->stats.comp_req_submit);
-
- /* Wait for completion or error */
- zip_poll_result(result_ptr);
-
- /* Stats update for compression requests completed */
- atomic64_inc(&zip_dev->stats.comp_req_complete);
-
- zip_ops->compcode = result_ptr->s.compcode;
- switch (zip_ops->compcode) {
- case ZIP_CMD_NOTDONE:
- zip_dbg("Zip instruction not yet completed");
- return ZIP_ERROR;
-
- case ZIP_CMD_SUCCESS:
- zip_dbg("Zip instruction completed successfully");
- zip_update_cmd_bufs(zip_dev, queue);
- break;
-
- case ZIP_CMD_DTRUNC:
- zip_dbg("Output Truncate error");
- /* Returning ZIP_ERROR to avoid copy to user */
- return ZIP_ERROR;
-
- default:
- zip_err("Zip instruction failed. Code:%d", zip_ops->compcode);
- return ZIP_ERROR;
- }
-
- /* Update the CRC depending on the format */
- switch (zip_ops->format) {
- case RAW_FORMAT:
- zip_dbg("RAW Format: %d ", zip_ops->format);
- /* Get checksum from engine, need to feed it again */
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case ZLIB_FORMAT:
- zip_dbg("ZLIB Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case GZIP_FORMAT:
- zip_dbg("GZIP Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.crc32;
- break;
-
- case LZS_FORMAT:
- zip_dbg("LZS Format: %d ", zip_ops->format);
- break;
-
- default:
- zip_err("Unknown Format:%d\n", zip_ops->format);
- }
-
- atomic64_add(result_ptr->s.totalbyteswritten,
- &zip_dev->stats.comp_out_bytes);
-
- /* Update output_len */
- if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
- /* Dynamic stop && strm->output_len < zipconstants[onfsize] */
- zip_err("output_len (%d) < total bytes written(%d)\n",
- zip_ops->output_len, result_ptr->s.totalbyteswritten);
- zip_ops->output_len = 0;
-
- } else {
- zip_ops->output_len = result_ptr->s.totalbyteswritten;
- }
-
- return 0;
-}
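
Aside: the zip_operation plumbing that drove zip_deflate() lived in the crypto glue (zip_crypto.c, not in this hunk). A minimal, hypothetical sketch of a one-shot raw-deflate call, using only the fields the function above actually reads — the helper name, the error value, and the adler32 seed are assumptions, not the driver's API:

/* Hypothetical one-shot compression call; illustration only. */
static int example_deflate_once(struct zip_device *zip_dev,
				struct zip_state *s,
				u8 *in, u32 in_len, u8 *out, u32 out_cap)
{
	struct zip_operation ops = {};

	ops.input       = in;
	ops.input_len   = in_len;
	ops.output      = out;
	ops.output_len  = out_cap;          /* capped via totaloutputlength */
	ops.format      = RAW_FORMAT;
	ops.ccode       = 0;                /* 0: deflate compression code */
	ops.speed       = 1;                /* IWORD0 SS field */
	ops.flush       = ZIP_FLUSH_FINISH; /* sets EF, clears SF */
	ops.csum        = 1;                /* adler32 seed (assumed) */
	ops.history_len = 0;                /* no preset dictionary */

	if (zip_deflate(&ops, s, zip_dev) != 0)
		return -EIO;                /* NOTDONE/DTRUNC/engine error */

	return ops.output_len;              /* totalbyteswritten on success */
}
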
diff --git a/drivers/crypto/cavium/zip/zip_deflate.h b/drivers/crypto/cavium/zip/zip_deflate.h
deleted file mode 100644
index 1d32e76edc4d..000000000000
--- a/drivers/crypto/cavium/zip/zip_deflate.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_DEFLATE_H__
-#define __ZIP_DEFLATE_H__
-
-/**
- * zip_deflate - API to offload deflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to the structure representing zip device
- *
- * This function prepares the zip deflate command and submits it to the zip
- * engine by ringing the doorbell.
- *
- * Return: 0 if successful or error code
- */
-int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev);
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_device.c b/drivers/crypto/cavium/zip/zip_device.c
deleted file mode 100644
index f174ec29ed69..000000000000
--- a/drivers/crypto/cavium/zip/zip_device.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include "common.h"
-#include "zip_deflate.h"
-
-/**
- * zip_cmd_queue_consumed - Calculates the space consumed in the command queue.
- *
- * @zip_dev: Pointer to zip device structure
- * @queue: Queue number
- *
- * Return: Bytes consumed in the command queue buffer.
- */
-static inline u32 zip_cmd_queue_consumed(struct zip_device *zip_dev, int queue)
-{
- return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) *
- sizeof(u64 *));
-}
-
-/**
- * zip_load_instr - Submits the instruction into the ZIP command queue
- * @instr: Pointer to the instruction to be submitted
- * @zip_dev: Pointer to ZIP device structure to which the instruction is to
- * be submitted
- *
- * This function copies the ZIP instruction to the command queue and rings the
- * doorbell to notify the engine of the instruction submission. The command
- * queue is maintained in a circular fashion. When there is space for exactly
- * one instruction in the queue, next chunk pointer of the queue is made to
- * point to the head of the queue, thus maintaining a circular queue.
- *
- * Return: Queue number to which the instruction was submitted
- */
-u32 zip_load_instr(union zip_inst_s *instr,
- struct zip_device *zip_dev)
-{
- union zip_quex_doorbell dbell;
- u32 queue = 0;
- u32 consumed = 0;
- u64 *ncb_ptr = NULL;
- union zip_nptr_s ncp;
-
- /*
- * Distribute the instructions between the enabled queues based on
- * the CPU id.
- */
- if (raw_smp_processor_id() % 2 == 0)
- queue = 0;
- else
- queue = 1;
-
- zip_dbg("CPU Core: %d Queue number:%d", raw_smp_processor_id(), queue);
-
- /* Take cmd buffer lock */
- spin_lock(&zip_dev->iq[queue].lock);
-
- /*
- * Command Queue implementation
- * 1. If there is place for new instructions, push the cmd at sw_head.
- * 2. If there is place for exactly one instruction, push the new cmd
- * at the sw_head. Make sw_head point to the sw_tail to make it
- * circular. Write sw_head's physical address to the "Next-Chunk
- * Buffer Ptr" to make it cmd_hw_tail.
- * 3. Ring the door bell.
- */
- zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head);
- zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail);
-
- consumed = zip_cmd_queue_consumed(zip_dev, queue);
- /* Check if there is space to push just one cmd */
- if ((consumed + 128) == (ZIP_CMD_QBUF_SIZE - 8)) {
- zip_dbg("Cmd queue space available for single command");
- /* Space for one cmd; push it and make the queue circular */
- memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
- sizeof(union zip_inst_s));
- zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
-
- /* Now, point the "Next-Chunk Buffer Ptr" to sw_head */
- ncb_ptr = zip_dev->iq[queue].sw_head;
-
- zip_dbg("ncb addr :0x%lx sw_head addr :0x%lx",
- ncb_ptr, zip_dev->iq[queue].sw_head - 16);
-
- /* Using Circular command queue */
- zip_dev->iq[queue].sw_head = zip_dev->iq[queue].sw_tail;
- /* Mark this buffer for free */
- zip_dev->iq[queue].free_flag = 1;
-
- /* Write new chunk buffer address at "Next-Chunk Buffer Ptr" */
- ncp.u_reg64 = 0ull;
- ncp.s.addr = __pa(zip_dev->iq[queue].sw_head);
- *ncb_ptr = ncp.u_reg64;
- zip_dbg("*ncb_ptr :0x%lx sw_head[phys] :0x%lx",
- *ncb_ptr, __pa(zip_dev->iq[queue].sw_head));
-
- zip_dev->iq[queue].pend_cnt++;
-
- } else {
- zip_dbg("Enough space is available for commands");
- /* Push this cmd to cmd queue buffer */
- memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
- sizeof(union zip_inst_s));
- zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
-
- zip_dev->iq[queue].pend_cnt++;
- }
- zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
- zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
- zip_dev->iq[queue].hw_tail);
-
- zip_dbg(" Pushed the new cmd : pend_cnt : %d",
- zip_dev->iq[queue].pend_cnt);
-
- /* Ring the doorbell */
- dbell.u_reg64 = 0ull;
- dbell.s.dbell_cnt = 1;
- zip_reg_write(dbell.u_reg64,
- (zip_dev->reg_base + ZIP_QUEX_DOORBELL(queue)));
-
- /* Unlock cmd buffer lock */
- spin_unlock(&zip_dev->iq[queue].lock);
-
- return queue;
-}
-
-/**
- * zip_update_cmd_bufs - Updates the queue statistics after posting the
- * instruction
- * @zip_dev: Pointer to zip device structure
- * @queue: Queue number
- */
-void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue)
-{
- /* Take cmd buffer lock */
- spin_lock(&zip_dev->iq[queue].lock);
-
- /* Check if the previous buffer can be freed */
- if (zip_dev->iq[queue].free_flag == 1) {
- zip_dbg("Free flag. Free cmd buffer, adjust sw head and tail");
- /* Reset the free flag */
- zip_dev->iq[queue].free_flag = 0;
-
- /* Point the hw_tail to start of the new chunk buffer */
- zip_dev->iq[queue].hw_tail = zip_dev->iq[queue].sw_head;
- } else {
- zip_dbg("Free flag not set. increment hw tail");
- zip_dev->iq[queue].hw_tail += 16; /* 16 64_bit words = 128B */
- }
-
- zip_dev->iq[queue].done_cnt++;
- zip_dev->iq[queue].pend_cnt--;
-
- zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
- zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
- zip_dev->iq[queue].hw_tail);
- zip_dbg(" Got CC : pend_cnt : %d\n", zip_dev->iq[queue].pend_cnt);
-
- spin_unlock(&zip_dev->iq[queue].lock);
-}
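
Aside: the wrap test in zip_load_instr() is easier to see in isolation. sw_head and sw_tail are u64 pointers, each instruction occupies 16 words (128 bytes), and the last 8 bytes of the buffer are reserved for the next-chunk pointer — hence the `consumed + 128 == QBUF_SIZE - 8` check. A standalone sketch of that arithmetic (constants copied from zip_main.h below; the helper is illustrative, not driver code):

/* Illustration of the circular-queue wrap condition. */
#define EX_QBUF_SIZE	(8064 + 8)	/* ZIP_CMD_QBUF_SIZE */
#define EX_INST_WORDS	16		/* one union zip_inst_s = 128 B */

static bool ex_only_one_slot_left(const u64 *sw_head, const u64 *sw_tail)
{
	/* Pointer difference counts u64 words; scale to bytes. */
	u32 consumed = (sw_head - sw_tail) * sizeof(u64);

	/* After one more 128-byte instruction, only the 8-byte
	 * next-chunk pointer word remains, so the queue must wrap. */
	return (consumed + EX_INST_WORDS * sizeof(u64)) == (EX_QBUF_SIZE - 8);
}
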
diff --git a/drivers/crypto/cavium/zip/zip_device.h b/drivers/crypto/cavium/zip/zip_device.h
deleted file mode 100644
index 9e18b3b93d38..000000000000
--- a/drivers/crypto/cavium/zip/zip_device.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_DEVICE_H__
-#define __ZIP_DEVICE_H__
-
-#include <linux/types.h>
-#include "zip_main.h"
-
-struct sg_info {
- /*
- * Pointer to the input data when scatter_gather == 0 and
- * pointer to the input gather list buffer when scatter_gather == 1
- */
- union zip_zptr_s *gather;
-
- /*
- * Pointer to the output data when scatter_gather == 0 and
- * pointer to the output scatter list buffer when scatter_gather == 1
- */
- union zip_zptr_s *scatter;
-
- /*
- * Holds size of the output buffer pointed by scatter list
- * when scatter_gather == 1
- */
- u64 scatter_buf_size;
-
- /* for gather data */
- u64 gather_enable;
-
- /* for scatter data */
- u64 scatter_enable;
-
- /* Number of gather list pointers for gather data */
- u32 gbuf_cnt;
-
- /* Number of scatter list pointers for scatter data */
- u32 sbuf_cnt;
-
- /* Buffers allocation state */
- u8 alloc_state;
-};
-
-/**
- * struct zip_state - Structure representing the required information related
- * to a command
- * @zip_cmd: Pointer to zip instruction structure
- * @result: Pointer to zip result structure
- * @ctx: Context pointer for inflate
- * @history: Decompression history pointer
- * @sginfo: Scatter-gather info structure
- */
-struct zip_state {
- union zip_inst_s zip_cmd;
- union zip_zres_s result;
- union zip_zptr_s *ctx;
- union zip_zptr_s *history;
- struct sg_info sginfo;
-};
-
-#define ZIP_CONTEXT_SIZE 2048
-#define ZIP_INFLATE_HISTORY_SIZE 32768
-#define ZIP_DEFLATE_HISTORY_SIZE 32768
-
-#endif
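
Aside: zip_load_instr() advances sw_head by 16 words per command, which only works because union zip_inst_s (defined in zip_regs.h below) is exactly 16 x 64 bits. A compile-time guard for that invariant — a sketch that assumes the driver's headers are in scope — would look like:

/* Sketch: compile-time guard for the 128-byte instruction stride. */
#include <linux/build_bug.h>

static inline void ex_assert_inst_stride(void)
{
	BUILD_BUG_ON(sizeof(union zip_inst_s) != 16 * sizeof(u64));
}
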
diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c
deleted file mode 100644
index 7e0d73e2f89e..000000000000
--- a/drivers/crypto/cavium/zip/zip_inflate.c
+++ /dev/null
@@ -1,223 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include "common.h"
-#include "zip_inflate.h"
-
-static int prepare_inflate_zcmd(struct zip_operation *zip_ops,
- struct zip_state *s, union zip_inst_s *zip_cmd)
-{
- union zip_zres_s *result_ptr = &s->result;
-
- memset(zip_cmd, 0, sizeof(s->zip_cmd));
- memset(result_ptr, 0, sizeof(s->result));
-
- /* IWORD#0 */
-
- /* Decompression History Gather list - no gather list */
- zip_cmd->s.hg = 0;
- /* For decompression, CE must be 0x0. */
- zip_cmd->s.ce = 0;
- /* For decompression, SS must be 0x0. */
- zip_cmd->s.ss = 0;
- /* For decompression, SF should always be set. */
- zip_cmd->s.sf = 1;
-
- /* Begin File */
- if (zip_ops->begin_file == 0)
- zip_cmd->s.bf = 0;
- else
- zip_cmd->s.bf = 1;
-
- zip_cmd->s.ef = 1;
- /* 0: for Deflate decompression, 3: for LZS decompression */
- zip_cmd->s.cc = zip_ops->ccode;
-
- /* IWORD #1*/
-
- /* adler checksum */
- zip_cmd->s.adlercrc32 = zip_ops->csum;
-
- /*
- * HISTORYLENGTH must be 0x0 for any ZIP decompress operation.
- * History data is added to a decompression operation via IWORD3.
- */
- zip_cmd->s.historylength = 0;
- zip_cmd->s.ds = 0;
-
- /* IWORD # 8 and 9 - Output pointer */
- zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output);
- zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
-
- /* Maximum number of output-stream bytes that can be written */
- zip_cmd->s.totaloutputlength = zip_ops->output_len;
-
- zip_dbg("Data Direct Input case ");
-
- /* IWORD # 6 and 7 - input pointer */
- zip_cmd->s.dg = 0;
- zip_cmd->s.inp_ptr_addr.s.addr = __pa((u8 *)zip_ops->input);
- zip_cmd->s.inp_ptr_ctl.s.length = zip_ops->input_len;
-
- /* IWORD # 10 and 11 - Result pointer */
- zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
-
- /* Clearing completion code */
- result_ptr->s.compcode = 0;
-
- /* Returning 0 for the time being. */
- return 0;
-}
-
-/**
- * zip_inflate - API to offload inflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to zip device structure
- *
- * This function prepares the zip inflate command and submits it to the zip
- * engine for processing.
- *
- * Return: 0 if successful or error code
- */
-int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev)
-{
- union zip_inst_s *zip_cmd = &s->zip_cmd;
- union zip_zres_s *result_ptr = &s->result;
- u32 queue;
-
- /* Prepare inflate zip command */
- prepare_inflate_zcmd(zip_ops, s, zip_cmd);
-
- atomic64_add(zip_ops->input_len, &zip_dev->stats.decomp_in_bytes);
-
- /* Load inflate command to zip queue and ring the doorbell */
- queue = zip_load_instr(zip_cmd, zip_dev);
-
- /* Decompression requests submitted stats update */
- atomic64_inc(&zip_dev->stats.decomp_req_submit);
-
- /* Wait for completion or error */
- zip_poll_result(result_ptr);
-
- /* Decompression requests completed stats update */
- atomic64_inc(&zip_dev->stats.decomp_req_complete);
-
- zip_ops->compcode = result_ptr->s.compcode;
- switch (zip_ops->compcode) {
- case ZIP_CMD_NOTDONE:
- zip_dbg("Zip Instruction not yet completed\n");
- return ZIP_ERROR;
-
- case ZIP_CMD_SUCCESS:
- zip_dbg("Zip Instruction completed successfully\n");
- break;
-
- case ZIP_CMD_DYNAMIC_STOP:
- zip_dbg(" Dynamic stop Initiated\n");
- break;
-
- default:
- zip_dbg("Instruction failed. Code = %d\n", zip_ops->compcode);
- atomic64_inc(&zip_dev->stats.decomp_bad_reqs);
- zip_update_cmd_bufs(zip_dev, queue);
- return ZIP_ERROR;
- }
-
- zip_update_cmd_bufs(zip_dev, queue);
-
- if ((zip_ops->ccode == 3) && (zip_ops->flush == 4) &&
- (zip_ops->compcode != ZIP_CMD_DYNAMIC_STOP))
- result_ptr->s.ef = 1;
-
- zip_ops->csum = result_ptr->s.adler32;
-
- atomic64_add(result_ptr->s.totalbyteswritten,
- &zip_dev->stats.decomp_out_bytes);
-
- if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
- zip_err("output_len (%d) < total bytes written (%d)\n",
- zip_ops->output_len, result_ptr->s.totalbyteswritten);
- zip_ops->output_len = 0;
- } else {
- zip_ops->output_len = result_ptr->s.totalbyteswritten;
- }
-
- zip_ops->bytes_read = result_ptr->s.totalbytesread;
- zip_ops->bits_processed = result_ptr->s.totalbitsprocessed;
- zip_ops->end_file = result_ptr->s.ef;
- if (zip_ops->end_file) {
- switch (zip_ops->format) {
- case RAW_FORMAT:
- zip_dbg("RAW Format: %d ", zip_ops->format);
- /* Get checksum from engine */
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case ZLIB_FORMAT:
- zip_dbg("ZLIB Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case GZIP_FORMAT:
- zip_dbg("GZIP Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.crc32;
- break;
-
- case LZS_FORMAT:
- zip_dbg("LZS Format: %d ", zip_ops->format);
- break;
-
- default:
- zip_err("Format error:%d\n", zip_ops->format);
- }
- }
-
- return 0;
-}
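
Aside: unlike the compress path, zip_inflate() is written for chunked streams — BF marks the first chunk, the returned adler32 is fed back through zip_ops->csum, and EF in the result signals the end of the stream. A hypothetical caller loop, assuming the caller advances its own input/output buffers between iterations:

/* Hypothetical streaming decompression loop; illustration only. */
static int example_inflate_stream(struct zip_device *zip_dev,
				  struct zip_state *s,
				  struct zip_operation *ops)
{
	ops->begin_file = 1;	/* BF set for the first chunk only */
	ops->csum = 1;		/* adler32 seed (assumed) */

	do {
		if (zip_inflate(ops, s, zip_dev) != 0)
			return -EIO;

		ops->begin_file = 0;
		/* ops->csum now holds the running adler32 for the next
		 * chunk; a real caller would advance its input by
		 * ops->bytes_read and drain ops->output_len bytes here. */
	} while (!ops->end_file);

	return 0;
}
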
diff --git a/drivers/crypto/cavium/zip/zip_inflate.h b/drivers/crypto/cavium/zip/zip_inflate.h
deleted file mode 100644
index 6b20f179978e..000000000000
--- a/drivers/crypto/cavium/zip/zip_inflate.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_INFLATE_H__
-#define __ZIP_INFLATE_H__
-
-/**
- * zip_inflate - API to offload inflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to the structure representing zip device
- *
- * This function prepares the zip inflate command and submits it to the zip
- * engine for processing.
- *
- * Return: 0 if successful or error code
- */
-int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev);
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
deleted file mode 100644
index abd58de4343d..000000000000
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ /dev/null
@@ -1,603 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include "common.h"
-#include "zip_crypto.h"
-
-#define DRV_NAME "ThunderX-ZIP"
-
-static struct zip_device *zip_dev[MAX_ZIP_DEVICES];
-
-static const struct pci_device_id zip_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDERX_ZIP) },
- { 0, }
-};
-
-static void zip_debugfs_init(void);
-static void zip_debugfs_exit(void);
-static int zip_register_compression_device(void);
-static void zip_unregister_compression_device(void);
-
-void zip_reg_write(u64 val, u64 __iomem *addr)
-{
- writeq(val, addr);
-}
-
-u64 zip_reg_read(u64 __iomem *addr)
-{
- return readq(addr);
-}
-
-/*
- * Allocates new ZIP device structure
- * Returns zip_device pointer or NULL if memory allocation fails
- */
-static struct zip_device *zip_alloc_device(struct pci_dev *pdev)
-{
- struct zip_device *zip = NULL;
- int idx;
-
- for (idx = 0; idx < MAX_ZIP_DEVICES; idx++) {
- if (!zip_dev[idx])
- break;
- }
-
- /* To ensure that the index is within the limit */
- if (idx < MAX_ZIP_DEVICES)
- zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL);
-
- if (!zip)
- return NULL;
-
- zip_dev[idx] = zip;
- zip->index = idx;
- return zip;
-}
-
-/**
- * zip_get_device - Get ZIP device based on node id of cpu
- *
- * @node: Node id of the current cpu
- * Return: Pointer to Zip device structure
- */
-struct zip_device *zip_get_device(int node)
-{
- if ((node < MAX_ZIP_DEVICES) && (node >= 0))
- return zip_dev[node];
-
- zip_err("ZIP device not found for node id %d\n", node);
- return NULL;
-}
-
-/**
- * zip_get_node_id - Get the node id of the current cpu
- *
- * Return: Node id of the current cpu
- */
-int zip_get_node_id(void)
-{
- return cpu_to_node(raw_smp_processor_id());
-}
-
-/* Initializes the ZIP h/w sub-system */
-static int zip_init_hw(struct zip_device *zip)
-{
- union zip_cmd_ctl cmd_ctl;
- union zip_constants constants;
- union zip_que_ena que_ena;
- union zip_quex_map que_map;
- union zip_que_pri que_pri;
-
- union zip_quex_sbuf_addr que_sbuf_addr;
- union zip_quex_sbuf_ctl que_sbuf_ctl;
-
- int q = 0;
-
- /* Enable the ZIP Engine(Core) Clock */
- cmd_ctl.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CMD_CTL);
- cmd_ctl.s.forceclk = 1;
- zip_reg_write(cmd_ctl.u_reg64 & 0xFF, (zip->reg_base + ZIP_CMD_CTL));
-
- zip_msg("ZIP_CMD_CTL : 0x%016llx",
- zip_reg_read(zip->reg_base + ZIP_CMD_CTL));
-
- constants.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CONSTANTS);
- zip->depth = constants.s.depth;
- zip->onfsize = constants.s.onfsize;
- zip->ctxsize = constants.s.ctxsize;
-
- zip_msg("depth: 0x%016llx , onfsize : 0x%016llx , ctxsize : 0x%016llx",
- zip->depth, zip->onfsize, zip->ctxsize);
-
- /*
- * Program ZIP_QUE(0..7)_SBUF_ADDR and ZIP_QUE(0..7)_SBUF_CTL to
- * have the correct buffer pointer and size configured for each
- * instruction queue.
- */
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- que_sbuf_ctl.u_reg64 = 0ull;
- que_sbuf_ctl.s.size = (ZIP_CMD_QBUF_SIZE / sizeof(u64));
- que_sbuf_ctl.s.inst_be = 0;
- que_sbuf_ctl.s.stream_id = 0;
- zip_reg_write(que_sbuf_ctl.u_reg64,
- (zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
-
- zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q,
- zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
- }
-
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- memset(&zip->iq[q], 0x0, sizeof(struct zip_iq));
-
- spin_lock_init(&zip->iq[q].lock);
-
- if (zip_cmd_qbuf_alloc(zip, q)) {
- while (q != 0) {
- q--;
- zip_cmd_qbuf_free(zip, q);
- }
- return -ENOMEM;
- }
-
- /* Initialize tail ptr to head */
- zip->iq[q].sw_tail = zip->iq[q].sw_head;
- zip->iq[q].hw_tail = zip->iq[q].sw_head;
-
- /* Write the physical addr to register */
- que_sbuf_addr.u_reg64 = 0ull;
- que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >>
- ZIP_128B_ALIGN);
-
- zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q,
- (u64)que_sbuf_addr.s.ptr);
-
- zip_reg_write(que_sbuf_addr.u_reg64,
- (zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
-
- zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q,
- zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
-
- zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
- zip->iq[q].sw_head, zip->iq[q].sw_tail,
- zip->iq[q].hw_tail);
- zip_dbg("sw_head phy addr : 0x%lx", que_sbuf_addr.s.ptr);
- }
-
- /*
- * Queue-to-ZIP core mapping
- * If a queue is not mapped to a particular core, it is equivalent to
- * the ZIP core being disabled.
- */
- que_ena.u_reg64 = 0x0ull;
- /* Enabling queues based on ZIP_NUM_QUEUES */
- for (q = 0; q < ZIP_NUM_QUEUES; q++)
- que_ena.s.ena |= (0x1 << q);
- zip_reg_write(que_ena.u_reg64, (zip->reg_base + ZIP_QUE_ENA));
-
- zip_msg("QUE_ENA : 0x%016llx",
- zip_reg_read(zip->reg_base + ZIP_QUE_ENA));
-
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- que_map.u_reg64 = 0ull;
- /* Mapping each queue to two ZIP cores */
- que_map.s.zce = 0x3;
- zip_reg_write(que_map.u_reg64,
- (zip->reg_base + ZIP_QUEX_MAP(q)));
-
- zip_msg("QUE_MAP(%d) : 0x%016llx", q,
- zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q)));
- }
-
- que_pri.u_reg64 = 0ull;
- for (q = 0; q < ZIP_NUM_QUEUES; q++)
- que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */
- zip_reg_write(que_pri.u_reg64, (zip->reg_base + ZIP_QUE_PRI));
-
- zip_msg("QUE_PRI %016llx", zip_reg_read(zip->reg_base + ZIP_QUE_PRI));
-
- return 0;
-}
-
-static void zip_reset(struct zip_device *zip)
-{
- union zip_cmd_ctl cmd_ctl;
-
- cmd_ctl.u_reg64 = 0x0ull;
- cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */
- zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
-}
-
-static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct device *dev = &pdev->dev;
- struct zip_device *zip = NULL;
- int err;
-
- zip = zip_alloc_device(pdev);
- if (!zip)
- return -ENOMEM;
-
- dev_info(dev, "Found ZIP device %d %x:%x on Node %d\n", zip->index,
- pdev->vendor, pdev->device, dev_to_node(dev));
-
- pci_set_drvdata(pdev, zip);
- zip->pdev = pdev;
-
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(dev, "Failed to enable PCI device");
- goto err_free_device;
- }
-
- err = pci_request_regions(pdev, DRV_NAME);
- if (err) {
- dev_err(dev, "PCI request regions failed 0x%x", err);
- goto err_disable_device;
- }
-
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
- if (err) {
- dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
- goto err_release_regions;
- }
-
- /* MAP configuration registers */
- zip->reg_base = pci_ioremap_bar(pdev, PCI_CFG_ZIP_PF_BAR0);
- if (!zip->reg_base) {
- dev_err(dev, "ZIP: Cannot map BAR0 CSR memory space, aborting");
- err = -ENOMEM;
- goto err_release_regions;
- }
-
- /* Initialize ZIP Hardware */
- err = zip_init_hw(zip);
- if (err)
- goto err_release_regions;
-
- /* Register with the Kernel Crypto Interface */
- err = zip_register_compression_device();
- if (err < 0) {
- zip_err("ZIP: Kernel Crypto Registration failed\n");
- goto err_register;
- }
-
- /* comp-decomp statistics are handled with debugfs interface */
- zip_debugfs_init();
-
- return 0;
-
-err_register:
- zip_reset(zip);
-
-err_release_regions:
- if (zip->reg_base)
- iounmap(zip->reg_base);
- pci_release_regions(pdev);
-
-err_disable_device:
- pci_disable_device(pdev);
-
-err_free_device:
- pci_set_drvdata(pdev, NULL);
-
- /* Remove zip_dev from zip_device list, free the zip_device memory */
- zip_dev[zip->index] = NULL;
- devm_kfree(dev, zip);
-
- return err;
-}
-
-static void zip_remove(struct pci_dev *pdev)
-{
- struct zip_device *zip = pci_get_drvdata(pdev);
- int q = 0;
-
- if (!zip)
- return;
-
- zip_debugfs_exit();
-
- zip_unregister_compression_device();
-
- if (zip->reg_base) {
- zip_reset(zip);
- iounmap(zip->reg_base);
- }
-
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-
- /*
- * Free Command Queue buffers. This free should be called for all
- * the enabled Queues.
- */
- for (q = 0; q < ZIP_NUM_QUEUES; q++)
- zip_cmd_qbuf_free(zip, q);
-
- pci_set_drvdata(pdev, NULL);
- /* remove zip device from zip device list */
- zip_dev[zip->index] = NULL;
-}
-
-/* PCI Sub-System Interface */
-static struct pci_driver zip_driver = {
- .name = DRV_NAME,
- .id_table = zip_id_table,
- .probe = zip_probe,
- .remove = zip_remove,
-};
-
-/* Kernel Crypto Subsystem Interface */
-
-static struct scomp_alg zip_scomp_deflate = {
- .alloc_ctx = zip_alloc_scomp_ctx_deflate,
- .free_ctx = zip_free_scomp_ctx,
- .compress = zip_scomp_compress,
- .decompress = zip_scomp_decompress,
- .base = {
- .cra_name = "deflate",
- .cra_driver_name = "deflate-scomp-cavium",
- .cra_module = THIS_MODULE,
- .cra_priority = 300,
- }
-};
-
-static struct scomp_alg zip_scomp_lzs = {
- .alloc_ctx = zip_alloc_scomp_ctx_lzs,
- .free_ctx = zip_free_scomp_ctx,
- .compress = zip_scomp_compress,
- .decompress = zip_scomp_decompress,
- .base = {
- .cra_name = "lzs",
- .cra_driver_name = "lzs-scomp-cavium",
- .cra_module = THIS_MODULE,
- .cra_priority = 300,
- }
-};
-
-static int zip_register_compression_device(void)
-{
- int ret;
-
- ret = crypto_register_scomp(&zip_scomp_deflate);
- if (ret < 0) {
- zip_err("Deflate scomp algorithm registration failed\n");
- return ret;
- }
-
- ret = crypto_register_scomp(&zip_scomp_lzs);
- if (ret < 0) {
- zip_err("LZS scomp algorithm registration failed\n");
- goto err_unregister_scomp_deflate;
- }
-
- return ret;
-
-err_unregister_scomp_deflate:
- crypto_unregister_scomp(&zip_scomp_deflate);
-
- return ret;
-}
-
-static void zip_unregister_compression_device(void)
-{
- crypto_unregister_scomp(&zip_scomp_deflate);
- crypto_unregister_scomp(&zip_scomp_lzs);
-}
-
-/*
- * debugfs functions
- */
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-
-/* Displays ZIP device statistics */
-static int zip_stats_show(struct seq_file *s, void *unused)
-{
- u64 val = 0ull;
- u64 avg_chunk = 0ull, avg_cr = 0ull;
- u32 q = 0;
-
- int index = 0;
- struct zip_device *zip;
- struct zip_stats *st;
-
- for (index = 0; index < MAX_ZIP_DEVICES; index++) {
- u64 pending = 0;
-
- if (zip_dev[index]) {
- zip = zip_dev[index];
- st = &zip->stats;
-
- /* Get all the pending requests */
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- val = zip_reg_read((zip->reg_base +
- ZIP_DBG_QUEX_STA(q)));
- pending += val >> 32 & 0xffffff;
- }
-
- val = atomic64_read(&st->comp_req_complete);
- avg_chunk = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;
-
- val = atomic64_read(&st->comp_out_bytes);
- avg_cr = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;
- seq_printf(s, " ZIP Device %d Stats\n"
- "-----------------------------------\n"
- "Comp Req Submitted : \t%lld\n"
- "Comp Req Completed : \t%lld\n"
- "Compress In Bytes : \t%lld\n"
- "Compressed Out Bytes : \t%lld\n"
- "Average Chunk size : \t%llu\n"
- "Average Compression ratio : \t%llu\n"
- "Decomp Req Submitted : \t%lld\n"
- "Decomp Req Completed : \t%lld\n"
- "Decompress In Bytes : \t%lld\n"
- "Decompressed Out Bytes : \t%lld\n"
- "Decompress Bad requests : \t%lld\n"
- "Pending Req : \t%lld\n"
- "---------------------------------\n",
- index,
- (u64)atomic64_read(&st->comp_req_submit),
- (u64)atomic64_read(&st->comp_req_complete),
- (u64)atomic64_read(&st->comp_in_bytes),
- (u64)atomic64_read(&st->comp_out_bytes),
- avg_chunk,
- avg_cr,
- (u64)atomic64_read(&st->decomp_req_submit),
- (u64)atomic64_read(&st->decomp_req_complete),
- (u64)atomic64_read(&st->decomp_in_bytes),
- (u64)atomic64_read(&st->decomp_out_bytes),
- (u64)atomic64_read(&st->decomp_bad_reqs),
- pending);
- }
- }
- return 0;
-}
-
-/* Clears stats data */
-static int zip_clear_show(struct seq_file *s, void *unused)
-{
- int index = 0;
-
- for (index = 0; index < MAX_ZIP_DEVICES; index++) {
- if (zip_dev[index]) {
- memset(&zip_dev[index]->stats, 0,
- sizeof(struct zip_stats));
- seq_printf(s, "Cleared stats for zip %d\n", index);
- }
- }
-
- return 0;
-}
-
-static struct zip_registers zipregs[64] = {
- {"ZIP_CMD_CTL ", 0x0000ull},
- {"ZIP_THROTTLE ", 0x0010ull},
- {"ZIP_CONSTANTS ", 0x00A0ull},
- {"ZIP_QUE0_MAP ", 0x1400ull},
- {"ZIP_QUE1_MAP ", 0x1408ull},
- {"ZIP_QUE_ENA ", 0x0500ull},
- {"ZIP_QUE_PRI ", 0x0508ull},
- {"ZIP_QUE0_DONE ", 0x2000ull},
- {"ZIP_QUE1_DONE ", 0x2008ull},
- {"ZIP_QUE0_DOORBELL ", 0x4000ull},
- {"ZIP_QUE1_DOORBELL ", 0x4008ull},
- {"ZIP_QUE0_SBUF_ADDR ", 0x1000ull},
- {"ZIP_QUE1_SBUF_ADDR ", 0x1008ull},
- {"ZIP_QUE0_SBUF_CTL ", 0x1200ull},
- {"ZIP_QUE1_SBUF_CTL ", 0x1208ull},
- { NULL, 0}
-};
-
-/* Prints registers' contents */
-static int zip_regs_show(struct seq_file *s, void *unused)
-{
- u64 val = 0;
- int i = 0, index = 0;
-
- for (index = 0; index < MAX_ZIP_DEVICES; index++) {
- if (zip_dev[index]) {
- seq_printf(s, "--------------------------------\n"
- " ZIP Device %d Registers\n"
- "--------------------------------\n",
- index);
-
- i = 0;
-
- while (zipregs[i].reg_name) {
- val = zip_reg_read((zip_dev[index]->reg_base +
- zipregs[i].reg_offset));
- seq_printf(s, "%s: 0x%016llx\n",
- zipregs[i].reg_name, val);
- i++;
- }
- }
- }
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(zip_stats);
-DEFINE_SHOW_ATTRIBUTE(zip_clear);
-DEFINE_SHOW_ATTRIBUTE(zip_regs);
-
-/* Root directory for thunderx_zip debugfs entry */
-static struct dentry *zip_debugfs_root;
-
-static void zip_debugfs_init(void)
-{
- if (!debugfs_initialized())
- return;
-
- zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL);
-
- /* Creating files for entries inside thunderx_zip directory */
- debugfs_create_file("zip_stats", 0444, zip_debugfs_root, NULL,
- &zip_stats_fops);
-
- debugfs_create_file("zip_clear", 0444, zip_debugfs_root, NULL,
- &zip_clear_fops);
-
- debugfs_create_file("zip_regs", 0444, zip_debugfs_root, NULL,
- &zip_regs_fops);
-
-}
-
-static void zip_debugfs_exit(void)
-{
- debugfs_remove_recursive(zip_debugfs_root);
-}
-
-#else
-static void __init zip_debugfs_init(void) { }
-static void __exit zip_debugfs_exit(void) { }
-#endif
-/* debugfs - end */
-
-module_pci_driver(zip_driver);
-
-MODULE_AUTHOR("Cavium Inc");
-MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(pci, zip_id_table);
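
Aside: the three DEFINE_SHOW_ATTRIBUTE() invocations above are what produce the zip_stats_fops/zip_clear_fops/zip_regs_fops handed to debugfs_create_file(). The macro (from <linux/seq_file.h>) expands to single_open() boilerplate, roughly as follows for zip_stats:

/* Approximate expansion of DEFINE_SHOW_ATTRIBUTE(zip_stats). */
static int zip_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, zip_stats_show, inode->i_private);
}

static const struct file_operations zip_stats_fops = {
	.owner   = THIS_MODULE,
	.open    = zip_stats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
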
diff --git a/drivers/crypto/cavium/zip/zip_main.h b/drivers/crypto/cavium/zip/zip_main.h
deleted file mode 100644
index e1e4fa92ce80..000000000000
--- a/drivers/crypto/cavium/zip/zip_main.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_MAIN_H__
-#define __ZIP_MAIN_H__
-
-#include "zip_device.h"
-#include "zip_regs.h"
-
-/* PCI device IDs */
-#define PCI_DEVICE_ID_THUNDERX_ZIP 0xA01A
-
-/* ZIP device BARs */
-#define PCI_CFG_ZIP_PF_BAR0 0 /* Base addr for normal regs */
-
-/* Maximum available zip queues */
-#define ZIP_MAX_NUM_QUEUES 8
-
-#define ZIP_128B_ALIGN 7
-
-/* Command queue buffer size */
-#define ZIP_CMD_QBUF_SIZE (8064 + 8)
-
-struct zip_registers {
- char *reg_name;
- u64 reg_offset;
-};
-
-/* ZIP Compression - Decompression stats */
-struct zip_stats {
- atomic64_t comp_req_submit;
- atomic64_t comp_req_complete;
- atomic64_t decomp_req_submit;
- atomic64_t decomp_req_complete;
- atomic64_t comp_in_bytes;
- atomic64_t comp_out_bytes;
- atomic64_t decomp_in_bytes;
- atomic64_t decomp_out_bytes;
- atomic64_t decomp_bad_reqs;
-};
-
-/* ZIP Instruction Queue */
-struct zip_iq {
- u64 *sw_head;
- u64 *sw_tail;
- u64 *hw_tail;
- u64 done_cnt;
- u64 pend_cnt;
- u64 free_flag;
-
- /* ZIP IQ lock */
- spinlock_t lock;
-};
-
-/* ZIP Device */
-struct zip_device {
- u32 index;
- void __iomem *reg_base;
- struct pci_dev *pdev;
-
- /* Different ZIP Constants */
- u64 depth;
- u64 onfsize;
- u64 ctxsize;
-
- struct zip_iq iq[ZIP_MAX_NUM_QUEUES];
- struct zip_stats stats;
-};
-
-/* Prototypes */
-struct zip_device *zip_get_device(int node_id);
-int zip_get_node_id(void);
-void zip_reg_write(u64 val, u64 __iomem *addr);
-u64 zip_reg_read(u64 __iomem *addr);
-void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue);
-u32 zip_load_instr(union zip_inst_s *instr, struct zip_device *zip_dev);
-
-#endif /* __ZIP_MAIN_H__ */
diff --git a/drivers/crypto/cavium/zip/zip_mem.c b/drivers/crypto/cavium/zip/zip_mem.c
deleted file mode 100644
index b3e0843a9169..000000000000
--- a/drivers/crypto/cavium/zip/zip_mem.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-
-#include "common.h"
-
-/**
- * zip_cmd_qbuf_alloc - Allocates a cmd buffer for ZIP Instruction Queue
- * @zip: Pointer to zip device structure
- * @q: Queue number to allocate buffer to
- * Return: 0 if successful, -ENOMEM otherwise
- */
-int zip_cmd_qbuf_alloc(struct zip_device *zip, int q)
-{
- zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
- get_order(ZIP_CMD_QBUF_SIZE));
-
- if (!zip->iq[q].sw_head)
- return -ENOMEM;
-
- memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE);
-
- zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head);
- return 0;
-}
-
-/**
- * zip_cmd_qbuf_free - Frees the cmd Queue buffer
- * @zip: Pointer to zip device structure
- * @q: Queue number to free buffer of
- */
-void zip_cmd_qbuf_free(struct zip_device *zip, int q)
-{
- zip_dbg("Freeing cmd_qbuf 0x%lx\n", zip->iq[q].sw_tail);
-
- free_pages((u64)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE));
-}
-
-/**
- * zip_data_buf_alloc - Allocates memory for a data buffer
- * @size: Size of the buffer to allocate
- * Returns: Pointer to the buffer allocated
- */
-u8 *zip_data_buf_alloc(u64 size)
-{
- u8 *ptr;
-
- ptr = (u8 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
- get_order(size));
-
- if (!ptr)
- return NULL;
-
- memset(ptr, 0, size);
-
- zip_dbg("Data buffer allocation success\n");
- return ptr;
-}
-
-/**
- * zip_data_buf_free - Frees the memory of a data buffer
- * @ptr: Pointer to the buffer
- * @size: Buffer size
- */
-void zip_data_buf_free(u8 *ptr, u64 size)
-{
- zip_dbg("Freeing data buffer 0x%lx\n", ptr);
-
- free_pages((u64)ptr, get_order(size));
-}
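
Aside: both allocators above round up to whole pages via __get_free_pages()/get_order(). For the queue buffer that means get_order(8064 + 8) = 1 with 4 KiB pages, i.e. two contiguous DMA-capable pages per instruction queue. A small sketch of that sizing (the helper is illustrative only):

/* Illustration of the page-order math used by the allocators above. */
static unsigned int ex_qbuf_pages(void)
{
	unsigned int order = get_order(8064 + 8);	/* ZIP_CMD_QBUF_SIZE */

	/* 8072 bytes -> rounds up to 2 x 4096 -> order 1 */
	return 1U << order;	/* number of pages actually allocated */
}
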
diff --git a/drivers/crypto/cavium/zip/zip_mem.h b/drivers/crypto/cavium/zip/zip_mem.h
deleted file mode 100644
index f8f2f08c4a5c..000000000000
--- a/drivers/crypto/cavium/zip/zip_mem.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_MEM_H__
-#define __ZIP_MEM_H__
-
-/**
- * zip_cmd_qbuf_free - Frees the cmd Queue buffer
- * @zip: Pointer to zip device structure
- * @q: Queue number to free buffer of
- */
-void zip_cmd_qbuf_free(struct zip_device *zip, int q);
-
-/**
- * zip_cmd_qbuf_alloc - Allocates a Chunk/cmd buffer for ZIP Inst(cmd) Queue
- * @zip: Pointer to zip device structure
- * @q: Queue number to allocate buffer to
- * Return: 0 if successful, -ENOMEM otherwise
- */
-int zip_cmd_qbuf_alloc(struct zip_device *zip, int q);
-
-/**
- * zip_data_buf_alloc - Allocates memory for a data buffer
- * @size: Size of the buffer to allocate
- * Returns: Pointer to the buffer allocated
- */
-u8 *zip_data_buf_alloc(u64 size);
-
-/**
- * zip_data_buf_free - Frees the memory of a data buffer
- * @ptr: Pointer to the buffer
- * @size: Buffer size
- */
-void zip_data_buf_free(u8 *ptr, u64 size);
-
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_regs.h b/drivers/crypto/cavium/zip/zip_regs.h
deleted file mode 100644
index 874e0236c87e..000000000000
--- a/drivers/crypto/cavium/zip/zip_regs.h
+++ /dev/null
@@ -1,1347 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_REGS_H__
-#define __ZIP_REGS_H__
-
-/*
- * Configuration and status register (CSR) address and type definitions for
- * Cavium ZIP.
- */
-
-#include <linux/kern_levels.h>
-
-/* ZIP invocation result completion status codes */
-#define ZIP_CMD_NOTDONE 0x0
-
-/* Successful completion. */
-#define ZIP_CMD_SUCCESS 0x1
-
-/* Output truncated */
-#define ZIP_CMD_DTRUNC 0x2
-
-/* Dynamic Stop */
-#define ZIP_CMD_DYNAMIC_STOP 0x3
-
-/* Uncompress ran out of input data when IWORD0[EF] was set */
-#define ZIP_CMD_ITRUNC 0x4
-
-/* Uncompress found the reserved block type 3 */
-#define ZIP_CMD_RBLOCK 0x5
-
-/*
- * Uncompress found LEN != ZIP_CMD_NLEN in an uncompressed block in the input.
- */
-#define ZIP_CMD_NLEN 0x6
-
-/* Uncompress found a bad code in the main Huffman codes. */
-#define ZIP_CMD_BADCODE 0x7
-
-/* Uncompress found a bad code in the 19 Huffman codes encoding lengths. */
-#define ZIP_CMD_BADCODE2 0x8
-
-/* Compress found a zero-length input. */
-#define ZIP_CMD_ZERO_LEN 0x9
-
-/* The compress or decompress encountered an internal parity error. */
-#define ZIP_CMD_PARITY 0xA
-
-/*
- * Uncompress found a string identifier that precedes the uncompressed data and
- * decompression history.
- */
-#define ZIP_CMD_FATAL 0xB
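
/*
 * Illustrative sketch (not from the original header): one way a caller
 * could map the completion codes above to errno values. Treating
 * DYNAMIC_STOP as success is an assumption made for brevity.
 */
static int zip_compcode_to_errno(u8 compcode)
{
	switch (compcode) {
	case ZIP_CMD_SUCCESS:
	case ZIP_CMD_DYNAMIC_STOP:
		return 0;		/* invocation completed */
	case ZIP_CMD_NOTDONE:
		return -EAGAIN;		/* result not yet written */
	case ZIP_CMD_DTRUNC:
		return -ENOSPC;		/* output buffer too small */
	case ZIP_CMD_ITRUNC:
		return -ENODATA;	/* input exhausted with IWORD0[EF] set */
	default:
		return -EIO;		/* bad stream or internal fault */
	}
}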
-
-/**
- * enum zip_int_vec_e - ZIP MSI-X Vector Enumeration, enumerates the MSI-X
- * interrupt vectors.
- */
-enum zip_int_vec_e {
- ZIP_INT_VEC_E_ECCE = 0x10,
- ZIP_INT_VEC_E_FIFE = 0x11,
- ZIP_INT_VEC_E_QUE0_DONE = 0x0,
- ZIP_INT_VEC_E_QUE0_ERR = 0x8,
- ZIP_INT_VEC_E_QUE1_DONE = 0x1,
- ZIP_INT_VEC_E_QUE1_ERR = 0x9,
- ZIP_INT_VEC_E_QUE2_DONE = 0x2,
- ZIP_INT_VEC_E_QUE2_ERR = 0xa,
- ZIP_INT_VEC_E_QUE3_DONE = 0x3,
- ZIP_INT_VEC_E_QUE3_ERR = 0xb,
- ZIP_INT_VEC_E_QUE4_DONE = 0x4,
- ZIP_INT_VEC_E_QUE4_ERR = 0xc,
- ZIP_INT_VEC_E_QUE5_DONE = 0x5,
- ZIP_INT_VEC_E_QUE5_ERR = 0xd,
- ZIP_INT_VEC_E_QUE6_DONE = 0x6,
- ZIP_INT_VEC_E_QUE6_ERR = 0xe,
- ZIP_INT_VEC_E_QUE7_DONE = 0x7,
- ZIP_INT_VEC_E_QUE7_ERR = 0xf,
- ZIP_INT_VEC_E_ENUM_LAST = 0x12,
-};
-
-/**
- * union zip_zptr_addr_s - ZIP Generic Pointer Structure for ADDR.
- *
- * It is the generic format of pointers in ZIP_INST_S.
- */
-union zip_zptr_addr_s {
- u64 u_reg64;
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 addr : 49;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-
-};
-
-/**
- * union zip_zptr_ctl_s - ZIP Generic Pointer Structure for CTL.
- *
- * It is the generic format of pointers in ZIP_INST_S.
- */
-union zip_zptr_ctl_s {
- u64 u_reg64;
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_112_127 : 16;
- u64 length : 16;
- u64 reserved_67_95 : 29;
- u64 fw : 1;
- u64 nc : 1;
- u64 data_be : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 data_be : 1;
- u64 nc : 1;
- u64 fw : 1;
- u64 reserved_67_95 : 29;
- u64 length : 16;
- u64 reserved_112_127 : 16;
-#endif
- } s;
-};
-
-/**
- * union zip_inst_s - ZIP Instruction Structure.
- * Each ZIP instruction has 16 words (they are called IWORD0 to IWORD15 within
- * the structure).
- */
-union zip_inst_s {
- u64 u_reg64[16];
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 doneint : 1;
- u64 reserved_56_62 : 7;
- u64 totaloutputlength : 24;
- u64 reserved_27_31 : 5;
- u64 exn : 3;
- u64 reserved_23_23 : 1;
- u64 exbits : 7;
- u64 reserved_12_15 : 4;
- u64 sf : 1;
- u64 ss : 2;
- u64 cc : 2;
- u64 ef : 1;
- u64 bf : 1;
- u64 ce : 1;
- u64 reserved_3_3 : 1;
- u64 ds : 1;
- u64 dg : 1;
- u64 hg : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 hg : 1;
- u64 dg : 1;
- u64 ds : 1;
- u64 reserved_3_3 : 1;
- u64 ce : 1;
- u64 bf : 1;
- u64 ef : 1;
- u64 cc : 2;
- u64 ss : 2;
- u64 sf : 1;
- u64 reserved_12_15 : 4;
- u64 exbits : 7;
- u64 reserved_23_23 : 1;
- u64 exn : 3;
- u64 reserved_27_31 : 5;
- u64 totaloutputlength : 24;
- u64 reserved_56_62 : 7;
- u64 doneint : 1;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 historylength : 16;
- u64 reserved_96_111 : 16;
- u64 adlercrc32 : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 adlercrc32 : 32;
- u64 reserved_96_111 : 16;
- u64 historylength : 16;
-#endif
- union zip_zptr_addr_s ctx_ptr_addr;
- union zip_zptr_ctl_s ctx_ptr_ctl;
- union zip_zptr_addr_s his_ptr_addr;
- union zip_zptr_ctl_s his_ptr_ctl;
- union zip_zptr_addr_s inp_ptr_addr;
- union zip_zptr_ctl_s inp_ptr_ctl;
- union zip_zptr_addr_s out_ptr_addr;
- union zip_zptr_ctl_s out_ptr_ctl;
- union zip_zptr_addr_s res_ptr_addr;
- union zip_zptr_ctl_s res_ptr_ctl;
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_817_831 : 15;
- u64 wq_ptr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 wq_ptr : 49;
- u64 reserved_817_831 : 15;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_882_895 : 14;
- u64 tt : 2;
- u64 reserved_874_879 : 6;
- u64 grp : 10;
- u64 tag : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 tag : 32;
- u64 grp : 10;
- u64 reserved_874_879 : 6;
- u64 tt : 2;
- u64 reserved_882_895 : 14;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_896_959 : 64;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reserved_896_959 : 64;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_960_1023 : 64;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reserved_960_1023 : 64;
-#endif
- } s;
-};
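
/*
 * Sketch only: filling the pointer words of a ZIP_INST_S for a direct
 * (non-gather) operation. The DMA addresses are assumed to come from
 * dma_map_single() in the caller; field usage follows the unions above.
 */
static void zip_inst_set_buffers(union zip_inst_s *inst,
				 u64 in, u16 in_len,
				 u64 out, u16 out_len, u64 res)
{
	inst->s.inp_ptr_addr.s.addr = in;
	inst->s.inp_ptr_ctl.s.length = in_len;
	inst->s.out_ptr_addr.s.addr = out;
	inst->s.out_ptr_ctl.s.length = out_len;
	inst->s.res_ptr_addr.s.addr = res;
	inst->s.res_ptr_ctl.s.length = sizeof(union zip_zres_s);
	inst->s.doneint = 1;	/* raise DONEINT once the result lands */
}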
-
-/**
- * union zip_nptr_s - ZIP Instruction Next-Chunk-Buffer Pointer (NPTR)
- * Structure
- *
- * ZIP_NPTR structure is used to chain all the zip instruction buffers
- * together. ZIP instruction buffers are managed (allocated and released) by
- * the software.
- */
-union zip_nptr_s {
- u64 u_reg64;
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 addr : 49;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-};
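
/*
 * Sketch of the chaining described above: the last word of a command
 * buffer chunk is overwritten with an NPTR pointing at the next chunk.
 * The parameter names are hypothetical.
 */
static void zip_chain_next_chunk(u64 *chunk_last_word, u64 next_chunk_dma)
{
	union zip_nptr_s nptr = { .u_reg64 = 0 };

	nptr.s.addr = next_chunk_dma;
	*chunk_last_word = nptr.u_reg64;
}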
-
-/**
- * union zip_zptr_s - ZIP Generic Pointer Structure.
- *
- * It is the generic format of pointers in ZIP_INST_S.
- */
-union zip_zptr_s {
- u64 u_reg64[2];
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 addr : 49;
- u64 reserved_49_63 : 15;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_112_127 : 16;
- u64 length : 16;
- u64 reserved_67_95 : 29;
- u64 fw : 1;
- u64 nc : 1;
- u64 data_be : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 data_be : 1;
- u64 nc : 1;
- u64 fw : 1;
- u64 reserved_67_95 : 29;
- u64 length : 16;
- u64 reserved_112_127 : 16;
-#endif
- } s;
-};
-
-/**
- * union zip_zres_s - ZIP Result Structure
- *
- * The ZIP coprocessor writes the result structure after it completes the
- * invocation. The result structure is exactly 24 bytes, and each invocation of
- * the ZIP coprocessor produces exactly one result structure.
- */
-union zip_zres_s {
- u64 u_reg64[3];
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 crc32 : 32;
- u64 adler32 : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 adler32 : 32;
- u64 crc32 : 32;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 totalbyteswritten : 32;
- u64 totalbytesread : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 totalbytesread : 32;
- u64 totalbyteswritten : 32;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 totalbitsprocessed : 32;
- u64 doneint : 1;
- u64 reserved_155_158 : 4;
- u64 exn : 3;
- u64 reserved_151_151 : 1;
- u64 exbits : 7;
- u64 reserved_137_143 : 7;
- u64 ef : 1;
-
- volatile u64 compcode : 8;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-
- volatile u64 compcode : 8;
- u64 ef : 1;
- u64 reserved_137_143 : 7;
- u64 exbits : 7;
- u64 reserved_151_151 : 1;
- u64 exn : 3;
- u64 reserved_155_158 : 4;
- u64 doneint : 1;
- u64 totalbitsprocessed : 32;
-#endif
- } s;
-};
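
/*
 * Illustrative busy-wait on the result structure (assumes the result
 * buffer is CPU-coherent and <linux/delay.h> is available). compcode
 * is declared volatile above precisely so this re-read is not
 * optimized away.
 */
static int zip_poll_result(union zip_zres_s *res, unsigned long timeout_us)
{
	while (res->s.compcode == ZIP_CMD_NOTDONE) {
		if (!timeout_us--)
			return -ETIMEDOUT;
		udelay(1);
	}
	return res->s.compcode == ZIP_CMD_SUCCESS ? 0 : -EIO;
}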
-
-/**
- * union zip_cmd_ctl - Structure representing the register that controls
- * clock and reset.
- */
-union zip_cmd_ctl {
- u64 u_reg64;
- struct zip_cmd_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_2_63 : 62;
- u64 forceclk : 1;
- u64 reset : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reset : 1;
- u64 forceclk : 1;
- u64 reserved_2_63 : 62;
-#endif
- } s;
-};
-
-#define ZIP_CMD_CTL 0x0ull
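
/*
 * Sketch of a block reset through ZIP_CMD_CTL. zip_reg_write() is a
 * hypothetical MMIO accessor standing in for the driver's real helper.
 */
static void zip_block_reset(struct zip_device *zip)
{
	union zip_cmd_ctl cmd_ctl = { .u_reg64 = 0 };

	cmd_ctl.s.reset = 1;
	zip_reg_write(zip, ZIP_CMD_CTL, cmd_ctl.u_reg64);
}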
-
-/**
- * union zip_constants - Data structure representing the register that contains
- * all of the current implementation-related parameters of the zip core in this
- * chip.
- */
-union zip_constants {
- u64 u_reg64;
- struct zip_constants_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 nexec : 8;
- u64 reserved_49_55 : 7;
- u64 syncflush_capable : 1;
- u64 depth : 16;
- u64 onfsize : 12;
- u64 ctxsize : 12;
- u64 reserved_1_7 : 7;
- u64 disabled : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 disabled : 1;
- u64 reserved_1_7 : 7;
- u64 ctxsize : 12;
- u64 onfsize : 12;
- u64 depth : 16;
- u64 syncflush_capable : 1;
- u64 reserved_49_55 : 7;
- u64 nexec : 8;
-#endif
- } s;
-};
-
-#define ZIP_CONSTANTS 0x00A0ull
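
/*
 * Sketch: decoding ZIP_CONSTANTS at probe time. zip_reg_read() is the
 * same hypothetical accessor as above; the units of ctxsize/onfsize
 * are deliberately left unstated.
 */
static void zip_log_constants(struct zip_device *zip)
{
	union zip_constants c;

	c.u_reg64 = zip_reg_read(zip, ZIP_CONSTANTS);
	pr_info("zip: nexec %u depth %u ctxsize %u onfsize %u\n",
		(u32)c.s.nexec, (u32)c.s.depth,
		(u32)c.s.ctxsize, (u32)c.s.onfsize);
}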
-
-/**
- * union zip_corex_bist_status - Represents registers which have the BIST
- * status of memories in zip cores.
- *
- * Each bit is the BIST result of an individual memory
- * (per bit, 0 = pass and 1 = fail).
- */
-union zip_corex_bist_status {
- u64 u_reg64;
- struct zip_corex_bist_status_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_53_63 : 11;
- u64 bstatus : 53;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 bstatus : 53;
- u64 reserved_53_63 : 11;
-#endif
- } s;
-};
-
-static inline u64 ZIP_COREX_BIST_STATUS(u64 param1)
-{
- if (param1 <= 1)
- return 0x0520ull + (param1 & 1) * 0x8ull;
- pr_err("ZIP_COREX_BIST_STATUS: %llu\n", param1);
- return 0;
-}
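
/*
 * Sketch: checking a core's BIST result with the helper above. Any set
 * bit means one of that core's memories failed BIST.
 */
static bool zip_core_bist_ok(struct zip_device *zip, int core)
{
	union zip_corex_bist_status bist;

	bist.u_reg64 = zip_reg_read(zip, ZIP_COREX_BIST_STATUS(core));
	return bist.s.bstatus == 0;
}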
-
-/**
- * union zip_ctl_bist_status - Represents register that has the BIST status of
- * memories in ZIP_CTL (instruction buffer, G/S pointer FIFO, input data
- * buffer, output data buffers).
- *
- * Each bit is the BIST result of an individual memory
- * (per bit, 0 = pass and 1 = fail).
- */
-union zip_ctl_bist_status {
- u64 u_reg64;
- struct zip_ctl_bist_status_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_9_63 : 55;
- u64 bstatus : 9;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 bstatus : 9;
- u64 reserved_9_63 : 55;
-#endif
- } s;
-};
-
-#define ZIP_CTL_BIST_STATUS 0x0510ull
-
-/**
- * union zip_ctl_cfg - Represents the register that controls the behavior of
- * the ZIP DMA engines.
- *
- * It is recommended to keep default values for normal operation. Changing the
- * values of the fields may be useful for diagnostics.
- */
-union zip_ctl_cfg {
- u64 u_reg64;
- struct zip_ctl_cfg_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_52_63 : 12;
- u64 ildf : 4;
- u64 reserved_36_47 : 12;
- u64 drtf : 4;
- u64 reserved_27_31 : 5;
- u64 stcf : 3;
- u64 reserved_19_23 : 5;
- u64 ldf : 3;
- u64 reserved_2_15 : 14;
- u64 busy : 1;
- u64 reserved_0_0 : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reserved_0_0 : 1;
- u64 busy : 1;
- u64 reserved_2_15 : 14;
- u64 ldf : 3;
- u64 reserved_19_23 : 5;
- u64 stcf : 3;
- u64 reserved_27_31 : 5;
- u64 drtf : 4;
- u64 reserved_36_47 : 12;
- u64 ildf : 4;
- u64 reserved_52_63 : 12;
-#endif
- } s;
-};
-
-#define ZIP_CTL_CFG 0x0560ull
-
-/**
- * union zip_dbg_corex_inst - Represents the registers that reflect the status
- * of the current instruction that the ZIP core is executing or has executed.
- *
- * These registers are only for debug use.
- */
-union zip_dbg_corex_inst {
- u64 u_reg64;
- struct zip_dbg_corex_inst_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 busy : 1;
- u64 reserved_35_62 : 28;
- u64 qid : 3;
- u64 iid : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 iid : 32;
- u64 qid : 3;
- u64 reserved_35_62 : 28;
- u64 busy : 1;
-#endif
- } s;
-};
-
-static inline u64 ZIP_DBG_COREX_INST(u64 param1)
-{
- if (param1 <= 1)
- return 0x0640ull + (param1 & 1) * 0x8ull;
- pr_err("ZIP_DBG_COREX_INST: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_dbg_corex_sta - Represents registers that reflect the status of
- * the zip cores.
- *
- * They are for debug use only.
- */
-union zip_dbg_corex_sta {
- u64 u_reg64;
- struct zip_dbg_corex_sta_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 busy : 1;
- u64 reserved_37_62 : 26;
- u64 ist : 5;
- u64 nie : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 nie : 32;
- u64 ist : 5;
- u64 reserved_37_62 : 26;
- u64 busy : 1;
-#endif
- } s;
-};
-
-static inline u64 ZIP_DBG_COREX_STA(u64 param1)
-{
- if (param1 <= 1)
- return 0x0680ull + (param1 & 1) * 0x8ull;
- pr_err("ZIP_DBG_COREX_STA: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_dbg_quex_sta - Represents registers that reflect the status of
- * the zip instruction queues.
- *
- * They are for debug use only.
- */
-union zip_dbg_quex_sta {
- u64 u_reg64;
- struct zip_dbg_quex_sta_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 busy : 1;
- u64 reserved_56_62 : 7;
- u64 rqwc : 24;
- u64 nii : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 nii : 32;
- u64 rqwc : 24;
- u64 reserved_56_62 : 7;
- u64 busy : 1;
-#endif
- } s;
-};
-
-static inline u64 ZIP_DBG_QUEX_STA(u64 param1)
-{
- if (param1 <= 7)
- return 0x1800ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_DBG_QUEX_STA: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_ecc_ctl - Represents the register that enables ECC for each
- * individual internal memory that requires ECC.
- *
- * For debug purpose, it can also flip one or two bits in the ECC data.
- */
-union zip_ecc_ctl {
- u64 u_reg64;
- struct zip_ecc_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_19_63 : 45;
- u64 vmem_cdis : 1;
- u64 vmem_fs : 2;
- u64 reserved_15_15 : 1;
- u64 idf1_cdis : 1;
- u64 idf1_fs : 2;
- u64 reserved_11_11 : 1;
- u64 idf0_cdis : 1;
- u64 idf0_fs : 2;
- u64 reserved_7_7 : 1;
- u64 gspf_cdis : 1;
- u64 gspf_fs : 2;
- u64 reserved_3_3 : 1;
- u64 iqf_cdis : 1;
- u64 iqf_fs : 2;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 iqf_fs : 2;
- u64 iqf_cdis : 1;
- u64 reserved_3_3 : 1;
- u64 gspf_fs : 2;
- u64 gspf_cdis : 1;
- u64 reserved_7_7 : 1;
- u64 idf0_fs : 2;
- u64 idf0_cdis : 1;
- u64 reserved_11_11 : 1;
- u64 idf1_fs : 2;
- u64 idf1_cdis : 1;
- u64 reserved_15_15 : 1;
- u64 vmem_fs : 2;
- u64 vmem_cdis : 1;
- u64 reserved_19_63 : 45;
-#endif
- } s;
-};
-
-#define ZIP_ECC_CTL 0x0568ull
-
-/* NCB - zip_ecce_ena_w1c */
-union zip_ecce_ena_w1c {
- u64 u_reg64;
- struct zip_ecce_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_ENA_W1C 0x0598ull
-
-/* NCB - zip_ecce_ena_w1s */
-union zip_ecce_ena_w1s {
- u64 u_reg64;
- struct zip_ecce_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_ENA_W1S 0x0590ull
-
-/**
- * union zip_ecce_int - Represents the register that contains the status of the
- * ECC interrupt sources.
- */
-union zip_ecce_int {
- u64 u_reg64;
- struct zip_ecce_int_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_INT 0x0580ull
-
-/* NCB - zip_ecce_int_w1s */
-union zip_ecce_int_w1s {
- u64 u_reg64;
- struct zip_ecce_int_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_INT_W1S 0x0588ull
-
-/* NCB - zip_fife_ena_w1c */
-union zip_fife_ena_w1c {
- u64 u_reg64;
- struct zip_fife_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_ENA_W1C 0x0090ull
-
-/* NCB - zip_fife_ena_w1s */
-union zip_fife_ena_w1s {
- u64 u_reg64;
- struct zip_fife_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_ENA_W1S 0x0088ull
-
-/* NCB - zip_fife_int */
-union zip_fife_int {
- u64 u_reg64;
- struct zip_fife_int_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_INT 0x0078ull
-
-/* NCB - zip_fife_int_w1s */
-union zip_fife_int_w1s {
- u64 u_reg64;
- struct zip_fife_int_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_INT_W1S 0x0080ull
-
-/**
- * union zip_msix_pbax - Represents the register that is the MSI-X PBA table
- *
- * The bit number is indexed by the ZIP_INT_VEC_E enumeration.
- */
-union zip_msix_pbax {
- u64 u_reg64;
- struct zip_msix_pbax_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 pend : 64;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 pend : 64;
-#endif
- } s;
-};
-
-static inline u64 ZIP_MSIX_PBAX(u64 param1)
-{
- if (param1 == 0)
- return 0x0000838000FF0000ull;
- pr_err("ZIP_MSIX_PBAX: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_msix_vecx_addr - Represents the register that is the MSI-X vector
- * table, indexed by the ZIP_INT_VEC_E enumeration.
- */
-union zip_msix_vecx_addr {
- u64 u_reg64;
- struct zip_msix_vecx_addr_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 47;
- u64 reserved_1_1 : 1;
- u64 secvec : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 secvec : 1;
- u64 reserved_1_1 : 1;
- u64 addr : 47;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-};
-
-static inline u64 ZIP_MSIX_VECX_ADDR(u64 param1)
-{
- if (param1 <= 17)
- return 0x0000838000F00000ull + (param1 & 31) * 0x10ull;
- pr_err("ZIP_MSIX_VECX_ADDR: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_msix_vecx_ctl - Represents the register that is the MSI-X vector
- * table, indexed by the ZIP_INT_VEC_E enumeration.
- */
-union zip_msix_vecx_ctl {
- u64 u_reg64;
- struct zip_msix_vecx_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_33_63 : 31;
- u64 mask : 1;
- u64 reserved_20_31 : 12;
- u64 data : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 data : 20;
- u64 reserved_20_31 : 12;
- u64 mask : 1;
- u64 reserved_33_63 : 31;
-#endif
- } s;
-};
-
-static inline u64 ZIP_MSIX_VECX_CTL(u64 param1)
-{
- if (param1 <= 17)
- return 0x0000838000F00008ull + (param1 & 31) * 0x10ull;
- pr_err("ZIP_MSIX_VECX_CTL: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done - Represents the registers that contain the per-queue
- * instruction done count.
- */
-union zip_quex_done {
- u64 u_reg64;
- struct zip_quex_done_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_20_63 : 44;
- u64 done : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done : 20;
- u64 reserved_20_63 : 44;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE(u64 param1)
-{
- if (param1 <= 7)
- return 0x2000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done_ack - Represents the registers that, when written,
- * decrement the per-queue instruction done count.
- */
-union zip_quex_done_ack {
- u64 u_reg64;
- struct zip_quex_done_ack_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_20_63 : 44;
- u64 done_ack : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done_ack : 20;
- u64 reserved_20_63 : 44;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_ACK(u64 param1)
-{
- if (param1 <= 7)
- return 0x2200ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_ACK: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done_ena_w1c - Represents the register that, when written
- * with 1, disables the DONEINT interrupt for the queue.
- */
-union zip_quex_done_ena_w1c {
- u64 u_reg64;
- struct zip_quex_done_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_1_63 : 63;
- u64 done_ena : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done_ena : 1;
- u64 reserved_1_63 : 63;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_ENA_W1C(u64 param1)
-{
- if (param1 <= 7)
- return 0x2600ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_ENA_W1C: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done_ena_w1s - Represents the register that, when written
- * with 1, enables the DONEINT interrupt for the queue.
- */
-union zip_quex_done_ena_w1s {
- u64 u_reg64;
- struct zip_quex_done_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_1_63 : 63;
- u64 done_ena : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done_ena : 1;
- u64 reserved_1_63 : 63;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_ENA_W1S(u64 param1)
-{
- if (param1 <= 7)
- return 0x2400ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_ENA_W1S: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done_wait - Represents the register that specifies the
- * per-queue interrupt coalescing settings.
- */
-union zip_quex_done_wait {
- u64 u_reg64;
- struct zip_quex_done_wait_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_48_63 : 16;
- u64 time_wait : 16;
- u64 reserved_20_31 : 12;
- u64 num_wait : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 num_wait : 20;
- u64 reserved_20_31 : 12;
- u64 time_wait : 16;
- u64 reserved_48_63 : 16;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_WAIT(u64 param1)
-{
- if (param1 <= 7)
- return 0x2800ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_WAIT: %llu\n", param1);
- return 0;
-}
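
/*
 * Sketch of one DONE-handling sequence built from the registers above:
 * read the pending count, reap that many completions, then write the
 * same count to DONE_ACK to decrement it. DONE_WAIT coalescing only
 * defers the interrupt; the counts work the same way either way.
 */
static void zip_queue_ack_done(struct zip_device *zip, int q)
{
	union zip_quex_done done;
	union zip_quex_done_ack ack = { .u_reg64 = 0 };

	done.u_reg64 = zip_reg_read(zip, ZIP_QUEX_DONE(q));
	/* ... process done.s.done completed instructions here ... */
	ack.s.done_ack = done.s.done;
	zip_reg_write(zip, ZIP_QUEX_DONE_ACK(q), ack.u_reg64);
}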
-
-/**
- * union zip_quex_doorbell - Represents doorbell registers for the ZIP
- * instruction queues.
- */
-union zip_quex_doorbell {
- u64 u_reg64;
- struct zip_quex_doorbell_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_20_63 : 44;
- u64 dbell_cnt : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dbell_cnt : 20;
- u64 reserved_20_63 : 44;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DOORBELL(u64 param1)
-{
- if (param1 <= 7)
- return 0x4000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DOORBELL: %llu\n", param1);
- return 0;
-}
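
/*
 * Sketch: advertising n newly written instructions on queue q by
 * ringing its doorbell (hypothetical accessor as above).
 */
static void zip_ring_doorbell(struct zip_device *zip, int q, u32 n)
{
	union zip_quex_doorbell db = { .u_reg64 = 0 };

	db.s.dbell_cnt = n;
	zip_reg_write(zip, ZIP_QUEX_DOORBELL(q), db.u_reg64);
}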
-
-union zip_quex_err_ena_w1c {
- u64 u_reg64;
- struct zip_quex_err_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_ENA_W1C(u64 param1)
-{
- if (param1 <= 7)
- return 0x3600ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_ENA_W1C: %llu\n", param1);
- return 0;
-}
-
-union zip_quex_err_ena_w1s {
- u64 u_reg64;
- struct zip_quex_err_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_ENA_W1S(u64 param1)
-{
- if (param1 <= 7)
- return 0x3400ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_ENA_W1S: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_err_int - Represents registers that contain the per-queue
- * error interrupts.
- */
-union zip_quex_err_int {
- u64 u_reg64;
- struct zip_quex_err_int_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_INT(u64 param1)
-{
- if (param1 <= 7)
- return 0x3000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_INT: %llu\n", param1);
- return 0;
-}
-
-/* NCB - zip_que#_err_int_w1s */
-union zip_quex_err_int_w1s {
- u64 u_reg64;
- struct zip_quex_err_int_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_INT_W1S(u64 param1)
-{
- if (param1 <= 7)
- return 0x3200ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_INT_W1S: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_gcfg - Represents the registers that reflect the status of
- * the zip instruction queues; debug use only.
- */
-union zip_quex_gcfg {
- u64 u_reg64;
- struct zip_quex_gcfg_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_4_63 : 60;
- u64 iqb_ldwb : 1;
- u64 cbw_sty : 1;
- u64 l2ld_cmd : 2;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 l2ld_cmd : 2;
- u64 cbw_sty : 1;
- u64 iqb_ldwb : 1;
- u64 reserved_4_63 : 60;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_GCFG(u64 param1)
-{
- if (param1 <= 7)
- return 0x1A00ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_GCFG: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_map - Represents the registers that control how each
- * instruction queue maps to zip cores.
- */
-union zip_quex_map {
- u64 u_reg64;
- struct zip_quex_map_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_2_63 : 62;
- u64 zce : 2;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 zce : 2;
- u64 reserved_2_63 : 62;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_MAP(u64 param1)
-{
- if (param1 <= 7)
- return 0x1400ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_MAP: %llu\n", param1);
- return 0;
-}
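
/*
 * Sketch: steering queue q to both zip cores by setting both ZCE bits.
 * The exact bit-to-core mapping is an assumption here, not taken from
 * a datasheet.
 */
static void zip_queue_map_cores(struct zip_device *zip, int q)
{
	union zip_quex_map map = { .u_reg64 = 0 };

	map.s.zce = 0x3;
	zip_reg_write(zip, ZIP_QUEX_MAP(q), map.u_reg64);
}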
-
-/**
- * union zip_quex_sbuf_addr - Represents the registers that set the buffer
- * parameters for the instruction queues.
- *
- * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite
- * this register to effectively reset the command buffer state machine.
- * These registers must be programmed after SW programs the corresponding
- * ZIP_QUE(0..7)_SBUF_CTL.
- */
-union zip_quex_sbuf_addr {
- u64 u_reg64;
- struct zip_quex_sbuf_addr_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 ptr : 42;
- u64 off : 7;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 off : 7;
- u64 ptr : 42;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_SBUF_ADDR(u64 param1)
-{
- if (param1 <= 7)
- return 0x1000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_SBUF_ADDR: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_sbuf_ctl - Represents the registers that set the buffer
- * parameters for the instruction queues.
- *
- * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite
- * this register to effectively reset the command buffer state machine.
- * These registers must be programmed before SW programs the corresponding
- * ZIP_QUE(0..7)_SBUF_ADDR.
- */
-union zip_quex_sbuf_ctl {
- u64 u_reg64;
- struct zip_quex_sbuf_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_45_63 : 19;
- u64 size : 13;
- u64 inst_be : 1;
- u64 reserved_24_30 : 7;
- u64 stream_id : 8;
- u64 reserved_12_15 : 4;
- u64 aura : 12;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 aura : 12;
- u64 reserved_12_15 : 4;
- u64 stream_id : 8;
- u64 reserved_24_30 : 7;
- u64 inst_be : 1;
- u64 size : 13;
- u64 reserved_45_63 : 19;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_SBUF_CTL(u64 param1)
-{
- if (param1 <= 7)
- return 0x1200ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_SBUF_CTL: %llu\n", param1);
- return 0;
-}
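
/*
 * Sketch of the programming order spelled out above: SBUF_CTL first,
 * then SBUF_ADDR. Splitting the DMA address as 'dma >> 7' into the
 * 42-bit ptr field assumes a 128-byte-aligned buffer, matching the
 * 7-bit off field; treat the encoding as illustrative.
 */
static void zip_queue_sbuf_setup(struct zip_device *zip, int q,
				 u64 dma, u16 size)
{
	union zip_quex_sbuf_ctl ctl = { .u_reg64 = 0 };
	union zip_quex_sbuf_addr addr = { .u_reg64 = 0 };

	ctl.s.size = size;
	zip_reg_write(zip, ZIP_QUEX_SBUF_CTL(q), ctl.u_reg64);

	addr.s.ptr = dma >> 7;
	zip_reg_write(zip, ZIP_QUEX_SBUF_ADDR(q), addr.u_reg64);
}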
-
-/**
- * union zip_que_ena - Represents queue enable register
- *
- * If a queue is disabled, ZIP_CTL stops fetching instructions from the queue.
- */
-union zip_que_ena {
- u64 u_reg64;
- struct zip_que_ena_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_8_63 : 56;
- u64 ena : 8;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 ena : 8;
- u64 reserved_8_63 : 56;
-#endif
- } s;
-};
-
-#define ZIP_QUE_ENA 0x0500ull
-
-/**
- * union zip_que_pri - Represents the register that defines the priority
- * between instruction queues.
- */
-union zip_que_pri {
- u64 u_reg64;
- struct zip_que_pri_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_8_63 : 56;
- u64 pri : 8;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 pri : 8;
- u64 reserved_8_63 : 56;
-#endif
- } s;
-};
-
-#define ZIP_QUE_PRI 0x0508ull
-
-/**
- * union zip_throttle - Represents the register that controls the maximum
- * number of in-flight X2I data fetch transactions.
- *
- * Writing 0 to this register causes the ZIP module to temporarily suspend NCB
- * accesses; it is not recommended for normal operation, but may be useful for
- * diagnostics.
- */
-union zip_throttle {
- u64 u_reg64;
- struct zip_throttle_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_6_63 : 58;
- u64 ld_infl : 6;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 ld_infl : 6;
- u64 reserved_6_63 : 58;
-#endif
- } s;
-};
-
-#define ZIP_THROTTLE 0x0010ull
-
-#endif /* __ZIP_REGS_H__ */
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index d11daaf47f06..685d42ec7ade 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -7,15 +7,16 @@
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
-#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include "ccp-crypto.h"
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index afae30adb703..91b1189c47de 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -7,14 +7,15 @@
* Author: Gary R Hook <ghook@amd.com>
*/
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/des.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include "ccp-crypto.h"
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index ecd58b38c46e..bc90aba5162a 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -7,14 +7,17 @@
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/ccp.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/ccp.h>
+#include <linux/module.h>
#include <linux/scatterlist.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/akcipher.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include "ccp-crypto.h"
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index cb8e99936abb..109b5aef4034 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -8,13 +8,14 @@
* Author: Gary R Hook <gary.hook@amd.com>
*/
-#include <linux/dma-mapping.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <crypto/scatterwalk.h>
#include <crypto/des.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/utils.h>
#include <linux/ccp.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include "ccp-dev.h"
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 2e87ca0e292a..3451bada884e 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -33,6 +33,7 @@
#include <asm/cacheflush.h>
#include <asm/e820/types.h>
#include <asm/sev.h>
+#include <asm/msr.h>
#include "psp-dev.h"
#include "sev-dev.h"
@@ -109,6 +110,15 @@ static void *sev_init_ex_buffer;
*/
static struct sev_data_range_list *snp_range_list;
+static void __sev_firmware_shutdown(struct sev_device *sev, bool panic);
+
+static int snp_shutdown_on_panic(struct notifier_block *nb,
+ unsigned long reason, void *arg);
+
+static struct notifier_block snp_panic_notifier = {
+ .notifier_call = snp_shutdown_on_panic,
+};
+
static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
struct sev_device *sev = psp_master->sev_data;
@@ -1060,7 +1070,7 @@ static inline int __sev_do_init_locked(int *psp_ret)
static void snp_set_hsave_pa(void *arg)
{
- wrmsrl(MSR_VM_HSAVE_PA, 0);
+ wrmsrq(MSR_VM_HSAVE_PA, 0);
}
static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
@@ -1112,7 +1122,7 @@ static int __sev_snp_init_locked(int *error)
if (!sev_version_greater_or_equal(SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR)) {
dev_dbg(sev->dev, "SEV-SNP support requires firmware version >= %d:%d\n",
SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR);
- return 0;
+ return -EOPNOTSUPP;
}
/* SNP_INIT requires MSR_VM_HSAVE_PA to be cleared on all CPUs. */
@@ -1176,21 +1186,34 @@ static int __sev_snp_init_locked(int *error)
wbinvd_on_all_cpus();
rc = __sev_do_cmd_locked(cmd, arg, error);
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV-SNP: %s failed rc %d, error %#x\n",
+ cmd == SEV_CMD_SNP_INIT_EX ? "SNP_INIT_EX" : "SNP_INIT",
+ rc, *error);
return rc;
+ }
/* Prepare for first SNP guest launch after INIT. */
wbinvd_on_all_cpus();
rc = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, error);
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV-SNP: SNP_DF_FLUSH failed rc %d, error %#x\n",
+ rc, *error);
return rc;
+ }
sev->snp_initialized = true;
dev_dbg(sev->dev, "SEV-SNP firmware initialized\n");
+ dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major,
+ sev->api_minor, sev->build);
+
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &snp_panic_notifier);
+
sev_es_tmr_size = SNP_TMR_SIZE;
- return rc;
+ return 0;
}
static void __sev_platform_init_handle_tmr(struct sev_device *sev)
@@ -1287,16 +1310,22 @@ static int __sev_platform_init_locked(int *error)
if (error)
*error = psp_ret;
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV: %s failed %#x, rc %d\n",
+ sev_init_ex_buffer ? "INIT_EX" : "INIT", psp_ret, rc);
return rc;
+ }
sev->state = SEV_STATE_INIT;
/* Prepare for first SEV guest launch after INIT */
wbinvd_on_all_cpus();
rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV: DF_FLUSH failed %#x, rc %d\n",
+ *error, rc);
return rc;
+ }
dev_dbg(sev->dev, "SEV firmware initialized\n");
@@ -1319,19 +1348,9 @@ static int _sev_platform_init_locked(struct sev_platform_init_args *args)
if (sev->state == SEV_STATE_INIT)
return 0;
- /*
- * Legacy guests cannot be running while SNP_INIT(_EX) is executing,
- * so perform SEV-SNP initialization at probe time.
- */
rc = __sev_snp_init_locked(&args->error);
- if (rc && rc != -ENODEV) {
- /*
- * Don't abort the probe if SNP INIT failed,
- * continue to initialize the legacy SEV firmware.
- */
- dev_err(sev->dev, "SEV-SNP: failed to INIT rc %d, error %#x\n",
- rc, args->error);
- }
+ if (rc && rc != -ENODEV)
+ return rc;
/* Defer legacy SEV/SEV-ES support if allowed by caller/module. */
if (args->probe && !psp_init_on_probe)
@@ -1367,8 +1386,11 @@ static int __sev_platform_shutdown_locked(int *error)
return 0;
ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
- if (ret)
+ if (ret) {
+ dev_err(sev->dev, "SEV: failed to SHUTDOWN error %#x, rc %d\n",
+ *error, ret);
return ret;
+ }
sev->state = SEV_STATE_UNINIT;
dev_dbg(sev->dev, "SEV firmware shutdown\n");
@@ -1389,6 +1411,37 @@ static int sev_get_platform_state(int *state, int *error)
return rc;
}
+static int sev_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required)
+{
+ struct sev_platform_init_args init_args = {0};
+ int rc;
+
+ rc = _sev_platform_init_locked(&init_args);
+ if (rc) {
+ argp->error = SEV_RET_INVALID_PLATFORM_STATE;
+ return rc;
+ }
+
+ *shutdown_required = true;
+
+ return 0;
+}
+
+static int snp_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required)
+{
+ int error, rc;
+
+ rc = __sev_snp_init_locked(&error);
+ if (rc) {
+ argp->error = SEV_RET_INVALID_PLATFORM_STATE;
+ return rc;
+ }
+
+ *shutdown_required = true;
+
+ return 0;
+}
+
static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable)
{
int state, rc;
@@ -1441,24 +1494,31 @@ static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
+ bool shutdown_required = false;
int rc;
if (!writable)
return -EPERM;
if (sev->state == SEV_STATE_UNINIT) {
- rc = __sev_platform_init_locked(&argp->error);
+ rc = sev_move_to_init_state(argp, &shutdown_required);
if (rc)
return rc;
}
- return __sev_do_cmd_locked(cmd, NULL, &argp->error);
+ rc = __sev_do_cmd_locked(cmd, NULL, &argp->error);
+
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
+ return rc;
}
static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_csr input;
+ bool shutdown_required = false;
struct sev_data_pek_csr data;
void __user *input_address;
void *blob = NULL;
@@ -1490,7 +1550,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
cmd:
if (sev->state == SEV_STATE_UNINIT) {
- ret = __sev_platform_init_locked(&argp->error);
+ ret = sev_move_to_init_state(argp, &shutdown_required);
if (ret)
goto e_free_blob;
}
@@ -1511,6 +1571,9 @@ cmd:
}
e_free_blob:
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
kfree(blob);
return ret;
}
@@ -1682,9 +1745,12 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, error);
/* SHUTDOWN may require DF_FLUSH */
if (*error == SEV_RET_DFFLUSH_REQUIRED) {
- ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, NULL);
+ int dfflush_error = SEV_RET_NO_FW_CALL;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, &dfflush_error);
if (ret) {
- dev_err(sev->dev, "SEV-SNP DF_FLUSH failed\n");
+ dev_err(sev->dev, "SEV-SNP DF_FLUSH failed, ret = %d, error = %#x\n",
+ ret, dfflush_error);
return ret;
}
/* reissue the shutdown command */
@@ -1692,7 +1758,8 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
error);
}
if (ret) {
- dev_err(sev->dev, "SEV-SNP firmware shutdown failed\n");
+ dev_err(sev->dev, "SEV-SNP firmware shutdown failed, rc %d, error %#x\n",
+ ret, *error);
return ret;
}
@@ -1718,6 +1785,12 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
sev->snp_initialized = false;
dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n");
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &snp_panic_notifier);
+
+ /* Reset TMR size back to default */
+ sev_es_tmr_size = SEV_TMR_SIZE;
+
return ret;
}
@@ -1726,6 +1799,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_cert_import input;
struct sev_data_pek_cert_import data;
+ bool shutdown_required = false;
void *pek_blob, *oca_blob;
int ret;
@@ -1756,7 +1830,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
/* If platform is not in INIT state then transition it to INIT */
if (sev->state != SEV_STATE_INIT) {
- ret = __sev_platform_init_locked(&argp->error);
+ ret = sev_move_to_init_state(argp, &shutdown_required);
if (ret)
goto e_free_oca;
}
@@ -1764,6 +1838,9 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error);
e_free_oca:
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
kfree(oca_blob);
e_free_pek:
kfree(pek_blob);
@@ -1880,32 +1957,23 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
struct sev_data_pdh_cert_export data;
void __user *input_cert_chain_address;
void __user *input_pdh_cert_address;
+ bool shutdown_required = false;
int ret;
- /* If platform is not in INIT state then transition it to INIT. */
- if (sev->state != SEV_STATE_INIT) {
- if (!writable)
- return -EPERM;
-
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- return ret;
- }
-
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
memset(&data, 0, sizeof(data));
+ input_pdh_cert_address = (void __user *)input.pdh_cert_address;
+ input_cert_chain_address = (void __user *)input.cert_chain_address;
+
/* Userspace wants to query the certificate length. */
if (!input.pdh_cert_address ||
!input.pdh_cert_len ||
!input.cert_chain_address)
goto cmd;
- input_pdh_cert_address = (void __user *)input.pdh_cert_address;
- input_cert_chain_address = (void __user *)input.cert_chain_address;
-
/* Allocate a physically contiguous buffer to store the PDH blob. */
if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE)
return -EFAULT;
@@ -1931,6 +1999,17 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
data.cert_chain_len = input.cert_chain_len;
cmd:
+ /* If platform is not in INIT state then transition it to INIT. */
+ if (sev->state != SEV_STATE_INIT) {
+ if (!writable) {
+ ret = -EPERM;
+ goto e_free_cert;
+ }
+ ret = sev_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ goto e_free_cert;
+ }
+
ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error);
/* If we query the length, FW responded with expected data. */
@@ -1957,6 +2036,9 @@ cmd:
}
e_free_cert:
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
kfree(cert_blob);
e_free_pdh:
kfree(pdh_blob);
@@ -1966,12 +2048,13 @@ e_free_pdh:
static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
{
struct sev_device *sev = psp_master->sev_data;
+ bool shutdown_required = false;
struct sev_data_snp_addr buf;
struct page *status_page;
+ int ret, error;
void *data;
- int ret;
- if (!sev->snp_initialized || !argp->data)
+ if (!argp->data)
return -EINVAL;
status_page = alloc_page(GFP_KERNEL_ACCOUNT);
@@ -1980,6 +2063,12 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
data = page_address(status_page);
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ goto cleanup;
+ }
+
/*
* Firmware expects status page to be in firmware-owned state, otherwise
* it will report firmware error code INVALID_PAGE_STATE (0x1A).
@@ -2008,6 +2097,9 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
ret = -EFAULT;
cleanup:
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
__free_pages(status_page, 0);
return ret;
}
@@ -2016,21 +2108,33 @@ static int sev_ioctl_do_snp_commit(struct sev_issue_cmd *argp)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_data_snp_commit buf;
+ bool shutdown_required = false;
+ int ret, error;
- if (!sev->snp_initialized)
- return -EINVAL;
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ return ret;
+ }
buf.len = sizeof(buf);
- return __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error);
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error);
+
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+ return ret;
}
static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_snp_config config;
+ bool shutdown_required = false;
+ int ret, error;
- if (!sev->snp_initialized || !argp->data)
+ if (!argp->data)
return -EINVAL;
if (!writable)
@@ -2039,17 +2143,29 @@ static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable
if (copy_from_user(&config, (void __user *)argp->data, sizeof(config)))
return -EFAULT;
- return __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error);
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ return ret;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error);
+
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+ return ret;
}
static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_snp_vlek_load input;
+ bool shutdown_required = false;
+ int ret, error;
void *blob;
- int ret;
- if (!sev->snp_initialized || !argp->data)
+ if (!argp->data)
return -EINVAL;
if (!writable)
@@ -2068,8 +2184,18 @@ static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable)
input.vlek_wrapped_address = __psp_pa(blob);
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ goto cleanup;
+ }
+
ret = __sev_do_cmd_locked(SEV_CMD_SNP_VLEK_LOAD, &input, &argp->error);
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+cleanup:
kfree(blob);
return ret;
@@ -2339,6 +2465,15 @@ static void sev_firmware_shutdown(struct sev_device *sev)
mutex_unlock(&sev_cmd_mutex);
}
+void sev_platform_shutdown(void)
+{
+ if (!psp_master || !psp_master->sev_data)
+ return;
+
+ sev_firmware_shutdown(psp_master->sev_data);
+}
+EXPORT_SYMBOL_GPL(sev_platform_shutdown);
+
void sev_dev_destroy(struct psp_device *psp)
{
struct sev_device *sev = psp->sev_data;
@@ -2373,10 +2508,6 @@ static int snp_shutdown_on_panic(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static struct notifier_block snp_panic_notifier = {
- .notifier_call = snp_shutdown_on_panic,
-};
-
int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
void *data, int *error)
{
@@ -2390,9 +2521,7 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
void sev_pci_init(void)
{
struct sev_device *sev = psp_master->sev_data;
- struct sev_platform_init_args args = {0};
u8 api_major, api_minor, build;
- int rc;
if (!sev)
return;
@@ -2415,18 +2544,6 @@ void sev_pci_init(void)
api_major, api_minor, build,
sev->api_major, sev->api_minor, sev->build);
- /* Initialize the platform */
- args.probe = true;
- rc = sev_platform_init(&args);
- if (rc)
- dev_err(sev->dev, "SEV: failed to INIT error %#x, rc %d\n",
- args.error, rc);
-
- dev_info(sev->dev, "SEV%s API:%d.%d build:%d\n", sev->snp_initialized ?
- "-SNP" : "", sev->api_major, sev->api_minor, sev->build);
-
- atomic_notifier_chain_register(&panic_notifier_list,
- &snp_panic_notifier);
return;
err:
@@ -2443,7 +2560,4 @@ void sev_pci_exit(void)
return;
sev_firmware_shutdown(sev);
-
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &snp_panic_notifier);
}
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 2ebc878da160..e1be2072d680 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -375,6 +375,7 @@ static const struct tee_vdata teev1 = {
static const struct tee_vdata teev2 = {
.ring_wptr_reg = 0x10950, /* C2PMSG_20 */
.ring_rptr_reg = 0x10954, /* C2PMSG_21 */
+ .info_reg = 0x109e8, /* C2PMSG_58 */
};
static const struct platform_access_vdata pa_v1 = {
@@ -440,6 +441,7 @@ static const struct psp_vdata pspv5 = {
.cmdresp_reg = 0x10944, /* C2PMSG_17 */
.cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
.cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10510, /* P2CMSG_INTEN */
.intsts_reg = 0x10514, /* P2CMSG_INTSTS */
@@ -535,6 +537,7 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1134), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
+ { PCI_VDEVICE(AMD, 0x17D8), (kernel_ulong_t)&dev_vdata[8] },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index d3f5d108b898..7c41f9593d03 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -862,7 +862,7 @@ int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *d
return -EINVAL;
}
- algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+ algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN, GFP_KERNEL);
if (!algs)
return -ENOMEM;
@@ -5224,7 +5224,7 @@ static int qm_pre_store_caps(struct hisi_qm *qm)
size_t i, size;
size = ARRAY_SIZE(qm_cap_query_info);
- qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL);
+ qm_cap = devm_kcalloc(&pdev->dev, sizeof(*qm_cap), size, GFP_KERNEL);
if (!qm_cap)
return -ENOMEM;
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 1dc2378aa88b..e050f5ff5efb 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -491,8 +491,9 @@ static int img_hash_init(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -555,10 +556,10 @@ static int img_hash_update(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -570,9 +571,10 @@ static int img_hash_final(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -584,11 +586,12 @@ static int img_hash_finup(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
+ req->nbytes);
+
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -600,8 +603,9 @@ static int img_hash_import(struct ahash_request *req, const void *in)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -613,8 +617,9 @@ static int img_hash_export(struct ahash_request *req, void *out)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.c b/drivers/crypto/inside-secure/eip93/eip93-hash.c
index df1b05ac5a57..ac13d90a2b7c 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-hash.c
+++ b/drivers/crypto/inside-secure/eip93/eip93-hash.c
@@ -97,12 +97,20 @@ void eip93_hash_handle_result(struct crypto_async_request *async, int err)
static void eip93_hash_init_sa_state_digest(u32 hash, u8 *digest)
{
- u32 sha256_init[] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
- SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 };
- u32 sha224_init[] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
- SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7 };
- u32 sha1_init[] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };
- u32 md5_init[] = { MD5_H0, MD5_H1, MD5_H2, MD5_H3 };
+ static const u32 sha256_init[] = {
+ SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+ SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+ };
+ static const u32 sha224_init[] = {
+ SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+ SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7
+ };
+ static const u32 sha1_init[] = {
+ SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4
+ };
+ static const u32 md5_init[] = {
+ MD5_H0, MD5_H1, MD5_H2, MD5_H3
+ };
/* Init HASH constant */
switch (hash) {
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index f44c08f5f5ec..d2b632193beb 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -2043,7 +2043,7 @@ struct safexcel_alg_template safexcel_alg_cbcmac = {
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
+ .cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 09d9589f2d68..23f585219fb4 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -725,7 +725,7 @@ static int alloc_wq_table(int max_wqs)
for (cpu = 0; cpu < nr_cpus; cpu++) {
entry = per_cpu_ptr(wq_table, cpu);
- entry->wqs = kcalloc(max_wqs, sizeof(struct wq *), GFP_KERNEL);
+ entry->wqs = kcalloc(max_wqs, sizeof(*entry->wqs), GFP_KERNEL);
if (!entry->wqs) {
free_wq_table();
return -ENOMEM;
@@ -894,7 +894,7 @@ out:
static void rebalance_wq_table(void)
{
const struct cpumask *node_cpus;
- int node, cpu, iaa = -1;
+ int node_cpu, node, cpu, iaa = 0;
if (nr_iaa == 0)
return;
@@ -905,36 +905,29 @@ static void rebalance_wq_table(void)
clear_wq_table();
if (nr_iaa == 1) {
- for (cpu = 0; cpu < nr_cpus; cpu++) {
- if (WARN_ON(wq_table_add_wqs(0, cpu))) {
- pr_debug("could not add any wqs for iaa 0 to cpu %d!\n", cpu);
- return;
- }
+ for_each_possible_cpu(cpu) {
+ if (WARN_ON(wq_table_add_wqs(0, cpu)))
+ goto err;
}
return;
}
for_each_node_with_cpus(node) {
+ cpu = 0;
node_cpus = cpumask_of_node(node);
- for (cpu = 0; cpu < cpumask_weight(node_cpus); cpu++) {
- int node_cpu = cpumask_nth(cpu, node_cpus);
-
- if (WARN_ON(node_cpu >= nr_cpu_ids)) {
- pr_debug("node_cpu %d doesn't exist!\n", node_cpu);
- return;
- }
-
- if ((cpu % cpus_per_iaa) == 0)
- iaa++;
-
- if (WARN_ON(wq_table_add_wqs(iaa, node_cpu))) {
- pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu);
- return;
- }
+ for_each_cpu(node_cpu, node_cpus) {
+ iaa = cpu / cpus_per_iaa;
+ if (WARN_ON(wq_table_add_wqs(iaa, node_cpu)))
+ goto err;
+ cpu++;
}
}
+
+ return;
+err:
+ pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu);
}
static inline int check_completion(struct device *dev,
@@ -999,12 +992,9 @@ out:
static int deflate_generic_decompress(struct acomp_req *req)
{
- ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req));
+ ACOMP_FBREQ_ON_STACK(fbreq, req);
int ret;
- acomp_request_set_callback(fbreq, 0, NULL, NULL);
- acomp_request_set_params(fbreq, req->src, req->dst, req->slen,
- req->dlen);
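+	/*
+	 * ACOMP_FBREQ_ON_STACK derives the fallback request from @req, so the
+	 * explicit callback/params setup is no longer needed.
+	 */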
ret = crypto_acomp_decompress(fbreq);
req->dlen = fbreq->dlen;
@@ -1020,8 +1010,7 @@ static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- u32 compression_crc);
+ dma_addr_t dst_addr, unsigned int *dlen);
static void iaa_desc_complete(struct idxd_desc *idxd_desc,
enum idxd_complete_type comp_type,
@@ -1087,10 +1076,10 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc,
}
if (ctx->compress && compression_ctx->verify_compress) {
+ u32 *compression_crc = acomp_request_ctx(ctx->req);
dma_addr_t src_addr, dst_addr;
- u32 compression_crc;
- compression_crc = idxd_desc->iax_completion->crc;
+ *compression_crc = idxd_desc->iax_completion->crc;
ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr);
if (ret) {
@@ -1100,8 +1089,7 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc,
}
ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr,
- ctx->req->slen, dst_addr, &ctx->req->dlen,
- compression_crc);
+ ctx->req->slen, dst_addr, &ctx->req->dlen);
if (ret) {
dev_dbg(dev, "%s: compress verify failed ret=%d\n", __func__, ret);
err = -EIO;
@@ -1130,11 +1118,11 @@ out:
static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- u32 *compression_crc)
+ dma_addr_t dst_addr, unsigned int *dlen)
{
struct iaa_device_compression_mode *active_compression_mode;
struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *compression_crc = acomp_request_ctx(req);
struct iaa_device *iaa_device;
struct idxd_desc *idxd_desc;
struct iax_hw_desc *desc;
@@ -1187,8 +1175,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
" src_addr %llx, dst_addr %llx\n", __func__,
active_compression_mode->name,
src_addr, dst_addr);
- } else if (ctx->async_mode)
- req->base.data = idxd_desc;
+ }
dev_dbg(dev, "%s: compression mode %s,"
" desc->src1_addr %llx, desc->src1_size %d,"
@@ -1282,11 +1269,11 @@ out:
static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- u32 compression_crc)
+ dma_addr_t dst_addr, unsigned int *dlen)
{
struct iaa_device_compression_mode *active_compression_mode;
struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *compression_crc = acomp_request_ctx(req);
struct iaa_device *iaa_device;
struct idxd_desc *idxd_desc;
struct iax_hw_desc *desc;
@@ -1346,10 +1333,10 @@ static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
goto err;
}
- if (compression_crc != idxd_desc->iax_completion->crc) {
+ if (*compression_crc != idxd_desc->iax_completion->crc) {
ret = -EINVAL;
dev_dbg(dev, "(verify) iaa comp/decomp crc mismatch:"
- " comp=0x%x, decomp=0x%x\n", compression_crc,
+ " comp=0x%x, decomp=0x%x\n", *compression_crc,
idxd_desc->iax_completion->crc);
print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET,
8, 1, idxd_desc->iax_completion, 64, 0);
@@ -1369,8 +1356,7 @@ err:
static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- bool disable_async)
+ dma_addr_t dst_addr, unsigned int *dlen)
{
struct iaa_device_compression_mode *active_compression_mode;
struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1412,7 +1398,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
desc->src1_size = slen;
desc->completion_addr = idxd_desc->compl_dma;
- if (ctx->use_irq && !disable_async) {
+ if (ctx->use_irq) {
desc->flags |= IDXD_OP_FLAG_RCI;
idxd_desc->crypto.req = req;
@@ -1425,8 +1411,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
" src_addr %llx, dst_addr %llx\n", __func__,
active_compression_mode->name,
src_addr, dst_addr);
- } else if (ctx->async_mode && !disable_async)
- req->base.data = idxd_desc;
+ }
dev_dbg(dev, "%s: decompression mode %s,"
" desc->src1_addr %llx, desc->src1_size %d,"
@@ -1446,7 +1431,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
update_total_decomp_calls();
update_wq_decomp_calls(wq);
- if (ctx->async_mode && !disable_async) {
+ if (ctx->async_mode) {
ret = -EINPROGRESS;
dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
goto out;
@@ -1474,7 +1459,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
*dlen = req->dlen;
- if (!ctx->async_mode || disable_async)
+ if (!ctx->async_mode)
idxd_free_desc(wq, idxd_desc);
/* Update stats */
@@ -1496,7 +1481,6 @@ static int iaa_comp_acompress(struct acomp_req *req)
dma_addr_t src_addr, dst_addr;
int nr_sgs, cpu, ret = 0;
struct iaa_wq *iaa_wq;
- u32 compression_crc;
struct idxd_wq *wq;
struct device *dev;
@@ -1557,7 +1541,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
req->dst, req->dlen, sg_dma_len(req->dst));
ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
- &req->dlen, &compression_crc);
+ &req->dlen);
if (ret == -EINPROGRESS)
return ret;
@@ -1569,7 +1553,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
}
ret = iaa_compress_verify(tfm, req, wq, src_addr, req->slen,
- dst_addr, &req->dlen, compression_crc);
+ dst_addr, &req->dlen);
if (ret)
dev_dbg(dev, "asynchronous compress verification failed ret=%d\n", ret);
@@ -1655,7 +1639,7 @@ static int iaa_comp_adecompress(struct acomp_req *req)
req->dst, req->dlen, sg_dma_len(req->dst));
ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
- dst_addr, &req->dlen, false);
+ dst_addr, &req->dlen);
if (ret == -EINPROGRESS)
return ret;
@@ -1699,6 +1683,7 @@ static struct acomp_alg iaa_acomp_fixed_deflate = {
.cra_driver_name = "deflate-iaa",
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_ctxsize = sizeof(struct iaa_compression_ctx),
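+	/* Per-request context: a u32 holding the compression CRC for verify. */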
+ .cra_reqsize = sizeof(u32),
.cra_module = THIS_MODULE,
.cra_priority = IAA_ALG_PRIORITY,
}
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
index 02fb8abe4e6e..359c61f0c8a1 100644
--- a/drivers/crypto/intel/qat/Kconfig
+++ b/drivers/crypto/intel/qat/Kconfig
@@ -70,6 +70,18 @@ config CRYPTO_DEV_QAT_420XX
To compile this as a module, choose M here: the module
will be called qat_420xx.
+config CRYPTO_DEV_QAT_6XXX
+ tristate "Support for Intel(R) QuickAssist Technology QAT_6XXX"
+ depends on (X86 || COMPILE_TEST)
+ depends on PCI
+ select CRYPTO_DEV_QAT
+ help
+ Support for Intel(R) QuickAssist Technology QAT_6xxx
+ for accelerating crypto and compression workloads.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_6xxx.
+
config CRYPTO_DEV_QAT_DH895xCCVF
tristate "Support for Intel(R) DH895xCC Virtual Function"
depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
diff --git a/drivers/crypto/intel/qat/Makefile b/drivers/crypto/intel/qat/Makefile
index 235b69f4f3f7..abef14207afa 100644
--- a/drivers/crypto/intel/qat/Makefile
+++ b/drivers/crypto/intel/qat/Makefile
@@ -1,10 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
+subdir-ccflags-y := -I$(src)/qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/
obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/
obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/
obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx/
+obj-$(CONFIG_CRYPTO_DEV_QAT_6XXX) += qat_6xxx/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/
obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/
diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile
index 72b24b1804cf..f6df54d2993e 100644
--- a/drivers/crypto/intel/qat/qat_420xx/Makefile
+++ b/drivers/crypto/intel/qat/qat_420xx/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o
qat_420xx-y := adf_drv.o adf_420xx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 4feeef83f7a3..7c3c0f561c95 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -9,15 +9,14 @@
#include <adf_common_drv.h>
#include <adf_fw_config.h>
#include <adf_gen4_config.h>
-#include <adf_gen4_dc.h>
#include <adf_gen4_hw_csr_data.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include <adf_gen4_ras.h>
-#include <adf_gen4_timer.h>
#include <adf_gen4_tl.h>
#include <adf_gen4_vf_mig.h>
+#include <adf_timer.h>
#include "adf_420xx_hw_data.h"
#include "icp_qat_hw.h"
@@ -93,7 +92,6 @@ static const struct adf_fw_config adf_fw_dcc_config[] = {
static struct adf_hw_device_class adf_420xx_class = {
.name = ADF_420XX_DEVICE_NAME,
.type = DEV_420XX,
- .instances = 0,
};
static u32 get_ae_mask(struct adf_hw_device_data *self)
@@ -469,8 +467,8 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;
- hw_data->start_timer = adf_gen4_timer_start;
- hw_data->stop_timer = adf_gen4_timer_stop;
+ hw_data->start_timer = adf_timer_start;
+ hw_data->stop_timer = adf_timer_stop;
hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_420XX_AE_FREQ;
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
index 8084aa0f7f41..cfa00daeb4fb 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
@@ -14,7 +14,7 @@
#include "adf_420xx_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, ADF_420XX_PCI_DEVICE_ID), },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_420XX) },
{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -186,11 +186,19 @@ static void adf_remove(struct pci_dev *pdev)
adf_cleanup_accel(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_420XX_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
+ .shutdown = adf_shutdown,
.sriov_configure = adf_sriov_configure,
.err_handler = &adf_err_handler,
};
diff --git a/drivers/crypto/intel/qat/qat_4xxx/Makefile b/drivers/crypto/intel/qat/qat_4xxx/Makefile
index e8480bb80dee..188b611445e6 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/Makefile
+++ b/drivers/crypto/intel/qat/qat_4xxx/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx.o
qat_4xxx-y := adf_drv.o adf_4xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 4eb6ef99efdd..bd0b1b1015c0 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -9,15 +9,14 @@
#include <adf_common_drv.h>
#include <adf_fw_config.h>
#include <adf_gen4_config.h>
-#include <adf_gen4_dc.h>
#include <adf_gen4_hw_csr_data.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include "adf_gen4_ras.h"
-#include <adf_gen4_timer.h>
#include <adf_gen4_tl.h>
#include <adf_gen4_vf_mig.h>
+#include <adf_timer.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -96,7 +95,6 @@ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config));
static struct adf_hw_device_class adf_4xxx_class = {
.name = ADF_4XXX_DEVICE_NAME,
.type = DEV_4XXX,
- .instances = 0,
};
static u32 get_ae_mask(struct adf_hw_device_data *self)
@@ -422,13 +420,13 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
hw_data->num_rps = ADF_GEN4_MAX_RPS;
switch (dev_id) {
- case ADF_402XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_402XX:
hw_data->fw_name = ADF_402XX_FW;
hw_data->fw_mmp_name = ADF_402XX_MMP;
hw_data->uof_get_name = uof_get_name_402xx;
hw_data->get_ena_thd_mask = get_ena_thd_mask;
break;
- case ADF_401XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_401XX:
hw_data->fw_name = ADF_4XXX_FW;
hw_data->fw_mmp_name = ADF_4XXX_MMP;
hw_data->uof_get_name = uof_get_name_4xxx;
@@ -455,8 +453,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;
- hw_data->start_timer = adf_gen4_timer_start;
- hw_data->stop_timer = adf_gen4_timer_stop;
+ hw_data->start_timer = adf_timer_start;
+ hw_data->stop_timer = adf_timer_stop;
hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index 5537a9991e4e..c9be5dcddb27 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -14,9 +14,9 @@
#include "adf_4xxx_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
- { PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), },
- { PCI_VDEVICE(INTEL, ADF_402XX_PCI_DEVICE_ID), },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_4XXX) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_401XX) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_402XX) },
{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -188,11 +188,19 @@ static void adf_remove(struct pci_dev *pdev)
adf_cleanup_accel(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_4XXX_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
+ .shutdown = adf_shutdown,
.sriov_configure = adf_sriov_configure,
.err_handler = &adf_err_handler,
};
diff --git a/drivers/crypto/intel/qat/qat_6xxx/Makefile b/drivers/crypto/intel/qat/qat_6xxx/Makefile
new file mode 100644
index 000000000000..4b4de67cb0c2
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_6xxx/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_QAT_6XXX) += qat_6xxx.o
+qat_6xxx-y := adf_drv.o adf_6xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
new file mode 100644
index 000000000000..359a6447ccb8
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
@@ -0,0 +1,845 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include <adf_accel_devices.h>
+#include <adf_admin.h>
+#include <adf_cfg.h>
+#include <adf_cfg_services.h>
+#include <adf_clock.h>
+#include <adf_common_drv.h>
+#include <adf_fw_config.h>
+#include <adf_gen6_pm.h>
+#include <adf_gen6_ras.h>
+#include <adf_gen6_shared.h>
+#include <adf_timer.h>
+#include "adf_6xxx_hw_data.h"
+#include "icp_qat_fw_comp.h"
+#include "icp_qat_hw_51_comp.h"
+
+#define RP_GROUP_0_MASK (BIT(0) | BIT(2))
+#define RP_GROUP_1_MASK (BIT(1) | BIT(3))
+#define RP_GROUP_ALL_MASK (RP_GROUP_0_MASK | RP_GROUP_1_MASK)
+
+#define ADF_AE_GROUP_0 GENMASK(3, 0)
+#define ADF_AE_GROUP_1 GENMASK(7, 4)
+#define ADF_AE_GROUP_2 BIT(8)
+
+struct adf_ring_config {
+ u32 ring_mask;
+ enum adf_cfg_service_type ring_type;
+ const unsigned long *thrd_mask;
+};
+
+static u32 rmask_two_services[] = {
+ RP_GROUP_0_MASK,
+ RP_GROUP_1_MASK,
+};
+
+enum adf_gen6_rps {
+ RP0 = 0,
+ RP1 = 1,
+ RP2 = 2,
+ RP3 = 3,
+ RP_MAX = RP3
+};
+
+/*
+ * thrd_mask_[sym|asym|cpr|dcc]: these static arrays define the thread
+ * configuration for handling requests of specific services across the
+ * accelerator engines. Each element in an array corresponds to an
+ * accelerator engine, with the value being a bitmask that specifies which
+ * threads within that engine are capable of processing the particular service.
+ *
+ * For example, a value of 0x0C means that threads 2 and 3 are enabled for the
+ * service in the respective accelerator engine.
+ */
+static const unsigned long thrd_mask_sym[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x0C, 0x0C, 0x0C, 0x0C, 0x1C, 0x1C, 0x1C, 0x1C, 0x00
+};
+
+static const unsigned long thrd_mask_asym[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x70, 0x70, 0x70, 0x70, 0x60, 0x60, 0x60, 0x60, 0x00
+};
+
+static const unsigned long thrd_mask_cpr[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00
+};
+
+static const unsigned long thrd_mask_dcc[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x00, 0x00, 0x00, 0x00, 0x07, 0x07, 0x03, 0x03, 0x00
+};
+
+static const char *const adf_6xxx_fw_objs[] = {
+ [ADF_FW_CY_OBJ] = ADF_6XXX_CY_OBJ,
+ [ADF_FW_DC_OBJ] = ADF_6XXX_DC_OBJ,
+ [ADF_FW_ADMIN_OBJ] = ADF_6XXX_ADMIN_OBJ,
+};
+
+static const struct adf_fw_config adf_default_fw_config[] = {
+ { ADF_AE_GROUP_1, ADF_FW_DC_OBJ },
+ { ADF_AE_GROUP_0, ADF_FW_CY_OBJ },
+ { ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ },
+};
+
+static struct adf_hw_device_class adf_6xxx_class = {
+ .name = ADF_6XXX_DEVICE_NAME,
+ .type = DEV_6XXX,
+};
+
+static bool services_supported(unsigned long mask)
+{
+ int num_svc;
+
+ if (mask >= BIT(SVC_BASE_COUNT))
+ return false;
+
+ num_svc = hweight_long(mask);
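+	/* DCC cannot be combined with other services; it must run alone. */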
+ switch (num_svc) {
+ case ADF_ONE_SERVICE:
+ return true;
+ case ADF_TWO_SERVICES:
+ case ADF_THREE_SERVICES:
+ return !test_bit(SVC_DCC, &mask);
+ default:
+ return false;
+ }
+}
+
+static int get_service(unsigned long *mask)
+{
+ if (test_and_clear_bit(SVC_ASYM, mask))
+ return SVC_ASYM;
+
+ if (test_and_clear_bit(SVC_SYM, mask))
+ return SVC_SYM;
+
+ if (test_and_clear_bit(SVC_DC, mask))
+ return SVC_DC;
+
+ if (test_and_clear_bit(SVC_DCC, mask))
+ return SVC_DCC;
+
+ return -EINVAL;
+}
+
+static enum adf_cfg_service_type get_ring_type(enum adf_services service)
+{
+ switch (service) {
+ case SVC_SYM:
+ return SYM;
+ case SVC_ASYM:
+ return ASYM;
+ case SVC_DC:
+ case SVC_DCC:
+ return COMP;
+ default:
+ return UNUSED;
+ }
+}
+
+static const unsigned long *get_thrd_mask(enum adf_services service)
+{
+ switch (service) {
+ case SVC_SYM:
+ return thrd_mask_sym;
+ case SVC_ASYM:
+ return thrd_mask_asym;
+ case SVC_DC:
+ return thrd_mask_cpr;
+ case SVC_DCC:
+ return thrd_mask_dcc;
+ default:
+ return NULL;
+ }
+}
+
+static int get_rp_config(struct adf_accel_dev *accel_dev, struct adf_ring_config *rp_config,
+ unsigned int *num_services)
+{
+ unsigned int i, nservices;
+ unsigned long mask;
+ int ret, service;
+
+ ret = adf_get_service_mask(accel_dev, &mask);
+ if (ret)
+ return ret;
+
+ nservices = hweight_long(mask);
+ if (nservices > MAX_NUM_CONCURR_SVC)
+ return -EINVAL;
+
+ for (i = 0; i < nservices; i++) {
+ service = get_service(&mask);
+ if (service < 0)
+ return service;
+
+ rp_config[i].ring_type = get_ring_type(service);
+ rp_config[i].thrd_mask = get_thrd_mask(service);
+
+ /*
+ * If there is only one service enabled, use all ring pairs for
+ * that service.
+ * If there are two services enabled, use ring pairs 0 and 2 for
+ * one service and ring pairs 1 and 3 for the other service.
+ */
+ switch (nservices) {
+ case ADF_ONE_SERVICE:
+ rp_config[i].ring_mask = RP_GROUP_ALL_MASK;
+ break;
+ case ADF_TWO_SERVICES:
+ rp_config[i].ring_mask = rmask_two_services[i];
+ break;
+ case ADF_THREE_SERVICES:
+ rp_config[i].ring_mask = BIT(i);
+
+ /* If ASYM is enabled, use additional ring pair */
+ if (service == SVC_ASYM)
+ rp_config[i].ring_mask |= BIT(RP3);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ *num_services = nservices;
+
+ return 0;
+}
+
+static u32 adf_gen6_get_arb_mask(struct adf_accel_dev *accel_dev, unsigned int ae)
+{
+ struct adf_ring_config rp_config[MAX_NUM_CONCURR_SVC];
+ unsigned int num_services, i, thrd;
+ u32 ring_mask, thd2arb_mask = 0;
+ const unsigned long *p_mask;
+
+ if (get_rp_config(accel_dev, rp_config, &num_services))
+ return 0;
+
+ /*
+ * The thd2arb_mask maps ring pairs to threads within an accelerator engine.
+ * It ensures that jobs submitted to ring pairs are scheduled on threads capable
+ * of handling the specified service type.
+ *
+ * Each group of 4 bits in the mask corresponds to a thread, with each bit
+ * indicating whether a job from a ring pair can be scheduled on that thread.
+ * The use of 4 bits is due to the organization of ring pairs into groups of
+ * four, where each group shares the same configuration.
+ */
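+	/*
+	 * For example, with sym and dc enabled, sym jobs use ring pairs 0 and 2
+	 * (ring_mask 0x5). With thrd_mask_sym[ae] = 0x0C, threads 2 and 3 each
+	 * accept that group, so thd2arb_mask = (0x5 << 8) | (0x5 << 12) = 0x5500.
+	 */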
+ for (i = 0; i < num_services; i++) {
+ p_mask = &rp_config[i].thrd_mask[ae];
+ ring_mask = rp_config[i].ring_mask;
+
+ for_each_set_bit(thrd, p_mask, ADF_NUM_THREADS_PER_AE)
+ thd2arb_mask |= ring_mask << (thrd * 4);
+ }
+
+ return thd2arb_mask;
+}
+
+static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
+{
+ enum adf_cfg_service_type rps[ADF_GEN6_NUM_BANKS_PER_VF] = { };
+ struct adf_ring_config rp_config[MAX_NUM_CONCURR_SVC];
+ unsigned int num_services, rp_num, i;
+ unsigned long cfg_mask;
+ u16 ring_to_svc_map;
+
+ if (get_rp_config(accel_dev, rp_config, &num_services))
+ return 0;
+
+ /*
+	 * Loop through the configured services and populate the `rps` array,
+	 * which records the service each ring pair handles (i.e. symmetric
+	 * crypto, asymmetric crypto, data compression or compression chaining).
+ */
+ for (i = 0; i < num_services; i++) {
+ cfg_mask = rp_config[i].ring_mask;
+ for_each_set_bit(rp_num, &cfg_mask, ADF_GEN6_NUM_BANKS_PER_VF)
+ rps[rp_num] = rp_config[i].ring_type;
+ }
+
+ /*
+	 * The ring_to_svc_map is structured into segments of 3 bits, with each
+	 * segment representing the service configuration for a specific ring pair.
+	 * Since ring pairs are organized into groups of 4, the ring_to_svc_map
+	 * contains 4 such 3-bit segments, each corresponding to one ring pair.
+ *
+ * The device has 64 ring pairs, which are organized in groups of 4, namely
+ * 16 groups. Each group has the same configuration, represented here by
+ * `ring_to_svc_map`.
+ */
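+	/*
+	 * For example, with sym and dc enabled, sym is assigned ring pairs 0
+	 * and 2 and dc ring pairs 1 and 3, so rps becomes { SYM, COMP, SYM,
+	 * COMP } and the four 3-bit segments encode that sequence in order.
+	 */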
+ ring_to_svc_map = rps[RP0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
+ rps[RP1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
+ rps[RP2] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
+ rps[RP3] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
+
+ return ring_to_svc_map;
+}
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_ACCELERATORS_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+ return self ? hweight32(self->ae_mask) : 0;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_SRAM_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+ return DEV_SKU_1;
+}
+
+static void get_arb_info(struct arb_info *arb_info)
+{
+ arb_info->arb_cfg = ADF_GEN6_ARB_CONFIG;
+ arb_info->arb_offset = ADF_GEN6_ARB_OFFSET;
+ arb_info->wt2sam_offset = ADF_GEN6_ARB_WRK_2_SER_MAP_OFFSET;
+}
+
+static void get_admin_info(struct admin_info *admin_csrs_info)
+{
+ admin_csrs_info->mailbox_offset = ADF_GEN6_MAILBOX_BASE_OFFSET;
+ admin_csrs_info->admin_msg_ur = ADF_GEN6_ADMINMSGUR_OFFSET;
+ admin_csrs_info->admin_msg_lr = ADF_GEN6_ADMINMSGLR_OFFSET;
+}
+
+static u32 get_heartbeat_clock(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_COUNTER_FREQ;
+}
+
+static void enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+
+ /*
+ * Enable all error notification bits in errsou3 except VFLR
+ * notification on host.
+ */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, ADF_GEN6_VFLNOTIFY);
+}
+
+static void enable_ints(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr = adf_get_pmisc_base(accel_dev);
+
+ /* Enable bundle interrupts */
+ ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_RP_X0_MASK_OFFSET, 0);
+ ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_RP_X1_MASK_OFFSET, 0);
+
+ /* Enable misc interrupts */
+ ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_MASK_OFFSET, 0);
+}
+
+static void set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr = adf_get_pmisc_base(accel_dev);
+ u64 val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+ u64 val = ADF_SSM_WDT_DEFAULT_VALUE;
+
+ /* Enable watchdog timer for sym and dc */
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTATHL_OFFSET, ADF_SSMWDTATHH_OFFSET, val);
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTCNVL_OFFSET, ADF_SSMWDTCNVH_OFFSET, val);
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTUCSL_OFFSET, ADF_SSMWDTUCSH_OFFSET, val);
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTDCPRL_OFFSET, ADF_SSMWDTDCPRH_OFFSET, val);
+
+ /* Enable watchdog timer for pke */
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTPKEL_OFFSET, ADF_SSMWDTPKEH_OFFSET, val_pke);
+}
+
+/*
+ * The vector routing table is used to select the MSI-X entry to use for each
+ * interrupt source.
+ * The first ADF_GEN6_ETR_MAX_BANKS entries correspond to ring interrupts.
+ * The final entry corresponds to VF2PF or error interrupts.
+ * This vector table could be used to configure one MSI-X entry to be shared
+ * between multiple interrupt sources.
+ *
+ * The default routing is set to have a one to one correspondence between the
+ * interrupt source and the MSI-X entry used.
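+ * For example, with the default table an interrupt from ring bank 5 is
+ * delivered on MSI-X entry 5, while the entry at index ADF_GEN6_ETR_MAX_BANKS
+ * carries the VF2PF and error sources.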
+ */
+static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+ unsigned int i;
+
+ for (i = 0; i <= ADF_GEN6_ETR_MAX_BANKS; i++)
+ ADF_CSR_WR(csr, ADF_GEN6_MSIX_RTTABLE_OFFSET(i), i);
+}
+
+static int reset_ring_pair(void __iomem *csr, u32 bank_number)
+{
+ u32 status;
+ int ret;
+
+ /*
+	 * Set BIT(0) of the rpresetctl register to trigger the ring pair reset.
+	 * The rpresetctl registers have no other RW fields, so there is no need
+	 * to preserve the values of other bits; write directly.
+ */
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
+ ADF_WQM_CSR_RPRESETCTL_RESET);
+
+ /* Read rpresetsts register and wait for rp reset to complete */
+ ret = read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_WQM_CSR_RPRESETSTS_STATUS,
+ ADF_RPRESET_POLL_DELAY_US,
+ ADF_RPRESET_POLL_TIMEOUT_US, true,
+ csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
+ if (ret)
+ return ret;
+
+ /* When ring pair reset is done, clear rpresetsts */
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number), ADF_WQM_CSR_RPRESETSTS_STATUS);
+
+ return 0;
+}
+
+static int ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *csr = adf_get_etr_base(accel_dev);
+ int ret;
+
+ if (bank_number >= hw_data->num_banks)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "ring pair reset for bank:%d\n", bank_number);
+
+ ret = reset_ring_pair(csr, bank_number);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "ring pair reset failed (timeout)\n");
+ else
+ dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");
+
+ return ret;
+}
+
+static int build_comp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_hw_comp_51_config_csr_lower hw_comp_lower_csr = { };
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ u32 lower_val;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_DISABLED;
+ hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1;
+ lower_val = ICP_QAT_FW_COMP_51_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
+ cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+
+ return 0;
+}
+
+static int build_decomp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] = 0;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+
+ return 0;
+}
+
+static void adf_gen6_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+ dc_ops->build_comp_block = build_comp_block;
+ dc_ops->build_decomp_block = build_decomp_block;
+}
+
+static int adf_gen6_init_thd2arb_map(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ u32 *thd2arb_map = hw_data->thd_to_arb_map;
+ unsigned int i;
+
+ for (i = 0; i < hw_data->num_engines; i++) {
+ thd2arb_map[i] = adf_gen6_get_arb_mask(accel_dev, i);
+ dev_dbg(&GET_DEV(accel_dev), "ME:%d arb_mask:%#x\n", i, thd2arb_map[i]);
+ }
+
+ return 0;
+}
+
+static void set_vc_csr_for_bank(void __iomem *csr, u32 bank_number)
+{
+ u32 value;
+
+ /*
+ * After each PF FLR, for each of the 64 ring pairs in the PF, the
+ * driver must program the ringmodectl CSRs.
+ */
+ value = ADF_CSR_RD(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number));
+ value |= FIELD_PREP(ADF_GEN6_RINGMODECTL_TC_MASK, ADF_GEN6_RINGMODECTL_TC_DEFAULT);
+ value |= FIELD_PREP(ADF_GEN6_RINGMODECTL_TC_EN_MASK, ADF_GEN6_RINGMODECTL_TC_EN_OP1);
+ ADF_CSR_WR(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number), value);
+}
+
+static int set_vc_config(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ u32 value;
+ int err;
+
+ /*
+ * After each PF FLR, the driver must program the Port Virtual Channel (VC)
+ * Control Registers.
+ * Read PVC0CTL then write the masked values.
+ */
+ pci_read_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, &value);
+ value |= FIELD_PREP(ADF_GEN6_PVC0CTL_TCVCMAP_MASK, ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT);
+ err = pci_write_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, value);
+ if (err) {
+ dev_err(&GET_DEV(accel_dev), "pci write to PVC0CTL failed\n");
+ return pcibios_err_to_errno(err);
+ }
+
+ /* Read PVC1CTL then write masked values */
+ pci_read_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, &value);
+ value |= FIELD_PREP(ADF_GEN6_PVC1CTL_TCVCMAP_MASK, ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT);
+ value |= FIELD_PREP(ADF_GEN6_PVC1CTL_VCEN_MASK, ADF_GEN6_PVC1CTL_VCEN_ON);
+ err = pci_write_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, value);
+ if (err)
+ dev_err(&GET_DEV(accel_dev), "pci write to PVC1CTL failed\n");
+
+ return pcibios_err_to_errno(err);
+}
+
+static int adf_gen6_set_vc(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ void __iomem *csr = adf_get_etr_base(accel_dev);
+ u32 i;
+
+ for (i = 0; i < hw_data->num_banks; i++) {
+ dev_dbg(&GET_DEV(accel_dev), "set virtual channels for bank:%d\n", i);
+ set_vc_csr_for_bank(csr, i);
+ }
+
+ return set_vc_config(accel_dev);
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+ unsigned long fuses = self->fuses[ADF_FUSECTL4];
+ u32 mask = ADF_6XXX_ACCELENGINES_MASK;
+
+ /*
+ * If bit 0 is set in the fuses, the first 4 engines are disabled.
+	 * If bit 4 is set, the second group of 4 engines is disabled.
+	 * If bit 8 is set, the admin engine is disabled.
+ */
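+	/*
+	 * For example, fuses = 0x11 (bits 0 and 4 set) leaves only the admin
+	 * engine enabled: mask = 0x1FF & ~0x0F & ~0xF0 = 0x100.
+	 */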
+ if (test_bit(0, &fuses))
+ mask &= ~ADF_AE_GROUP_0;
+
+ if (test_bit(4, &fuses))
+ mask &= ~ADF_AE_GROUP_1;
+
+ if (test_bit(8, &fuses))
+ mask &= ~ADF_AE_GROUP_2;
+
+ return mask;
+}
+
+static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+{
+ u32 capabilities_sym, capabilities_asym;
+ u32 capabilities_dc;
+ unsigned long mask;
+ u32 caps = 0;
+ u32 fusectl1;
+
+ fusectl1 = GET_HW_DATA(accel_dev)->fuses[ADF_FUSECTL1];
+
+ /* Read accelerator capabilities mask */
+ capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_SHA3 |
+ ICP_ACCEL_CAPABILITIES_SHA3_EXT |
+ ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
+ ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
+ ICP_ACCEL_CAPABILITIES_AES_V2;
+
+ /* A set bit in fusectl1 means the corresponding feature is OFF in this SKU */
+ if (fusectl1 & ICP_ACCEL_GEN6_MASK_UCS_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
+ }
+ if (fusectl1 & ICP_ACCEL_GEN6_MASK_AUTH_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+
+ capabilities_asym = 0;
+
+ capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+
+ if (fusectl1 & ICP_ACCEL_GEN6_MASK_CPR_SLICE) {
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+ }
+
+ if (adf_get_service_mask(accel_dev, &mask))
+ return 0;
+
+ if (test_bit(SVC_ASYM, &mask))
+ caps |= capabilities_asym;
+ if (test_bit(SVC_SYM, &mask))
+ caps |= capabilities_sym;
+ if (test_bit(SVC_DC, &mask))
+ caps |= capabilities_dc;
+ if (test_bit(SVC_DCC, &mask)) {
+ /*
+ * Sym capabilities are available for chaining operations,
+ * but sym crypto instances cannot be supported
+ */
+ caps = capabilities_dc | capabilities_sym;
+ caps &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ }
+
+ return caps;
+}
+
+static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
+{
+ return ARRAY_SIZE(adf_default_fw_config);
+}
+
+static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ int num_fw_objs = ARRAY_SIZE(adf_6xxx_fw_objs);
+ int id;
+
+ id = adf_default_fw_config[obj_num].obj;
+ if (id >= num_fw_objs)
+ return NULL;
+
+ return adf_6xxx_fw_objs[id];
+}
+
+static const char *uof_get_name_6xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ return uof_get_name(accel_dev, obj_num);
+}
+
+static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return -EINVAL;
+
+ return adf_default_fw_config[obj_num].obj;
+}
+
+static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ return adf_default_fw_config[obj_num].ae_mask;
+}
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+ if (adf_gen6_init_thd2arb_map(accel_dev))
+ dev_warn(&GET_DEV(accel_dev),
+ "Failed to generate thread to arbiter mapping");
+
+ return GET_HW_DATA(accel_dev)->thd_to_arb_map;
+}
+
+static int adf_init_device(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr = adf_get_pmisc_base(accel_dev);
+ u32 status;
+ u32 csr;
+ int ret;
+
+ /* Temporarily mask PM interrupt */
+ csr = ADF_CSR_RD(addr, ADF_GEN6_ERRMSK2);
+ csr |= ADF_GEN6_PM_SOU;
+ ADF_CSR_WR(addr, ADF_GEN6_ERRMSK2, csr);
+
+ /* Set DRV_ACTIVE bit to power up the device */
+ ADF_CSR_WR(addr, ADF_GEN6_PM_INTERRUPT, ADF_GEN6_PM_DRV_ACTIVE);
+
+ /* Poll status register to make sure the device is powered up */
+ ret = read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_GEN6_PM_INIT_STATE,
+ ADF_GEN6_PM_POLL_DELAY_US,
+ ADF_GEN6_PM_POLL_TIMEOUT_US, true, addr,
+ ADF_GEN6_PM_STATUS);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
+ return ret;
+ }
+
+ dev_dbg(&GET_DEV(accel_dev), "Setting virtual channels for device qat_dev%d\n",
+ accel_dev->accel_id);
+
+ ret = adf_gen6_set_vc(accel_dev);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Failed to set virtual channels\n");
+
+ return ret;
+}
+
+static int enable_pm(struct adf_accel_dev *accel_dev)
+{
+ return adf_init_admin_pm(accel_dev, ADF_GEN6_PM_DEFAULT_IDLE_FILTER);
+}
+
+static int dev_config(struct adf_accel_dev *accel_dev)
+{
+ int ret;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+ if (ret)
+ return ret;
+
+ ret = adf_cfg_section_add(accel_dev, "Accelerator0");
+ if (ret)
+ return ret;
+
+ switch (adf_get_service_enabled(accel_dev)) {
+ case SVC_DC:
+ case SVC_DCC:
+ ret = adf_gen6_comp_dev_config(accel_dev);
+ break;
+ default:
+ ret = adf_gen6_no_dev_config(accel_dev);
+ break;
+ }
+ if (ret)
+ return ret;
+
+ __set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+ return ret;
+}
+
+void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class = &adf_6xxx_class;
+ hw_data->instance_id = adf_6xxx_class.instances++;
+ hw_data->num_banks = ADF_GEN6_ETR_MAX_BANKS;
+ hw_data->num_banks_per_vf = ADF_GEN6_NUM_BANKS_PER_VF;
+ hw_data->num_rings_per_bank = ADF_GEN6_NUM_RINGS_PER_BANK;
+ hw_data->num_accel = ADF_GEN6_MAX_ACCELERATORS;
+ hw_data->num_engines = ADF_6XXX_MAX_ACCELENGINES;
+ hw_data->num_logical_accel = 1;
+ hw_data->tx_rx_gap = ADF_GEN6_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN6_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = 0;
+ hw_data->alloc_irq = adf_isr_resource_alloc;
+ hw_data->free_irq = adf_isr_resource_free;
+ hw_data->enable_error_correction = enable_error_correction;
+ hw_data->get_accel_mask = get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_num_accels = get_num_accels;
+ hw_data->get_num_aes = get_num_aes;
+ hw_data->get_sram_bar_id = get_sram_bar_id;
+ hw_data->get_etr_bar_id = get_etr_bar_id;
+ hw_data->get_misc_bar_id = get_misc_bar_id;
+ hw_data->get_arb_info = get_arb_info;
+ hw_data->get_admin_info = get_admin_info;
+ hw_data->get_accel_cap = get_accel_cap;
+ hw_data->get_sku = get_sku;
+ hw_data->init_admin_comms = adf_init_admin_comms;
+ hw_data->exit_admin_comms = adf_exit_admin_comms;
+ hw_data->send_admin_init = adf_send_admin_init;
+ hw_data->init_arb = adf_init_arb;
+ hw_data->exit_arb = adf_exit_arb;
+ hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+ hw_data->enable_ints = enable_ints;
+ hw_data->reset_device = adf_reset_flr;
+ hw_data->admin_ae_mask = ADF_6XXX_ADMIN_AE_MASK;
+ hw_data->fw_name = ADF_6XXX_FW;
+ hw_data->fw_mmp_name = ADF_6XXX_MMP;
+ hw_data->uof_get_name = uof_get_name_6xxx;
+ hw_data->uof_get_num_objs = uof_get_num_objs;
+ hw_data->uof_get_obj_type = uof_get_obj_type;
+ hw_data->uof_get_ae_mask = uof_get_ae_mask;
+ hw_data->set_msix_rttable = set_msix_default_rttable;
+ hw_data->set_ssm_wdtimer = set_ssm_wdtimer;
+ hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->ring_pair_reset = ring_pair_reset;
+ hw_data->dev_config = dev_config;
+ hw_data->get_hb_clock = get_heartbeat_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+ hw_data->start_timer = adf_timer_start;
+ hw_data->stop_timer = adf_timer_stop;
+ hw_data->init_device = adf_init_device;
+ hw_data->enable_pm = enable_pm;
+ hw_data->services_supported = services_supported;
+
+ adf_gen6_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen6_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+ adf_gen6_init_dc_ops(&hw_data->dc_ops);
+ adf_gen6_init_ras_ops(&hw_data->ras_ops);
+}
+
+void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data)
+{
+ if (hw_data->dev_class->instances)
+ hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
new file mode 100644
index 000000000000..78e2e2c5816e
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_6XXX_HW_DATA_H_
+#define ADF_6XXX_HW_DATA_H_
+
+#include <linux/bits.h>
+#include <linux/time.h>
+#include <linux/units.h>
+
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+#include "adf_dc.h"
+
+/* PCIe configuration space */
+#define ADF_GEN6_BAR_MASK (BIT(0) | BIT(2) | BIT(4))
+#define ADF_GEN6_SRAM_BAR 0
+#define ADF_GEN6_PMISC_BAR 1
+#define ADF_GEN6_ETR_BAR 2
+#define ADF_6XXX_MAX_ACCELENGINES 9
+
+/* Clocks frequency */
+#define ADF_GEN6_COUNTER_FREQ (100 * HZ_PER_MHZ)
+
+/* Physical function fuses */
+#define ADF_GEN6_FUSECTL0_OFFSET 0x2C8
+#define ADF_GEN6_FUSECTL1_OFFSET 0x2CC
+#define ADF_GEN6_FUSECTL4_OFFSET 0x2D8
+
+/* Accelerators */
+#define ADF_GEN6_ACCELERATORS_MASK 0x1
+#define ADF_GEN6_MAX_ACCELERATORS 1
+
+/* MSI-X interrupt */
+#define ADF_GEN6_SMIAPF_RP_X0_MASK_OFFSET 0x41A040
+#define ADF_GEN6_SMIAPF_RP_X1_MASK_OFFSET 0x41A044
+#define ADF_GEN6_SMIAPF_MASK_OFFSET 0x41A084
+#define ADF_GEN6_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 4))
+
+/* Bank and ring configuration */
+#define ADF_GEN6_NUM_RINGS_PER_BANK 2
+#define ADF_GEN6_NUM_BANKS_PER_VF 4
+#define ADF_GEN6_ETR_MAX_BANKS 64
+#define ADF_GEN6_RX_RINGS_OFFSET 1
+#define ADF_GEN6_TX_RINGS_MASK 0x1
+
+/* Arbiter configuration */
+#define ADF_GEN6_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
+#define ADF_GEN6_ARB_OFFSET 0x000
+#define ADF_GEN6_ARB_WRK_2_SER_MAP_OFFSET 0x400
+
+/* Admin interface configuration */
+#define ADF_GEN6_ADMINMSGUR_OFFSET 0x500574
+#define ADF_GEN6_ADMINMSGLR_OFFSET 0x500578
+#define ADF_GEN6_MAILBOX_BASE_OFFSET 0x600970
+
+/*
+ * Watchdog timers
+ * The timeout is expressed in cycles. Clock speed may vary across products,
+ * but this value should correspond to a few milliseconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE 0x7000000ULL
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000ULL
+#define ADF_SSMWDTATHL_OFFSET 0x5208
+#define ADF_SSMWDTATHH_OFFSET 0x520C
+#define ADF_SSMWDTCNVL_OFFSET 0x5408
+#define ADF_SSMWDTCNVH_OFFSET 0x540C
+#define ADF_SSMWDTUCSL_OFFSET 0x5808
+#define ADF_SSMWDTUCSH_OFFSET 0x580C
+#define ADF_SSMWDTDCPRL_OFFSET 0x5A08
+#define ADF_SSMWDTDCPRH_OFFSET 0x5A0C
+#define ADF_SSMWDTPKEL_OFFSET 0x5E08
+#define ADF_SSMWDTPKEH_OFFSET 0x5E0C
+
+/* Ring reset */
+#define ADF_RPRESET_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
+#define ADF_RPRESET_POLL_DELAY_US 20
+#define ADF_WQM_CSR_RPRESETCTL_RESET BIT(0)
+#define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + (bank) * 8)
+#define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0)
+#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
+
+/* Controls and sets up the corresponding ring mode of operation */
+#define ADF_GEN6_CSR_RINGMODECTL(bank) (0x9000 + (bank) * 4)
+
+/* Specifies the traffic class to use for the transactions to/from the ring */
+#define ADF_GEN6_RINGMODECTL_TC_MASK GENMASK(18, 16)
+#define ADF_GEN6_RINGMODECTL_TC_DEFAULT 0x7
+
+/* Specifies usage of tc for the transactions to/from this ring */
+#define ADF_GEN6_RINGMODECTL_TC_EN_MASK GENMASK(20, 19)
+
+/*
+ * Use the value programmed in the tc field for request descriptor
+ * and metadata read transactions
+ */
+#define ADF_GEN6_RINGMODECTL_TC_EN_OP1 0x1
+
+/* VC0 Resource Control Register */
+#define ADF_GEN6_PVC0CTL_OFFSET 0x204
+#define ADF_GEN6_PVC0CTL_TCVCMAP_OFFSET 1
+#define ADF_GEN6_PVC0CTL_TCVCMAP_MASK GENMASK(7, 1)
+#define ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT 0x7F
+
+/* VC1 Resource Control Register */
+#define ADF_GEN6_PVC1CTL_OFFSET 0x210
+#define ADF_GEN6_PVC1CTL_TCVCMAP_OFFSET 1
+#define ADF_GEN6_PVC1CTL_TCVCMAP_MASK GENMASK(7, 1)
+#define ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT 0x40
+#define ADF_GEN6_PVC1CTL_VCEN_OFFSET 31
+#define ADF_GEN6_PVC1CTL_VCEN_MASK BIT(31)
+/* RW bit: 0x1 - enables a Virtual Channel, 0x0 - disables */
+#define ADF_GEN6_PVC1CTL_VCEN_ON 0x1
+
+/* Error source mask registers */
+#define ADF_GEN6_ERRMSK0 0x41A210
+#define ADF_GEN6_ERRMSK1 0x41A214
+#define ADF_GEN6_ERRMSK2 0x41A218
+#define ADF_GEN6_ERRMSK3 0x41A21C
+
+#define ADF_GEN6_VFLNOTIFY BIT(7)
+
+/* Number of heartbeat counter pairs */
+#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE
+
+/* Physical function fuses */
+#define ADF_6XXX_ACCELENGINES_MASK GENMASK(8, 0)
+#define ADF_6XXX_ADMIN_AE_MASK GENMASK(8, 8)
+
+/* Firmware binaries */
+#define ADF_6XXX_FW "qat_6xxx.bin"
+#define ADF_6XXX_MMP "qat_6xxx_mmp.bin"
+#define ADF_6XXX_CY_OBJ "qat_6xxx_cy.bin"
+#define ADF_6XXX_DC_OBJ "qat_6xxx_dc.bin"
+#define ADF_6XXX_ADMIN_OBJ "qat_6xxx_admin.bin"
+
+enum icp_qat_gen6_slice_mask {
+ ICP_ACCEL_GEN6_MASK_UCS_SLICE = BIT(0),
+ ICP_ACCEL_GEN6_MASK_AUTH_SLICE = BIT(1),
+ ICP_ACCEL_GEN6_MASK_PKE_SLICE = BIT(2),
+ ICP_ACCEL_GEN6_MASK_CPR_SLICE = BIT(3),
+ ICP_ACCEL_GEN6_MASK_DCPRZ_SLICE = BIT(4),
+ ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE = BIT(6),
+};
+
+void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data);
+
+#endif /* ADF_6XXX_HW_DATA_H_ */
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c
new file mode 100644
index 000000000000..c1dc9c56fdf5
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/array_size.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include <adf_accel_devices.h>
+#include <adf_cfg.h>
+#include <adf_common_drv.h>
+#include <adf_dbgfs.h>
+
+#include "adf_gen6_shared.h"
+#include "adf_6xxx_hw_data.h"
+
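+/*
+ * Each region is a 64-bit BAR and occupies two consecutive BAR registers,
+ * so the usable regions sit at indices 0, 2 and 4 (matching
+ * ADF_GEN6_BAR_MASK).
+ */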
+static int bar_map[] = {
+ 0, /* SRAM */
+ 2, /* PMISC */
+ 4, /* ETR */
+};
+
+static void adf_device_down(void *accel_dev)
+{
+ adf_dev_down(accel_dev);
+}
+
+static void adf_dbgfs_cleanup(void *accel_dev)
+{
+ adf_dbgfs_exit(accel_dev);
+}
+
+static void adf_cfg_device_remove(void *accel_dev)
+{
+ adf_cfg_dev_remove(accel_dev);
+}
+
+static void adf_cleanup_hw_data(void *accel_dev)
+{
+ struct adf_accel_dev *accel_device = accel_dev;
+
+ if (accel_device->hw_device) {
+ adf_clean_hw_data_6xxx(accel_device->hw_device);
+ accel_device->hw_device = NULL;
+ }
+}
+
+static void adf_devmgr_remove(void *accel_dev)
+{
+ adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct adf_accel_pci *accel_pci_dev;
+ struct adf_hw_device_data *hw_data;
+ struct device *dev = &pdev->dev;
+ struct adf_accel_dev *accel_dev;
+ struct adf_bar *bar;
+ unsigned int i;
+ int ret;
+
+ if (num_possible_nodes() > 1 && dev_to_node(dev) < 0) {
+ /*
+ * If the accelerator is connected to a node with no memory
+ * there is no point in using the accelerator since the remote
+ * memory transaction will be very slow.
+ */
+ return dev_err_probe(dev, -EINVAL, "Invalid NUMA configuration.\n");
+ }
+
+ accel_dev = devm_kzalloc(dev, sizeof(*accel_dev), GFP_KERNEL);
+ if (!accel_dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+ INIT_LIST_HEAD(&accel_dev->list);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
+ accel_dev->owner = THIS_MODULE;
+
+ hw_data = devm_kzalloc(dev, sizeof(*hw_data), GFP_KERNEL);
+ if (!hw_data)
+ return -ENOMEM;
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+ pci_read_config_dword(pdev, ADF_GEN6_FUSECTL4_OFFSET, &hw_data->fuses[ADF_FUSECTL4]);
+ pci_read_config_dword(pdev, ADF_GEN6_FUSECTL0_OFFSET, &hw_data->fuses[ADF_FUSECTL0]);
+ pci_read_config_dword(pdev, ADF_GEN6_FUSECTL1_OFFSET, &hw_data->fuses[ADF_FUSECTL1]);
+
+ if (!(hw_data->fuses[ADF_FUSECTL1] & ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE))
+ return dev_err_probe(dev, -EFAULT, "Wireless mode is not supported.\n");
+
+ /* Enable PCI device */
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot enable PCI device.\n");
+
+ ret = adf_devmgr_add_dev(accel_dev, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add new accelerator device.\n");
+
+ ret = devm_add_action_or_reset(dev, adf_devmgr_remove, accel_dev);
+ if (ret)
+ return ret;
+
+ accel_dev->hw_device = hw_data;
+ adf_init_hw_data_6xxx(accel_dev->hw_device);
+
+ ret = devm_add_action_or_reset(dev, adf_cleanup_hw_data, accel_dev);
+ if (ret)
+ return ret;
+
+ /* Get Accelerators and Accelerator Engine masks */
+ hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+ hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+ accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+ /* If the device has no acceleration engines then ignore it */
+ if (!hw_data->accel_mask || !hw_data->ae_mask ||
+ (~hw_data->ae_mask & ADF_GEN6_ACCELERATORS_MASK)) {
+ ret = -EFAULT;
+ return dev_err_probe(dev, ret, "No acceleration units were found.\n");
+ }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, adf_cfg_device_remove, accel_dev);
+ if (ret)
+ return ret;
+
+ /* Set DMA identifier */
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret)
+ return dev_err_probe(dev, ret, "No usable DMA configuration.\n");
+
+ ret = adf_gen6_cfg_dev_init(accel_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to initialize configuration.\n");
+
+ /* Get accelerator capability mask */
+ hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+ if (!hw_data->accel_capabilities_mask) {
+ ret = -EINVAL;
+ return dev_err_probe(dev, ret, "Failed to get capabilities mask.\n");
+ }
+
+ for (i = 0; i < ARRAY_SIZE(bar_map); i++) {
+ bar = &accel_pci_dev->pci_bars[i];
+
+ /* Map 64-bit PCIe BAR */
+ bar->virt_addr = pcim_iomap_region(pdev, bar_map[i], pci_name(pdev));
+ if (IS_ERR(bar->virt_addr)) {
+ ret = PTR_ERR(bar->virt_addr);
+ return dev_err_probe(dev, ret, "Failed to ioremap PCI region.\n");
+ }
+ }
+
+ pci_set_master(pdev);
+
+ /*
+ * The PCI config space is saved at this point and will be restored
+ * after a Function Level Reset (FLR) as the FLR does not completely
+ * restore it.
+ */
+ ret = pci_save_state(pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to save pci state.\n");
+
+ accel_dev->ras_errors.enabled = true;
+
+ adf_dbgfs_init(accel_dev);
+
+ ret = devm_add_action_or_reset(dev, adf_dbgfs_cleanup, accel_dev);
+ if (ret)
+ return ret;
+
+ ret = adf_dev_up(accel_dev, true);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, adf_device_down, accel_dev);
+ if (ret)
+ return ret;
+
+ ret = adf_sysfs_init(accel_dev);
+
+ return ret;
+}
+
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_6XXX) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_6XXX_DEVICE_NAME,
+ .probe = adf_probe,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+module_pci_driver(adf_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_6XXX_FW);
+MODULE_FIRMWARE(ADF_6XXX_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology for GEN6 Devices");
+MODULE_SOFTDEP("pre: crypto-intel_qat");
+MODULE_IMPORT_NS("CRYPTO_QAT");
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/Makefile b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
index d9e568572da8..43604c025f0c 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/Makefile
+++ b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
qat_c3xxx-y := adf_drv.o adf_c3xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index e78f7bfd30b8..07f2c42a68f5 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -5,7 +5,6 @@
#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -22,7 +21,6 @@ static const u32 thrd_to_arb_map[ADF_C3XXX_MAX_ACCELENGINES] = {
static struct adf_hw_device_class c3xxx_class = {
.name = ADF_C3XXX_DEVICE_NAME,
.type = DEV_C3XXX,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
index b825b35ab4bf..bceb5dd8b148 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
@@ -19,24 +19,6 @@
#include <adf_dbgfs.h>
#include "adf_c3xxx_hw_data.h"
-static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), },
- { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
- .id_table = adf_pci_tbl,
- .name = ADF_C3XXX_DEVICE_NAME,
- .probe = adf_probe,
- .remove = adf_remove,
- .sriov_configure = adf_sriov_configure,
- .err_handler = &adf_err_handler,
-};
-
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
@@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev)
kfree(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_C3XXX_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
static int __init adfdrv_init(void)
{
request_module("intel_qat");
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
index 31a908a211ac..03f6745b4aa2 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
qat_c3xxxvf-y := adf_drv.o adf_c3xxxvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index a512ca4efd3f..db3c33fa1881 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -3,7 +3,6 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -13,7 +12,6 @@
static struct adf_hw_device_class c3xxxiov_class = {
.name = ADF_C3XXXVF_DEVICE_NAME,
.type = DEV_C3XXXVF,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_c62x/Makefile b/drivers/crypto/intel/qat/qat_c62x/Makefile
index cbdaaa135e84..f3d722bef088 100644
--- a/drivers/crypto/intel/qat/qat_c62x/Makefile
+++ b/drivers/crypto/intel/qat/qat_c62x/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
qat_c62x-y := adf_drv.o adf_c62x_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
index 32ebe09477a8..0b410b41474d 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
@@ -5,7 +5,6 @@
#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -22,7 +21,6 @@ static const u32 thrd_to_arb_map[ADF_C62X_MAX_ACCELENGINES] = {
static struct adf_hw_device_class c62x_class = {
.name = ADF_C62X_DEVICE_NAME,
.type = DEV_C62X,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
index 8a7bdec358d6..23ccb72b6ea2 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
@@ -19,24 +19,6 @@
#include <adf_dbgfs.h>
#include "adf_c62x_hw_data.h"
-static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), },
- { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
- .id_table = adf_pci_tbl,
- .name = ADF_C62X_DEVICE_NAME,
- .probe = adf_probe,
- .remove = adf_remove,
- .sriov_configure = adf_sriov_configure,
- .err_handler = &adf_err_handler,
-};
-
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
@@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev)
kfree(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_C62X_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
static int __init adfdrv_init(void)
{
request_module("intel_qat");
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/Makefile b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
index 60e499b041ec..ed7f3f722d99 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
qat_c62xvf-y := adf_drv.o adf_c62xvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index 4aaaaf921734..7f00035d3661 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -3,7 +3,6 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -13,7 +12,6 @@
static struct adf_hw_device_class c62xiov_class = {
.name = ADF_C62XVF_DEVICE_NAME,
.type = DEV_C62XVF,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index af5df29fd2e3..66bb295ace28 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -8,19 +8,19 @@ intel_qat-y := adf_accel_engine.o \
adf_cfg_services.o \
adf_clock.o \
adf_ctl_drv.o \
+ adf_dc.o \
adf_dev_mgr.o \
adf_gen2_config.o \
- adf_gen2_dc.o \
adf_gen2_hw_csr_data.o \
adf_gen2_hw_data.o \
adf_gen4_config.o \
- adf_gen4_dc.o \
adf_gen4_hw_csr_data.o \
adf_gen4_hw_data.o \
adf_gen4_pm.o \
adf_gen4_ras.o \
- adf_gen4_timer.o \
adf_gen4_vf_mig.o \
+ adf_gen6_ras.o \
+ adf_gen6_shared.o \
adf_hw_arbiter.o \
adf_init.o \
adf_isr.o \
@@ -30,6 +30,7 @@ intel_qat-y := adf_accel_engine.o \
adf_sysfs.o \
adf_sysfs_ras_counters.o \
adf_sysfs_rl.o \
+ adf_timer.o \
adf_transport.o \
qat_algs.o \
qat_algs_send.o \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index dc21551153cb..2ee526063213 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -12,6 +12,7 @@
#include <linux/qat/qat_mig_dev.h>
#include <linux/wordpart.h>
#include "adf_cfg_common.h"
+#include "adf_dc.h"
#include "adf_rl.h"
#include "adf_telemetry.h"
#include "adf_pfvf_msg.h"
@@ -25,14 +26,18 @@
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_420XX_DEVICE_NAME "420xx"
-#define ADF_4XXX_PCI_DEVICE_ID 0x4940
-#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
-#define ADF_401XX_PCI_DEVICE_ID 0x4942
-#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
-#define ADF_402XX_PCI_DEVICE_ID 0x4944
-#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
-#define ADF_420XX_PCI_DEVICE_ID 0x4946
-#define ADF_420XXIOV_PCI_DEVICE_ID 0x4947
+#define ADF_6XXX_DEVICE_NAME "6xxx"
+#define PCI_DEVICE_ID_INTEL_QAT_4XXX 0x4940
+#define PCI_DEVICE_ID_INTEL_QAT_4XXXIOV 0x4941
+#define PCI_DEVICE_ID_INTEL_QAT_401XX 0x4942
+#define PCI_DEVICE_ID_INTEL_QAT_401XXIOV 0x4943
+#define PCI_DEVICE_ID_INTEL_QAT_402XX 0x4944
+#define PCI_DEVICE_ID_INTEL_QAT_402XXIOV 0x4945
+#define PCI_DEVICE_ID_INTEL_QAT_420XX 0x4946
+#define PCI_DEVICE_ID_INTEL_QAT_420XXIOV 0x4947
+#define PCI_DEVICE_ID_INTEL_QAT_6XXX 0x4948
+#define PCI_DEVICE_ID_INTEL_QAT_6XXX_IOV 0x4949
+
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
@@ -267,7 +272,8 @@ struct adf_pfvf_ops {
};
struct adf_dc_ops {
- void (*build_deflate_ctx)(void *ctx);
+ int (*build_comp_block)(void *ctx, enum adf_dc_algo algo);
+ int (*build_decomp_block)(void *ctx, enum adf_dc_algo algo);
};
struct qat_migdev_ops {
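
The adf_dc_ops change above replaces the single build_deflate_ctx() callback with separate, algorithm-aware builders for the compression and decompression halves of the request template, both of which can now fail. A minimal sketch, assuming a hypothetical generation library (the my_gen_* names are not real symbols):

/* Hypothetical registration of the new two-callback interface */
static int my_gen_build_comp(void *ctx, enum adf_dc_algo algo)
{
	if (algo != QAT_DEFLATE)
		return -EINVAL;	/* only DEFLATE wired up in this sketch */
	/* ... program the compression slice config in ctx ... */
	return 0;
}

static int my_gen_build_decomp(void *ctx, enum adf_dc_algo algo)
{
	if (algo != QAT_DEFLATE)
		return -EINVAL;
	/* ... program the decompression slice config in ctx ... */
	return 0;
}

void my_gen_init_dc_ops(struct adf_dc_ops *dc_ops)
{
	dc_ops->build_comp_block = my_gen_build_comp;
	dc_ops->build_decomp_block = my_gen_build_decomp;
}
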
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
index acad526eb741..573388c37100 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
@@ -449,6 +449,7 @@ int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay)
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
+EXPORT_SYMBOL_GPL(adf_init_admin_pm);
int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr,
size_t buff_size)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
index 89df3888d7ea..15fdf9854b81 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
@@ -48,6 +48,7 @@ enum adf_device_type {
DEV_C3XXXVF,
DEV_4XXX,
DEV_420XX,
+ DEV_6XXX,
};
struct adf_dev_status_info {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
index 30abcd9e1283..c39871291da7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
@@ -116,7 +116,7 @@ int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in,
return adf_service_mask_to_string(mask, out, out_len);
}
-static int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask)
+int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask)
{
char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
size_t len;
@@ -138,6 +138,7 @@ static int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *
return ret;
}
+EXPORT_SYMBOL_GPL(adf_get_service_mask);
int adf_get_service_enabled(struct adf_accel_dev *accel_dev)
{
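
adf_get_service_mask() is un-static'ed and exported above so code outside adf_cfg_services.c can query the configured services as a bitmap. A hedged usage sketch; example_cfg() is hypothetical, and SVC_DC is assumed to be one of the service bits declared in adf_cfg_services.h:

static int example_cfg(struct adf_accel_dev *accel_dev)
{
	unsigned long mask;
	int ret;

	ret = adf_get_service_mask(accel_dev, &mask);
	if (ret)
		return ret;

	/* SVC_DC: assumed service bit from adf_cfg_services.h */
	if (test_bit(SVC_DC, &mask))
		dev_dbg(&GET_DEV(accel_dev), "compression service enabled\n");

	return 0;
}
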
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
index f6bafc15cbc6..3742c450878f 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
@@ -32,5 +32,6 @@ enum {
int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in,
size_t in_len, char *out, size_t out_len);
int adf_get_service_enabled(struct adf_accel_dev *accel_dev);
+int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c b/drivers/crypto/intel/qat/qat_common/adf_dc.c
index 47261b1c1da6..3e8fb4e3ed97 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dc.c
@@ -1,22 +1,21 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include "adf_accel_devices.h"
-#include "adf_gen2_dc.h"
+#include "adf_dc.h"
#include "icp_qat_fw_comp.h"
-static void qat_comp_build_deflate_ctx(void *ctx)
+int qat_comp_build_ctx(struct adf_accel_dev *accel_dev, void *ctx, enum adf_dc_algo algo)
{
- struct icp_qat_fw_comp_req *req_tmpl = (struct icp_qat_fw_comp_req *)ctx;
- struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
- struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
- struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
struct icp_qat_fw_comp_cd_hdr *comp_cd_ctrl = &req_tmpl->comp_cd_ctrl;
+ struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ int ret;
memset(req_tmpl, 0, sizeof(*req_tmpl));
header->hdr_flags =
ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
- header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
header->comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
QAT_COMN_PTR_TYPE_SGL);
@@ -26,12 +25,14 @@ static void qat_comp_build_deflate_ctx(void *ctx)
ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
- cd_pars->u.sl.comp_slice_cfg_word[0] =
- ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
- ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
- ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
- ICP_QAT_HW_COMPRESSION_DEPTH_1,
- ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+
+ /* Build HW config block for compression */
+ ret = GET_DC_OPS(accel_dev)->build_comp_block(ctx, algo);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to build compression block\n");
+ return ret;
+ }
+
req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
req_pars->req_par_flags =
@@ -45,26 +46,19 @@ static void qat_comp_build_deflate_ctx(void *ctx)
ICP_QAT_FW_COMP_NO_XXHASH_ACC,
ICP_QAT_FW_COMP_CNV_ERROR_NONE,
ICP_QAT_FW_COMP_NO_APPEND_CRC,
- ICP_QAT_FW_COMP_NO_DROP_DATA);
+ ICP_QAT_FW_COMP_NO_DROP_DATA,
+ ICP_QAT_FW_COMP_NO_PARTIAL_DECOMPRESS);
ICP_QAT_FW_COMN_NEXT_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
ICP_QAT_FW_COMN_CURR_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_COMP);
/* Fill second half of the template for decompression */
memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
req_tmpl++;
- header = &req_tmpl->comn_hdr;
- header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
- cd_pars = &req_tmpl->cd_pars;
- cd_pars->u.sl.comp_slice_cfg_word[0] =
- ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS,
- ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
- ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
- ICP_QAT_HW_COMPRESSION_DEPTH_1,
- ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
-}
-void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops)
-{
- dc_ops->build_deflate_ctx = qat_comp_build_deflate_ctx;
+ /* Build HW config block for decompression */
+ ret = GET_DC_OPS(accel_dev)->build_decomp_block(req_tmpl, algo);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Failed to build decompression block\n");
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops);
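
The rewritten builder still fills a back-to-back pair of templates — compression first, then a copy patched for decompression — but now fails if the generation hook rejects the requested algorithm. A hedged caller sketch; req_pair is a hypothetical buffer standing in for the template area a compression session would allocate:

	struct icp_qat_fw_comp_req req_pair[2];	/* comp + decomp templates */
	int ret;

	ret = qat_comp_build_ctx(accel_dev, req_pair, QAT_DEFLATE);
	if (ret)
		dev_err(&GET_DEV(accel_dev), "template build failed: %d\n", ret);
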
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dc.h b/drivers/crypto/intel/qat/qat_common/adf_dc.h
new file mode 100644
index 000000000000..6cb5e09054a6
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_dc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_DC_H
+#define ADF_DC_H
+
+struct adf_accel_dev;
+
+enum adf_dc_algo {
+ QAT_DEFLATE,
+ QAT_LZ4,
+ QAT_LZ4S,
+ QAT_ZSTD,
+};
+
+int qat_comp_build_ctx(struct adf_accel_dev *accel_dev, void *ctx, enum adf_dc_algo algo);
+
+#endif /* ADF_DC_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
index 4f86696800c9..78957fa900b7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
@@ -8,6 +8,7 @@ enum adf_fw_objs {
ADF_FW_ASYM_OBJ,
ADF_FW_DC_OBJ,
ADF_FW_ADMIN_OBJ,
+ ADF_FW_CY_OBJ,
};
struct adf_fw_config {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h
deleted file mode 100644
index 6eae023354d7..000000000000
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef ADF_GEN2_DC_H
-#define ADF_GEN2_DC_H
-
-#include "adf_accel_devices.h"
-
-void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops);
-
-#endif /* ADF_GEN2_DC_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
index 2b263442c856..6a505e9a5cf9 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include "adf_common_drv.h"
+#include "adf_dc.h"
#include "adf_gen2_hw_data.h"
+#include "icp_qat_fw_comp.h"
#include "icp_qat_hw.h"
#include <linux/pci.h>
@@ -169,3 +171,58 @@ void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
}
}
EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);
+
+static int adf_gen2_build_comp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] =
+ ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+ ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+ ICP_QAT_HW_COMPRESSION_DEPTH_1,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+
+ return 0;
+}
+
+static int adf_gen2_build_decomp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] =
+ ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+ ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+ ICP_QAT_HW_COMPRESSION_DEPTH_1,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+
+ return 0;
+}
+
+void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+ dc_ops->build_comp_block = adf_gen2_build_comp_block;
+ dc_ops->build_decomp_block = adf_gen2_build_decomp_block;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops);
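
Both gen2 builders above accept only QAT_DEFLATE and return -EINVAL for anything else, so an unsupported algorithm now fails at template-build time rather than being silently encoded. A hypothetical sketch of how a further algorithm would slot into the switch (MY_HW_LZ4_CMD is an assumed placeholder, not a real firmware constant):

	switch (algo) {
	case QAT_DEFLATE:
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
		break;
	case QAT_LZ4:
		header->service_cmd_id = MY_HW_LZ4_CMD;	/* assumed */
		break;
	default:
		return -EINVAL;
	}
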
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
index 708e9186127b..59bad368a921 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
@@ -88,5 +88,6 @@ void adf_gen2_get_arb_info(struct arb_info *arb_info);
void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev);
u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev);
void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
index a716545a764c..34a63cf40db2 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
#define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C)
#define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
index f97e7a880f3a..afcdfdd0a37a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
@@ -11,7 +11,7 @@
#include "qat_compression.h"
#include "qat_crypto.h"
-static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
+int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
int banks = GET_MAX_BANKS(accel_dev);
@@ -117,7 +117,7 @@ err:
return ret;
}
-static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
+int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
{
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
int banks = GET_MAX_BANKS(accel_dev);
@@ -187,7 +187,7 @@ err:
return ret;
}
-static int adf_no_dev_config(struct adf_accel_dev *accel_dev)
+int adf_no_dev_config(struct adf_accel_dev *accel_dev)
{
unsigned long val;
int ret;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
index bb87655f69a8..38a674c27e40 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
@@ -7,5 +7,8 @@
int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev);
+int adf_crypto_dev_config(struct adf_accel_dev *accel_dev);
+int adf_comp_dev_config(struct adf_accel_dev *accel_dev);
+int adf_no_dev_config(struct adf_accel_dev *accel_dev);
#endif
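
Dropping static from the three gen4 config helpers and declaring them in the header lets newer generations reuse the same configuration logic instead of duplicating it. A hedged sketch of such reuse; my_dev_config() is hypothetical, and SVC_CY/SVC_DC are assumed enum values from adf_cfg_services.h:

static int my_dev_config(struct adf_accel_dev *accel_dev)
{
	switch (adf_get_service_enabled(accel_dev)) {
	case SVC_CY:		/* assumed enum value */
		return adf_crypto_dev_config(accel_dev);
	case SVC_DC:		/* assumed enum value */
		return adf_comp_dev_config(accel_dev);
	default:
		return adf_no_dev_config(accel_dev);
	}
}
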
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c
deleted file mode 100644
index 5859238e37de..000000000000
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c
+++ /dev/null
@@ -1,83 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2022 Intel Corporation */
-#include "adf_accel_devices.h"
-#include "icp_qat_fw_comp.h"
-#include "icp_qat_hw_20_comp.h"
-#include "adf_gen4_dc.h"
-
-static void qat_comp_build_deflate(void *ctx)
-{
- struct icp_qat_fw_comp_req *req_tmpl =
- (struct icp_qat_fw_comp_req *)ctx;
- struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
- struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
- struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
- struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = {0};
- struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = {0};
- struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = {0};
- u32 upper_val;
- u32 lower_val;
-
- memset(req_tmpl, 0, sizeof(*req_tmpl));
- header->hdr_flags =
- ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
- header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
- header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
- header->comn_req_flags =
- ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
- QAT_COMN_PTR_TYPE_SGL);
- header->serv_specif_flags =
- ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
- ICP_QAT_FW_COMP_AUTO_SELECT_BEST,
- ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
- ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
- ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
- hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
- hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
- hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
- hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
- hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
- hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
- hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;
- hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;
-
- upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr);
- lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
-
- cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
- cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val;
-
- req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
- req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
- req_pars->req_par_flags =
- ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
- ICP_QAT_FW_COMP_EOP,
- ICP_QAT_FW_COMP_BFINAL,
- ICP_QAT_FW_COMP_CNV,
- ICP_QAT_FW_COMP_CNV_RECOVERY,
- ICP_QAT_FW_COMP_NO_CNV_DFX,
- ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
- ICP_QAT_FW_COMP_NO_XXHASH_ACC,
- ICP_QAT_FW_COMP_CNV_ERROR_NONE,
- ICP_QAT_FW_COMP_NO_APPEND_CRC,
- ICP_QAT_FW_COMP_NO_DROP_DATA);
-
- /* Fill second half of the template for decompression */
- memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
- req_tmpl++;
- header = &req_tmpl->comn_hdr;
- header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
- cd_pars = &req_tmpl->cd_pars;
-
- hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
- lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);
-
- cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
- cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
-}
-
-void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
-{
- dc_ops->build_deflate_ctx = qat_comp_build_deflate;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h
deleted file mode 100644
index 0b1a6774412e..000000000000
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef ADF_GEN4_DC_H
-#define ADF_GEN4_DC_H
-
-#include "adf_accel_devices.h"
-
-void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops);
-
-#endif /* ADF_GEN4_DC_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 099949a2421c..0406cb09c5bb 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -9,6 +9,8 @@
#include "adf_fw_config.h"
#include "adf_gen4_hw_data.h"
#include "adf_gen4_pm.h"
+#include "icp_qat_fw_comp.h"
+#include "icp_qat_hw_20_comp.h"
u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self)
{
@@ -663,3 +665,71 @@ int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number
return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore);
+
+static int adf_gen4_build_comp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = { };
+ struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = { };
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ u32 upper_val;
+ u32 lower_val;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
+ hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
+ hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
+ hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
+ hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
+ hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
+ hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;
+ hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;
+
+ upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr);
+ lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val;
+
+ return 0;
+}
+
+static int adf_gen4_build_decomp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = { };
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ u32 lower_val;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
+ lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+
+ return 0;
+}
+
+void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+ dc_ops->build_comp_block = adf_gen4_build_comp_block;
+ dc_ops->build_decomp_block = adf_gen4_build_decomp_block;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index 51fc2eaa263e..e4f4d5fa616d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -7,6 +7,7 @@
#include "adf_accel_devices.h"
#include "adf_cfg_common.h"
+#include "adf_dc.h"
/* PCIe configuration space */
#define ADF_GEN4_BAR_MASK (BIT(0) | BIT(2) | BIT(4))
@@ -180,5 +181,6 @@ int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev,
u32 bank_number, struct bank_state *state);
bool adf_gen4_services_supported(unsigned long service_mask);
+void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
index 17d1b774d4a8..2c8708117f70 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
@@ -4,6 +4,7 @@
#define ADF_GEN4_PFVF_H
#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
#ifdef CONFIG_PCI_IOV
void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h
new file mode 100644
index 000000000000..9a5b995f7ada
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_GEN6_PM_H
+#define ADF_GEN6_PM_H
+
+#include <linux/bits.h>
+#include <linux/time.h>
+
+struct adf_accel_dev;
+
+/* Power management */
+#define ADF_GEN6_PM_POLL_DELAY_US 20
+#define ADF_GEN6_PM_POLL_TIMEOUT_US USEC_PER_SEC
+#define ADF_GEN6_PM_STATUS 0x50A00C
+#define ADF_GEN6_PM_INTERRUPT 0x50A028
+
+/* Power management source in ERRSOU2 and ERRMSK2 */
+#define ADF_GEN6_PM_SOU BIT(18)
+
+/* cpm_pm_interrupt bitfields */
+#define ADF_GEN6_PM_DRV_ACTIVE BIT(20)
+
+#define ADF_GEN6_PM_DEFAULT_IDLE_FILTER 0x6
+
+/* cpm_pm_status bitfields */
+#define ADF_GEN6_PM_INIT_STATE BIT(21)
+
+#endif /* ADF_GEN6_PM_H */
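
A hedged sketch of how the poll constants above are likely consumed — spinning on ADF_GEN6_PM_STATUS until the PM block reports its init state, in the same read_poll_timeout() idiom (from <linux/iopoll.h>) the earlier-generation PM code uses; the function name and pmisc argument are illustrative:

static int example_wait_pm_init(void __iomem *pmisc)
{
	u32 status;

	/* Poll every 20 us, give up after one second */
	return read_poll_timeout(ADF_CSR_RD, status,
				 status & ADF_GEN6_PM_INIT_STATE,
				 ADF_GEN6_PM_POLL_DELAY_US,
				 ADF_GEN6_PM_POLL_TIMEOUT_US, true,
				 pmisc, ADF_GEN6_PM_STATUS);
}
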
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c
new file mode 100644
index 000000000000..967253082a98
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c
@@ -0,0 +1,818 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/types.h>
+
+#include "adf_common_drv.h"
+#include "adf_gen6_ras.h"
+#include "adf_sysfs_ras_counters.h"
+
+static void enable_errsou_reporting(void __iomem *csr)
+{
+ /* Enable correctable error reporting in ERRSOU0 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK0, 0);
+
+ /* Enable uncorrectable error reporting in ERRSOU1 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK1, 0);
+
+ /*
+ * Enable uncorrectable error reporting in ERRSOU2
+ * but disable PM interrupt by default
+ */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK2, ADF_GEN6_ERRSOU2_PM_INT_BIT);
+
+ /* Enable uncorrectable error reporting in ERRSOU3 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, 0);
+}
+
+static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask;
+
+ /* Enable acceleration engine correctable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOGENABLE_CPP0, ae_mask);
+
+ /* Enable acceleration engine uncorrectable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0, ae_mask);
+}
+
+static void enable_cpp_error_reporting(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ /* Enable HI CPP agents command parity error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE,
+ ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK);
+
+ ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_CTRL, ADF_GEN6_CPP_CFC_ERR_CTRL_MASK);
+}
+
+static void enable_ti_ri_error_reporting(void __iomem *csr)
+{
+ u32 reg, mask;
+
+ /* Enable RI memory error reporting */
+ mask = ADF_GEN6_RIMEM_PARERR_FATAL_MASK | ADF_GEN6_RIMEM_PARERR_CERR_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_RI_MEM_PAR_ERR_EN0, mask);
+
+ /* Enable IOSF primary command parity error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_RIMISCCTL, ADF_GEN6_RIMISCSTS_BIT);
+
+ /* Enable TI internal memory parity error reporting */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_CI_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_CD_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_TRNSB_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK, reg);
+
+ /* Enable error handling in RI, TI CPP interface control registers */
+ ADF_CSR_WR(csr, ADF_GEN6_RICPPINTCTL, ADF_GEN6_RICPPINTCTL_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TICPPINTCTL, ADF_GEN6_TICPPINTCTL_MASK);
+
+	/*
+	 * Enable error detection and reporting in TIMISCSTS
+	 * while preserving the value of bits 1, 2 and 30
+	 */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TIMISCCTL);
+ reg &= ADF_GEN6_TIMSCCTL_RELAY_MASK;
+ reg |= ADF_GEN6_TIMISCCTL_BIT;
+ ADF_CSR_WR(csr, ADF_GEN6_TIMISCCTL, reg);
+}
+
+static void enable_ssm_error_reporting(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ /* Enable SSM interrupts */
+ ADF_CSR_WR(csr, ADF_GEN6_INTMASKSSM, 0);
+}
+
+static void adf_gen6_enable_ras(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+
+ enable_errsou_reporting(csr);
+ enable_ae_error_reporting(accel_dev, csr);
+ enable_cpp_error_reporting(accel_dev, csr);
+ enable_ti_ri_error_reporting(csr);
+ enable_ssm_error_reporting(accel_dev, csr);
+}
+
+static void disable_errsou_reporting(void __iomem *csr)
+{
+ u32 val;
+
+ /* Disable correctable error reporting in ERRSOU0 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK0, ADF_GEN6_ERRSOU0_MASK);
+
+ /* Disable uncorrectable error reporting in ERRSOU1 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK1, ADF_GEN6_ERRMSK1_MASK);
+
+ /* Disable uncorrectable error reporting in ERRSOU2 */
+ val = ADF_CSR_RD(csr, ADF_GEN6_ERRMSK2);
+ val |= ADF_GEN6_ERRSOU2_DIS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK2, val);
+
+ /* Disable uncorrectable error reporting in ERRSOU3 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, ADF_GEN6_ERRSOU3_DIS_MASK);
+}
+
+static void disable_ae_error_reporting(void __iomem *csr)
+{
+ /* Disable acceleration engine correctable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOGENABLE_CPP0, 0);
+
+ /* Disable acceleration engine uncorrectable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0, 0);
+}
+
+static void disable_cpp_error_reporting(void __iomem *csr)
+{
+ /* Disable HI CPP agents command parity error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE, 0);
+
+ ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_CTRL, ADF_GEN6_CPP_CFC_ERR_CTRL_DIS_MASK);
+}
+
+static void disable_ti_ri_error_reporting(void __iomem *csr)
+{
+ u32 reg;
+
+ /* Disable RI memory error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_RI_MEM_PAR_ERR_EN0, 0);
+
+ /* Disable IOSF primary command parity error reporting */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_RIMISCCTL);
+ reg &= ~ADF_GEN6_RIMISCSTS_BIT;
+ ADF_CSR_WR(csr, ADF_GEN6_RIMISCCTL, reg);
+
+ /* Disable TI internal memory parity error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK, ADF_GEN6_TI_CI_PAR_STS_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK, ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK, ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK, ADF_GEN6_TI_CD_PAR_STS_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK, ADF_GEN6_TI_TRNSB_PAR_STS_MASK);
+
+ /* Disable error handling in RI, TI CPP interface control registers */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_RICPPINTCTL);
+ reg &= ~ADF_GEN6_RICPPINTCTL_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_RICPPINTCTL, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TICPPINTCTL);
+ reg &= ~ADF_GEN6_TICPPINTCTL_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TICPPINTCTL, reg);
+
+	/*
+	 * Disable error detection and reporting in TIMISCSTS
+	 * while preserving the value of bits 1, 2 and 30
+	 */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TIMISCCTL);
+ reg &= ADF_GEN6_TIMSCCTL_RELAY_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TIMISCCTL, reg);
+}
+
+static void disable_ssm_error_reporting(void __iomem *csr)
+{
+ /* Disable SSM interrupts */
+ ADF_CSR_WR(csr, ADF_GEN6_INTMASKSSM, ADF_GEN6_INTMASKSSM_MASK);
+}
+
+static void adf_gen6_disable_ras(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+
+ disable_errsou_reporting(csr);
+ disable_ae_error_reporting(csr);
+ disable_cpp_error_reporting(csr);
+ disable_ti_ri_error_reporting(csr);
+ disable_ssm_error_reporting(csr);
+}
+
+static void adf_gen6_process_errsou0(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ae, errsou;
+
+ ae = ADF_CSR_RD(csr, ADF_GEN6_HIAECORERRLOG_CPP0);
+ ae &= GET_HW_DATA(accel_dev)->ae_mask;
+
+ dev_warn(&GET_DEV(accel_dev), "Correctable error detected: %#x\n", ae);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+
+ /* Clear interrupt from ERRSOU0 */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOG_CPP0, ae);
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU0);
+ if (errsou & ADF_GEN6_ERRSOU0_MASK)
+ dev_warn(&GET_DEV(accel_dev), "errsou0 still set: %#x\n", errsou);
+}
+
+static void adf_handle_cpp_ae_unc(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 ae;
+
+ if (!(errsou & ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT))
+ return;
+
+ ae = ADF_CSR_RD(csr, ADF_GEN6_HIAEUNCERRLOG_CPP0);
+ ae &= GET_HW_DATA(accel_dev)->ae_mask;
+ if (ae) {
+ dev_err(&GET_DEV(accel_dev), "Uncorrectable error detected: %#x\n", ae);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOG_CPP0, ae);
+ }
+}
+
+static void adf_handle_cpp_cmd_par_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 cmd_par_err;
+
+ if (!(errsou & ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT))
+ return;
+
+ cmd_par_err = ADF_CSR_RD(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOG);
+ cmd_par_err &= ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK;
+ if (cmd_par_err) {
+ dev_err(&GET_DEV(accel_dev), "HI CPP agent command parity error: %#x\n",
+ cmd_par_err);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOG, cmd_par_err);
+ }
+}
+
+static void adf_handle_ri_mem_par_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 rimem_parerr_sts;
+
+ if (!(errsou & ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT))
+ return;
+
+ rimem_parerr_sts = ADF_CSR_RD(csr, ADF_GEN6_RIMEM_PARERR_STS);
+ rimem_parerr_sts &= ADF_GEN6_RIMEM_PARERR_CERR_MASK |
+ ADF_GEN6_RIMEM_PARERR_FATAL_MASK;
+ if (rimem_parerr_sts & ADF_GEN6_RIMEM_PARERR_CERR_MASK) {
+ dev_err(&GET_DEV(accel_dev), "RI memory parity correctable error: %#x\n",
+ rimem_parerr_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+ }
+
+ if (rimem_parerr_sts & ADF_GEN6_RIMEM_PARERR_FATAL_MASK) {
+ dev_err(&GET_DEV(accel_dev), "RI memory parity fatal error: %#x\n",
+ rimem_parerr_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ }
+
+ ADF_CSR_WR(csr, ADF_GEN6_RIMEM_PARERR_STS, rimem_parerr_sts);
+}
+
+static void adf_handle_ti_ci_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_ci_par_sts;
+
+ ti_ci_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_CI_PAR_STS);
+ ti_ci_par_sts &= ADF_GEN6_TI_CI_PAR_STS_MASK;
+ if (ti_ci_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI memory parity error: %#x\n", ti_ci_par_sts);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_STS, ti_ci_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+}
+
+static void adf_handle_ti_pullfub_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_pullfub_par_sts;
+
+ ti_pullfub_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_PULL0FUB_PAR_STS);
+ ti_pullfub_par_sts &= ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK;
+ if (ti_pullfub_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI pull parity error: %#x\n", ti_pullfub_par_sts);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_STS, ti_pullfub_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+}
+
+static void adf_handle_ti_pushfub_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_pushfub_par_sts;
+
+ ti_pushfub_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_PUSHFUB_PAR_STS);
+ ti_pushfub_par_sts &= ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK;
+ if (ti_pushfub_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI push parity error: %#x\n", ti_pushfub_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_STS, ti_pushfub_par_sts);
+ }
+}
+
+static void adf_handle_ti_cd_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_cd_par_sts;
+
+ ti_cd_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_CD_PAR_STS);
+ ti_cd_par_sts &= ADF_GEN6_TI_CD_PAR_STS_MASK;
+ if (ti_cd_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI CD parity error: %#x\n", ti_cd_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_STS, ti_cd_par_sts);
+ }
+}
+
+static void adf_handle_ti_trnsb_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_trnsb_par_sts;
+
+ ti_trnsb_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_TRNSB_PAR_STS);
+ ti_trnsb_par_sts &= ADF_GEN6_TI_TRNSB_PAR_STS_MASK;
+ if (ti_trnsb_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI TRNSB parity error: %#x\n", ti_trnsb_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_STS, ti_trnsb_par_sts);
+ }
+}
+
+static void adf_handle_iosfp_cmd_parerr(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 rimiscsts;
+
+ rimiscsts = ADF_CSR_RD(csr, ADF_GEN6_RIMISCSTS);
+ rimiscsts &= ADF_GEN6_RIMISCSTS_BIT;
+ if (rimiscsts) {
+ dev_err(&GET_DEV(accel_dev), "Command parity error detected on IOSFP: %#x\n",
+ rimiscsts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_RIMISCSTS, rimiscsts);
+ }
+}
+
+static void adf_handle_ti_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT))
+ return;
+
+ adf_handle_ti_ci_par_sts(accel_dev, csr);
+ adf_handle_ti_pullfub_par_sts(accel_dev, csr);
+ adf_handle_ti_pushfub_par_sts(accel_dev, csr);
+ adf_handle_ti_cd_par_sts(accel_dev, csr);
+ adf_handle_ti_trnsb_par_sts(accel_dev, csr);
+ adf_handle_iosfp_cmd_parerr(accel_dev, csr);
+}
+
+static void adf_handle_sfi_cmd_parerr(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev),
+ "Command parity error detected on streaming fabric interface\n");
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_gen6_process_errsou1(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ adf_handle_cpp_ae_unc(accel_dev, csr, errsou);
+ adf_handle_cpp_cmd_par_err(accel_dev, csr, errsou);
+ adf_handle_ri_mem_par_err(accel_dev, csr, errsou);
+ adf_handle_ti_err(accel_dev, csr, errsou);
+ adf_handle_sfi_cmd_parerr(accel_dev, csr, errsou);
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU1);
+ if (errsou & ADF_GEN6_ERRSOU1_MASK)
+ dev_warn(&GET_DEV(accel_dev), "errsou1 still set: %#x\n", errsou);
+}
+
+static void adf_handle_cerrssmsh(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 reg;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_CERRSSMSH);
+ reg &= ADF_GEN6_CERRSSMSH_ERROR_BIT;
+ if (reg) {
+ dev_warn(&GET_DEV(accel_dev),
+ "Correctable error on ssm shared memory: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+ ADF_CSR_WR(csr, ADF_GEN6_CERRSSMSH, reg);
+ }
+}
+
+static void adf_handle_uerrssmsh(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_UERRSSMSH);
+ reg &= ADF_GEN6_UERRSSMSH_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "Fatal error on ssm shared memory: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_UERRSSMSH, reg);
+ }
+}
+
+static void adf_handle_pperr_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_PPERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_PPERR);
+ reg &= ADF_GEN6_PPERR_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "Fatal push or pull data error: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_PPERR, reg);
+ }
+}
+
+static void adf_handle_scmpar_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
+ reg &= ADF_GEN6_SCM_PAR_ERR_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev), "Fatal error on SCM: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
+ }
+}
+
+static void adf_handle_cpppar_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
+ reg &= ADF_GEN6_CPP_PAR_ERR_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev), "Fatal error on CPP: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
+ }
+}
+
+static void adf_handle_rfpar_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
+ reg &= ADF_GEN6_RF_PAR_ERR_MASK;
+ if (reg) {
+		dev_err(&GET_DEV(accel_dev), "Fatal RF parity error: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
+ }
+}
+
+static void adf_handle_unexp_cpl_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
+ reg &= ADF_GEN6_UNEXP_CPL_ERR_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "Fatal error for AXI unexpected tag/length: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
+ }
+}
+
+static void adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 iastatssm = ADF_CSR_RD(csr, ADF_GEN6_IAINTSTATSSM);
+
+ iastatssm &= ADF_GEN6_IAINTSTATSSM_MASK;
+ if (!iastatssm)
+ return;
+
+ adf_handle_uerrssmsh(accel_dev, csr, iastatssm);
+ adf_handle_pperr_err(accel_dev, csr, iastatssm);
+ adf_handle_scmpar_err(accel_dev, csr, iastatssm);
+ adf_handle_cpppar_err(accel_dev, csr, iastatssm);
+ adf_handle_rfpar_err(accel_dev, csr, iastatssm);
+ adf_handle_unexp_cpl_err(accel_dev, csr, iastatssm);
+
+ ADF_CSR_WR(csr, ADF_GEN6_IAINTSTATSSM, iastatssm);
+}
+
+static void adf_handle_ssm(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU2_SSM_ERR_BIT))
+ return;
+
+ adf_handle_cerrssmsh(accel_dev, csr);
+ adf_handle_iaintstatssm(accel_dev, csr);
+}
+
+static void adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 reg;
+
+ if (!(errsou & ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_CPP_CFC_ERR_STATUS);
+ if (reg & ADF_GEN6_CPP_CFC_ERR_STATUS_DATAPAR_BIT) {
+ dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: data parity: %#x", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+
+ if (reg & ADF_GEN6_CPP_CFC_ERR_STATUS_CMDPAR_BIT) {
+ dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: command parity: %#x", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ }
+
+ if (reg & ADF_GEN6_CPP_CFC_FATAL_ERR_BIT) {
+		dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: fatal error: %#x", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ }
+
+ ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_STATUS_CLR,
+ ADF_GEN6_CPP_CFC_ERR_STATUS_CLR_MASK);
+}
+
+static void adf_gen6_process_errsou2(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ adf_handle_ssm(accel_dev, csr, errsou);
+ adf_handle_cpp_cfc_err(accel_dev, csr, errsou);
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU2);
+ if (errsou & ADF_GEN6_ERRSOU2_MASK)
+ dev_warn(&GET_DEV(accel_dev), "errsou2 still set: %#x\n", errsou);
+}
+
+static void adf_handle_timiscsts(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 timiscsts;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_TIMISCSTS_BIT))
+ return;
+
+ timiscsts = ADF_CSR_RD(csr, ADF_GEN6_TIMISCSTS);
+ if (timiscsts) {
+ dev_err(&GET_DEV(accel_dev), "Fatal error in transmit interface: %#x\n",
+ timiscsts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ }
+}
+
+static void adf_handle_ricppintsts(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 ricppintsts;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK))
+ return;
+
+ ricppintsts = ADF_CSR_RD(csr, ADF_GEN6_RICPPINTSTS);
+ ricppintsts &= ADF_GEN6_RICPPINTSTS_MASK;
+ if (ricppintsts) {
+ dev_err(&GET_DEV(accel_dev), "RI push pull error: %#x\n", ricppintsts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_RICPPINTSTS, ricppintsts);
+ }
+}
+
+static void adf_handle_ticppintsts(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 ticppintsts;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK))
+ return;
+
+ ticppintsts = ADF_CSR_RD(csr, ADF_GEN6_TICPPINTSTS);
+ ticppintsts &= ADF_GEN6_TICPPINTSTS_MASK;
+ if (ticppintsts) {
+ dev_err(&GET_DEV(accel_dev), "TI push pull error: %#x\n", ticppintsts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_TICPPINTSTS, ticppintsts);
+ }
+}
+
+static void adf_handle_atufaultstatus(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 max_rp_num = GET_HW_DATA(accel_dev)->num_banks;
+ u32 atufaultstatus;
+ u32 i;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT))
+ return;
+
+ for (i = 0; i < max_rp_num; i++) {
+ atufaultstatus = ADF_CSR_RD(csr, ADF_GEN6_ATUFAULTSTATUS(i));
+
+ atufaultstatus &= ADF_GEN6_ATUFAULTSTATUS_BIT;
+ if (atufaultstatus) {
+ dev_err(&GET_DEV(accel_dev), "Ring pair (%u) ATU detected fault: %#x\n", i,
+ atufaultstatus);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_ATUFAULTSTATUS(i), atufaultstatus);
+ }
+ }
+}
+
+static void adf_handle_rlterror(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 rlterror;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_RLTERROR_BIT))
+ return;
+
+ rlterror = ADF_CSR_RD(csr, ADF_GEN6_RLT_ERRLOG);
+ rlterror &= ADF_GEN6_RLT_ERRLOG_MASK;
+ if (rlterror) {
+ dev_err(&GET_DEV(accel_dev), "Error in rate limiting block: %#x\n", rlterror);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_RLT_ERRLOG, rlterror);
+ }
+}
+
+static void adf_handle_vflr(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev), "Uncorrectable error in VF\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+}
+
+static void adf_handle_tc_vc_map_error(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev), "Violation of PCIe TC VC mapping\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_pcie_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev),
+ "DEVHALT due to an error in an incoming transaction\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_pg_req_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev),
+		"Error due to a failed response to a page request\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_xlt_cpl_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT))
+ return;
+
+	dev_err(&GET_DEV(accel_dev), "Error status for an address translation request\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_ti_int_err_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev), "DEVHALT due to a TI internal memory error\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_gen6_process_errsou3(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ adf_handle_timiscsts(accel_dev, csr, errsou);
+ adf_handle_ricppintsts(accel_dev, csr, errsou);
+ adf_handle_ticppintsts(accel_dev, csr, errsou);
+ adf_handle_atufaultstatus(accel_dev, csr, errsou);
+ adf_handle_rlterror(accel_dev, csr, errsou);
+ adf_handle_vflr(accel_dev, csr, errsou);
+ adf_handle_tc_vc_map_error(accel_dev, csr, errsou);
+ adf_handle_pcie_devhalt(accel_dev, csr, errsou);
+ adf_handle_pg_req_devhalt(accel_dev, csr, errsou);
+ adf_handle_xlt_cpl_devhalt(accel_dev, csr, errsou);
+ adf_handle_ti_int_err_devhalt(accel_dev, csr, errsou);
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU3);
+ if (errsou & ADF_GEN6_ERRSOU3_MASK)
+ dev_warn(&GET_DEV(accel_dev), "errsou3 still set: %#x\n", errsou);
+}
+
+static void adf_gen6_is_reset_required(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ bool *reset_required)
+{
+ u8 reset, dev_state;
+ u32 gensts;
+
+ gensts = ADF_CSR_RD(csr, ADF_GEN6_GENSTS);
+ dev_state = FIELD_GET(ADF_GEN6_GENSTS_DEVICE_STATE_MASK, gensts);
+ reset = FIELD_GET(ADF_GEN6_GENSTS_RESET_TYPE_MASK, gensts);
+ if (dev_state == ADF_GEN6_GENSTS_DEVHALT && reset == ADF_GEN6_GENSTS_PFLR) {
+ *reset_required = true;
+ return;
+ }
+
+ if (reset == ADF_GEN6_GENSTS_COLD_RESET)
+ dev_err(&GET_DEV(accel_dev), "Fatal error, cold reset required\n");
+
+ *reset_required = false;
+}
+
+static bool adf_gen6_handle_interrupt(struct adf_accel_dev *accel_dev, bool *reset_required)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+ bool handled = false;
+ u32 errsou;
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU0);
+ if (errsou & ADF_GEN6_ERRSOU0_MASK) {
+ adf_gen6_process_errsou0(accel_dev, csr);
+ handled = true;
+ }
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU1);
+ if (errsou & ADF_GEN6_ERRSOU1_MASK) {
+ adf_gen6_process_errsou1(accel_dev, csr, errsou);
+ handled = true;
+ }
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU2);
+ if (errsou & ADF_GEN6_ERRSOU2_MASK) {
+ adf_gen6_process_errsou2(accel_dev, csr, errsou);
+ handled = true;
+ }
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU3);
+ if (errsou & ADF_GEN6_ERRSOU3_MASK) {
+ adf_gen6_process_errsou3(accel_dev, csr, errsou);
+ handled = true;
+ }
+
+ adf_gen6_is_reset_required(accel_dev, csr, reset_required);
+
+ return handled;
+}
+
+void adf_gen6_init_ras_ops(struct adf_ras_ops *ras_ops)
+{
+ ras_ops->enable_ras_errors = adf_gen6_enable_ras;
+ ras_ops->disable_ras_errors = adf_gen6_disable_ras;
+ ras_ops->handle_interrupt = adf_gen6_handle_interrupt;
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_ras_ops);
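
The three callbacks installed above are consumed by the common error-handling path: it asks the generation code to decode and clear the error sources, then escalates if a reset is required. A hedged sketch of that consumer; example_handle_ras() is illustrative, not the actual adf_isr code:

static void example_handle_ras(struct adf_accel_dev *accel_dev)
{
	struct adf_ras_ops *ras_ops = &GET_HW_DATA(accel_dev)->ras_ops;
	bool reset_required = false;

	/* handle_interrupt() returns true if any ERRSOU bit was serviced */
	if (ras_ops->handle_interrupt(accel_dev, &reset_required) &&
	    reset_required)
		dev_err(&GET_DEV(accel_dev),
			"fatal RAS error, device reset required\n");
}
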
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h
new file mode 100644
index 000000000000..66ced271d173
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h
@@ -0,0 +1,504 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_GEN6_RAS_H_
+#define ADF_GEN6_RAS_H_
+
+#include <linux/bits.h>
+
+struct adf_ras_ops;
+
+/* Error source registers */
+#define ADF_GEN6_ERRSOU0 0x41A200
+#define ADF_GEN6_ERRSOU1 0x41A204
+#define ADF_GEN6_ERRSOU2 0x41A208
+#define ADF_GEN6_ERRSOU3 0x41A20C
+
+/* Error source mask registers */
+#define ADF_GEN6_ERRMSK0 0x41A210
+#define ADF_GEN6_ERRMSK1 0x41A214
+#define ADF_GEN6_ERRMSK2 0x41A218
+#define ADF_GEN6_ERRMSK3 0x41A21C
+
+/* ERRSOU0 Correctable error mask */
+#define ADF_GEN6_ERRSOU0_MASK BIT(0)
+
+#define ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT BIT(0)
+#define ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT BIT(1)
+#define ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT BIT(2)
+#define ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT BIT(3)
+#define ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT BIT(4)
+
+#define ADF_GEN6_ERRSOU1_MASK ( \
+ (ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT) | \
+ (ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT) | \
+ (ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT) | \
+ (ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT) | \
+ (ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT))
+
+#define ADF_GEN6_ERRMSK1_CPP0_MEUNC_BIT BIT(0)
+#define ADF_GEN6_ERRMSK1_CPP_CMDPARERR_BIT BIT(1)
+#define ADF_GEN6_ERRMSK1_RIMEM_PARERR_STS_BIT BIT(2)
+#define ADF_GEN6_ERRMSK1_TIMEM_PARERR_STS_BIT BIT(3)
+#define ADF_GEN6_ERRMSK1_IOSFCMD_PARERR_BIT BIT(4)
+
+#define ADF_GEN6_ERRMSK1_MASK ( \
+ (ADF_GEN6_ERRMSK1_CPP0_MEUNC_BIT) | \
+ (ADF_GEN6_ERRMSK1_CPP_CMDPARERR_BIT) | \
+ (ADF_GEN6_ERRMSK1_RIMEM_PARERR_STS_BIT) | \
+ (ADF_GEN6_ERRMSK1_TIMEM_PARERR_STS_BIT) | \
+ (ADF_GEN6_ERRMSK1_IOSFCMD_PARERR_BIT))
+
+/* HI AE Uncorrectable error log */
+#define ADF_GEN6_HIAEUNCERRLOG_CPP0 0x41A300
+
+/* HI AE Uncorrectable error log enable */
+#define ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0 0x41A320
+
+/* HI AE Correctable error log */
+#define ADF_GEN6_HIAECORERRLOG_CPP0 0x41A308
+
+/* HI AE Correctable error log enable */
+#define ADF_GEN6_HIAECORERRLOGENABLE_CPP0 0x41A318
+
+/* HI CPP Agent Command parity error log */
+#define ADF_GEN6_HICPPAGENTCMDPARERRLOG 0x41A310
+
+/* HI CPP Agent command parity error logging enable */
+#define ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE 0x41A314
+
+#define ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK 0x1B
+
+/* RI Memory parity error status register */
+#define ADF_GEN6_RIMEM_PARERR_STS 0x41B128
+
+/* RI Memory parity error reporting enable */
+#define ADF_GEN6_RI_MEM_PAR_ERR_EN0 0x41B12C
+
+/*
+ * RI Memory parity error mask
+ * BIT(4) - ri_tlq_phdr parity error
+ * BIT(5) - ri_tlq_pdata parity error
+ * BIT(6) - ri_tlq_nphdr parity error
+ * BIT(7) - ri_tlq_npdata parity error
+ * BIT(8) - ri_tlq_cplhdr parity error
+ * BIT(10) - BIT(13) - ri_tlq_cpldata[0:3] parity error
+ * BIT(19) - ri_cds_cmd_fifo parity error
+ * BIT(20) - ri_obc_ricpl_fifo parity error
+ * BIT(21) - ri_obc_tiricpl_fifo parity error
+ * BIT(22) - ri_obc_cppcpl_fifo parity error
+ * BIT(23) - ri_obc_pendcpl_fifo parity error
+ * BIT(24) - ri_cpp_cmd_fifo parity error
+ * BIT(25) - ri_cds_ticmd_fifo parity error
+ * BIT(26) - riti_cmd_fifo parity error
+ * BIT(27) - ri_int_msixtbl parity error
+ * BIT(28) - ri_int_imstbl parity error
+ * BIT(30) - ri_kpt_fuses parity error
+ */
+#define ADF_GEN6_RIMEM_PARERR_FATAL_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | \
+ BIT(7) | BIT(8) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | \
+ BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26) | BIT(27) | \
+ BIT(28) | BIT(30))
+
+#define ADF_GEN6_RIMEM_PARERR_CERR_MASK \
+ (BIT(10) | BIT(11) | BIT(12) | BIT(13))
+
+/* TI CI parity status */
+#define ADF_GEN6_TI_CI_PAR_STS 0x50060C
+
+/* TI CI parity reporting mask */
+#define ADF_GEN6_TI_CI_PAR_ERR_MASK 0x500608
+
+/*
+ * TI CI parity status mask
+ * BIT(0) - CdCmdQ_sts parity error status
+ * BIT(1) - CdDataQ_sts parity error status
+ * BIT(3) - CPP_SkidQ_sts parity error status
+ */
+#define ADF_GEN6_TI_CI_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(3))
+
+/* TI PULLFUB parity status */
+#define ADF_GEN6_TI_PULL0FUB_PAR_STS 0x500618
+
+/* TI PULLFUB parity error reporting mask */
+#define ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK 0x500614
+
+/*
+ * TI PULLFUB parity status mask
+ * BIT(0) - TrnPullReqQ_sts parity status
+ * BIT(1) - TrnSharedDataQ_sts parity status
+ * BIT(2) - TrnPullReqDataQ_sts parity status
+ * BIT(4) - CPP_CiPullReqQ_sts parity status
+ * BIT(5) - CPP_TrnPullReqQ_sts parity status
+ * BIT(6) - CPP_PullidQ_sts parity status
+ * BIT(7) - CPP_WaitDataQ_sts parity status
+ * BIT(8) - CPP_CdDataQ_sts parity status
+ * BIT(9) - CPP_TrnDataQP0_sts parity status
+ * BIT(10) - BIT(11) - CPP_TrnDataQRF[00:01]_sts parity status
+ * BIT(12) - CPP_TrnDataQP1_sts parity status
+ * BIT(13) - BIT(14) - CPP_TrnDataQRF[10:11]_sts parity status
+ */
+#define ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | \
+ BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14))
+
+/* TI PUSHFUB parity status */
+#define ADF_GEN6_TI_PUSHFUB_PAR_STS 0x500630
+
+/* TI PUSHFUB parity error reporting mask */
+#define ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK 0x50062C
+
+/*
+ * TI PUSHFUB parity status mask
+ * BIT(0) - SbPushReqQ_sts parity status
+ * BIT(1) - BIT(2) - SbPushDataQ[0:1]_sts parity status
+ * BIT(4) - CPP_CdPushReqQ_sts parity status
+ * BIT(5) - BIT(6) - CPP_CdPushDataQ[0:1]_sts parity status
+ * BIT(7) - CPP_SbPushReqQ_sts parity status
+ * BIT(8) - CPP_SbPushDataQP_sts parity status
+ * BIT(9) - BIT(10) - CPP_SbPushDataQRF[0:1]_sts parity status
+ */
+#define ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | \
+ BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10))
+
+/* TI CD parity status */
+#define ADF_GEN6_TI_CD_PAR_STS 0x50063C
+
+/* TI CD parity error mask */
+#define ADF_GEN6_TI_CD_PAR_ERR_MASK 0x500638
+
+/*
+ * TI CD parity status mask
+ * BIT(0) - BIT(15) - CtxMdRam[0:15]_sts parity status
+ * BIT(16) - Leaf2ClusterRam_sts parity status
+ * BIT(17) - BIT(18) - Ring2LeafRam[0:1]_sts parity status
+ * BIT(19) - VirtualQ_sts parity status
+ * BIT(20) - DtRdQ_sts parity status
+ * BIT(21) - DtWrQ_sts parity status
+ * BIT(22) - RiCmdQ_sts parity status
+ * BIT(23) - BypassQ_sts parity status
+ * BIT(24) - DtRdQ_sc_sts parity status
+ * BIT(25) - DtWrQ_sc_sts parity status
+ */
+#define ADF_GEN6_TI_CD_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \
+ BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | \
+ BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \
+ BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25))
+
+/* TI TRNSB parity status */
+#define ADF_GEN6_TI_TRNSB_PAR_STS 0x500648
+
+/* TI TRNSB parity error reporting mask */
+#define ADF_GEN6_TI_TRNSB_PAR_ERR_MASK 0x500644
+
+/*
+ * TI TRNSB parity status mask
+ * BIT(0) - TrnPHdrQP_sts parity status
+ * BIT(1) - TrnPHdrQRF_sts parity status
+ * BIT(2) - TrnPDataQP_sts parity status
+ * BIT(3) - BIT(6) - TrnPDataQRF[0:3]_sts parity status
+ * BIT(7) - TrnNpHdrQP_sts parity status
+ * BIT(8) - BIT(9) - TrnNpHdrQRF[0:1]_sts parity status
+ * BIT(10) - TrnCplHdrQ_sts parity status
+ * BIT(11) - TrnPutObsReqQ_sts parity status
+ * BIT(12) - TrnPushReqQ_sts parity status
+ * BIT(13) - SbSplitIdRam_sts parity status
+ * BIT(14) - SbReqCountQ_sts parity status
+ * BIT(15) - SbCplTrkRam_sts parity status
+ * BIT(16) - SbGetObsReqQ_sts parity status
+ * BIT(17) - SbEpochIdQ_sts parity status
+ * BIT(18) - SbAtCplHdrQ_sts parity status
+ * BIT(19) - SbAtCplDataQ_sts parity status
+ * BIT(20) - SbReqCountRam_sts parity status
+ * BIT(21) - SbAtCplHdrQ_sc_sts parity status
+ */
+#define ADF_GEN6_TI_TRNSB_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \
+ BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | \
+ BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | \
+ BIT(19) | BIT(20) | BIT(21))
+
+/* Status register to log misc error on RI */
+#define ADF_GEN6_RIMISCSTS 0x41B1B8
+
+/* Status control register to log misc RI error */
+#define ADF_GEN6_RIMISCCTL 0x41B1BC
+
+/*
+ * ERRSOU2 bit mask
+ * BIT(0) - SSM Interrupt Mask
+ * BIT(1) - CFC error on CPP: OR of the CFC push and pull errors
+ * BIT(2) - BIT(4) - CPP attention interrupts
+ * BIT(18) - PM interrupt
+ */
+#define ADF_GEN6_ERRSOU2_SSM_ERR_BIT BIT(0)
+#define ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT BIT(1)
+#define ADF_GEN6_ERRSOU2_CPP_CFC_ATT_INT_MASK \
+ (BIT(2) | BIT(3) | BIT(4))
+
+#define ADF_GEN6_ERRSOU2_PM_INT_BIT BIT(18)
+
+#define ADF_GEN6_ERRSOU2_MASK \
+ (ADF_GEN6_ERRSOU2_SSM_ERR_BIT | \
+ ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT)
+
+#define ADF_GEN6_ERRSOU2_DIS_MASK \
+ (ADF_GEN6_ERRSOU2_SSM_ERR_BIT | \
+ ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT | \
+ ADF_GEN6_ERRSOU2_CPP_CFC_ATT_INT_MASK)
+
+#define ADF_GEN6_IAINTSTATSSM 0x28
+
+/* IAINTSTATSSM error bit mask definitions */
+#define ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT BIT(0)
+#define ADF_GEN6_IAINTSTATSSM_PPERR_BIT BIT(2)
+#define ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT BIT(4)
+#define ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT BIT(5)
+#define ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT BIT(6)
+#define ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT BIT(7)
+
+#define ADF_GEN6_IAINTSTATSSM_MASK \
+ (ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_PPERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT)
+
+#define ADF_GEN6_UERRSSMSH 0x18
+
+/*
+ * UERRSSMSH error bit mask definitions
+ *
+ * BIT(0) - Indicates one uncorrectable error
+ * BIT(15) - Indicates multiple uncorrectable errors
+ * in device shared memory
+ */
+#define ADF_GEN6_UERRSSMSH_MASK (BIT(0) | BIT(15))
+
+/*
+ * CERRSSMSH error bit mask
+ * BIT(0) - Indicates one correctable error
+ */
+#define ADF_GEN6_CERRSSMSH_ERROR_BIT (BIT(0) | BIT(15) | BIT(24))
+#define ADF_GEN6_CERRSSMSH 0x10
+
+#define ADF_GEN6_INTMASKSSM 0x0
+
+/*
+ * Error reporting mask in INTMASKSSM
+ * BIT(0) - Shared memory uncorrectable interrupt mask
+ * BIT(2) - PPERR interrupt mask
+ * BIT(4) - SCM parity error interrupt mask
+ * BIT(5) - CPP parity error interrupt mask
+ * BIT(6) - SHRAM RF parity error interrupt mask
+ * BIT(7) - AXI unexpected completion error mask
+ */
+#define ADF_GEN6_INTMASKSSM_MASK \
+ (BIT(0) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | BIT(7))
+
+/* CPP push or pull error */
+#define ADF_GEN6_PPERR 0x8
+
+#define ADF_GEN6_PPERR_MASK (BIT(0) | BIT(1))
+
+/*
+ * SSM_FERR_STATUS error bit mask definitions
+ */
+#define ADF_GEN6_SCM_PAR_ERR_MASK BIT(5)
+#define ADF_GEN6_CPP_PAR_ERR_MASK (BIT(0) | BIT(1) | BIT(2))
+#define ADF_GEN6_UNEXP_CPL_ERR_MASK (BIT(3) | BIT(4) | BIT(10) | BIT(11))
+#define ADF_GEN6_RF_PAR_ERR_MASK BIT(16)
+
+#define ADF_GEN6_SSM_FERR_STATUS 0x9C
+
+#define ADF_GEN6_CPP_CFC_ERR_STATUS 0x640C04
+
+/*
+ * BIT(0) - Indicates one or more CPP CFC errors
+ * BIT(1) - Indicates multiple CPP CFC errors
+ * BIT(7) - Indicates CPP CFC command parity error type
+ * BIT(8) - Indicates CPP CFC data parity error type
+ */
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_ERR_BIT BIT(0)
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_MERR_BIT BIT(1)
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_CMDPAR_BIT BIT(7)
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_DATAPAR_BIT BIT(8)
+#define ADF_GEN6_CPP_CFC_FATAL_ERR_BIT \
+ (ADF_GEN6_CPP_CFC_ERR_STATUS_ERR_BIT | \
+ ADF_GEN6_CPP_CFC_ERR_STATUS_MERR_BIT)
+
+/*
+ * BIT(0) - Enables CFC to detect and log a push/pull data error
+ * BIT(1) - Enables CFC to generate an interrupt to the PCIEP for a CPP error
+ * BIT(4) - When set to 1, parity detection is disabled
+ * BIT(5) - When set to 1, parity detection is disabled on the CPP command bus
+ * BIT(6) - When set to 1, parity detection is disabled on the CPP push/pull bus
+ * BIT(9) - When set to 1, RF parity error detection is disabled
+ */
+#define ADF_GEN6_CPP_CFC_ERR_CTRL_MASK (BIT(0) | BIT(1))
+
+#define ADF_GEN6_CPP_CFC_ERR_CTRL_DIS_MASK \
+ (BIT(4) | BIT(5) | BIT(6) | BIT(9) | BIT(10))
+
+#define ADF_GEN6_CPP_CFC_ERR_CTRL 0x640C00
+
+/*
+ * BIT(0) - Clears bit(0) of ADF_GEN6_CPP_CFC_ERR_STATUS
+ * when an error is reported on CPP
+ * BIT(1) - Clears bit(1) of ADF_GEN6_CPP_CFC_ERR_STATUS
+ * when multiple errors are reported on CPP
+ * BIT(2) - Clears bit(2) of ADF_GEN6_CPP_CFC_ERR_STATUS
+ * when attention interrupt is reported
+ */
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_CLR_MASK (BIT(0) | BIT(1) | BIT(2))
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_CLR 0x640C08
+
+/*
+ * ERRSOU3 bit masks
+ * BIT(0) - indicates error response order overflow and/or BME error
+ * BIT(1) - indicates RI push/pull error
+ * BIT(2) - indicates TI push/pull error
+ * BIT(5) - indicates TI pull parity error
+ * BIT(6) - indicates RI push parity error
+ * BIT(7) - indicates VFLR interrupt
+ * BIT(8) - indicates ring pair interrupts for ATU detected fault
+ * BIT(9) - indicates rate limiting error
+ */
+#define ADF_GEN6_ERRSOU3_TIMISCSTS_BIT BIT(0)
+#define ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK (BIT(1) | BIT(6))
+#define ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK (BIT(2) | BIT(5))
+#define ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT BIT(7)
+#define ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT BIT(8)
+#define ADF_GEN6_ERRSOU3_RLTERROR_BIT BIT(9)
+#define ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT BIT(16)
+#define ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT BIT(17)
+#define ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT BIT(18)
+#define ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT BIT(19)
+#define ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT BIT(20)
+
+#define ADF_GEN6_ERRSOU3_MASK ( \
+ (ADF_GEN6_ERRSOU3_TIMISCSTS_BIT) | \
+ (ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK) | \
+ (ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK) | \
+ (ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT) | \
+ (ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT) | \
+ (ADF_GEN6_ERRSOU3_RLTERROR_BIT) | \
+ (ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT) | \
+ (ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT) | \
+ (ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT) | \
+ (ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT) | \
+ (ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT))
+
+#define ADF_GEN6_ERRSOU3_DIS_MASK ( \
+ (ADF_GEN6_ERRSOU3_TIMISCSTS_BIT) | \
+ (ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK) | \
+ (ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK) | \
+ (ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT) | \
+ (ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT) | \
+ (ADF_GEN6_ERRSOU3_RLTERROR_BIT) | \
+ (ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT))
+
+/* Rate limiting error log register */
+#define ADF_GEN6_RLT_ERRLOG 0x508814
+
+#define ADF_GEN6_RLT_ERRLOG_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+/* TI misc status register */
+#define ADF_GEN6_TIMISCSTS 0x50054C
+
+/* TI misc error reporting mask */
+#define ADF_GEN6_TIMISCCTL 0x500548
+
+/*
+ * TI Misc error reporting control mask
+ * BIT(0) - Enables error detection and logging in the TIMISCSTS register
+ * BIT(1) - Has effect only when SR-IOV is enabled; this bit is 0 by default
+ * BIT(2) - Enables the D-F-x counter within the dispatch arbiter
+ * to start based on the triggering command
+ * BIT(30) - Disables VFLR functionality
+ * The values of bits 1, 2 and 30 must be preserved and are not meant to be
+ * changed within RAS.
+ */
+#define ADF_GEN6_TIMISCCTL_BIT BIT(0)
+#define ADF_GEN6_TIMSCCTL_RELAY_MASK (BIT(1) | BIT(2) | BIT(30))
+
+/* RI CPP interface status register */
+#define ADF_GEN6_RICPPINTSTS 0x41A330
+
+/*
+ * Uncorrectable error mask in RICPPINTSTS register
+ * BIT(0) - RI asserted the CPP error signal during a push
+ * BIT(1) - RI detected the CPP error signal asserted during a pull
+ * BIT(2) - RI detected a push data parity error
+ * BIT(3) - RI detected a push valid parity error
+ */
+#define ADF_GEN6_RICPPINTSTS_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+/* RI CPP interface register control */
+#define ADF_GEN6_RICPPINTCTL 0x41A32C
+
+/*
+ * Control bit mask for RICPPINTCTL register
+ * BIT(0) - value of 1 enables error detection and reporting
+ * on the RI CPP Push interface
+ * BIT(1) - value of 1 enables error detection and reporting
+ * on the RI CPP Pull interface
+ * BIT(2) - value of 1 enables RI parity error detection and reporting
+ * BIT(3) - value of 1 enables parity checking on CPP
+ */
+#define ADF_GEN6_RICPPINTCTL_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4))
+
+/* TI CPP interface status register */
+#define ADF_GEN6_TICPPINTSTS 0x50053C
+
+/*
+ * Uncorrectable error mask in TICPPINTSTS register
+ * BIT(0) - value of 1 indicates that the TI asserted
+ * the CPP error signal during a push
+ * BIT(1) - value of 1 indicates that the TI detected
+ * the CPP error signal asserted during a pull
+ * BIT(2) - value of 1 indicates that the TI detected
+ * a pull data parity error
+ */
+#define ADF_GEN6_TICPPINTSTS_MASK (BIT(0) | BIT(1) | BIT(2))
+
+/* TI CPP interface status register control */
+#define ADF_GEN6_TICPPINTCTL 0x500538
+
+/*
+ * Control bit mask for TICPPINTCTL register
+ * BIT(0) - value of 1 enables error detection and reporting on
+ * the TI CPP Push interface
+ * BIT(1) - value of 1 enables error detection and reporting on
+ * the TI CPP Pull interface
+ * BIT(2) - value of 1 enables parity error detection and logging on
+ * the TI CPP Pull interface
+ * BIT(3) - value of 1 enables CPP CMD and Pull Data parity checking
+ */
+#define ADF_GEN6_TICPPINTCTL_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4))
+
+/* ATU fault status register */
+#define ADF_GEN6_ATUFAULTSTATUS(i) (0x506000 + ((i) * 0x4))
+
+#define ADF_GEN6_ATUFAULTSTATUS_BIT BIT(0)
+
+/* Command parity error detected on IOSFP command to QAT */
+#define ADF_GEN6_RIMISCSTS_BIT BIT(0)
+
+#define ADF_GEN6_GENSTS 0x41A220
+#define ADF_GEN6_GENSTS_DEVICE_STATE_MASK GENMASK(1, 0)
+#define ADF_GEN6_GENSTS_RESET_TYPE_MASK GENMASK(3, 2)
+#define ADF_GEN6_GENSTS_PFLR 0x1
+#define ADF_GEN6_GENSTS_COLD_RESET 0x3
+#define ADF_GEN6_GENSTS_DEVHALT 0x1
+
+void adf_gen6_init_ras_ops(struct adf_ras_ops *ras_ops);
+
+#endif /* ADF_GEN6_RAS_H_ */
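A small sketch of decoding GENSTS with the masks above, using FIELD_GET() from linux/bitfield.h; the interpretation of the two fields here (device halted, cold reset needed) is illustrative:

static bool example_gensts_cold_reset_needed(void __iomem *csr)
{
	u32 gensts = ADF_CSR_RD(csr, ADF_GEN6_GENSTS);

	if (FIELD_GET(ADF_GEN6_GENSTS_DEVICE_STATE_MASK, gensts) !=
	    ADF_GEN6_GENSTS_DEVHALT)
		return false;

	/* Reset type distinguishes a PFLR from a full cold reset */
	return FIELD_GET(ADF_GEN6_GENSTS_RESET_TYPE_MASK, gensts) ==
	       ADF_GEN6_GENSTS_COLD_RESET;
}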
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
new file mode 100644
index 000000000000..58a072e2f936
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/export.h>
+
+#include "adf_gen4_config.h"
+#include "adf_gen4_hw_csr_data.h"
+#include "adf_gen4_pfvf.h"
+#include "adf_gen6_shared.h"
+
+struct adf_accel_dev;
+struct adf_pfvf_ops;
+struct adf_hw_csr_ops;
+
+/*
+ * QAT GEN4 and GEN6 devices often differ in terms of supported features,
+ * options and internal logic. However, some of the mechanisms and register
+ * layout are shared between those two GENs. This file serves as an abstraction
+ * layer that allows reuse of the existing GEN4 implementation where it is
+ * also applicable to GEN6, without additional overhead and complexity.
+ */
+void adf_gen6_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ adf_gen4_init_pf_pfvf_ops(pfvf_ops);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_pf_pfvf_ops);
+
+void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
+{
+	adf_gen4_init_hw_csr_ops(csr_ops);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_hw_csr_ops);
+
+int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev)
+{
+ return adf_gen4_cfg_dev_init(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_cfg_dev_init);
+
+int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev)
+{
+ return adf_comp_dev_config(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_comp_dev_config);
+
+int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev)
+{
+ return adf_no_dev_config(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_no_dev_config);
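Because these are thin wrappers, wiring them into a GEN6 device looks just like the GEN4 flow. A sketch, assuming the usual hw_data initialization (field names follow the existing GEN4 drivers; the function name is hypothetical):

static void example_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
{
	/* GEN4 mechanisms re-exported under GEN6 names */
	adf_gen6_init_hw_csr_ops(&hw_data->csr_ops);
	adf_gen6_init_pf_pfvf_ops(&hw_data->pfvf_ops);
	hw_data->dev_config = adf_gen6_comp_dev_config;
}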
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
new file mode 100644
index 000000000000..bc8e71e984fc
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_GEN6_SHARED_H_
+#define ADF_GEN6_SHARED_H_
+
+struct adf_hw_csr_ops;
+struct adf_accel_dev;
+struct adf_pfvf_ops;
+
+void adf_gen6_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev);
+int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev);
+int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev);
+#endif /* ADF_GEN6_SHARED_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c b/drivers/crypto/intel/qat/qat_common/adf_timer.c
index 35ccb91d6ec1..8962a49f145a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_timer.c
@@ -12,9 +12,9 @@
#include "adf_admin.h"
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
-#include "adf_gen4_timer.h"
+#include "adf_timer.h"
-#define ADF_GEN4_TIMER_PERIOD_MS 200
+#define ADF_DEFAULT_TIMER_PERIOD_MS 200
/* This periodic update is used to trigger HB, RL & TL fw events */
static void work_handler(struct work_struct *work)
@@ -27,16 +27,16 @@ static void work_handler(struct work_struct *work)
accel_dev = timer_ctx->accel_dev;
adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
- msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));
+ msecs_to_jiffies(ADF_DEFAULT_TIMER_PERIOD_MS));
time_periods = div_u64(ktime_ms_delta(ktime_get_real(), timer_ctx->initial_ktime),
- ADF_GEN4_TIMER_PERIOD_MS);
+ ADF_DEFAULT_TIMER_PERIOD_MS);
if (adf_send_admin_tim_sync(accel_dev, time_periods))
dev_err(&GET_DEV(accel_dev), "Failed to synchronize qat timer\n");
}
-int adf_gen4_timer_start(struct adf_accel_dev *accel_dev)
+int adf_timer_start(struct adf_accel_dev *accel_dev)
{
struct adf_timer *timer_ctx;
@@ -50,13 +50,13 @@ int adf_gen4_timer_start(struct adf_accel_dev *accel_dev)
INIT_DELAYED_WORK(&timer_ctx->work_ctx, work_handler);
adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
- msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));
+ msecs_to_jiffies(ADF_DEFAULT_TIMER_PERIOD_MS));
return 0;
}
-EXPORT_SYMBOL_GPL(adf_gen4_timer_start);
+EXPORT_SYMBOL_GPL(adf_timer_start);
-void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev)
+void adf_timer_stop(struct adf_accel_dev *accel_dev)
{
struct adf_timer *timer_ctx = accel_dev->timer;
@@ -68,4 +68,4 @@ void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev)
kfree(timer_ctx);
accel_dev->timer = NULL;
}
-EXPORT_SYMBOL_GPL(adf_gen4_timer_stop);
+EXPORT_SYMBOL_GPL(adf_timer_stop);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h b/drivers/crypto/intel/qat/qat_common/adf_timer.h
index 66a709e7b358..68e5136d6ba1 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_timer.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2023 Intel Corporation */
-#ifndef ADF_GEN4_TIMER_H_
-#define ADF_GEN4_TIMER_H_
+#ifndef ADF_TIMER_H_
+#define ADF_TIMER_H_
#include <linux/ktime.h>
#include <linux/workqueue.h>
@@ -15,7 +15,7 @@ struct adf_timer {
ktime_t initial_ktime;
};
-int adf_gen4_timer_start(struct adf_accel_dev *accel_dev);
-void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev);
+int adf_timer_start(struct adf_accel_dev *accel_dev);
+void adf_timer_stop(struct adf_accel_dev *accel_dev);
-#endif /* ADF_GEN4_TIMER_H_ */
+#endif /* ADF_TIMER_H_ */
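With the rename the timer API is generation neutral. A sketch of the intended usage, mirroring how the GEN4 drivers run the firmware sync timer across device start and stop (hook names are illustrative):

static int example_dev_start(struct adf_accel_dev *accel_dev)
{
	/* Starts the periodic HB/RL/TL firmware sync (200 ms period) */
	return adf_timer_start(accel_dev);
}

static void example_dev_stop(struct adf_accel_dev *accel_dev)
{
	adf_timer_stop(accel_dev);
}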
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
index 04f645957e28..81969c515a17 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
@@ -44,6 +44,7 @@ enum icp_qat_fw_comp_20_cmd_id {
#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1
#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7
#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MAX_VALUE 0xFFFFFFFF
#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \
ret_uncomp, secure_ram) \
@@ -117,7 +118,7 @@ struct icp_qat_fw_comp_req_params {
#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr, \
cnvdfx, crc, xxhash_acc, \
cnv_error_type, append_crc, \
- drop_data) \
+ drop_data, partial_decomp) \
((((sop) & ICP_QAT_FW_COMP_SOP_MASK) << \
ICP_QAT_FW_COMP_SOP_BITPOS) | \
(((eop) & ICP_QAT_FW_COMP_EOP_MASK) << \
@@ -139,7 +140,9 @@ struct icp_qat_fw_comp_req_params {
(((append_crc) & ICP_QAT_FW_COMP_APPEND_CRC_MASK) \
<< ICP_QAT_FW_COMP_APPEND_CRC_BITPOS) | \
(((drop_data) & ICP_QAT_FW_COMP_DROP_DATA_MASK) \
- << ICP_QAT_FW_COMP_DROP_DATA_BITPOS))
+ << ICP_QAT_FW_COMP_DROP_DATA_BITPOS) | \
+ (((partial_decomp) & ICP_QAT_FW_COMP_PARTIAL_DECOMP_MASK) \
+ << ICP_QAT_FW_COMP_PARTIAL_DECOMP_BITPOS))
#define ICP_QAT_FW_COMP_NOT_SOP 0
#define ICP_QAT_FW_COMP_SOP 1
@@ -161,6 +164,8 @@ struct icp_qat_fw_comp_req_params {
#define ICP_QAT_FW_COMP_NO_APPEND_CRC 0
#define ICP_QAT_FW_COMP_DROP_DATA 1
#define ICP_QAT_FW_COMP_NO_DROP_DATA 0
+#define ICP_QAT_FW_COMP_PARTIAL_DECOMPRESS 1
+#define ICP_QAT_FW_COMP_NO_PARTIAL_DECOMPRESS 0
#define ICP_QAT_FW_COMP_SOP_BITPOS 0
#define ICP_QAT_FW_COMP_SOP_MASK 0x1
#define ICP_QAT_FW_COMP_EOP_BITPOS 1
@@ -189,6 +194,8 @@ struct icp_qat_fw_comp_req_params {
#define ICP_QAT_FW_COMP_APPEND_CRC_MASK 0x1
#define ICP_QAT_FW_COMP_DROP_DATA_BITPOS 25
#define ICP_QAT_FW_COMP_DROP_DATA_MASK 0x1
+#define ICP_QAT_FW_COMP_PARTIAL_DECOMP_BITPOS 27
+#define ICP_QAT_FW_COMP_PARTIAL_DECOMP_MASK 0x1
#define ICP_QAT_FW_COMP_SOP_GET(flags) \
QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SOP_BITPOS, \
@@ -281,8 +288,18 @@ struct icp_qat_fw_comp_req {
union {
struct icp_qat_fw_xlt_req_params xlt_pars;
__u32 resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2];
+ struct {
+ __u32 partial_decompress_length;
+ __u32 partial_decompress_offset;
+ } partial_decompress;
} u1;
- __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
+ union {
+ __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
+ struct {
+ __u32 asb_value;
+ __u32 reserved;
+ } asb_threshold;
+ } u3;
struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl;
union {
struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl;
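The new partial_decomp argument lands in bit 27 of the request parameter flags, and the u1.partial_decompress fields carry the requested window; u3.asb_threshold reuses the old reserved words for the auto-select-best threshold. A sketch of filling a request for partial decompression (the request pointer and window values are illustrative):

struct icp_qat_fw_comp_req *req;	/* from the driver's request setup */
u32 off = 4096, len = 1024;		/* window to decompress */

req->u1.partial_decompress.partial_decompress_offset = off;
req->u1.partial_decompress.partial_decompress_length = len;
req->u3.asb_threshold.asb_value = ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MAX_VALUE;
/*
 * The parameter flags are then built as before, passing
 * ICP_QAT_FW_COMP_PARTIAL_DECOMPRESS (or the _NO_ variant) as the new
 * final argument of ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD().
 */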
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
index 7eb5daef4f88..6887930c7995 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
@@ -35,6 +35,7 @@ struct icp_qat_fw_loader_chip_info {
u32 wakeup_event_val;
bool fw_auth;
bool css_3k;
+ bool dual_sign;
bool tgroup_share_ustore;
u32 fcu_ctl_csr;
u32 fcu_sts_csr;
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h
new file mode 100644
index 000000000000..dce639152345
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ICP_QAT_HW_51_COMP_H_
+#define ICP_QAT_HW_51_COMP_H_
+
+#include <linux/types.h>
+
+#include "icp_qat_fw.h"
+#include "icp_qat_hw_51_comp_defs.h"
+
+struct icp_qat_hw_comp_51_config_csr_lower {
+ enum icp_qat_hw_comp_51_abd abd;
+ enum icp_qat_hw_comp_51_lllbd_ctrl lllbd;
+ enum icp_qat_hw_comp_51_search_depth sd;
+ enum icp_qat_hw_comp_51_min_match_control mmctrl;
+ enum icp_qat_hw_comp_51_lz4_block_checksum lbc;
+};
+
+static inline u32
+ICP_QAT_FW_COMP_51_BUILD_CONFIG_LOWER(struct icp_qat_hw_comp_51_config_csr_lower csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.abd,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_MASK);
+ QAT_FIELD_SET(val32, csr.lllbd,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_MASK);
+ QAT_FIELD_SET(val32, csr.sd,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_MASK);
+ QAT_FIELD_SET(val32, csr.mmctrl,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.lbc,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK);
+
+ return val32;
+}
+
+struct icp_qat_hw_comp_51_config_csr_upper {
+ enum icp_qat_hw_comp_51_dmm_algorithm edmm;
+ enum icp_qat_hw_comp_51_bms bms;
+ enum icp_qat_hw_comp_51_scb_mode_reset_mask scb_mode_reset;
+};
+
+static inline u32
+ICP_QAT_FW_COMP_51_BUILD_CONFIG_UPPER(struct icp_qat_hw_comp_51_config_csr_upper csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.edmm,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_MASK);
+ QAT_FIELD_SET(val32, csr.bms,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_MASK);
+ QAT_FIELD_SET(val32, csr.scb_mode_reset,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK);
+
+ return val32;
+}
+
+struct icp_qat_hw_decomp_51_config_csr_lower {
+ enum icp_qat_hw_decomp_51_lz4_block_checksum lbc;
+};
+
+static inline u32
+ICP_QAT_FW_DECOMP_51_BUILD_CONFIG_LOWER(struct icp_qat_hw_decomp_51_config_csr_lower csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.lbc,
+ ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS,
+ ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK);
+
+ return val32;
+}
+
+struct icp_qat_hw_decomp_51_config_csr_upper {
+ enum icp_qat_hw_decomp_51_bms bms;
+};
+
+static inline u32
+ICP_QAT_FW_DECOMP_51_BUILD_CONFIG_UPPER(struct icp_qat_hw_decomp_51_config_csr_upper csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.bms,
+ ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_BITPOS,
+ ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_MASK);
+
+ return val32;
+}
+
+#endif /* ICP_QAT_HW_51_COMP_H_ */
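The builders take a struct of enum fields and pack them with QAT_FIELD_SET(). A sketch of producing the lower configuration word, using the defaults from icp_qat_hw_51_comp_defs.h plus a non-default search depth (the value choices are illustrative):

struct icp_qat_hw_comp_51_config_csr_lower lower = {
	.abd = ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_DEFAULT_VAL,
	.lllbd = ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL,
	.sd = ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_6,
	.mmctrl = ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL,
	.lbc = ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_DEFAULT_VAL,
};
u32 lower_csr = ICP_QAT_FW_COMP_51_BUILD_CONFIG_LOWER(lower);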
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h
new file mode 100644
index 000000000000..e745688c5da4
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ICP_QAT_HW_51_COMP_DEFS_H_
+#define ICP_QAT_HW_51_COMP_DEFS_H_
+
+#include <linux/bits.h>
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SOM_CONTROL_BITPOS 28
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SOM_CONTROL_MASK GENMASK(1, 0)
+enum icp_qat_hw_comp_51_som_control {
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_NORMAL_MODE = 0x0,
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_DICTIONARY_MODE = 0x1,
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_INPUT_CRC = 0x2,
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_RESERVED_MODE = 0x3,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SOM_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_NORMAL_MODE
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS 27
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_skip_hash_rd_control {
+ ICP_QAT_HW_COMP_51_SKIP_HASH_RD_CONTROL_NO_SKIP = 0x0,
+ ICP_QAT_HW_COMP_51_SKIP_HASH_RD_CONTROL_SKIP_HASH_READS = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_RD_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SKIP_HASH_RD_CONTROL_NO_SKIP
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYPASS_COMPRESSION_BITPOS 25
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYPASS_COMPRESSION_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_bypass_compression {
+ ICP_QAT_HW_COMP_51_BYPASS_COMPRESSION_DISABLED = 0x0,
+ ICP_QAT_HW_COMP_51_BYPASS_COMPRESSION_ENABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYPASS_COMPRESSION_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_BYPASS_COMPRESSION_DISABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_BITPOS 22
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_dmm_algorithm {
+ ICP_QAT_HW_COMP_51_DMM_ALGORITHM_EDMM_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_DMM_ALGORITHM_ZSTD_DMM_LITE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_DMM_ALGORITHM_EDMM_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_TOKEN_FUSION_INTERNAL_ONLY_BITPOS 21
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_TOKEN_FUSION_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_token_fusion_internal_only {
+ ICP_QAT_HW_COMP_51_TOKEN_FUSION_INTERNAL_ONLY_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_TOKEN_FUSION_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_TOKEN_FUSION_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_TOKEN_FUSION_INTERNAL_ONLY_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_BITPOS 19
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_MASK GENMASK(1, 0)
+enum icp_qat_hw_comp_51_bms {
+ ICP_QAT_HW_COMP_51_BMS_BMS_64KB = 0x0,
+ ICP_QAT_HW_COMP_51_BMS_BMS_256KB = 0x1,
+ ICP_QAT_HW_COMP_51_BMS_BMS_1MB = 0x2,
+ ICP_QAT_HW_COMP_51_BMS_BMS_4MB = 0x3,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_BMS_BMS_64KB
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS 18
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_scb_mode_reset_mask {
+ ICP_QAT_HW_COMP_51_SCB_MODE_RESET_MASK_DO_NOT_RESET_HB_HT = 0x0,
+ ICP_QAT_HW_COMP_51_SCB_MODE_RESET_MASK_RESET_HB_HT = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SCB_MODE_RESET_MASK_DO_NOT_RESET_HB_HT
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_BITPOS 2
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_zstd_frame_gen_dec_en {
+ ICP_QAT_HW_COMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_DISABLE = 0x0,
+ ICP_QAT_HW_COMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_CNV_DISABLE_BITPOS 1
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_CNV_DISABLE_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_cnv_disable {
+ ICP_QAT_HW_COMP_51_CNV_DISABLE_CNV_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_CNV_DISABLE_CNV_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_CNV_DISABLE_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_CNV_DISABLE_CNV_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ASB_DISABLE_BITPOS 0
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ASB_DISABLE_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_asb_disable {
+ ICP_QAT_HW_COMP_51_ASB_DISABLE_ASB_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_ASB_DISABLE_ASB_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ASB_DISABLE_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_ASB_DISABLE_ASB_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_BITPOS 21
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_spec_decoder_internal_only {
+ ICP_QAT_HW_COMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL = 0x0,
+ ICP_QAT_HW_COMP_51_SPEC_DECODER_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_BITPOS 20
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_mini_xcam_internal_only {
+ ICP_QAT_HW_COMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL = 0x0,
+ ICP_QAT_HW_COMP_51_MINI_XCAM_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_REP_OFF_ENC_INTERNAL_ONLY_BITPOS 19
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_REP_OFF_ENC_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_rep_off_enc_internal_only {
+ ICP_QAT_HW_COMP_51_REP_OFF_ENC_INTERNAL_ONLY_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_REP_OFF_ENC_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_REP_OFF_ENC_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_REP_OFF_ENC_INTERNAL_ONLY_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_PROG_BLOCK_DROP_INTERNAL_ONLY_BITPOS 18
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_PROG_BLOCK_DROP_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_prog_block_drop_internal_only {
+ ICP_QAT_HW_COMP_51_PROG_BLOCK_DROP_INTERNAL_ONLY_DISABLE = 0x0,
+ ICP_QAT_HW_COMP_51_PROG_BLOCK_DROP_INTERNAL_ONLY_ENABLE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_PROG_BLOCK_DROP_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_PROG_BLOCK_DROP_INTERNAL_ONLY_DISABLE
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_BITPOS 17
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_skip_hash_override_internal_only {
+ ICP_QAT_HW_COMP_51_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_DETERMINE_HASH_PARAMS = 0x0,
+ ICP_QAT_HW_COMP_51_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_OVERRIDE_HASH_PARAMS = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_DETERMINE_HASH_PARAMS
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_HBS_BITPOS 14
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_HBS_MASK GENMASK(2, 0)
+enum icp_qat_hw_comp_51_hbs {
+ ICP_QAT_HW_COMP_51_HBS_32KB = 0x0,
+ ICP_QAT_HW_COMP_51_HBS_64KB = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_HBS_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_HBS_32KB
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_BITPOS 13
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_abd {
+ ICP_QAT_HW_COMP_51_ABD_ABD_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_ABD_ABD_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_ABD_ABD_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_BITPOS 12
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_lllbd_ctrl {
+ ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_BITPOS 8
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_MASK GENMASK(3, 0)
+enum icp_qat_hw_comp_51_search_depth {
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1 = 0x1,
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_6 = 0x3,
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_9 = 0x4,
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_10 = 0x4,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_FORMAT_BITPOS 5
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_FORMAT_MASK GENMASK(2, 0)
+enum icp_qat_hw_comp_51_format {
+ ICP_QAT_HW_COMP_51_FORMAT_ILZ77 = 0x1,
+ ICP_QAT_HW_COMP_51_FORMAT_LZ4 = 0x2,
+ ICP_QAT_HW_COMP_51_FORMAT_LZ4s = 0x3,
+ ICP_QAT_HW_COMP_51_FORMAT_ZSTD = 0x4,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_FORMAT_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_FORMAT_ILZ77
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_min_match_control {
+ ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_3B = 0x0,
+ ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_4B = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_3B
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS 3
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_COLLISION_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_skip_hash_collision {
+ ICP_QAT_HW_COMP_51_SKIP_HASH_COLLISION_ALLOW = 0x0,
+ ICP_QAT_HW_COMP_51_SKIP_HASH_COLLISION_DONT_ALLOW = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SKIP_HASH_COLLISION_ALLOW
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS 2
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_UPDATE_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_skip_hash_update {
+ ICP_QAT_HW_COMP_51_SKIP_HASH_UPDATE_ALLOW = 0x0,
+ ICP_QAT_HW_COMP_51_SKIP_HASH_UPDATE_DONT_ALLOW = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SKIP_HASH_UPDATE_ALLOW
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYTE_SKIP_BITPOS 1
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYTE_SKIP_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_byte_skip {
+ ICP_QAT_HW_COMP_51_BYTE_SKIP_3BYTE_TOKEN = 0x0,
+ ICP_QAT_HW_COMP_51_BYTE_SKIP_3BYTE_LITERAL = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_BYTE_SKIP_3BYTE_TOKEN
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS 0
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_lz4_block_checksum {
+ ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_ABSENT = 0x0,
+ ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_PRESENT = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_ABSENT
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_DISCARD_DATA_BITPOS 26
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_DISCARD_DATA_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_discard_data {
+ ICP_QAT_HW_DECOMP_51_DISCARD_DATA_DISABLED = 0x0,
+ ICP_QAT_HW_DECOMP_51_DISCARD_DATA_ENABLED = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_DISCARD_DATA_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_DISCARD_DATA_DISABLED
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_BITPOS 19
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_MASK GENMASK(1, 0)
+enum icp_qat_hw_decomp_51_bms {
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_64KB = 0x0,
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_256KB = 0x1,
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_1MB = 0x2,
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_4MB = 0x3,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_64KB
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_BITPOS 2
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_zstd_frame_gen_dec_en {
+ ICP_QAT_HW_DECOMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_DISABLE = 0x0,
+ ICP_QAT_HW_DECOMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_BITPOS 21
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_spec_decoder_internal_only {
+ ICP_QAT_HW_DECOMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL = 0x0,
+ ICP_QAT_HW_DECOMP_51_SPEC_DECODER_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_BITPOS 20
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_mini_xcam_internal_only {
+ ICP_QAT_HW_DECOMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL = 0x0,
+ ICP_QAT_HW_DECOMP_51_MINI_XCAM_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_BITPOS 14
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_MASK GENMASK(2, 0)
+enum icp_qat_hw_decomp_51_hbs {
+ ICP_QAT_HW_DECOMP_51_HBS_32KB = 0x0,
+ ICP_QAT_HW_DECOMP_51_HBS_64KB = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_HBS_32KB
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_FORMAT_BITPOS 5
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_FORMAT_MASK GENMASK(2, 0)
+enum icp_qat_hw_decomp_51_format {
+ ICP_QAT_HW_DECOMP_51_FORMAT_ILZ77 = 0x1,
+ ICP_QAT_HW_DECOMP_51_FORMAT_LZ4 = 0x2,
+ ICP_QAT_HW_DECOMP_51_FORMAT_RESERVED = 0x3,
+ ICP_QAT_HW_DECOMP_51_FORMAT_ZSTD = 0x4,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_FORMAT_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_FORMAT_ILZ77
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS 0
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_lz4_block_checksum {
+ ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_ABSENT = 0x0,
+ ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_PRESENT = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_ABSENT
+
+#endif /* ICP_QAT_HW_51_COMP_DEFS_H_ */
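Note the *_MASK values in this header are field-width masks relative to the corresponding *_BITPOS, not in-place masks; QAT_FIELD_SET() (defined in icp_qat_fw.h) shifts the mask into position. Conceptually, a field update amounts to:

/* Sketch of the convention behind QAT_FIELD_SET(flags, val, bitpos, mask) */
flags = (flags & ~(mask << bitpos)) | ((val & mask) << bitpos);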
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
index 1c7bcd8e4055..6313c35eff0c 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
@@ -7,6 +7,7 @@
#define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000
#define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000
#define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000
+#define ICP_QAT_AC_6XXX_DEV_TYPE 0x80000000
#define ICP_QAT_UCLO_MAX_AE 17
#define ICP_QAT_UCLO_MAX_CTX 8
#define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
@@ -81,6 +82,21 @@
#define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN 0x40000
#define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN 0x30000
+/* All lengths below are in bytes */
+#define ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN 12
+#define ICP_QAT_DUALSIGN_OPAQUE_HDR_ALIGN_LEN 16
+#define ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN 3540
+#define ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN 64
+#define ICP_QAT_DUALSIGN_XMSS_SIG_LEN 2692
+#define ICP_QAT_DUALSIGN_XMSS_SIG_ALIGN_LEN 2696
+#define ICP_QAT_DUALSIGN_MISC_INFO_LEN 16
+#define ICP_QAT_DUALSIGN_FW_TYPE_LEN 7
+#define ICP_QAT_DUALSIGN_MODULE_TYPE 0x14
+#define ICP_QAT_DUALSIGN_HDR_LEN 0x375
+#define ICP_QAT_DUALSIGN_HDR_VER 0x40001
+#define ICP_QAT_DUALSIGN_HDR_LEN_OFFSET 4
+#define ICP_QAT_DUALSIGN_HDR_VER_OFFSET 8
+
#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1)
@@ -440,6 +456,13 @@ struct icp_qat_fw_auth_desc {
unsigned int img_ae_init_data_low;
unsigned int img_ae_insts_high;
unsigned int img_ae_insts_low;
+ unsigned int cpp_mask;
+ unsigned int reserved;
+ unsigned int xmss_pubkey_high;
+ unsigned int xmss_pubkey_low;
+ unsigned int xmss_sig_high;
+ unsigned int xmss_sig_low;
+ unsigned int reserved2[2];
};
struct icp_qat_auth_chunk {
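The added descriptor fields split the 64-bit DMA addresses of the XMSS public key and signature into high/low halves, matching the neighbouring img_* fields. A sketch of how they would be filled (variable names are illustrative):

desc->xmss_pubkey_high = upper_32_bits(xmss_pubkey_dma);
desc->xmss_pubkey_low = lower_32_bits(xmss_pubkey_dma);
desc->xmss_sig_high = upper_32_bits(xmss_sig_dma);
desc->xmss_sig_low = lower_32_bits(xmss_sig_dma);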
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
index a6e02405d402..8b123472b71c 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
@@ -8,6 +8,7 @@
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
+#include "adf_dc.h"
#include "qat_bl.h"
#include "qat_comp_req.h"
#include "qat_compression.h"
@@ -145,9 +146,7 @@ static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
return -EINVAL;
ctx->inst = inst;
- ctx->inst->build_deflate_ctx(ctx->comp_ctx);
-
- return 0;
+ return qat_comp_build_ctx(inst->accel_dev, ctx->comp_ctx, QAT_DEFLATE);
}
static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
@@ -241,13 +240,13 @@ static struct acomp_alg qat_acomp[] = { {
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct qat_compression_ctx),
+ .cra_reqsize = sizeof(struct qat_compression_req),
.cra_module = THIS_MODULE,
},
.init = qat_comp_alg_init_tfm,
.exit = qat_comp_alg_exit_tfm,
.compress = qat_comp_alg_compress,
.decompress = qat_comp_alg_decompress,
- .reqsize = sizeof(struct qat_compression_req),
}};
int qat_comp_algs_register(void)
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
index 7842a9f22178..c285b45b8679 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_compression.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c
@@ -144,7 +144,6 @@ static int qat_compression_create_instances(struct adf_accel_dev *accel_dev)
inst->id = i;
atomic_set(&inst->refctr, 0);
inst->accel_dev = accel_dev;
- inst->build_deflate_ctx = GET_DC_OPS(accel_dev)->build_deflate_ctx;
snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.h b/drivers/crypto/intel/qat/qat_common/qat_compression.h
index aebac2302dcf..5ced3ed0e5ea 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_compression.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.h
@@ -20,7 +20,6 @@ struct qat_compression_instance {
atomic_t refctr;
struct qat_instance_backlog backlog;
struct adf_dc_data *dc_data;
- void (*build_deflate_ctx)(void *ctx);
};
static inline bool adf_hw_dev_has_compression(struct adf_accel_dev *accel_dev)
diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c
index ef8a9cf74f0c..da4eca6e1633 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
@@ -694,16 +694,17 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->pci_dev = pci_info->pci_dev;
switch (handle->pci_dev->device) {
- case ADF_4XXX_PCI_DEVICE_ID:
- case ADF_401XX_PCI_DEVICE_ID:
- case ADF_402XX_PCI_DEVICE_ID:
- case ADF_420XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_4XXX:
+ case PCI_DEVICE_ID_INTEL_QAT_401XX:
+ case PCI_DEVICE_ID_INTEL_QAT_402XX:
+ case PCI_DEVICE_ID_INTEL_QAT_420XX:
+ case PCI_DEVICE_ID_INTEL_QAT_6XXX:
handle->chip_info->mmp_sram_size = 0;
handle->chip_info->nn = false;
handle->chip_info->lm2lm3 = true;
handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X;
handle->chip_info->icp_rst_csr = ICP_RESET_CPP0;
- if (handle->pci_dev->device == ADF_420XX_PCI_DEVICE_ID)
+ if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_420XX)
handle->chip_info->icp_rst_mask = 0x100155;
else
handle->chip_info->icp_rst_mask = 0x100015;
@@ -712,6 +713,8 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->chip_info->wakeup_event_val = 0x80000000;
handle->chip_info->fw_auth = true;
handle->chip_info->css_3k = true;
+ if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_6XXX)
+ handle->chip_info->dual_sign = true;
handle->chip_info->tgroup_share_ustore = true;
handle->chip_info->fcu_ctl_csr = FCU_CONTROL_4XXX;
handle->chip_info->fcu_sts_csr = FCU_STATUS_4XXX;
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index 7678a93c6853..21d652a1c8ef 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -1,11 +1,16 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
+
+#define pr_fmt(fmt) "QAT: " fmt
+
#include <linux/align.h>
+#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci_ids.h>
+#include <linux/wordpart.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
@@ -59,7 +64,7 @@ static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
unsigned int i;
if (!ae_data) {
- pr_err("QAT: bad argument, ae_data is NULL\n");
+ pr_err("bad argument, ae_data is NULL\n");
return -EINVAL;
}
@@ -86,12 +91,11 @@ static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
int min = hdr->min_ver & 0xff;
if (hdr->file_id != ICP_QAT_UOF_FID) {
- pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
+ pr_err("Invalid header 0x%x\n", hdr->file_id);
return -EINVAL;
}
if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
- pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
- maj, min);
+ pr_err("bad UOF version, major 0x%x, minor 0x%x\n", maj, min);
return -EINVAL;
}
return 0;
@@ -103,20 +107,19 @@ static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
int min = suof_hdr->min_ver & 0xff;
if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
- pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
+ pr_err("invalid header 0x%x\n", suof_hdr->file_id);
return -EINVAL;
}
if (suof_hdr->fw_type != 0) {
- pr_err("QAT: unsupported firmware type\n");
+ pr_err("unsupported firmware type\n");
return -EINVAL;
}
if (suof_hdr->num_chunks <= 0x1) {
- pr_err("QAT: SUOF chunk amount is incorrect\n");
+ pr_err("SUOF chunk amount is incorrect\n");
return -EINVAL;
}
if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
- pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
- maj, min);
+ pr_err("bad SUOF version, major 0x%x, minor 0x%x\n", maj, min);
return -EINVAL;
}
return 0;
@@ -223,24 +226,24 @@ static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
char *str;
if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
- pr_err("QAT: initmem is out of range");
+ pr_err("initmem is out of range");
return -EINVAL;
}
if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
- pr_err("QAT: Memory scope for init_mem error\n");
+ pr_err("Memory scope for init_mem error\n");
return -EINVAL;
}
str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
if (!str) {
- pr_err("QAT: AE name assigned in UOF init table is NULL\n");
+ pr_err("AE name assigned in UOF init table is NULL\n");
return -EINVAL;
}
if (qat_uclo_parse_num(str, ae)) {
- pr_err("QAT: Parse num for AE number failed\n");
+ pr_err("Parse num for AE number failed\n");
return -EINVAL;
}
if (*ae >= ICP_QAT_UCLO_MAX_AE) {
- pr_err("QAT: ae %d out of range\n", *ae);
+ pr_err("ae %d out of range\n", *ae);
return -EINVAL;
}
return 0;
@@ -356,8 +359,7 @@ static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
return -EINVAL;
break;
default:
- pr_err("QAT: initmem region error. region type=0x%x\n",
- init_mem->region);
+ pr_err("initmem region error. region type=0x%x\n", init_mem->region);
return -EINVAL;
}
return 0;
@@ -431,7 +433,7 @@ static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
if (qat_hal_batch_wr_lm(handle, ae,
obj_handle->lm_init_tab[ae])) {
- pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
+ pr_err("fail to batch init lmem for AE %d\n", ae);
return -EINVAL;
}
qat_uclo_cleanup_batch_init_list(handle,
@@ -539,26 +541,26 @@ qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
code_page->imp_expr_tab_offset);
if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
imp_expr_tab->entry_num) {
- pr_err("QAT: UOF can't contain imported variable to be parsed\n");
+ pr_err("UOF can't contain imported variable to be parsed\n");
return -EINVAL;
}
neigh_reg_tab = (struct icp_qat_uof_objtable *)
(encap_uof_obj->beg_uof +
code_page->neigh_reg_tab_offset);
if (neigh_reg_tab->entry_num) {
- pr_err("QAT: UOF can't contain neighbor register table\n");
+ pr_err("UOF can't contain neighbor register table\n");
return -EINVAL;
}
if (image->numpages > 1) {
- pr_err("QAT: UOF can't contain multiple pages\n");
+ pr_err("UOF can't contain multiple pages\n");
return -EINVAL;
}
if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
- pr_err("QAT: UOF can't use shared control store feature\n");
+ pr_err("UOF can't use shared control store feature\n");
return -EFAULT;
}
if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
- pr_err("QAT: UOF can't use reloadable feature\n");
+ pr_err("UOF can't use reloadable feature\n");
return -EFAULT;
}
return 0;
@@ -677,7 +679,7 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
}
}
if (!mflag) {
- pr_err("QAT: uimage uses AE not set\n");
+ pr_err("uimage uses AE not set\n");
return -EINVAL;
}
return 0;
@@ -731,14 +733,15 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
return ICP_QAT_AC_C62X_DEV_TYPE;
case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
return ICP_QAT_AC_C3XXX_DEV_TYPE;
- case ADF_4XXX_PCI_DEVICE_ID:
- case ADF_401XX_PCI_DEVICE_ID:
- case ADF_402XX_PCI_DEVICE_ID:
- case ADF_420XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_4XXX:
+ case PCI_DEVICE_ID_INTEL_QAT_401XX:
+ case PCI_DEVICE_ID_INTEL_QAT_402XX:
+ case PCI_DEVICE_ID_INTEL_QAT_420XX:
return ICP_QAT_AC_4XXX_A_DEV_TYPE;
+ case PCI_DEVICE_ID_INTEL_QAT_6XXX:
+ return ICP_QAT_AC_6XXX_DEV_TYPE;
default:
- pr_err("QAT: unsupported device 0x%x\n",
- handle->pci_dev->device);
+ pr_err("unsupported device 0x%x\n", handle->pci_dev->device);
return 0;
}
}
@@ -748,7 +751,7 @@ static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
unsigned int maj_ver, prod_type = obj_handle->prod_type;
if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
- pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
+ pr_err("UOF type 0x%x doesn't match with platform 0x%x\n",
obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
prod_type);
return -EINVAL;
@@ -756,7 +759,7 @@ static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
maj_ver = obj_handle->prod_rev & 0xff;
if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver ||
obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) {
- pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
+ pr_err("UOF majVer 0x%x out of range\n", maj_ver);
return -EINVAL;
}
return 0;
@@ -799,7 +802,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
case ICP_NEIGH_REL:
return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
default:
- pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
+ pr_err("UOF uses not supported reg type 0x%x\n", reg_type);
return -EFAULT;
}
return 0;
@@ -835,8 +838,7 @@ static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
case ICP_QAT_UOF_INIT_REG_CTX:
/* check if ctx is appropriate for the ctxMode */
if (!((1 << init_regsym->ctx) & ctx_mask)) {
- pr_err("QAT: invalid ctx num = 0x%x\n",
- init_regsym->ctx);
+ pr_err("invalid ctx num = 0x%x\n", init_regsym->ctx);
return -EINVAL;
}
qat_uclo_init_reg(handle, ae,
@@ -848,10 +850,10 @@ static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
exp_res);
break;
case ICP_QAT_UOF_INIT_EXPR:
- pr_err("QAT: INIT_EXPR feature not supported\n");
+ pr_err("INIT_EXPR feature not supported\n");
return -EINVAL;
case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
- pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
+ pr_err("INIT_EXPR_ENDIAN_SWAP feature not supported\n");
return -EINVAL;
default:
break;
@@ -871,7 +873,7 @@ static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
return 0;
if (obj_handle->init_mem_tab.entry_num) {
if (qat_uclo_init_memory(handle)) {
- pr_err("QAT: initialize memory failed\n");
+ pr_err("initialize memory failed\n");
return -EINVAL;
}
}
@@ -900,40 +902,40 @@ static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
mode = ICP_QAT_CTX_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_ctx_mode(handle, ae, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
+ pr_err("qat_hal_set_ae_ctx_mode error\n");
return ret;
}
if (handle->chip_info->nn) {
mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_nn_mode(handle, ae, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
+ pr_err("qat_hal_set_ae_nn_mode error\n");
return ret;
}
}
mode = ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
+ pr_err("qat_hal_set_ae_lm_mode LMEM0 error\n");
return ret;
}
mode = ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
+ pr_err("qat_hal_set_ae_lm_mode LMEM1 error\n");
return ret;
}
if (handle->chip_info->lm2lm3) {
mode = ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n");
+ pr_err("qat_hal_set_ae_lm_mode LMEM2 error\n");
return ret;
}
mode = ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n");
+ pr_err("qat_hal_set_ae_lm_mode LMEM3 error\n");
return ret;
}
mode = ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode);
@@ -997,7 +999,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
obj_handle->prod_rev = PID_MAJOR_REV |
(PID_MINOR_REV & handle->hal_handle->revision_id);
if (qat_uclo_check_uof_compat(obj_handle)) {
- pr_err("QAT: UOF incompatible\n");
+ pr_err("UOF incompatible\n");
return -EINVAL;
}
obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64),
@@ -1008,7 +1010,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
if (!obj_handle->obj_hdr->file_buff ||
!qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
&obj_handle->str_table)) {
- pr_err("QAT: UOF doesn't have effective images\n");
+ pr_err("UOF doesn't have effective images\n");
goto out_err;
}
obj_handle->uimage_num =
@@ -1017,7 +1019,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
if (!obj_handle->uimage_num)
goto out_err;
if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
- pr_err("QAT: Bad object\n");
+ pr_err("Bad object\n");
goto out_check_uof_aemask_err;
}
qat_uclo_init_uword_num(handle);
@@ -1034,6 +1036,36 @@ out_err:
return -EFAULT;
}
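+/*
+ * Length of the signed-image header that precedes the signature payload:
+ * dual-signed (RSA + XMSS) images use a fixed opaque-data length, while
+ * legacy images fall back to the device's AE image offset.
+ */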
+static unsigned int qat_uclo_simg_hdr2sign_len(struct icp_qat_fw_loader_handle *handle)
+{
+ if (handle->chip_info->dual_sign)
+ return ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN;
+
+ return ICP_QAT_AE_IMG_OFFSET(handle);
+}
+
+static unsigned int qat_uclo_simg_hdr2cont_len(struct icp_qat_fw_loader_handle *handle)
+{
+ if (handle->chip_info->dual_sign)
+ return ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN + ICP_QAT_DUALSIGN_MISC_INFO_LEN;
+
+ return ICP_QAT_AE_IMG_OFFSET(handle);
+}
+
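+/*
+ * For dual-signed images the firmware type is stored as a byte at a fixed
+ * offset past the opaque header; legacy images keep it in the CSS header.
+ */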
+static unsigned int qat_uclo_simg_fw_type(struct icp_qat_fw_loader_handle *handle, void *img_ptr)
+{
+ struct icp_qat_css_hdr *hdr = img_ptr;
+ char *fw_hdr = img_ptr;
+ unsigned int offset;
+
+ if (handle->chip_info->dual_sign) {
+ offset = qat_uclo_simg_hdr2sign_len(handle) + ICP_QAT_DUALSIGN_FW_TYPE_LEN;
+ return *(fw_hdr + offset);
+ }
+
+ return hdr->fw_type;
+}
+
static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_filehdr *suof_ptr,
int suof_size)
@@ -1050,7 +1082,7 @@ static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
min_ver_offset);
if (check_sum != suof_ptr->check_sum) {
- pr_err("QAT: incorrect SUOF checksum\n");
+ pr_err("incorrect SUOF checksum\n");
return -EINVAL;
}
suof_handle->check_sum = suof_ptr->check_sum;
@@ -1065,9 +1097,9 @@ static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
- unsigned int offset = ICP_QAT_AE_IMG_OFFSET(handle);
- struct icp_qat_simg_ae_mode *ae_mode;
+ unsigned int offset = qat_uclo_simg_hdr2cont_len(handle);
struct icp_qat_suof_objhdr *suof_objhdr;
+ struct icp_qat_simg_ae_mode *ae_mode;
suof_img_hdr->simg_buf = (suof_handle->suof_buf +
suof_chunk_hdr->offset +
@@ -1112,14 +1144,13 @@ static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
prod_rev = PID_MAJOR_REV |
(PID_MINOR_REV & handle->hal_handle->revision_id);
if (img_ae_mode->dev_type != prod_type) {
- pr_err("QAT: incompatible product type %x\n",
- img_ae_mode->dev_type);
+ pr_err("incompatible product type %x\n", img_ae_mode->dev_type);
return -EINVAL;
}
maj_ver = prod_rev & 0xff;
if (maj_ver > img_ae_mode->devmax_ver ||
maj_ver < img_ae_mode->devmin_ver) {
- pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
+ pr_err("incompatible device majver 0x%x\n", maj_ver);
return -EINVAL;
}
return 0;
@@ -1162,7 +1193,7 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_img_hdr img_header;
if (!suof_ptr || suof_size == 0) {
- pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
+ pr_err("input parameter SUOF pointer/size is NULL\n");
return -EINVAL;
}
if (qat_uclo_check_suof_format(suof_ptr))
@@ -1205,7 +1236,6 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
}
#define ADD_ADDR(high, low) ((((u64)high) << 32) + low)
-#define BITS_IN_DWORD 32
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc *desc)
@@ -1223,7 +1253,7 @@ static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi;
fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo;
- SET_CAP_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD));
+ SET_CAP_CSR(handle, fcu_dram_hi_csr, bus_addr >> BITS_PER_TYPE(u32));
SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr);
SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
@@ -1237,7 +1267,7 @@ static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
return 0;
} while (retry++ < FW_AUTH_MAX_RETRY);
auth_fail:
- pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n",
+	pr_err("authentication error (FCU_STATUS = 0x%x), retry = %d\n",
fcu_sts & FCU_AUTH_STS_MASK, retry);
return -EINVAL;
}
@@ -1273,14 +1303,13 @@ static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
fcu_sts_csr = handle->chip_info->fcu_sts_csr;
fcu_loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
} else {
- pr_err("Chip 0x%x doesn't support broadcast load\n",
- handle->pci_dev->device);
+ pr_err("Chip 0x%x doesn't support broadcast load\n", handle->pci_dev->device);
return -EINVAL;
}
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
if (qat_hal_check_ae_active(handle, (unsigned char)ae)) {
- pr_err("QAT: Broadcast load failed. AE is not enabled or active.\n");
+ pr_err("Broadcast load failed. AE is not enabled or active.\n");
return -EINVAL;
}
@@ -1312,7 +1341,7 @@ static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
} while (retry++ < FW_AUTH_MAX_RETRY);
if (retry > FW_AUTH_MAX_RETRY) {
- pr_err("QAT: broadcast load failed timeout %d\n", retry);
+		pr_err("broadcast load failed, timeout %d\n", retry);
return -EINVAL;
}
}
@@ -1366,24 +1395,38 @@ static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
}
static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
- char *image, unsigned int size,
+ void *image, unsigned int size,
unsigned int fw_type)
{
char *fw_type_name = fw_type ? "MMP" : "AE";
unsigned int css_dword_size = sizeof(u32);
+ unsigned int header_len, simg_type;
+ struct icp_qat_css_hdr *css_hdr;
if (handle->chip_info->fw_auth) {
- struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
- unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle);
+ header_len = qat_uclo_simg_hdr2sign_len(handle);
+ simg_type = qat_uclo_simg_fw_type(handle, image);
+ css_hdr = image;
+
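+		/*
+		 * Dual-signed images carry fixed header fields; legacy images
+		 * are validated against the computed header length and total
+		 * image size.
+		 */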
+ if (handle->chip_info->dual_sign) {
+ if (css_hdr->module_type != ICP_QAT_DUALSIGN_MODULE_TYPE)
+ goto err;
+ if (css_hdr->header_len != ICP_QAT_DUALSIGN_HDR_LEN)
+ goto err;
+ if (css_hdr->header_ver != ICP_QAT_DUALSIGN_HDR_VER)
+ goto err;
+ } else {
+ if (css_hdr->header_len * css_dword_size != header_len)
+ goto err;
+ if (css_hdr->size * css_dword_size != size)
+ goto err;
+ if (size <= header_len)
+ goto err;
+ }
- if ((css_hdr->header_len * css_dword_size) != header_len)
- goto err;
- if ((css_hdr->size * css_dword_size) != size)
- goto err;
- if (fw_type != css_hdr->fw_type)
- goto err;
- if (size <= header_len)
+ if (fw_type != simg_type)
goto err;
+
size -= header_len;
}
@@ -1397,123 +1440,95 @@ static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN)
goto err;
} else {
- pr_err("QAT: Unsupported firmware type\n");
+ pr_err("Unsupported firmware type\n");
return -EINVAL;
}
return 0;
err:
- pr_err("QAT: Invalid %s firmware image\n", fw_type_name);
+ pr_err("Invalid %s firmware image\n", fw_type_name);
return -EINVAL;
}
-static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
- char *image, unsigned int size,
- struct icp_qat_fw_auth_desc **desc)
+static int qat_uclo_build_auth_desc_RSA(struct icp_qat_fw_loader_handle *handle,
+ char *image, unsigned int size,
+ struct icp_firml_dram_desc *dram_desc,
+ unsigned int fw_type, struct icp_qat_fw_auth_desc **desc)
{
struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
- struct icp_qat_fw_auth_desc *auth_desc;
- struct icp_qat_auth_chunk *auth_chunk;
- u64 virt_addr, bus_addr, virt_base;
- unsigned int simg_offset = sizeof(*auth_chunk);
struct icp_qat_simg_ae_mode *simg_ae_mode;
- struct icp_firml_dram_desc img_desc;
- int ret;
-
- ret = qat_uclo_simg_alloc(handle, &img_desc, ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN);
- if (ret) {
- pr_err("QAT: error, allocate continuous dram fail\n");
- return ret;
- }
-
- if (!IS_ALIGNED(img_desc.dram_size, 8) || !img_desc.dram_bus_addr) {
- pr_debug("QAT: invalid address\n");
- qat_uclo_simg_free(handle, &img_desc);
- return -EINVAL;
- }
+ struct icp_qat_fw_auth_desc *auth_desc;
+ char *virt_addr, *virt_base;
+ u64 bus_addr;
- auth_chunk = img_desc.dram_base_addr_v;
- auth_chunk->chunk_size = img_desc.dram_size;
- auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
- virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
- bus_addr = img_desc.dram_bus_addr + simg_offset;
- auth_desc = img_desc.dram_base_addr_v;
- auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
- auth_desc->css_hdr_low = (unsigned int)bus_addr;
+ virt_base = dram_desc->dram_base_addr_v;
+ virt_base += sizeof(struct icp_qat_auth_chunk);
+ bus_addr = dram_desc->dram_bus_addr + sizeof(struct icp_qat_auth_chunk);
+ auth_desc = dram_desc->dram_base_addr_v;
+ auth_desc->css_hdr_high = upper_32_bits(bus_addr);
+ auth_desc->css_hdr_low = lower_32_bits(bus_addr);
virt_addr = virt_base;
- memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
+ memcpy(virt_addr, image, sizeof(*css_hdr));
/* pub key */
bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
sizeof(*css_hdr);
virt_addr = virt_addr + sizeof(*css_hdr);
- auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
- auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
+ auth_desc->fwsk_pub_high = upper_32_bits(bus_addr);
+ auth_desc->fwsk_pub_low = lower_32_bits(bus_addr);
- memcpy((void *)(uintptr_t)virt_addr,
- (void *)(image + sizeof(*css_hdr)),
- ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
+ memcpy(virt_addr, image + sizeof(*css_hdr), ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
/* padding */
memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));
/* exponent */
- memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
- ICP_QAT_CSS_FWSK_PAD_LEN(handle)),
- (void *)(image + sizeof(*css_hdr) +
- ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
- sizeof(unsigned int));
+ memcpy(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
+ ICP_QAT_CSS_FWSK_PAD_LEN(handle), image + sizeof(*css_hdr) +
+ ICP_QAT_CSS_FWSK_MODULUS_LEN(handle), sizeof(unsigned int));
/* signature */
bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
auth_desc->fwsk_pub_low) +
ICP_QAT_CSS_FWSK_PUB_LEN(handle);
virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle);
- auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
- auth_desc->signature_low = (unsigned int)bus_addr;
+ auth_desc->signature_high = upper_32_bits(bus_addr);
+ auth_desc->signature_low = lower_32_bits(bus_addr);
- memcpy((void *)(uintptr_t)virt_addr,
- (void *)(image + sizeof(*css_hdr) +
- ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
- ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)),
- ICP_QAT_CSS_SIGNATURE_LEN(handle));
+ memcpy(virt_addr, image + sizeof(*css_hdr) + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
+ ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle), ICP_QAT_CSS_SIGNATURE_LEN(handle));
bus_addr = ADD_ADDR(auth_desc->signature_high,
auth_desc->signature_low) +
ICP_QAT_CSS_SIGNATURE_LEN(handle);
virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
- auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
- auth_desc->img_low = (unsigned int)bus_addr;
- auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle);
- if (bus_addr + auth_desc->img_len > img_desc.dram_bus_addr +
- ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) {
- pr_err("QAT: insufficient memory size for authentication data\n");
- qat_uclo_simg_free(handle, &img_desc);
+ auth_desc->img_high = upper_32_bits(bus_addr);
+ auth_desc->img_low = lower_32_bits(bus_addr);
+ auth_desc->img_len = size - qat_uclo_simg_hdr2sign_len(handle);
+ if (bus_addr + auth_desc->img_len >
+ dram_desc->dram_bus_addr + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) {
+ pr_err("insufficient memory size for authentication data\n");
+ qat_uclo_simg_free(handle, dram_desc);
return -ENOMEM;
}
- memcpy((void *)(uintptr_t)virt_addr,
- (void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)),
- auth_desc->img_len);
+ memcpy(virt_addr, image + qat_uclo_simg_hdr2sign_len(handle), auth_desc->img_len);
virt_addr = virt_base;
/* AE firmware */
- if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
- CSS_AE_FIRMWARE) {
+ if (fw_type == CSS_AE_FIRMWARE) {
auth_desc->img_ae_mode_data_high = auth_desc->img_high;
auth_desc->img_ae_mode_data_low = auth_desc->img_low;
bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
auth_desc->img_ae_mode_data_low) +
sizeof(struct icp_qat_simg_ae_mode);
- auth_desc->img_ae_init_data_high = (unsigned int)
- (bus_addr >> BITS_IN_DWORD);
- auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
+ auth_desc->img_ae_init_data_high = upper_32_bits(bus_addr);
+ auth_desc->img_ae_init_data_low = lower_32_bits(bus_addr);
bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
- auth_desc->img_ae_insts_high = (unsigned int)
- (bus_addr >> BITS_IN_DWORD);
- auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
+ auth_desc->img_ae_insts_high = upper_32_bits(bus_addr);
+ auth_desc->img_ae_insts_low = lower_32_bits(bus_addr);
virt_addr += sizeof(struct icp_qat_css_hdr);
virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle);
virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
@@ -1527,6 +1542,141 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
return 0;
}
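+/*
+ * Build the authentication descriptor for a dual-signed image. The chunk
+ * layout is: opaque header, RSA public key (modulus + padding + exponent),
+ * RSA signature, XMSS public key, XMSS signature, then the signed data.
+ */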
+static int qat_uclo_build_auth_desc_dualsign(struct icp_qat_fw_loader_handle *handle,
+ char *image, unsigned int size,
+ struct icp_firml_dram_desc *dram_desc,
+ unsigned int fw_type,
+ struct icp_qat_fw_auth_desc **desc)
+{
+ struct icp_qat_simg_ae_mode *simg_ae_mode;
+ struct icp_qat_fw_auth_desc *auth_desc;
+ unsigned int chunk_offset, img_offset;
+ u64 bus_addr, addr;
+ char *virt_addr;
+
+ virt_addr = dram_desc->dram_base_addr_v;
+ virt_addr += sizeof(struct icp_qat_auth_chunk);
+ bus_addr = dram_desc->dram_bus_addr + sizeof(struct icp_qat_auth_chunk);
+
+ auth_desc = dram_desc->dram_base_addr_v;
+ auth_desc->img_len = size - qat_uclo_simg_hdr2sign_len(handle);
+ auth_desc->css_hdr_high = upper_32_bits(bus_addr);
+ auth_desc->css_hdr_low = lower_32_bits(bus_addr);
+ memcpy(virt_addr, image, ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN);
+
+ img_offset = ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN;
+ chunk_offset = ICP_QAT_DUALSIGN_OPAQUE_HDR_ALIGN_LEN;
+
+ /* RSA pub key */
+ addr = bus_addr + chunk_offset;
+ auth_desc->fwsk_pub_high = upper_32_bits(addr);
+ auth_desc->fwsk_pub_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
+
+ img_offset += ICP_QAT_CSS_FWSK_MODULUS_LEN(handle);
+ chunk_offset += ICP_QAT_CSS_FWSK_MODULUS_LEN(handle);
+ /* RSA padding */
+ memset(virt_addr + chunk_offset, 0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));
+
+ chunk_offset += ICP_QAT_CSS_FWSK_PAD_LEN(handle);
+ /* RSA exponent */
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle));
+
+ img_offset += ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
+ chunk_offset += ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
+ /* RSA signature */
+ addr = bus_addr + chunk_offset;
+ auth_desc->signature_high = upper_32_bits(addr);
+ auth_desc->signature_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_SIGNATURE_LEN(handle));
+
+ img_offset += ICP_QAT_CSS_SIGNATURE_LEN(handle);
+ chunk_offset += ICP_QAT_CSS_SIGNATURE_LEN(handle);
+ /* XMSS pubkey */
+ addr = bus_addr + chunk_offset;
+ auth_desc->xmss_pubkey_high = upper_32_bits(addr);
+ auth_desc->xmss_pubkey_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN);
+
+ img_offset += ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN;
+ chunk_offset += ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN;
+ /* XMSS signature */
+ addr = bus_addr + chunk_offset;
+ auth_desc->xmss_sig_high = upper_32_bits(addr);
+ auth_desc->xmss_sig_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_DUALSIGN_XMSS_SIG_LEN);
+
+ img_offset += ICP_QAT_DUALSIGN_XMSS_SIG_LEN;
+ chunk_offset += ICP_QAT_DUALSIGN_XMSS_SIG_ALIGN_LEN;
+
+ if (dram_desc->dram_size < (chunk_offset + auth_desc->img_len)) {
+		pr_err("auth chunk memory too small to store data\n");
+ return -ENOMEM;
+ }
+
+ /* Signed data */
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_high = upper_32_bits(addr);
+ auth_desc->img_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, auth_desc->img_len);
+
+ chunk_offset += ICP_QAT_DUALSIGN_MISC_INFO_LEN;
+ /* AE firmware */
+ if (fw_type == CSS_AE_FIRMWARE) {
+ /* AE mode data */
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_ae_mode_data_high = upper_32_bits(addr);
+ auth_desc->img_ae_mode_data_low = lower_32_bits(addr);
+ simg_ae_mode =
+ (struct icp_qat_simg_ae_mode *)(virt_addr + chunk_offset);
+ auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask;
+
+ chunk_offset += sizeof(struct icp_qat_simg_ae_mode);
+ /* AE init seq */
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_ae_init_data_high = upper_32_bits(addr);
+ auth_desc->img_ae_init_data_low = lower_32_bits(addr);
+
+ chunk_offset += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
+ /* AE instructions */
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_ae_insts_high = upper_32_bits(addr);
+ auth_desc->img_ae_insts_low = lower_32_bits(addr);
+ } else {
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_ae_insts_high = upper_32_bits(addr);
+ auth_desc->img_ae_insts_low = lower_32_bits(addr);
+ }
+ *desc = auth_desc;
+ return 0;
+}
+
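+/*
+ * Allocate the shared DRAM chunk and dispatch to the dual-sign or RSA-only
+ * authentication descriptor builder.
+ */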
+static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
+ char *image, unsigned int size,
+ struct icp_qat_fw_auth_desc **desc)
+{
+ struct icp_qat_auth_chunk *auth_chunk;
+ struct icp_firml_dram_desc img_desc;
+ unsigned int simg_fw_type;
+ int ret;
+
+ ret = qat_uclo_simg_alloc(handle, &img_desc, ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN);
+ if (ret)
+ return ret;
+
+ simg_fw_type = qat_uclo_simg_fw_type(handle, image);
+ auth_chunk = img_desc.dram_base_addr_v;
+ auth_chunk->chunk_size = img_desc.dram_size;
+ auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
+
+ if (handle->chip_info->dual_sign)
+ return qat_uclo_build_auth_desc_dualsign(handle, image, size, &img_desc,
+ simg_fw_type, desc);
+
+ return qat_uclo_build_auth_desc_RSA(handle, image, size, &img_desc,
+ simg_fw_type, desc);
+}
+
static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc *desc)
{
@@ -1546,7 +1696,7 @@ static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
if (!((desc->ae_mask >> i) & 0x1))
continue;
if (qat_hal_check_ae_active(handle, i)) {
- pr_err("QAT: AE %d is active\n", i);
+ pr_err("AE %d is active\n", i);
return -EINVAL;
}
SET_CAP_CSR(handle, fcu_ctl_csr,
@@ -1566,7 +1716,7 @@ static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
}
} while (retry++ < FW_AUTH_MAX_RETRY);
if (retry > FW_AUTH_MAX_RETRY) {
- pr_err("QAT: firmware load failed timeout %x\n", retry);
+			pr_err("firmware load failed, timeout %x\n", retry);
return -EINVAL;
}
}
@@ -1584,7 +1734,7 @@ static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
handle->sobj_handle = suof_handle;
if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
qat_uclo_del_suof(handle);
- pr_err("QAT: map SUOF failed\n");
+ pr_err("map SUOF failed\n");
return -EINVAL;
}
return 0;
@@ -1608,7 +1758,7 @@ int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
qat_uclo_ummap_auth_fw(handle, &desc);
} else {
if (handle->chip_info->mmp_sram_size < mem_size) {
- pr_err("QAT: MMP size is too large: 0x%x\n", mem_size);
+ pr_err("MMP size is too large: 0x%x\n", mem_size);
return -EFBIG;
}
qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
@@ -1634,7 +1784,7 @@ static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
ICP_QAT_UOF_OBJS);
if (!objhdl->obj_hdr) {
- pr_err("QAT: object file chunk is null\n");
+ pr_err("object file chunk is null\n");
goto out_objhdr_err;
}
handle->obj_handle = objhdl;
@@ -1669,7 +1819,7 @@ static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle,
checksum = qat_uclo_calc_str_checksum(&mof_ptr->min_ver,
min_ver_offset);
if (checksum != mof_ptr->checksum) {
- pr_err("QAT: incorrect MOF checksum\n");
+ pr_err("incorrect MOF checksum\n");
return -EINVAL;
}
@@ -1705,7 +1855,7 @@ static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
}
}
- pr_err("QAT: object %s is not found inside MOF\n", obj_name);
+ pr_err("object %s is not found inside MOF\n", obj_name);
return -EINVAL;
}
@@ -1722,7 +1872,7 @@ static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle,
ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
obj = mobj_handle->sobjs_hdr + obj_chunkhdr->offset;
} else {
- pr_err("QAT: unsupported chunk id\n");
+ pr_err("unsupported chunk id\n");
return -EINVAL;
}
mobj_hdr->obj_buf = obj;
@@ -1783,7 +1933,7 @@ static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
}
if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunk) {
- pr_err("QAT: inconsistent UOF/SUOF chunk amount\n");
+ pr_err("inconsistent UOF/SUOF chunk amount\n");
return -EINVAL;
}
return 0;
@@ -1824,17 +1974,16 @@ static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr)
int min = mof_hdr->min_ver & 0xff;
if (mof_hdr->file_id != ICP_QAT_MOF_FID) {
- pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id);
+ pr_err("invalid header 0x%x\n", mof_hdr->file_id);
return -EINVAL;
}
if (mof_hdr->num_chunks <= 0x1) {
- pr_err("QAT: MOF chunk amount is incorrect\n");
+ pr_err("MOF chunk amount is incorrect\n");
return -EINVAL;
}
if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) {
- pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n",
- maj, min);
+ pr_err("bad MOF version, major 0x%x, minor 0x%x\n", maj, min);
return -EINVAL;
}
return 0;
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
index 5bf5c890c362..1427fe76f171 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
qat_dh895xcc-y := adf_drv.o adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index e48bcf1818cd..5b4bd0ba1ccb 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -4,7 +4,6 @@
#include <adf_admin.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -24,7 +23,6 @@ static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
static struct adf_hw_device_class dh895xcc_class = {
.name = ADF_DH895XCC_DEVICE_NAME,
.type = DEV_DH895XCC,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
index 07e9d7e52861..b59e0cc49e52 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
@@ -19,24 +19,6 @@
#include <adf_dbgfs.h>
#include "adf_dh895xcc_hw_data.h"
-static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), },
- { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
- .id_table = adf_pci_tbl,
- .name = ADF_DH895XCC_DEVICE_NAME,
- .probe = adf_probe,
- .remove = adf_remove,
- .sriov_configure = adf_sriov_configure,
- .err_handler = &adf_err_handler,
-};
-
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
@@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev)
kfree(accel_dev);
}
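+/* Quiesce the accelerator on system shutdown. */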
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_DH895XCC_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
static int __init adfdrv_init(void)
{
request_module("intel_qat");
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
index 93f9c81edf09..c2fdb6e0f68f 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
qat_dh895xccvf-y := adf_drv.o adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index f4ee4c2e00da..828456c43b76 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -3,7 +3,6 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -13,7 +12,6 @@
static struct adf_hw_device_class dh895xcciov_class = {
.name = ADF_DH895XCCVF_DEVICE_NAME,
.type = DEV_DH895XCCVF,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index fa08f10e6f3f..9c21f5d835d2 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -94,7 +94,7 @@ static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
- if (engine->chain.first && engine->chain.last)
+ if (engine->chain_hw.first && engine->chain_hw.last)
return mv_cesa_tdma_process(engine, status);
return mv_cesa_std_process(engine, status);
diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index d215a6bed6bc..50ca1039fdaa 100644
--- a/drivers/crypto/marvell/cesa/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -440,8 +440,10 @@ struct mv_cesa_dev {
* SRAM
* @queue: fifo of the pending crypto requests
* @load: engine load counter, useful for load balancing
- * @chain: list of the current tdma descriptors being processed
- * by this engine.
+ * @chain_hw: list of the current tdma descriptors being processed
+ * by the hardware.
+ * @chain_sw: list of the current tdma descriptors that will be
+ * submitted to the hardware.
* @complete_queue: fifo of the processed requests by the engine
*
* Structure storing CESA engine information.
@@ -463,7 +465,8 @@ struct mv_cesa_engine {
struct gen_pool *pool;
struct crypto_queue queue;
atomic_t load;
- struct mv_cesa_tdma_chain chain;
+ struct mv_cesa_tdma_chain chain_hw;
+ struct mv_cesa_tdma_chain chain_sw;
struct list_head complete_queue;
int irq;
};
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index cf62db50f958..48c5c8ea8c43 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -459,6 +459,9 @@ static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_engine *engine;
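+	/* A zero-length request carries no data for the engine; succeed early. */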
+ if (!req->cryptlen)
+ return 0;
+
ret = mv_cesa_skcipher_req_init(req, tmpl);
if (ret)
return ret;
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index f150861ceaf6..6815eddc9068 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -663,7 +663,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
if (ret)
goto err_free_tdma;
- if (iter.src.sg) {
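+	/* Map in more source data only when input remains past the current
+	 * operation offset.
+	 */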
+ if (iter.base.len > iter.src.op_offset) {
/*
* Add all the new data, inserting an operation block and
* launch command between each full SRAM block-worth of
diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
index 388a06e180d6..243305354420 100644
--- a/drivers/crypto/marvell/cesa/tdma.c
+++ b/drivers/crypto/marvell/cesa/tdma.c
@@ -38,6 +38,15 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq)
{
struct mv_cesa_engine *engine = dreq->engine;
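+	/*
+	 * Hand this chain over to the hardware; if it was the head of the
+	 * software chain, reset chain_sw so new requests start a fresh chain.
+	 */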
+ spin_lock_bh(&engine->lock);
+ if (engine->chain_sw.first == dreq->chain.first) {
+ engine->chain_sw.first = NULL;
+ engine->chain_sw.last = NULL;
+ }
+ engine->chain_hw.first = dreq->chain.first;
+ engine->chain_hw.last = dreq->chain.last;
+ spin_unlock_bh(&engine->lock);
+
writel_relaxed(0, engine->regs + CESA_SA_CFG);
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
@@ -96,25 +105,27 @@ void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
struct mv_cesa_req *dreq)
{
- if (engine->chain.first == NULL && engine->chain.last == NULL) {
- engine->chain.first = dreq->chain.first;
- engine->chain.last = dreq->chain.last;
- } else {
- struct mv_cesa_tdma_desc *last;
+ struct mv_cesa_tdma_desc *last = engine->chain_sw.last;
- last = engine->chain.last;
+ /*
+ * Break the DMA chain if the request being queued needs the IV
+	 * regs to be set before launching the request.
+ */
+ if (!last || dreq->chain.first->flags & CESA_TDMA_SET_STATE)
+ engine->chain_sw.first = dreq->chain.first;
+ else {
last->next = dreq->chain.first;
- engine->chain.last = dreq->chain.last;
-
- /*
- * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
- * the last element of the current chain, or if the request
- * being queued needs the IV regs to be set before lauching
- * the request.
- */
- if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
- !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
- last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
+ last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
+ }
+ last = dreq->chain.last;
+ engine->chain_sw.last = last;
+ /*
+ * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
+ * the last element of the current chain.
+ */
+ if (last->flags & CESA_TDMA_BREAK_CHAIN) {
+ engine->chain_sw.first = NULL;
+ engine->chain_sw.last = NULL;
}
}
@@ -127,7 +138,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
tdma_cur = readl(engine->regs + CESA_TDMA_CUR);
- for (tdma = engine->chain.first; tdma; tdma = next) {
+ for (tdma = engine->chain_hw.first; tdma; tdma = next) {
spin_lock_bh(&engine->lock);
next = tdma->next;
spin_unlock_bh(&engine->lock);
@@ -149,12 +160,12 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
&backlog);
/* Re-chaining to the next request */
- engine->chain.first = tdma->next;
+ engine->chain_hw.first = tdma->next;
tdma->next = NULL;
/* If this is the last request, clear the chain */
- if (engine->chain.first == NULL)
- engine->chain.last = NULL;
+ if (engine->chain_hw.first == NULL)
+ engine->chain_hw.last = NULL;
spin_unlock_bh(&engine->lock);
ctx = crypto_tfm_ctx(req->tfm);
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
index 5cae8fafa151..d4aab9e20f2a 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
@@ -6,6 +6,7 @@
#include "otx2_cptvf.h"
#include "otx2_cptlf.h"
#include "cn10k_cpt.h"
+#include "otx2_cpt_common.h"
static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
struct otx2_cptlf_info *lf);
@@ -27,7 +28,7 @@ static struct cpt_hw_ops cn10k_hw_ops = {
static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
struct otx2_cptlf_info *lf)
{
- void __iomem *lmtline = lf->lmtline;
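+	/* LMT lines now live in driver-allocated memory rather than an
+	 * ioremapped BAR, hence the plain memcpy() below.
+	 */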
+ void *lmtline = lf->lfs->lmt_info.base + (lf->slot * LMTLINE_SIZE);
u64 val = (lf->slot & 0x7FF);
u64 tar_addr = 0;
@@ -41,15 +42,49 @@ static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
dma_wmb();
/* Copy CPT command to LMTLINE */
- memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
+ memcpy(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
cn10k_lmt_flush(val, tar_addr);
}
+void cn10k_cpt_lmtst_free(struct pci_dev *pdev, struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_lmt_info *lmt_info = &lfs->lmt_info;
+
+ if (!lmt_info->base)
+ return;
+
+ dma_free_attrs(&pdev->dev, lmt_info->size,
+ lmt_info->base - lmt_info->align,
+ lmt_info->iova - lmt_info->align,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+}
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_lmtst_free, "CRYPTO_DEV_OCTEONTX2_CPT");
+
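+/*
+ * Allocate a physically contiguous LMTST region, then align both the
+ * kernel address and the IOVA to LMTLINE_ALIGN; the alignment slack is
+ * recorded so the original allocation can be freed later.
+ */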
+static int cn10k_cpt_lmtst_alloc(struct pci_dev *pdev,
+ struct otx2_cptlfs_info *lfs, u32 size)
+{
+ struct otx2_lmt_info *lmt_info = &lfs->lmt_info;
+ dma_addr_t align_iova;
+ dma_addr_t iova;
+
+ lmt_info->base = dma_alloc_attrs(&pdev->dev, size, &iova, GFP_KERNEL,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+ if (!lmt_info->base)
+ return -ENOMEM;
+
+ align_iova = ALIGN((u64)iova, LMTLINE_ALIGN);
+ lmt_info->iova = align_iova;
+ lmt_info->align = align_iova - iova;
+ lmt_info->size = size;
+ lmt_info->base += lmt_info->align;
+ return 0;
+}
+
int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
{
struct pci_dev *pdev = cptpf->pdev;
- resource_size_t size;
- u64 lmt_base;
+ u32 size;
+ int ret;
if (!test_bit(CN10K_LMTST, &cptpf->cap_flag)) {
cptpf->lfs.ops = &otx2_hw_ops;
@@ -57,18 +92,19 @@ int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
}
cptpf->lfs.ops = &cn10k_hw_ops;
- lmt_base = readq(cptpf->reg_base + RVU_PF_LMTLINE_ADDR);
- if (!lmt_base) {
- dev_err(&pdev->dev, "PF LMTLINE address not configured\n");
- return -ENOMEM;
+ size = OTX2_CPT_MAX_VFS_NUM * LMTLINE_SIZE + LMTLINE_ALIGN;
+ ret = cn10k_cpt_lmtst_alloc(pdev, &cptpf->lfs, size);
+ if (ret) {
+ dev_err(&pdev->dev, "PF-%d LMTLINE memory allocation failed\n",
+ cptpf->pf_id);
+ return ret;
}
- size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
- size -= ((1 + cptpf->max_vfs) * MBOX_SIZE);
- cptpf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, lmt_base, size);
- if (!cptpf->lfs.lmt_base) {
- dev_err(&pdev->dev,
- "Mapping of PF LMTLINE address failed\n");
- return -ENOMEM;
+
+ ret = otx2_cpt_lmtst_tbl_setup_msg(&cptpf->lfs);
+ if (ret) {
+ dev_err(&pdev->dev, "PF-%d: LMTST Table setup failed\n",
+ cptpf->pf_id);
+ cn10k_cpt_lmtst_free(pdev, &cptpf->lfs);
}
return 0;
@@ -78,18 +114,25 @@ EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, "CRYPTO_DEV_OCTEONTX2_CPT");
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
{
struct pci_dev *pdev = cptvf->pdev;
- resource_size_t offset, size;
+ u32 size;
+ int ret;
if (!test_bit(CN10K_LMTST, &cptvf->cap_flag))
return 0;
- offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
- size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
- /* Map VF LMILINE region */
- cptvf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, offset, size);
- if (!cptvf->lfs.lmt_base) {
- dev_err(&pdev->dev, "Unable to map BAR4\n");
- return -ENOMEM;
+ size = cptvf->lfs.lfs_num * LMTLINE_SIZE + LMTLINE_ALIGN;
+ ret = cn10k_cpt_lmtst_alloc(pdev, &cptvf->lfs, size);
+ if (ret) {
+ dev_err(&pdev->dev, "VF-%d LMTLINE memory allocation failed\n",
+ cptvf->vf_id);
+ return ret;
+ }
+
+ ret = otx2_cpt_lmtst_tbl_setup_msg(&cptvf->lfs);
+ if (ret) {
+ dev_err(&pdev->dev, "VF-%d: LMTST Table setup failed\n",
+ cptvf->vf_id);
+ cn10k_cpt_lmtst_free(pdev, &cptvf->lfs);
}
return 0;
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
index 92be3ecf570f..ea5990048c21 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
@@ -50,6 +50,7 @@ static inline u8 otx2_cpt_get_uc_compcode(union otx2_cpt_res_s *result)
int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf);
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf);
+void cn10k_cpt_lmtst_free(struct pci_dev *pdev, struct otx2_cptlfs_info *lfs);
void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval);
int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
struct cn10k_cpt_errata_ctx *er_ctx);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index c5b7c57574ef..d529bcb03775 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -145,11 +145,8 @@ static inline u64 otx2_cpt_read64(void __iomem *reg_base, u64 blk, u64 slot,
static inline bool is_dev_otx2(struct pci_dev *pdev)
{
- if (pdev->device == OTX2_CPT_PCI_PF_DEVICE_ID ||
- pdev->device == OTX2_CPT_PCI_VF_DEVICE_ID)
- return true;
-
- return false;
+ return pdev->device == OTX2_CPT_PCI_PF_DEVICE_ID ||
+ pdev->device == OTX2_CPT_PCI_VF_DEVICE_ID;
}
static inline bool is_dev_cn10ka(struct pci_dev *pdev)
@@ -159,12 +156,10 @@ static inline bool is_dev_cn10ka(struct pci_dev *pdev)
static inline bool is_dev_cn10ka_ax(struct pci_dev *pdev)
{
- if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
- ((pdev->revision & 0xFF) == 4 || (pdev->revision & 0xFF) == 0x50 ||
- (pdev->revision & 0xff) == 0x51))
- return true;
-
- return false;
+ return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
+ ((pdev->revision & 0xFF) == 4 ||
+ (pdev->revision & 0xFF) == 0x50 ||
+ (pdev->revision & 0xFF) == 0x51);
}
static inline bool is_dev_cn10kb(struct pci_dev *pdev)
@@ -174,11 +169,8 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev)
static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
{
- if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
- (pdev->revision & 0xFF) == 0x54)
- return true;
-
- return false;
+ return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
+ (pdev->revision & 0xFF) == 0x54;
}
static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev,
@@ -192,18 +184,12 @@ static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev,
static inline bool cpt_is_errata_38550_exists(struct pci_dev *pdev)
{
- if (is_dev_otx2(pdev) || is_dev_cn10ka_ax(pdev))
- return true;
-
- return false;
+ return is_dev_otx2(pdev) || is_dev_cn10ka_ax(pdev);
}
static inline bool cpt_feature_sgv2(struct pci_dev *pdev)
{
- if (!is_dev_otx2(pdev) && !is_dev_cn10ka_ax(pdev))
- return true;
-
- return false;
+ return !is_dev_otx2(pdev) && !is_dev_cn10ka_ax(pdev);
}
int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
@@ -223,5 +209,6 @@ int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox);
int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot);
+int otx2_cpt_lmtst_tbl_setup_msg(struct otx2_cptlfs_info *lfs);
#endif /* __OTX2_CPT_COMMON_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index b8b7c8a3c0ca..95f3de3a34eb 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -255,3 +255,28 @@ int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot)
return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cpt_lf_reset_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
+
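+/*
+ * Notify the AF over mailbox that this function uses a local (driver
+ * allocated) LMT region and pass its IOVA for the LMTST table setup.
+ */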
+int otx2_cpt_lmtst_tbl_setup_msg(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_mbox *mbox = lfs->mbox;
+ struct pci_dev *pdev = lfs->pdev;
+ struct lmtst_tbl_setup_req *req;
+
+ req = (struct lmtst_tbl_setup_req *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (!req) {
+ dev_err(&pdev->dev, "RVU MBOX failed to alloc message.\n");
+ return -EFAULT;
+ }
+
+ req->hdr.id = MBOX_MSG_LMTST_TBL_SETUP;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = 0;
+
+ req->use_local_lmt_region = true;
+ req->lmt_iova = lfs->lmt_info.iova;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_lmtst_tbl_setup_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index b5d66afcc030..dc7c7a2650a5 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -433,10 +433,7 @@ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
for (slot = 0; slot < lfs->lfs_num; slot++) {
lfs->lf[slot].lfs = lfs;
lfs->lf[slot].slot = slot;
- if (lfs->lmt_base)
- lfs->lf[slot].lmtline = lfs->lmt_base +
- (slot * LMTLINE_SIZE);
- else
+ if (!lfs->lmt_info.base)
lfs->lf[slot].lmtline = lfs->reg_base +
OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
OTX2_CPT_LMT_LF_LMTLINEX(0));
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index bd8604be2952..6e004a5568d8 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -105,11 +105,19 @@ struct cpt_hw_ops {
gfp_t gfp);
};
+#define LMTLINE_SIZE 128
+#define LMTLINE_ALIGN 128
+struct otx2_lmt_info {
+ void *base;
+ dma_addr_t iova;
+ u32 size;
+ u8 align;
+};
+
struct otx2_cptlfs_info {
/* Registers start address of VF/PF LFs are attached to */
void __iomem *reg_base;
-#define LMTLINE_SIZE 128
- void __iomem *lmt_base;
+ struct otx2_lmt_info lmt_info;
struct pci_dev *pdev; /* Device LFs are attached to */
struct otx2_cptlf_info lf[OTX2_CPT_MAX_LFS_NUM];
struct otx2_mbox *mbox;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 12971300296d..1c5c262af48d 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -639,6 +639,12 @@ static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
/* Disable all cores */
ret = otx2_cpt_disable_all_cores(cptpf);
+ otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
+ &cptpf->afpf_mbox, BLKADDR_CPT0);
+ if (cptpf->has_cpt1)
+ otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
+ cptpf->reg_base, &cptpf->afpf_mbox,
+ BLKADDR_CPT1);
return ret;
}
@@ -786,19 +792,19 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
cptpf->kvf_limits = 1;
- err = cn10k_cptpf_lmtst_init(cptpf);
+ /* Initialize CPT PF device */
+ err = cptpf_device_init(cptpf);
if (err)
goto unregister_intr;
- /* Initialize CPT PF device */
- err = cptpf_device_init(cptpf);
+ err = cn10k_cptpf_lmtst_init(cptpf);
if (err)
goto unregister_intr;
/* Initialize engine groups */
err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
if (err)
- goto unregister_intr;
+ goto free_lmtst;
err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
if (err)
@@ -814,6 +820,8 @@ sysfs_grp_del:
sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group);
cleanup_eng_grps:
otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
+free_lmtst:
+ cn10k_cpt_lmtst_free(pdev, &cptpf->lfs);
unregister_intr:
cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
@@ -848,6 +856,8 @@ static void otx2_cptpf_remove(struct pci_dev *pdev)
cptpf_disable_afpf_mbox_intr(cptpf);
/* Destroy AF-PF mbox */
cptpf_afpf_mbox_destroy(cptpf);
+ /* Free LMTST memory */
+ cn10k_cpt_lmtst_free(pdev, &cptpf->lfs);
pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
index ec1ac7e836a3..12c0e966fa65 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -264,8 +264,6 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
return -ENOENT;
}
- otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
- &cptpf->afpf_mbox, BLKADDR_CPT0);
cptpf->lfs.global_slot = 0;
cptpf->lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
cptpf->lfs.ctx_ilen = cfg_req->ctx_ilen;
@@ -278,9 +276,6 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
if (cptpf->has_cpt1) {
cptpf->rsrc_req_blkaddr = BLKADDR_CPT1;
- otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
- cptpf->reg_base, &cptpf->afpf_mbox,
- BLKADDR_CPT1);
cptpf->cpt1_lfs.global_slot = num_lfs;
cptpf->cpt1_lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
cptpf->cpt1_lfs.ctx_ilen = cfg_req->ctx_ilen;
@@ -507,6 +502,7 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
case MBOX_MSG_CPT_INLINE_IPSEC_CFG:
case MBOX_MSG_NIX_INLINE_IPSEC_CFG:
case MBOX_MSG_CPT_LF_RESET:
+ case MBOX_MSG_LMTST_TBL_SETUP:
break;
default:
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 42c5484ce66a..78367849c3d5 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -1513,8 +1513,6 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
if (ret)
goto delete_grps;
- otx2_cptlf_set_dev_info(lfs, cptpf->pdev, cptpf->reg_base,
- &cptpf->afpf_mbox, BLKADDR_CPT0);
ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
OTX2_CPT_QUEUE_HI_PRIO, 1);
if (ret)
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index d84eebdf2fa8..56904bdfd6e8 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -283,8 +283,6 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
lfs_num = cptvf->lfs.kvf_limits;
- otx2_cptlf_set_dev_info(lfs, cptvf->pdev, cptvf->reg_base,
- &cptvf->pfvf_mbox, cptvf->blkaddr);
ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
lfs_num);
if (ret)
@@ -378,10 +376,6 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag);
- ret = cn10k_cptvf_lmtst_init(cptvf);
- if (ret)
- goto clear_drvdata;
-
/* Initialize PF<=>VF mailbox */
ret = cptvf_pfvf_mbox_init(cptvf);
if (ret)
@@ -396,6 +390,9 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
cptvf_hw_ops_get(cptvf);
+ otx2_cptlf_set_dev_info(&cptvf->lfs, cptvf->pdev, cptvf->reg_base,
+ &cptvf->pfvf_mbox, cptvf->blkaddr);
+
ret = otx2_cptvf_send_caps_msg(cptvf);
if (ret) {
dev_err(&pdev->dev, "Couldn't get CPT engine capabilities.\n");
@@ -404,13 +401,19 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
if (cptvf->eng_caps[OTX2_CPT_SE_TYPES] & BIT_ULL(35))
cptvf->lfs.ops->cpt_sg_info_create = cn10k_sgv2_info_create;
+ ret = cn10k_cptvf_lmtst_init(cptvf);
+ if (ret)
+ goto unregister_interrupts;
+
/* Initialize CPT LFs */
ret = cptvf_lf_init(cptvf);
if (ret)
- goto unregister_interrupts;
+ goto free_lmtst;
return 0;
+free_lmtst:
+ cn10k_cpt_lmtst_free(pdev, &cptvf->lfs);
unregister_interrupts:
cptvf_disable_pfvf_mbox_intrs(cptvf);
destroy_pfvf_mbox:
@@ -434,6 +437,8 @@ static void otx2_cptvf_remove(struct pci_dev *pdev)
cptvf_disable_pfvf_mbox_intrs(cptvf);
/* Destroy PF-VF mbox */
cptvf_pfvf_mbox_destroy(cptvf);
+ /* Free LMTST memory */
+ cn10k_cpt_lmtst_free(pdev, &cptvf->lfs);
pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
index d9fa5f6e204d..931b72580fd9 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -134,6 +134,7 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
sizeof(cptvf->eng_caps));
break;
case MBOX_MSG_CPT_LF_RESET:
+ case MBOX_MSG_LMTST_TBL_SETUP:
break;
default:
dev_err(&cptvf->pdev->dev, "Unsupported msg %d received.\n",
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index 0e440f704a8f..35fa5bad1d9f 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -8,10 +8,12 @@
*/
#include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index dfa3ad1a12f2..709b3ee74657 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -9,10 +9,12 @@
#include <crypto/aes.h>
#include <crypto/ctr.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index 502a565074e9..4039cf3b22d4 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -8,10 +8,12 @@
*/
#include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index eb5c8f689360..bf465d824e2c 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -7,13 +7,14 @@
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
-#include <crypto/internal/hash.h>
#include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-#include <asm/vio.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include "nx_csbcpb.h"
#include "nx.h"
@@ -21,8 +22,6 @@
struct xcbc_state {
u8 state[AES_BLOCK_SIZE];
- unsigned int count;
- u8 buffer[AES_BLOCK_SIZE];
};
static int nx_xcbc_set_key(struct crypto_shash *desc,
@@ -58,7 +57,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
*/
static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
u8 keys[2][AES_BLOCK_SIZE];
@@ -135,9 +134,9 @@ out:
return rc;
}
-static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
+static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_shash *tfm)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
int err;
@@ -166,31 +165,24 @@ static int nx_xcbc_update(struct shash_desc *desc,
const u8 *data,
unsigned int len)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct xcbc_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg;
struct nx_sg *out_sg;
- u32 to_process = 0, leftover, total;
unsigned int max_sg_len;
unsigned long irq_flags;
+ u32 to_process, total;
int rc = 0;
int data_len;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+ memcpy(csbcpb->cpb.aes_xcbc.out_cv_mac, sctx->state, AES_BLOCK_SIZE);
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- total = sctx->count + len;
-
- /* 2 cases for total data len:
- * 1: <= AES_BLOCK_SIZE: copy into state, return 0
- * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
- */
- if (total <= AES_BLOCK_SIZE) {
- memcpy(sctx->buffer + sctx->count, data, len);
- sctx->count += len;
- goto out;
- }
+ total = len;
in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
@@ -200,7 +192,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
data_len = AES_BLOCK_SIZE;
out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- &len, nx_ctx->ap->sglen);
+ &data_len, nx_ctx->ap->sglen);
if (data_len != AES_BLOCK_SIZE) {
rc = -EINVAL;
@@ -210,56 +202,21 @@ static int nx_xcbc_update(struct shash_desc *desc,
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
do {
- to_process = total - to_process;
- to_process = to_process & ~(AES_BLOCK_SIZE - 1);
-
- leftover = total - to_process;
-
- /* the hardware will not accept a 0 byte operation for this
- * algorithm and the operation MUST be finalized to be correct.
- * So if we happen to get an update that falls on a block sized
- * boundary, we must save off the last block to finalize with
- * later. */
- if (!leftover) {
- to_process -= AES_BLOCK_SIZE;
- leftover = AES_BLOCK_SIZE;
- }
-
- if (sctx->count) {
- data_len = sctx->count;
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
- (u8 *) sctx->buffer,
- &data_len,
- max_sg_len);
- if (data_len != sctx->count) {
- rc = -EINVAL;
- goto out;
- }
- }
+ to_process = total & ~(AES_BLOCK_SIZE - 1);
- data_len = to_process - sctx->count;
in_sg = nx_build_sg_list(in_sg,
(u8 *) data,
- &data_len,
+ &to_process,
max_sg_len);
- if (data_len != to_process - sctx->count) {
- rc = -EINVAL;
- goto out;
- }
-
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
/* we've hit the nx chip previously and we're updating again,
* so copy over the partial digest */
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
- memcpy(csbcpb->cpb.aes_xcbc.cv,
- csbcpb->cpb.aes_xcbc.out_cv_mac,
- AES_BLOCK_SIZE);
- }
+ memcpy(csbcpb->cpb.aes_xcbc.cv,
+ csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
- NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
@@ -271,28 +228,24 @@ static int nx_xcbc_update(struct shash_desc *desc,
atomic_inc(&(nx_ctx->stats->aes_ops));
- /* everything after the first update is continuation */
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
-
total -= to_process;
- data += to_process - sctx->count;
- sctx->count = 0;
+ data += to_process;
in_sg = nx_ctx->in_sg;
- } while (leftover > AES_BLOCK_SIZE);
+ } while (total >= AES_BLOCK_SIZE);
- /* copy the leftover back into the state struct */
- memcpy(sctx->buffer, data, leftover);
- sctx->count = leftover;
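+	/* Block-only update: return the leftover partial-block byte count
+	 * so the crypto core buffers it for finup().
+	 */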
+ rc = total;
+ memcpy(sctx->state, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
+static int nx_xcbc_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int nbytes, u8 *out)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct xcbc_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
unsigned long irq_flags;
@@ -301,12 +254,10 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
- /* we've hit the nx chip previously, now we're finalizing,
- * so copy over the partial digest */
- memcpy(csbcpb->cpb.aes_xcbc.cv,
- csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
- } else if (sctx->count == 0) {
+ if (nbytes) {
+ /* non-zero final, so copy over the partial digest */
+ memcpy(csbcpb->cpb.aes_xcbc.cv, sctx->state, AES_BLOCK_SIZE);
+ } else {
/*
* we've never seen an update, so this is a 0 byte op. The
* hardware cannot handle a 0 byte op, so just ECB to
@@ -320,11 +271,11 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
* this is not an intermediate operation */
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- len = sctx->count;
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
- &len, nx_ctx->ap->sglen);
+ len = nbytes;
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len,
+ nx_ctx->ap->sglen);
- if (len != sctx->count) {
+ if (len != nbytes) {
rc = -EINVAL;
goto out;
}
@@ -362,18 +313,19 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
.digestsize = AES_BLOCK_SIZE,
.init = nx_xcbc_init,
.update = nx_xcbc_update,
- .final = nx_xcbc_final,
+ .finup = nx_xcbc_finup,
.setkey = nx_xcbc_set_key,
.descsize = sizeof(struct xcbc_state),
- .statesize = sizeof(struct xcbc_state),
+ .init_tfm = nx_crypto_ctx_aes_xcbc_init2,
+ .exit_tfm = nx_crypto_ctx_shash_exit,
.base = {
.cra_name = "xcbc(aes)",
.cra_driver_name = "xcbc-aes-nx",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_aes_xcbc_init2,
- .cra_exit = nx_crypto_ctx_exit,
}
};
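The nx-xcbc conversion above moves partial-block buffering out of the driver: with CRYPTO_AHASH_ALG_BLOCK_ONLY the core hands update() the data and expects a positive return value reporting how many tail bytes were left unprocessed, and CRYPTO_AHASH_ALG_FINAL_NONZERO additionally guarantees finup() always receives at least one byte, which is why the zero-length ECB special case only triggers when no data was ever seen. A minimal sketch of that update contract, with illustrative names (example_block_size and example_process_blocks are not part of the driver):

	static int example_block_only_update(struct shash_desc *desc,
					     const u8 *data, unsigned int len)
	{
		/* Process only whole blocks; never buffer a partial one. */
		unsigned int tail = len % example_block_size;

		example_process_blocks(desc, data, len - tail);

		/* A positive return tells the crypto core how many tail
		 * bytes it must buffer and resubmit later. */
		return tail;
	}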
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index c3bebf0feabe..5b29dd026df2 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -9,9 +9,12 @@
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/vio.h>
-#include <asm/byteorder.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
#include "nx_csbcpb.h"
#include "nx.h"
@@ -19,12 +22,11 @@
struct sha256_state_be {
__be32 state[SHA256_DIGEST_SIZE / 4];
u64 count;
- u8 buf[SHA256_BLOCK_SIZE];
};
-static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
+static int nx_crypto_ctx_sha256_init(struct crypto_shash *tfm)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
int err;
err = nx_crypto_ctx_sha_init(tfm);
@@ -40,11 +42,10 @@ static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
return 0;
}
-static int nx_sha256_init(struct shash_desc *desc) {
+static int nx_sha256_init(struct shash_desc *desc)
+{
struct sha256_state_be *sctx = shash_desc_ctx(desc);
- memset(sctx, 0, sizeof *sctx);
-
sctx->state[0] = __cpu_to_be32(SHA256_H0);
sctx->state[1] = __cpu_to_be32(SHA256_H1);
sctx->state[2] = __cpu_to_be32(SHA256_H2);
@@ -61,30 +62,18 @@ static int nx_sha256_init(struct shash_desc *desc) {
static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct sha256_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ u64 to_process, leftover, total = len;
struct nx_sg *out_sg;
- u64 to_process = 0, leftover, total;
unsigned long irq_flags;
int rc = 0;
int data_len;
u32 max_sg_len;
- u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- /* 2 cases for total data len:
- * 1: < SHA256_BLOCK_SIZE: copy into state, return 0
- * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
- */
- total = (sctx->count % SHA256_BLOCK_SIZE) + len;
- if (total < SHA256_BLOCK_SIZE) {
- memcpy(sctx->buf + buf_len, data, len);
- sctx->count += len;
- goto out;
- }
-
memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
@@ -105,41 +94,17 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
}
do {
- int used_sgs = 0;
struct nx_sg *in_sg = nx_ctx->in_sg;
- if (buf_len) {
- data_len = buf_len;
- in_sg = nx_build_sg_list(in_sg,
- (u8 *) sctx->buf,
- &data_len,
- max_sg_len);
-
- if (data_len != buf_len) {
- rc = -EINVAL;
- goto out;
- }
- used_sgs = in_sg - nx_ctx->in_sg;
- }
+ to_process = total & ~(SHA256_BLOCK_SIZE - 1);
- /* to_process: SHA256_BLOCK_SIZE aligned chunk to be
- * processed in this iteration. This value is restricted
- * by sg list limits and number of sgs we already used
- * for leftover data. (see above)
- * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
- * but because data may not be aligned, we need to account
- * for that too. */
- to_process = min_t(u64, total,
- (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
- to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
-
- data_len = to_process - buf_len;
+ data_len = to_process;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- to_process = data_len + buf_len;
+ to_process = data_len;
leftover = total - to_process;
/*
@@ -162,26 +127,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
atomic_inc(&(nx_ctx->stats->sha256_ops));
total -= to_process;
- data += to_process - buf_len;
- buf_len = 0;
-
+ data += to_process;
+ sctx->count += to_process;
} while (leftover >= SHA256_BLOCK_SIZE);
- /* copy the leftover back into the state struct */
- if (leftover)
- memcpy(sctx->buf, data, leftover);
-
- sctx->count += len;
+ rc = leftover;
memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int nx_sha256_final(struct shash_desc *desc, u8 *out)
+static int nx_sha256_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int nbytes, u8 *out)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct sha256_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
unsigned long irq_flags;
@@ -197,25 +158,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
/* final is represented by continuing the operation and indicating that
- * this is not an intermediate operation */
- if (sctx->count >= SHA256_BLOCK_SIZE) {
- /* we've hit the nx chip previously, now we're finalizing,
- * so copy over the partial digest */
- memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- } else {
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
- }
+ * this is not an intermediate operation.
+ * Copy over the partial digest. */
+ memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ sctx->count += nbytes;
csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
- len = sctx->count & (SHA256_BLOCK_SIZE - 1);
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
- &len, max_sg_len);
+ len = nbytes;
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len, max_sg_len);
- if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
+ if (len != nbytes) {
rc = -EINVAL;
goto out;
}
@@ -251,18 +206,34 @@ out:
static int nx_sha256_export(struct shash_desc *desc, void *out)
{
struct sha256_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ u8 *u8;
+ u32 *u32;
+ u64 *u64;
+ } p = { .u8 = out };
+ int i;
- memcpy(out, sctx, sizeof(*sctx));
+ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(*p.u32); i++)
+ put_unaligned(be32_to_cpu(sctx->state[i]), p.u32++);
+ put_unaligned(sctx->count, p.u64++);
return 0;
}
static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
struct sha256_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ const u8 *u8;
+ const u32 *u32;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int i;
- memcpy(sctx, in, sizeof(*sctx));
+ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(*p.u32); i++)
+ sctx->state[i] = cpu_to_be32(get_unaligned(p.u32++));
+ sctx->count = get_unaligned(p.u64++);
return 0;
}
@@ -270,19 +241,20 @@ struct shash_alg nx_shash_sha256_alg = {
.digestsize = SHA256_DIGEST_SIZE,
.init = nx_sha256_init,
.update = nx_sha256_update,
- .final = nx_sha256_final,
+ .finup = nx_sha256_finup,
.export = nx_sha256_export,
.import = nx_sha256_import,
+ .init_tfm = nx_crypto_ctx_sha256_init,
+ .exit_tfm = nx_crypto_ctx_shash_exit,
.descsize = sizeof(struct sha256_state_be),
.statesize = sizeof(struct sha256_state_be),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-nx",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_sha256_init,
- .cra_exit = nx_crypto_ctx_exit,
}
};
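The export/import pair above replaces the raw memcpy() of the whole descriptor with an explicit serialization: the digest words are stored in native byte order, the 64-bit count follows, and everything goes through put_unaligned()/get_unaligned() so unaligned state buffers are safe. A hedged sketch of the resulting layout (the struct name is illustrative; the driver writes the fields element by element):

	struct example_sha256_export {
		u32 state[SHA256_DIGEST_SIZE / 4];	/* CPU byte order */
		u64 count;				/* bytes hashed */
	};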
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 1ffb40d2c324..f74776b7d7d7 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -9,8 +9,12 @@
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/vio.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
#include "nx_csbcpb.h"
#include "nx.h"
@@ -18,12 +22,11 @@
struct sha512_state_be {
__be64 state[SHA512_DIGEST_SIZE / 8];
u64 count[2];
- u8 buf[SHA512_BLOCK_SIZE];
};
-static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
+static int nx_crypto_ctx_sha512_init(struct crypto_shash *tfm)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
int err;
err = nx_crypto_ctx_sha_init(tfm);
@@ -43,8 +46,6 @@ static int nx_sha512_init(struct shash_desc *desc)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
- memset(sctx, 0, sizeof *sctx);
-
sctx->state[0] = __cpu_to_be64(SHA512_H0);
sctx->state[1] = __cpu_to_be64(SHA512_H1);
sctx->state[2] = __cpu_to_be64(SHA512_H2);
@@ -54,6 +55,7 @@ static int nx_sha512_init(struct shash_desc *desc)
sctx->state[6] = __cpu_to_be64(SHA512_H6);
sctx->state[7] = __cpu_to_be64(SHA512_H7);
sctx->count[0] = 0;
+ sctx->count[1] = 0;
return 0;
}
@@ -61,30 +63,18 @@ static int nx_sha512_init(struct shash_desc *desc)
static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct sha512_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ u64 to_process, leftover, total = len;
struct nx_sg *out_sg;
- u64 to_process, leftover = 0, total;
unsigned long irq_flags;
int rc = 0;
int data_len;
u32 max_sg_len;
- u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- /* 2 cases for total data len:
- * 1: < SHA512_BLOCK_SIZE: copy into state, return 0
- * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
- */
- total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len;
- if (total < SHA512_BLOCK_SIZE) {
- memcpy(sctx->buf + buf_len, data, len);
- sctx->count[0] += len;
- goto out;
- }
-
memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE);
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
@@ -105,45 +95,17 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
}
do {
- int used_sgs = 0;
struct nx_sg *in_sg = nx_ctx->in_sg;
- if (buf_len) {
- data_len = buf_len;
- in_sg = nx_build_sg_list(in_sg,
- (u8 *) sctx->buf,
- &data_len, max_sg_len);
-
- if (data_len != buf_len) {
- rc = -EINVAL;
- goto out;
- }
- used_sgs = in_sg - nx_ctx->in_sg;
- }
+ to_process = total & ~(SHA512_BLOCK_SIZE - 1);
- /* to_process: SHA512_BLOCK_SIZE aligned chunk to be
- * processed in this iteration. This value is restricted
- * by sg list limits and number of sgs we already used
- * for leftover data. (see above)
- * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
- * but because data may not be aligned, we need to account
- * for that too. */
- to_process = min_t(u64, total,
- (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
- to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
-
- data_len = to_process - buf_len;
+ data_len = to_process;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- if (data_len != (to_process - buf_len)) {
- rc = -EINVAL;
- goto out;
- }
-
- to_process = data_len + buf_len;
+ to_process = data_len;
leftover = total - to_process;
/*
@@ -166,30 +128,29 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
atomic_inc(&(nx_ctx->stats->sha512_ops));
total -= to_process;
- data += to_process - buf_len;
- buf_len = 0;
-
+ data += to_process;
+ sctx->count[0] += to_process;
+ if (sctx->count[0] < to_process)
+ sctx->count[1]++;
} while (leftover >= SHA512_BLOCK_SIZE);
- /* copy the leftover back into the state struct */
- if (leftover)
- memcpy(sctx->buf, data, leftover);
- sctx->count[0] += len;
+ rc = leftover;
memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int nx_sha512_final(struct shash_desc *desc, u8 *out)
+static int nx_sha512_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int nbytes, u8 *out)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
u32 max_sg_len;
- u64 count0;
unsigned long irq_flags;
+ u64 count0, count1;
int rc = 0;
int len;
@@ -201,30 +162,23 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
/* final is represented by continuing the operation and indicating that
- * this is not an intermediate operation */
- if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
- /* we've hit the nx chip previously, now we're finalizing,
- * so copy over the partial digest */
- memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
- SHA512_DIGEST_SIZE);
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- } else {
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
- }
-
+ * this is not an intermediate operation.
+ * Copy over the partial digest. */
+ memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state, SHA512_DIGEST_SIZE);
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- count0 = sctx->count[0] * 8;
+ count0 = sctx->count[0] + nbytes;
+ count1 = sctx->count[1];
- csbcpb->cpb.sha512.message_bit_length_lo = count0;
+ csbcpb->cpb.sha512.message_bit_length_lo = count0 << 3;
+ csbcpb->cpb.sha512.message_bit_length_hi = (count1 << 3) |
+ (count0 >> 61);
- len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
- in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
- max_sg_len);
+ len = nbytes;
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len, max_sg_len);
- if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
+ if (len != nbytes) {
rc = -EINVAL;
goto out;
}
@@ -246,7 +200,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
goto out;
atomic_inc(&(nx_ctx->stats->sha512_ops));
- atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));
+ atomic64_add(count0, &(nx_ctx->stats->sha512_bytes));
memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
@@ -257,18 +211,34 @@ out:
static int nx_sha512_export(struct shash_desc *desc, void *out)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ u8 *u8;
+ u64 *u64;
+ } p = { .u8 = out };
+ int i;
- memcpy(out, sctx, sizeof(*sctx));
+ for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(*p.u64); i++)
+ put_unaligned(be64_to_cpu(sctx->state[i]), p.u64++);
+ put_unaligned(sctx->count[0], p.u64++);
+ put_unaligned(sctx->count[1], p.u64++);
return 0;
}
static int nx_sha512_import(struct shash_desc *desc, const void *in)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ const u8 *u8;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int i;
- memcpy(sctx, in, sizeof(*sctx));
+ for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(*p.u64); i++)
+ sctx->state[i] = cpu_to_be64(get_unaligned(p.u64++));
+ sctx->count[0] = get_unaligned(p.u64++);
+ sctx->count[1] = get_unaligned(p.u64++);
return 0;
}
@@ -276,19 +246,20 @@ struct shash_alg nx_shash_sha512_alg = {
.digestsize = SHA512_DIGEST_SIZE,
.init = nx_sha512_init,
.update = nx_sha512_update,
- .final = nx_sha512_final,
+ .finup = nx_sha512_finup,
.export = nx_sha512_export,
.import = nx_sha512_import,
+ .init_tfm = nx_crypto_ctx_sha512_init,
+ .exit_tfm = nx_crypto_ctx_shash_exit,
.descsize = sizeof(struct sha512_state_be),
.statesize = sizeof(struct sha512_state_be),
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-nx",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_sha512_init,
- .cra_exit = nx_crypto_ctx_exit,
}
};
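nx-sha512 keeps a 128-bit byte count in count[1]:count[0], carrying explicitly on unsigned wrap-around in update(), and finup() widens it to the 128-bit bit length the CPB expects. The arithmetic, extracted as a fragment sketch (bit_len_lo/bit_len_hi stand in for the message_bit_length_lo/_hi fields):

	count0 += to_process;
	if (count0 < to_process)		/* unsigned wrap => carry */
		count1++;

	/* bit length = byte count * 8, spread across two 64-bit words */
	bit_len_lo = count0 << 3;
	bit_len_hi = (count1 << 3) | (count0 >> 61);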
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index a3b979193d9b..78135fb13f5c 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -7,11 +7,11 @@
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
+#include <crypto/aes.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
-#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/sha2.h>
-#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -124,8 +124,6 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
}
if ((sg - sg_head) == sgmax) {
- pr_err("nx: scatter/gather list overflow, pid: %d\n",
- current->pid);
sg++;
break;
}
@@ -702,14 +700,14 @@ int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm)
NX_MODE_AES_ECB);
}
-int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_sha_init(struct crypto_shash *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
+ return nx_crypto_ctx_init(crypto_shash_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
}
-int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_xcbc_init(struct crypto_shash *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ return nx_crypto_ctx_init(crypto_shash_ctx(tfm), NX_FC_AES,
NX_MODE_AES_XCBC_MAC);
}
@@ -744,6 +742,11 @@ void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
kfree_sensitive(nx_ctx->kmem);
}
+void nx_crypto_ctx_shash_exit(struct crypto_shash *tfm)
+{
+ nx_crypto_ctx_exit(crypto_shash_ctx(tfm));
+}
+
static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index e1b4b6927bec..36974f08490a 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -3,7 +3,11 @@
#ifndef __NX_H__
#define __NX_H__
+#include <asm/vio.h>
#include <crypto/ctr.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#define NX_NAME "nx-crypto"
#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
@@ -139,19 +143,20 @@ struct nx_crypto_ctx {
} priv;
};
-struct crypto_aead;
+struct scatterlist;
/* prototypes */
int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm);
int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm);
-int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_xcbc_init(struct crypto_shash *tfm);
int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm);
int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm);
int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm);
-int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_sha_init(struct crypto_shash *tfm);
void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm);
void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm);
+void nx_crypto_ctx_shash_exit(struct crypto_shash *tfm);
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
u32 may_sleep);
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 551dd32a8db0..1ecf5f6ac04e 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1086,10 +1086,7 @@ static struct attribute *omap_aes_attrs[] = {
&dev_attr_fallback.attr,
NULL,
};
-
-static const struct attribute_group omap_aes_attr_group = {
- .attrs = omap_aes_attrs,
-};
+ATTRIBUTE_GROUPS(omap_aes);
static int omap_aes_probe(struct platform_device *pdev)
{
@@ -1215,12 +1212,6 @@ static int omap_aes_probe(struct platform_device *pdev)
}
}
- err = sysfs_create_group(&dev->kobj, &omap_aes_attr_group);
- if (err) {
- dev_err(dev, "could not create sysfs device attrs\n");
- goto err_aead_algs;
- }
-
return 0;
err_aead_algs:
for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
@@ -1277,8 +1268,6 @@ static void omap_aes_remove(struct platform_device *pdev)
tasklet_kill(&dd->done_task);
omap_aes_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
-
- sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group);
}
#ifdef CONFIG_PM_SLEEP
@@ -1304,6 +1293,7 @@ static struct platform_driver omap_aes_driver = {
.name = "omap-aes",
.pm = &omap_aes_pm_ops,
.of_match_table = omap_aes_of_match,
+ .dev_groups = omap_aes_groups,
},
};
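The omap-aes change (and the identical omap-sham one below) converts manual sysfs_create_group()/sysfs_remove_group() calls into driver-core managed attributes: ATTRIBUTE_GROUPS(foo) expands foo_attrs into a NULL-terminated foo_groups array, and .dev_groups makes the core register and remove it around probe/remove automatically, closing the error-path handling the hand-rolled calls required. A self-contained sketch of the pattern (foo and dev_attr_example are illustrative):

	static struct attribute *foo_attrs[] = {
		&dev_attr_example.attr,
		NULL,
	};
	ATTRIBUTE_GROUPS(foo);		/* generates foo_groups */

	static struct platform_driver foo_driver = {
		.driver = {
			.name       = "foo",
			.dev_groups = foo_groups,	/* auto (un)registered */
		},
	};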
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 7021481bf027..56f192cb976d 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2039,10 +2039,7 @@ static struct attribute *omap_sham_attrs[] = {
&dev_attr_fallback.attr,
NULL,
};
-
-static const struct attribute_group omap_sham_attr_group = {
- .attrs = omap_sham_attrs,
-};
+ATTRIBUTE_GROUPS(omap_sham);
static int omap_sham_probe(struct platform_device *pdev)
{
@@ -2158,12 +2155,6 @@ static int omap_sham_probe(struct platform_device *pdev)
}
}
- err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
- if (err) {
- dev_err(dev, "could not create sysfs device attrs\n");
- goto err_algs;
- }
-
return 0;
err_algs:
@@ -2210,8 +2201,6 @@ static void omap_sham_remove(struct platform_device *pdev)
if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
-
- sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);
}
static struct platform_driver omap_sham_driver = {
@@ -2220,6 +2209,7 @@ static struct platform_driver omap_sham_driver = {
.driver = {
.name = "omap-sham",
.of_match_table = omap_sham_of_match,
+ .dev_groups = omap_sham_groups,
},
};
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index db9e84c0c9fb..329f60ad422e 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -7,59 +7,89 @@
* Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
*/
+#include <asm/cpu_device_id.h>
#include <crypto/internal/hash.h>
#include <crypto/padlock.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
+#include <linux/cpufeature.h>
#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/scatterlist.h>
-#include <asm/cpu_device_id.h>
-#include <asm/fpu/api.h>
+#include <linux/module.h>
-struct padlock_sha_desc {
- struct shash_desc fallback;
-};
+#define PADLOCK_SHA_DESCSIZE (128 + ((PADLOCK_ALIGNMENT - 1) & \
+ ~(CRYPTO_MINALIGN - 1)))
struct padlock_sha_ctx {
- struct crypto_shash *fallback;
+ struct crypto_ahash *fallback;
};
-static int padlock_sha_init(struct shash_desc *desc)
+static inline void *padlock_shash_desc_ctx(struct shash_desc *desc)
{
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ return PTR_ALIGN(shash_desc_ctx(desc), PADLOCK_ALIGNMENT);
+}
+
+static int padlock_sha1_init(struct shash_desc *desc)
+{
+ struct sha1_state *sctx = padlock_shash_desc_ctx(desc);
+
+ *sctx = (struct sha1_state){
+ .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+ };
+
+ return 0;
+}
+
+static int padlock_sha256_init(struct shash_desc *desc)
+{
+ struct crypto_sha256_state *sctx = padlock_shash_desc_ctx(desc);
- dctx->fallback.tfm = ctx->fallback;
- return crypto_shash_init(&dctx->fallback);
+ sha256_block_init(sctx);
+ return 0;
}
static int padlock_sha_update(struct shash_desc *desc,
const u8 *data, unsigned int length)
{
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+ u8 *state = padlock_shash_desc_ctx(desc);
+ struct crypto_shash *tfm = desc->tfm;
+ int err, remain;
+
+ remain = length - round_down(length, crypto_shash_blocksize(tfm));
+ {
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(tfm);
+ HASH_REQUEST_ON_STACK(req, ctx->fallback);
+
+ ahash_request_set_callback(req, 0, NULL, NULL);
+ ahash_request_set_virt(req, data, NULL, length - remain);
+ err = crypto_ahash_import_core(req, state) ?:
+ crypto_ahash_update(req) ?:
+ crypto_ahash_export_core(req, state);
+ HASH_REQUEST_ZERO(req);
+ }
- return crypto_shash_update(&dctx->fallback, data, length);
+ return err ?: remain;
}
static int padlock_sha_export(struct shash_desc *desc, void *out)
{
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
-
- return crypto_shash_export(&dctx->fallback, out);
+ memcpy(out, padlock_shash_desc_ctx(desc),
+ crypto_shash_coresize(desc->tfm));
+ return 0;
}
static int padlock_sha_import(struct shash_desc *desc, const void *in)
{
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ unsigned int bs = crypto_shash_blocksize(desc->tfm);
+ unsigned int ss = crypto_shash_coresize(desc->tfm);
+ u64 *state = padlock_shash_desc_ctx(desc);
+
+ memcpy(state, in, ss);
+
+ /* Stop evil imports from generating a fault. */
+ state[ss / 8 - 1] &= ~(bs - 1);
- dctx->fallback.tfm = ctx->fallback;
- return crypto_shash_import(&dctx->fallback, in);
+ return 0;
}
static inline void padlock_output_block(uint32_t *src,
@@ -69,65 +99,38 @@ static inline void padlock_output_block(uint32_t *src,
*dst++ = swab32(*src++);
}
+static int padlock_sha_finup(struct shash_desc *desc, const u8 *in,
+ unsigned int count, u8 *out)
+{
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ HASH_REQUEST_ON_STACK(req, ctx->fallback);
+
+ ahash_request_set_callback(req, 0, NULL, NULL);
+ ahash_request_set_virt(req, in, out, count);
+ return crypto_ahash_import_core(req, padlock_shash_desc_ctx(desc)) ?:
+ crypto_ahash_finup(req);
+}
+
static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
unsigned int count, u8 *out)
{
/* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */
- char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
- ((aligned(STACK_ALIGN)));
- char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- struct sha1_state state;
- unsigned int space;
- unsigned int leftover;
- int err;
-
- err = crypto_shash_export(&dctx->fallback, &state);
- if (err)
- goto out;
+ struct sha1_state *state = padlock_shash_desc_ctx(desc);
+ u64 start = state->count;
- if (state.count + count > ULONG_MAX)
- return crypto_shash_finup(&dctx->fallback, in, count, out);
-
- leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
- space = SHA1_BLOCK_SIZE - leftover;
- if (space) {
- if (count > space) {
- err = crypto_shash_update(&dctx->fallback, in, space) ?:
- crypto_shash_export(&dctx->fallback, &state);
- if (err)
- goto out;
- count -= space;
- in += space;
- } else {
- memcpy(state.buffer + leftover, in, count);
- in = state.buffer;
- count += leftover;
- state.count &= ~(SHA1_BLOCK_SIZE - 1);
- }
- }
-
- memcpy(result, &state.state, SHA1_DIGEST_SIZE);
+ if (start + count > ULONG_MAX)
+ return padlock_sha_finup(desc, in, count, out);
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
: \
- : "c"((unsigned long)state.count + count), \
- "a"((unsigned long)state.count), \
- "S"(in), "D"(result));
-
- padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
+ : "c"((unsigned long)start + count), \
+ "a"((unsigned long)start), \
+ "S"(in), "D"(state));
-out:
- return err;
-}
-
-static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
-{
- const u8 *buf = (void *)desc;
-
- return padlock_sha1_finup(desc, buf, 0, out);
+ padlock_output_block(state->state, (uint32_t *)out, 5);
+ return 0;
}
static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
@@ -136,78 +139,46 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
/* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */
- char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
- ((aligned(STACK_ALIGN)));
- char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- struct sha256_state state;
- unsigned int space;
- unsigned int leftover;
- int err;
-
- err = crypto_shash_export(&dctx->fallback, &state);
- if (err)
- goto out;
+ struct sha256_state *state = padlock_shash_desc_ctx(desc);
+ u64 start = state->count;
- if (state.count + count > ULONG_MAX)
- return crypto_shash_finup(&dctx->fallback, in, count, out);
-
- leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
- space = SHA256_BLOCK_SIZE - leftover;
- if (space) {
- if (count > space) {
- err = crypto_shash_update(&dctx->fallback, in, space) ?:
- crypto_shash_export(&dctx->fallback, &state);
- if (err)
- goto out;
- count -= space;
- in += space;
- } else {
- memcpy(state.buf + leftover, in, count);
- in = state.buf;
- count += leftover;
- state.count &= ~(SHA1_BLOCK_SIZE - 1);
- }
- }
-
- memcpy(result, &state.state, SHA256_DIGEST_SIZE);
+ if (start + count > ULONG_MAX)
+ return padlock_sha_finup(desc, in, count, out);
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
: \
- : "c"((unsigned long)state.count + count), \
- "a"((unsigned long)state.count), \
- "S"(in), "D"(result));
+ : "c"((unsigned long)start + count), \
+ "a"((unsigned long)start), \
+ "S"(in), "D"(state));
- padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
-
-out:
- return err;
-}
-
-static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
-{
- const u8 *buf = (void *)desc;
-
- return padlock_sha256_finup(desc, buf, 0, out);
+ padlock_output_block(state->state, (uint32_t *)out, 8);
+ return 0;
}
static int padlock_init_tfm(struct crypto_shash *hash)
{
const char *fallback_driver_name = crypto_shash_alg_name(hash);
struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
- struct crypto_shash *fallback_tfm;
+ struct crypto_ahash *fallback_tfm;
/* Allocate a fallback and abort if it failed. */
- fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(fallback_tfm)) {
printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
fallback_driver_name);
return PTR_ERR(fallback_tfm);
}
+ if (crypto_shash_statesize(hash) !=
+ crypto_ahash_statesize(fallback_tfm)) {
+ crypto_free_ahash(fallback_tfm);
+ return -EINVAL;
+ }
+
ctx->fallback = fallback_tfm;
- hash->descsize += crypto_shash_descsize(fallback_tfm);
+
return 0;
}
@@ -215,26 +186,27 @@ static void padlock_exit_tfm(struct crypto_shash *hash)
{
struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
- crypto_free_shash(ctx->fallback);
+ crypto_free_ahash(ctx->fallback);
}
static struct shash_alg sha1_alg = {
.digestsize = SHA1_DIGEST_SIZE,
- .init = padlock_sha_init,
+ .init = padlock_sha1_init,
.update = padlock_sha_update,
.finup = padlock_sha1_finup,
- .final = padlock_sha1_final,
.export = padlock_sha_export,
.import = padlock_sha_import,
.init_tfm = padlock_init_tfm,
.exit_tfm = padlock_exit_tfm,
- .descsize = sizeof(struct padlock_sha_desc),
- .statesize = sizeof(struct sha1_state),
+ .descsize = PADLOCK_SHA_DESCSIZE,
+ .statesize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
@@ -243,21 +215,22 @@ static struct shash_alg sha1_alg = {
static struct shash_alg sha256_alg = {
.digestsize = SHA256_DIGEST_SIZE,
- .init = padlock_sha_init,
+ .init = padlock_sha256_init,
.update = padlock_sha_update,
.finup = padlock_sha256_finup,
- .final = padlock_sha256_final,
+ .init_tfm = padlock_init_tfm,
.export = padlock_sha_export,
.import = padlock_sha_import,
- .init_tfm = padlock_init_tfm,
.exit_tfm = padlock_exit_tfm,
- .descsize = sizeof(struct padlock_sha_desc),
- .statesize = sizeof(struct sha256_state),
+ .descsize = PADLOCK_SHA_DESCSIZE,
+ .statesize = sizeof(struct crypto_sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
@@ -266,207 +239,58 @@ static struct shash_alg sha256_alg = {
/* Add two shash_alg instances for the hardware-implemented
 * multi-part hash supported by the VIA Nano Processor. */
-static int padlock_sha1_init_nano(struct shash_desc *desc)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- *sctx = (struct sha1_state){
- .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
- };
-
- return 0;
-}
static int padlock_sha1_update_nano(struct shash_desc *desc,
- const u8 *data, unsigned int len)
+ const u8 *src, unsigned int len)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial, done;
- const u8 *src;
/* The PHE requires the output buffer to be 128 bytes and 16-byte aligned */
- u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
- ((aligned(STACK_ALIGN)));
- u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
-
- partial = sctx->count & 0x3f;
- sctx->count += len;
- done = 0;
- src = data;
- memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);
-
- if ((partial + len) >= SHA1_BLOCK_SIZE) {
-
- /* Append the bytes in state's buffer to a block to handle */
- if (partial) {
- done = -partial;
- memcpy(sctx->buffer + partial, data,
- done + SHA1_BLOCK_SIZE);
- src = sctx->buffer;
- asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
- : "+S"(src), "+D"(dst) \
- : "a"((long)-1), "c"((unsigned long)1));
- done += SHA1_BLOCK_SIZE;
- src = data + done;
- }
-
- /* Process the left bytes from the input data */
- if (len - done >= SHA1_BLOCK_SIZE) {
- asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
- : "+S"(src), "+D"(dst)
- : "a"((long)-1),
- "c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
- done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
- src = data + done;
- }
- partial = 0;
- }
- memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
- memcpy(sctx->buffer + partial, src, len - done);
-
- return 0;
-}
-
-static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out)
-{
- struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc);
- unsigned int partial, padlen;
- __be64 bits;
- static const u8 padding[64] = { 0x80, };
-
- bits = cpu_to_be64(state->count << 3);
-
- /* Pad out to 56 mod 64 */
- partial = state->count & 0x3f;
- padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
- padlock_sha1_update_nano(desc, padding, padlen);
-
- /* Append length field bytes */
- padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits));
-
- /* Swap to output */
- padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5);
-
- return 0;
-}
-
-static int padlock_sha256_init_nano(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- *sctx = (struct sha256_state){
- .state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, \
- SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7},
- };
-
- return 0;
+ struct sha1_state *state = padlock_shash_desc_ctx(desc);
+ int blocks = len / SHA1_BLOCK_SIZE;
+
+ len -= blocks * SHA1_BLOCK_SIZE;
+ state->count += blocks * SHA1_BLOCK_SIZE;
+
+ /* Process the whole blocks from the input data */
+ asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
+ : "+S"(src), "+D"(state)
+ : "a"((long)-1),
+ "c"((unsigned long)blocks));
+ return len;
}
-static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
+static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *src,
unsigned int len)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- unsigned int partial, done;
- const u8 *src;
/* The PHE requires the output buffer to be 128 bytes and 16-byte aligned */
- u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
- ((aligned(STACK_ALIGN)));
- u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
-
- partial = sctx->count & 0x3f;
- sctx->count += len;
- done = 0;
- src = data;
- memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);
-
- if ((partial + len) >= SHA256_BLOCK_SIZE) {
-
- /* Append the bytes in state's buffer to a block to handle */
- if (partial) {
- done = -partial;
- memcpy(sctx->buf + partial, data,
- done + SHA256_BLOCK_SIZE);
- src = sctx->buf;
- asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
- : "+S"(src), "+D"(dst)
- : "a"((long)-1), "c"((unsigned long)1));
- done += SHA256_BLOCK_SIZE;
- src = data + done;
- }
-
- /* Process the left bytes from input data*/
- if (len - done >= SHA256_BLOCK_SIZE) {
- asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
- : "+S"(src), "+D"(dst)
- : "a"((long)-1),
- "c"((unsigned long)((len - done) / 64)));
- done += ((len - done) - (len - done) % 64);
- src = data + done;
- }
- partial = 0;
- }
- memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
- memcpy(sctx->buf + partial, src, len - done);
-
- return 0;
-}
-
-static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out)
-{
- struct sha256_state *state =
- (struct sha256_state *)shash_desc_ctx(desc);
- unsigned int partial, padlen;
- __be64 bits;
- static const u8 padding[64] = { 0x80, };
-
- bits = cpu_to_be64(state->count << 3);
-
- /* Pad out to 56 mod 64 */
- partial = state->count & 0x3f;
- padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
- padlock_sha256_update_nano(desc, padding, padlen);
-
- /* Append length field bytes */
- padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits));
-
- /* Swap to output */
- padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8);
-
- return 0;
-}
-
-static int padlock_sha_export_nano(struct shash_desc *desc,
- void *out)
-{
- int statesize = crypto_shash_statesize(desc->tfm);
- void *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, statesize);
- return 0;
-}
-
-static int padlock_sha_import_nano(struct shash_desc *desc,
- const void *in)
-{
- int statesize = crypto_shash_statesize(desc->tfm);
- void *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, statesize);
- return 0;
+ struct crypto_sha256_state *state = padlock_shash_desc_ctx(desc);
+ int blocks = len / SHA256_BLOCK_SIZE;
+
+ len -= blocks * SHA256_BLOCK_SIZE;
+ state->count += blocks * SHA256_BLOCK_SIZE;
+
+ /* Process the whole blocks from the input data */
+ asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
+ : "+S"(src), "+D"(state)
+ : "a"((long)-1),
+ "c"((unsigned long)blocks));
+ return len;
}
static struct shash_alg sha1_alg_nano = {
.digestsize = SHA1_DIGEST_SIZE,
- .init = padlock_sha1_init_nano,
+ .init = padlock_sha1_init,
.update = padlock_sha1_update_nano,
- .final = padlock_sha1_final_nano,
- .export = padlock_sha_export_nano,
- .import = padlock_sha_import_nano,
- .descsize = sizeof(struct sha1_state),
- .statesize = sizeof(struct sha1_state),
+ .finup = padlock_sha1_finup,
+ .export = padlock_sha_export,
+ .import = padlock_sha_import,
+ .descsize = PADLOCK_SHA_DESCSIZE,
+ .statesize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock-nano",
.cra_priority = PADLOCK_CRA_PRIORITY,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -474,17 +298,19 @@ static struct shash_alg sha1_alg_nano = {
static struct shash_alg sha256_alg_nano = {
.digestsize = SHA256_DIGEST_SIZE,
- .init = padlock_sha256_init_nano,
+ .init = padlock_sha256_init,
.update = padlock_sha256_update_nano,
- .final = padlock_sha256_final_nano,
- .export = padlock_sha_export_nano,
- .import = padlock_sha_import_nano,
- .descsize = sizeof(struct sha256_state),
- .statesize = sizeof(struct sha256_state),
+ .finup = padlock_sha256_finup,
+ .export = padlock_sha_export,
+ .import = padlock_sha_import,
+ .descsize = PADLOCK_SHA_DESCSIZE,
+ .statesize = sizeof(struct crypto_sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock-nano",
.cra_priority = PADLOCK_CRA_PRIORITY,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
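The padlock rework above drops the embedded shash fallback descriptor in favour of a one-shot asynchronous fallback driven from the stack: HASH_REQUEST_ON_STACK() builds the request, import_core/export_core shuttle the partial state, and ahash_request_set_virt() feeds a plain virtual address with no scatterlist. A hedged sketch of that sequence (fallback_hash_once is an illustrative name):

	static int fallback_hash_once(struct crypto_ahash *fb,
				      const u8 *data, unsigned int len,
				      void *state)
	{
		HASH_REQUEST_ON_STACK(req, fb);
		int err;

		ahash_request_set_callback(req, 0, NULL, NULL);
		ahash_request_set_virt(req, data, NULL, len);
		err = crypto_ahash_import_core(req, state) ?:
		      crypto_ahash_update(req) ?:
		      crypto_ahash_export_core(req, state);
		HASH_REQUEST_ZERO(req);	/* wipe the on-stack state */
		return err;
	}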
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 69d6019d8abc..d6928ebe9526 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -52,12 +52,11 @@ static int rk_ahash_digest_fb(struct ahash_request *areq)
algt->stat_fb++;
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
return crypto_ahash_digest(&rctx->fallback_req);
}
@@ -124,8 +123,9 @@ static int rk_ahash_init(struct ahash_request *req)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -137,10 +137,10 @@ static int rk_ahash_update(struct ahash_request *req)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -152,9 +152,10 @@ static int rk_ahash_final(struct ahash_request *req)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -166,12 +167,11 @@ static int rk_ahash_finup(struct ahash_request *req)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
+ req->nbytes);
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -183,8 +183,9 @@ static int rk_ahash_import(struct ahash_request *req, const void *in)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -196,8 +197,9 @@ static int rk_ahash_export(struct ahash_request *req, void *out)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
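The rockchip hunks are a mechanical conversion from poking ahash_request fields (base.flags, nbytes, src, result) to the request API helpers, which also forward the caller's completion callback instead of dropping it. The shape of every converted path, as a sketch (forward_to_fallback is an illustrative name):

	static int forward_to_fallback(struct ahash_request *req,
				       struct crypto_ahash *fallback,
				       struct ahash_request *subreq)
	{
		ahash_request_set_tfm(subreq, fallback);
		ahash_request_set_callback(subreq,
					   req->base.flags &
					   CRYPTO_TFM_REQ_MAY_SLEEP,
					   req->base.complete,
					   req->base.data);
		ahash_request_set_crypt(subreq, req->src, req->result,
					req->nbytes);
		return crypto_ahash_digest(subreq);
	}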
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index b4c3c14dafd5..b829c84f60f2 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -9,11 +9,17 @@
//
// Hash part based on omap-sham.c driver.
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/md5.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
#include <linux/clk.h>
-#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -22,17 +28,9 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
-
-#include <crypto/ctr.h>
-#include <crypto/aes.h>
-#include <crypto/algapi.h>
-#include <crypto/scatterwalk.h>
-
-#include <crypto/hash.h>
-#include <crypto/md5.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/internal/hash.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#define _SBF(s, v) ((v) << (s))
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 091612b066f1..fdc0b2486069 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -1415,22 +1415,13 @@ static int sa_sha_run(struct ahash_request *req)
(auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
struct ahash_request *subreq = &rctx->fallback_req;
- int ret = 0;
+ int ret;
ahash_request_set_tfm(subreq, ctx->fallback.ahash);
- subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
- crypto_ahash_init(subreq);
-
- subreq->nbytes = auth_len;
- subreq->src = req->src;
- subreq->result = req->result;
-
- ret |= crypto_ahash_update(subreq);
-
- subreq->nbytes = 0;
+ ahash_request_set_callback(subreq, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(subreq, req->src, req->result, auth_len);
- ret |= crypto_ahash_final(subreq);
+ ret = crypto_ahash_digest(subreq);
return ret;
}
@@ -1502,8 +1493,7 @@ static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
return ret;
if (alg_base) {
- ctx->shash = crypto_alloc_shash(alg_base, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->shash = crypto_alloc_shash(alg_base, 0, 0);
if (IS_ERR(ctx->shash)) {
dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
alg_base);
@@ -1511,8 +1501,7 @@ static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
}
/* for fallback */
ctx->fallback.ahash =
- crypto_alloc_ahash(alg_base, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ crypto_alloc_ahash(alg_base, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->fallback.ahash)) {
dev_err(ctx->dev_data->dev,
"Could not load fallback driver\n");
@@ -1546,54 +1535,38 @@ static int sa_sha_init(struct ahash_request *req)
crypto_ahash_digestsize(tfm), rctx);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, NULL, 0);
return crypto_ahash_init(&rctx->fallback_req);
}
static int sa_sha_update(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
static int sa_sha_final(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
static int sa_sha_finup(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result, req->nbytes);
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -1605,8 +1578,7 @@ static int sa_sha_import(struct ahash_request *req, const void *in)
struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -1614,12 +1586,9 @@ static int sa_sha_import(struct ahash_request *req, const void *in)
static int sa_sha_export(struct ahash_request *req, void *out)
{
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
struct ahash_request *subreq = &rctx->fallback_req;
- ahash_request_set_tfm(subreq, ctx->fallback.ahash);
- subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(subreq, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
return crypto_ahash_export(subreq, out);
}
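In sa2ul the fallback digest path is also simplified: the old code ran init, an update over auth_len bytes, then a zero-length final, OR-ing the return codes together (which can mangle negative error codes); crypto_ahash_digest() performs the same init/update/final sequence in one call with the first failure returned intact. A fragment sketch of the equivalence (assuming subreq is already bound to the fallback tfm and ret is an int initialized to zero):

	/* before: three calls, error codes OR-ed together */
	crypto_ahash_init(subreq);
	ret |= crypto_ahash_update(subreq);	/* auth_len bytes */
	ret |= crypto_ahash_final(subreq);	/* zero-length final */

	/* after: one call, proper error propagation */
	ahash_request_set_crypt(subreq, req->src, req->result, auth_len);
	ret = crypto_ahash_digest(subreq);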
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
index 42d007b7af45..d09b4aaeecef 100644
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -117,8 +117,9 @@ static int tegra_sha_fallback_init(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -130,10 +131,10 @@ static int tegra_sha_fallback_update(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -145,9 +146,10 @@ static int tegra_sha_fallback_final(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -159,12 +161,11 @@ static int tegra_sha_fallback_finup(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
+ req->nbytes);
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -176,12 +177,11 @@ static int tegra_sha_fallback_digest(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
+ req->nbytes);
return crypto_ahash_digest(&rctx->fallback_req);
}
@@ -193,8 +193,9 @@ static int tegra_sha_fallback_import(struct ahash_request *req, const void *in)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -206,8 +207,9 @@ static int tegra_sha_fallback_export(struct ahash_request *req, void *out)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
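
The pattern above repeats across all six fallback paths: rather than assigning rctx->fallback_req fields by hand, the request is configured through the ahash helper API. A minimal sketch of the resulting delegation, with the driver-specific context lookups abstracted into parameters (fallback_digest is a hypothetical name, not part of the patch):

/*
 * Sketch: forward a hash request to a software fallback via the
 * ahash request helpers. Only CRYPTO_TFM_REQ_MAY_SLEEP is propagated
 * from the caller's flags; completion callback and data pass through.
 */
#include <crypto/hash.h>

static int fallback_digest(struct ahash_request *req,
			   struct crypto_ahash *fallback_tfm,
			   struct ahash_request *fallback_req)
{
	ahash_request_set_tfm(fallback_req, fallback_tfm);
	ahash_request_set_callback(fallback_req,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);
	/* src scatterlist, result buffer and length in one call */
	ahash_request_set_crypt(fallback_req, req->src, req->result,
				req->nbytes);
	return crypto_ahash_digest(fallback_req);
}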
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index 580649f9bff8..5813017b6b79 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -3,18 +3,18 @@
* Xilinx ZynqMP SHA Driver.
* Copyright (c) 2022 Xilinx Inc.
*/
-#include <linux/cacheflush.h>
-#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/sha3.h>
-#include <linux/crypto.h>
+#include <linux/cacheflush.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/firmware/xlnx-zynqmp.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/spinlock.h>
#define ZYNQMP_DMA_BIT_MASK 32U
@@ -36,13 +36,11 @@ struct zynqmp_sha_tfm_ctx {
struct crypto_shash *fbk_tfm;
};
-struct zynqmp_sha_desc_ctx {
- struct shash_desc fbk_req;
-};
-
static dma_addr_t update_dma_addr, final_dma_addr;
static char *ubuf, *fbuf;
+static DEFINE_SPINLOCK(zynqmp_sha_lock);
+
static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
{
const char *fallback_driver_name = crypto_shash_alg_name(hash);
@@ -60,8 +58,13 @@ static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
if (IS_ERR(fallback_tfm))
return PTR_ERR(fallback_tfm);
+ if (crypto_shash_descsize(hash) <
+ crypto_shash_statesize(fallback_tfm)) {
+ crypto_free_shash(fallback_tfm);
+ return -EINVAL;
+ }
+
tfm_ctx->fbk_tfm = fallback_tfm;
- hash->descsize += crypto_shash_descsize(tfm_ctx->fbk_tfm);
return 0;
}
@@ -70,61 +73,55 @@ static void zynqmp_sha_exit_tfm(struct crypto_shash *hash)
{
struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
- if (tfm_ctx->fbk_tfm) {
- crypto_free_shash(tfm_ctx->fbk_tfm);
- tfm_ctx->fbk_tfm = NULL;
- }
+ crypto_free_shash(tfm_ctx->fbk_tfm);
+}
- memzero_explicit(tfm_ctx, sizeof(struct zynqmp_sha_tfm_ctx));
+static int zynqmp_sha_continue(struct shash_desc *desc,
+ struct shash_desc *fbdesc, int err)
+{
+ err = err ?: crypto_shash_export(fbdesc, shash_desc_ctx(desc));
+ shash_desc_zero(fbdesc);
+ return err;
}
static int zynqmp_sha_init(struct shash_desc *desc)
{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct crypto_shash *fbtfm = tctx->fbk_tfm;
+ SHASH_DESC_ON_STACK(fbdesc, fbtfm);
+ int err;
- dctx->fbk_req.tfm = tctx->fbk_tfm;
- return crypto_shash_init(&dctx->fbk_req);
+ fbdesc->tfm = fbtfm;
+ err = crypto_shash_init(fbdesc);
+ return zynqmp_sha_continue(desc, fbdesc, err);
}
static int zynqmp_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length)
{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
-
- return crypto_shash_update(&dctx->fbk_req, data, length);
-}
-
-static int zynqmp_sha_final(struct shash_desc *desc, u8 *out)
-{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct crypto_shash *fbtfm = tctx->fbk_tfm;
+ SHASH_DESC_ON_STACK(fbdesc, fbtfm);
+ int err;
- return crypto_shash_final(&dctx->fbk_req, out);
+ fbdesc->tfm = fbtfm;
+ err = crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?:
+ crypto_shash_update(fbdesc, data, length);
+ return zynqmp_sha_continue(desc, fbdesc, err);
}
static int zynqmp_sha_finup(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
-
- return crypto_shash_finup(&dctx->fbk_req, data, length, out);
-}
-
-static int zynqmp_sha_import(struct shash_desc *desc, const void *in)
-{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct crypto_shash *fbtfm = tctx->fbk_tfm;
+ SHASH_DESC_ON_STACK(fbdesc, fbtfm);
- dctx->fbk_req.tfm = tctx->fbk_tfm;
- return crypto_shash_import(&dctx->fbk_req, in);
+ fbdesc->tfm = fbtfm;
+ return crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?:
+ crypto_shash_finup(fbdesc, data, length, out);
}
-static int zynqmp_sha_export(struct shash_desc *desc, void *out)
-{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
-
- return crypto_shash_export(&dctx->fbk_req, out);
-}
-
-static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
+static int __zynqmp_sha_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
unsigned int remaining_len = len;
int update_size;
@@ -159,26 +156,27 @@ static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned i
return ret;
}
+static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
+{
+ scoped_guard(spinlock_bh, &zynqmp_sha_lock)
+ return __zynqmp_sha_digest(desc, data, len, out);
+}
+
static struct zynqmp_sha_drv_ctx sha3_drv_ctx = {
.sha3_384 = {
.init = zynqmp_sha_init,
.update = zynqmp_sha_update,
- .final = zynqmp_sha_final,
.finup = zynqmp_sha_finup,
.digest = zynqmp_sha_digest,
- .export = zynqmp_sha_export,
- .import = zynqmp_sha_import,
.init_tfm = zynqmp_sha_init_tfm,
.exit_tfm = zynqmp_sha_exit_tfm,
- .descsize = sizeof(struct zynqmp_sha_desc_ctx),
- .statesize = sizeof(struct sha3_state),
+ .descsize = SHA3_384_EXPORT_SIZE,
.digestsize = SHA3_384_DIGEST_SIZE,
.base = {
.cra_name = "sha3-384",
.cra_driver_name = "zynqmp-sha3-384",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA3_384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx),
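
With struct zynqmp_sha_desc_ctx gone, no live shash_desc persists between calls; each operation imports the partial state from shash_desc_ctx(desc) into an on-stack descriptor and exports it back when done, which is why .descsize is now SHA3_384_EXPORT_SIZE. A condensed sketch of that round trip (stateless_update is a hypothetical name mirroring zynqmp_sha_update above):

/*
 * Sketch: one stateless update step built on export/import. The
 * partial hash state lives in shash_desc_ctx(desc) between calls.
 */
static int stateless_update(struct shash_desc *desc,
			    struct crypto_shash *fbtfm,
			    const u8 *data, unsigned int len)
{
	SHASH_DESC_ON_STACK(fbdesc, fbtfm);
	int err;

	fbdesc->tfm = fbtfm;
	err = crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?:
	      crypto_shash_update(fbdesc, data, len) ?:
	      crypto_shash_export(fbdesc, shash_desc_ctx(desc));
	shash_desc_zero(fbdesc);	/* wipe the on-stack copy */
	return err;
}

The hardware digest path still uses the shared ubuf/fbuf DMA buffers, which is what the new zynqmp_sha_lock spinlock (taken via scoped_guard(spinlock_bh, ...)) serializes.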
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index e97d47f42ee2..584c70a34b52 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -13,6 +13,7 @@
#include <linux/mman.h>
#include <linux/memory-tiers.h>
#include <linux/memory_hotplug.h>
+#include <linux/string_helpers.h>
#include "dax-private.h"
#include "bus.h"
@@ -68,7 +69,7 @@ static void kmem_put_memory_types(void)
static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
{
struct device *dev = &dev_dax->dev;
- unsigned long total_len = 0;
+ unsigned long total_len = 0, orig_len = 0;
struct dax_kmem_data *data;
struct memory_dev_type *mtype;
int i, rc, mapped = 0;
@@ -97,6 +98,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
for (i = 0; i < dev_dax->nr_range; i++) {
struct range range;
+ orig_len += range_len(&dev_dax->ranges[i].range);
rc = dax_kmem_range(dev_dax, i, &range);
if (rc) {
dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n",
@@ -109,6 +111,12 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
if (!total_len) {
dev_warn(dev, "rejecting DAX region without any memory after alignment\n");
return -EINVAL;
+ } else if (total_len != orig_len) {
+ char buf[16];
+
+ string_get_size(orig_len - total_len, 1, STRING_UNITS_2,
+ buf, sizeof(buf));
+ dev_warn(dev, "DAX region truncated by %s due to alignment\n", buf);
}
init_node_memory_type(numa_node, mtype);
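
string_get_size() with a block size of 1 and STRING_UNITS_2 renders the raw byte delta with binary-prefix units, so the warning reads like "DAX region truncated by 2.00 MiB due to alignment". A minimal sketch of the formatting call in isolation (values are illustrative):

#include <linux/string_helpers.h>

char buf[16];	/* plenty for string_get_size()'s short output */

/* 3 GiB requested, 1 GiB usable after alignment: prints "2.00 GiB" */
string_get_size((3ULL << 30) - (1ULL << 30), 1, STRING_UNITS_2,
		buf, sizeof(buf));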
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 5baa83b85515..890ecac04dac 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -19,7 +19,9 @@
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
+#include <linux/list.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/sync_file.h>
#include <linux/poll.h>
@@ -35,35 +37,91 @@
static inline int is_dma_buf_file(struct file *);
-#if IS_ENABLED(CONFIG_DEBUG_FS)
-static DEFINE_MUTEX(debugfs_list_mutex);
-static LIST_HEAD(debugfs_list);
+static DEFINE_MUTEX(dmabuf_list_mutex);
+static LIST_HEAD(dmabuf_list);
-static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
+static void __dma_buf_list_add(struct dma_buf *dmabuf)
{
- mutex_lock(&debugfs_list_mutex);
- list_add(&dmabuf->list_node, &debugfs_list);
- mutex_unlock(&debugfs_list_mutex);
+ mutex_lock(&dmabuf_list_mutex);
+ list_add(&dmabuf->list_node, &dmabuf_list);
+ mutex_unlock(&dmabuf_list_mutex);
}
-static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
+static void __dma_buf_list_del(struct dma_buf *dmabuf)
{
if (!dmabuf)
return;
- mutex_lock(&debugfs_list_mutex);
+ mutex_lock(&dmabuf_list_mutex);
list_del(&dmabuf->list_node);
- mutex_unlock(&debugfs_list_mutex);
+ mutex_unlock(&dmabuf_list_mutex);
}
-#else
-static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
+
+/**
+ * dma_buf_iter_begin - begin iteration through global list of all DMA buffers
+ *
+ * Returns the first buffer in the global list of DMA-bufs that's not in the
+ * process of being destroyed. Increments that buffer's reference count to
+ * prevent buffer destruction. Callers must release the reference, either by
+ * continuing iteration with dma_buf_iter_next(), or with dma_buf_put().
+ *
+ * Return:
+ * * First buffer from global list, with refcount elevated
+ * * NULL if no active buffers are present
+ */
+struct dma_buf *dma_buf_iter_begin(void)
{
+ struct dma_buf *ret = NULL, *dmabuf;
+
+ /*
+ * The list mutex does not protect a dmabuf's refcount, so it can be
+ * zeroed while we are iterating. We cannot call get_dma_buf() since the
+ * caller may not already own a reference to the buffer.
+ */
+ mutex_lock(&dmabuf_list_mutex);
+ list_for_each_entry(dmabuf, &dmabuf_list, list_node) {
+ if (file_ref_get(&dmabuf->file->f_ref)) {
+ ret = dmabuf;
+ break;
+ }
+ }
+ mutex_unlock(&dmabuf_list_mutex);
+ return ret;
}
-static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
+/**
+ * dma_buf_iter_next - continue iteration through global list of all DMA buffers
+ * @dmabuf: [in] pointer to dma_buf
+ *
+ * Decrements the reference count on the provided buffer. Returns the next
+ * buffer from the remainder of the global list of DMA-bufs with its reference
+ * count incremented. Callers must release the reference, either by continuing
+ * iteration with dma_buf_iter_next(), or with dma_buf_put().
+ *
+ * Return:
+ * * Next buffer from global list, with refcount elevated
+ * * NULL if no additional active buffers are present
+ */
+struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf)
{
+ struct dma_buf *ret = NULL;
+
+ /*
+ * The list mutex does not protect a dmabuf's refcount, so it can be
+ * zeroed while we are iterating. We cannot call get_dma_buf() since the
+ * caller may not already own a reference to the buffer.
+ */
+ mutex_lock(&dmabuf_list_mutex);
+ dma_buf_put(dmabuf);
+ list_for_each_entry_continue(dmabuf, &dmabuf_list, list_node) {
+ if (file_ref_get(&dmabuf->file->f_ref)) {
+ ret = dmabuf;
+ break;
+ }
+ }
+ mutex_unlock(&dmabuf_list_mutex);
+ return ret;
}
-#endif
static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
@@ -115,7 +173,7 @@ static int dma_buf_file_release(struct inode *inode, struct file *file)
if (!is_dma_buf_file(file))
return -EINVAL;
- __dma_buf_debugfs_list_del(file->private_data);
+ __dma_buf_list_del(file->private_data);
return 0;
}
@@ -636,10 +694,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
|| !exp_info->ops->release))
return ERR_PTR(-EINVAL);
- if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
- (exp_info->ops->pin || exp_info->ops->unpin)))
- return ERR_PTR(-EINVAL);
-
if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
return ERR_PTR(-EINVAL);
@@ -689,7 +743,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
file->f_path.dentry->d_fsdata = dmabuf;
dmabuf->file = file;
- __dma_buf_debugfs_list_add(dmabuf);
+ __dma_buf_list_add(dmabuf);
return dmabuf;
@@ -782,7 +836,7 @@ static void mangle_sg_table(struct sg_table *sg_table)
/* To catch abuse of the underlying struct page by importers mix
* up the bits, but take care to preserve the low SG_ bits to
- * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
+ * not corrupt the sgt. The mixing is undone on unmap
* before passing the sgt back to the exporter.
*/
for_each_sgtable_sg(sg_table, sg, i)
@@ -790,29 +844,19 @@ static void mangle_sg_table(struct sg_table *sg_table)
#endif
}
-static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
- enum dma_data_direction direction)
-{
- struct sg_table *sg_table;
- signed long ret;
- sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
- if (IS_ERR_OR_NULL(sg_table))
- return sg_table;
-
- if (!dma_buf_attachment_is_dynamic(attach)) {
- ret = dma_resv_wait_timeout(attach->dmabuf->resv,
- DMA_RESV_USAGE_KERNEL, true,
- MAX_SCHEDULE_TIMEOUT);
- if (ret < 0) {
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
- direction);
- return ERR_PTR(ret);
- }
- }
+static inline bool
+dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
+{
+ return !!attach->importer_ops;
+}
- mangle_sg_table(sg_table);
- return sg_table;
+static bool
+dma_buf_pin_on_map(struct dma_buf_attachment *attach)
+{
+ return attach->dmabuf->ops->pin &&
+ (!dma_buf_attachment_is_dynamic(attach) ||
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY));
}
/**
@@ -935,48 +979,11 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
list_add(&attach->node, &dmabuf->attachments);
dma_resv_unlock(dmabuf->resv);
- /* When either the importer or the exporter can't handle dynamic
- * mappings we cache the mapping here to avoid issues with the
- * reservation object lock.
- */
- if (dma_buf_attachment_is_dynamic(attach) !=
- dma_buf_is_dynamic(dmabuf)) {
- struct sg_table *sgt;
-
- dma_resv_lock(attach->dmabuf->resv, NULL);
- if (dma_buf_is_dynamic(attach->dmabuf)) {
- ret = dmabuf->ops->pin(attach);
- if (ret)
- goto err_unlock;
- }
-
- sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
- if (!sgt)
- sgt = ERR_PTR(-ENOMEM);
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- goto err_unpin;
- }
- dma_resv_unlock(attach->dmabuf->resv);
- attach->sgt = sgt;
- attach->dir = DMA_BIDIRECTIONAL;
- }
-
return attach;
err_attach:
kfree(attach);
return ERR_PTR(ret);
-
-err_unpin:
- if (dma_buf_is_dynamic(attach->dmabuf))
- dmabuf->ops->unpin(attach);
-
-err_unlock:
- dma_resv_unlock(attach->dmabuf->resv);
-
- dma_buf_detach(dmabuf, attach);
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");
@@ -995,16 +1002,6 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");
-static void __unmap_dma_buf(struct dma_buf_attachment *attach,
- struct sg_table *sg_table,
- enum dma_data_direction direction)
-{
- /* uses XOR, hence this unmangles */
- mangle_sg_table(sg_table);
-
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
-}
-
/**
* dma_buf_detach - Remove the given attachment from dmabuf's attachments list
* @dmabuf: [in] buffer to detach from.
@@ -1020,16 +1017,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
return;
dma_resv_lock(dmabuf->resv, NULL);
-
- if (attach->sgt) {
-
- __unmap_dma_buf(attach, attach->sgt, attach->dir);
-
- if (dma_buf_is_dynamic(attach->dmabuf))
- dmabuf->ops->unpin(attach);
- }
list_del(&attach->node);
-
dma_resv_unlock(dmabuf->resv);
if (dmabuf->ops->detach)
@@ -1058,7 +1046,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
struct dma_buf *dmabuf = attach->dmabuf;
int ret = 0;
- WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+ WARN_ON(!attach->importer_ops);
dma_resv_assert_held(dmabuf->resv);
@@ -1081,7 +1069,7 @@ void dma_buf_unpin(struct dma_buf_attachment *attach)
{
struct dma_buf *dmabuf = attach->dmabuf;
- WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+ WARN_ON(!attach->importer_ops);
dma_resv_assert_held(dmabuf->resv);
@@ -1115,7 +1103,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table;
- int r;
+ signed long ret;
might_sleep();
@@ -1124,41 +1112,37 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
dma_resv_assert_held(attach->dmabuf->resv);
- if (attach->sgt) {
+ if (dma_buf_pin_on_map(attach)) {
+ ret = attach->dmabuf->ops->pin(attach);
/*
- * Two mappings with different directions for the same
- * attachment are not allowed.
+ * Catch exporters that make buffers inaccessible even though
+ * attachments that should prevent this exist.
*/
- if (attach->dir != direction &&
- attach->dir != DMA_BIDIRECTIONAL)
- return ERR_PTR(-EBUSY);
-
- return attach->sgt;
- }
-
- if (dma_buf_is_dynamic(attach->dmabuf)) {
- if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
- r = attach->dmabuf->ops->pin(attach);
- if (r)
- return ERR_PTR(r);
- }
+ WARN_ON_ONCE(ret == -EBUSY);
+ if (ret)
+ return ERR_PTR(ret);
}
- sg_table = __map_dma_buf(attach, direction);
+ sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
+ if (IS_ERR(sg_table))
+ goto error_unpin;
- if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
- !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
- attach->dmabuf->ops->unpin(attach);
-
- if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
- attach->sgt = sg_table;
- attach->dir = direction;
+ /*
+ * Importers with static attachments don't wait for fences.
+ */
+ if (!dma_buf_attachment_is_dynamic(attach)) {
+ ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ goto error_unmap;
}
+ mangle_sg_table(sg_table);
#ifdef CONFIG_DMA_API_DEBUG
- if (!IS_ERR(sg_table)) {
+ {
struct scatterlist *sg;
u64 addr;
int len;
@@ -1175,6 +1159,16 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
}
#endif /* CONFIG_DMA_API_DEBUG */
return sg_table;
+
+error_unmap:
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+ sg_table = ERR_PTR(ret);
+
+error_unpin:
+ if (dma_buf_pin_on_map(attach))
+ attach->dmabuf->ops->unpin(attach);
+
+ return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");
@@ -1227,14 +1221,11 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
dma_resv_assert_held(attach->dmabuf->resv);
- if (attach->sgt == sg_table)
- return;
-
- __unmap_dma_buf(attach, sg_table, direction);
+ mangle_sg_table(sg_table);
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
- if (dma_buf_is_dynamic(attach->dmabuf) &&
- !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
- dma_buf_unpin(attach);
+ if (dma_buf_pin_on_map(attach))
+ attach->dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");
@@ -1630,7 +1621,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
size_t size = 0;
int ret;
- ret = mutex_lock_interruptible(&debugfs_list_mutex);
+ ret = mutex_lock_interruptible(&dmabuf_list_mutex);
if (ret)
return ret;
@@ -1639,7 +1630,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
"size", "flags", "mode", "count", "ino");
- list_for_each_entry(buf_obj, &debugfs_list, list_node) {
+ list_for_each_entry(buf_obj, &dmabuf_list, list_node) {
ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
if (ret)
@@ -1676,11 +1667,11 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
- mutex_unlock(&debugfs_list_mutex);
+ mutex_unlock(&dmabuf_list_mutex);
return 0;
error_unlock:
- mutex_unlock(&debugfs_list_mutex);
+ mutex_unlock(&dmabuf_list_mutex);
return ret;
}
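
The iterator pair replaces direct walks of the (formerly debugfs-only) list and bakes the refcount handoff into the API. A minimal caller-side sketch; the filter is hypothetical, and the one rule is that an early exit must drop the last reference itself:

/*
 * Sketch: visit every live DMA-buf. dma_buf_iter_next() releases the
 * previous buffer's reference and takes one on the next, so only a
 * premature break needs an explicit dma_buf_put().
 */
struct dma_buf *buf;

for (buf = dma_buf_iter_begin(); buf; buf = dma_buf_iter_next(buf)) {
	if (buf->size > SZ_1G) {	/* hypothetical predicate */
		dma_buf_put(buf);	/* leaving early: drop our ref */
		break;
	}
}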
diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
index 2a059ac0ed27..a495d8a6c2e3 100644
--- a/drivers/dma-buf/dma-fence-unwrap.c
+++ b/drivers/dma-buf/dma-fence-unwrap.c
@@ -79,6 +79,41 @@ static int fence_cmp(const void *_a, const void *_b)
return 0;
}
+/**
+ * dma_fence_dedup_array - Sort and deduplicate an array of dma_fence pointers
+ * @fences: Array of dma_fence pointers to be deduplicated
+ * @num_fences: Number of entries in the @fences array
+ *
+ * Sorts the input array by context, then removes duplicate
+ * fences with the same context, keeping only the most recent one.
+ *
+ * The array is modified in-place and unreferenced duplicate fences are released
+ * via dma_fence_put(). The function returns the new number of fences after
+ * deduplication.
+ *
+ * Return: Number of unique fences remaining in the array.
+ */
+int dma_fence_dedup_array(struct dma_fence **fences, int num_fences)
+{
+ int i, j;
+
+ sort(fences, num_fences, sizeof(*fences), fence_cmp, NULL);
+
+ /*
+ * Only keep the most recent fence for each context.
+ */
+ j = 0;
+ for (i = 1; i < num_fences; i++) {
+ if (fences[i]->context == fences[j]->context)
+ dma_fence_put(fences[i]);
+ else
+ fences[++j] = fences[i];
+ }
+
+ return ++j;
+}
+EXPORT_SYMBOL_GPL(dma_fence_dedup_array);
+
/* Implementation for the dma_fence_merge() macro, don't use directly */
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence **fences,
@@ -87,7 +122,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence *tmp, *unsignaled = NULL, **array;
struct dma_fence_array *result;
ktime_t timestamp;
- int i, j, count;
+ int i, count;
count = 0;
timestamp = ns_to_ktime(0);
@@ -141,19 +176,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
if (count == 0 || count == 1)
goto return_fastpath;
- sort(array, count, sizeof(*array), fence_cmp, NULL);
-
- /*
- * Only keep the most recent fence for each context.
- */
- j = 0;
- for (i = 1; i < count; i++) {
- if (array[i]->context == array[j]->context)
- dma_fence_put(array[i]);
- else
- array[++j] = array[i];
- }
- count = ++j;
+ count = dma_fence_dedup_array(array, count);
if (count > 1) {
result = dma_fence_array_create(count, array,
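
Callers of the new helper only see the shrunken logical length; the array slots past the returned count still hold stale pointers whose references were already dropped. A minimal usage sketch:

/* Sketch: after dedup, fences[0..n-1] hold one fence per context
 * (the most recent); duplicates were released via dma_fence_put().
 */
int n = dma_fence_dedup_array(fences, num_fences);

for (int i = 0; i < n; i++)
	pr_debug("ctx %llu seqno %llu\n",
		 fences[i]->context, fences[i]->seqno);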
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 26d5dc89ea16..82b1b714300d 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -21,8 +21,6 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
-static struct dma_heap *sys_heap;
-
struct system_heap_buffer {
struct dma_heap *heap;
struct list_head attachments;
@@ -424,6 +422,7 @@ static const struct dma_heap_ops system_heap_ops = {
static int __init system_heap_create(void)
{
struct dma_heap_export_info exp_info;
+ struct dma_heap *sys_heap;
exp_info.name = "system";
exp_info.ops = &system_heap_ops;
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
index 9f80a45498f0..261b38816226 100644
--- a/drivers/dma-buf/st-dma-fence.c
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -413,7 +413,7 @@ static int test_wait_timeout(void *arg)
err = 0;
err_free:
timer_delete_sync(&wt.timer);
- destroy_timer_on_stack(&wt.timer);
+ timer_destroy_on_stack(&wt.timer);
dma_fence_signal(wt.f);
dma_fence_put(wt.f);
return err;
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 22a808995f10..4f27ee93a00c 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -173,20 +173,6 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
}
-static void timeline_fence_value_str(struct dma_fence *fence,
- char *str, int size)
-{
- snprintf(str, size, "%lld", fence->seqno);
-}
-
-static void timeline_fence_timeline_value_str(struct dma_fence *fence,
- char *str, int size)
-{
- struct sync_timeline *parent = dma_fence_parent(fence);
-
- snprintf(str, size, "%d", parent->value);
-}
-
static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
struct sync_pt *pt = dma_fence_to_sync_pt(fence);
@@ -208,8 +194,6 @@ static const struct dma_fence_ops timeline_fence_ops = {
.get_timeline_name = timeline_fence_get_timeline_name,
.signaled = timeline_fence_signaled,
.release = timeline_fence_release,
- .fence_value_str = timeline_fence_value_str,
- .timeline_value_str = timeline_fence_timeline_value_str,
.set_deadline = timeline_fence_set_deadline,
};
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 237bce21d1e7..67cd69551e42 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -12,8 +12,6 @@ static struct dentry *dbgfs;
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);
-static LIST_HEAD(sync_file_list_head);
-static DEFINE_SPINLOCK(sync_file_list_lock);
void sync_timeline_debug_add(struct sync_timeline *obj)
{
@@ -33,24 +31,6 @@ void sync_timeline_debug_remove(struct sync_timeline *obj)
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
}
-void sync_file_debug_add(struct sync_file *sync_file)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_file_list_lock, flags);
- list_add_tail(&sync_file->sync_file_list, &sync_file_list_head);
- spin_unlock_irqrestore(&sync_file_list_lock, flags);
-}
-
-void sync_file_debug_remove(struct sync_file *sync_file)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_file_list_lock, flags);
- list_del(&sync_file->sync_file_list);
- spin_unlock_irqrestore(&sync_file_list_lock, flags);
-}
-
static const char *sync_status_str(int status)
{
if (status < 0)
@@ -82,25 +62,8 @@ static void sync_print_fence(struct seq_file *s,
seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
}
- if (fence->ops->timeline_value_str &&
- fence->ops->fence_value_str) {
- char value[64];
- bool success;
-
- fence->ops->fence_value_str(fence, value, sizeof(value));
- success = strlen(value);
-
- if (success) {
- seq_printf(s, ": %s", value);
-
- fence->ops->timeline_value_str(fence, value,
- sizeof(value));
-
- if (strlen(value))
- seq_printf(s, " / %s", value);
- }
- }
-
+ seq_printf(s, ": %lld", fence->seqno);
+ seq_printf(s, " / %d", parent->value);
seq_putc(s, '\n');
}
@@ -118,26 +81,6 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
spin_unlock(&obj->lock);
}
-static void sync_print_sync_file(struct seq_file *s,
- struct sync_file *sync_file)
-{
- char buf[128];
- int i;
-
- seq_printf(s, "[%p] %s: %s\n", sync_file,
- sync_file_get_name(sync_file, buf, sizeof(buf)),
- sync_status_str(dma_fence_get_status(sync_file->fence)));
-
- if (dma_fence_is_array(sync_file->fence)) {
- struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
-
- for (i = 0; i < array->num_fences; ++i)
- sync_print_fence(s, array->fences[i], true);
- } else {
- sync_print_fence(s, sync_file->fence, true);
- }
-}
-
static int sync_info_debugfs_show(struct seq_file *s, void *unused)
{
struct list_head *pos;
@@ -157,15 +100,6 @@ static int sync_info_debugfs_show(struct seq_file *s, void *unused)
seq_puts(s, "fences:\n--------------\n");
- spin_lock_irq(&sync_file_list_lock);
- list_for_each(pos, &sync_file_list_head) {
- struct sync_file *sync_file =
- container_of(pos, struct sync_file, sync_file_list);
-
- sync_print_sync_file(s, sync_file);
- seq_putc(s, '\n');
- }
- spin_unlock_irq(&sync_file_list_lock);
return 0;
}
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index a1bdd62efccd..02af347293d0 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -68,7 +68,5 @@ extern const struct file_operations sw_sync_debugfs_fops;
void sync_timeline_debug_add(struct sync_timeline *obj);
void sync_timeline_debug_remove(struct sync_timeline *obj);
-void sync_file_debug_add(struct sync_file *fence);
-void sync_file_debug_remove(struct sync_file *fence);
#endif /* _LINUX_SYNC_H */
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index e74e36a8ecda..7eee3eb47a8e 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -285,7 +285,6 @@ static int end_cpu_udmabuf(struct dma_buf *buf,
}
static const struct dma_buf_ops udmabuf_ops = {
- .cache_sgt_mapping = true,
.map_dma_buf = map_udmabuf,
.unmap_dma_buf = unmap_udmabuf,
.release = release_udmabuf,
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 760b7d81fcd8..80355d03004d 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -702,27 +702,6 @@ static void idxd_disable_system_pasid(struct idxd_device *idxd)
idxd->pasid = IOMMU_PASID_INVALID;
}
-static int idxd_enable_sva(struct pci_dev *pdev)
-{
- int ret;
-
- ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
- if (ret)
- return ret;
-
- ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
- if (ret)
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
-
- return ret;
-}
-
-static void idxd_disable_sva(struct pci_dev *pdev)
-{
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
-}
-
static int idxd_probe(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
@@ -737,17 +716,13 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "IDXD reset complete\n");
if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
- if (idxd_enable_sva(pdev)) {
- dev_warn(dev, "Unable to turn on user SVA feature.\n");
- } else {
- set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
+ set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
- rc = idxd_enable_system_pasid(idxd);
- if (rc)
- dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
- else
- set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
- }
+ rc = idxd_enable_system_pasid(idxd);
+ if (rc)
+ dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
+ else
+ set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
} else if (!sva) {
dev_warn(dev, "User forced SVA off via module param.\n");
}
@@ -785,8 +760,6 @@ static int idxd_probe(struct idxd_device *idxd)
err:
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- if (device_user_pasid_enabled(idxd))
- idxd_disable_sva(pdev);
return rc;
}
@@ -797,8 +770,6 @@ static void idxd_cleanup(struct idxd_device *idxd)
idxd_cleanup_internals(idxd);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- if (device_user_pasid_enabled(idxd))
- idxd_disable_sva(idxd->pdev);
}
/*
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index c9aba2304de7..5d3c0ae6b342 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -10,7 +10,7 @@
#include <linux/interrupt.h>
#include <linux/dca.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
/* either a kernel change is needed, or we need something like this in kernel */
#ifndef CONFIG_SMP
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index dcd7008fe06b..cae52c654a15 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -1746,9 +1746,9 @@ altr_edac_a10_device_trig(struct file *file, const char __user *user_buf,
local_irq_save(flags);
if (trig_type == ALTR_UE_TRIGGER_CHAR)
- writel(priv->ue_set_mask, set_addr);
+ writew(priv->ue_set_mask, set_addr);
else
- writel(priv->ce_set_mask, set_addr);
+ writew(priv->ce_set_mask, set_addr);
/* Ensure the interrupt test bits are set */
wmb();
@@ -1778,7 +1778,7 @@ altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf,
local_irq_save(flags);
if (trig_type == ALTR_UE_TRIGGER_CHAR) {
- writel(priv->ue_set_mask, set_addr);
+ writew(priv->ue_set_mask, set_addr);
} else {
/* Setup read/write of 4 bytes */
writel(ECC_WORD_WRITE, drvdata->base + ECC_BLK_DBYTECTRL_OFST);
@@ -2131,8 +2131,8 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
edac->irq_chip.name = pdev->dev.of_node->name;
edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask;
- edac->domain = irq_domain_add_linear(pdev->dev.of_node, 64,
- &a10_eccmgr_ic_ops, edac);
+ edac->domain = irq_domain_create_linear(of_fwnode_handle(pdev->dev.of_node),
+ 64, &a10_eccmgr_ic_ops, edac);
if (!edac->domain) {
dev_err(&pdev->dev, "Error adding IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 90f0eb7cc5b9..58b1482a0fbb 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2,8 +2,8 @@
#include <linux/ras.h>
#include <linux/string_choices.h>
#include "amd64_edac.h"
-#include <asm/amd_nb.h>
-#include <asm/amd_node.h>
+#include <asm/amd/nb.h>
+#include <asm/amd/node.h>
static struct edac_pci_ctl_info *pci_ctl;
@@ -2942,13 +2942,13 @@ static void dct_read_mc_regs(struct amd64_pvt *pvt)
* Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
* those are Read-As-Zero.
*/
- rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
+ rdmsrq(MSR_K8_TOP_MEM1, pvt->top_mem);
edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
/* Check first whether TOP_MEM2 is enabled: */
- rdmsrl(MSR_AMD64_SYSCFG, msr_val);
+ rdmsrq(MSR_AMD64_SYSCFG, msr_val);
if (msr_val & BIT(21)) {
- rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
+ rdmsrq(MSR_K8_TOP_MEM2, pvt->top_mem2);
edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
} else {
edac_dbg(0, " TOP_MEM2 disabled\n");
diff --git a/drivers/edac/bluefield_edac.c b/drivers/edac/bluefield_edac.c
index 4942a240c30f..ae3bb7afa103 100644
--- a/drivers/edac/bluefield_edac.c
+++ b/drivers/edac/bluefield_edac.c
@@ -199,8 +199,10 @@ static void bluefield_gather_report_ecc(struct mem_ctl_info *mci,
* error without the detailed information.
*/
err = bluefield_edac_readl(priv, MLXBF_SYNDROM, &dram_syndrom);
- if (err)
+ if (err) {
dev_err(priv->dev, "DRAM syndrom read failed.\n");
+ return;
+ }
serr = FIELD_GET(MLXBF_SYNDROM__SERR, dram_syndrom);
derr = FIELD_GET(MLXBF_SYNDROM__DERR, dram_syndrom);
@@ -213,20 +215,26 @@ static void bluefield_gather_report_ecc(struct mem_ctl_info *mci,
}
err = bluefield_edac_readl(priv, MLXBF_ADD_INFO, &dram_additional_info);
- if (err)
+ if (err) {
dev_err(priv->dev, "DRAM additional info read failed.\n");
+ return;
+ }
err_prank = FIELD_GET(MLXBF_ADD_INFO__ERR_PRANK, dram_additional_info);
ecc_dimm = (err_prank >= 2 && priv->dimm_ranks[0] <= 2) ? 1 : 0;
err = bluefield_edac_readl(priv, MLXBF_ERR_ADDR_0, &edea0);
- if (err)
+ if (err) {
dev_err(priv->dev, "Error addr 0 read failed.\n");
+ return;
+ }
err = bluefield_edac_readl(priv, MLXBF_ERR_ADDR_1, &edea1);
- if (err)
+ if (err) {
dev_err(priv->dev, "Error addr 1 read failed.\n");
+ return;
+ }
ecc_dimm_addr = ((u64)edea1 << 32) | edea0;
@@ -250,8 +258,10 @@ static void bluefield_edac_check(struct mem_ctl_info *mci)
return;
err = bluefield_edac_readl(priv, MLXBF_ECC_CNT, &ecc_count);
- if (err)
+ if (err) {
dev_err(priv->dev, "ECC count read failed.\n");
+ return;
+ }
single_error_count = FIELD_GET(MLXBF_ECC_CNT__SERR_CNT, ecc_count);
double_error_count = FIELD_GET(MLXBF_ECC_CNT__DERR_CNT, ecc_count);
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 355a977019e9..a3fca2567752 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -72,12 +72,6 @@
#define I10NM_SAD_ENABLE(reg) GET_BITFIELD(reg, 0, 0)
#define I10NM_SAD_NM_CACHEABLE(reg) GET_BITFIELD(reg, 5, 5)
-#define RETRY_RD_ERR_LOG_UC BIT(1)
-#define RETRY_RD_ERR_LOG_NOOVER BIT(14)
-#define RETRY_RD_ERR_LOG_EN BIT(15)
-#define RETRY_RD_ERR_LOG_NOOVER_UC (BIT(14) | BIT(1))
-#define RETRY_RD_ERR_LOG_OVER_UC_V (BIT(2) | BIT(1) | BIT(0))
-
static struct list_head *i10nm_edac_list;
static struct res_config *res_cfg;
@@ -85,227 +79,319 @@ static int retry_rd_err_log;
static int decoding_via_mca;
static bool mem_cfg_2lm;
-static u32 offsets_scrub_icx[] = {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8};
-static u32 offsets_scrub_spr[] = {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8};
-static u32 offsets_scrub_spr_hbm0[] = {0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8};
-static u32 offsets_scrub_spr_hbm1[] = {0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8};
-static u32 offsets_demand_icx[] = {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0};
-static u32 offsets_demand_spr[] = {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0};
-static u32 offsets_demand2_spr[] = {0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10};
-static u32 offsets_demand_spr_hbm0[] = {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0};
-static u32 offsets_demand_spr_hbm1[] = {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0};
-
-static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable,
- u32 *offsets_scrub, u32 *offsets_demand,
- u32 *offsets_demand2)
+static struct reg_rrl icx_reg_rrl_ddr = {
+ .set_num = 2,
+ .reg_num = 6,
+ .modes = {LRE_SCRUB, LRE_DEMAND},
+ .offsets = {
+ {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8},
+ {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0},
+ },
+ .widths = {4, 4, 4, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(13),
+ .noover_mask = BIT(14),
+ .en_mask = BIT(15),
+
+ .cecnt_num = 4,
+ .cecnt_offsets = {0x22c18, 0x22c1c, 0x22c20, 0x22c24},
+ .cecnt_widths = {4, 4, 4, 4},
+};
+
+static struct reg_rrl spr_reg_rrl_ddr = {
+ .set_num = 3,
+ .reg_num = 6,
+ .modes = {LRE_SCRUB, LRE_DEMAND, FRE_DEMAND},
+ .offsets = {
+ {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8},
+ {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0},
+ {0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10},
+ },
+ .widths = {4, 4, 8, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(13),
+ .noover_mask = BIT(14),
+ .en_mask = BIT(15),
+
+ .cecnt_num = 4,
+ .cecnt_offsets = {0x22c18, 0x22c1c, 0x22c20, 0x22c24},
+ .cecnt_widths = {4, 4, 4, 4},
+};
+
+static struct reg_rrl spr_reg_rrl_hbm_pch0 = {
+ .set_num = 2,
+ .reg_num = 6,
+ .modes = {LRE_SCRUB, LRE_DEMAND},
+ .offsets = {
+ {0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8},
+ {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0},
+ },
+ .widths = {4, 4, 8, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(13),
+ .noover_mask = BIT(14),
+ .en_mask = BIT(15),
+
+ .cecnt_num = 4,
+ .cecnt_offsets = {0x2818, 0x281c, 0x2820, 0x2824},
+ .cecnt_widths = {4, 4, 4, 4},
+};
+
+static struct reg_rrl spr_reg_rrl_hbm_pch1 = {
+ .set_num = 2,
+ .reg_num = 6,
+ .modes = {LRE_SCRUB, LRE_DEMAND},
+ .offsets = {
+ {0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8},
+ {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0},
+ },
+ .widths = {4, 4, 8, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(13),
+ .noover_mask = BIT(14),
+ .en_mask = BIT(15),
+
+ .cecnt_num = 4,
+ .cecnt_offsets = {0x2c18, 0x2c1c, 0x2c20, 0x2c24},
+ .cecnt_widths = {4, 4, 4, 4},
+};
+
+static struct reg_rrl gnr_reg_rrl_ddr = {
+ .set_num = 4,
+ .reg_num = 6,
+ .modes = {FRE_SCRUB, FRE_DEMAND, LRE_SCRUB, LRE_DEMAND},
+ .offsets = {
+ {0x2f10, 0x2f20, 0x2f30, 0x2f50, 0x2f60, 0xba0},
+ {0x2f14, 0x2f24, 0x2f38, 0x2f54, 0x2f64, 0xba8},
+ {0x2f18, 0x2f28, 0x2f40, 0x2f58, 0x2f68, 0xbb0},
+ {0x2f1c, 0x2f2c, 0x2f48, 0x2f5c, 0x2f6c, 0xbb8},
+ },
+ .widths = {4, 4, 8, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(14),
+ .noover_mask = BIT(15),
+ .en_mask = BIT(12),
+
+ .cecnt_num = 8,
+ .cecnt_offsets = {0x2c10, 0x2c14, 0x2c18, 0x2c1c, 0x2c20, 0x2c24, 0x2c28, 0x2c2c},
+ .cecnt_widths = {4, 4, 4, 4, 4, 4, 4, 4},
+};
+
+static u64 read_imc_reg(struct skx_imc *imc, int chan, u32 offset, u8 width)
{
- u32 s, d, d2;
+ switch (width) {
+ case 4:
+ return I10NM_GET_REG32(imc, chan, offset);
+ case 8:
+ return I10NM_GET_REG64(imc, chan, offset);
+ default:
+ i10nm_printk(KERN_ERR, "Invalid readd RRL 0x%x width %d\n", offset, width);
+ return 0;
+ }
+}
+
+static void write_imc_reg(struct skx_imc *imc, int chan, u32 offset, u8 width, u64 val)
+{
+ switch (width) {
+ case 4:
+ return I10NM_SET_REG32(imc, chan, offset, (u32)val);
+ default:
+ i10nm_printk(KERN_ERR, "Invalid write RRL 0x%x width %d\n", offset, width);
+ }
+}
- s = I10NM_GET_REG32(imc, chan, offsets_scrub[0]);
- d = I10NM_GET_REG32(imc, chan, offsets_demand[0]);
- if (offsets_demand2)
- d2 = I10NM_GET_REG32(imc, chan, offsets_demand2[0]);
+static void enable_rrl(struct skx_imc *imc, int chan, struct reg_rrl *rrl,
+ int rrl_set, bool enable, u32 *rrl_ctl)
+{
+ enum rrl_mode mode = rrl->modes[rrl_set];
+ u32 offset = rrl->offsets[rrl_set][0], v;
+ u8 width = rrl->widths[0];
+ bool first, scrub;
+
+ /* First or last read error. */
+ first = (mode == FRE_SCRUB || mode == FRE_DEMAND);
+ /* Patrol scrub or on-demand read error. */
+ scrub = (mode == FRE_SCRUB || mode == LRE_SCRUB);
+
+ v = read_imc_reg(imc, chan, offset, width);
if (enable) {
- /* Save default configurations */
- imc->chan[chan].retry_rd_err_log_s = s;
- imc->chan[chan].retry_rd_err_log_d = d;
- if (offsets_demand2)
- imc->chan[chan].retry_rd_err_log_d2 = d2;
-
- s &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
- s |= RETRY_RD_ERR_LOG_EN;
- d &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
- d |= RETRY_RD_ERR_LOG_EN;
-
- if (offsets_demand2) {
- d2 &= ~RETRY_RD_ERR_LOG_UC;
- d2 |= RETRY_RD_ERR_LOG_NOOVER;
- d2 |= RETRY_RD_ERR_LOG_EN;
- }
+ /* Save default configurations. */
+ *rrl_ctl = v;
+ v &= ~rrl->uc_mask;
+
+ if (first)
+ v |= rrl->noover_mask;
+ else
+ v &= ~rrl->noover_mask;
+
+ if (scrub)
+ v |= rrl->en_patspr_mask;
+ else
+ v &= ~rrl->en_patspr_mask;
+
+ v |= rrl->en_mask;
} else {
- /* Restore default configurations */
- if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC)
- s |= RETRY_RD_ERR_LOG_UC;
- if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_NOOVER)
- s |= RETRY_RD_ERR_LOG_NOOVER;
- if (!(imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_EN))
- s &= ~RETRY_RD_ERR_LOG_EN;
- if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_UC)
- d |= RETRY_RD_ERR_LOG_UC;
- if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_NOOVER)
- d |= RETRY_RD_ERR_LOG_NOOVER;
- if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN))
- d &= ~RETRY_RD_ERR_LOG_EN;
-
- if (offsets_demand2) {
- if (imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_UC)
- d2 |= RETRY_RD_ERR_LOG_UC;
- if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_NOOVER))
- d2 &= ~RETRY_RD_ERR_LOG_NOOVER;
- if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_EN))
- d2 &= ~RETRY_RD_ERR_LOG_EN;
+ /* Restore default configurations. */
+ if (*rrl_ctl & rrl->uc_mask)
+ v |= rrl->uc_mask;
+
+ if (first) {
+ if (!(*rrl_ctl & rrl->noover_mask))
+ v &= ~rrl->noover_mask;
+ } else {
+ if (*rrl_ctl & rrl->noover_mask)
+ v |= rrl->noover_mask;
}
+
+ if (scrub) {
+ if (!(*rrl_ctl & rrl->en_patspr_mask))
+ v &= ~rrl->en_patspr_mask;
+ } else {
+ if (*rrl_ctl & rrl->en_patspr_mask)
+ v |= rrl->en_patspr_mask;
+ }
+
+ if (!(*rrl_ctl & rrl->en_mask))
+ v &= ~rrl->en_mask;
}
- I10NM_SET_REG32(imc, chan, offsets_scrub[0], s);
- I10NM_SET_REG32(imc, chan, offsets_demand[0], d);
- if (offsets_demand2)
- I10NM_SET_REG32(imc, chan, offsets_demand2[0], d2);
+ write_imc_reg(imc, chan, offset, width, v);
+}
+
+static void enable_rrls(struct skx_imc *imc, int chan, struct reg_rrl *rrl,
+ bool enable, u32 *rrl_ctl)
+{
+ for (int i = 0; i < rrl->set_num; i++)
+ enable_rrl(imc, chan, rrl, i, enable, rrl_ctl + i);
+}
+
+static void enable_rrls_ddr(struct skx_imc *imc, bool enable)
+{
+ struct reg_rrl *rrl_ddr = res_cfg->reg_rrl_ddr;
+ int i, chan_num = res_cfg->ddr_chan_num;
+ struct skx_channel *chan = imc->chan;
+
+ if (!imc->mbase)
+ return;
+
+ for (i = 0; i < chan_num; i++)
+ enable_rrls(imc, i, rrl_ddr, enable, chan[i].rrl_ctl[0]);
+}
+
+static void enable_rrls_hbm(struct skx_imc *imc, bool enable)
+{
+ struct reg_rrl **rrl_hbm = res_cfg->reg_rrl_hbm;
+ int i, chan_num = res_cfg->hbm_chan_num;
+ struct skx_channel *chan = imc->chan;
+
+ if (!imc->mbase || !imc->hbm_mc || !rrl_hbm[0] || !rrl_hbm[1])
+ return;
+
+ for (i = 0; i < chan_num; i++) {
+ enable_rrls(imc, i, rrl_hbm[0], enable, chan[i].rrl_ctl[0]);
+ enable_rrls(imc, i, rrl_hbm[1], enable, chan[i].rrl_ctl[1]);
+ }
}
static void enable_retry_rd_err_log(bool enable)
{
- int i, j, imc_num, chan_num;
- struct skx_imc *imc;
struct skx_dev *d;
+ int i, imc_num;
edac_dbg(2, "\n");
list_for_each_entry(d, i10nm_edac_list, list) {
imc_num = res_cfg->ddr_imc_num;
- chan_num = res_cfg->ddr_chan_num;
-
- for (i = 0; i < imc_num; i++) {
- imc = &d->imc[i];
- if (!imc->mbase)
- continue;
-
- for (j = 0; j < chan_num; j++)
- __enable_retry_rd_err_log(imc, j, enable,
- res_cfg->offsets_scrub,
- res_cfg->offsets_demand,
- res_cfg->offsets_demand2);
- }
+ for (i = 0; i < imc_num; i++)
+ enable_rrls_ddr(&d->imc[i], enable);
imc_num += res_cfg->hbm_imc_num;
- chan_num = res_cfg->hbm_chan_num;
-
- for (; i < imc_num; i++) {
- imc = &d->imc[i];
- if (!imc->mbase || !imc->hbm_mc)
- continue;
-
- for (j = 0; j < chan_num; j++) {
- __enable_retry_rd_err_log(imc, j, enable,
- res_cfg->offsets_scrub_hbm0,
- res_cfg->offsets_demand_hbm0,
- NULL);
- __enable_retry_rd_err_log(imc, j, enable,
- res_cfg->offsets_scrub_hbm1,
- res_cfg->offsets_demand_hbm1,
- NULL);
- }
- }
+ for (; i < imc_num; i++)
+ enable_rrls_hbm(&d->imc[i], enable);
}
}
static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
int len, bool scrub_err)
{
+ int i, j, n, ch = res->channel, pch = res->cs & 1;
struct skx_imc *imc = &res->dev->imc[res->imc];
- u32 log0, log1, log2, log3, log4;
- u32 corr0, corr1, corr2, corr3;
- u32 lxg0, lxg1, lxg3, lxg4;
- u32 *xffsets = NULL;
- u64 log2a, log5;
- u64 lxg2a, lxg5;
- u32 *offsets;
- int n, pch;
+ u64 log, corr, status_mask;
+ struct reg_rrl *rrl;
+ bool scrub;
+ u32 offset;
+ u8 width;
if (!imc->mbase)
return;
- if (imc->hbm_mc) {
- pch = res->cs & 1;
+ rrl = imc->hbm_mc ? res_cfg->reg_rrl_hbm[pch] : res_cfg->reg_rrl_ddr;
- if (pch)
- offsets = scrub_err ? res_cfg->offsets_scrub_hbm1 :
- res_cfg->offsets_demand_hbm1;
- else
- offsets = scrub_err ? res_cfg->offsets_scrub_hbm0 :
- res_cfg->offsets_demand_hbm0;
- } else {
- if (scrub_err) {
- offsets = res_cfg->offsets_scrub;
- } else {
- offsets = res_cfg->offsets_demand;
- xffsets = res_cfg->offsets_demand2;
- }
- }
+ if (!rrl)
+ return;
- log0 = I10NM_GET_REG32(imc, res->channel, offsets[0]);
- log1 = I10NM_GET_REG32(imc, res->channel, offsets[1]);
- log3 = I10NM_GET_REG32(imc, res->channel, offsets[3]);
- log4 = I10NM_GET_REG32(imc, res->channel, offsets[4]);
- log5 = I10NM_GET_REG64(imc, res->channel, offsets[5]);
-
- if (xffsets) {
- lxg0 = I10NM_GET_REG32(imc, res->channel, xffsets[0]);
- lxg1 = I10NM_GET_REG32(imc, res->channel, xffsets[1]);
- lxg3 = I10NM_GET_REG32(imc, res->channel, xffsets[3]);
- lxg4 = I10NM_GET_REG32(imc, res->channel, xffsets[4]);
- lxg5 = I10NM_GET_REG64(imc, res->channel, xffsets[5]);
- }
+ status_mask = rrl->over_mask | rrl->uc_mask | rrl->v_mask;
- if (res_cfg->type == SPR) {
- log2a = I10NM_GET_REG64(imc, res->channel, offsets[2]);
- n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx",
- log0, log1, log2a, log3, log4, log5);
+ n = snprintf(msg, len, " retry_rd_err_log[");
+ for (i = 0; i < rrl->set_num; i++) {
+ scrub = (rrl->modes[i] == FRE_SCRUB || rrl->modes[i] == LRE_SCRUB);
+ if (scrub_err != scrub)
+ continue;
- if (len - n > 0) {
- if (xffsets) {
- lxg2a = I10NM_GET_REG64(imc, res->channel, xffsets[2]);
- n += snprintf(msg + n, len - n, " %.8x %.8x %.16llx %.8x %.8x %.16llx]",
- lxg0, lxg1, lxg2a, lxg3, lxg4, lxg5);
- } else {
- n += snprintf(msg + n, len - n, "]");
- }
- }
- } else {
- log2 = I10NM_GET_REG32(imc, res->channel, offsets[2]);
- n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x %.16llx]",
- log0, log1, log2, log3, log4, log5);
- }
+ for (j = 0; j < rrl->reg_num && len - n > 0; j++) {
+ offset = rrl->offsets[i][j];
+ width = rrl->widths[j];
+ log = read_imc_reg(imc, ch, offset, width);
- if (imc->hbm_mc) {
- if (pch) {
- corr0 = I10NM_GET_REG32(imc, res->channel, 0x2c18);
- corr1 = I10NM_GET_REG32(imc, res->channel, 0x2c1c);
- corr2 = I10NM_GET_REG32(imc, res->channel, 0x2c20);
- corr3 = I10NM_GET_REG32(imc, res->channel, 0x2c24);
- } else {
- corr0 = I10NM_GET_REG32(imc, res->channel, 0x2818);
- corr1 = I10NM_GET_REG32(imc, res->channel, 0x281c);
- corr2 = I10NM_GET_REG32(imc, res->channel, 0x2820);
- corr3 = I10NM_GET_REG32(imc, res->channel, 0x2824);
+ if (width == 4)
+ n += snprintf(msg + n, len - n, "%.8llx ", log);
+ else
+ n += snprintf(msg + n, len - n, "%.16llx ", log);
+
+ /* Clear RRL status if RRL is in Linux control mode. */
+ if (retry_rd_err_log == 2 && !j && (log & status_mask))
+ write_imc_reg(imc, ch, offset, width, log & ~status_mask);
}
- } else {
- corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18);
- corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c);
- corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20);
- corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24);
}
- if (len - n > 0)
- snprintf(msg + n, len - n,
- " correrrcnt[%.4x %.4x %.4x %.4x %.4x %.4x %.4x %.4x]",
- corr0 & 0xffff, corr0 >> 16,
- corr1 & 0xffff, corr1 >> 16,
- corr2 & 0xffff, corr2 >> 16,
- corr3 & 0xffff, corr3 >> 16);
-
- /* Clear status bits */
- if (retry_rd_err_log == 2) {
- if (log0 & RETRY_RD_ERR_LOG_OVER_UC_V) {
- log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
- I10NM_SET_REG32(imc, res->channel, offsets[0], log0);
+ /* Move back one space. */
+ n--;
+ n += snprintf(msg + n, len - n, "]");
+
+ if (len - n > 0) {
+ n += snprintf(msg + n, len - n, " correrrcnt[");
+ for (i = 0; i < rrl->cecnt_num && len - n > 0; i++) {
+ offset = rrl->cecnt_offsets[i];
+ width = rrl->cecnt_widths[i];
+ corr = read_imc_reg(imc, ch, offset, width);
+
+ /* CPUs {ICX,SPR} encode two counters per 4-byte CORRERRCNT register. */
+ if (res_cfg->type <= SPR) {
+ n += snprintf(msg + n, len - n, "%.4llx %.4llx ",
+ corr & 0xffff, corr >> 16);
+ } else {
+ /* CPUs {GNR} encode one counter per CORRERRCNT register. */
+ if (width == 4)
+ n += snprintf(msg + n, len - n, "%.8llx ", corr);
+ else
+ n += snprintf(msg + n, len - n, "%.16llx ", corr);
+ }
}
- if (xffsets && (lxg0 & RETRY_RD_ERR_LOG_OVER_UC_V)) {
- lxg0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
- I10NM_SET_REG32(imc, res->channel, xffsets[0], lxg0);
- }
+ /* Move back one space. */
+ n--;
+ n += snprintf(msg + n, len - n, "]");
}
}
@@ -870,8 +956,7 @@ static struct res_config i10nm_cfg0 = {
.ddr_mdev_bdf = {0, 12, 0},
.hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x108,
- .offsets_scrub = offsets_scrub_icx,
- .offsets_demand = offsets_demand_icx,
+ .reg_rrl_ddr = &icx_reg_rrl_ddr,
};
static struct res_config i10nm_cfg1 = {
@@ -889,8 +974,7 @@ static struct res_config i10nm_cfg1 = {
.ddr_mdev_bdf = {0, 12, 0},
.hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x108,
- .offsets_scrub = offsets_scrub_icx,
- .offsets_demand = offsets_demand_icx,
+ .reg_rrl_ddr = &icx_reg_rrl_ddr,
};
static struct res_config spr_cfg = {
@@ -913,13 +997,9 @@ static struct res_config spr_cfg = {
.ddr_mdev_bdf = {0, 12, 0},
.hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x300,
- .offsets_scrub = offsets_scrub_spr,
- .offsets_scrub_hbm0 = offsets_scrub_spr_hbm0,
- .offsets_scrub_hbm1 = offsets_scrub_spr_hbm1,
- .offsets_demand = offsets_demand_spr,
- .offsets_demand2 = offsets_demand2_spr,
- .offsets_demand_hbm0 = offsets_demand_spr_hbm0,
- .offsets_demand_hbm1 = offsets_demand_spr_hbm1,
+ .reg_rrl_ddr = &spr_reg_rrl_ddr,
+ .reg_rrl_hbm[0] = &spr_reg_rrl_hbm_pch0,
+ .reg_rrl_hbm[1] = &spr_reg_rrl_hbm_pch1,
};
static struct res_config gnr_cfg = {
@@ -937,6 +1017,7 @@ static struct res_config gnr_cfg = {
.uracu_bdf = {0, 0, 1},
.ddr_mdev_bdf = {0, 5, 1},
.sad_all_offset = 0x300,
+ .reg_rrl_ddr = &gnr_reg_rrl_ddr,
};
static const struct x86_cpu_id i10nm_cpuids[] = {
@@ -1108,7 +1189,7 @@ static int __init i10nm_init(void)
mce_register_decode_chain(&i10nm_mce_dec);
skx_setup_debug("i10nm_test");
- if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
+ if (retry_rd_err_log && res_cfg->reg_rrl_ddr) {
skx_set_decode(i10nm_mc_decode, show_retry_rd_err_log);
if (retry_rd_err_log == 2)
enable_retry_rd_err_log(true);
@@ -1128,7 +1209,7 @@ static void __exit i10nm_exit(void)
{
edac_dbg(2, "\n");
- if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
+ if (retry_rd_err_log && res_cfg->reg_rrl_ddr) {
skx_set_decode(NULL, NULL);
if (retry_rd_err_log == 2)
enable_retry_rd_err_log(false);
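
The rewrite turns the per-CPU offset arrays into self-describing struct reg_rrl tables: each table carries its register sets, per-register widths, and control-bit masks, so the common code can stay generic. A condensed sketch of the table-driven access the new helpers enable (dump_rrl_set is hypothetical; read_imc_reg() is the width-dispatching accessor added above):

/*
 * Sketch: dump one RRL register set using the table layout from
 * skx_common.h. 4-byte registers print as 8 hex digits, 8-byte
 * registers as 16.
 */
static void dump_rrl_set(struct skx_imc *imc, int chan,
			 struct reg_rrl *rrl, int set)
{
	int j;

	for (j = 0; j < rrl->reg_num; j++) {
		u64 v = read_imc_reg(imc, chan, rrl->offsets[set][j],
				     rrl->widths[j]);

		if (rrl->widths[j] == 4)
			pr_debug("rrl[%d][%d] = %.8llx\n", set, j, v);
		else
			pr_debug("rrl[%d][%d] = %.16llx\n", set, j, v);
	}
}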
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 204834149579..a53612be4b2f 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -52,6 +52,7 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/mce.h>
+#include <asm/msr.h>
#include "edac_module.h"
#define EDAC_MOD_STR "ie31200_edac"
@@ -89,6 +90,10 @@
#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1 0xa703
#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2 0x4640
#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3 0x4630
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_4 0xa700
+
+/* Alder Lake-S */
+#define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_1 0x4660
#define IE31200_RANKS_PER_CHANNEL 8
#define IE31200_DIMMS_PER_CHANNEL 2
@@ -734,6 +739,8 @@ static const struct pci_device_id ie31200_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_4), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_1), (kernel_ulong_t)&rpl_s_cfg},
{ 0, } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, ie31200_pci_tbl);
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index 5807517ee32d..1930dc00c791 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -127,6 +127,7 @@
static const struct res_config {
bool machine_check;
+ /* The number of present memory controllers. */
int num_imc;
u32 imc_base;
u32 cmf_base;
@@ -240,6 +241,12 @@ static struct work_struct ecclog_work;
#define DID_ADL_N_SKU11 0x467c
#define DID_ADL_N_SKU12 0x4632
+/* Compute die IDs for Arizona Beach with IBECC */
+#define DID_AZB_SKU1 0x4676
+
+/* Compute die IDs for Amston Lake with IBECC */
+#define DID_ASL_SKU1 0x464a
+
/* Compute die IDs for Raptor Lake-P with IBECC */
#define DID_RPL_P_SKU1 0xa706
#define DID_RPL_P_SKU2 0xa707
@@ -595,6 +602,8 @@ static const struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU10), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU11), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU12), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_AZB_SKU1), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ASL_SKU1), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU1), (kernel_ulong_t)&rpl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU2), (kernel_ulong_t)&rpl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU3), (kernel_ulong_t)&rpl_p_cfg },
@@ -1201,23 +1210,21 @@ static void igen6_check(struct mem_ctl_info *mci)
irq_work_queue(&ecclog_irq_work);
}
-static int igen6_register_mci(int mc, u64 mchbar, struct pci_dev *pdev)
+/* Check whether the memory controller is absent. */
+static bool igen6_imc_absent(void __iomem *window)
+{
+ return readl(window + MAD_INTER_CHANNEL_OFFSET) == ~0;
+}
+
+static int igen6_register_mci(int mc, void __iomem *window, struct pci_dev *pdev)
{
struct edac_mc_layer layers[2];
struct mem_ctl_info *mci;
struct igen6_imc *imc;
- void __iomem *window;
int rc;
edac_dbg(2, "\n");
- mchbar += mc * MCHBAR_SIZE;
- window = ioremap(mchbar, MCHBAR_SIZE);
- if (!window) {
- igen6_printk(KERN_ERR, "Failed to ioremap 0x%llx\n", mchbar);
- return -ENODEV;
- }
-
layers[0].type = EDAC_MC_LAYER_CHANNEL;
layers[0].size = NUM_CHANNELS;
layers[0].is_virt_csrow = false;
@@ -1283,7 +1290,6 @@ fail3:
fail2:
edac_mc_free(mci);
fail:
- iounmap(window);
return rc;
}
@@ -1309,6 +1315,56 @@ static void igen6_unregister_mcis(void)
}
}
+static int igen6_register_mcis(struct pci_dev *pdev, u64 mchbar)
+{
+ void __iomem *window;
+ int lmc, pmc, rc;
+ u64 base;
+
+ for (lmc = 0, pmc = 0; pmc < NUM_IMC; pmc++) {
+ base = mchbar + pmc * MCHBAR_SIZE;
+ window = ioremap(base, MCHBAR_SIZE);
+ if (!window) {
+ igen6_printk(KERN_ERR, "Failed to ioremap 0x%llx for mc%d\n", base, pmc);
+ rc = -ENOMEM;
+ goto out_unregister_mcis;
+ }
+
+ if (igen6_imc_absent(window)) {
+ iounmap(window);
+ edac_dbg(2, "Skip absent mc%d\n", pmc);
+ continue;
+ }
+
+ rc = igen6_register_mci(lmc, window, pdev);
+ if (rc)
+ goto out_iounmap;
+
+ /* Done if all present MCs have been detected and registered. */
+ if (++lmc >= res_cfg->num_imc)
+ break;
+ }
+
+ if (!lmc) {
+ igen6_printk(KERN_ERR, "No mc found.\n");
+ return -ENODEV;
+ }
+
+ if (lmc < res_cfg->num_imc)
+ igen6_printk(KERN_WARNING, "Expected %d mcs, but only %d detected.",
+ res_cfg->num_imc, lmc);
+
+ return 0;
+
+out_iounmap:
+ iounmap(window);
+
+out_unregister_mcis:
+ igen6_unregister_mcis();
+
+ return rc;
+}
+
static int igen6_mem_slice_setup(u64 mchbar)
{
struct igen6_imc *imc = &igen6_pvt->imc[0];
@@ -1405,7 +1461,7 @@ static void opstate_set(const struct res_config *cfg, const struct pci_device_id
static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u64 mchbar;
- int i, rc;
+ int rc;
edac_dbg(2, "\n");
@@ -1421,11 +1477,9 @@ static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
opstate_set(res_cfg, ent);
- for (i = 0; i < res_cfg->num_imc; i++) {
- rc = igen6_register_mci(i, mchbar, pdev);
- if (rc)
- goto fail2;
- }
+ rc = igen6_register_mcis(pdev, mchbar);
+ if (rc)
+ goto fail;
if (res_cfg->num_imc > 1) {
rc = igen6_mem_slice_setup(mchbar);
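
The reworked igen6_register_mcis() above decouples logical controller indices
from physical ones so that parts with unpopulated memory controllers still
probe cleanly. A minimal sketch of that pattern, assuming (as the driver does)
that an absent controller reads back all-ones from its first decode register;
the ex_-prefixed names and constants are invented for illustration:

#include <linux/io.h>

#define EX_NUM_IMC      2               /* candidate physical controllers */
#define EX_MCHBAR_SIZE  0x10000         /* per-controller window size */

/* An unbacked register window reads as all-ones. */
static bool ex_imc_absent(void __iomem *window)
{
        return readl(window) == ~0;
}

static int ex_scan_imcs(u64 mchbar)
{
        int lmc = 0, pmc;

        for (pmc = 0; pmc < EX_NUM_IMC; pmc++) {
                void __iomem *window;

                window = ioremap(mchbar + pmc * EX_MCHBAR_SIZE, EX_MCHBAR_SIZE);
                if (!window)
                        return -ENOMEM; /* unwinding of earlier maps elided */

                if (ex_imc_absent(window)) {
                        iounmap(window);
                        continue;       /* skip holes, keep lmc compact */
                }

                /* Real code registers logical controller lmc here and keeps
                 * the mapping for the controller's lifetime.
                 */
                lmc++;
        }

        return lmc ? 0 : -ENODEV;
}
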
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 50d74d3bf0f5..af3c12284a1e 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -3,6 +3,7 @@
#include <linux/slab.h>
#include <asm/cpu.h>
+#include <asm/msr.h>
#include "mce_amd.h"
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index fa5b442b1844..c9ade45c1a99 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -116,6 +116,7 @@ EXPORT_SYMBOL_GPL(skx_adxl_get);
void skx_adxl_put(void)
{
+ adxl_component_count = 0;
kfree(adxl_values);
kfree(adxl_msg);
}
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index ca5408803f87..ec4966f7ea40 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -79,6 +79,47 @@
*/
#define MCACOD_EXT_MEM_ERR 0x280
+/* Max RRL register sets per {,sub-,pseudo-}channel. */
+#define NUM_RRL_SET 4
+/* Max RRL registers per set. */
+#define NUM_RRL_REG 6
+/* Max correctable error count registers. */
+#define NUM_CECNT_REG 8
+
+/* Modes of RRL register set. */
+enum rrl_mode {
+ /* Last read error from patrol scrub. */
+ LRE_SCRUB,
+ /* Last read error from demand. */
+ LRE_DEMAND,
+ /* First read error from patrol scrub. */
+ FRE_SCRUB,
+ /* First read error from demand. */
+ FRE_DEMAND,
+};
+
+/* RRL registers per {,sub-,pseudo-}channel. */
+struct reg_rrl {
+ /* RRL register parts. */
+ int set_num, reg_num;
+ enum rrl_mode modes[NUM_RRL_SET];
+ u32 offsets[NUM_RRL_SET][NUM_RRL_REG];
+ /* RRL register widths in bytes per set. */
+ u8 widths[NUM_RRL_REG];
+ /* RRL control bits of the first register per set. */
+ u32 v_mask;
+ u32 uc_mask;
+ u32 over_mask;
+ u32 en_patspr_mask;
+ u32 noover_mask;
+ u32 en_mask;
+
+ /* CORRERRCNT register parts. */
+ int cecnt_num;
+ u32 cecnt_offsets[NUM_CECNT_REG];
+ u8 cecnt_widths[NUM_CECNT_REG];
+};
+
/*
* Each cpu socket contains some pci devices that provide global
* information, and also some that are local to each of the two
@@ -117,9 +158,11 @@ struct skx_dev {
struct skx_channel {
struct pci_dev *cdev;
struct pci_dev *edev;
- u32 retry_rd_err_log_s;
- u32 retry_rd_err_log_d;
- u32 retry_rd_err_log_d2;
+ /*
+ * Two groups of RRL control registers per channel to save default RRL
+ * settings of two {sub-,pseudo-}channels in Linux RRL control mode.
+ */
+ u32 rrl_ctl[2][NUM_RRL_SET];
struct skx_dimm {
u8 close_pg;
u8 bank_xor_enable;
@@ -232,14 +275,10 @@ struct res_config {
/* HBM mdev device BDF */
struct pci_bdf hbm_mdev_bdf;
int sad_all_offset;
- /* Offsets of retry_rd_err_log registers */
- u32 *offsets_scrub;
- u32 *offsets_scrub_hbm0;
- u32 *offsets_scrub_hbm1;
- u32 *offsets_demand;
- u32 *offsets_demand2;
- u32 *offsets_demand_hbm0;
- u32 *offsets_demand_hbm1;
+ /* RRL register sets per DDR channel */
+ struct reg_rrl *reg_rrl_ddr;
+ /* RRL register sets per HBM channel */
+ struct reg_rrl *reg_rrl_hbm[2];
};
typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci,
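
To make the new layout concrete, here is a hypothetical initializer showing
how a per-platform driver might describe its retry-read-log (RRL) registers
with struct reg_rrl instead of the old flat offsets_* arrays; every offset,
width, and mask value below is invented purely for illustration:

#include <linux/bits.h>

static struct reg_rrl ex_ddr_rrl = {
        /* Two register sets (scrub + demand), three registers each. */
        .set_num = 2,
        .reg_num = 3,
        .modes = { LRE_SCRUB, LRE_DEMAND },
        .offsets = {
                { 0x22c60, 0x22c68, 0x22c70 },  /* scrub set */
                { 0x22e54, 0x22e5c, 0x22e64 },  /* demand set */
        },
        .widths = { 4, 8, 8 },                  /* per-register widths in bytes */
        /* Control bits of the first register of each set. */
        .v_mask = BIT(0),
        .uc_mask = BIT(1),
        .over_mask = BIT(2),
        .en_patspr_mask = BIT(13),
        .noover_mask = BIT(14),
        .en_mask = BIT(15),
        /* Correctable-error counters. */
        .cecnt_num = 4,
        .cecnt_offsets = { 0x22c18, 0x22c1c, 0x22c20, 0x22c24 },
        .cecnt_widths = { 4, 4, 4, 4 },
};
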
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index b0f9ef6ac6df..18cacb9edbbc 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -431,7 +431,7 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
fw_send_request(card, &t, tcode, destination_id, generation, speed,
offset, payload, length, transaction_callback, &d);
wait_for_completion(&d.done);
- destroy_timer_on_stack(&t.split_timeout_timer);
+ timer_destroy_on_stack(&t.split_timeout_timer);
return d.rcode;
}
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index aadc395ee168..bbd2155d8483 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -31,7 +31,6 @@ config ARM_SCPI_PROTOCOL
config ARM_SDE_INTERFACE
bool "ARM Software Delegated Exception Interface (SDEI)"
depends on ARM64
- depends on ACPI_APEI_GHES
help
The Software Delegated Exception Interface (SDEI) is an ARM
standard for registering callbacks from the platform firmware
@@ -268,6 +267,23 @@ config TURRIS_MOX_RWTM
other manufacturing data and also utilize the Entropy Bit Generator
for hardware random number generation.
+if TURRIS_MOX_RWTM
+
+config TURRIS_MOX_RWTM_KEYCTL
+ bool "Turris Mox rWTM ECDSA message signing"
+ default y
+ depends on KEYS
+ depends on ASYMMETRIC_KEY_TYPE
+ select CZNIC_PLATFORMS
+ select TURRIS_SIGNING_KEY
+ help
+ Say Y here to add support for ECDSA message signing with the board private
+ key (each Turris Mox has an ECDSA private key generated in the secure
+ coprocessor when manufactured). This functionality is exposed via the
+ keyctl() syscall.
+
+endif # TURRIS_MOX_RWTM
+
source "drivers/firmware/arm_ffa/Kconfig"
source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/cirrus/Kconfig"
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
index dabd874641d0..e3fb36825978 100644
--- a/drivers/firmware/arm_scmi/Kconfig
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -69,6 +69,19 @@ config ARM_SCMI_DEBUG_COUNTERS
such useful debug counters. This can be helpful for debugging and
SCMI monitoring.
+config ARM_SCMI_QUIRKS
+ bool "Enable SCMI Quirks framework"
+ depends on JUMP_LABEL || COMPILE_TEST
+ default y
+ help
+ Enables support for the SCMI Quirks framework to work around SCMI
+ platform firmware bugs on systems already deployed in the wild.
+
+ The framework allows the definition of platform-specific code quirks
+ that will be associated with and enabled only on the desired platforms
+ depending on the SCMI firmware advertised versions and/or machine
+ compatibles.
+
source "drivers/firmware/arm_scmi/transports/Kconfig"
source "drivers/firmware/arm_scmi/vendors/imx/Kconfig"
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 9ac81adff567..780cd62b2f78 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -3,6 +3,7 @@ scmi-bus-y = bus.o
scmi-core-objs := $(scmi-bus-y)
scmi-driver-y = driver.o notify.o
+scmi-driver-$(CONFIG_ARM_SCMI_QUIRKS) += quirks.o
scmi-driver-$(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) += raw_mode.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 3a5474015f7d..1adef0389475 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -201,55 +201,51 @@ scmi_protocol_table_unregister(const struct scmi_device_id *id_table)
scmi_protocol_device_unrequest(entry);
}
-static const struct scmi_device_id *
-scmi_dev_match_id(struct scmi_device *scmi_dev, const struct scmi_driver *scmi_drv)
+static int scmi_dev_match_by_id_table(struct scmi_device *scmi_dev,
+ const struct scmi_device_id *id_table)
{
- const struct scmi_device_id *id = scmi_drv->id_table;
-
- if (!id)
- return NULL;
-
- for (; id->protocol_id; id++)
- if (id->protocol_id == scmi_dev->protocol_id) {
- if (!id->name)
- return id;
- else if (!strcmp(id->name, scmi_dev->name))
- return id;
- }
+ if (!id_table || !id_table->name)
+ return 0;
+
+ /* Always skip transport devices from matching */
+ for (; id_table->protocol_id && id_table->name; id_table++)
+ if (id_table->protocol_id == scmi_dev->protocol_id &&
+ strncmp(scmi_dev->name, "__scmi_transport_device", 23) &&
+ !strcmp(id_table->name, scmi_dev->name))
+ return 1;
+ return 0;
+}
- return NULL;
+static int scmi_dev_match_id(struct scmi_device *scmi_dev,
+ const struct scmi_driver *scmi_drv)
+{
+ return scmi_dev_match_by_id_table(scmi_dev, scmi_drv->id_table);
}
static int scmi_dev_match(struct device *dev, const struct device_driver *drv)
{
const struct scmi_driver *scmi_drv = to_scmi_driver(drv);
struct scmi_device *scmi_dev = to_scmi_dev(dev);
- const struct scmi_device_id *id;
-
- id = scmi_dev_match_id(scmi_dev, scmi_drv);
- if (id)
- return 1;
- return 0;
+ return scmi_dev_match_id(scmi_dev, scmi_drv);
}
static int scmi_match_by_id_table(struct device *dev, const void *data)
{
- struct scmi_device *sdev = to_scmi_dev(dev);
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
const struct scmi_device_id *id_table = data;
- return sdev->protocol_id == id_table->protocol_id &&
- (id_table->name && !strcmp(sdev->name, id_table->name));
+ return scmi_dev_match_by_id_table(scmi_dev, id_table);
}
static struct scmi_device *scmi_child_dev_find(struct device *parent,
int prot_id, const char *name)
{
- struct scmi_device_id id_table;
+ struct scmi_device_id id_table[2] = { 0 };
struct device *dev;
- id_table.protocol_id = prot_id;
- id_table.name = name;
+ id_table[0].protocol_id = prot_id;
+ id_table[0].name = name;
dev = device_find_child(parent, &id_table, scmi_match_by_id_table);
if (!dev)
@@ -463,6 +459,20 @@ put_dev:
return NULL;
}
+static struct scmi_device *
+_scmi_device_create(struct device_node *np, struct device *parent,
+ int protocol, const char *name)
+{
+ struct scmi_device *sdev;
+
+ sdev = __scmi_device_create(np, parent, protocol, name);
+ if (!sdev)
+ pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
+ of_node_full_name(parent->of_node), protocol, name);
+
+ return sdev;
+}
+
/**
* scmi_device_create - A method to create one or more SCMI devices
*
@@ -495,7 +505,7 @@ struct scmi_device *scmi_device_create(struct device_node *np,
struct scmi_device *scmi_dev = NULL;
if (name)
- return __scmi_device_create(np, parent, protocol, name);
+ return _scmi_device_create(np, parent, protocol, name);
mutex_lock(&scmi_requested_devices_mtx);
phead = idr_find(&scmi_requested_devices, protocol);
@@ -509,18 +519,13 @@ struct scmi_device *scmi_device_create(struct device_node *np,
list_for_each_entry(rdev, phead, node) {
struct scmi_device *sdev;
- sdev = __scmi_device_create(np, parent,
- rdev->id_table->protocol_id,
- rdev->id_table->name);
- /* Report errors and carry on... */
+ sdev = _scmi_device_create(np, parent,
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
if (sdev)
scmi_dev = sdev;
- else
- pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
- of_node_full_name(parent->of_node),
- rdev->id_table->protocol_id,
- rdev->id_table->name);
}
+
mutex_unlock(&scmi_requested_devices_mtx);
return scmi_dev;
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 2ed2279388f0..afa7981efe82 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -11,6 +11,7 @@
#include "protocols.h"
#include "notify.h"
+#include "quirks.h"
/* Updated only after ALL the mandatory features for that version are merged */
#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000
@@ -429,6 +430,23 @@ static void iter_clk_describe_prepare_message(void *message,
msg->rate_index = cpu_to_le32(desc_index);
}
+#define QUIRK_OUT_OF_SPEC_TRIPLET \
+ ({ \
+ /* \
+ * A known quirk: a triplet is returned but num_returned != 3 \
+ * Check for a safe payload size and fix. \
+ */ \
+ if (st->num_returned != 3 && st->num_remaining == 0 && \
+ st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) { \
+ st->num_returned = 3; \
+ st->num_remaining = 0; \
+ } else { \
+ dev_err(p->dev, \
+ "Cannot fix out-of-spec reply !\n"); \
+ return -EPROTO; \
+ } \
+ })
+
static int
iter_clk_describe_update_state(struct scmi_iterator_state *st,
const void *response, void *priv)
@@ -450,19 +468,8 @@ iter_clk_describe_update_state(struct scmi_iterator_state *st,
p->clk->name, st->num_returned, st->num_remaining,
st->rx_len);
- /*
- * A known quirk: a triplet is returned but num_returned != 3
- * Check for a safe payload size and fix.
- */
- if (st->num_returned != 3 && st->num_remaining == 0 &&
- st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {
- st->num_returned = 3;
- st->num_remaining = 0;
- } else {
- dev_err(p->dev,
- "Cannot fix out-of-spec reply !\n");
- return -EPROTO;
- }
+ SCMI_QUIRK(clock_rates_triplet_out_of_spec,
+ QUIRK_OUT_OF_SPEC_TRIPLET);
}
return 0;
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 10ea7962323e..dab758c5fdea 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -475,6 +475,7 @@ static int __tag##_probe(struct platform_device *pdev) \
if (ret) \
goto err; \
\
+ spdev->dev.parent = dev; \
ret = platform_device_add(spdev); \
if (ret) \
goto err; \
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 0390d5ff195e..395fe9289035 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -11,7 +11,7 @@
* various power domain DVFS including the core/cluster, certain system
* clocks configuration, thermal sensors and many others.
*
- * Copyright (C) 2018-2024 ARM Ltd.
+ * Copyright (C) 2018-2025 ARM Ltd.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -38,6 +38,7 @@
#include "common.h"
#include "notify.h"
+#include "quirks.h"
#include "raw_mode.h"
@@ -439,14 +440,8 @@ static void scmi_create_protocol_devices(struct device_node *np,
struct scmi_info *info,
int prot_id, const char *name)
{
- struct scmi_device *sdev;
-
mutex_lock(&info->devreq_mtx);
- sdev = scmi_device_create(np, info->dev, prot_id, name);
- if (name && !sdev)
- dev_err(info->dev,
- "failed to create device for protocol 0x%X (%s)\n",
- prot_id, name);
+ scmi_device_create(np, info->dev, prot_id, name);
mutex_unlock(&info->devreq_mtx);
}
@@ -1190,7 +1185,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
* RX path since it will be already queued at the end of the TX
* poll loop.
*/
- if (!xfer->hdr.poll_completion)
+ if (!xfer->hdr.poll_completion ||
+ xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
scmi_raw_message_report(info->raw, xfer,
SCMI_RAW_REPLY_QUEUE,
cinfo->id);
@@ -1738,6 +1734,39 @@ static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph)
}
/**
+ * scmi_protocol_msg_check - Check protocol message attributes
+ *
+ * @ph: A reference to the protocol handle.
+ * @message_id: The ID of the message to check.
+ * @attributes: A parameter to optionally return the retrieved message
+ * attributes, in case of Success.
+ *
+ * A helper to check protocol message attributes for a specific protocol
+ * and message pair.
+ *
+ * Return: 0 on SUCCESS
+ */
+static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
+ u32 message_id, u32 *attributes)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
+ sizeof(__le32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(message_id, t->tx.buf);
+ ret = do_xfer(ph, t);
+ if (!ret && attributes)
+ *attributes = get_unaligned_le32(t->rx.buf);
+ xfer_put(ph, t);
+
+ return ret;
+}
+
+/**
* struct scmi_iterator - Iterator descriptor
* @msg: A reference to the message TX buffer; filled by @prepare_message with
* a proper custom command payload for each multi-part command request.
@@ -1869,6 +1898,13 @@ struct scmi_msg_resp_desc_fc {
__le32 db_preserve_hmask;
};
+#define QUIRK_PERF_FC_FORCE \
+ ({ \
+ if (pi->proto->id == SCMI_PROTOCOL_PERF && \
+ message_id == 0x8 /* PERF_LEVEL_GET */) \
+ attributes |= BIT(0); \
+ })
+
static void
scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
u8 describe_id, u32 message_id, u32 valid_size,
@@ -1878,6 +1914,7 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
int ret;
u32 flags;
u64 phys_addr;
+ u32 attributes;
u8 size;
void __iomem *addr;
struct scmi_xfer *t;
@@ -1886,6 +1923,16 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
struct scmi_msg_resp_desc_fc *resp;
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ /* Check if the MSG_ID supports fastchannel */
+ ret = scmi_protocol_msg_check(ph, message_id, &attributes);
+ SCMI_QUIRK(perf_level_get_fc_force, QUIRK_PERF_FC_FORCE);
+ if (ret || !MSG_SUPPORTS_FASTCHANNEL(attributes)) {
+ dev_dbg(ph->dev,
+ "Skip FC init for 0x%02X/%d domain:%d - ret:%d\n",
+ pi->proto->id, message_id, domain, ret);
+ return;
+ }
+
if (!p_addr) {
ret = -EINVAL;
goto err_out;
@@ -2003,39 +2050,6 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
SCMI_PROTO_FC_RING_DB(64);
}
-/**
- * scmi_protocol_msg_check - Check protocol message attributes
- *
- * @ph: A reference to the protocol handle.
- * @message_id: The ID of the message to check.
- * @attributes: A parameter to optionally return the retrieved message
- * attributes, in case of Success.
- *
- * An helper to check protocol message attributes for a specific protocol
- * and message pair.
- *
- * Return: 0 on SUCCESS
- */
-static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
- u32 message_id, u32 *attributes)
-{
- int ret;
- struct scmi_xfer *t;
-
- ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
- sizeof(__le32), 0, &t);
- if (ret)
- return ret;
-
- put_unaligned_le32(message_id, t->tx.buf);
- ret = do_xfer(ph, t);
- if (!ret && attributes)
- *attributes = get_unaligned_le32(t->rx.buf);
- xfer_put(ph, t);
-
- return ret;
-}
-
static const struct scmi_proto_helpers_ops helpers_ops = {
.extended_name_get = scmi_common_extended_name_get,
.get_max_msg_size = scmi_common_get_max_msg_size,
@@ -2828,9 +2842,8 @@ static int scmi_bus_notifier(struct notifier_block *nb,
struct scmi_info *info = bus_nb_to_scmi_info(nb);
struct scmi_device *sdev = to_scmi_dev(data);
- /* Skip transport devices and devices of different SCMI instances */
- if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
- sdev->dev.parent != info->dev)
+ /* Skip devices of different SCMI instances */
+ if (sdev->dev.parent != info->dev)
return NOTIFY_DONE;
switch (action) {
@@ -3101,6 +3114,18 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev)
return &trans->desc;
}
+static void scmi_enable_matching_quirks(struct scmi_info *info)
+{
+ struct scmi_revision_info *rev = &info->version;
+
+ dev_dbg(info->dev, "Looking for quirks matching: %s/%s/0x%08X\n",
+ rev->vendor_id, rev->sub_vendor_id, rev->impl_ver);
+
+ /* Enable applicable quirks */
+ scmi_quirks_enable(info->dev, rev->vendor_id,
+ rev->sub_vendor_id, rev->impl_ver);
+}
+
static int scmi_probe(struct platform_device *pdev)
{
int ret;
@@ -3222,6 +3247,8 @@ static int scmi_probe(struct platform_device *pdev)
list_add_tail(&info->node, &scmi_list);
mutex_unlock(&scmi_list_mutex);
+ scmi_enable_matching_quirks(info);
+
for_each_available_child_of_node(np, child) {
u32 prot_id;
@@ -3380,6 +3407,8 @@ static struct dentry *scmi_debugfs_init(void)
static int __init scmi_driver_init(void)
{
+ scmi_quirks_initialize();
+
/* Bail out if no SCMI transport was configured */
if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
return -EINVAL;
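
Taken together, the driver.c change gates fastchannel setup on the per-message
attributes word returned by PROTOCOL_MESSAGE_ATTRIBUTES, whose bit 0
(MSG_SUPPORTS_FASTCHANNEL in protocols.h) advertises fastchannel support. A
sketch of that gate in isolation, with an invented ex_ helper name:

static bool ex_msg_has_fastchannel(const struct scmi_protocol_handle *ph,
                                   u32 message_id)
{
        u32 attributes = 0;

        /* PROTOCOL_MESSAGE_ATTRIBUTES returns one attributes word. */
        if (scmi_protocol_msg_check(ph, message_id, &attributes))
                return false;

        return MSG_SUPPORTS_FASTCHANNEL(attributes);
}
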
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
index aaee57cdcd55..d62c4469d1fd 100644
--- a/drivers/firmware/arm_scmi/protocols.h
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -31,6 +31,8 @@
#define SCMI_PROTOCOL_VENDOR_BASE 0x80
+#define MSG_SUPPORTS_FASTCHANNEL(x) ((x) & BIT(0))
+
enum scmi_common_cmd {
PROTOCOL_VERSION = 0x0,
PROTOCOL_ATTRIBUTES = 0x1,
diff --git a/drivers/firmware/arm_scmi/quirks.c b/drivers/firmware/arm_scmi/quirks.c
new file mode 100644
index 000000000000..03960aca3610
--- /dev/null
+++ b/drivers/firmware/arm_scmi/quirks.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Protocol Quirks
+ *
+ * Copyright (C) 2025 ARM Ltd.
+ */
+
+/**
+ * DOC: Theory of operation
+ *
+ * A framework to define SCMI quirks and their activation conditions based on
+ * existing static_keys kernel facilities.
+ *
+ * Quirks are named and their activation conditions defined using the macro
+ * DEFINE_SCMI_QUIRK() in this file.
+ *
+ * After a quirk is defined, a corresponding entry must also be added to the
+ * global @scmi_quirks_table in this file using __DECLARE_SCMI_QUIRK_ENTRY().
+ *
+ * Additionally, a corresponding quirk declaration must also be added to the
+ * quirks.h file using DECLARE_SCMI_QUIRK().
+ *
+ * The needed quirk code-snippet itself will be defined local to the SCMI code
+ * that it is meant to fix, and will be associated with the previously defined
+ * quirk and its related activation conditions using the macro SCMI_QUIRK().
+ *
+ * At runtime, during the SCMI stack probe sequence, once the SCMI Server has
+ * advertised the running platform Vendor, SubVendor and Implementation Version
+ * data, all the defined quirks matching the activation conditions will be
+ * enabled.
+ *
+ * Example
+ *
+ * quirks.c
+ * --------
+ * DEFINE_SCMI_QUIRK(fix_me, "vendor", "subvend", "0x12000-0x30000",
+ * "someone,plat_A", "another,plat_b", "vend,sku");
+ *
+ * static struct scmi_quirk *scmi_quirks_table[] = {
+ * ...
+ * __DECLARE_SCMI_QUIRK_ENTRY(fix_me),
+ * NULL
+ * };
+ *
+ * quirks.h
+ * --------
+ * DECLARE_SCMI_QUIRK(fix_me);
+ *
+ * <somewhere_in_the_scmi_stack.c>
+ * ------------------------------
+ *
+ * #define QUIRK_CODE_SNIPPET_FIX_ME() \
+ * ({ \
+ * if (p->condition) \
+ * a_ptr->calculated_val = 123; \
+ * })
+ *
+ *
+ * int some_function_to_fix(int param, struct something *p)
+ * {
+ * struct some_struct *a_ptr;
+ *
+ * a_ptr = some_load_func(p);
+ * SCMI_QUIRK(fix_me, QUIRK_CODE_SNIPPET_FIX_ME);
+ * some_more_func(a_ptr);
+ * ...
+ *
+ * return 0;
+ * }
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/hashtable.h>
+#include <linux/kstrtox.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/static_key.h>
+#include <linux/string.h>
+#include <linux/stringhash.h>
+#include <linux/types.h>
+
+#include "quirks.h"
+
+#define SCMI_QUIRKS_HT_SZ 4
+
+struct scmi_quirk {
+ bool enabled;
+ const char *name;
+ char *vendor;
+ char *sub_vendor_id;
+ char *impl_ver_range;
+ u32 start_range;
+ u32 end_range;
+ struct static_key_false *key;
+ struct hlist_node hash;
+ unsigned int hkey;
+ const char *const compats[];
+};
+
+#define __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ...) \
+ static struct scmi_quirk scmi_quirk_entry_ ## _qn = { \
+ .name = __stringify(quirk_ ## _qn), \
+ .vendor = _ven, \
+ .sub_vendor_id = _sub, \
+ .impl_ver_range = _impl, \
+ .key = &(scmi_quirk_ ## _qn), \
+ .compats = { __VA_ARGS__ __VA_OPT__(,) NULL }, \
+ }
+
+#define __DECLARE_SCMI_QUIRK_ENTRY(_qn) (&(scmi_quirk_entry_ ## _qn))
+
+/*
+ * Define a quirk by name and provide the matching tokens where:
+ *
+ * _qn: A string which will be used to build the quirk and the global
+ * static_key names.
+ * _ven : SCMI Vendor ID string match, NULL means any.
+ * _sub : SCMI SubVendor ID string match, NULL means any.
+ * _impl : SCMI Implementation Version string match, NULL means any.
+ * This string can be used to express version ranges which will be
+ * interpreted as follows:
+ *
+ * NULL [0, 0xFFFFFFFF]
+ * "X" [X, X]
+ * "X-" [X, 0xFFFFFFFF]
+ * "-X" [0, X]
+ * "X-Y" [X, Y]
+ *
+ * with X <= Y and <v> in [X, Y] meaning X <= <v> <= Y
+ *
+ * ... : An optional variadic macro argument used to provide a comma-separated
+ * list of compatible string matches; when no variadic argument is
+ * provided, ANY compatible will match this quirk.
+ *
+ * This also implicitly defines a properly named global static key that
+ * will be used to dynamically enable the quirk at initialization time.
+ *
+ * Note that it is possible to associate multiple quirks with the same
+ * matching pattern, if your firmware quality is really astounding :P
+ *
+ * Example:
+ *
+ * Compatibles list NOT provided, so ANY compatible will match:
+ *
+ * DEFINE_SCMI_QUIRK(my_new_issue, "Vend", "SVend", "0x12000-0x30000");
+ *
+ *
+ * A few compatibles provided to match against:
+ *
+ * DEFINE_SCMI_QUIRK(my_new_issue, "Vend", "SVend", "0x12000-0x30000",
+ * "xvend,plat_a", "xvend,plat_b", "xvend,sku_name");
+ */
+#define DEFINE_SCMI_QUIRK(_qn, _ven, _sub, _impl, ...) \
+ DEFINE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn); \
+ __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ##__VA_ARGS__)
+
+/*
+ * Same as DEFINE_SCMI_QUIRK but EXPORTED: this is meant to address quirks
+ * that possibly reside in code that is included in loadable kernel modules
+ * that need to be able to access the global static keys at runtime to
+ * determine whether they are enabled or not. (see SCMI_QUIRK to understand usage)
+ */
+#define DEFINE_SCMI_QUIRK_EXPORTED(_qn, _ven, _sub, _impl, ...) \
+ DEFINE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn); \
+ EXPORT_SYMBOL_GPL(scmi_quirk_ ## _qn); \
+ __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ##__VA_ARGS__)
+
+/* Global Quirks Definitions */
+DEFINE_SCMI_QUIRK(clock_rates_triplet_out_of_spec, NULL, NULL, NULL);
+DEFINE_SCMI_QUIRK(perf_level_get_fc_force, "Qualcomm", NULL, "0x20000-");
+
+/*
+ * Quirks Pointers Array
+ *
+ * This is filled at compile-time with the list of pointers to all the currently
+ * defined quirk descriptors.
+ */
+static struct scmi_quirk *scmi_quirks_table[] = {
+ __DECLARE_SCMI_QUIRK_ENTRY(clock_rates_triplet_out_of_spec),
+ __DECLARE_SCMI_QUIRK_ENTRY(perf_level_get_fc_force),
+ NULL
+};
+
+/*
+ * Quirks HashTable
+ *
+ * A run-time populated hashtable containing all the defined quirks descriptors
+ * hashed by matching pattern.
+ */
+static DEFINE_READ_MOSTLY_HASHTABLE(scmi_quirks_ht, SCMI_QUIRKS_HT_SZ);
+
+static unsigned int scmi_quirk_signature(const char *vend, const char *sub_vend)
+{
+ char *signature, *p;
+ unsigned int hash32;
+ unsigned long hash = 0;
+
+ /* vendor_id/sub_vendor_id guaranteed <= SCMI_SHORT_NAME_MAX_SIZE */
+ signature = kasprintf(GFP_KERNEL, "|%s|%s|", vend ?: "", sub_vend ?: "");
+ if (!signature)
+ return 0;
+
+ pr_debug("SCMI Quirk Signature >>>%s<<<\n", signature);
+
+ p = signature;
+ while (*p)
+ hash = partial_name_hash(tolower(*p++), hash);
+ hash32 = end_name_hash(hash);
+
+ kfree(signature);
+
+ return hash32;
+}
+
+static int scmi_quirk_range_parse(struct scmi_quirk *quirk)
+{
+ const char *last, *first = quirk->impl_ver_range;
+ size_t len;
+ char *sep;
+ int ret;
+
+ quirk->start_range = 0;
+ quirk->end_range = 0xFFFFFFFF;
+ len = quirk->impl_ver_range ? strlen(quirk->impl_ver_range) : 0;
+ if (!len)
+ return 0;
+
+ last = first + len - 1;
+ sep = strchr(quirk->impl_ver_range, '-');
+ if (sep)
+ *sep = '\0';
+
+ if (sep == first) /* -X */
+ ret = kstrtouint(first + 1, 0, &quirk->end_range);
+ else /* X OR X- OR X-y */
+ ret = kstrtouint(first, 0, &quirk->start_range);
+ if (ret)
+ return ret;
+
+ if (!sep)
+ quirk->end_range = quirk->start_range;
+ else if (sep != last) /* x-Y */
+ ret = kstrtouint(sep + 1, 0, &quirk->end_range);
+
+ if (quirk->start_range > quirk->end_range)
+ return -EINVAL;
+
+ return ret;
+}
+
+void scmi_quirks_initialize(void)
+{
+ struct scmi_quirk *quirk;
+ int i;
+
+ for (i = 0, quirk = scmi_quirks_table[0]; quirk;
+ i++, quirk = scmi_quirks_table[i]) {
+ int ret;
+
+ ret = scmi_quirk_range_parse(quirk);
+ if (ret) {
+ pr_err("SCMI skip QUIRK [%s] - BAD RANGE - |%s|\n",
+ quirk->name, quirk->impl_ver_range);
+ continue;
+ }
+ quirk->hkey = scmi_quirk_signature(quirk->vendor,
+ quirk->sub_vendor_id);
+
+ hash_add(scmi_quirks_ht, &quirk->hash, quirk->hkey);
+
+ pr_debug("Registered SCMI QUIRK [%s] -- %p - Key [0x%08X] - %s/%s/[0x%08X-0x%08X]\n",
+ quirk->name, quirk, quirk->hkey,
+ quirk->vendor, quirk->sub_vendor_id,
+ quirk->start_range, quirk->end_range);
+ }
+
+ pr_debug("SCMI Quirks initialized\n");
+}
+
+void scmi_quirks_enable(struct device *dev, const char *vend,
+ const char *subv, const u32 impl)
+{
+ for (int i = 3; i >= 0; i--) {
+ struct scmi_quirk *quirk;
+ unsigned int hkey;
+
+ hkey = scmi_quirk_signature(i > 1 ? vend : NULL,
+ i > 2 ? subv : NULL);
+
+ /*
+ * Note that there could be multiple matches, so we may
+ * enable multiple quirks that are part of the same hash
+ * collision domain... BUT we cannot assume that ALL the
+ * quirks in that domain are a full match.
+ */
+ hash_for_each_possible(scmi_quirks_ht, quirk, hash, hkey) {
+ if (quirk->enabled || quirk->hkey != hkey ||
+ impl < quirk->start_range ||
+ impl > quirk->end_range)
+ continue;
+
+ if (quirk->compats[0] &&
+ !of_machine_compatible_match(quirk->compats))
+ continue;
+
+ dev_info(dev, "Enabling SCMI Quirk [%s]\n",
+ quirk->name);
+
+ dev_dbg(dev,
+ "Quirk matched on: %s/%s/%s/[0x%08X-0x%08X]\n",
+ quirk->compats[0], quirk->vendor,
+ quirk->sub_vendor_id,
+ quirk->start_range, quirk->end_range);
+
+ static_branch_enable(quirk->key);
+ quirk->enabled = true;
+ }
+ }
+}
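
The impl_ver range grammar above ("X", "X-", "-X", "X-Y") can be experimented
with outside the kernel. Below is a small userspace rendition of
scmi_quirk_range_parse(), mirroring its logic with strtoul() in place of
kstrtouint(); the ex_ names are invented and this is a sketch, not the kernel
code itself:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int ex_range_parse(const char *spec, uint32_t *lo, uint32_t *hi)
{
        char buf[32], *sep;

        *lo = 0;
        *hi = 0xFFFFFFFF;
        if (!spec || !*spec)            /* NULL/empty means [0, 0xFFFFFFFF] */
                return 0;

        snprintf(buf, sizeof(buf), "%s", spec);
        sep = strchr(buf, '-');
        if (sep)
                *sep = '\0';

        if (sep == buf)                 /* "-X" */
                *hi = strtoul(buf + 1, NULL, 0);
        else                            /* "X", "X-" or "X-Y" */
                *lo = strtoul(buf, NULL, 0);

        if (!sep)                       /* "X" means exactly [X, X] */
                *hi = *lo;
        else if (sep[1])                /* "X-Y" */
                *hi = strtoul(sep + 1, NULL, 0);

        return *lo <= *hi ? 0 : -1;     /* reject inverted ranges */
}

int main(void)
{
        uint32_t lo, hi;

        ex_range_parse("0x12000-0x30000", &lo, &hi);
        printf("[0x%X, 0x%X]\n", lo, hi);       /* prints [0x12000, 0x30000] */
        return 0;
}
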
diff --git a/drivers/firmware/arm_scmi/quirks.h b/drivers/firmware/arm_scmi/quirks.h
new file mode 100644
index 000000000000..a71fde85a527
--- /dev/null
+++ b/drivers/firmware/arm_scmi/quirks.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol Quirks
+ *
+ * Copyright (C) 2025 ARM Ltd.
+ */
+#ifndef _SCMI_QUIRKS_H
+#define _SCMI_QUIRKS_H
+
+#include <linux/static_key.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_ARM_SCMI_QUIRKS
+
+#define DECLARE_SCMI_QUIRK(_qn) \
+ DECLARE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn)
+
+/*
+ * A helper to associate the actual code snippet to use as a quirk
+ * named as _qn.
+ */
+#define SCMI_QUIRK(_qn, _blk) \
+ do { \
+ if (static_branch_unlikely(&(scmi_quirk_ ## _qn))) \
+ (_blk); \
+ } while (0)
+
+void scmi_quirks_initialize(void);
+void scmi_quirks_enable(struct device *dev, const char *vend,
+ const char *subv, const u32 impl);
+
+#else
+
+#define DECLARE_SCMI_QUIRK(_qn)
+/* Force quirks compilation even when SCMI Quirks are disabled */
+#define SCMI_QUIRK(_qn, _blk) \
+ do { \
+ if (0) \
+ (_blk); \
+ } while (0)
+
+static inline void scmi_quirks_initialize(void) { }
+static inline void scmi_quirks_enable(struct device *dev, const char *vend,
+ const char *sub_vend, const u32 impl) { }
+
+#endif /* CONFIG_ARM_SCMI_QUIRKS */
+
+/* Quirk declarations */
+DECLARE_SCMI_QUIRK(clock_rates_triplet_out_of_spec);
+DECLARE_SCMI_QUIRK(perf_level_get_fc_force);
+
+#endif /* _SCMI_QUIRKS_H */
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index 7cc0d616b8de..3d543b1d8947 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -671,11 +671,13 @@ static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw,
* @len: Length of the message in @buf.
* @chan_id: The channel ID to use.
* @async: A flag stating if an asynchronous command is required.
+ * @poll: A flag stating if a polling transmission is required.
*
* Return: 0 on Success
*/
static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
- void *buf, size_t len, u8 chan_id, bool async)
+ void *buf, size_t len, u8 chan_id,
+ bool async, bool poll)
{
int ret;
struct scmi_xfer *xfer;
@@ -684,6 +686,16 @@ static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
if (ret)
return ret;
+ if (poll) {
+ if (is_transport_polling_capable(raw->desc)) {
+ xfer->hdr.poll_completion = true;
+ } else {
+ dev_err(raw->handle->dev,
+ "Failed to send RAW message - Polling NOT supported\n");
+ return -EINVAL;
+ }
+ }
+
ret = scmi_do_xfer_raw_start(raw, xfer, chan_id, async);
if (ret)
scmi_xfer_raw_put(raw->handle, xfer);
@@ -801,7 +813,7 @@ static ssize_t scmi_dbg_raw_mode_common_read(struct file *filp,
static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos,
- bool async)
+ bool async, bool poll)
{
int ret;
struct scmi_dbg_raw_data *rd = filp->private_data;
@@ -831,7 +843,7 @@ static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
}
ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size,
- rd->chan_id, async);
+ rd->chan_id, async, poll);
/* Reset ppos for next message ... */
rd->tx_size = 0;
@@ -875,7 +887,8 @@ static ssize_t scmi_dbg_raw_mode_message_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos)
{
- return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, false);
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ false, false);
}
static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp,
@@ -964,7 +977,8 @@ static ssize_t scmi_dbg_raw_mode_message_async_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos)
{
- return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, true);
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ true, false);
}
static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
@@ -976,6 +990,40 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
.owner = THIS_MODULE,
};
+static ssize_t scmi_dbg_raw_mode_message_poll_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ false, true);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_message_poll_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_poll_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_dbg_raw_mode_message_poll_async_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ true, true);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_message_poll_async_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_poll_async_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
+ .owner = THIS_MODULE,
+};
+
static ssize_t scmi_test_dbg_raw_mode_notif_read(struct file *filp,
char __user *buf,
size_t count, loff_t *ppos)
@@ -1199,6 +1247,12 @@ void *scmi_raw_mode_init(const struct scmi_handle *handle,
debugfs_create_file("message_async", 0600, raw->dentry, raw,
&scmi_dbg_raw_mode_message_async_fops);
+ debugfs_create_file("message_poll", 0600, raw->dentry, raw,
+ &scmi_dbg_raw_mode_message_poll_fops);
+
+ debugfs_create_file("message_poll_async", 0600, raw->dentry, raw,
+ &scmi_dbg_raw_mode_message_poll_async_fops);
+
debugfs_create_file("notification", 0400, raw->dentry, raw,
&scmi_dbg_raw_mode_notification_fops);
@@ -1230,6 +1284,14 @@ void *scmi_raw_mode_init(const struct scmi_handle *handle,
debugfs_create_file_aux_num("message_async", 0600, chd,
raw, channels[i],
&scmi_dbg_raw_mode_message_async_fops);
+
+ debugfs_create_file_aux_num("message_poll", 0600, chd,
+ raw, channels[i],
+ &scmi_dbg_raw_mode_message_poll_fops);
+
+ debugfs_create_file_aux_num("message_poll_async", 0600,
+ chd, raw, channels[i],
+ &scmi_dbg_raw_mode_message_poll_async_fops);
}
}
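
For completeness, a hedged userspace sketch of exercising the new polling
entry: a raw-mode write starts with the 32-bit SCMI message header (message id
in bits [7:0], protocol id in bits [17:10]) followed by any command payload.
The debugfs path and header value below are illustrative assumptions, issuing
PROTOCOL_VERSION (message 0x0) of the base protocol (0x10) as a polled
transmission:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
        /* msg_id 0x0, type 0, protocol 0x10 (base) -> header 0x4000 */
        uint32_t hdr = 0x10 << 10;
        int fd = open("/sys/kernel/debug/scmi/0/raw/message_poll", O_WRONLY);

        if (fd < 0)
                return 1;
        if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr)) {
                close(fd);
                return 1;
        }
        close(fd);      /* the reply can then be read back from the same file */
        return 0;
}
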
diff --git a/drivers/firmware/arm_scmi/vendors/imx/Kconfig b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
index a01bf5e47301..c34c8c837441 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/Kconfig
+++ b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
@@ -12,6 +12,30 @@ config IMX_SCMI_BBM_EXT
To compile this driver as a module, choose M here: the
module will be called imx-sm-bbm.
+config IMX_SCMI_CPU_EXT
+ tristate "i.MX SCMI CPU EXTENSION"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ depends on IMX_SCMI_CPU_DRV
+ default y if ARCH_MXC
+ help
+ This enables the i.MX System CPU Protocol to manage CPU
+ start, stop, etc.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx-sm-cpu.
+
+config IMX_SCMI_LMM_EXT
+ tristate "i.MX SCMI LMM EXTENSION"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ depends on IMX_SCMI_LMM_DRV
+ default y if ARCH_MXC
+ help
+ This enables the i.MX System Logical Machine Protocol to
+ manage Logical Machine boot, shutdown, etc.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx-sm-lmm.
+
config IMX_SCMI_MISC_EXT
tristate "i.MX SCMI MISC EXTENSION"
depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
diff --git a/drivers/firmware/arm_scmi/vendors/imx/Makefile b/drivers/firmware/arm_scmi/vendors/imx/Makefile
index d3ee6d544924..e3a5ea46345c 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/Makefile
+++ b/drivers/firmware/arm_scmi/vendors/imx/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_IMX_SCMI_BBM_EXT) += imx-sm-bbm.o
+obj-$(CONFIG_IMX_SCMI_CPU_EXT) += imx-sm-cpu.o
+obj-$(CONFIG_IMX_SCMI_LMM_EXT) += imx-sm-lmm.o
obj-$(CONFIG_IMX_SCMI_MISC_EXT) += imx-sm-misc.o
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c
new file mode 100644
index 000000000000..66f47f5371e5
--- /dev/null
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System control and Management Interface (SCMI) NXP CPU Protocol
+ *
+ * Copyright 2025 NXP
+ */
+
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+#include "../../protocols.h"
+#include "../../notify.h"
+
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000
+
+enum scmi_imx_cpu_protocol_cmd {
+ SCMI_IMX_CPU_ATTRIBUTES = 0x3,
+ SCMI_IMX_CPU_START = 0x4,
+ SCMI_IMX_CPU_STOP = 0x5,
+ SCMI_IMX_CPU_RESET_VECTOR_SET = 0x6,
+ SCMI_IMX_CPU_INFO_GET = 0xC,
+};
+
+struct scmi_imx_cpu_info {
+ u32 nr_cpu;
+};
+
+#define SCMI_IMX_CPU_NR_CPU_MASK GENMASK(15, 0)
+struct scmi_msg_imx_cpu_protocol_attributes {
+ __le32 attributes;
+};
+
+struct scmi_msg_imx_cpu_attributes_out {
+ __le32 attributes;
+#define CPU_MAX_NAME 16
+ u8 name[CPU_MAX_NAME];
+};
+
+struct scmi_imx_cpu_reset_vector_set_in {
+ __le32 cpuid;
+#define CPU_VEC_FLAGS_RESUME BIT(31)
+#define CPU_VEC_FLAGS_START BIT(30)
+#define CPU_VEC_FLAGS_BOOT BIT(29)
+ __le32 flags;
+ __le32 resetvectorlow;
+ __le32 resetvectorhigh;
+};
+
+struct scmi_imx_cpu_info_get_out {
+#define CPU_RUN_MODE_START 0
+#define CPU_RUN_MODE_HOLD 1
+#define CPU_RUN_MODE_STOP 2
+#define CPU_RUN_MODE_SLEEP 3
+ __le32 runmode;
+ __le32 sleepmode;
+ __le32 resetvectorlow;
+ __le32 resetvectorhigh;
+};
+
+static int scmi_imx_cpu_validate_cpuid(const struct scmi_protocol_handle *ph,
+ u32 cpuid)
+{
+ struct scmi_imx_cpu_info *info = ph->get_priv(ph);
+
+ if (cpuid >= info->nr_cpu)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int scmi_imx_cpu_start(const struct scmi_protocol_handle *ph,
+ u32 cpuid, bool start)
+{
+ struct scmi_xfer *t;
+ u8 msg_id;
+ int ret;
+
+ ret = scmi_imx_cpu_validate_cpuid(ph, cpuid);
+ if (ret)
+ return ret;
+
+ if (start)
+ msg_id = SCMI_IMX_CPU_START;
+ else
+ msg_id = SCMI_IMX_CPU_STOP;
+
+ ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(cpuid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_reset_vector_set(const struct scmi_protocol_handle *ph,
+ u32 cpuid, u64 vector, bool start,
+ bool boot, bool resume)
+{
+ struct scmi_imx_cpu_reset_vector_set_in *in;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = scmi_imx_cpu_validate_cpuid(ph, cpuid);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_RESET_VECTOR_SET, sizeof(*in),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ in = t->tx.buf;
+ in->cpuid = cpu_to_le32(cpuid);
+ in->flags = cpu_to_le32(0);
+ if (start)
+ in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_START);
+ if (boot)
+ in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_BOOT);
+ if (resume)
+ in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_RESUME);
+ in->resetvectorlow = cpu_to_le32(lower_32_bits(vector));
+ in->resetvectorhigh = cpu_to_le32(upper_32_bits(vector));
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_started(const struct scmi_protocol_handle *ph, u32 cpuid,
+ bool *started)
+{
+ struct scmi_imx_cpu_info_get_out *out;
+ struct scmi_xfer *t;
+ u32 mode;
+ int ret;
+
+ if (!started)
+ return -EINVAL;
+
+ *started = false;
+ ret = scmi_imx_cpu_validate_cpuid(ph, cpuid);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_INFO_GET, sizeof(u32),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(cpuid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ mode = le32_to_cpu(out->runmode);
+ if (mode == CPU_RUN_MODE_START || mode == CPU_RUN_MODE_SLEEP)
+ *started = true;
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static const struct scmi_imx_cpu_proto_ops scmi_imx_cpu_proto_ops = {
+ .cpu_reset_vector_set = scmi_imx_cpu_reset_vector_set,
+ .cpu_start = scmi_imx_cpu_start,
+ .cpu_started = scmi_imx_cpu_started,
+};
+
+static int scmi_imx_cpu_protocol_attributes_get(const struct scmi_protocol_handle *ph,
+ struct scmi_imx_cpu_info *info)
+{
+ struct scmi_msg_imx_cpu_protocol_attributes *attr;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ info->nr_cpu = le32_get_bits(attr->attributes, SCMI_IMX_CPU_NR_CPU_MASK);
+ dev_info(ph->dev, "i.MX SM CPU: %d cpus\n",
+ info->nr_cpu);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_attributes_get(const struct scmi_protocol_handle *ph,
+ u32 cpuid)
+{
+ struct scmi_msg_imx_cpu_attributes_out *out;
+ char name[SCMI_SHORT_NAME_MAX_SIZE] = {'\0'};
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_ATTRIBUTES, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(cpuid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(name, out->name, SCMI_SHORT_NAME_MAX_SIZE);
+ dev_info(ph->dev, "i.MX CPU: name: %s\n", name);
+ } else {
+ dev_err(ph->dev, "i.MX cpu: Failed to get info of cpu(%u)\n", cpuid);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_cpu_info *info;
+ u32 version;
+ int ret, i;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_info(ph->dev, "NXP SM CPU Protocol Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ info = devm_kzalloc(ph->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = scmi_imx_cpu_protocol_attributes_get(ph, info);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < info->nr_cpu; i++) {
+ ret = scmi_imx_cpu_attributes_get(ph, i);
+ if (ret)
+ return ret;
+ }
+
+ return ph->set_priv(ph, info, version);
+}
+
+static const struct scmi_protocol scmi_imx_cpu = {
+ .id = SCMI_PROTOCOL_IMX_CPU,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_imx_cpu_protocol_init,
+ .ops = &scmi_imx_cpu_proto_ops,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
+};
+module_scmi_protocol(scmi_imx_cpu);
+
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_CPU) "-" SCMI_IMX_VENDOR);
+MODULE_DESCRIPTION("i.MX SCMI CPU driver");
+MODULE_LICENSE("GPL");
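
A sketch of how a consumer driver might use this new vendor protocol, assuming
the usual SCMI client pattern in which the ops table is fetched through the
handle's devm_protocol_get(); the core index and reset vector are invented
values and error handling is trimmed:

static int ex_boot_aux_core(struct scmi_device *sdev)
{
        const struct scmi_imx_cpu_proto_ops *cpu_ops;
        struct scmi_protocol_handle *ph;
        u32 cpuid = 1;                  /* hypothetical auxiliary core index */
        int ret;

        cpu_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_CPU,
                                                  &ph);
        if (IS_ERR(cpu_ops))
                return PTR_ERR(cpu_ops);

        /* Point the core at its firmware (boot vector), then release it. */
        ret = cpu_ops->cpu_reset_vector_set(ph, cpuid, 0x80000000, false,
                                            true, false);
        if (ret)
                return ret;

        return cpu_ops->cpu_start(ph, cpuid, true);
}
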
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c
new file mode 100644
index 000000000000..b519c67fe920
--- /dev/null
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System control and Management Interface (SCMI) NXP LMM Protocol
+ *
+ * Copyright 2025 NXP
+ */
+
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+#include "../../protocols.h"
+#include "../../notify.h"
+
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000
+
+enum scmi_imx_lmm_protocol_cmd {
+ SCMI_IMX_LMM_ATTRIBUTES = 0x3,
+ SCMI_IMX_LMM_BOOT = 0x4,
+ SCMI_IMX_LMM_RESET = 0x5,
+ SCMI_IMX_LMM_SHUTDOWN = 0x6,
+ SCMI_IMX_LMM_WAKE = 0x7,
+ SCMI_IMX_LMM_SUSPEND = 0x8,
+ SCMI_IMX_LMM_NOTIFY = 0x9,
+ SCMI_IMX_LMM_RESET_REASON = 0xA,
+ SCMI_IMX_LMM_POWER_ON = 0xB,
+ SCMI_IMX_LMM_RESET_VECTOR_SET = 0xC,
+};
+
+struct scmi_imx_lmm_priv {
+ u32 nr_lmm;
+};
+
+#define SCMI_IMX_LMM_NR_LM_MASK GENMASK(5, 0)
+#define SCMI_IMX_LMM_NR_MAX 16
+struct scmi_msg_imx_lmm_protocol_attributes {
+ __le32 attributes;
+};
+
+struct scmi_msg_imx_lmm_attributes_out {
+ __le32 lmid;
+ __le32 attributes;
+ __le32 state;
+ __le32 errstatus;
+ u8 name[LMM_MAX_NAME];
+};
+
+struct scmi_imx_lmm_reset_vector_set_in {
+ __le32 lmid;
+ __le32 cpuid;
+ __le32 flags; /* reserved for future extension */
+ __le32 resetvectorlow;
+ __le32 resetvectorhigh;
+};
+
+struct scmi_imx_lmm_shutdown_in {
+ __le32 lmid;
+#define SCMI_IMX_LMM_SHUTDOWN_GRACEFUL BIT(0)
+ __le32 flags;
+};
+
+static int scmi_imx_lmm_validate_lmid(const struct scmi_protocol_handle *ph, u32 lmid)
+{
+ struct scmi_imx_lmm_priv *priv = ph->get_priv(ph);
+
+ if (lmid >= priv->nr_lmm)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int scmi_imx_lmm_attributes(const struct scmi_protocol_handle *ph,
+ u32 lmid, struct scmi_imx_lmm_info *info)
+{
+ struct scmi_msg_imx_lmm_attributes_out *out;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_ATTRIBUTES, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(lmid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ info->lmid = le32_to_cpu(out->lmid);
+ info->state = le32_to_cpu(out->state);
+ info->errstatus = le32_to_cpu(out->errstatus);
+ strscpy(info->name, out->name);
+ dev_dbg(ph->dev, "i.MX LMM: Logical Machine(%d), name: %s\n",
+ info->lmid, info->name);
+ } else {
+ dev_err(ph->dev, "i.MX LMM: Failed to get info of Logical Machine(%u)\n", lmid);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int
+scmi_imx_lmm_power_boot(const struct scmi_protocol_handle *ph, u32 lmid, bool boot)
+{
+ struct scmi_xfer *t;
+ u8 msg_id;
+ int ret;
+
+ ret = scmi_imx_lmm_validate_lmid(ph, lmid);
+ if (ret)
+ return ret;
+
+ if (boot)
+ msg_id = SCMI_IMX_LMM_BOOT;
+ else
+ msg_id = SCMI_IMX_LMM_POWER_ON;
+
+ ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(lmid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_lmm_reset_vector_set(const struct scmi_protocol_handle *ph,
+ u32 lmid, u32 cpuid, u32 flags, u64 vector)
+{
+ struct scmi_imx_lmm_reset_vector_set_in *in;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_RESET_VECTOR_SET, sizeof(*in),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ in = t->tx.buf;
+ in->lmid = cpu_to_le32(lmid);
+ in->cpuid = cpu_to_le32(cpuid);
+ in->flags = cpu_to_le32(0);
+ in->resetvectorlow = cpu_to_le32(lower_32_bits(vector));
+ in->resetvectorhigh = cpu_to_le32(upper_32_bits(vector));
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_lmm_shutdown(const struct scmi_protocol_handle *ph, u32 lmid,
+ u32 flags)
+{
+ struct scmi_imx_lmm_shutdown_in *in;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = scmi_imx_lmm_validate_lmid(ph, lmid);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_SHUTDOWN, sizeof(*in),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ in = t->tx.buf;
+ in->lmid = cpu_to_le32(lmid);
+ if (flags & SCMI_IMX_LMM_SHUTDOWN_GRACEFUL)
+ in->flags = cpu_to_le32(SCMI_IMX_LMM_SHUTDOWN_GRACEFUL);
+ else
+ in->flags = cpu_to_le32(0);
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static const struct scmi_imx_lmm_proto_ops scmi_imx_lmm_proto_ops = {
+ .lmm_power_boot = scmi_imx_lmm_power_boot,
+ .lmm_info = scmi_imx_lmm_attributes,
+ .lmm_reset_vector_set = scmi_imx_lmm_reset_vector_set,
+ .lmm_shutdown = scmi_imx_lmm_shutdown,
+};
+
+static int scmi_imx_lmm_protocol_attributes_get(const struct scmi_protocol_handle *ph,
+ struct scmi_imx_lmm_priv *priv)
+{
+ struct scmi_msg_imx_lmm_protocol_attributes *attr;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ priv->nr_lmm = le32_get_bits(attr->attributes, SCMI_IMX_LMM_NR_LM_MASK);
+ if (priv->nr_lmm > SCMI_IMX_LMM_NR_MAX) {
+ dev_err(ph->dev, "i.MX LMM: %d:Exceed max supported Logical Machines\n",
+ priv->nr_lmm);
+ ret = -EINVAL;
+ } else {
+ dev_info(ph->dev, "i.MX LMM: %d Logical Machines\n", priv->nr_lmm);
+ }
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_lmm_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_lmm_priv *info;
+ u32 version;
+ int ret;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_info(ph->dev, "NXP SM LMM Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ info = devm_kzalloc(ph->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = scmi_imx_lmm_protocol_attributes_get(ph, info);
+ if (ret)
+ return ret;
+
+ return ph->set_priv(ph, info, version);
+}
+
+static const struct scmi_protocol scmi_imx_lmm = {
+ .id = SCMI_PROTOCOL_IMX_LMM,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_imx_lmm_protocol_init,
+ .ops = &scmi_imx_lmm_proto_ops,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
+};
+module_scmi_protocol(scmi_imx_lmm);
+
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_LMM) "-" SCMI_IMX_VENDOR);
+MODULE_DESCRIPTION("i.MX SCMI LMM driver");
+MODULE_LICENSE("GPL");
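
And a matching consumer-side sketch for the LMM protocol, requesting a
graceful shutdown of another logical machine through the ops table registered
above; it assumes the graceful flag macro is visible to the caller, and the
lmid is caller-supplied:

static int ex_lmm_graceful_shutdown(struct scmi_device *sdev, u32 lmid)
{
        const struct scmi_imx_lmm_proto_ops *lmm_ops;
        struct scmi_protocol_handle *ph;

        lmm_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_LMM,
                                                  &ph);
        if (IS_ERR(lmm_ops))
                return PTR_ERR(lmm_ops);

        /* Bit 0 of flags requests a graceful (asynchronous) shutdown. */
        return lmm_ops->lmm_shutdown(ph, lmid, SCMI_IMX_LMM_SHUTDOWN_GRACEFUL);
}
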
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
index b2dfd6c46ca2..4e246a78a042 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
@@ -32,6 +32,518 @@ port, and deploy the SM on supported processors.
The SM implements an interface compliant with the Arm SCMI Specification
with additional vendor specific extensions.
+System Control and Management Logical Machine Management Vendor Protocol
+========================================================================
+
+The SM adds the concept of logical machines (LMs). These are analogous to
+VMs and each has its own instance of SCMI. All normal SCMI calls only apply
+to the LM running the calling agent. That includes boot, shutdown, reset,
+suspend, wake, etc. If a caller makes the SCMI base call to get a list
+of agents, it will only get those on that LM. Each LM is completely isolated
+from the others. This is mandatory for these to operate independently.
+
+This protocol is intended to support boot, shutdown, and reset of other logical
+machines (LMs). It is usually used to allow one LM (e.g. OSPM) to manage
+another LM, which is usually an offload or accelerator engine. Notifications
+from this protocol can also be used to manage a communication link to another
+LM. The LMM protocol provides commands to:
+
+- Describe the protocol version.
+- Discover implementation attributes.
+- Discover all the LMs defined in the system.
+- Boot a target LM.
+- Shutdown a target LM (gracefully or forcibly).
+- Reset a target LM (gracefully or forcibly).
+- Wake a target LM from suspend.
+- Suspend a target LM (gracefully).
+- Read boot/shutdown/reset information for a target LM.
+- Get notifications when a target LM boots or shuts down (e.g. if LM 'X'
+  requests notification for LM 'Y', the SCMI firmware will notify LM 'X'
+  whenever LM 'Y' boots or shuts down).
+
+'Graceful' means asking the LM itself to shut down/reset/etc. (e.g. by sending
+a notification to Linux, which then reboots or powers itself down). It is an
+asynchronous command: SUCCESS only means the command was successfully issued,
+not that the reboot/reset has finished.
+
+'Forceful' means the SM itself forces the shutdown/reset/etc. of the LM. It is
+a synchronous command: SUCCESS means the LM has actually been shut down, reset,
+etc.
+Commands without a Graceful/Forceful flag, such as WAKE and SUSPEND, are
+Graceful commands.
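+
+As a concrete illustration (mirroring struct scmi_imx_lmm_shutdown_in in the
+Linux driver), a graceful LMM_SHUTDOWN request for an arbitrary LM ID of 3
+carries two 32-bit little-endian words::
+
+  uint32 lmid  = 3
+  uint32 flags = 0x1   (Bit[0] graceful, Bits[31:1] reserved)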
+
+Commands:
+_________
+
+PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x80
+This command is mandatory.
+
++---------------+--------------------------------------------------------------+
+|Return values |
++---------------+--------------------------------------------------------------+
+|Name |Description |
++---------------+--------------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++---------------+--------------------------------------------------------------+
+|uint32 version | For this revision of the specification, this value must be |
+| | 0x10000. |
++---------------+--------------------------------------------------------------+
+
+PROTOCOL_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x1
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Protocol attributes: |
+| |Bits[31:5] Reserved, must be zero. |
+| |Bits[4:0] Number of Logical Machines |
+| |Note that due to both hardware limitations and reset reason|
+| |field limitations, the max number of LM is 16. The minimum |
+| |is 1. |
++------------------+-----------------------------------------------------------+
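+
+For example, an attributes value of 0x00000010 reports the maximum of 16
+Logical Machines (Bits[4:0] = 16).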
+
+PROTOCOL_MESSAGE_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x2
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: in case the message is implemented and available |
+| |to use. |
+| |NOT_FOUND: if the message identified by message_id is |
+| |invalid or not implemented |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Flags that are associated with a specific command in the |
+| |protocol. For all commands in this protocol, this |
+| |parameter has a value of 0 |
++------------------+-----------------------------------------------------------+
+
+LMM_ATTRIBUTES
+~~~~~~~~~~~~~~
+
+message_id: 0x3
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if valid attributes are returned. |
+| |NOT_FOUND: if lmid is not a valid logical machine ID. |
+| |DENIED: if the agent does not have permission to get info |
+| |for the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |Identifier of the LM whose identification is requested. |
+| |This field is: populated with the lmid of the calling |
+| |agent when the lmid parameter passed via the command is |
+| |0xFFFFFFFF; identical to the lmid field passed via the |
+| |calling parameters in all other cases. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes | Bits[31:0] Reserved, must be zero. |
++------------------+-----------------------------------------------------------+
+|uint32 state | Current state of the LM |
++------------------+-----------------------------------------------------------+
+|uint32 errStatus | Last error status recorded |
++------------------+-----------------------------------------------------------+
+|char name[16] | A NULL terminated ASCII string with the LM name, of up |
+| | to 16 bytes |
++------------------+-----------------------------------------------------------+
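+
+The 0xFFFFFFFF self-discovery convention above can be captured with a small
+illustrative constant (a sketch; this name is not defined by this patch)::
+
+  /* Pass as lmid to have the platform report the caller's own LM */
+  #define SCMI_IMX_LMM_ID_DISCOVER  0xFFFFFFFFU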
+
+LMM_BOOT
+~~~~~~~~
+
+message_id: 0x4
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the LM boot was successfully started. |
+| |NOT_FOUND: if lmid does not point to a valid LM. |
+| |INVALID_PARAMETERS: if lmid is the same as the caller's. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_RESET
+~~~~~~~~~
+
+message_id: 0x5
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Reset flags: |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] Graceful request: |
+| |Set to 1 if the request is a graceful request. |
+| |Set to 0 if the request is a forceful request. |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the graceful reset request completed, or |
+| |the LM was reset for a forceful request. |
+| |NOT_FOUND: if lmid does not point to a valid LM. |
+| |INVALID_PARAMETERS: if lmid is the same as the caller's. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_SHUTDOWN
+~~~~~~~~~~~~
+
+message_id: 0x6
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Shutdown flags: |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] Graceful request: |
+| |Set to 1 if the request is a graceful request. |
+| |Set to 0 if the request is a forceful request. |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the graceful shutdown request completed, or |
+| |the LM was shut down for a forceful request. |
+| |NOT_FOUND: if lmid does not point to a valid LM. |
+| |INVALID_PARAMETERS: if lmid is the same as the caller's. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_WAKE
+~~~~~~~~
+
+message_id: 0x7
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the LM wake command completes successfully. |
+| |NOT_FOUND: if lmid does not point to a valid LM. |
+| |INVALID_PARAMETERS: if lmid is the same as the caller's. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_SUSPEND
+~~~~~~~~~~~
+
+message_id: 0x8
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the LM suspend command completes |
+| |successfully. |
+| |NOT_FOUND: if lmid does not point to a valid LM. |
+| |INVALID_PARAMETERS: if lmid is the same as the caller's. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_NOTIFY
+~~~~~~~~~~
+
+message_id: 0x9
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Notification flags: |
+| |Bits[31:4] Reserved, must be zero. |
+| |Bit[3] Wake (resume) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification. |
+| |Bit[2] Suspend (sleep) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification. |
+| |Bit[1] Shutdown (off) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification. |
+| |Bit[0] Boot (on) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the notification state is successfully |
+| |updated. |
+| |NOT_FOUND: if lmid does not point to a valid LM. |
+| |INVALID_PARAMETERS: if the input flags specify |
+| |unsupported or invalid configurations. |
+| |DENIED: if the agent does not have permission to request |
+| |the notification. |
++------------------+-----------------------------------------------------------+
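+
+A minimal sketch of composing the flags word from the bit assignments above;
+the macro names are illustrative only and not defined by this patch::
+
+  #include <linux/bits.h>
+
+  #define LMM_NOTIFY_BOOT      BIT(0)  /* boot (on) */
+  #define LMM_NOTIFY_SHUTDOWN  BIT(1)  /* shutdown (off) */
+  #define LMM_NOTIFY_SUSPEND   BIT(2)  /* suspend (sleep) */
+  #define LMM_NOTIFY_WAKE      BIT(3)  /* wake (resume) */
+
+  /* Subscribe to boot and shutdown events for the target LM */
+  u32 flags = LMM_NOTIFY_BOOT | LMM_NOTIFY_SHUTDOWN;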
+
+LMM_RESET_REASON
+~~~~~~~~~~~~~~~~
+
+message_id: 0xA
+protocol_id: 0x80
+This command is mandatory.
+
+This command returns the reset reason that caused the last reset, such as
+POR, WDOG, or JTAG.
+
++---------------------+--------------------------------------------------------+
+|Parameters |
++---------------------+--------------------------------------------------------+
+|Name |Description |
++---------------------+--------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++---------------------+--------------------------------------------------------+
+|Return values |
++---------------------+--------------------------------------------------------+
+|Name |Description |
++---------------------+--------------------------------------------------------+
+|int32 status |SUCCESS: if the reset reason of the LM is successfully |
+| |returned. |
+| |NOT_FOUND: if lmid does not point to a valid LM. |
+| |DENIED: if the agent does not have permission to request|
+| |the reset reason. |
++---------------------+--------------------------------------------------------+
+|uint32 bootflags |Boot reason flags. This parameter has the format: |
+| |Bit[31] Valid: |
+| |Set to 1 if the entire reason is valid. |
+| |Set to 0 if the entire reason is not valid. |
+| |Bits[30:29] Reserved, must be zero. |
+| |Bit[28] Valid origin: |
+| |Set to 1 if the origin field is valid. |
+| |Set to 0 if the origin field is not valid. |
+| |Bits[27:24] Origin: |
+| |Logical Machine (LM) ID that caused the boot of this LM |
+| |Bit[23] Valid err ID: |
+| |Set to 1 if the error ID field is valid. |
+| |Set to 0 if the error ID field is not valid. |
+| |Bits[22:8] Error ID (agent ID of the system). |
+| |Bits[7:0] Reason (e.g. WDOG, POR, FCCU): |
+| |See the SRESR register description in the System |
+| |Reset Controller (SRC) section of the SoC reference |
+| |manual. One reason maps to BIT(reason) in SRESR. |
++---------------------+--------------------------------------------------------+
+|uint32 shutdownflags |Shutdown reason flags. This parameter has the format: |
+| |Bit[31] Valid: |
+| |Set to 1 if the entire reason is valid. |
+| |Set to 0 if the entire reason is not valid. |
+| |Bits[30:29] Number of valid extended info words. |
+| |Bit[28] Valid origin: |
+| |Set to 1 if the origin field is valid. |
+| |Set to 0 if the origin field is not valid. |
+| |Bits[27:24] Origin: |
+| |Logical Machine (LM) ID that caused the shutdown of |
+| |this LM |
+| |Bit[23] Valid err ID: |
+| |Set to 1 if the error ID field is valid. |
+| |Set to 0 if the error ID field is not valid. |
+| |Bits[22:8] Error ID (agent ID of the system). |
+| |Bits[7:0] Reason: |
+| |See the SRESR register description in the System |
+| |Reset Controller (SRC) section of the SoC reference |
+| |manual. One reason maps to BIT(reason) in SRESR. |
++---------------------+--------------------------------------------------------+
+|uint32 extinfo[3] |Array of extended info words (e.g. fault PC) |
++---------------------+--------------------------------------------------------+
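+
+A minimal sketch of unpacking the bootflags word per the layout above; the
+masks and helper are illustrative and not part of this patch::
+
+  #include <linux/bitfield.h>
+  #include <linux/bits.h>
+  #include <linux/printk.h>
+
+  #define LMM_RR_VALID         BIT(31)
+  #define LMM_RR_ORIGIN_VALID  BIT(28)
+  #define LMM_RR_ORIGIN        GENMASK(27, 24)
+  #define LMM_RR_ERRID_VALID   BIT(23)
+  #define LMM_RR_ERRID         GENMASK(22, 8)
+  #define LMM_RR_REASON        GENMASK(7, 0)
+
+  static void lmm_decode_bootflags(u32 bootflags)
+  {
+          if (!(bootflags & LMM_RR_VALID))
+                  return;         /* no reason recorded */
+          if (bootflags & LMM_RR_ORIGIN_VALID)
+                  pr_info("origin LM %lu\n",
+                          FIELD_GET(LMM_RR_ORIGIN, bootflags));
+          pr_info("reason bit %lu\n", FIELD_GET(LMM_RR_REASON, bootflags));
+  }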
+
+LMM_POWER_ON
+~~~~~~~~~~~~
+
+message_id: 0xB
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the LM successfully powers on. |
+| |NOT_FOUND: if lmid does not point to a valid LM. |
+| |INVALID_PARAMETERS: if lmid is the same as the caller's. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_RESET_VECTOR_SET
+~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0xC
+protocol_id: 0x80
+This command is mandatory.
+
++-----------------------+------------------------------------------------------+
+|Parameters |
++-----------------------+------------------------------------------------------+
+|Name |Description |
++-----------------------+------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++-----------------------+------------------------------------------------------+
+|uint32 cpuid |ID of the CPU inside the LM |
++-----------------------+------------------------------------------------------+
+|uint32 flags |Reset vector flags |
+| |Bits[31:0] Reserved, must be zero. |
++-----------------------+------------------------------------------------------+
+|uint32 resetVectorLow |Lower vector |
++-----------------------+------------------------------------------------------+
+|uint32 resetVectorHigh |Higher vector |
++-----------------------+------------------------------------------------------+
+|Return values |
++-----------------------+------------------------------------------------------+
+|Name |Description |
++-----------------------+------------------------------------------------------+
+|int32 status |SUCCESS: if the reset vector is set successfully. |
+| |NOT_FOUND: if lmid does not point to a valid LM, or |
+| |cpuid is not valid. |
+| |INVALID_PARAMETERS: if reset vector is invalid. |
+| |DENIED: if the agent does not have permission to set |
+| |the reset vector for the CPU in the LM. |
++-----------------------+------------------------------------------------------+
+
+NEGOTIATE_PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x10
+protocol_id: 0x80
+This command is mandatory.
+
++--------------------+---------------------------------------------------------+
+|Parameters |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|uint32 version |The negotiated protocol version the agent intends to use |
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: if the negotiated protocol version is supported |
+| |by the platform. All commands, responses, and |
+| |notifications post successful return of this command must|
+| |comply with the negotiated version. |
+| |NOT_SUPPORTED: if the protocol version is not supported. |
++--------------------+---------------------------------------------------------+
+
+Notifications
+_____________
+
+LMM_EVENT
+~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x80
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |Identifier for the LM that caused the transition. |
++------------------+-----------------------------------------------------------+
+|uint32 eventlm |Identifier of the LM this event refers to. |
++------------------+-----------------------------------------------------------+
+|uint32 flags |LM events: |
+| |Bits[31:4] Reserved, must be zero. |
+| |Bit[3] Wake (resume) event: |
+| |1 LM has awakened. |
+| |0 not a wake event. |
+| |Bit[2] Suspend (sleep) event: |
+| |1 LM has suspended. |
+| |0 not a suspend event. |
+| |Bit[1] Shutdown (off) event: |
+| |1 LM has shutdown. |
+| |0 not a shutdown event. |
+| |Bit[0] Boot (on) event: |
+| |1 LM has booted. |
+| |0 not a boot event. |
++------------------+-----------------------------------------------------------+
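+
+Putting the commands together: a minimal sketch of driving this protocol from
+kernel code through the wrappers added by drivers/firmware/imx/sm-lmm.c later
+in this patch. It assumes the prototypes and the SCMI_IMX_LMM_BOOT enumerator
+are available from linux/firmware/imx/sm.h; error handling is elided::
+
+  #include <linux/firmware/imx/sm.h>
+
+  static int example_boot_lm(u32 lmid)
+  {
+          struct scmi_imx_lmm_info info;
+          int ret;
+
+          /* Returns -EPROBE_DEFER until the SCMI LMM device has probed */
+          ret = scmi_imx_lmm_info(lmid, &info);
+          if (ret)
+                  return ret;
+
+          /* Power on and start the target logical machine */
+          return scmi_imx_lmm_operation(lmid, SCMI_IMX_LMM_BOOT, 0);
+  }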
+
SCMI_BBM: System Control and Management BBM Vendor Protocol
==============================================================
@@ -436,6 +948,322 @@ protocol_id: 0x81
| |0 no button change detected. |
+------------------+-----------------------------------------------------------+
+System Control and Management CPU Vendor Protocol
+=================================================
+
+This protocol allows an agent to start or stop a CPU. It is used to manage
+auxiliary CPUs in a target LM (e.g. additional cores in an AP cluster or
+Cortex-M cores).
+Note:
+ - For cores in the AP cluster, PSCI should be used and the PSCI firmware
+   will use the CPU protocol to handle them. For cores outside the AP
+   cluster, the operating system (e.g. Linux) can use the CPU protocol to
+   control Cortex-M7 cores.
+ - CPU here means the core and its auxiliary peripherals (e.g. TCM) inside
+   the i.MX SoC.
+
+There are cases where giving an agent full control of a CPU via the CPU
+protocol is not desired. The LMM protocol is restricted to just boot,
+shutdown, etc., so an agent might boot another logical machine without being
+able to directly alter the state of its CPUs. This is also why there is an
+LMM power-on command even though that could have been done through the
+power protocol.
+
+The CPU protocol provides commands to:
+
+- Describe the protocol version.
+- Discover implementation attributes.
+- Discover the CPUs defined in the system.
+- Start a CPU.
+- Stop a CPU.
+- Set the boot and resume addresses for a CPU.
+- Set the sleep mode of a CPU.
+- Configure wake-up sources for a CPU.
+- Configure power domain reactions (LPM mode and retention mask) for a CPU.
+
+The CPU IDs can be found in the CPU section of the SoC DEVICE: SM Device
+Interface. They can also be found in the SoC reference manual; see the CPU
+Mode Control (CMC) list in the General Power Controller (GPC) section.
+
+CPU settings are not aggregated and setting their state is normally exclusive
+to one client.
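+
+As a concrete illustration, a minimal sketch of starting a Cortex-M core via
+the kernel wrappers added by drivers/firmware/imx/sm-cpu.c later in this
+patch. boot_addr is a caller-supplied physical address, and the three bool
+selectors follow the sm-cpu.c prototype (start/boot/resume); error handling
+is elided::
+
+  #include <linux/firmware/imx/sm.h>
+
+  static int example_start_m7(u32 cpuid, u64 boot_addr)
+  {
+          int ret;
+
+          /* Program the boot reset vector, then release the core */
+          ret = scmi_imx_cpu_reset_vector_set(cpuid, boot_addr,
+                                              false, true, false);
+          if (ret)
+                  return ret;
+
+          return scmi_imx_cpu_start(cpuid, true);
+  }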
+
+Commands:
+_________
+
+PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x82
+This command is mandatory.
+
++---------------+--------------------------------------------------------------+
+|Return values |
++---------------+--------------------------------------------------------------+
+|Name |Description |
++---------------+--------------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++---------------+--------------------------------------------------------------+
+|uint32 version | For this revision of the specification, this value must be |
+| | 0x10000. |
++---------------+--------------------------------------------------------------+
+
+PROTOCOL_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x1
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Protocol attributes: |
+| |Bits[31:16] Reserved, must be zero. |
+| |Bits[15:0] Number of CPUs |
++------------------+-----------------------------------------------------------+
+
+PROTOCOL_MESSAGE_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x2
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: in case the message is implemented and available |
+| |to use. |
+| |NOT_FOUND: if the message identified by message_id is |
+| |invalid or not implemented |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Flags that are associated with a specific command in the |
+| |protocol. For all commands in this protocol, this |
+| |parameter has a value of 0 |
++------------------+-----------------------------------------------------------+
+
+CPU_ATTRIBUTES
+~~~~~~~~~~~~~~
+
+message_id: 0x3
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if valid attributes are returned successfully. |
+| |NOT_FOUND: if the cpuid is not valid. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Bits[31:0] Reserved, must be zero |
++------------------+-----------------------------------------------------------+
+|char name[16] |NULL terminated ASCII string with CPU name up to 16 bytes |
++------------------+-----------------------------------------------------------+
+
+CPU_START
+~~~~~~~~~
+
+message_id: 0x4
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the cpu is started successfully. |
+| |NOT_FOUND: if cpuid is not valid. |
+| |DENIED: the calling agent is not allowed to start this CPU.|
++------------------+-----------------------------------------------------------+
+
+CPU_STOP
+~~~~~~~~
+
+message_id: 0x5
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the cpu is stopped successfully. |
+| |NOT_FOUND: if cpuid is not valid. |
+| |DENIED: the calling agent is not allowed to stop this CPU. |
++------------------+-----------------------------------------------------------+
+
+CPU_RESET_VECTOR_SET
+~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x6
+protocol_id: 0x82
+This command is mandatory.
+
++----------------------+-------------------------------------------------------+
+|Parameters |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++----------------------+-------------------------------------------------------+
+|uint32 flags |Reset vector flags: |
+| |Bit[31] Resume flag. |
+| |Set to 1 to update the reset vector used on resume. |
+| |Bit[30] Boot flag. |
+| |Set to 1 to update the reset vector used for boot. |
+| |Bits[29:1] Reserved, must be zero. |
+| |Bit[0] Table flag. |
+| |Set to 1 if vector is the vector table base address. |
++----------------------+-------------------------------------------------------+
+|uint32 resetVectorLow |Lower vector: |
+| |If bit[0] of flags is 0, the lower 32 bits of the |
+| |physical address where the CPU should execute from on |
+| |reset. If bit[0] of flags is 1, the lower 32 bits of |
+| |the vector table base address |
++----------------------+-------------------------------------------------------+
+|uint32 resetVectorHigh|Upper vector: |
+| |If bit[0] of flags is 0, the upper 32 bits of the |
+| |physical address where the CPU should execute from on |
+| |reset. If bit[0] of flags is 1, the upper 32 bits of |
+| |the vector table base address |
++----------------------+-------------------------------------------------------+
+|Return values |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|int32 status |SUCCESS: if the CPU reset vector is set successfully. |
+| |NOT_FOUND: if cpuId does not point to a valid CPU. |
+| |INVALID_PARAMETERS: the requested vector type is not |
+| |supported by this CPU. |
+| |DENIED: the calling agent is not allowed to set the |
+| |reset vector of this CPU |
++----------------------+-------------------------------------------------------+
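+
+A minimal sketch of packing the parameters above: a 64-bit boot address is
+split with the kernel's lower/upper_32_bits() helpers, and Bit[30] selects
+the boot vector (illustrative fragment)::
+
+  #include <linux/bits.h>
+  #include <linux/kernel.h>
+
+  u64 vector = 0x80000000ULL;                   /* example address */
+  u32 reset_vector_low  = lower_32_bits(vector);
+  u32 reset_vector_high = upper_32_bits(vector);
+  u32 flags = BIT(30);                          /* update the boot vector */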
+
+CPU_SLEEP_MODE_SET
+~~~~~~~~~~~~~~~~~~
+
+message_id: 0x7
+protocol_id: 0x82
+This command is mandatory.
+
++----------------------+-------------------------------------------------------+
+|Parameters |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++----------------------+-------------------------------------------------------+
+|uint32 flags |Sleep mode flags: |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] IRQ mux: |
+| |If set to 1, the wakeup mux source is the GIC; if set |
+| |to 0, it is the GPC. |
++----------------------+-------------------------------------------------------+
+|uint32 sleepmode |Target sleep mode. When the CPU executes WFI, the GPC |
+| |is triggered to enter one of the following modes: |
+| |RUN: (0) |
+| |WAIT: (1) |
+| |STOP: (2) |
+| |SUSPEND: (3) |
++----------------------+-------------------------------------------------------+
+|Return values |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|int32 status |SUCCESS: if the CPU sleep mode is set successfully. |
+| |NOT_FOUND: if cpuId does not point to a valid CPU. |
+| |INVALID_PARAMETERS: the sleepmode or flags is invalid. |
+| |DENIED: the calling agent is not allowed to configure |
+| |the CPU |
++----------------------+-------------------------------------------------------+
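+
+Illustrative constants matching the sleepmode encoding above (these names are
+not defined by this patch)::
+
+  enum scmi_imx_cpu_sleep_mode {
+          CPU_SLEEP_MODE_RUN     = 0,
+          CPU_SLEEP_MODE_WAIT    = 1,
+          CPU_SLEEP_MODE_STOP    = 2,
+          CPU_SLEEP_MODE_SUSPEND = 3,
+  };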
+
+CPU_INFO_GET
+~~~~~~~~~~~~
+
+message_id: 0xC
+protocol_id: 0x82
+This command is mandatory.
+
++----------------------+-------------------------------------------------------+
+|Parameters |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++----------------------+-------------------------------------------------------+
+|Return values |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|int32 status |SUCCESS: if valid attributes are returned successfully.|
+| |NOT_FOUND: if the cpuid is not valid. |
++----------------------+-------------------------------------------------------+
+|uint32 runmode |Run mode for the CPU: |
+| |RUN (0): CPU started. |
+| |HOLD (1): CPU powered up with reset asserted. |
+| |STOP (2): CPU reset and held. |
+| |SUSPEND (3): CPU in cpuidle state. |
++----------------------+-------------------------------------------------------+
+|uint32 sleepmode |Sleep mode for the CPU, see CPU_SLEEP_MODE_SET |
++----------------------+-------------------------------------------------------+
+|uint32 resetvectorlow |Reset vector low 32 bits for the CPU |
++----------------------+-------------------------------------------------------+
+|uint32 resetvectorhigh|Reset vector high 32 bits for the CPU |
++----------------------+-------------------------------------------------------+
+
+NEGOTIATE_PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x10
+protocol_id: 0x82
+This command is mandatory.
+
++--------------------+---------------------------------------------------------+
+|Parameters |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|uint32 version |The negotiated protocol version the agent intends to use |
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: if the negotiated protocol version is supported |
+| |by the platform. All commands, responses, and |
+| |notifications post successful return of this command must|
+| |comply with the negotiated version. |
+| |NOT_SUPPORTED: if the protocol version is not supported. |
++--------------------+---------------------------------------------------------+
+
SCMI_MISC: System Control and Management MISC Vendor Protocol
================================================================
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index 3e8051fe8296..71e2a9a89f6a 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -1062,13 +1062,12 @@ static bool __init sdei_present_acpi(void)
return true;
}
-void __init sdei_init(void)
+void __init acpi_sdei_init(void)
{
struct platform_device *pdev;
int ret;
- ret = platform_driver_register(&sdei_driver);
- if (ret || !sdei_present_acpi())
+ if (!sdei_present_acpi())
return;
pdev = platform_device_register_simple(sdei_driver.driver.name,
@@ -1081,6 +1080,12 @@ void __init sdei_init(void)
}
}
+static int __init sdei_init(void)
+{
+ return platform_driver_register(&sdei_driver);
+}
+arch_initcall(sdei_init);
+
int sdei_event_handler(struct pt_regs *regs,
struct sdei_registered_event *arg)
{
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
index 49d84f7e59e6..3f8777ee4dc0 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
@@ -96,10 +96,11 @@ static void cs_dsp_mock_bin_add_name_or_info(struct cs_dsp_mock_bin_builder *bui
if (info_len % 4) {
/* Create a padded string with length a multiple of 4 */
+ size_t copy_len = info_len;
info_len = round_up(info_len, 4);
tmp = kunit_kzalloc(builder->test_priv->test, info_len, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, tmp);
- memcpy(tmp, info, info_len);
+ memcpy(tmp, info, copy_len);
info = tmp;
}
@@ -176,6 +177,9 @@ struct cs_dsp_mock_bin_builder *cs_dsp_mock_bin_init(struct cs_dsp_test *priv,
struct cs_dsp_mock_bin_builder *builder;
struct wmfw_coeff_hdr *hdr;
+ KUNIT_ASSERT_LE(priv->test, format_version, 0xff);
+ KUNIT_ASSERT_LE(priv->test, fw_version, 0xffffff);
+
builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
builder->test_priv = priv;
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
index 73412bcef50c..95946fac5563 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
@@ -505,9 +505,11 @@ void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv)
* Could be one 32-bit register or two 16-bit registers.
* A raw read will read the requested number of bytes.
*/
- regmap_raw_read(priv->dsp->regmap,
- xm + (offsetof(struct wmfw_adsp2_id_hdr, n_algs) / 2),
- &num_algs_be32, sizeof(num_algs_be32));
+ KUNIT_ASSERT_GE(priv->test, 0,
+ regmap_raw_read(priv->dsp->regmap,
+ xm +
+ (offsetof(struct wmfw_adsp2_id_hdr, n_algs) / 2),
+ &num_algs_be32, sizeof(num_algs_be32)));
num_algs = be32_to_cpu(num_algs_be32);
bytes = sizeof(struct wmfw_adsp2_id_hdr) +
(num_algs * sizeof(struct wmfw_adsp2_alg_hdr)) +
@@ -516,9 +518,10 @@ void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv)
regcache_drop_region(priv->dsp->regmap, xm, xm + (bytes / 2) - 1);
break;
case WMFW_HALO:
- regmap_read(priv->dsp->regmap,
- xm + offsetof(struct wmfw_halo_id_hdr, n_algs),
- &num_algs);
+ KUNIT_ASSERT_GE(priv->test, 0,
+ regmap_read(priv->dsp->regmap,
+ xm + offsetof(struct wmfw_halo_id_hdr, n_algs),
+ &num_algs));
bytes = sizeof(struct wmfw_halo_id_hdr) +
(num_algs * sizeof(struct wmfw_halo_alg_hdr)) +
4 /* terminator word */;
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
index 5a3ac03ac37f..934d40a4d709 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
@@ -178,6 +178,8 @@ void cs_dsp_mock_wmfw_start_alg_info_block(struct cs_dsp_mock_wmfw_builder *buil
size_t bytes_needed, name_len, description_len;
int offset;
+ KUNIT_ASSERT_LE(builder->test_priv->test, alg_id, 0xffffff);
+
/* Bytes needed for region header */
bytes_needed = offsetof(struct wmfw_region, data);
@@ -435,6 +437,8 @@ struct cs_dsp_mock_wmfw_builder *cs_dsp_mock_wmfw_init(struct cs_dsp_test *priv,
{
struct cs_dsp_mock_wmfw_builder *builder;
+ KUNIT_ASSERT_LE(priv->test, format_version, 0xff);
+
/* If format version isn't given use the default for the target core */
if (format_version < 0) {
switch (priv->dsp->type) {
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 5fe61b9ab5f9..db8c5c03d3a2 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -281,6 +281,30 @@ config EFI_EMBEDDED_FIRMWARE
bool
select CRYPTO_LIB_SHA256
+config EFI_SBAT
+ def_bool y if EFI_SBAT_FILE!=""
+
+config EFI_SBAT_FILE
+ string "Embedded SBAT section file path"
+ depends on EFI_ZBOOT
+ help
+ SBAT section provides a way to improve SecureBoot revocations of UEFI
+ binaries by introducing a generation-based mechanism. With SBAT, older
+ UEFI binaries can be prevented from booting by bumping the minimal
+ required generation for the specific component in the bootloader.
+
+ Note: SBAT information is distribution specific, i.e. the owner of the
+ signing SecureBoot certificate must define the SBAT policy. Linux
+ kernel upstream does not define SBAT components and their generations.
+
+ See https://github.com/rhboot/shim/blob/main/SBAT.md for the additional
+ details.
+
+ Specify a file with SBAT data which is going to be embedded as '.sbat'
+ section into the kernel.
+
+ If unsure, leave blank.
+
endmenu
config UEFI_CPER
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 7309394b8fc9..e57bff702b5f 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -558,6 +558,7 @@ int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
__weak __alias(__efi_mem_desc_lookup);
+EXPORT_SYMBOL_GPL(efi_mem_desc_lookup);
/*
* Calculate the highest address of an efi memory descriptor.
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index d23a1b9fed75..2f173391b63d 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -85,7 +85,6 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += kaslr.o arm64.o arm64-stub.o smbios.o
lib-$(CONFIG_X86) += x86-stub.o smbios.o
-lib-$(CONFIG_EFI_MIXED) += x86-mixed.o
lib-$(CONFIG_X86_64) += x86-5lvl.o
lib-$(CONFIG_RISCV) += kaslr.o riscv.o riscv-stub.o
lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o
diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
index 48842b5c106b..92e3c73502ba 100644
--- a/drivers/firmware/efi/libstub/Makefile.zboot
+++ b/drivers/firmware/efi/libstub/Makefile.zboot
@@ -44,6 +44,10 @@ AFLAGS_zboot-header.o += -DMACHINE_TYPE=IMAGE_FILE_MACHINE_$(EFI_ZBOOT_MACH_TYPE
$(obj)/zboot-header.o: $(srctree)/drivers/firmware/efi/libstub/zboot-header.S FORCE
$(call if_changed_rule,as_o_S)
+ifneq ($(CONFIG_EFI_SBAT_FILE),)
+$(obj)/zboot-header.o: $(CONFIG_EFI_SBAT_FILE)
+endif
+
ZBOOT_DEPS := $(obj)/zboot-header.o $(objtree)/drivers/firmware/efi/libstub/lib.a
LDFLAGS_vmlinuz.efi.elf := -T $(srctree)/drivers/firmware/efi/libstub/zboot.lds
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index fd6dc790c5a8..7aa2f9ad2935 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -601,6 +601,7 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
* @image: EFI loaded image protocol
* @soft_limit: preferred address for loading the initrd
* @hard_limit: upper limit address for loading the initrd
+ * @out: pointer to store the address of the initrd table
*
* Return: status code
*/
diff --git a/drivers/firmware/efi/libstub/x86-5lvl.c b/drivers/firmware/efi/libstub/x86-5lvl.c
index 77359e802181..f1c5fb45d5f7 100644
--- a/drivers/firmware/efi/libstub/x86-5lvl.c
+++ b/drivers/firmware/efi/libstub/x86-5lvl.c
@@ -62,7 +62,7 @@ efi_status_t efi_setup_5level_paging(void)
void efi_5level_switch(void)
{
- bool want_la57 = IS_ENABLED(CONFIG_X86_5LEVEL) && !efi_no5lvl;
+ bool want_la57 = !efi_no5lvl;
bool have_la57 = native_read_cr4() & X86_CR4_LA57;
bool need_toggle = want_la57 ^ have_la57;
u64 *pgt = (void *)la57_toggle + PAGE_SIZE;
diff --git a/drivers/firmware/efi/libstub/x86-mixed.S b/drivers/firmware/efi/libstub/x86-mixed.S
deleted file mode 100644
index e04ed99bc449..000000000000
--- a/drivers/firmware/efi/libstub/x86-mixed.S
+++ /dev/null
@@ -1,253 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming
- *
- * Early support for invoking 32-bit EFI services from a 64-bit kernel.
- *
- * Because this thunking occurs before ExitBootServices() we have to
- * restore the firmware's 32-bit GDT and IDT before we make EFI service
- * calls.
- *
- * On the plus side, we don't have to worry about mangling 64-bit
- * addresses into 32-bits because we're executing with an identity
- * mapped pagetable and haven't transitioned to 64-bit virtual addresses
- * yet.
- */
-
-#include <linux/linkage.h>
-#include <asm/desc_defs.h>
-#include <asm/msr.h>
-#include <asm/page_types.h>
-#include <asm/pgtable_types.h>
-#include <asm/processor-flags.h>
-#include <asm/segment.h>
-
- .text
- .code32
-#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
-SYM_FUNC_START(efi32_stub_entry)
- call 1f
-1: popl %ecx
-
- /* Clear BSS */
- xorl %eax, %eax
- leal (_bss - 1b)(%ecx), %edi
- leal (_ebss - 1b)(%ecx), %ecx
- subl %edi, %ecx
- shrl $2, %ecx
- cld
- rep stosl
-
- add $0x4, %esp /* Discard return address */
- movl 8(%esp), %ebx /* struct boot_params pointer */
- jmp efi32_startup
-SYM_FUNC_END(efi32_stub_entry)
-#endif
-
-/*
- * Called using a far call from __efi64_thunk() below, using the x86_64 SysV
- * ABI (except for R8/R9 which are inaccessible to 32-bit code - EAX/EBX are
- * used instead). EBP+16 points to the arguments passed via the stack.
- *
- * The first argument (EDI) is a pointer to the boot service or protocol, to
- * which the remaining arguments are passed, each truncated to 32 bits.
- */
-SYM_FUNC_START_LOCAL(efi_enter32)
- /*
- * Convert x86-64 SysV ABI params to i386 ABI
- */
- pushl 32(%ebp) /* Up to 3 args passed via the stack */
- pushl 24(%ebp)
- pushl 16(%ebp)
- pushl %ebx /* R9 */
- pushl %eax /* R8 */
- pushl %ecx
- pushl %edx
- pushl %esi
-
- /* Disable paging */
- movl %cr0, %eax
- btrl $X86_CR0_PG_BIT, %eax
- movl %eax, %cr0
-
- /* Disable long mode via EFER */
- movl $MSR_EFER, %ecx
- rdmsr
- btrl $_EFER_LME, %eax
- wrmsr
-
- call *%edi
-
- /* We must preserve return value */
- movl %eax, %edi
-
- call efi32_enable_long_mode
-
- addl $32, %esp
- movl %edi, %eax
- lret
-SYM_FUNC_END(efi_enter32)
-
- .code64
-SYM_FUNC_START(__efi64_thunk)
- push %rbp
- movl %esp, %ebp
- push %rbx
-
- /* Move args #5 and #6 into 32-bit accessible registers */
- movl %r8d, %eax
- movl %r9d, %ebx
-
- lcalll *efi32_call(%rip)
-
- pop %rbx
- pop %rbp
- RET
-SYM_FUNC_END(__efi64_thunk)
-
- .code32
-SYM_FUNC_START_LOCAL(efi32_enable_long_mode)
- movl %cr4, %eax
- btsl $(X86_CR4_PAE_BIT), %eax
- movl %eax, %cr4
-
- movl $MSR_EFER, %ecx
- rdmsr
- btsl $_EFER_LME, %eax
- wrmsr
-
- /* Disable interrupts - the firmware's IDT does not work in long mode */
- cli
-
- /* Enable paging */
- movl %cr0, %eax
- btsl $X86_CR0_PG_BIT, %eax
- movl %eax, %cr0
- ret
-SYM_FUNC_END(efi32_enable_long_mode)
-
-/*
- * This is the common EFI stub entry point for mixed mode. It sets up the GDT
- * and page tables needed for 64-bit execution, after which it calls the
- * common 64-bit EFI entrypoint efi_stub_entry().
- *
- * Arguments: 0(%esp) image handle
- * 4(%esp) EFI system table pointer
- * %ebx struct boot_params pointer (or NULL)
- *
- * Since this is the point of no return for ordinary execution, no registers
- * are considered live except for the function parameters. [Note that the EFI
- * stub may still exit and return to the firmware using the Exit() EFI boot
- * service.]
- */
-SYM_FUNC_START_LOCAL(efi32_startup)
- movl %esp, %ebp
-
- subl $8, %esp
- sgdtl (%esp) /* Save GDT descriptor to the stack */
- movl 2(%esp), %esi /* Existing GDT pointer */
- movzwl (%esp), %ecx /* Existing GDT limit */
- inc %ecx /* Existing GDT size */
- andl $~7, %ecx /* Ensure size is multiple of 8 */
-
- subl %ecx, %esp /* Allocate new GDT */
- andl $~15, %esp /* Realign the stack */
- movl %esp, %edi /* New GDT address */
- leal 7(%ecx), %eax /* New GDT limit */
- pushw %cx /* Push 64-bit CS (for LJMP below) */
- pushl %edi /* Push new GDT address */
- pushw %ax /* Push new GDT limit */
-
- /* Copy GDT to the stack and add a 64-bit code segment at the end */
- movl $GDT_ENTRY(DESC_CODE64, 0, 0xfffff) & 0xffffffff, (%edi,%ecx)
- movl $GDT_ENTRY(DESC_CODE64, 0, 0xfffff) >> 32, 4(%edi,%ecx)
- shrl $2, %ecx
- cld
- rep movsl /* Copy the firmware GDT */
- lgdtl (%esp) /* Switch to the new GDT */
-
- call 1f
-1: pop %edi
-
- /* Record mixed mode entry */
- movb $0x0, (efi_is64 - 1b)(%edi)
-
- /* Set up indirect far call to re-enter 32-bit mode */
- leal (efi32_call - 1b)(%edi), %eax
- addl %eax, (%eax)
- movw %cs, 4(%eax)
-
- /* Disable paging */
- movl %cr0, %eax
- btrl $X86_CR0_PG_BIT, %eax
- movl %eax, %cr0
-
- /* Set up 1:1 mapping */
- leal (pte - 1b)(%edi), %eax
- movl $_PAGE_PRESENT | _PAGE_RW | _PAGE_PSE, %ecx
- leal (_PAGE_PRESENT | _PAGE_RW)(%eax), %edx
-2: movl %ecx, (%eax)
- addl $8, %eax
- addl $PMD_SIZE, %ecx
- jnc 2b
-
- movl $PAGE_SIZE, %ecx
- .irpc l, 0123
- movl %edx, \l * 8(%eax)
- addl %ecx, %edx
- .endr
- addl %ecx, %eax
- movl %edx, (%eax)
- movl %eax, %cr3
-
- call efi32_enable_long_mode
-
- /* Set up far jump to 64-bit mode (CS is already on the stack) */
- leal (efi_stub_entry - 1b)(%edi), %eax
- movl %eax, 2(%esp)
-
- movl 0(%ebp), %edi
- movl 4(%ebp), %esi
- movl %ebx, %edx
- ljmpl *2(%esp)
-SYM_FUNC_END(efi32_startup)
-
-/*
- * efi_status_t efi32_pe_entry(efi_handle_t image_handle,
- * efi_system_table_32_t *sys_table)
- */
-SYM_FUNC_START(efi32_pe_entry)
- pushl %ebx // save callee-save registers
-
- /* Check whether the CPU supports long mode */
- movl $0x80000001, %eax // assume extended info support
- cpuid
- btl $29, %edx // check long mode bit
- jnc 1f
- leal 8(%esp), %esp // preserve stack alignment
- xor %ebx, %ebx // no struct boot_params pointer
- jmp efi32_startup // only ESP and EBX remain live
-1: movl $0x80000003, %eax // EFI_UNSUPPORTED
- popl %ebx
- RET
-SYM_FUNC_END(efi32_pe_entry)
-
-#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
- .org efi32_stub_entry + 0x200
- .code64
-SYM_FUNC_START_NOALIGN(efi64_stub_entry)
- jmp efi_handover_entry
-SYM_FUNC_END(efi64_stub_entry)
-#endif
-
- .data
- .balign 8
-SYM_DATA_START_LOCAL(efi32_call)
- .long efi_enter32 - .
- .word 0x0
-SYM_DATA_END(efi32_call)
-SYM_DATA(efi_is64, .byte 1)
-
- .bss
- .balign PAGE_SIZE
-SYM_DATA_LOCAL(pte, .fill 6 * PAGE_SIZE, 1, 0)
diff --git a/drivers/firmware/efi/libstub/zboot-header.S b/drivers/firmware/efi/libstub/zboot-header.S
index fb676ded47fa..b6431edd0fc9 100644
--- a/drivers/firmware/efi/libstub/zboot-header.S
+++ b/drivers/firmware/efi/libstub/zboot-header.S
@@ -4,17 +4,17 @@
#ifdef CONFIG_64BIT
.set .Lextra_characteristics, 0x0
- .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32PLUS
+ .set .Lpe_opt_magic, IMAGE_NT_OPTIONAL_HDR64_MAGIC
#else
.set .Lextra_characteristics, IMAGE_FILE_32BIT_MACHINE
- .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32
+ .set .Lpe_opt_magic, IMAGE_NT_OPTIONAL_HDR32_MAGIC
#endif
.section ".head", "a"
.globl __efistub_efi_zboot_header
__efistub_efi_zboot_header:
.Ldoshdr:
- .long MZ_MAGIC
+ .long IMAGE_DOS_SIGNATURE
.ascii "zimg" // image type
.long __efistub__gzdata_start - .Ldoshdr // payload offset
.long __efistub__gzdata_size - ZBOOT_SIZE_LEN // payload size
@@ -25,7 +25,7 @@ __efistub_efi_zboot_header:
.long .Lpehdr - .Ldoshdr // PE header offset
.Lpehdr:
- .long PE_MAGIC
+ .long IMAGE_NT_SIGNATURE
.short MACHINE_TYPE
.short .Lsection_count
.long 0
@@ -63,7 +63,7 @@ __efistub_efi_zboot_header:
.long .Lefi_header_end - .Ldoshdr
.long 0
.short IMAGE_SUBSYSTEM_EFI_APPLICATION
- .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
+ .short IMAGE_DLLCHARACTERISTICS_NX_COMPAT
#ifdef CONFIG_64BIT
.quad 0, 0, 0, 0
#else
@@ -123,11 +123,29 @@ __efistub_efi_zboot_header:
IMAGE_SCN_MEM_READ | \
IMAGE_SCN_MEM_EXECUTE
+#ifdef CONFIG_EFI_SBAT
+ .ascii ".sbat\0\0\0"
+ .long __sbat_size
+ .long _sbat - .Ldoshdr
+ .long __sbat_size
+ .long _sbat - .Ldoshdr
+
+ .long 0, 0
+ .short 0, 0
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_DISCARDABLE
+
+ .pushsection ".sbat", "a", @progbits
+ .incbin CONFIG_EFI_SBAT_FILE
+ .popsection
+#endif
+
.ascii ".data\0\0\0"
.long __data_size
- .long _etext - .Ldoshdr
+ .long _data - .Ldoshdr
.long __data_rawsize
- .long _etext - .Ldoshdr
+ .long _data - .Ldoshdr
.long 0, 0
.short 0, 0
diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds
index 9ecc57ff5b45..c3a166675450 100644
--- a/drivers/firmware/efi/libstub/zboot.lds
+++ b/drivers/firmware/efi/libstub/zboot.lds
@@ -29,7 +29,17 @@ SECTIONS
. = _etext;
}
+#ifdef CONFIG_EFI_SBAT
+ .sbat : ALIGN(4096) {
+ _sbat = .;
+ *(.sbat)
+ _esbat = ALIGN(4096);
+ . = _esbat;
+ }
+#endif
+
.data : ALIGN(4096) {
+ _data = .;
*(.data* .init.data*)
_edata = ALIGN(512);
. = _edata;
@@ -52,3 +62,4 @@ PROVIDE(__efistub__gzdata_size =
PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext));
PROVIDE(__data_size = ABSOLUTE(_end - _etext));
+PROVIDE(__sbat_size = ABSOLUTE(_esbat - _sbat));
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 34109fd86c55..f1c04d7cfd71 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -43,7 +43,8 @@ int __init __efi_memmap_init(struct efi_memory_map_data *data)
map.map = early_memremap(phys_map, data->size);
if (!map.map) {
- pr_err("Could not map the memory map!\n");
+ pr_err("Could not map the memory map! phys_map=%pa, size=0x%lx\n",
+ &phys_map, data->size);
return -ENOMEM;
}
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 9e2628728aad..77b5f7ac3e20 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -361,6 +361,10 @@ static long efi_runtime_get_waketime(unsigned long arg)
getwakeuptime.enabled))
return -EFAULT;
+ if (getwakeuptime.pending && put_user(pending,
+ getwakeuptime.pending))
+ return -EFAULT;
+
if (getwakeuptime.time) {
if (copy_to_user(getwakeuptime.time, &efi_time,
sizeof(efi_time_t)))
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index c964f4924359..127ad752acf8 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -23,6 +23,28 @@ config IMX_SCU
This driver manages the IPC interface between host CPU and the
SCU firmware running on M4.
+config IMX_SCMI_CPU_DRV
+ tristate "IMX SCMI CPU Protocol driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ default y if ARCH_MXC
+ help
+ The System Controller Management Interface firmware (SCMI FW) is
+ a low-level system function which runs on a dedicated Cortex-M
+ core that could provide cpu management features.
+
+ This driver can also be built as a module.
+
+config IMX_SCMI_LMM_DRV
+ tristate "IMX SCMI LMM Protocol driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ default y if ARCH_MXC
+ help
+ The System Controller Management Interface firmware (SCMI FW) is
+ a low-level system function which runs on a dedicated Cortex-M
+ core that could provide Logical Machine management features.
+
+ This driver can also be built as a module.
+
config IMX_SCMI_MISC_DRV
tristate "IMX SCMI MISC Protocol driver"
depends on ARCH_MXC || COMPILE_TEST
diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile
index 8d046c341be8..3bbaffa6e347 100644
--- a/drivers/firmware/imx/Makefile
+++ b/drivers/firmware/imx/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IMX_DSP) += imx-dsp.o
obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o rm.o imx-scu-soc.o
+obj-${CONFIG_IMX_SCMI_CPU_DRV} += sm-cpu.o
obj-${CONFIG_IMX_SCMI_MISC_DRV} += sm-misc.o
+obj-${CONFIG_IMX_SCMI_LMM_DRV} += sm-lmm.o
diff --git a/drivers/firmware/imx/sm-cpu.c b/drivers/firmware/imx/sm-cpu.c
new file mode 100644
index 000000000000..091b014f739f
--- /dev/null
+++ b/drivers/firmware/imx/sm-cpu.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <linux/firmware/imx/sm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+static const struct scmi_imx_cpu_proto_ops *imx_cpu_ops;
+static struct scmi_protocol_handle *ph;
+
+int scmi_imx_cpu_reset_vector_set(u32 cpuid, u64 vector, bool start, bool boot,
+ bool resume)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ return imx_cpu_ops->cpu_reset_vector_set(ph, cpuid, vector, start,
+ boot, resume);
+}
+EXPORT_SYMBOL(scmi_imx_cpu_reset_vector_set);
+
+int scmi_imx_cpu_start(u32 cpuid, bool start)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ if (start)
+ return imx_cpu_ops->cpu_start(ph, cpuid, true);
+
+ return imx_cpu_ops->cpu_start(ph, cpuid, false);
+};
+EXPORT_SYMBOL(scmi_imx_cpu_start);
+
+int scmi_imx_cpu_started(u32 cpuid, bool *started)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ if (!started)
+ return -EINVAL;
+
+ return imx_cpu_ops->cpu_started(ph, cpuid, started);
+};
+EXPORT_SYMBOL(scmi_imx_cpu_started);
+
+static int scmi_imx_cpu_probe(struct scmi_device *sdev)
+{
+ const struct scmi_handle *handle = sdev->handle;
+
+ if (!handle)
+ return -ENODEV;
+
+ if (imx_cpu_ops) {
+ dev_err(&sdev->dev, "sm cpu already initialized\n");
+ return -EEXIST;
+ }
+
+ imx_cpu_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_CPU, &ph);
+ if (IS_ERR(imx_cpu_ops))
+ return PTR_ERR(imx_cpu_ops);
+
+ return 0;
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_IMX_CPU, "imx-cpu" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_imx_cpu_driver = {
+ .name = "scmi-imx-cpu",
+ .probe = scmi_imx_cpu_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_imx_cpu_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("IMX SM CPU driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/imx/sm-lmm.c b/drivers/firmware/imx/sm-lmm.c
new file mode 100644
index 000000000000..6807bf563c03
--- /dev/null
+++ b/drivers/firmware/imx/sm-lmm.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <linux/firmware/imx/sm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+static const struct scmi_imx_lmm_proto_ops *imx_lmm_ops;
+static struct scmi_protocol_handle *ph;
+
+int scmi_imx_lmm_info(u32 lmid, struct scmi_imx_lmm_info *info)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ if (!info)
+ return -EINVAL;
+
+ return imx_lmm_ops->lmm_info(ph, lmid, info);
+};
+EXPORT_SYMBOL(scmi_imx_lmm_info);
+
+int scmi_imx_lmm_reset_vector_set(u32 lmid, u32 cpuid, u32 flags, u64 vector)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ return imx_lmm_ops->lmm_reset_vector_set(ph, lmid, cpuid, flags, vector);
+}
+EXPORT_SYMBOL(scmi_imx_lmm_reset_vector_set);
+
+int scmi_imx_lmm_operation(u32 lmid, enum scmi_imx_lmm_op op, u32 flags)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ switch (op) {
+ case SCMI_IMX_LMM_BOOT:
+ return imx_lmm_ops->lmm_power_boot(ph, lmid, true);
+ case SCMI_IMX_LMM_POWER_ON:
+ return imx_lmm_ops->lmm_power_boot(ph, lmid, false);
+ case SCMI_IMX_LMM_SHUTDOWN:
+ return imx_lmm_ops->lmm_shutdown(ph, lmid, flags);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(scmi_imx_lmm_operation);
+
+static int scmi_imx_lmm_probe(struct scmi_device *sdev)
+{
+ const struct scmi_handle *handle = sdev->handle;
+
+ if (!handle)
+ return -ENODEV;
+
+ if (imx_lmm_ops) {
+ dev_err(&sdev->dev, "lmm already initialized\n");
+ return -EEXIST;
+ }
+
+ imx_lmm_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_LMM, &ph);
+ if (IS_ERR(imx_lmm_ops))
+ return PTR_ERR(imx_lmm_ops);
+
+ return 0;
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_IMX_LMM, "imx-lmm" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_imx_lmm_driver = {
+ .name = "scmi-imx-lmm",
+ .probe = scmi_imx_lmm_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_imx_lmm_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("IMX SM LMM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index a1ebbe9b73b1..38ca190d4a22 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -804,8 +804,10 @@ int __init psci_dt_init(void)
np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
- if (!np || !of_device_is_available(np))
+ if (!np || !of_device_is_available(np)) {
+ of_node_put(np);
return -ENODEV;
+ }
init_fn = (psci_initcall_t)matched_np->data;
ret = init_fn(np);
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index b662b7e28b80..df02a4ec3398 100644
--- a/drivers/firmware/psci/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -343,7 +343,7 @@ static int suspend_test_thread(void *arg)
* later.
*/
timer_delete(&wakeup_timer);
- destroy_timer_on_stack(&wakeup_timer);
+ timer_destroy_on_stack(&wakeup_timer);
if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
complete(&suspend_threads_done);
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index fc4d67e4c4a6..f63b716be5b0 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -1986,7 +1986,10 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
*/
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
{ .compatible = "asus,vivobook-s15" },
+ { .compatible = "asus,zenbook-a14-ux3407qa" },
+ { .compatible = "asus,zenbook-a14-ux3407ra" },
{ .compatible = "dell,xps13-9345" },
+ { .compatible = "hp,elitebook-ultra-g1q" },
{ .compatible = "hp,omnibook-x14" },
{ .compatible = "huawei,gaokun3" },
{ .compatible = "lenovo,flex-5g" },
diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h
index 097369d38b84..3133d826f5fa 100644
--- a/drivers/firmware/qcom/qcom_scm.h
+++ b/drivers/firmware/qcom/qcom_scm.h
@@ -44,8 +44,11 @@ enum qcom_scm_arg_types {
/**
* struct qcom_scm_desc
+ * @svc: Service identifier
+ * @cmd: Command identifier
* @arginfo: Metadata describing the arguments in args[]
* @args: The array of arguments for the secure syscall
+ * @owner: Owner identifier
*/
struct qcom_scm_desc {
u32 svc;
diff --git a/drivers/firmware/qcom/qcom_tzmem.c b/drivers/firmware/qcom/qcom_tzmem.c
index 92b365178235..94196ad87105 100644
--- a/drivers/firmware/qcom/qcom_tzmem.c
+++ b/drivers/firmware/qcom/qcom_tzmem.c
@@ -79,6 +79,7 @@ static const char *const qcom_tzmem_blacklist[] = {
"qcom,sc8180x",
"qcom,sdm670", /* failure in GPU firmware loading */
"qcom,sdm845", /* reset in rmtfs memory assignment */
+ "qcom,sm7150", /* reset in rmtfs memory assignment */
"qcom,sm8150", /* reset in rmtfs memory assignment */
NULL
};
diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.c b/drivers/firmware/samsung/exynos-acpm-pmic.c
index 85e90d236da2..39b33a356ebd 100644
--- a/drivers/firmware/samsung/exynos-acpm-pmic.c
+++ b/drivers/firmware/samsung/exynos-acpm-pmic.c
@@ -43,13 +43,13 @@ static inline u32 acpm_pmic_get_bulk(u32 data, unsigned int i)
return (data >> (ACPM_PMIC_BULK_SHIFT * i)) & ACPM_PMIC_BULK_MASK;
}
-static void acpm_pmic_set_xfer(struct acpm_xfer *xfer, u32 *cmd,
+static void acpm_pmic_set_xfer(struct acpm_xfer *xfer, u32 *cmd, size_t cmdlen,
unsigned int acpm_chan_id)
{
xfer->txd = cmd;
xfer->rxd = cmd;
- xfer->txlen = sizeof(cmd);
- xfer->rxlen = sizeof(cmd);
+ xfer->txlen = cmdlen;
+ xfer->rxlen = cmdlen;
xfer->acpm_chan_id = acpm_chan_id;
}
@@ -71,7 +71,7 @@ int acpm_pmic_read_reg(const struct acpm_handle *handle,
int ret;
acpm_pmic_init_read_cmd(cmd, type, reg, chan);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
@@ -104,7 +104,7 @@ int acpm_pmic_bulk_read(const struct acpm_handle *handle,
return -EINVAL;
acpm_pmic_init_bulk_read_cmd(cmd, type, reg, chan, count);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
@@ -144,7 +144,7 @@ int acpm_pmic_write_reg(const struct acpm_handle *handle,
int ret;
acpm_pmic_init_write_cmd(cmd, type, reg, chan, value);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
@@ -184,7 +184,7 @@ int acpm_pmic_bulk_write(const struct acpm_handle *handle,
return -EINVAL;
acpm_pmic_init_bulk_write_cmd(cmd, type, reg, chan, count, buf);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
@@ -214,7 +214,7 @@ int acpm_pmic_update_reg(const struct acpm_handle *handle,
int ret;
acpm_pmic_init_update_cmd(cmd, type, reg, chan, value, mask);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
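All six call sites above share one root cause: inside acpm_pmic_set_xfer(), the cmd array parameter had decayed to a pointer, so sizeof(cmd) evaluated to the pointer size rather than the buffer size. Reduced to a sketch (illustrative, not code from this patch; struct acpm_xfer and its txlen field are taken from the hunks above):

/* Inside a function taking u32 *cmd (or equivalently u32 cmd[4],
 * which decays to u32 *), sizeof(cmd) is sizeof(u32 *), i.e. 8 on
 * 64-bit kernels: */
static void broken(struct acpm_xfer *xfer, u32 *cmd)
{
	xfer->txlen = sizeof(cmd);	/* 8, regardless of buffer size */
}

/* The fix: take the length explicitly; sizeof(cmd) is evaluated at
 * the call site, where cmd is still a real array: */
static void fixed(struct acpm_xfer *xfer, u32 *cmd, size_t cmdlen)
{
	xfer->txlen = cmdlen;		/* e.g. 16 for u32 cmd[4] */
}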
diff --git a/drivers/firmware/samsung/exynos-acpm.c b/drivers/firmware/samsung/exynos-acpm.c
index 15e991b99f5a..e02f14f4bd7c 100644
--- a/drivers/firmware/samsung/exynos-acpm.c
+++ b/drivers/firmware/samsung/exynos-acpm.c
@@ -15,6 +15,7 @@
#include <linux/firmware/samsung/exynos-acpm-protocol.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/ktime.h>
#include <linux/mailbox/exynos-message.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
@@ -32,8 +33,7 @@
#define ACPM_PROTOCOL_SEQNUM GENMASK(21, 16)
-/* The unit of counter is 20 us. 5000 * 20 = 100 ms */
-#define ACPM_POLL_TIMEOUT 5000
+#define ACPM_POLL_TIMEOUT_US (100 * USEC_PER_MSEC)
#define ACPM_TX_TIMEOUT_US 500000
#define ACPM_GS101_INITDATA_BASE 0xa000
@@ -300,12 +300,13 @@ static int acpm_dequeue_by_polling(struct acpm_chan *achan,
const struct acpm_xfer *xfer)
{
struct device *dev = achan->acpm->dev;
- unsigned int cnt_20us = 0;
+ ktime_t timeout;
u32 seqnum;
int ret;
seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
+ timeout = ktime_add_us(ktime_get(), ACPM_POLL_TIMEOUT_US);
do {
ret = acpm_get_rx(achan, xfer);
if (ret)
@@ -315,12 +316,11 @@ static int acpm_dequeue_by_polling(struct acpm_chan *achan,
return 0;
/* Determined experimentally. */
- usleep_range(20, 30);
- cnt_20us++;
- } while (cnt_20us < ACPM_POLL_TIMEOUT);
+ udelay(20);
+ } while (ktime_before(ktime_get(), timeout));
- dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx, cnt_20us = %d.\n",
- achan->id, seqnum, achan->bitmap_seqnum[0], cnt_20us);
+ dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx.\n",
+ achan->id, seqnum, achan->bitmap_seqnum[0]);
return -ETIME;
}
@@ -649,7 +649,7 @@ static int acpm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, acpm);
- return 0;
+ return devm_of_platform_populate(dev);
}
/**
@@ -677,43 +677,30 @@ static void devm_acpm_release(struct device *dev, void *res)
}
/**
- * acpm_get_by_phandle() - get the ACPM handle using DT phandle.
- * @dev: device pointer requesting ACPM handle.
- * @property: property name containing phandle on ACPM node.
+ * acpm_get_by_node() - get the ACPM handle using node pointer.
+ * @dev: device pointer requesting ACPM handle.
+ * @np: ACPM device tree node.
*
* Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
*/
-static const struct acpm_handle *acpm_get_by_phandle(struct device *dev,
- const char *property)
+static const struct acpm_handle *acpm_get_by_node(struct device *dev,
+ struct device_node *np)
{
struct platform_device *pdev;
- struct device_node *acpm_np;
struct device_link *link;
struct acpm_info *acpm;
- acpm_np = of_parse_phandle(dev->of_node, property, 0);
- if (!acpm_np)
- return ERR_PTR(-ENODEV);
-
- pdev = of_find_device_by_node(acpm_np);
- if (!pdev) {
- dev_err(dev, "Cannot find device node %s\n", acpm_np->name);
- of_node_put(acpm_np);
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
- }
-
- of_node_put(acpm_np);
acpm = platform_get_drvdata(pdev);
if (!acpm) {
- dev_err(dev, "Cannot get drvdata from %s\n",
- dev_name(&pdev->dev));
platform_device_put(pdev);
return ERR_PTR(-EPROBE_DEFER);
}
if (!try_module_get(pdev->dev.driver->owner)) {
- dev_err(dev, "Cannot get module reference.\n");
platform_device_put(pdev);
return ERR_PTR(-EPROBE_DEFER);
}
@@ -732,14 +719,14 @@ static const struct acpm_handle *acpm_get_by_phandle(struct device *dev,
}
/**
- * devm_acpm_get_by_phandle() - managed get handle using phandle.
- * @dev: device pointer requesting ACPM handle.
- * @property: property name containing phandle on ACPM node.
+ * devm_acpm_get_by_node() - managed get handle using node pointer.
+ * @dev: device pointer requesting ACPM handle.
+ * @np: ACPM device tree node.
*
* Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
*/
-const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
- const char *property)
+const struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
+ struct device_node *np)
{
const struct acpm_handle **ptr, *handle;
@@ -747,7 +734,7 @@ const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
if (!ptr)
return ERR_PTR(-ENOMEM);
- handle = acpm_get_by_phandle(dev, property);
+ handle = acpm_get_by_node(dev, np);
if (!IS_ERR(handle)) {
*ptr = handle;
devres_add(dev, ptr);
@@ -757,6 +744,7 @@ const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
return handle;
}
+EXPORT_SYMBOL_GPL(devm_acpm_get_by_node);
static const struct acpm_match_data acpm_gs101 = {
.initdata_base = ACPM_GS101_INITDATA_BASE,
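The polling rework above is worth spelling out: the old loop counted iterations and assumed each usleep_range(20, 30) took exactly 20 us, so any oversleeping silently stretched the nominal 100 ms budget; the new loop fixes the deadline in wall-clock time once, up front. The pattern, as a sketch (try_once() is a hypothetical stand-in for acpm_get_rx() plus the sequence-number check):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ktime.h>

int try_once(void);	/* hypothetical helper */

static int example_poll(void)
{
	ktime_t timeout = ktime_add_us(ktime_get(), ACPM_POLL_TIMEOUT_US);

	do {
		if (try_once() == 0)
			return 0;
		udelay(20);	/* per-iteration delay, as before */
	} while (ktime_before(ktime_get(), timeout));

	return -ETIME;	/* bounded in wall-clock time, not iterations */
}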
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index 75a186bf8f8e..592d8a644619 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -35,36 +35,7 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
if (type != VIDEO_TYPE_VLFB && type != VIDEO_TYPE_EFI)
return false;
- /*
- * The meaning of depth and bpp for direct-color formats is
- * inconsistent:
- *
- * - DRM format info specifies depth as the number of color
- * bits; including alpha, but not including filler bits.
- * - Linux' EFI platform code computes lfb_depth from the
- * individual color channels, including the reserved bits.
- * - VBE 1.1 defines lfb_depth for XRGB1555 as 16, but later
- * versions use 15.
- * - On the kernel command line, 'bpp' of 32 is usually
- * XRGB8888 including the filler bits, but 15 is XRGB1555
- * not including the filler bit.
- *
- * It's not easily possible to fix this in struct screen_info,
- * as this could break UAPI. The best solution is to compute
- * bits_per_pixel from the color bits, reserved bits and
- * reported lfb_depth, whichever is highest. In the loop below,
- * ignore simplefb formats with alpha bits, as EFI and VESA
- * don't specify alpha channels.
- */
- if (si->lfb_depth > 8) {
- bits_per_pixel = max(max3(si->red_size + si->red_pos,
- si->green_size + si->green_pos,
- si->blue_size + si->blue_pos),
- si->rsvd_size + si->rsvd_pos);
- bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth);
- } else {
- bits_per_pixel = si->lfb_depth;
- }
+ bits_per_pixel = __screen_info_lfb_bits_per_pixel(si);
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
const struct simplefb_format *f = &formats[i];
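The long rationale deleted above now lives with the new helper; the arithmetic it described is easy to check by hand. A worked example with illustrative values (the helper name comes from the hunk; the channel layout is an assumption):

#include <linux/minmax.h>

static u32 example_bpp(void)
{
	/* XRGB8888 as EFI may report it, with lfb_depth == 24:
	 * red 8 bits at pos 16, green 8 at 8, blue 8 at 0,
	 * reserved 8 at 24. */
	u32 bpp = max(max3(8 + 16, 8 + 8, 8 + 0),	/* color -> 24 */
		      8 + 24);				/* rsvd  -> 32 */

	return max_t(u32, bpp, 24);	/* vs lfb_depth -> still 32 */
}

With these inputs the mode is matched as 32-bit XRGB8888 rather than 24-bit RGB888, which is the behavior the removed comment was defending.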
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 806a975fff22..ae5fd1936ad3 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -2,7 +2,7 @@
/*
* Texas Instruments System Control Interface Protocol Driver
*
- * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2025 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
@@ -3670,6 +3670,7 @@ static int __maybe_unused ti_sci_suspend(struct device *dev)
struct ti_sci_info *info = dev_get_drvdata(dev);
struct device *cpu_dev, *cpu_dev_max = NULL;
s32 val, cpu_lat = 0;
+ u16 cpu_lat_ms;
int i, ret;
if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
@@ -3682,9 +3683,16 @@ static int __maybe_unused ti_sci_suspend(struct device *dev)
}
}
if (cpu_dev_max) {
- dev_dbg(cpu_dev_max, "%s: sending max CPU latency=%u\n", __func__, cpu_lat);
+ /*
+ * PM QoS latency unit is usecs, device manager uses msecs.
+ * Convert to msecs and round down for device manager.
+ */
+ cpu_lat_ms = cpu_lat / USEC_PER_MSEC;
+ dev_dbg(cpu_dev_max, "%s: sending max CPU latency=%u ms\n", __func__,
+ cpu_lat_ms);
ret = ti_sci_cmd_set_latency_constraint(&info->handle,
- cpu_lat, TISCI_MSG_CONSTRAINT_SET);
+ cpu_lat_ms,
+ TISCI_MSG_CONSTRAINT_SET);
if (ret)
return ret;
}
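One subtlety in the conversion above: the division truncates, and truncating a latency bound makes the constraint sent to the device manager stricter, never looser, so rounding down is the safe direction. For example (illustrative value):

static u16 example_lat_ms(s32 cpu_lat_us)
{
	/* 2999 us becomes a 2 ms constraint: the device manager is
	 * asked to be faster than strictly required, never slower. */
	return cpu_lat_us / USEC_PER_MSEC;
}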
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index 47fe6261f5a3..1eac9948148f 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -2,29 +2,31 @@
/*
* Turris Mox rWTM firmware driver
*
- * Copyright (C) 2019, 2024 Marek Behún <kabel@kernel.org>
+ * Copyright (C) 2019, 2024, 2025 Marek Behún <kabel@kernel.org>
*/
#include <crypto/sha2.h>
#include <linux/align.h>
#include <linux/armada-37xx-rwtm-mailbox.h>
+#include <linux/cleanup.h>
#include <linux/completion.h>
#include <linux/container_of.h>
-#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/if_ether.h>
+#include <linux/key.h>
#include <linux/kobject.h>
#include <linux/mailbox_client.h>
+#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/sysfs.h>
+#include <linux/turris-signing-key.h>
#include <linux/types.h>
#define DRIVER_NAME "turris-mox-rwtm"
@@ -37,10 +39,13 @@
* https://gitlab.labs.nic.cz/turris/mox-boot-builder/tree/master/wtmi.
*/
-#define MOX_ECC_NUMBER_WORDS 17
-#define MOX_ECC_NUMBER_LEN (MOX_ECC_NUMBER_WORDS * sizeof(u32))
-
-#define MOX_ECC_SIGNATURE_WORDS (2 * MOX_ECC_NUMBER_WORDS)
+enum {
+ MOX_ECC_NUM_BITS = 521,
+ MOX_ECC_NUM_LEN = DIV_ROUND_UP(MOX_ECC_NUM_BITS, 8),
+ MOX_ECC_NUM_WORDS = DIV_ROUND_UP(MOX_ECC_NUM_BITS, 32),
+ MOX_ECC_SIG_LEN = 2 * MOX_ECC_NUM_LEN,
+ MOX_ECC_PUBKEY_LEN = 1 + MOX_ECC_NUM_LEN,
+};
#define MBOX_STS_SUCCESS (0 << 30)
#define MBOX_STS_FAIL (1 << 30)
@@ -77,10 +82,7 @@ enum mbox_cmd {
* @ram_size: RAM size of the device
* @mac_address1: first MAC address of the device
* @mac_address2: second MAC address of the device
- * @has_pubkey: whether board ECDSA public key is present
* @pubkey: board ECDSA public key
- * @last_sig: last ECDSA signature generated with board ECDSA private key
- * @last_sig_done: whether the last ECDSA signing is complete
*/
struct mox_rwtm {
struct mbox_client mbox_client;
@@ -100,18 +102,8 @@ struct mox_rwtm {
int board_version, ram_size;
u8 mac_address1[ETH_ALEN], mac_address2[ETH_ALEN];
- bool has_pubkey;
- u8 pubkey[135];
-
-#ifdef CONFIG_DEBUG_FS
- /*
- * Signature process. This is currently done via debugfs, because it
- * does not conform to the sysfs standard "one file per attribute".
- * It should be rewritten via crypto API once akcipher API is available
- * from userspace.
- */
- u32 last_sig[MOX_ECC_SIGNATURE_WORDS];
- bool last_sig_done;
+#ifdef CONFIG_TURRIS_MOX_RWTM_KEYCTL
+ u8 pubkey[MOX_ECC_PUBKEY_LEN];
#endif
};
@@ -120,24 +112,23 @@ static inline struct device *rwtm_dev(struct mox_rwtm *rwtm)
return rwtm->mbox_client.dev;
}
-#define MOX_ATTR_RO(name, format, cat) \
+#define MOX_ATTR_RO(name, format) \
static ssize_t \
name##_show(struct device *dev, struct device_attribute *a, \
char *buf) \
{ \
struct mox_rwtm *rwtm = dev_get_drvdata(dev); \
- if (!rwtm->has_##cat) \
+ if (!rwtm->has_board_info) \
return -ENODATA; \
return sysfs_emit(buf, format, rwtm->name); \
} \
static DEVICE_ATTR_RO(name)
-MOX_ATTR_RO(serial_number, "%016llX\n", board_info);
-MOX_ATTR_RO(board_version, "%i\n", board_info);
-MOX_ATTR_RO(ram_size, "%i\n", board_info);
-MOX_ATTR_RO(mac_address1, "%pM\n", board_info);
-MOX_ATTR_RO(mac_address2, "%pM\n", board_info);
-MOX_ATTR_RO(pubkey, "%s\n", pubkey);
+MOX_ATTR_RO(serial_number, "%016llX\n");
+MOX_ATTR_RO(board_version, "%i\n");
+MOX_ATTR_RO(ram_size, "%i\n");
+MOX_ATTR_RO(mac_address1, "%pM\n");
+MOX_ATTR_RO(mac_address2, "%pM\n");
static struct attribute *turris_mox_rwtm_attrs[] = {
&dev_attr_serial_number.attr,
@@ -145,7 +136,6 @@ static struct attribute *turris_mox_rwtm_attrs[] = {
&dev_attr_ram_size.attr,
&dev_attr_mac_address1.attr,
&dev_attr_mac_address2.attr,
- &dev_attr_pubkey.attr,
NULL
};
ATTRIBUTE_GROUPS(turris_mox_rwtm);
@@ -247,24 +237,6 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
pr_info(" burned RAM size %i MiB\n", rwtm->ram_size);
}
- ret = mox_rwtm_exec(rwtm, MBOX_CMD_ECDSA_PUB_KEY, NULL, false);
- if (ret == -ENODATA) {
- dev_warn(dev, "Board has no public key burned!\n");
- } else if (ret == -EOPNOTSUPP) {
- dev_notice(dev,
- "Firmware does not support the ECDSA_PUB_KEY command\n");
- } else if (ret < 0) {
- return ret;
- } else {
- u32 *s = reply->status;
-
- rwtm->has_pubkey = true;
- sprintf(rwtm->pubkey,
- "%06x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x",
- ret, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7],
- s[8], s[9], s[10], s[11], s[12], s[13], s[14], s[15]);
- }
-
return 0;
}
@@ -306,127 +278,139 @@ unlock_mutex:
return ret;
}
-#ifdef CONFIG_DEBUG_FS
-static int rwtm_debug_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
+#ifdef CONFIG_TURRIS_MOX_RWTM_KEYCTL
- return nonseekable_open(inode, file);
-}
-
-static ssize_t do_sign_read(struct file *file, char __user *buf, size_t len,
- loff_t *ppos)
+static void mox_ecc_number_to_bin(void *dst, const u32 *src)
{
- struct mox_rwtm *rwtm = file->private_data;
- ssize_t ret;
+ __be32 tmp[MOX_ECC_NUM_WORDS];
- /* only allow one read, of whole signature, from position 0 */
- if (*ppos != 0)
- return 0;
+ cpu_to_be32_array(tmp, src, MOX_ECC_NUM_WORDS);
- if (len < sizeof(rwtm->last_sig))
- return -EINVAL;
+ memcpy(dst, (void *)tmp + 2, MOX_ECC_NUM_LEN);
+}
- if (!rwtm->last_sig_done)
- return -ENODATA;
+static void mox_ecc_public_key_to_bin(void *dst, u32 src_first,
+ const u32 *src_rest)
+{
+ __be32 tmp[MOX_ECC_NUM_WORDS - 1];
+ u8 *p = dst;
- ret = simple_read_from_buffer(buf, len, ppos, rwtm->last_sig,
- sizeof(rwtm->last_sig));
- rwtm->last_sig_done = false;
+ /* take 3 bytes from the first word */
+ *p++ = src_first >> 16;
+ *p++ = src_first >> 8;
+ *p++ = src_first;
- return ret;
+ /* take the rest of the words */
+ cpu_to_be32_array(tmp, src_rest, MOX_ECC_NUM_WORDS - 1);
+ memcpy(p, tmp, sizeof(tmp));
}
-static ssize_t do_sign_write(struct file *file, const char __user *buf,
- size_t len, loff_t *ppos)
+static int mox_rwtm_sign(const struct key *key, const void *data, void *signature)
{
- struct mox_rwtm *rwtm = file->private_data;
- struct armada_37xx_rwtm_tx_msg msg;
- loff_t dummy = 0;
- ssize_t ret;
-
- if (len != SHA512_DIGEST_SIZE)
- return -EINVAL;
-
- /* if last result is not zero user has not read that information yet */
- if (rwtm->last_sig_done)
- return -EBUSY;
+ struct mox_rwtm *rwtm = dev_get_drvdata(turris_signing_key_get_dev(key));
+ struct armada_37xx_rwtm_tx_msg msg = {};
+ u32 offset_r, offset_s;
+ int ret;
- if (!mutex_trylock(&rwtm->busy))
- return -EBUSY;
+ guard(mutex)(&rwtm->busy);
/*
- * Here we have to send:
- * 1. Address of the input to sign.
- * The input is an array of 17 32-bit words, the first (most
- * significat) is 0, the rest 16 words are copied from the SHA-512
- * hash given by the user and converted from BE to LE.
- * 2. Address of the buffer where ECDSA signature value R shall be
- * stored by the rWTM firmware.
- * 3. Address of the buffer where ECDSA signature value S shall be
- * stored by the rWTM firmware.
+ * For MBOX_CMD_SIGN command:
+ * args[0] - must be 1
+ * args[1] - address of message M to sign; message is a 521-bit number
+ * args[2] - address where the R part of the signature will be stored
+ * args[3] - address where the S part of the signature will be stored
+ *
+ * M, R and S are 521-bit numbers encoded as seventeen 32-bit words,
+	 * most significant word first.
+	 * Since the message in @data is a sha512 digest, the most significant
+ * word is always zero.
*/
+
+ offset_r = MOX_ECC_NUM_WORDS * sizeof(u32);
+ offset_s = 2 * MOX_ECC_NUM_WORDS * sizeof(u32);
+
memset(rwtm->buf, 0, sizeof(u32));
- ret = simple_write_to_buffer(rwtm->buf + sizeof(u32),
- SHA512_DIGEST_SIZE, &dummy, buf, len);
- if (ret < 0)
- goto unlock_mutex;
- be32_to_cpu_array(rwtm->buf, rwtm->buf, MOX_ECC_NUMBER_WORDS);
+ memcpy(rwtm->buf + sizeof(u32), data, SHA512_DIGEST_SIZE);
+ be32_to_cpu_array(rwtm->buf, rwtm->buf, MOX_ECC_NUM_WORDS);
msg.args[0] = 1;
msg.args[1] = rwtm->buf_phys;
- msg.args[2] = rwtm->buf_phys + MOX_ECC_NUMBER_LEN;
- msg.args[3] = rwtm->buf_phys + 2 * MOX_ECC_NUMBER_LEN;
+ msg.args[2] = rwtm->buf_phys + offset_r;
+ msg.args[3] = rwtm->buf_phys + offset_s;
ret = mox_rwtm_exec(rwtm, MBOX_CMD_SIGN, &msg, true);
if (ret < 0)
- goto unlock_mutex;
+ return ret;
- /*
- * Here we read the R and S values of the ECDSA signature
- * computed by the rWTM firmware and convert their words from
- * LE to BE.
- */
- memcpy(rwtm->last_sig, rwtm->buf + MOX_ECC_NUMBER_LEN,
- sizeof(rwtm->last_sig));
- cpu_to_be32_array(rwtm->last_sig, rwtm->last_sig,
- MOX_ECC_SIGNATURE_WORDS);
- rwtm->last_sig_done = true;
+ /* convert R and S parts of the signature */
+ mox_ecc_number_to_bin(signature, rwtm->buf + offset_r);
+ mox_ecc_number_to_bin(signature + MOX_ECC_NUM_LEN, rwtm->buf + offset_s);
- mutex_unlock(&rwtm->busy);
- return len;
-unlock_mutex:
- mutex_unlock(&rwtm->busy);
- return ret;
+ return 0;
}
-static const struct file_operations do_sign_fops = {
- .owner = THIS_MODULE,
- .open = rwtm_debug_open,
- .read = do_sign_read,
- .write = do_sign_write,
-};
-
-static void rwtm_debugfs_release(void *root)
+static const void *mox_rwtm_get_public_key(const struct key *key)
{
- debugfs_remove_recursive(root);
+ struct mox_rwtm *rwtm = dev_get_drvdata(turris_signing_key_get_dev(key));
+
+ return rwtm->pubkey;
}
-static void rwtm_register_debugfs(struct mox_rwtm *rwtm)
+static const struct turris_signing_key_subtype mox_signing_key_subtype = {
+ .key_size = MOX_ECC_NUM_BITS,
+ .data_size = SHA512_DIGEST_SIZE,
+ .sig_size = MOX_ECC_SIG_LEN,
+ .public_key_size = MOX_ECC_PUBKEY_LEN,
+ .hash_algo = "sha512",
+ .get_public_key = mox_rwtm_get_public_key,
+ .sign = mox_rwtm_sign,
+};
+
+static int mox_register_signing_key(struct mox_rwtm *rwtm)
{
- struct dentry *root;
+ struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply;
+ struct device *dev = rwtm_dev(rwtm);
+ int ret;
- root = debugfs_create_dir("turris-mox-rwtm", NULL);
+ ret = mox_rwtm_exec(rwtm, MBOX_CMD_ECDSA_PUB_KEY, NULL, false);
+ if (ret == -ENODATA) {
+ dev_warn(dev, "Board has no public key burned!\n");
+ } else if (ret == -EOPNOTSUPP) {
+ dev_notice(dev,
+ "Firmware does not support the ECDSA_PUB_KEY command\n");
+ } else if (ret < 0) {
+ return ret;
+ } else {
+ char sn[17] = "unknown";
+ char desc[46];
+
+ if (rwtm->has_board_info)
+ sprintf(sn, "%016llX", rwtm->serial_number);
+
+ sprintf(desc, "Turris MOX SN %s rWTM ECDSA key", sn);
- debugfs_create_file_unsafe("do_sign", 0600, root, rwtm, &do_sign_fops);
+ mox_ecc_public_key_to_bin(rwtm->pubkey, ret, reply->status);
- devm_add_action_or_reset(rwtm_dev(rwtm), rwtm_debugfs_release, root);
+ ret = devm_turris_signing_key_create(dev,
+ &mox_signing_key_subtype,
+ desc);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Cannot create signing key\n");
+ }
+
+ return 0;
}
-#else
-static inline void rwtm_register_debugfs(struct mox_rwtm *rwtm)
+
+#else /* CONFIG_TURRIS_MOX_RWTM_KEYCTL */
+
+static inline int mox_register_signing_key(struct mox_rwtm *rwtm)
{
+ return 0;
}
-#endif
+
+#endif /* !CONFIG_TURRIS_MOX_RWTM_KEYCTL */
static void rwtm_devm_mbox_release(void *mbox)
{
@@ -477,6 +461,10 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
if (ret < 0)
dev_warn(dev, "Cannot read board information: %i\n", ret);
+ ret = mox_register_signing_key(rwtm);
+ if (ret < 0)
+ return ret;
+
ret = check_get_random_support(rwtm);
if (ret < 0) {
dev_notice(dev,
@@ -491,8 +479,6 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "Cannot register HWRNG!\n");
- rwtm_register_debugfs(rwtm);
-
dev_info(dev, "HWRNG successfully registered\n");
/*
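The enum introduced near the top of this file's diff encodes every buffer size the new keyctl path relies on, and all of them follow from the P-521 curve width. A sanity-check sketch (not part of the patch; DIV_ROUND_UP and static_assert are the kernel facilities the enum itself uses):

#include <linux/build_bug.h>
#include <linux/math.h>

/* 521-bit numbers: 66 bytes when packed, but seventeen u32 words
 * (68 bytes) on the mailbox wire, so the conversion helpers above
 * drop the top 2 bytes of the big-endian word buffer. */
static_assert(DIV_ROUND_UP(521, 8) == 66);	/* MOX_ECC_NUM_LEN */
static_assert(DIV_ROUND_UP(521, 32) == 17);	/* MOX_ECC_NUM_WORDS */
/* Signature R||S: 2 * 66 = 132 bytes; public key: 1 + 66 = 67 bytes. */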
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index f2c39bbff83a..44f922e10db2 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -201,6 +201,7 @@ config GPIO_RASPBERRYPI_EXP
config GPIO_BCM_KONA
bool "Broadcom Kona GPIO"
depends on ARCH_BCM_MOBILE || COMPILE_TEST
+ select GPIOLIB_IRQCHIP
help
Turn on GPIO support for Broadcom "Kona" chips.
@@ -213,6 +214,18 @@ config GPIO_BCM_XGS_IPROC
help
Say yes here to enable GPIO support for Broadcom XGS iProc SoCs.
+config GPIO_BLZP1600
+ tristate "Blaize BLZP1600 GPIO support"
+ default y if ARCH_BLAIZE
+ depends on ARCH_BLAIZE || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Say Y or M here to add support for the Blaize BLZP1600 GPIO device.
+ The controller is based on the Verisilicon Microelectronics GPIO APB v0.2
+ IP block.
+
config GPIO_BRCMSTB
tristate "BRCMSTB GPIO support"
default y if (ARCH_BRCMSTB || BMIPS_GENERIC)
@@ -241,6 +254,7 @@ config GPIO_DAVINCI
tristate "TI Davinci/Keystone GPIO support"
default y if ARCH_DAVINCI
depends on ((ARM || ARM64) && (ARCH_DAVINCI || ARCH_KEYSTONE || ARCH_K3)) || COMPILE_TEST
+ select GPIOLIB_IRQCHIP
help
Say yes here to enable GPIO support for TI Davinci/Keystone SoCs.
@@ -340,7 +354,7 @@ config GPIO_GRGPIO
tristate "Aeroflex Gaisler GRGPIO support"
depends on OF || COMPILE_TEST
select GPIO_GENERIC
- select IRQ_DOMAIN
+ select GPIOLIB_IRQCHIP
help
Select this to support Aeroflex Gaisler GRGPIO cores from the GRLIB
VHDL IP core library.
@@ -368,8 +382,7 @@ config GPIO_HLWD
config GPIO_ICH
tristate "Intel ICH GPIO"
- depends on X86
- depends on LPC_ICH
+ depends on (X86 && LPC_ICH) || (COMPILE_TEST && HAS_IOPORT)
help
Say yes here to support the GPIO functionality of a number of Intel
ICH-based chipsets. Currently supported devices: ICH6, ICH7, ICH8
@@ -425,6 +438,7 @@ config GPIO_LPC18XX
default y if ARCH_LPC18XX
depends on OF_GPIO && (ARCH_LPC18XX || COMPILE_TEST)
select IRQ_DOMAIN_HIERARCHY
+ select GPIOLIB_IRQCHIP
help
Select this option to enable GPIO driver for
NXP LPC18XX/43XX devices.
@@ -468,7 +482,7 @@ config GPIO_MPC8XXX
FSL_SOC_BOOKE || PPC_86xx || ARCH_LAYERSCAPE || ARM || \
COMPILE_TEST
select GPIO_GENERIC
- select IRQ_DOMAIN
+ select GPIOLIB_IRQCHIP
help
Say Y here if you're going to use hardware that connects to the
MPC512x/831x/834x/837x/8572/8610/QorIQ GPIOs.
@@ -540,7 +554,7 @@ config GPIO_OMAP
config GPIO_PL061
tristate "PrimeCell PL061 GPIO support"
- depends on ARM_AMBA
+ depends on ARM_AMBA || COMPILE_TEST
select IRQ_DOMAIN
select GPIOLIB_IRQCHIP
help
@@ -555,6 +569,7 @@ config GPIO_POLARFIRE_SOC
config GPIO_PXA
bool "PXA GPIO support"
depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST
+ select GPIOLIB_IRQCHIP
help
Say yes here to support the PXA GPIO device.
@@ -604,7 +619,7 @@ config GPIO_ROCKCHIP
config GPIO_RTD
tristate "Realtek DHC GPIO support"
- depends on ARCH_REALTEK
+ depends on ARCH_REALTEK || COMPILE_TEST
default y
select GPIOLIB_IRQCHIP
help
@@ -656,6 +671,15 @@ config GPIO_SNPS_CREG
where only several fields in register belong to GPIO lines and
each GPIO line owns a field with different length and on/off value.
+config GPIO_SPACEMIT_K1
+ tristate "SPACEMIT K1 GPIO support"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+	  Say yes here to support the SpacemiT K1 GPIO device.
+
config GPIO_SPEAR_SPICS
bool "ST SPEAr13xx SPI Chip Select as GPIO support"
depends on PLAT_SPEAR
@@ -753,7 +777,7 @@ config GPIO_UNIPHIER
Say yes here to support UniPhier GPIOs.
config GPIO_VF610
- bool "VF610 GPIO support"
+ tristate "VF610 GPIO support"
default y if SOC_VF610
depends on ARCH_MXC || COMPILE_TEST
select GPIOLIB_IRQCHIP
@@ -830,14 +854,14 @@ config GPIO_ZEVIO
config GPIO_ZYNQ
tristate "Xilinx Zynq GPIO support"
- depends on ARCH_ZYNQ || ARCH_ZYNQMP
+ depends on ARCH_ZYNQ || ARCH_ZYNQMP || COMPILE_TEST
select GPIOLIB_IRQCHIP
help
Say yes here to support Xilinx Zynq GPIO controller.
config GPIO_ZYNQMP_MODEPIN
tristate "ZynqMP ps-mode pin GPIO configuration driver"
- depends on ZYNQMP_FIRMWARE
+ depends on ZYNQMP_FIRMWARE || COMPILE_TEST
default ZYNQMP_FIRMWARE
help
Say yes here to support the ZynqMP ps-mode pin GPIO configuration
@@ -866,7 +890,7 @@ config GPIO_AMD_FCH
config GPIO_MSC313
bool "MStar MSC313 GPIO support"
- depends on ARCH_MSTARV7
+ depends on ARCH_MSTARV7 || COMPILE_TEST
default ARCH_MSTARV7
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
@@ -1365,7 +1389,7 @@ config GPIO_DLN2
config HTC_EGPIO
bool "HTC EGPIO support"
- depends on ARM
+ depends on ARM || COMPILE_TEST
help
This driver supports the CPLD egpio chip present on
several HTC phones. It provides basic support for input
@@ -1463,6 +1487,19 @@ config GPIO_MAX77650
GPIO driver for MAX77650/77651 PMIC from Maxim Semiconductor.
These chips have a single pin that can be configured as GPIO.
+config GPIO_MAX77759
+ tristate "Maxim Integrated MAX77759 GPIO support"
+ depends on MFD_MAX77759
+ default MFD_MAX77759
+ select GPIOLIB_IRQCHIP
+ help
+ GPIO driver for MAX77759 PMIC from Maxim Integrated.
+ There are two GPIOs available on these chips in total, both of
+ which can also generate interrupts.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-max77759.
+
config GPIO_PALMAS
bool "TI PALMAS series PMICs GPIO"
depends on MFD_PALMAS
@@ -1520,12 +1557,13 @@ config GPIO_TC3589X
config GPIO_TIMBERDALE
bool "Support for timberdale GPIO IP"
depends on MFD_TIMBERDALE
+ select GPIOLIB_IRQCHIP
help
Add support for the GPIO IP in the timberdale FPGA.
config GPIO_TN48M_CPLD
tristate "Delta Networks TN48M switch CPLD GPIO driver"
- depends on MFD_TN48M_CPLD
+ depends on MFD_TN48M_CPLD || COMPILE_TEST
select GPIO_REGMAP
help
This enables support for the GPIOs found on the Delta
@@ -1869,6 +1907,8 @@ menu "Virtual GPIO drivers"
config GPIO_AGGREGATOR
tristate "GPIO Aggregator"
+ select CONFIGFS_FS
+ select DEV_SYNC_PROBE
help
Say yes here to enable the GPIO Aggregator, which provides a way to
aggregate existing GPIO lines into a new virtual GPIO chip.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index af130882ffee..88dedd298256 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
obj-$(CONFIG_GPIO_CDEV) += gpiolib-cdev.o
obj-$(CONFIG_GPIO_SYSFS) += gpiolib-sysfs.o
obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
+gpiolib-acpi-y := gpiolib-acpi-core.o gpiolib-acpi-quirks.o
obj-$(CONFIG_GPIOLIB) += gpiolib-swnode.o
# Device drivers. Generally keep list sorted alphabetically
@@ -45,6 +46,7 @@ obj-$(CONFIG_GPIO_BCM_XGS_IPROC) += gpio-xgs-iproc.o
obj-$(CONFIG_GPIO_BD71815) += gpio-bd71815.o
obj-$(CONFIG_GPIO_BD71828) += gpio-bd71828.o
obj-$(CONFIG_GPIO_BD9571MWV) += gpio-bd9571mwv.o
+obj-$(CONFIG_GPIO_BLZP1600) += gpio-blzp1600.o
obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
obj-$(CONFIG_GPIO_CADENCE) += gpio-cadence.o
@@ -105,6 +107,7 @@ obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o
obj-$(CONFIG_GPIO_MAX77620) += gpio-max77620.o
obj-$(CONFIG_GPIO_MAX77650) += gpio-max77650.o
+obj-$(CONFIG_GPIO_MAX77759) += gpio-max77759.o
obj-$(CONFIG_GPIO_MB86S7X) += gpio-mb86s7x.o
obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MENZ127) += gpio-menz127.o
@@ -159,6 +162,7 @@ obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o
obj-$(CONFIG_GPIO_SL28CPLD) += gpio-sl28cpld.o
obj-$(CONFIG_GPIO_SLOPPY_LOGIC_ANALYZER) += gpio-sloppy-logic-analyzer.o
obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
+obj-$(CONFIG_GPIO_SPACEMIT_K1) += gpio-spacemit-k1.o
obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o
obj-$(CONFIG_GPIO_SPRD) += gpio-sprd.o
obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 4b70cbaa1caa..4a8b349f2483 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -44,6 +44,13 @@ Work items:
to a machine description such as device tree, ACPI or fwnode that
implicitly does not use global GPIO numbers.
+- Fix drivers to not read back struct gpio_chip::base. Some drivers do
+ that and would be broken by attempts to poison it or make it dynamic.
+ Example in AT91 pinctrl driver:
+ https://lore.kernel.org/all/1d00c056-3d61-4c22-bedd-3bae0bf1ddc4@pengutronix.de/
+ This particular driver is also DT-only, so with the above fixed, the
+ base can be made dynamic (set to -1) if CONFIG_GPIO_SYSFS is disabled.
+
- When this work is complete (will require some of the items in the
following ongoing work as well) we can delete the old global
numberspace accessors from <linux/gpio.h> and eventually delete
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index d232ea865356..6f941db02c04 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -9,10 +9,13 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
+#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -27,226 +30,200 @@
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
+#include "dev-sync-probe.h"
+
#define AGGREGATOR_MAX_GPIOS 512
+#define AGGREGATOR_LEGACY_PREFIX "_sysfs"
/*
* GPIO Aggregator sysfs interface
*/
struct gpio_aggregator {
+ struct dev_sync_probe_data probe_data;
+ struct config_group group;
struct gpiod_lookup_table *lookups;
- struct platform_device *pdev;
+ struct mutex lock;
+ int id;
+
+ /* List of gpio_aggregator_line. Always added in order */
+ struct list_head list_head;
+
+ /* used by legacy sysfs interface only */
+ bool init_via_sysfs;
char args[];
};
+struct gpio_aggregator_line {
+ struct config_group group;
+ struct gpio_aggregator *parent;
+ struct list_head entry;
+
+ /* Line index within the aggregator device */
+ unsigned int idx;
+
+ /* Custom name for the virtual line */
+ const char *name;
+ /* GPIO chip label or line name */
+ const char *key;
+ /* Can be negative to indicate lookup by line name */
+ int offset;
+
+ enum gpio_lookup_flags flags;
+};
+
+struct gpio_aggregator_pdev_meta {
+ bool init_via_sysfs;
+};
+
static DEFINE_MUTEX(gpio_aggregator_lock); /* protects idr */
static DEFINE_IDR(gpio_aggregator_idr);
-static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
- int hwnum, unsigned int *n)
+static int gpio_aggregator_alloc(struct gpio_aggregator **aggr, size_t arg_size)
{
- struct gpiod_lookup_table *lookups;
+ int ret;
- lookups = krealloc(aggr->lookups, struct_size(lookups, table, *n + 2),
- GFP_KERNEL);
- if (!lookups)
+ struct gpio_aggregator *new __free(kfree) = kzalloc(
+ sizeof(*new) + arg_size, GFP_KERNEL);
+ if (!new)
return -ENOMEM;
- lookups->table[*n] = GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);
+ scoped_guard(mutex, &gpio_aggregator_lock)
+ ret = idr_alloc(&gpio_aggregator_idr, new, 0, 0, GFP_KERNEL);
- (*n)++;
- memset(&lookups->table[*n], 0, sizeof(lookups->table[*n]));
+ if (ret < 0)
+ return ret;
- aggr->lookups = lookups;
+ new->id = ret;
+ INIT_LIST_HEAD(&new->list_head);
+ mutex_init(&new->lock);
+ *aggr = no_free_ptr(new);
return 0;
}
-static int aggr_parse(struct gpio_aggregator *aggr)
+static void gpio_aggregator_free(struct gpio_aggregator *aggr)
{
- char *args = skip_spaces(aggr->args);
- char *name, *offsets, *p;
- unsigned int i, n = 0;
- int error = 0;
-
- unsigned long *bitmap __free(bitmap) =
- bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL);
- if (!bitmap)
- return -ENOMEM;
-
- args = next_arg(args, &name, &p);
- while (*args) {
- args = next_arg(args, &offsets, &p);
-
- p = get_options(offsets, 0, &error);
- if (error == 0 || *p) {
- /* Named GPIO line */
- error = aggr_add_gpio(aggr, name, U16_MAX, &n);
- if (error)
- return error;
+ scoped_guard(mutex, &gpio_aggregator_lock)
+ idr_remove(&gpio_aggregator_idr, aggr->id);
- name = offsets;
- continue;
- }
+ mutex_destroy(&aggr->lock);
+ kfree(aggr);
+}
- /* GPIO chip + offset(s) */
- error = bitmap_parselist(offsets, bitmap, AGGREGATOR_MAX_GPIOS);
- if (error) {
- pr_err("Cannot parse %s: %d\n", offsets, error);
- return error;
- }
+static int gpio_aggregator_add_gpio(struct gpio_aggregator *aggr,
+ const char *key, int hwnum, unsigned int *n)
+{
+ struct gpiod_lookup_table *lookups;
- for_each_set_bit(i, bitmap, AGGREGATOR_MAX_GPIOS) {
- error = aggr_add_gpio(aggr, name, i, &n);
- if (error)
- return error;
- }
+ lookups = krealloc(aggr->lookups, struct_size(lookups, table, *n + 2),
+ GFP_KERNEL);
+ if (!lookups)
+ return -ENOMEM;
- args = next_arg(args, &name, &p);
- }
+ lookups->table[*n] = GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);
- if (!n) {
- pr_err("No GPIOs specified\n");
- return -EINVAL;
- }
+ (*n)++;
+ memset(&lookups->table[*n], 0, sizeof(lookups->table[*n]));
+ aggr->lookups = lookups;
return 0;
}
-static ssize_t new_device_store(struct device_driver *driver, const char *buf,
- size_t count)
+static bool gpio_aggregator_is_active(struct gpio_aggregator *aggr)
{
- struct gpio_aggregator *aggr;
- struct platform_device *pdev;
- int res, id;
+ lockdep_assert_held(&aggr->lock);
- if (!try_module_get(THIS_MODULE))
- return -ENOENT;
-
- /* kernfs guarantees string termination, so count + 1 is safe */
- aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL);
- if (!aggr) {
- res = -ENOMEM;
- goto put_module;
- }
-
- memcpy(aggr->args, buf, count + 1);
+ return aggr->probe_data.pdev && platform_get_drvdata(aggr->probe_data.pdev);
+}
- aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
- GFP_KERNEL);
- if (!aggr->lookups) {
- res = -ENOMEM;
- goto free_ga;
- }
+/* Only aggregators created via legacy sysfs can be "activating". */
+static bool gpio_aggregator_is_activating(struct gpio_aggregator *aggr)
+{
+ lockdep_assert_held(&aggr->lock);
- mutex_lock(&gpio_aggregator_lock);
- id = idr_alloc(&gpio_aggregator_idr, aggr, 0, 0, GFP_KERNEL);
- mutex_unlock(&gpio_aggregator_lock);
+ return aggr->probe_data.pdev && !platform_get_drvdata(aggr->probe_data.pdev);
+}
- if (id < 0) {
- res = id;
- goto free_table;
- }
+static size_t gpio_aggregator_count_lines(struct gpio_aggregator *aggr)
+{
+ lockdep_assert_held(&aggr->lock);
- aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, id);
- if (!aggr->lookups->dev_id) {
- res = -ENOMEM;
- goto remove_idr;
- }
+ return list_count_nodes(&aggr->list_head);
+}
- res = aggr_parse(aggr);
- if (res)
- goto free_dev_id;
+static struct gpio_aggregator_line *
+gpio_aggregator_line_alloc(struct gpio_aggregator *parent, unsigned int idx,
+ char *key, int offset)
+{
+ struct gpio_aggregator_line *line;
- gpiod_add_lookup_table(aggr->lookups);
+ line = kzalloc(sizeof(*line), GFP_KERNEL);
+ if (!line)
+ return ERR_PTR(-ENOMEM);
- pdev = platform_device_register_simple(DRV_NAME, id, NULL, 0);
- if (IS_ERR(pdev)) {
- res = PTR_ERR(pdev);
- goto remove_table;
+ if (key) {
+ line->key = kstrdup(key, GFP_KERNEL);
+ if (!line->key) {
+ kfree(line);
+ return ERR_PTR(-ENOMEM);
+ }
}
- aggr->pdev = pdev;
- module_put(THIS_MODULE);
- return count;
+ line->flags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ line->parent = parent;
+ line->idx = idx;
+ line->offset = offset;
+ INIT_LIST_HEAD(&line->entry);
-remove_table:
- gpiod_remove_lookup_table(aggr->lookups);
-free_dev_id:
- kfree(aggr->lookups->dev_id);
-remove_idr:
- mutex_lock(&gpio_aggregator_lock);
- idr_remove(&gpio_aggregator_idr, id);
- mutex_unlock(&gpio_aggregator_lock);
-free_table:
- kfree(aggr->lookups);
-free_ga:
- kfree(aggr);
-put_module:
- module_put(THIS_MODULE);
- return res;
-}
-
-static DRIVER_ATTR_WO(new_device);
-
-static void gpio_aggregator_free(struct gpio_aggregator *aggr)
-{
- platform_device_unregister(aggr->pdev);
- gpiod_remove_lookup_table(aggr->lookups);
- kfree(aggr->lookups->dev_id);
- kfree(aggr->lookups);
- kfree(aggr);
+ return line;
}
-static ssize_t delete_device_store(struct device_driver *driver,
- const char *buf, size_t count)
+static void gpio_aggregator_line_add(struct gpio_aggregator *aggr,
+ struct gpio_aggregator_line *line)
{
- struct gpio_aggregator *aggr;
- unsigned int id;
- int error;
+ struct gpio_aggregator_line *tmp;
- if (!str_has_prefix(buf, DRV_NAME "."))
- return -EINVAL;
+ lockdep_assert_held(&aggr->lock);
- error = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id);
- if (error)
- return error;
-
- if (!try_module_get(THIS_MODULE))
- return -ENOENT;
-
- mutex_lock(&gpio_aggregator_lock);
- aggr = idr_remove(&gpio_aggregator_idr, id);
- mutex_unlock(&gpio_aggregator_lock);
- if (!aggr) {
- module_put(THIS_MODULE);
- return -ENOENT;
+ list_for_each_entry(tmp, &aggr->list_head, entry) {
+ if (tmp->idx > line->idx) {
+ list_add_tail(&line->entry, &tmp->entry);
+ return;
+ }
}
-
- gpio_aggregator_free(aggr);
- module_put(THIS_MODULE);
- return count;
+ list_add_tail(&line->entry, &aggr->list_head);
}
-static DRIVER_ATTR_WO(delete_device);
-
-static struct attribute *gpio_aggregator_attrs[] = {
- &driver_attr_new_device.attr,
- &driver_attr_delete_device.attr,
- NULL
-};
-ATTRIBUTE_GROUPS(gpio_aggregator);
-static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data)
+static void gpio_aggregator_line_del(struct gpio_aggregator *aggr,
+ struct gpio_aggregator_line *line)
{
- gpio_aggregator_free(p);
- return 0;
+ lockdep_assert_held(&aggr->lock);
+
+ list_del(&line->entry);
}
-static void __exit gpio_aggregator_remove_all(void)
+static void gpio_aggregator_free_lines(struct gpio_aggregator *aggr)
{
- mutex_lock(&gpio_aggregator_lock);
- idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL);
- idr_destroy(&gpio_aggregator_idr);
- mutex_unlock(&gpio_aggregator_lock);
+ struct gpio_aggregator_line *line, *tmp;
+
+ list_for_each_entry_safe(line, tmp, &aggr->list_head, entry) {
+ configfs_unregister_group(&line->group);
+ /*
+ * Normally, we acquire aggr->lock within the configfs
+ * callback. However, in the legacy sysfs interface case,
+ * calling configfs_(un)register_group while holding
+ * aggr->lock could cause a deadlock. Fortunately, this is
+ * unnecessary because the new_device/delete_device path
+ * and the module unload path are mutually exclusive,
+ * thanks to an explicit try_module_get. That's why this
+ * minimal scoped_guard suffices.
+ */
+ scoped_guard(mutex, &aggr->lock)
+ gpio_aggregator_line_del(aggr, line);
+ kfree(line->key);
+ kfree(line->name);
+ kfree(line);
+ }
}
@@ -582,6 +559,728 @@ static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
return fwd;
}
+/*
+ * Configfs interface
+ */
+
+static struct gpio_aggregator *
+to_gpio_aggregator(struct config_item *item)
+{
+ struct config_group *group = to_config_group(item);
+
+ return container_of(group, struct gpio_aggregator, group);
+}
+
+static struct gpio_aggregator_line *
+to_gpio_aggregator_line(struct config_item *item)
+{
+ struct config_group *group = to_config_group(item);
+
+ return container_of(group, struct gpio_aggregator_line, group);
+}
+
+static struct fwnode_handle *
+gpio_aggregator_make_device_sw_node(struct gpio_aggregator *aggr)
+{
+ struct property_entry properties[2];
+ struct gpio_aggregator_line *line;
+ size_t num_lines;
+ int n = 0;
+
+ memset(properties, 0, sizeof(properties));
+
+ num_lines = gpio_aggregator_count_lines(aggr);
+ if (num_lines == 0)
+ return NULL;
+
+ const char **line_names __free(kfree) = kcalloc(
+ num_lines, sizeof(*line_names), GFP_KERNEL);
+ if (!line_names)
+ return ERR_PTR(-ENOMEM);
+
+ /* The list is always sorted as new elements are inserted in order. */
+ list_for_each_entry(line, &aggr->list_head, entry)
+ line_names[n++] = line->name ?: "";
+
+ properties[0] = PROPERTY_ENTRY_STRING_ARRAY_LEN(
+ "gpio-line-names",
+ line_names, num_lines);
+
+ return fwnode_create_software_node(properties, NULL);
+}
+
+static int gpio_aggregator_activate(struct gpio_aggregator *aggr)
+{
+ struct platform_device_info pdevinfo;
+ struct gpio_aggregator_line *line;
+ struct fwnode_handle *swnode;
+ unsigned int n = 0;
+ int ret = 0;
+
+ if (gpio_aggregator_count_lines(aggr) == 0)
+ return -EINVAL;
+
+ aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
+ GFP_KERNEL);
+ if (!aggr->lookups)
+ return -ENOMEM;
+
+ swnode = gpio_aggregator_make_device_sw_node(aggr);
+ if (IS_ERR(swnode)) {
+ ret = PTR_ERR(swnode);
+ goto err_remove_lookups;
+ }
+
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ pdevinfo.name = DRV_NAME;
+ pdevinfo.id = aggr->id;
+ pdevinfo.fwnode = swnode;
+
+ /* The list is always sorted as new elements are inserted in order. */
+ list_for_each_entry(line, &aggr->list_head, entry) {
+ /*
+ * - Either GPIO chip label or line name must be configured
+ * (i.e. line->key must be non-NULL)
+ * - Line directories must be named with sequential numeric
+ * suffixes starting from 0. (i.e. ./line0, ./line1, ...)
+ */
+ if (!line->key || line->idx != n) {
+ ret = -EINVAL;
+ goto err_remove_swnode;
+ }
+
+ if (line->offset < 0)
+ ret = gpio_aggregator_add_gpio(aggr, line->key,
+ U16_MAX, &n);
+ else
+ ret = gpio_aggregator_add_gpio(aggr, line->key,
+ line->offset, &n);
+ if (ret)
+ goto err_remove_swnode;
+ }
+
+ aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, aggr->id);
+ if (!aggr->lookups->dev_id) {
+ ret = -ENOMEM;
+ goto err_remove_swnode;
+ }
+
+ gpiod_add_lookup_table(aggr->lookups);
+
+ ret = dev_sync_probe_register(&aggr->probe_data, &pdevinfo);
+ if (ret)
+ goto err_remove_lookup_table;
+
+ return 0;
+
+err_remove_lookup_table:
+ kfree(aggr->lookups->dev_id);
+ gpiod_remove_lookup_table(aggr->lookups);
+err_remove_swnode:
+ fwnode_remove_software_node(swnode);
+err_remove_lookups:
+ kfree(aggr->lookups);
+
+ return ret;
+}
+
+static void gpio_aggregator_deactivate(struct gpio_aggregator *aggr)
+{
+ dev_sync_probe_unregister(&aggr->probe_data);
+ gpiod_remove_lookup_table(aggr->lookups);
+ kfree(aggr->lookups->dev_id);
+ kfree(aggr->lookups);
+}
+
+static void gpio_aggregator_lockup_configfs(struct gpio_aggregator *aggr,
+ bool lock)
+{
+ struct configfs_subsystem *subsys = aggr->group.cg_subsys;
+ struct gpio_aggregator_line *line;
+
+ /*
+ * The device only needs to depend on leaf lines. This is
+ * sufficient to lock up all the configfs entries that the
+ * instantiated, alive device depends on.
+ */
+ list_for_each_entry(line, &aggr->list_head, entry) {
+ if (lock)
+ configfs_depend_item_unlocked(
+ subsys, &line->group.cg_item);
+ else
+ configfs_undepend_item_unlocked(
+ &line->group.cg_item);
+ }
+}
+
+static ssize_t
+gpio_aggregator_line_key_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ guard(mutex)(&aggr->lock);
+
+ return sysfs_emit(page, "%s\n", line->key ?: "");
+}
+
+static ssize_t
+gpio_aggregator_line_key_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ char *key __free(kfree) = kstrndup(skip_spaces(page), count,
+ GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
+
+ strim(key);
+
+ guard(mutex)(&aggr->lock);
+
+ if (gpio_aggregator_is_activating(aggr) ||
+ gpio_aggregator_is_active(aggr))
+ return -EBUSY;
+
+ kfree(line->key);
+ line->key = no_free_ptr(key);
+
+ return count;
+}
+CONFIGFS_ATTR(gpio_aggregator_line_, key);
+
+static ssize_t
+gpio_aggregator_line_name_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ guard(mutex)(&aggr->lock);
+
+ return sysfs_emit(page, "%s\n", line->name ?: "");
+}
+
+static ssize_t
+gpio_aggregator_line_name_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ char *name __free(kfree) = kstrndup(skip_spaces(page), count,
+ GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ strim(name);
+
+ guard(mutex)(&aggr->lock);
+
+ if (gpio_aggregator_is_activating(aggr) ||
+ gpio_aggregator_is_active(aggr))
+ return -EBUSY;
+
+ kfree(line->name);
+ line->name = no_free_ptr(name);
+
+ return count;
+}
+CONFIGFS_ATTR(gpio_aggregator_line_, name);
+
+static ssize_t
+gpio_aggregator_line_offset_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ guard(mutex)(&aggr->lock);
+
+ return sysfs_emit(page, "%d\n", line->offset);
+}
+
+static ssize_t
+gpio_aggregator_line_offset_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+ int offset, ret;
+
+ ret = kstrtoint(page, 0, &offset);
+ if (ret)
+ return ret;
+
+ /*
+ * When offset == -1, 'key' represents a line name to lookup.
+ * When 0 <= offset < 65535, 'key' represents the label of the chip with
+ * the 'offset' value representing the line within that chip.
+ *
+ * GPIOLIB uses the U16_MAX value to indicate lookup by line name so
+ * the greatest offset we can accept is (U16_MAX - 1).
+ */
+ if (offset > (U16_MAX - 1) || offset < -1)
+ return -EINVAL;
+
+ guard(mutex)(&aggr->lock);
+
+ if (gpio_aggregator_is_activating(aggr) ||
+ gpio_aggregator_is_active(aggr))
+ return -EBUSY;
+
+ line->offset = offset;
+
+ return count;
+}
+CONFIGFS_ATTR(gpio_aggregator_line_, offset);
+
+static struct configfs_attribute *gpio_aggregator_line_attrs[] = {
+ &gpio_aggregator_line_attr_key,
+ &gpio_aggregator_line_attr_name,
+ &gpio_aggregator_line_attr_offset,
+ NULL
+};
+
+static ssize_t
+gpio_aggregator_device_dev_name_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(item);
+ struct platform_device *pdev;
+
+ guard(mutex)(&aggr->lock);
+
+ pdev = aggr->probe_data.pdev;
+ if (pdev)
+ return sysfs_emit(page, "%s\n", dev_name(&pdev->dev));
+
+ return sysfs_emit(page, "%s.%d\n", DRV_NAME, aggr->id);
+}
+CONFIGFS_ATTR_RO(gpio_aggregator_device_, dev_name);
+
+static ssize_t
+gpio_aggregator_device_live_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(item);
+
+ guard(mutex)(&aggr->lock);
+
+ return sysfs_emit(page, "%c\n",
+ gpio_aggregator_is_active(aggr) ? '1' : '0');
+}
+
+static ssize_t
+gpio_aggregator_device_live_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(item);
+ int ret = 0;
+ bool live;
+
+ ret = kstrtobool(page, &live);
+ if (ret)
+ return ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
+ if (live && !aggr->init_via_sysfs)
+ gpio_aggregator_lockup_configfs(aggr, true);
+
+ scoped_guard(mutex, &aggr->lock) {
+ if (gpio_aggregator_is_activating(aggr) ||
+ (live == gpio_aggregator_is_active(aggr)))
+ ret = -EPERM;
+ else if (live)
+ ret = gpio_aggregator_activate(aggr);
+ else
+ gpio_aggregator_deactivate(aggr);
+ }
+
+ /*
+ * Undepend is required only if device disablement (live == 0)
+ * succeeds or if device enablement (live == 1) fails.
+ */
+ if (live == !!ret && !aggr->init_via_sysfs)
+ gpio_aggregator_lockup_configfs(aggr, false);
+
+ module_put(THIS_MODULE);
+
+ return ret ?: count;
+}
+CONFIGFS_ATTR(gpio_aggregator_device_, live);
+
+static struct configfs_attribute *gpio_aggregator_device_attrs[] = {
+ &gpio_aggregator_device_attr_dev_name,
+ &gpio_aggregator_device_attr_live,
+ NULL
+};
+
+static void
+gpio_aggregator_line_release(struct config_item *item)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ guard(mutex)(&aggr->lock);
+
+ gpio_aggregator_line_del(aggr, line);
+ kfree(line->key);
+ kfree(line->name);
+ kfree(line);
+}
+
+static struct configfs_item_operations gpio_aggregator_line_item_ops = {
+ .release = gpio_aggregator_line_release,
+};
+
+static const struct config_item_type gpio_aggregator_line_type = {
+ .ct_item_ops = &gpio_aggregator_line_item_ops,
+ .ct_attrs = gpio_aggregator_line_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static void gpio_aggregator_device_release(struct config_item *item)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(item);
+
+ /*
+ * At this point, aggr is neither active nor activating,
+ * so calling gpio_aggregator_deactivate() is always unnecessary.
+ */
+ gpio_aggregator_free(aggr);
+}
+
+static struct configfs_item_operations gpio_aggregator_device_item_ops = {
+ .release = gpio_aggregator_device_release,
+};
+
+static struct config_group *
+gpio_aggregator_device_make_group(struct config_group *group, const char *name)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(&group->cg_item);
+ struct gpio_aggregator_line *line;
+ unsigned int idx;
+ int ret, nchar;
+
+ ret = sscanf(name, "line%u%n", &idx, &nchar);
+ if (ret != 1 || nchar != strlen(name))
+ return ERR_PTR(-EINVAL);
+
+ if (aggr->init_via_sysfs)
+ /*
+ * Aggregators created via legacy sysfs interface are exposed as
+ * default groups, which means rmdir(2) is prohibited for them.
+ * For simplicity, and to avoid confusion, we also prohibit
+ * mkdir(2).
+ */
+ return ERR_PTR(-EPERM);
+
+ guard(mutex)(&aggr->lock);
+
+ if (gpio_aggregator_is_active(aggr))
+ return ERR_PTR(-EBUSY);
+
+ list_for_each_entry(line, &aggr->list_head, entry)
+ if (line->idx == idx)
+ return ERR_PTR(-EINVAL);
+
+ line = gpio_aggregator_line_alloc(aggr, idx, NULL, -1);
+ if (IS_ERR(line))
+ return ERR_CAST(line);
+
+ config_group_init_type_name(&line->group, name, &gpio_aggregator_line_type);
+
+ gpio_aggregator_line_add(aggr, line);
+
+ return &line->group;
+}
+
+static struct configfs_group_operations gpio_aggregator_device_group_ops = {
+ .make_group = gpio_aggregator_device_make_group,
+};
+
+static const struct config_item_type gpio_aggregator_device_type = {
+ .ct_group_ops = &gpio_aggregator_device_group_ops,
+ .ct_item_ops = &gpio_aggregator_device_item_ops,
+ .ct_attrs = gpio_aggregator_device_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *
+gpio_aggregator_make_group(struct config_group *group, const char *name)
+{
+ struct gpio_aggregator *aggr;
+ int ret;
+
+ /*
+	 * The "_sysfs" prefix is reserved for the auto-generated config
+	 * groups of devices created via the legacy sysfs interface.
+ */
+ if (strncmp(name, AGGREGATOR_LEGACY_PREFIX,
+ sizeof(AGGREGATOR_LEGACY_PREFIX) - 1) == 0)
+ return ERR_PTR(-EINVAL);
+
+ /* arg space is unneeded */
+ ret = gpio_aggregator_alloc(&aggr, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ config_group_init_type_name(&aggr->group, name, &gpio_aggregator_device_type);
+ dev_sync_probe_init(&aggr->probe_data);
+
+ return &aggr->group;
+}
+
+static struct configfs_group_operations gpio_aggregator_group_ops = {
+ .make_group = gpio_aggregator_make_group,
+};
+
+static const struct config_item_type gpio_aggregator_type = {
+ .ct_group_ops = &gpio_aggregator_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem gpio_aggregator_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = DRV_NAME,
+ .ci_type = &gpio_aggregator_type,
+ },
+ },
+};
+
+/*
+ * Sysfs interface
+ */
+static int gpio_aggregator_parse(struct gpio_aggregator *aggr)
+{
+ char *args = skip_spaces(aggr->args);
+ struct gpio_aggregator_line *line;
+ char name[CONFIGFS_ITEM_NAME_LEN];
+ char *key, *offsets, *p;
+ unsigned int i, n = 0;
+ int error = 0;
+
+ unsigned long *bitmap __free(bitmap) =
+ bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL);
+ if (!bitmap)
+ return -ENOMEM;
+
+ args = next_arg(args, &key, &p);
+ while (*args) {
+ args = next_arg(args, &offsets, &p);
+
+ p = get_options(offsets, 0, &error);
+ if (error == 0 || *p) {
+ /* Named GPIO line */
+ scnprintf(name, sizeof(name), "line%u", n);
+ line = gpio_aggregator_line_alloc(aggr, n, key, -1);
+ if (IS_ERR(line)) {
+ error = PTR_ERR(line);
+ goto err;
+ }
+ config_group_init_type_name(&line->group, name,
+ &gpio_aggregator_line_type);
+ error = configfs_register_group(&aggr->group,
+ &line->group);
+ if (error)
+ goto err;
+ scoped_guard(mutex, &aggr->lock)
+ gpio_aggregator_line_add(aggr, line);
+
+ error = gpio_aggregator_add_gpio(aggr, key, U16_MAX, &n);
+ if (error)
+ goto err;
+
+ key = offsets;
+ continue;
+ }
+
+ /* GPIO chip + offset(s) */
+ error = bitmap_parselist(offsets, bitmap, AGGREGATOR_MAX_GPIOS);
+ if (error) {
+ pr_err("Cannot parse %s: %d\n", offsets, error);
+ goto err;
+ }
+
+ for_each_set_bit(i, bitmap, AGGREGATOR_MAX_GPIOS) {
+ scnprintf(name, sizeof(name), "line%u", n);
+ line = gpio_aggregator_line_alloc(aggr, n, key, i);
+ if (IS_ERR(line)) {
+ error = PTR_ERR(line);
+ goto err;
+ }
+ config_group_init_type_name(&line->group, name,
+ &gpio_aggregator_line_type);
+ error = configfs_register_group(&aggr->group,
+ &line->group);
+ if (error)
+ goto err;
+ scoped_guard(mutex, &aggr->lock)
+ gpio_aggregator_line_add(aggr, line);
+
+ error = gpio_aggregator_add_gpio(aggr, key, i, &n);
+ if (error)
+ goto err;
+ }
+
+ args = next_arg(args, &key, &p);
+ }
+
+ if (!n) {
+ pr_err("No GPIOs specified\n");
+ error = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ gpio_aggregator_free_lines(aggr);
+ return error;
+}
+
+static ssize_t gpio_aggregator_new_device_store(struct device_driver *driver,
+ const char *buf, size_t count)
+{
+ struct gpio_aggregator_pdev_meta meta = { .init_via_sysfs = true };
+ char name[CONFIGFS_ITEM_NAME_LEN];
+ struct gpio_aggregator *aggr;
+ struct platform_device *pdev;
+ int res;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
+ /* kernfs guarantees string termination, so count + 1 is safe */
+ res = gpio_aggregator_alloc(&aggr, count + 1);
+ if (res)
+ goto put_module;
+
+ memcpy(aggr->args, buf, count + 1);
+
+ aggr->init_via_sysfs = true;
+ aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
+ GFP_KERNEL);
+ if (!aggr->lookups) {
+ res = -ENOMEM;
+ goto free_ga;
+ }
+
+ aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, aggr->id);
+ if (!aggr->lookups->dev_id) {
+ res = -ENOMEM;
+ goto free_table;
+ }
+
+ scnprintf(name, sizeof(name), "%s.%d", AGGREGATOR_LEGACY_PREFIX, aggr->id);
+ config_group_init_type_name(&aggr->group, name, &gpio_aggregator_device_type);
+
+ /*
+ * A device created via sysfs may later be toggled through the
+ * configfs 'live' attribute, so the probe-synchronization state
+ * must be initialized here as well.
+ */
+ dev_sync_probe_init(&aggr->probe_data);
+
+ /* Expose to configfs */
+ res = configfs_register_group(&gpio_aggregator_subsys.su_group,
+ &aggr->group);
+ if (res)
+ goto free_dev_id;
+
+ res = gpio_aggregator_parse(aggr);
+ if (res)
+ goto unregister_group;
+
+ gpiod_add_lookup_table(aggr->lookups);
+
+ pdev = platform_device_register_data(NULL, DRV_NAME, aggr->id, &meta, sizeof(meta));
+ if (IS_ERR(pdev)) {
+ res = PTR_ERR(pdev);
+ goto remove_table;
+ }
+
+ aggr->probe_data.pdev = pdev;
+ module_put(THIS_MODULE);
+ return count;
+
+remove_table:
+ gpiod_remove_lookup_table(aggr->lookups);
+unregister_group:
+ configfs_unregister_group(&aggr->group);
+free_dev_id:
+ kfree(aggr->lookups->dev_id);
+free_table:
+ kfree(aggr->lookups);
+free_ga:
+ gpio_aggregator_free(aggr);
+put_module:
+ module_put(THIS_MODULE);
+ return res;
+}
+
+static struct driver_attribute driver_attr_gpio_aggregator_new_device =
+ __ATTR(new_device, 0200, NULL, gpio_aggregator_new_device_store);
+
+static void gpio_aggregator_destroy(struct gpio_aggregator *aggr)
+{
+ scoped_guard(mutex, &aggr->lock) {
+ if (gpio_aggregator_is_activating(aggr) ||
+ gpio_aggregator_is_active(aggr))
+ gpio_aggregator_deactivate(aggr);
+ }
+ gpio_aggregator_free_lines(aggr);
+ configfs_unregister_group(&aggr->group);
+ kfree(aggr);
+}
+
+static ssize_t gpio_aggregator_delete_device_store(struct device_driver *driver,
+ const char *buf, size_t count)
+{
+ struct gpio_aggregator *aggr;
+ unsigned int id;
+ int error;
+
+ if (!str_has_prefix(buf, DRV_NAME "."))
+ return -EINVAL;
+
+ error = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id);
+ if (error)
+ return error;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
+ mutex_lock(&gpio_aggregator_lock);
+ aggr = idr_find(&gpio_aggregator_idr, id);
+ /*
+ * For simplicity, devices created via configfs cannot be deleted
+ * via sysfs.
+ */
+ if (aggr && aggr->init_via_sysfs)
+ idr_remove(&gpio_aggregator_idr, id);
+ else {
+ mutex_unlock(&gpio_aggregator_lock);
+ module_put(THIS_MODULE);
+ return -ENOENT;
+ }
+ mutex_unlock(&gpio_aggregator_lock);
+
+ gpio_aggregator_destroy(aggr);
+ module_put(THIS_MODULE);
+ return count;
+}
+
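+/*
+ * Deletion sketch: instance names are DRV_NAME plus the numeric id,
+ * e.g.
+ *
+ *   echo gpio-aggregator.0 > /sys/bus/platform/drivers/gpio-aggregator/delete_device
+ */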
+static struct driver_attribute driver_attr_gpio_aggregator_delete_device =
+ __ATTR(delete_device, 0200, NULL, gpio_aggregator_delete_device_store);
+
+static struct attribute *gpio_aggregator_attrs[] = {
+ &driver_attr_gpio_aggregator_new_device.attr,
+ &driver_attr_gpio_aggregator_delete_device.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(gpio_aggregator);
/*
* GPIO Aggregator platform device
@@ -589,7 +1288,9 @@ static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
static int gpio_aggregator_probe(struct platform_device *pdev)
{
+ struct gpio_aggregator_pdev_meta *meta;
struct device *dev = &pdev->dev;
+ bool init_via_sysfs = false;
struct gpio_desc **descs;
struct gpiochip_fwd *fwd;
unsigned long features;
@@ -603,10 +1304,28 @@ static int gpio_aggregator_probe(struct platform_device *pdev)
if (!descs)
return -ENOMEM;
+ meta = dev_get_platdata(&pdev->dev);
+ if (meta && meta->init_via_sysfs)
+ init_via_sysfs = true;
+
for (i = 0; i < n; i++) {
descs[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
- if (IS_ERR(descs[i]))
+ if (IS_ERR(descs[i])) {
+ /*
+ * Deferred probing is not suitable when the aggregator is
+ * created via configfs; the user can simply retry activation
+ * later. For devices created via sysfs, the error is
+ * propagated unchanged for backward compatibility.
+ * .prevent_deferred_probe is kept unset for all other cases.
+ */
+ if (!init_via_sysfs && !dev_of_node(dev) &&
+ descs[i] == ERR_PTR(-EPROBE_DEFER)) {
+ pr_warn("Deferred probe canceled for creation via configfs.\n");
+ return -ENODEV;
+ }
return PTR_ERR(descs[i]);
+ }
}
features = (uintptr_t)device_get_match_data(dev);
@@ -640,9 +1359,63 @@ static struct platform_driver gpio_aggregator_driver = {
},
};
+static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data)
+{
+ /*
+ * No aggregators created via configfs should remain at this point,
+ * as their presence would have prevented module unloading.
+ */
+ gpio_aggregator_destroy(p);
+ return 0;
+}
+
+static void __exit gpio_aggregator_remove_all(void)
+{
+ /*
+ * Configfs callbacks acquire gpio_aggregator_lock when accessing
+ * gpio_aggregator_idr, so to avoid a lock-inversion deadlock we
+ * cannot hold gpio_aggregator_lock around the idr_for_each()
+ * invocation here, as gpio_aggregator_idr_remove() accesses
+ * configfs groups. Fortunately, the new_device/delete_device
+ * paths and the module unload path are mutually exclusive, thanks
+ * to the explicit try_module_get() inside those driver attribute
+ * handlers. Also, by the time we get here, no configfs entries
+ * are present or being created. Hence there is no need to take
+ * gpio_aggregator_lock below.
+ */
+ idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL);
+ idr_destroy(&gpio_aggregator_idr);
+}
+
static int __init gpio_aggregator_init(void)
{
- return platform_driver_register(&gpio_aggregator_driver);
+ int ret = 0;
+
+ config_group_init(&gpio_aggregator_subsys.su_group);
+ mutex_init(&gpio_aggregator_subsys.su_mutex);
+ ret = configfs_register_subsystem(&gpio_aggregator_subsys);
+ if (ret) {
+ pr_err("Failed to register the '%s' configfs subsystem: %d\n",
+ gpio_aggregator_subsys.su_group.cg_item.ci_namebuf, ret);
+ mutex_destroy(&gpio_aggregator_subsys.su_mutex);
+ return ret;
+ }
+
+ /*
+ * CAVEAT: This must occur after configfs registration. Otherwise
+ * the driver attribute groups could be exposed and accessed by
+ * users before configfs registration completes, and
+ * new_device_store() does not expect a partially initialized
+ * configfs state.
+ */
+ ret = platform_driver_register(&gpio_aggregator_driver);
+ if (ret) {
+ pr_err("Failed to register the platform driver: %d\n", ret);
+ mutex_destroy(&gpio_aggregator_subsys.su_mutex);
+ configfs_unregister_subsystem(&gpio_aggregator_subsys);
+ }
+
+ return ret;
}
module_init(gpio_aggregator_init);
@@ -650,6 +1423,7 @@ static void __exit gpio_aggregator_exit(void)
{
gpio_aggregator_remove_all();
platform_driver_unregister(&gpio_aggregator_driver);
+ configfs_unregister_subsystem(&gpio_aggregator_subsys);
}
module_exit(gpio_aggregator_exit);
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 17c287dc7471..8f22cb36004d 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -516,6 +516,7 @@ static struct irq_chip bcm_gpio_irq_chip = {
.irq_set_type = bcm_kona_gpio_irq_set_type,
.irq_request_resources = bcm_kona_gpio_irq_reqres,
.irq_release_resources = bcm_kona_gpio_irq_relres,
+ .flags = IRQCHIP_IMMUTABLE,
};
static struct of_device_id const bcm_kona_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-blzp1600.c b/drivers/gpio/gpio-blzp1600.c
new file mode 100644
index 000000000000..055cb296ae54
--- /dev/null
+++ b/drivers/gpio/gpio-blzp1600.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 VeriSilicon Limited.
+ * Copyright (C) 2025 Blaize, Inc.
+ */
+
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define GPIO_DIR_REG 0x00
+#define GPIO_CTRL_REG 0x04
+#define GPIO_SET_REG 0x08
+#define GPIO_CLR_REG 0x0C
+#define GPIO_ODATA_REG 0x10
+#define GPIO_IDATA_REG 0x14
+#define GPIO_IEN_REG 0x18
+#define GPIO_IS_REG 0x1C
+#define GPIO_IBE_REG 0x20
+#define GPIO_IEV_REG 0x24
+#define GPIO_RIS_REG 0x28
+#define GPIO_IM_REG 0x2C
+#define GPIO_MIS_REG 0x30
+#define GPIO_IC_REG 0x34
+#define GPIO_DB_REG 0x38
+#define GPIO_DFG_REG 0x3C
+
+#define DRIVER_NAME "blzp1600-gpio"
+
+struct blzp1600_gpio {
+ void __iomem *base;
+ struct gpio_chip gc;
+ int irq;
+};
+
+static inline struct blzp1600_gpio *get_blzp1600_gpio_from_irq_data(struct irq_data *d)
+{
+ return gpiochip_get_data(irq_data_get_irq_chip_data(d));
+}
+
+static inline struct blzp1600_gpio *get_blzp1600_gpio_from_irq_desc(struct irq_desc *d)
+{
+ return gpiochip_get_data(irq_desc_get_handler_data(d));
+}
+
+static inline u32 blzp1600_gpio_read(struct blzp1600_gpio *chip, unsigned int offset)
+{
+ return readl_relaxed(chip->base + offset);
+}
+
+static inline void blzp1600_gpio_write(struct blzp1600_gpio *chip, unsigned int offset, u32 val)
+{
+ writel_relaxed(val, chip->base + offset);
+}
+
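+/*
+ * Non-atomic read-modify-write helper; callers serialize access with
+ * the chip's bgpio_lock (see the guard() users below).
+ */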
+static inline void blzp1600_gpio_rmw(void __iomem *reg, u32 mask, bool set)
+{
+ u32 val = readl_relaxed(reg);
+
+ if (set)
+ val |= mask;
+ else
+ val &= ~mask;
+
+ writel_relaxed(val, reg);
+}
+
+static void blzp1600_gpio_irq_mask(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ blzp1600_gpio_rmw(chip->base + GPIO_IM_REG, BIT(d->hwirq), 1);
+}
+
+static void blzp1600_gpio_irq_unmask(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ blzp1600_gpio_rmw(chip->base + GPIO_IM_REG, BIT(d->hwirq), 0);
+}
+
+static void blzp1600_gpio_irq_ack(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ blzp1600_gpio_write(chip, GPIO_IC_REG, BIT(d->hwirq));
+}
+
+static void blzp1600_gpio_irq_enable(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ gpiochip_enable_irq(&chip->gc, irqd_to_hwirq(d));
+
+ guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ blzp1600_gpio_rmw(chip->base + GPIO_DIR_REG, BIT(d->hwirq), 0);
+ blzp1600_gpio_rmw(chip->base + GPIO_IEN_REG, BIT(d->hwirq), 1);
+}
+
+static void blzp1600_gpio_irq_disable(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ blzp1600_gpio_rmw(chip->base + GPIO_IEN_REG, BIT(d->hwirq), 0);
+ gpiochip_disable_irq(&chip->gc, irqd_to_hwirq(d));
+}
+
+static int blzp1600_gpio_irq_set_type(struct irq_data *d, u32 type)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+ u32 edge_level, single_both, fall_rise;
+ int mask = BIT(d->hwirq);
+
+ guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ edge_level = blzp1600_gpio_read(chip, GPIO_IS_REG);
+ single_both = blzp1600_gpio_read(chip, GPIO_IBE_REG);
+ fall_rise = blzp1600_gpio_read(chip, GPIO_IEV_REG);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ edge_level &= ~mask;
+ single_both |= mask;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ edge_level &= ~mask;
+ single_both &= ~mask;
+ fall_rise |= mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ edge_level &= ~mask;
+ single_both &= ~mask;
+ fall_rise &= ~mask;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ edge_level |= mask;
+ fall_rise |= mask;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ edge_level |= mask;
+ fall_rise &= ~mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ blzp1600_gpio_write(chip, GPIO_IS_REG, edge_level);
+ blzp1600_gpio_write(chip, GPIO_IBE_REG, single_both);
+ blzp1600_gpio_write(chip, GPIO_IEV_REG, fall_rise);
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(d, handle_level_irq);
+ else
+ irq_set_handler_locked(d, handle_edge_irq);
+
+ return 0;
+}
+
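+/*
+ * Marked IRQCHIP_IMMUTABLE so gpiolib does not copy and patch this
+ * irqchip; GPIOCHIP_IRQ_RESOURCE_HELPERS supplies the resource
+ * request/release callbacks gpiolib would otherwise have installed.
+ */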
+static const struct irq_chip blzp1600_gpio_irqchip = {
+ .name = DRIVER_NAME,
+ .irq_ack = blzp1600_gpio_irq_ack,
+ .irq_mask = blzp1600_gpio_irq_mask,
+ .irq_unmask = blzp1600_gpio_irq_unmask,
+ .irq_set_type = blzp1600_gpio_irq_set_type,
+ .irq_enable = blzp1600_gpio_irq_enable,
+ .irq_disable = blzp1600_gpio_irq_disable,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_MASK_ON_SUSPEND,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static void blzp1600_gpio_irqhandler(struct irq_desc *desc)
+{
+ struct blzp1600_gpio *gpio = get_blzp1600_gpio_from_irq_desc(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ unsigned long irq_status;
+ int hwirq = 0;
+
+ chained_irq_enter(irqchip, desc);
+ irq_status = blzp1600_gpio_read(gpio, GPIO_RIS_REG);
+ for_each_set_bit(hwirq, &irq_status, gpio->gc.ngpio)
+ generic_handle_domain_irq(gpio->gc.irq.domain, hwirq);
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static int blzp1600_gpio_set_debounce(struct gpio_chip *gc, unsigned int offset,
+ unsigned int debounce)
+{
+ struct blzp1600_gpio *chip = gpiochip_get_data(gc);
+
+ guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ blzp1600_gpio_rmw(chip->base + GPIO_DB_REG, BIT(offset), debounce);
+
+ return 0;
+}
+
+static int blzp1600_gpio_set_config(struct gpio_chip *gc, unsigned int offset, unsigned long config)
+{
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ return blzp1600_gpio_set_debounce(gc, offset, debounce);
+}
+
+static int blzp1600_gpio_probe(struct platform_device *pdev)
+{
+ struct blzp1600_gpio *chip;
+ struct gpio_chip *gc;
+ int ret;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(chip->base))
+ return PTR_ERR(chip->base);
+
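+ /*
+ * Generic MMIO GPIO: 4-byte-wide registers with separate set/clear
+ * registers and a single direction register (bit set = output).
+ */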
+ ret = bgpio_init(&chip->gc, &pdev->dev, 4, chip->base + GPIO_IDATA_REG,
+ chip->base + GPIO_SET_REG, chip->base + GPIO_CLR_REG,
+ chip->base + GPIO_DIR_REG, NULL, 0);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register generic gpio\n");
+
+ /* configure the gpio chip */
+ gc = &chip->gc;
+ gc->set_config = blzp1600_gpio_set_config;
+
+ if (device_property_present(&pdev->dev, "interrupt-controller")) {
+ struct gpio_irq_chip *girq;
+
+ chip->irq = platform_get_irq(pdev, 0);
+ if (chip->irq < 0)
+ return chip->irq;
+
+ girq = &gc->irq;
+ gpio_irq_chip_set_chip(girq, &blzp1600_gpio_irqchip);
+ girq->parent_handler = blzp1600_gpio_irqhandler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+
+ girq->parents[0] = chip->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ }
+
+ return devm_gpiochip_add_data(&pdev->dev, gc, chip);
+}
+
+static const struct of_device_id blzp1600_gpio_of_match[] = {
+ { .compatible = "blaize,blzp1600-gpio", },
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, blzp1600_gpio_of_match);
+
+static struct platform_driver blzp1600_gpio_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = blzp1600_gpio_of_match,
+ },
+ .probe = blzp1600_gpio_probe,
+};
+
+module_platform_driver(blzp1600_gpio_driver);
+
+MODULE_AUTHOR("Nikolaos Pasaloukos <nikolaos.pasaloukos@blaize.com>");
+MODULE_DESCRIPTION("Blaize BLZP1600 GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index ca3472977431..e7671bcd5c07 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -437,7 +437,7 @@ static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
int err;
priv->irq_domain =
- irq_domain_add_linear(np, priv->num_gpios,
+ irq_domain_create_linear(of_fwnode_handle(np), priv->num_gpios,
&brcmstb_gpio_irq_domain_ops,
priv);
if (!priv->irq_domain) {
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 63fc7888c1d4..80a82492171e 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -68,15 +68,6 @@ static inline u32 __gpio_mask(unsigned gpio)
return 1 << (gpio % 32);
}
-static inline struct davinci_gpio_regs __iomem *irq2regs(struct irq_data *d)
-{
- struct davinci_gpio_regs __iomem *g;
-
- g = (__force struct davinci_gpio_regs __iomem *)irq_data_get_irq_chip_data(d);
-
- return g;
-}
-
static int davinci_gpio_irq_setup(struct platform_device *pdev);
/*--------------------------------------------------------------------------*/
@@ -255,19 +246,27 @@ static int davinci_gpio_probe(struct platform_device *pdev)
static void gpio_irq_mask(struct irq_data *d)
{
- struct davinci_gpio_regs __iomem *g = irq2regs(d);
+ struct davinci_gpio_controller *chips = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct davinci_gpio_regs __iomem *g = chips->regs[hwirq / 32];
uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d);
writel_relaxed(mask, &g->clr_falling);
writel_relaxed(mask, &g->clr_rising);
+
+ gpiochip_disable_irq(&chips->chip, hwirq);
}
static void gpio_irq_unmask(struct irq_data *d)
{
- struct davinci_gpio_regs __iomem *g = irq2regs(d);
+ struct davinci_gpio_controller *chips = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct davinci_gpio_regs __iomem *g = chips->regs[hwirq / 32];
uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d);
unsigned status = irqd_get_trigger_type(d);
+ gpiochip_enable_irq(&chips->chip, hwirq);
+
status &= IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
if (!status)
status = IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
@@ -286,12 +285,13 @@ static int gpio_irq_type(struct irq_data *d, unsigned trigger)
return 0;
}
-static struct irq_chip gpio_irqchip = {
+static const struct irq_chip gpio_irqchip = {
.name = "GPIO",
.irq_unmask = gpio_irq_unmask,
.irq_mask = gpio_irq_mask,
.irq_set_type = gpio_irq_type,
- .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static void gpio_irq_handler(struct irq_desc *desc)
@@ -399,12 +399,11 @@ davinci_gpio_irq_map(struct irq_domain *d, unsigned int irq,
{
struct davinci_gpio_controller *chips =
(struct davinci_gpio_controller *)d->host_data;
- struct davinci_gpio_regs __iomem *g = chips->regs[hw / 32];
irq_set_chip_and_handler_name(irq, &gpio_irqchip, handle_simple_irq,
"davinci_gpio");
irq_set_irq_type(irq, IRQ_TYPE_NONE);
- irq_set_chip_data(irq, (__force void *)g);
+ irq_set_chip_data(irq, (__force void *)chips);
irq_set_handler_data(irq, (void *)(uintptr_t)__gpio_mask(hw));
return 0;
@@ -479,9 +478,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
return irq;
}
- irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0,
- &davinci_gpio_irq_ops,
- chips);
+ irq_domain = irq_domain_create_legacy(of_fwnode_handle(dev->of_node), ngpio, irq, 0,
+ &davinci_gpio_irq_ops, chips);
if (!irq_domain) {
dev_err(dev, "Couldn't register an IRQ domain\n");
return -ENODEV;
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 596da59d4b13..4bd3c47eaf93 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -220,11 +220,12 @@ static int dln2_gpio_get(struct gpio_chip *chip, unsigned int offset)
return dln2_gpio_pin_get_out_val(dln2, offset);
}
-static void dln2_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int dln2_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct dln2_gpio *dln2 = gpiochip_get_data(chip);
- dln2_gpio_pin_set_out_val(dln2, offset, value);
+ return dln2_gpio_pin_set_out_val(dln2, offset, value);
}
static int dln2_gpio_set_direction(struct gpio_chip *chip, unsigned offset,
@@ -468,7 +469,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
dln2->gpio.base = -1;
dln2->gpio.ngpio = pins;
dln2->gpio.can_sleep = true;
- dln2->gpio.set = dln2_gpio_set;
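+ /*
+ * .set_rv is the int-returning variant of .set, letting gpiolib
+ * propagate the bus write status back to the caller.
+ */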
+ dln2->gpio.set_rv = dln2_gpio_set;
dln2->gpio.get = dln2_gpio_get;
dln2->gpio.request = dln2_gpio_request;
dln2->gpio.free = dln2_gpio_free;
diff --git a/drivers/gpio/gpio-ds4520.c b/drivers/gpio/gpio-ds4520.c
index 1903deaef3e9..f52ecae382a4 100644
--- a/drivers/gpio/gpio-ds4520.c
+++ b/drivers/gpio/gpio-ds4520.c
@@ -25,7 +25,6 @@ static int ds4520_gpio_probe(struct i2c_client *client)
struct gpio_regmap_config config = { };
struct device *dev = &client->dev;
struct regmap *regmap;
- u32 ngpio;
u32 base;
int ret;
@@ -33,10 +32,6 @@ static int ds4520_gpio_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(dev, ret, "Missing 'reg' property.\n");
- ret = device_property_read_u32(dev, "ngpios", &ngpio);
- if (ret)
- return dev_err_probe(dev, ret, "Missing 'ngpios' property.\n");
-
regmap = devm_regmap_init_i2c(client, &ds4520_regmap_config);
if (IS_ERR(regmap))
return dev_err_probe(dev, PTR_ERR(regmap),
@@ -44,7 +39,6 @@ static int ds4520_gpio_probe(struct i2c_client *client)
config.regmap = regmap;
config.parent = dev;
- config.ngpio = ngpio;
config.reg_dat_base = base + DS4520_IO_STATUS0;
config.reg_set_base = base + DS4520_PULLUP0;
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index d4bf8d187e16..f2973d0b7138 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -203,9 +203,10 @@ static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset)
return 0;
}
-static void sprd_eic_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int sprd_eic_set(struct gpio_chip *chip, unsigned int offset, int value)
{
 /* EICs are always input, nothing to do here. */
+ return 0;
}
static int sprd_eic_set_debounce(struct gpio_chip *chip, unsigned int offset,
@@ -662,7 +663,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
sprd_eic->chip.request = sprd_eic_request;
sprd_eic->chip.free = sprd_eic_free;
sprd_eic->chip.set_config = sprd_eic_set_config;
- sprd_eic->chip.set = sprd_eic_set;
+ sprd_eic->chip.set_rv = sprd_eic_set;
fallthrough;
case SPRD_EIC_ASYNC:
case SPRD_EIC_SYNC:
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 6c862c572322..a5e6e446f39c 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -204,13 +204,15 @@ static void __em_gio_set(struct gpio_chip *chip, unsigned int reg,
(BIT(shift + 16)) | (value << shift));
}
-static void em_gio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int em_gio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
/* output is split into two registers */
if (offset < 16)
__em_gio_set(chip, GIO_OL, offset, value);
else
__em_gio_set(chip, GIO_OH, offset - 16, value);
+
+ return 0;
}
static int em_gio_direction_output(struct gpio_chip *chip, unsigned offset,
@@ -304,7 +306,7 @@ static int em_gio_probe(struct platform_device *pdev)
gpio_chip->direction_input = em_gio_direction_input;
gpio_chip->get = em_gio_get;
gpio_chip->direction_output = em_gio_direction_output;
- gpio_chip->set = em_gio_set;
+ gpio_chip->set_rv = em_gio_set;
gpio_chip->to_irq = em_gio_to_irq;
gpio_chip->request = pinctrl_gpio_request;
gpio_chip->free = em_gio_free;
@@ -323,8 +325,9 @@ static int em_gio_probe(struct platform_device *pdev)
irq_chip->irq_release_resources = em_gio_irq_relres;
irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
- p->irq_domain = irq_domain_add_simple(dev->of_node, ngpios, 0,
- &em_gio_irq_domain_ops, p);
+ p->irq_domain = irq_domain_create_simple(of_fwnode_handle(dev->of_node),
+ ngpios, 0,
+ &em_gio_irq_domain_ops, p);
if (!p->irq_domain) {
dev_err(dev, "cannot initialize irq domain\n");
return -ENXIO;
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index d5909a4f0433..beb98286d13e 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -93,8 +93,8 @@ static int exar_get_value(struct gpio_chip *chip, unsigned int offset)
return !!(regmap_test_bits(exar_gpio->regmap, addr, BIT(bit)));
}
-static void exar_set_value(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int exar_set_value(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
unsigned int addr = exar_offset_to_lvl_addr(exar_gpio, offset);
@@ -105,7 +105,7 @@ static void exar_set_value(struct gpio_chip *chip, unsigned int offset,
* regmap_write_bits() forces value to be written when an external
* pull up/down might otherwise indicate value was already set.
*/
- regmap_write_bits(exar_gpio->regmap, addr, BIT(bit), bit_value);
+ return regmap_write_bits(exar_gpio->regmap, addr, BIT(bit), bit_value);
}
static int exar_direction_output(struct gpio_chip *chip, unsigned int offset,
@@ -114,11 +114,13 @@ static int exar_direction_output(struct gpio_chip *chip, unsigned int offset,
struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
unsigned int addr = exar_offset_to_sel_addr(exar_gpio, offset);
unsigned int bit = exar_offset_to_bit(exar_gpio, offset);
+ int ret;
- exar_set_value(chip, offset, value);
- regmap_clear_bits(exar_gpio->regmap, addr, BIT(bit));
+ ret = exar_set_value(chip, offset, value);
+ if (ret)
+ return ret;
- return 0;
+ return regmap_clear_bits(exar_gpio->regmap, addr, BIT(bit));
}
static int exar_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -209,7 +211,7 @@ static int gpio_exar_probe(struct platform_device *pdev)
exar_gpio->gpio_chip.direction_input = exar_direction_input;
exar_gpio->gpio_chip.get_direction = exar_get_direction;
exar_gpio->gpio_chip.get = exar_get_value;
- exar_gpio->gpio_chip.set = exar_set_value;
+ exar_gpio->gpio_chip.set_rv = exar_set_value;
exar_gpio->gpio_chip.base = -1;
exar_gpio->gpio_chip.ngpio = ngpios;
exar_gpio->index = index;
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index 3875fd940ccb..dfcd3634f279 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -159,7 +159,8 @@ static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset);
static int f7188x_gpio_get(struct gpio_chip *chip, unsigned offset);
static int f7188x_gpio_direction_out(struct gpio_chip *chip,
unsigned offset, int value);
-static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value);
+static int f7188x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value);
static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
unsigned long config);
@@ -172,7 +173,7 @@ static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
.direction_input = f7188x_gpio_direction_in, \
.get = f7188x_gpio_get, \
.direction_output = f7188x_gpio_direction_out, \
- .set = f7188x_gpio_set, \
+ .set_rv = f7188x_gpio_set, \
.set_config = f7188x_gpio_set_config, \
.base = -1, \
.ngpio = _ngpio, \
@@ -391,7 +392,8 @@ static int f7188x_gpio_direction_out(struct gpio_chip *chip,
return 0;
}
-static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int f7188x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
int err;
struct f7188x_gpio_bank *bank = gpiochip_get_data(chip);
@@ -400,7 +402,8 @@ static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
err = superio_enter(sio->addr);
if (err)
- return;
+ return err;
+
superio_select(sio->addr, sio->device);
data_out = superio_inb(sio->addr, f7188x_gpio_data_out(bank->regbase));
@@ -411,6 +414,8 @@ static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
superio_outb(sio->addr, f7188x_gpio_data_out(bank->regbase), data_out);
superio_exit(sio->addr);
+
+ return 0;
}
static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
diff --git a/drivers/gpio/gpio-graniterapids.c b/drivers/gpio/gpio-graniterapids.c
index ad6a045fd3d2..f25283e5239d 100644
--- a/drivers/gpio/gpio-graniterapids.c
+++ b/drivers/gpio/gpio-graniterapids.c
@@ -116,7 +116,7 @@ static int gnr_gpio_get(struct gpio_chip *gc, unsigned int gpio)
return !!(dw & GNR_CFG_DW_RXSTATE);
}
-static void gnr_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+static int gnr_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
u32 clear = 0;
u32 set = 0;
@@ -126,7 +126,7 @@ static void gnr_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
else
clear = GNR_CFG_DW_TXSTATE;
- gnr_gpio_configure_line(gc, gpio, clear, set);
+ return gnr_gpio_configure_line(gc, gpio, clear, set);
}
static int gnr_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
@@ -159,7 +159,7 @@ static const struct gpio_chip gnr_gpio_chip = {
.owner = THIS_MODULE,
.request = gnr_gpio_request,
.get = gnr_gpio_get,
- .set = gnr_gpio_set,
+ .set_rv = gnr_gpio_set,
.get_direction = gnr_gpio_get_direction,
.direction_input = gnr_gpio_direction_input,
.direction_output = gnr_gpio_direction_output,
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 30a0522ae735..d38a2d9854ca 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -170,6 +170,8 @@ static void grgpio_irq_mask(struct irq_data *d)
grgpio_set_imask(priv, offset, 0);
raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+
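+ /* Pairs with gpiochip_enable_irq() in unmask; needed since the irqchip is immutable */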
+ gpiochip_disable_irq(&priv->gc, d->hwirq);
}
static void grgpio_irq_unmask(struct irq_data *d)
@@ -178,6 +180,7 @@ static void grgpio_irq_unmask(struct irq_data *d)
int offset = d->hwirq;
unsigned long flags;
+ gpiochip_enable_irq(&priv->gc, d->hwirq);
raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
grgpio_set_imask(priv, offset, 1);
@@ -185,11 +188,13 @@ static void grgpio_irq_unmask(struct irq_data *d)
raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
}
-static struct irq_chip grgpio_irq_chip = {
+static const struct irq_chip grgpio_irq_chip = {
.name = "grgpio",
.irq_mask = grgpio_irq_mask,
.irq_unmask = grgpio_irq_unmask,
.irq_set_type = grgpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t grgpio_irq_handler(int irq, void *dev)
@@ -397,7 +402,7 @@ static int grgpio_probe(struct platform_device *ofdev)
return -EINVAL;
}
- priv->domain = irq_domain_add_linear(np, gc->ngpio,
+ priv->domain = irq_domain_create_linear(of_fwnode_handle(np), gc->ngpio,
&grgpio_irq_domain_ops,
priv);
if (!priv->domain) {
diff --git a/drivers/gpio/gpio-gw-pld.c b/drivers/gpio/gpio-gw-pld.c
index 7e29a2d8de1a..a40ba99a3aea 100644
--- a/drivers/gpio/gpio-gw-pld.c
+++ b/drivers/gpio/gpio-gw-pld.c
@@ -62,9 +62,9 @@ static int gw_pld_output8(struct gpio_chip *gc, unsigned offset, int value)
return i2c_smbus_write_byte(gw->client, gw->out);
}
-static void gw_pld_set8(struct gpio_chip *gc, unsigned offset, int value)
+static int gw_pld_set8(struct gpio_chip *gc, unsigned int offset, int value)
{
- gw_pld_output8(gc, offset, value);
+ return gw_pld_output8(gc, offset, value);
}
static int gw_pld_probe(struct i2c_client *client)
@@ -86,7 +86,7 @@ static int gw_pld_probe(struct i2c_client *client)
gw->chip.direction_input = gw_pld_input8;
gw->chip.get = gw_pld_get8;
gw->chip.direction_output = gw_pld_output8;
- gw->chip.set = gw_pld_set8;
+ gw->chip.set_rv = gw_pld_set8;
gw->client = client;
/*
diff --git a/drivers/gpio/gpio-htc-egpio.c b/drivers/gpio/gpio-htc-egpio.c
index a40bd56673fe..b1844a676c7c 100644
--- a/drivers/gpio/gpio-htc-egpio.c
+++ b/drivers/gpio/gpio-htc-egpio.c
@@ -170,7 +170,7 @@ static int egpio_direction_input(struct gpio_chip *chip, unsigned offset)
* Output pins
*/
-static void egpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int egpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
unsigned long flag;
struct egpio_chip *egpio;
@@ -198,6 +198,8 @@ static void egpio_set(struct gpio_chip *chip, unsigned offset, int value)
egpio->cached_values &= ~(1 << offset);
egpio_writew((egpio->cached_values >> shift) & ei->reg_mask, ei, reg);
spin_unlock_irqrestore(&ei->lock, flag);
+
+ return 0;
}
static int egpio_direction_output(struct gpio_chip *chip,
@@ -206,12 +208,10 @@ static int egpio_direction_output(struct gpio_chip *chip,
struct egpio_chip *egpio;
egpio = gpiochip_get_data(chip);
- if (test_bit(offset, &egpio->is_out)) {
- egpio_set(chip, offset, value);
- return 0;
- } else {
- return -EINVAL;
- }
+ if (test_bit(offset, &egpio->is_out))
+ return egpio_set(chip, offset, value);
+
+ return -EINVAL;
}
static int egpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -324,7 +324,7 @@ static int __init egpio_probe(struct platform_device *pdev)
chip->parent = &pdev->dev;
chip->owner = THIS_MODULE;
chip->get = egpio_get;
- chip->set = egpio_set;
+ chip->set_rv = egpio_set;
chip->direction_input = egpio_direction_input;
chip->direction_output = egpio_direction_output;
chip->get_direction = egpio_get_direction;
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 0be9285efebc..67089b2423d8 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -175,12 +175,16 @@ static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned int nr)
static int ichx_gpio_direction_output(struct gpio_chip *gpio, unsigned int nr,
int val)
{
+ int ret;
+
/* Disable blink hardware which is available for GPIOs from 0 to 31. */
if (nr < 32 && ichx_priv.desc->have_blink)
ichx_write_bit(GPO_BLINK, nr, 0, 0);
/* Set GPIO output value. */
- ichx_write_bit(GPIO_LVL, nr, val, 0);
+ ret = ichx_write_bit(GPIO_LVL, nr, val, 0);
+ if (ret)
+ return ret;
/*
* Try setting pin as an output and verify it worked since many pins
@@ -252,9 +256,9 @@ static int ich6_gpio_request(struct gpio_chip *chip, unsigned int nr)
return ichx_gpio_request(chip, nr);
}
-static void ichx_gpio_set(struct gpio_chip *chip, unsigned int nr, int val)
+static int ichx_gpio_set(struct gpio_chip *chip, unsigned int nr, int val)
{
- ichx_write_bit(GPIO_LVL, nr, val, 0);
+ return ichx_write_bit(GPIO_LVL, nr, val, 0);
}
static void ichx_gpiolib_setup(struct gpio_chip *chip)
@@ -269,7 +273,7 @@ static void ichx_gpiolib_setup(struct gpio_chip *chip)
chip->get = ichx_priv.desc->get ?
ichx_priv.desc->get : ichx_gpio_get;
- chip->set = ichx_gpio_set;
+ chip->set_rv = ichx_gpio_set;
chip->get_direction = ichx_gpio_get_direction;
chip->direction_input = ichx_gpio_direction_input;
chip->direction_output = ichx_gpio_direction_output;
diff --git a/drivers/gpio/gpio-idt3243x.c b/drivers/gpio/gpio-idt3243x.c
index 00f547d26254..535f25514455 100644
--- a/drivers/gpio/gpio-idt3243x.c
+++ b/drivers/gpio/gpio-idt3243x.c
@@ -37,7 +37,7 @@ static void idt_gpio_dispatch(struct irq_desc *desc)
pending = readl(ctrl->pic + IDT_PIC_IRQ_PEND);
pending &= ~ctrl->mask_cache;
for_each_set_bit(bit, &pending, gc->ngpio) {
- virq = irq_linear_revmap(gc->irq.domain, bit);
+ virq = irq_find_mapping(gc->irq.domain, bit);
if (virq)
generic_handle_irq(virq);
}
diff --git a/drivers/gpio/gpio-imx-scu.c b/drivers/gpio/gpio-imx-scu.c
index 13baf465aedf..1693dbf1b777 100644
--- a/drivers/gpio/gpio-imx-scu.c
+++ b/drivers/gpio/gpio-imx-scu.c
@@ -6,8 +6,10 @@
* to control the PIN resources on SCU domain.
*/
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/firmware/imx/svc/rm.h>
@@ -37,16 +39,11 @@ static int imx_scu_gpio_get(struct gpio_chip *chip, unsigned int offset)
int level;
int err;
- if (offset >= chip->ngpio)
- return -EINVAL;
-
- mutex_lock(&priv->lock);
-
- /* to read PIN state via scu api */
- err = imx_sc_misc_get_control(priv->handle,
- scu_rsrc_arr[offset], 0, &level);
- mutex_unlock(&priv->lock);
-
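+ /*
+ * scoped_guard() (from <linux/cleanup.h>) drops the mutex
+ * automatically at the end of the block, replacing the manual
+ * lock/unlock pair.
+ */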
+ scoped_guard(mutex, &priv->lock) {
+ /* to read PIN state via scu api */
+ err = imx_sc_misc_get_control(priv->handle,
+ scu_rsrc_arr[offset], 0, &level);
+ }
if (err) {
dev_err(priv->dev, "SCU get failed: %d\n", err);
return err;
@@ -55,31 +52,26 @@ static int imx_scu_gpio_get(struct gpio_chip *chip, unsigned int offset)
return level;
}
-static void imx_scu_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int imx_scu_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct scu_gpio_priv *priv = gpiochip_get_data(chip);
int err;
- if (offset >= chip->ngpio)
- return;
-
- mutex_lock(&priv->lock);
-
- /* to set PIN output level via scu api */
- err = imx_sc_misc_set_control(priv->handle,
- scu_rsrc_arr[offset], 0, value);
- mutex_unlock(&priv->lock);
-
+ scoped_guard(mutex, &priv->lock) {
+ /* to set PIN output level via scu api */
+ err = imx_sc_misc_set_control(priv->handle,
+ scu_rsrc_arr[offset], 0, value);
+ }
if (err)
dev_err(priv->dev, "SCU set (%d) failed: %d\n",
scu_rsrc_arr[offset], err);
+
+ return err;
}
static int imx_scu_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
- if (offset >= chip->ngpio)
- return -EINVAL;
-
return GPIO_LINE_DIRECTION_OUT;
}
@@ -99,7 +91,10 @@ static int imx_scu_gpio_probe(struct platform_device *pdev)
return ret;
priv->dev = dev;
- mutex_init(&priv->lock);
+
+ ret = devm_mutex_init(&pdev->dev, &priv->lock);
+ if (ret)
+ return ret;
gc = &priv->chip;
gc->base = -1;
@@ -107,7 +102,7 @@ static int imx_scu_gpio_probe(struct platform_device *pdev)
gc->ngpio = ARRAY_SIZE(scu_rsrc_arr);
gc->label = dev_name(dev);
gc->get = imx_scu_gpio_get;
- gc->set = imx_scu_gpio_set;
+ gc->set_rv = imx_scu_gpio_set;
gc->get_direction = imx_scu_gpio_get_direction;
platform_set_drvdata(pdev, priv);
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index f332341fd4c8..d8184b527bac 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -213,8 +213,7 @@ exit:
return rc;
}
-static void it87_gpio_set(struct gpio_chip *chip,
- unsigned gpio_num, int val)
+static int it87_gpio_set(struct gpio_chip *chip, unsigned int gpio_num, int val)
{
u8 mask, curr_vals;
u16 reg;
@@ -228,6 +227,8 @@ static void it87_gpio_set(struct gpio_chip *chip,
outb(curr_vals | mask, reg);
else
outb(curr_vals & ~mask, reg);
+
+ return 0;
}
static int it87_gpio_direction_out(struct gpio_chip *chip,
@@ -249,7 +250,9 @@ static int it87_gpio_direction_out(struct gpio_chip *chip,
/* set the output enable bit */
superio_set_mask(mask, group + it87_gpio->output_base);
- it87_gpio_set(chip, gpio_num, val);
+ rc = it87_gpio_set(chip, gpio_num, val);
+ if (rc)
+ goto exit;
superio_exit();
@@ -264,7 +267,7 @@ static const struct gpio_chip it87_template_chip = {
.request = it87_gpio_request,
.get = it87_gpio_get,
.direction_input = it87_gpio_direction_in,
- .set = it87_gpio_set,
+ .set_rv = it87_gpio_set,
.direction_output = it87_gpio_direction_out,
.base = -1
};
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index cdf50e4ea165..9f548eda3888 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -76,7 +76,7 @@ static int ttl_get_value(struct gpio_chip *gpio, unsigned offset)
return !!ret;
}
-static void ttl_set_value(struct gpio_chip *gpio, unsigned offset, int value)
+static int ttl_set_value(struct gpio_chip *gpio, unsigned int offset, int value)
{
struct ttl_module *mod = dev_get_drvdata(gpio->parent);
void __iomem *port;
@@ -103,6 +103,8 @@ static void ttl_set_value(struct gpio_chip *gpio, unsigned offset, int value)
iowrite16be(*shadow, port);
spin_unlock(&mod->lock);
+
+ return 0;
}
static void ttl_write_reg(struct ttl_module *mod, u8 reg, u16 val)
@@ -169,7 +171,7 @@ static int ttl_probe(struct platform_device *pdev)
gpio->parent = &pdev->dev;
gpio->label = pdev->name;
gpio->get = ttl_get_value;
- gpio->set = ttl_set_value;
+ gpio->set_rv = ttl_set_value;
gpio->owner = THIS_MODULE;
/* request dynamic allocation */
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
index 4ea15f08e0f4..e38e604baa22 100644
--- a/drivers/gpio/gpio-kempld.c
+++ b/drivers/gpio/gpio-kempld.c
@@ -63,7 +63,8 @@ static int kempld_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!kempld_gpio_get_bit(pld, KEMPLD_GPIO_LVL_NUM(offset), offset);
}
-static void kempld_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int kempld_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct kempld_gpio_data *gpio = gpiochip_get_data(chip);
struct kempld_device_data *pld = gpio->pld;
@@ -71,6 +72,8 @@ static void kempld_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
kempld_get_mutex(pld);
kempld_gpio_bitop(pld, KEMPLD_GPIO_LVL_NUM(offset), offset, value);
kempld_release_mutex(pld);
+
+ return 0;
}
static int kempld_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -166,7 +169,7 @@ static int kempld_gpio_probe(struct platform_device *pdev)
chip->direction_output = kempld_gpio_direction_output;
chip->get_direction = kempld_gpio_get_direction;
chip->get = kempld_gpio_get;
- chip->set = kempld_gpio_set;
+ chip->set_rv = kempld_gpio_set;
chip->ngpio = kempld_gpio_pincount(pld);
if (chip->ngpio == 0) {
dev_err(dev, "No GPIO pins detected\n");
diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c
index 817ecb12d550..61524a9ba765 100644
--- a/drivers/gpio/gpio-ljca.c
+++ b/drivers/gpio/gpio-ljca.c
@@ -144,8 +144,8 @@ static int ljca_gpio_get_value(struct gpio_chip *chip, unsigned int offset)
return ljca_gpio_read(ljca_gpio, offset);
}
-static void ljca_gpio_set_value(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int ljca_gpio_set_value(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
int ret;
@@ -155,6 +155,8 @@ static void ljca_gpio_set_value(struct gpio_chip *chip, unsigned int offset,
dev_err(chip->parent,
"set value failed offset: %u val: %d ret: %d\n",
offset, val, ret);
+
+ return ret;
}
static int ljca_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -183,7 +185,10 @@ static int ljca_gpio_direction_output(struct gpio_chip *chip,
if (ret)
return ret;
- ljca_gpio_set_value(chip, offset, val);
+ ret = ljca_gpio_set_value(chip, offset, val);
+ if (ret)
+ return ret;
+
set_bit(offset, ljca_gpio->output_enabled);
return 0;
@@ -432,7 +437,7 @@ static int ljca_gpio_probe(struct auxiliary_device *auxdev,
ljca_gpio->gc.direction_output = ljca_gpio_direction_output;
ljca_gpio->gc.get_direction = ljca_gpio_get_direction;
ljca_gpio->gc.get = ljca_gpio_get_value;
- ljca_gpio->gc.set = ljca_gpio_set_value;
+ ljca_gpio->gc.set_rv = ljca_gpio_set_value;
ljca_gpio->gc.set_config = ljca_gpio_set_config;
ljca_gpio->gc.init_valid_mask = ljca_gpio_init_valid_mask;
ljca_gpio->gc.can_sleep = true;
diff --git a/drivers/gpio/gpio-logicvc.c b/drivers/gpio/gpio-logicvc.c
index 05d62011f335..19cd2847467c 100644
--- a/drivers/gpio/gpio-logicvc.c
+++ b/drivers/gpio/gpio-logicvc.c
@@ -61,23 +61,22 @@ static int logicvc_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(value & bit);
}
-static void logicvc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int logicvc_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct logicvc_gpio *logicvc = gpiochip_get_data(chip);
unsigned int reg, bit;
logicvc_gpio_offset(logicvc, offset, &reg, &bit);
- regmap_update_bits(logicvc->regmap, reg, bit, value ? bit : 0);
+ return regmap_update_bits(logicvc->regmap, reg, bit, value ? bit : 0);
}
static int logicvc_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
/* Pins are always configured as output, so just set the value. */
- logicvc_gpio_set(chip, offset, value);
-
- return 0;
+ return logicvc_gpio_set(chip, offset, value);
}
static struct regmap_config logicvc_gpio_regmap_config = {
@@ -135,7 +134,7 @@ static int logicvc_gpio_probe(struct platform_device *pdev)
logicvc->chip.ngpio = LOGICVC_CTRL_GPIO_BITS +
LOGICVC_POWER_CTRL_GPIO_BITS;
logicvc->chip.get = logicvc_gpio_get;
- logicvc->chip.set = logicvc_gpio_set;
+ logicvc->chip.set_rv = logicvc_gpio_set;
logicvc->chip.direction_output = logicvc_gpio_direction_output;
return devm_gpiochip_add_data(dev, &logicvc->chip, logicvc);
diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c
index a9a93036f08f..26227669f026 100644
--- a/drivers/gpio/gpio-loongson-64bit.c
+++ b/drivers/gpio/gpio-loongson-64bit.c
@@ -105,7 +105,7 @@ static int loongson_gpio_get_direction(struct gpio_chip *chip, unsigned int pin)
return GPIO_LINE_DIRECTION_OUT;
}
-static void loongson_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
+static int loongson_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
{
unsigned long flags;
struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
@@ -113,6 +113,8 @@ static void loongson_gpio_set(struct gpio_chip *chip, unsigned int pin, int valu
spin_lock_irqsave(&lgpio->lock, flags);
loongson_commit_level(lgpio, pin, value);
spin_unlock_irqrestore(&lgpio->lock, flags);
+
+ return 0;
}
static int loongson_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
@@ -155,7 +157,7 @@ static int loongson_gpio_init(struct device *dev, struct loongson_gpio_chip *lgp
lgpio->chip.get = loongson_gpio_get;
lgpio->chip.get_direction = loongson_gpio_get_direction;
lgpio->chip.direction_output = loongson_gpio_direction_output;
- lgpio->chip.set = loongson_gpio_set;
+ lgpio->chip.set_rv = loongson_gpio_set;
lgpio->chip.parent = dev;
spin_lock_init(&lgpio->lock);
}
diff --git a/drivers/gpio/gpio-loongson.c b/drivers/gpio/gpio-loongson.c
index a42145873cc9..8f3668169ebf 100644
--- a/drivers/gpio/gpio-loongson.c
+++ b/drivers/gpio/gpio-loongson.c
@@ -48,8 +48,8 @@ static int loongson_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
return !!(val & BIT(gpio + LOONGSON_GPIO_IN_OFFSET));
}
-static void loongson_gpio_set_value(struct gpio_chip *chip,
- unsigned gpio, int value)
+static int loongson_gpio_set_value(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
u32 val;
@@ -61,6 +61,8 @@ static void loongson_gpio_set_value(struct gpio_chip *chip,
val &= ~BIT(gpio);
LOONGSON_GPIODATA = val;
spin_unlock(&gpio_lock);
+
+ return 0;
}
static int loongson_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
@@ -104,7 +106,7 @@ static int loongson_gpio_probe(struct platform_device *pdev)
gc->base = 0;
gc->ngpio = LOONGSON_N_GPIO;
gc->get = loongson_gpio_get_value;
- gc->set = loongson_gpio_set_value;
+ gc->set_rv = loongson_gpio_set_value;
gc->direction_input = loongson_gpio_direction_input;
gc->direction_output = loongson_gpio_direction_output;
diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c
index 8e58242f5123..52ab3ac4844c 100644
--- a/drivers/gpio/gpio-lp3943.c
+++ b/drivers/gpio/gpio-lp3943.c
@@ -147,7 +147,8 @@ static int lp3943_gpio_get(struct gpio_chip *chip, unsigned int offset)
return lp3943_get_gpio_out_status(lp3943_gpio, chip, offset);
}
-static void lp3943_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int lp3943_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
u8 data;
@@ -157,15 +158,19 @@ static void lp3943_gpio_set(struct gpio_chip *chip, unsigned int offset, int val
else
data = LP3943_GPIO_OUT_LOW;
- lp3943_gpio_set_mode(lp3943_gpio, offset, data);
+ return lp3943_gpio_set_mode(lp3943_gpio, offset, data);
}
static int lp3943_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
+ int ret;
+
+ ret = lp3943_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
- lp3943_gpio_set(chip, offset, value);
lp3943_gpio->input_mask &= ~BIT(offset);
return 0;
@@ -179,7 +184,7 @@ static const struct gpio_chip lp3943_gpio_chip = {
.direction_input = lp3943_gpio_direction_input,
.get = lp3943_gpio_get,
.direction_output = lp3943_gpio_direction_output,
- .set = lp3943_gpio_set,
+ .set_rv = lp3943_gpio_set,
.base = -1,
.ngpio = LP3943_MAX_GPIO,
.can_sleep = 1,
diff --git a/drivers/gpio/gpio-lp873x.c b/drivers/gpio/gpio-lp873x.c
index 5c79ba1f229c..1908ed302e92 100644
--- a/drivers/gpio/gpio-lp873x.c
+++ b/drivers/gpio/gpio-lp873x.c
@@ -58,14 +58,14 @@ static int lp873x_gpio_get(struct gpio_chip *chip, unsigned int offset)
return val & BIT(offset * BITS_PER_GPO);
}
-static void lp873x_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int lp873x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct lp873x_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->lp873->regmap, LP873X_REG_GPO_CTRL,
- BIT(offset * BITS_PER_GPO),
- value ? BIT(offset * BITS_PER_GPO) : 0);
+ return regmap_update_bits(gpio->lp873->regmap, LP873X_REG_GPO_CTRL,
+ BIT(offset * BITS_PER_GPO),
+ value ? BIT(offset * BITS_PER_GPO) : 0);
}
static int lp873x_gpio_request(struct gpio_chip *gc, unsigned int offset)
@@ -124,7 +124,7 @@ static const struct gpio_chip template_chip = {
.direction_input = lp873x_gpio_direction_input,
.direction_output = lp873x_gpio_direction_output,
.get = lp873x_gpio_get,
- .set = lp873x_gpio_set,
+ .set_rv = lp873x_gpio_set,
.set_config = lp873x_gpio_set_config,
.base = -1,
.ngpio = 2,
diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c
index d3ce027de081..8ea687d5d028 100644
--- a/drivers/gpio/gpio-lp87565.c
+++ b/drivers/gpio/gpio-lp87565.c
@@ -30,13 +30,13 @@ static int lp87565_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset));
}
-static void lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct lp87565_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT,
- BIT(offset), value ? BIT(offset) : 0);
+ return regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT,
+ BIT(offset), value ? BIT(offset) : 0);
}
static int lp87565_gpio_get_direction(struct gpio_chip *chip,
@@ -69,8 +69,11 @@ static int lp87565_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
struct lp87565_gpio *gpio = gpiochip_get_data(chip);
+ int ret;
- lp87565_gpio_set(chip, offset, value);
+ ret = lp87565_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
return regmap_update_bits(gpio->map,
LP87565_REG_GPIO_CONFIG,
@@ -136,7 +139,7 @@ static const struct gpio_chip template_chip = {
.direction_input = lp87565_gpio_direction_input,
.direction_output = lp87565_gpio_direction_output,
.get = lp87565_gpio_get,
- .set = lp87565_gpio_set,
+ .set_rv = lp87565_gpio_set,
.set_config = lp87565_gpio_set_config,
.base = -1,
.ngpio = 3,
diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c
index 2cf9fb4637a2..b0a8da5c058d 100644
--- a/drivers/gpio/gpio-lpc18xx.c
+++ b/drivers/gpio/gpio-lpc18xx.c
@@ -42,6 +42,7 @@ struct lpc18xx_gpio_pin_ic {
void __iomem *base;
struct irq_domain *domain;
struct raw_spinlock lock;
+ struct gpio_chip *gpio;
};
struct lpc18xx_gpio_chip {
@@ -74,6 +75,7 @@ static void lpc18xx_gpio_pin_ic_mask(struct irq_data *d)
{
struct lpc18xx_gpio_pin_ic *ic = d->chip_data;
u32 type = irqd_get_trigger_type(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
raw_spin_lock(&ic->lock);
@@ -88,12 +90,17 @@ static void lpc18xx_gpio_pin_ic_mask(struct irq_data *d)
raw_spin_unlock(&ic->lock);
irq_chip_mask_parent(d);
+
+ gpiochip_disable_irq(ic->gpio, hwirq);
}
static void lpc18xx_gpio_pin_ic_unmask(struct irq_data *d)
{
struct lpc18xx_gpio_pin_ic *ic = d->chip_data;
u32 type = irqd_get_trigger_type(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(ic->gpio, hwirq);
raw_spin_lock(&ic->lock);
@@ -149,13 +156,14 @@ static int lpc18xx_gpio_pin_ic_set_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip lpc18xx_gpio_pin_ic = {
+static const struct irq_chip lpc18xx_gpio_pin_ic = {
.name = "LPC18xx GPIO pin",
.irq_mask = lpc18xx_gpio_pin_ic_mask,
.irq_unmask = lpc18xx_gpio_pin_ic_unmask,
.irq_eoi = lpc18xx_gpio_pin_ic_eoi,
.irq_set_type = lpc18xx_gpio_pin_ic_set_type,
- .flags = IRQCHIP_SET_TYPE_MASKED,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SET_TYPE_MASKED,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int lpc18xx_gpio_pin_ic_domain_alloc(struct irq_domain *domain,
@@ -240,17 +248,16 @@ static int lpc18xx_gpio_pin_ic_probe(struct lpc18xx_gpio_chip *gc)
raw_spin_lock_init(&ic->lock);
- ic->domain = irq_domain_add_hierarchy(parent_domain, 0,
- NR_LPC18XX_GPIO_PIN_IC_IRQS,
- dev->of_node,
- &lpc18xx_gpio_pin_ic_domain_ops,
- ic);
+ ic->domain = irq_domain_create_hierarchy(parent_domain, 0, NR_LPC18XX_GPIO_PIN_IC_IRQS,
+ of_fwnode_handle(dev->of_node),
+ &lpc18xx_gpio_pin_ic_domain_ops, ic);
if (!ic->domain) {
pr_err("unable to add irq domain\n");
ret = -ENODEV;
goto free_iomap;
}
+ ic->gpio = &gc->gpio;
gc->pin_ic = ic;
return 0;
@@ -263,10 +270,14 @@ free_ic:
return ret;
}
-static void lpc18xx_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int lpc18xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct lpc18xx_gpio_chip *gc = gpiochip_get_data(chip);
+
writeb(value ? 1 : 0, gc->base + offset);
+
+ return 0;
}
static int lpc18xx_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -316,7 +327,7 @@ static const struct gpio_chip lpc18xx_chip = {
.free = gpiochip_generic_free,
.direction_input = lpc18xx_gpio_direction_input,
.direction_output = lpc18xx_gpio_direction_output,
- .set = lpc18xx_gpio_set,
+ .set_rv = lpc18xx_gpio_set,
.get = lpc18xx_gpio_get,
.ngpio = LPC18XX_MAX_PORTS * LPC18XX_PINS_PER_PORT,
.owner = THIS_MODULE,
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index c097e310c9e8..6668b8bd9f1e 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -340,28 +340,34 @@ static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin,
return 0;
}
-static void lpc32xx_gpio_set_value_p012(struct gpio_chip *chip, unsigned pin,
- int value)
+static int lpc32xx_gpio_set_value_p012(struct gpio_chip *chip,
+ unsigned int pin, int value)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpio_level_p012(group, pin, value);
+
+ return 0;
}
-static void lpc32xx_gpio_set_value_p3(struct gpio_chip *chip, unsigned pin,
- int value)
+static int lpc32xx_gpio_set_value_p3(struct gpio_chip *chip,
+ unsigned int pin, int value)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpio_level_p3(group, pin, value);
+
+ return 0;
}
-static void lpc32xx_gpo_set_value(struct gpio_chip *chip, unsigned pin,
- int value)
+static int lpc32xx_gpo_set_value(struct gpio_chip *chip, unsigned int pin,
+ int value)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpo_level_p3(group, pin, value);
+
+ return 0;
}
static int lpc32xx_gpo_get_value(struct gpio_chip *chip, unsigned pin)
@@ -401,7 +407,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set = lpc32xx_gpio_set_value_p012,
+ .set_rv = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_p01,
.base = LPC32XX_GPIO_P0_GRP,
@@ -417,7 +423,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set = lpc32xx_gpio_set_value_p012,
+ .set_rv = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_p01,
.base = LPC32XX_GPIO_P1_GRP,
@@ -433,7 +439,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set = lpc32xx_gpio_set_value_p012,
+ .set_rv = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.base = LPC32XX_GPIO_P2_GRP,
.ngpio = LPC32XX_GPIO_P2_MAX,
@@ -448,7 +454,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p3,
.get = lpc32xx_gpio_get_value_p3,
.direction_output = lpc32xx_gpio_dir_output_p3,
- .set = lpc32xx_gpio_set_value_p3,
+ .set_rv = lpc32xx_gpio_set_value_p3,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_gpio_p3,
.base = LPC32XX_GPIO_P3_GRP,
@@ -476,7 +482,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.chip = {
.label = "gpo_p3",
.direction_output = lpc32xx_gpio_dir_out_always,
- .set = lpc32xx_gpo_set_value,
+ .set_rv = lpc32xx_gpo_set_value,
.get = lpc32xx_gpo_get_value,
.request = lpc32xx_gpio_request,
.base = LPC32XX_GPO_P3_GRP,
diff --git a/drivers/gpio/gpio-madera.c b/drivers/gpio/gpio-madera.c
index 8f38303fcbc4..e73e72d62bc8 100644
--- a/drivers/gpio/gpio-madera.c
+++ b/drivers/gpio/gpio-madera.c
@@ -87,23 +87,17 @@ static int madera_gpio_direction_out(struct gpio_chip *chip,
MADERA_GP1_LVL_MASK, reg_val);
}
-static void madera_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int madera_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
struct madera *madera = madera_gpio->madera;
unsigned int reg_offset = 2 * offset;
unsigned int reg_val = value ? MADERA_GP1_LVL : 0;
- int ret;
-
- ret = regmap_update_bits(madera->regmap,
- MADERA_GPIO1_CTRL_1 + reg_offset,
- MADERA_GP1_LVL_MASK, reg_val);
- /* set() doesn't return an error so log a warning */
- if (ret)
- dev_warn(madera->dev, "Failed to write to 0x%x (%d)\n",
- MADERA_GPIO1_CTRL_1 + reg_offset, ret);
+ return regmap_update_bits(madera->regmap,
+ MADERA_GPIO1_CTRL_1 + reg_offset,
+ MADERA_GP1_LVL_MASK, reg_val);
}
static const struct gpio_chip madera_gpio_chip = {
@@ -115,7 +109,7 @@ static const struct gpio_chip madera_gpio_chip = {
.direction_input = madera_gpio_direction_in,
.get = madera_gpio_get,
.direction_output = madera_gpio_direction_out,
- .set = madera_gpio_set,
+ .set_rv = madera_gpio_set,
.set_config = gpiochip_generic_config,
.can_sleep = true,
};
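The madera hunk above shows the pattern this whole series applies: the void set()/set_multiple() gpio_chip callbacks are replaced by int-returning ones registered through .set_rv/.set_multiple_rv, so a bus error propagates back to gpiolib instead of being logged (or silently dropped) inside the driver. A minimal sketch of the convention, assuming a hypothetical regmap-backed driver — the foo_* names and registers are illustrative, not from this series:

static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct foo_gpio *foo = gpiochip_get_data(gc);

	/* Return the regmap error instead of warning locally. */
	return regmap_update_bits(foo->map, FOO_LVL_REG(offset),
				  FOO_LVL_MASK(offset),
				  value ? FOO_LVL_MASK(offset) : 0);
}

	/* in probe: register via the int-returning hook */
	foo->gc.set_rv = foo_gpio_set;	/* formerly foo->gc.set */

Drivers whose hardware write cannot fail (lpc32xx, max732x below) simply return 0, which keeps the conversion mechanical.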
diff --git a/drivers/gpio/gpio-max3191x.c b/drivers/gpio/gpio-max3191x.c
index fc0708ab5192..6e6504ab740a 100644
--- a/drivers/gpio/gpio-max3191x.c
+++ b/drivers/gpio/gpio-max3191x.c
@@ -103,19 +103,6 @@ static int max3191x_direction_input(struct gpio_chip *gpio, unsigned int offset)
return 0;
}
-static int max3191x_direction_output(struct gpio_chip *gpio,
- unsigned int offset, int value)
-{
- return -EINVAL;
-}
-
-static void max3191x_set(struct gpio_chip *gpio, unsigned int offset, int value)
-{ }
-
-static void max3191x_set_multiple(struct gpio_chip *gpio, unsigned long *mask,
- unsigned long *bits)
-{ }
-
static unsigned int max3191x_wordlen(struct max3191x_chip *max3191x)
{
return max3191x->mode == STATUS_BYTE_ENABLED ? 2 : 1;
@@ -421,9 +408,6 @@ static int max3191x_probe(struct spi_device *spi)
max3191x->gpio.get_direction = max3191x_get_direction;
max3191x->gpio.direction_input = max3191x_direction_input;
- max3191x->gpio.direction_output = max3191x_direction_output;
- max3191x->gpio.set = max3191x_set;
- max3191x->gpio.set_multiple = max3191x_set_multiple;
max3191x->gpio.get = max3191x_get;
max3191x->gpio.get_multiple = max3191x_get_multiple;
max3191x->gpio.set_config = max3191x_set_config;
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index e688c13c8cc3..75d414d8c992 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -143,18 +143,21 @@ static int max7301_get(struct gpio_chip *chip, unsigned offset)
return level;
}
-static void max7301_set(struct gpio_chip *chip, unsigned offset, int value)
+static int max7301_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct max7301 *ts = gpiochip_get_data(chip);
+ int ret;
/* First 4 pins are unused in the controller */
offset += 4;
mutex_lock(&ts->lock);
- __max7301_set(ts, offset, value);
+ ret = __max7301_set(ts, offset, value);
mutex_unlock(&ts->lock);
+
+ return ret;
}
int __max730x_probe(struct max7301 *ts)
@@ -185,7 +188,7 @@ int __max730x_probe(struct max7301 *ts)
ts->chip.direction_input = max7301_direction_input;
ts->chip.get = max7301_get;
ts->chip.direction_output = max7301_direction_output;
- ts->chip.set = max7301_set;
+ ts->chip.set_rv = max7301_set;
ts->chip.ngpio = PIN_NUMBER;
ts->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 49d362907bc7..d5ffedb086af 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -225,16 +225,19 @@ out:
mutex_unlock(&chip->lock);
}
-static void max732x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
+static int max732x_gpio_set_value(struct gpio_chip *gc, unsigned int off,
+ int val)
{
unsigned base = off & ~0x7;
uint8_t mask = 1u << (off & 0x7);
max732x_gpio_set_mask(gc, base, mask, val << (off & 0x7));
+
+ return 0;
}
-static void max732x_gpio_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int max732x_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
unsigned mask_lo = mask[0] & 0xff;
unsigned mask_hi = (mask[0] >> 8) & 0xff;
@@ -243,6 +246,8 @@ static void max732x_gpio_set_multiple(struct gpio_chip *gc,
max732x_gpio_set_mask(gc, 0, mask_lo, bits[0] & 0xff);
if (mask_hi)
max732x_gpio_set_mask(gc, 8, mask_hi, (bits[0] >> 8) & 0xff);
+
+ return 0;
}
static int max732x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
@@ -580,8 +585,8 @@ static int max732x_setup_gpio(struct max732x_chip *chip,
gc->direction_input = max732x_gpio_direction_input;
if (chip->dir_output) {
gc->direction_output = max732x_gpio_direction_output;
- gc->set = max732x_gpio_set_value;
- gc->set_multiple = max732x_gpio_set_multiple;
+ gc->set_rv = max732x_gpio_set_value;
+ gc->set_multiple_rv = max732x_gpio_set_multiple;
}
gc->get = max732x_gpio_get_value;
gc->can_sleep = true;
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index 8c2a5609161f..af7af8e40afe 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -223,20 +223,17 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
return ret;
}
-static void max77620_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static int max77620_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct max77620_gpio *mgpio = gpiochip_get_data(gc);
u8 val;
- int ret;
val = (value) ? MAX77620_CNFG_GPIO_OUTPUT_VAL_HIGH :
MAX77620_CNFG_GPIO_OUTPUT_VAL_LOW;
- ret = regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
- MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK, val);
- if (ret < 0)
- dev_err(mgpio->dev, "CNFG_GPIO_OUT update failed: %d\n", ret);
+ return regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
+ MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK, val);
}
static int max77620_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
@@ -314,7 +311,7 @@ static int max77620_gpio_probe(struct platform_device *pdev)
mgpio->gpio_chip.direction_input = max77620_gpio_dir_input;
mgpio->gpio_chip.get = max77620_gpio_get;
mgpio->gpio_chip.direction_output = max77620_gpio_dir_output;
- mgpio->gpio_chip.set = max77620_gpio_set;
+ mgpio->gpio_chip.set_rv = max77620_gpio_set;
mgpio->gpio_chip.set_config = max77620_gpio_set_config;
mgpio->gpio_chip.ngpio = MAX77620_GPIO_NR;
mgpio->gpio_chip.can_sleep = 1;
diff --git a/drivers/gpio/gpio-max77759.c b/drivers/gpio/gpio-max77759.c
new file mode 100644
index 000000000000..7fe8e6f697d0
--- /dev/null
+++ b/drivers/gpio/gpio-max77759.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright 2020 Google Inc
+// Copyright 2025 Linaro Ltd.
+//
+// GPIO driver for Maxim MAX77759
+
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/device/driver.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/lockdep.h>
+#include <linux/mfd/max77759.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+
+#define MAX77759_N_GPIOS ARRAY_SIZE(max77759_gpio_line_names)
+static const char * const max77759_gpio_line_names[] = { "GPIO5", "GPIO6" };
+
+struct max77759_gpio_chip {
+ struct regmap *map;
+ struct max77759 *max77759;
+ struct gpio_chip gc;
+ struct mutex maxq_lock; /* protect MaxQ r/m/w operations */
+
+ struct mutex irq_lock; /* protect irq bus */
+ int irq_mask;
+ int irq_mask_changed;
+ int irq_trig;
+ int irq_trig_changed;
+};
+
+#define MAX77759_GPIOx_TRIGGER(offs, val) (((val) & 1) << (offs))
+#define MAX77759_GPIOx_TRIGGER_MASK(offs) MAX77759_GPIOx_TRIGGER(offs, ~0)
+enum max77759_trigger_gpio_type {
+ MAX77759_GPIO_TRIGGER_RISING = 0,
+ MAX77759_GPIO_TRIGGER_FALLING = 1
+};
+
+#define MAX77759_GPIOx_DIR(offs, dir) (((dir) & 1) << (2 + (3 * (offs))))
+#define MAX77759_GPIOx_DIR_MASK(offs) MAX77759_GPIOx_DIR(offs, ~0)
+enum max77759_control_gpio_dir {
+ MAX77759_GPIO_DIR_IN = 0,
+ MAX77759_GPIO_DIR_OUT = 1
+};
+
+#define MAX77759_GPIOx_OUTVAL(offs, val) (((val) & 1) << (3 + (3 * (offs))))
+#define MAX77759_GPIOx_OUTVAL_MASK(offs) MAX77759_GPIOx_OUTVAL(offs, ~0)
+
+#define MAX77759_GPIOx_INVAL_MASK(offs) (BIT(4) << (3 * (offs)))
+
+static int max77759_gpio_maxq_gpio_trigger_read(struct max77759_gpio_chip *chip)
+{
+ DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 1);
+ DEFINE_FLEX(struct max77759_maxq_response, rsp, rsp, length, 2);
+ int ret;
+
+ cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_TRIGGER_READ;
+
+ ret = max77759_maxq_command(chip->max77759, cmd, rsp);
+ if (ret < 0)
+ return ret;
+
+ return rsp->rsp[1];
+}
+
+static int max77759_gpio_maxq_gpio_trigger_write(struct max77759_gpio_chip *chip,
+ u8 trigger)
+{
+ DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 2);
+
+ cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_TRIGGER_WRITE;
+ cmd->cmd[1] = trigger;
+
+ return max77759_maxq_command(chip->max77759, cmd, NULL);
+}
+
+static int max77759_gpio_maxq_gpio_control_read(struct max77759_gpio_chip *chip)
+{
+ DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 1);
+ DEFINE_FLEX(struct max77759_maxq_response, rsp, rsp, length, 2);
+ int ret;
+
+ cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_CONTROL_READ;
+
+ ret = max77759_maxq_command(chip->max77759, cmd, rsp);
+ if (ret < 0)
+ return ret;
+
+ return rsp->rsp[1];
+}
+
+static int max77759_gpio_maxq_gpio_control_write(struct max77759_gpio_chip *chip,
+ u8 ctrl)
+{
+ DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 2);
+
+ cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_CONTROL_WRITE;
+ cmd->cmd[1] = ctrl;
+
+ return max77759_maxq_command(chip->max77759, cmd, NULL);
+}
+
+static int
+max77759_gpio_direction_from_control(int ctrl, unsigned int offset)
+{
+ enum max77759_control_gpio_dir dir;
+
+ dir = !!(ctrl & MAX77759_GPIOx_DIR_MASK(offset));
+ return ((dir == MAX77759_GPIO_DIR_OUT)
+ ? GPIO_LINE_DIRECTION_OUT
+ : GPIO_LINE_DIRECTION_IN);
+}
+
+static int max77759_gpio_get_direction(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ctrl;
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ if (ctrl < 0)
+ return ctrl;
+
+ return max77759_gpio_direction_from_control(ctrl, offset);
+}
+
+static int max77759_gpio_direction_helper(struct gpio_chip *gc,
+ unsigned int offset,
+ enum max77759_control_gpio_dir dir,
+ int value)
+{
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ctrl, new_ctrl;
+
+ guard(mutex)(&chip->maxq_lock);
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ if (ctrl < 0)
+ return ctrl;
+
+ new_ctrl = ctrl & ~MAX77759_GPIOx_DIR_MASK(offset);
+ new_ctrl |= MAX77759_GPIOx_DIR(offset, dir);
+
+ if (dir == MAX77759_GPIO_DIR_OUT) {
+ new_ctrl &= ~MAX77759_GPIOx_OUTVAL_MASK(offset);
+ new_ctrl |= MAX77759_GPIOx_OUTVAL(offset, value);
+ }
+
+ if (new_ctrl == ctrl)
+ return 0;
+
+ return max77759_gpio_maxq_gpio_control_write(chip, new_ctrl);
+}
+
+static int max77759_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ return max77759_gpio_direction_helper(gc, offset,
+ MAX77759_GPIO_DIR_IN, -1);
+}
+
+static int max77759_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ return max77759_gpio_direction_helper(gc, offset,
+ MAX77759_GPIO_DIR_OUT, value);
+}
+
+static int max77759_gpio_get_value(struct gpio_chip *gc, unsigned int offset)
+{
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ctrl, mask;
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ if (ctrl < 0)
+ return ctrl;
+
+ /*
+ * The input status bit doesn't reflect the pin state when the GPIO is
+ * configured as an output. Check the direction, and inspect the input
+ * or output bit accordingly.
+ */
+ mask = ((max77759_gpio_direction_from_control(ctrl, offset)
+ == GPIO_LINE_DIRECTION_IN)
+ ? MAX77759_GPIOx_INVAL_MASK(offset)
+ : MAX77759_GPIOx_OUTVAL_MASK(offset));
+
+ return !!(ctrl & mask);
+}
+
+static int max77759_gpio_set_value(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ctrl, new_ctrl;
+
+ guard(mutex)(&chip->maxq_lock);
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ if (ctrl < 0)
+ return ctrl;
+
+ new_ctrl = ctrl & ~MAX77759_GPIOx_OUTVAL_MASK(offset);
+ new_ctrl |= MAX77759_GPIOx_OUTVAL(offset, value);
+
+ if (new_ctrl == ctrl)
+ return 0;
+
+ return max77759_gpio_maxq_gpio_control_write(chip, new_ctrl);
+}
+
+static void max77759_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ chip->irq_mask &= ~MAX77759_MAXQ_REG_UIC_INT1_GPIOxI_MASK(hwirq);
+ chip->irq_mask |= MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(hwirq, 1);
+ chip->irq_mask_changed |= MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(hwirq, 1);
+
+ gpiochip_disable_irq(gc, hwirq);
+}
+
+static void max77759_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gc, hwirq);
+
+ chip->irq_mask &= ~MAX77759_MAXQ_REG_UIC_INT1_GPIOxI_MASK(hwirq);
+ chip->irq_mask |= MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(hwirq, 0);
+ chip->irq_mask_changed |= MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(hwirq, 1);
+}
+
+static int max77759_gpio_set_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ chip->irq_trig &= ~MAX77759_GPIOx_TRIGGER_MASK(hwirq);
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ chip->irq_trig |= MAX77759_GPIOx_TRIGGER(hwirq,
+ MAX77759_GPIO_TRIGGER_RISING);
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ chip->irq_trig |= MAX77759_GPIOx_TRIGGER(hwirq,
+ MAX77759_GPIO_TRIGGER_FALLING);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ chip->irq_trig_changed |= MAX77759_GPIOx_TRIGGER(hwirq, 1);
+
+ return 0;
+}
+
+static void max77759_gpio_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+
+ mutex_lock(&chip->irq_lock);
+}
+
+static int max77759_gpio_bus_sync_unlock_helper(struct gpio_chip *gc,
+ struct max77759_gpio_chip *chip)
+ __must_hold(&chip->maxq_lock)
+{
+ int ctrl, trigger, new_trigger, new_ctrl;
+ unsigned long irq_trig_changed;
+ int offset;
+ int ret;
+
+ lockdep_assert_held(&chip->maxq_lock);
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ trigger = max77759_gpio_maxq_gpio_trigger_read(chip);
+ if (ctrl < 0 || trigger < 0) {
+ dev_err(gc->parent, "failed to read current state: %d / %d\n",
+ ctrl, trigger);
+ return (ctrl < 0) ? ctrl : trigger;
+ }
+
+ new_trigger = trigger & ~chip->irq_trig_changed;
+ new_trigger |= (chip->irq_trig & chip->irq_trig_changed);
+
+ /* change GPIO direction if required */
+ new_ctrl = ctrl;
+ irq_trig_changed = chip->irq_trig_changed;
+ for_each_set_bit(offset, &irq_trig_changed, MAX77759_N_GPIOS) {
+ new_ctrl &= ~MAX77759_GPIOx_DIR_MASK(offset);
+ new_ctrl |= MAX77759_GPIOx_DIR(offset, MAX77759_GPIO_DIR_IN);
+ }
+
+ if (new_trigger != trigger) {
+ ret = max77759_gpio_maxq_gpio_trigger_write(chip, new_trigger);
+ if (ret) {
+ dev_err(gc->parent,
+ "failed to write new trigger: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (new_ctrl != ctrl) {
+ ret = max77759_gpio_maxq_gpio_control_write(chip, new_ctrl);
+ if (ret) {
+ dev_err(gc->parent,
+ "failed to write new control: %d\n", ret);
+ return ret;
+ }
+ }
+
+ chip->irq_trig_changed = 0;
+
+ return 0;
+}
+
+static void max77759_gpio_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ret;
+
+ scoped_guard(mutex, &chip->maxq_lock) {
+ ret = max77759_gpio_bus_sync_unlock_helper(gc, chip);
+ if (ret)
+ goto out_unlock;
+ }
+
+ ret = regmap_update_bits(chip->map,
+ MAX77759_MAXQ_REG_UIC_INT1_M,
+ chip->irq_mask_changed, chip->irq_mask);
+ if (ret) {
+ dev_err(gc->parent,
+ "failed to update UIC_INT1 irq mask: %d\n", ret);
+ goto out_unlock;
+ }
+
+ chip->irq_mask_changed = 0;
+
+out_unlock:
+ mutex_unlock(&chip->irq_lock);
+}
+
+static void max77759_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ seq_puts(p, dev_name(gc->parent));
+}
+
+static const struct irq_chip max77759_gpio_irq_chip = {
+ .irq_mask = max77759_gpio_irq_mask,
+ .irq_unmask = max77759_gpio_irq_unmask,
+ .irq_set_type = max77759_gpio_set_irq_type,
+ .irq_bus_lock = max77759_gpio_bus_lock,
+ .irq_bus_sync_unlock = max77759_gpio_bus_sync_unlock,
+ .irq_print_chip = max77759_gpio_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static irqreturn_t max77759_gpio_irqhandler(int irq, void *data)
+{
+ struct max77759_gpio_chip *chip = data;
+ struct gpio_chip *gc = &chip->gc;
+ bool handled = false;
+
+ /* iterate until no interrupt is pending */
+ while (true) {
+ unsigned int uic_int1;
+ int ret;
+ unsigned long pending;
+ int offset;
+
+ ret = regmap_read(chip->map, MAX77759_MAXQ_REG_UIC_INT1,
+ &uic_int1);
+ if (ret < 0) {
+ dev_err_ratelimited(gc->parent,
+ "failed to read IRQ status: %d\n",
+ ret);
+ /*
+ * If !handled, we have not completed even one loop
+ * iteration, so return IRQ_NONE in that case (and
+ * IRQ_HANDLED otherwise).
+ */
+ return IRQ_RETVAL(handled);
+ }
+
+ pending = uic_int1;
+ pending &= (MAX77759_MAXQ_REG_UIC_INT1_GPIO6I
+ | MAX77759_MAXQ_REG_UIC_INT1_GPIO5I);
+ if (!pending)
+ break;
+
+ for_each_set_bit(offset, &pending, MAX77759_N_GPIOS) {
+ /*
+ * Ack the interrupt by writing 1 to bit 'offset'; all
+ * others must be written as 0. This must be done
+ * unconditionally, hence regmap_set_bits() is
+ * inappropriate here.
+ */
+ regmap_write(chip->map, MAX77759_MAXQ_REG_UIC_INT1,
+ BIT(offset));
+
+ handle_nested_irq(irq_find_mapping(gc->irq.domain,
+ offset));
+
+ handled = true;
+ }
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+static int max77759_gpio_probe(struct platform_device *pdev)
+{
+ struct max77759_gpio_chip *chip;
+ int irq;
+ struct gpio_irq_chip *girq;
+ int ret;
+ unsigned long irq_flags;
+ struct irq_data *irqd;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->map = dev_get_regmap(pdev->dev.parent, "maxq");
+ if (!chip->map)
+ return dev_err_probe(&pdev->dev, -ENODEV, "Missing regmap\n");
+
+ irq = platform_get_irq_byname(pdev, "GPI");
+ if (irq < 0)
+ return dev_err_probe(&pdev->dev, irq, "Failed to get IRQ\n");
+
+ chip->max77759 = dev_get_drvdata(pdev->dev.parent);
+ ret = devm_mutex_init(&pdev->dev, &chip->maxq_lock);
+ if (ret)
+ return ret;
+ ret = devm_mutex_init(&pdev->dev, &chip->irq_lock);
+ if (ret)
+ return ret;
+
+ chip->gc.base = -1;
+ chip->gc.label = dev_name(&pdev->dev);
+ chip->gc.parent = &pdev->dev;
+ chip->gc.can_sleep = true;
+
+ chip->gc.names = max77759_gpio_line_names;
+ chip->gc.ngpio = MAX77759_N_GPIOS;
+ chip->gc.get_direction = max77759_gpio_get_direction;
+ chip->gc.direction_input = max77759_gpio_direction_input;
+ chip->gc.direction_output = max77759_gpio_direction_output;
+ chip->gc.get = max77759_gpio_get_value;
+ chip->gc.set_rv = max77759_gpio_set_value;
+
+ girq = &chip->gc.irq;
+ gpio_irq_chip_set_chip(girq, &max77759_gpio_irq_chip);
+ /* This will let us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ girq->threaded = true;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to add GPIO chip\n");
+
+ irq_flags = IRQF_ONESHOT | IRQF_SHARED;
+ irqd = irq_get_irq_data(irq);
+ if (irqd)
+ irq_flags |= irqd_get_trigger_type(irqd);
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ max77759_gpio_irqhandler, irq_flags,
+ dev_name(&pdev->dev), chip);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to request IRQ\n");
+
+ return ret;
+}
+
+static const struct of_device_id max77759_gpio_of_id[] = {
+ { .compatible = "maxim,max77759-gpio", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77759_gpio_of_id);
+
+static const struct platform_device_id max77759_gpio_platform_id[] = {
+ { "max77759-gpio", },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, max77759_gpio_platform_id);
+
+static struct platform_driver max77759_gpio_driver = {
+ .driver = {
+ .name = "max77759-gpio",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = max77759_gpio_of_id,
+ },
+ .probe = max77759_gpio_probe,
+ .id_table = max77759_gpio_platform_id,
+};
+
+module_platform_driver(max77759_gpio_driver);
+
+MODULE_AUTHOR("André Draszik <andre.draszik@linaro.org>");
+MODULE_DESCRIPTION("GPIO driver for Maxim MAX77759");
+MODULE_LICENSE("GPL");
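Worth noting in the new driver above: it relies on the scope-based locking helpers from <linux/cleanup.h>. guard(mutex)(&lock) holds the mutex until the enclosing scope is left, and scoped_guard(mutex, &lock) { ... } bounds the critical section explicitly, which eliminates unlock calls on every early-return path. A minimal sketch (the foo type is hypothetical):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static int foo_update(struct foo *foo, u32 val)
{
	guard(mutex)(&foo->lock);	/* released automatically on any return */

	if (val > foo->max)
		return -EINVAL;		/* no explicit mutex_unlock() needed */

	foo->cur = val;
	return 0;
}

max77759_gpio_bus_sync_unlock() above shows the scoped variant: the MaxQ read-modify-write runs inside scoped_guard(mutex, &chip->maxq_lock) { ... }, while irq_lock is still dropped manually because it must outlive that scope.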
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 7ee891ef6905..5ee2991ecdfd 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -119,7 +119,7 @@ static int mb86s70_gpio_get(struct gpio_chip *gc, unsigned gpio)
return !!(readl(gchip->base + PDR(gpio)) & OFFSET(gpio));
}
-static void mb86s70_gpio_set(struct gpio_chip *gc, unsigned gpio, int value)
+static int mb86s70_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
struct mb86s70_gpio_chip *gchip = gpiochip_get_data(gc);
unsigned long flags;
@@ -135,6 +135,8 @@ static void mb86s70_gpio_set(struct gpio_chip *gc, unsigned gpio, int value)
writel(val, gchip->base + PDR(gpio));
spin_unlock_irqrestore(&gchip->lock, flags);
+
+ return 0;
}
static int mb86s70_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
@@ -178,7 +180,7 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
gchip->gc.request = mb86s70_gpio_request;
gchip->gc.free = mb86s70_gpio_free;
gchip->gc.get = mb86s70_gpio_get;
- gchip->gc.set = mb86s70_gpio_set;
+ gchip->gc.set_rv = mb86s70_gpio_set;
gchip->gc.to_irq = mb86s70_gpio_to_irq;
gchip->gc.label = dev_name(&pdev->dev);
gchip->gc.ngpio = 32;
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index 5fb357d7b78a..e68956104161 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -57,15 +57,18 @@ static int __mc33880_set(struct mc33880 *mc, unsigned offset, int value)
}
-static void mc33880_set(struct gpio_chip *chip, unsigned offset, int value)
+static int mc33880_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct mc33880 *mc = gpiochip_get_data(chip);
+ int ret;
mutex_lock(&mc->lock);
- __mc33880_set(mc, offset, value);
+ ret = __mc33880_set(mc, offset, value);
mutex_unlock(&mc->lock);
+
+ return ret;
}
static int mc33880_probe(struct spi_device *spi)
@@ -100,7 +103,7 @@ static int mc33880_probe(struct spi_device *spi)
mc->spi = spi;
mc->chip.label = DRIVER_NAME;
- mc->chip.set = mc33880_set;
+ mc->chip.set_rv = mc33880_set;
mc->chip.base = pdata->base;
mc->chip.ngpio = PIN_NUMBER;
mc->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 48e3768a830e..12cf36f9ca63 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -89,7 +89,7 @@ struct ioh_gpio {
static const int num_ports[] = {6, 12, 16, 16, 15, 16, 16, 12};
-static void ioh_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
+static int ioh_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
{
u32 reg_val;
struct ioh_gpio *chip = gpiochip_get_data(gpio);
@@ -104,6 +104,8 @@ static void ioh_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
spin_unlock_irqrestore(&chip->spinlock, flags);
+
+ return 0;
}
static int ioh_gpio_get(struct gpio_chip *gpio, unsigned nr)
@@ -222,7 +224,7 @@ static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
gpio->direction_input = ioh_gpio_direction_input;
gpio->get = ioh_gpio_get;
gpio->direction_output = ioh_gpio_direction_output;
- gpio->set = ioh_gpio_set;
+ gpio->set_rv = ioh_gpio_set;
gpio->dbg_show = NULL;
gpio->base = -1;
gpio->ngpio = num_port;
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 541517536489..121efdd71e45 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -123,9 +123,12 @@ static irqreturn_t mpc8xxx_gpio_irq_cascade(int irq, void *data)
static void mpc8xxx_irq_unmask(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long flags;
+ gpiochip_enable_irq(gc, hwirq);
+
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
@@ -138,6 +141,7 @@ static void mpc8xxx_irq_unmask(struct irq_data *d)
static void mpc8xxx_irq_mask(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long flags;
@@ -148,6 +152,8 @@ static void mpc8xxx_irq_mask(struct irq_data *d)
& ~mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+
+ gpiochip_disable_irq(gc, hwirq);
}
static void mpc8xxx_irq_ack(struct irq_data *d)
@@ -244,6 +250,8 @@ static struct irq_chip mpc8xxx_irq_chip = {
.irq_ack = mpc8xxx_irq_ack,
/* this might get overwritten in mpc8xxx_probe() */
.irq_set_type = mpc8xxx_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
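The mpc8xxx change above is the standard conversion to an immutable irqchip: mask/unmask gain gpiochip_disable_irq()/gpiochip_enable_irq() calls so gpiolib keeps its own usage accounting, the struct is flagged IRQCHIP_IMMUTABLE, and GPIOCHIP_IRQ_RESOURCE_HELPERS supplies the request/release-resources callbacks. The same shape recurs in the pxa, timberdale and xgene-sb hunks below. A condensed sketch (hypothetical bar_* driver):

static void bar_irq_mask(struct irq_data *d)
{
	struct bar_gpio *bar = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	bar_hw_mask(bar, hwirq);		/* mask in hardware first... */
	gpiochip_disable_irq(&bar->gc, hwirq);	/* ...then drop the accounting */
}

static void bar_irq_unmask(struct irq_data *d)
{
	struct bar_gpio *bar = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	gpiochip_enable_irq(&bar->gc, hwirq);	/* account first... */
	bar_hw_unmask(bar, hwirq);		/* ...then unmask in hardware */
}

static const struct irq_chip bar_irq_chip = {
	.name		= "bar",
	.irq_mask	= bar_irq_mask,
	.irq_unmask	= bar_irq_unmask,
	.flags		= IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

Note the ordering the hunks follow consistently: enable the gpiolib side before unmasking hardware, and mask hardware before disabling it.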
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 3604abcb6fec..57633a7b4270 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -408,9 +408,8 @@ static void mvebu_gpio_irq_ack(struct irq_data *d)
struct mvebu_gpio_chip *mvchip = gc->private;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
mvebu_gpio_write_edge_cause(mvchip, ~mask);
- irq_gc_unlock(gc);
}
static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
@@ -420,10 +419,9 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
ct->mask_cache_priv &= ~mask;
mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv);
- irq_gc_unlock(gc);
}
static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
@@ -433,11 +431,10 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
mvebu_gpio_write_edge_cause(mvchip, ~mask);
ct->mask_cache_priv |= mask;
mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv);
- irq_gc_unlock(gc);
}
static void mvebu_gpio_level_irq_mask(struct irq_data *d)
@@ -447,10 +444,9 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
ct->mask_cache_priv &= ~mask;
mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv);
- irq_gc_unlock(gc);
}
static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
@@ -460,10 +456,9 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
ct->mask_cache_priv |= mask;
mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv);
- irq_gc_unlock(gc);
}
/*****************************************************************************
@@ -1242,7 +1237,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
return 0;
mvchip->domain =
- irq_domain_add_linear(np, ngpios, &irq_generic_chip_ops, NULL);
+ irq_domain_create_linear(of_fwnode_handle(np), ngpios, &irq_generic_chip_ops, NULL);
if (!mvchip->domain) {
dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
mvchip->chip.label);
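The irq_domain hunk above is the first of several mechanical conversions in this series from the OF-specific irq_domain_add_{linear,legacy,simple}() helpers to the fwnode-based irq_domain_create_*() variants, wrapping the device_node with of_fwnode_handle(). The resulting call sites are firmware-agnostic. Sketch of the pattern:

	struct irq_domain *domain;

	/* of_fwnode_handle() maps a NULL node to NULL, so non-DT callers
	 * (see the sa1100 hunk below) can pass NULL directly. */
	domain = irq_domain_create_linear(of_fwnode_handle(np), ngpios,
					  &irq_generic_chip_ops, NULL);
	if (!domain)
		return -ENOMEM;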
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 619b6fb9d833..fae1a30f8ae6 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -490,7 +490,14 @@ static int mxc_gpio_probe(struct platform_device *pdev)
port->gc.request = mxc_gpio_request;
port->gc.free = mxc_gpio_free;
port->gc.to_irq = mxc_gpio_to_irq;
- port->gc.base = of_alias_get_id(np, "gpio") * 32;
+ /*
+ * Driver is DT-only, so a fixed base need only be maintained for legacy
+ * userspace with the sysfs interface.
+ */
+ if (IS_ENABLED(CONFIG_GPIO_SYSFS))
+ port->gc.base = of_alias_get_id(np, "gpio") * 32;
+ else /* silence boot time warning */
+ port->gc.base = -1;
err = devm_gpiochip_add_data(&pdev->dev, &port->gc, port);
if (err)
@@ -502,7 +509,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
goto out_bgio;
}
- port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
+ port->domain = irq_domain_create_legacy(of_fwnode_handle(np), 32, irq_base, 0,
&irq_domain_simple_ops, NULL);
if (!port->domain) {
err = -ENODEV;
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 024ad077e98d..b418fbccb26c 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -303,8 +303,8 @@ static int mxs_gpio_probe(struct platform_device *pdev)
goto out_iounmap;
}
- port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ port->domain = irq_domain_create_legacy(of_fwnode_handle(np), 32, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
if (!port->domain) {
err = -ENODEV;
goto out_iounmap;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 13cc120cf11f..b852e4997629 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -215,6 +215,8 @@ struct pca953x_chip {
DECLARE_BITMAP(irq_stat, MAX_LINE);
DECLARE_BITMAP(irq_trig_raise, MAX_LINE);
DECLARE_BITMAP(irq_trig_fall, MAX_LINE);
+ DECLARE_BITMAP(irq_trig_level_high, MAX_LINE);
+ DECLARE_BITMAP(irq_trig_level_low, MAX_LINE);
#endif
atomic_t wakeup_path;
@@ -774,6 +776,8 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
pca953x_read_regs(chip, chip->regs->direction, reg_direction);
bitmap_or(irq_mask, chip->irq_trig_fall, chip->irq_trig_raise, gc->ngpio);
+ bitmap_or(irq_mask, irq_mask, chip->irq_trig_level_high, gc->ngpio);
+ bitmap_or(irq_mask, irq_mask, chip->irq_trig_level_low, gc->ngpio);
bitmap_complement(reg_direction, reg_direction, gc->ngpio);
bitmap_and(irq_mask, irq_mask, reg_direction, gc->ngpio);
@@ -791,13 +795,15 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
struct device *dev = &chip->client->dev;
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- if (!(type & IRQ_TYPE_EDGE_BOTH)) {
+ if (!(type & IRQ_TYPE_SENSE_MASK)) {
dev_err(dev, "irq %d: unsupported type %d\n", d->irq, type);
return -EINVAL;
}
assign_bit(hwirq, chip->irq_trig_fall, type & IRQ_TYPE_EDGE_FALLING);
assign_bit(hwirq, chip->irq_trig_raise, type & IRQ_TYPE_EDGE_RISING);
+ assign_bit(hwirq, chip->irq_trig_level_low, type & IRQ_TYPE_LEVEL_LOW);
+ assign_bit(hwirq, chip->irq_trig_level_high, type & IRQ_TYPE_LEVEL_HIGH);
return 0;
}
@@ -810,6 +816,8 @@ static void pca953x_irq_shutdown(struct irq_data *d)
clear_bit(hwirq, chip->irq_trig_raise);
clear_bit(hwirq, chip->irq_trig_fall);
+ clear_bit(hwirq, chip->irq_trig_level_low);
+ clear_bit(hwirq, chip->irq_trig_level_high);
}
static void pca953x_irq_print_chip(struct irq_data *data, struct seq_file *p)
@@ -840,6 +848,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
DECLARE_BITMAP(cur_stat, MAX_LINE);
DECLARE_BITMAP(new_stat, MAX_LINE);
DECLARE_BITMAP(trigger, MAX_LINE);
+ DECLARE_BITMAP(edges, MAX_LINE);
int ret;
ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
@@ -857,13 +866,26 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
- if (bitmap_empty(trigger, gc->ngpio))
- return false;
+ if (bitmap_empty(chip->irq_trig_level_high, gc->ngpio) &&
+ bitmap_empty(chip->irq_trig_level_low, gc->ngpio)) {
+ if (bitmap_empty(trigger, gc->ngpio))
+ return false;
+ }
bitmap_and(cur_stat, chip->irq_trig_fall, old_stat, gc->ngpio);
bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio);
- bitmap_or(new_stat, old_stat, cur_stat, gc->ngpio);
- bitmap_and(pending, new_stat, trigger, gc->ngpio);
+ bitmap_or(edges, old_stat, cur_stat, gc->ngpio);
+ bitmap_and(pending, edges, trigger, gc->ngpio);
+
+ bitmap_and(cur_stat, new_stat, chip->irq_trig_level_high, gc->ngpio);
+ bitmap_and(cur_stat, cur_stat, chip->irq_mask, gc->ngpio);
+ bitmap_or(pending, pending, cur_stat, gc->ngpio);
+
+ bitmap_complement(cur_stat, new_stat, gc->ngpio);
+ bitmap_and(cur_stat, cur_stat, reg_direction, gc->ngpio);
+ bitmap_and(old_stat, cur_stat, chip->irq_trig_level_low, gc->ngpio);
+ bitmap_and(old_stat, old_stat, chip->irq_mask, gc->ngpio);
+ bitmap_or(pending, pending, old_stat, gc->ngpio);
return !bitmap_empty(pending, gc->ngpio);
}
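With level triggers supported, pca953x_irq_pending() can no longer bail out just because no edge fired: a level interrupt is pending whenever an unmasked line currently sits at its trigger level. Flattening the bitmap_*() calls above into plain u32 arithmetic for a hypothetical 32-line expander makes the logic easier to follow (trigger holds the lines whose input state changed since the last read, computed earlier in the function; reg_direction has 1 for input lines):

	u32 edge = ((old_stat & trig_fall) | (new_stat & trig_raise)) & trigger;
	u32 high = new_stat & trig_level_high & irq_mask;
	u32 low  = ~new_stat & reg_direction & trig_level_low & irq_mask;
	u32 pending = edge | high | low;

	return pending != 0;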
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 91cea97255fa..aead35ea090e 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -497,6 +497,8 @@ static void pxa_mask_muxed_gpio(struct irq_data *d)
gfer = readl_relaxed(base + GFER_OFFSET) & ~GPIO_bit(gpio);
writel_relaxed(grer, base + GRER_OFFSET);
writel_relaxed(gfer, base + GFER_OFFSET);
+
+ gpiochip_disable_irq(&pchip->chip, gpio);
}
static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on)
@@ -516,17 +518,21 @@ static void pxa_unmask_muxed_gpio(struct irq_data *d)
unsigned int gpio = irqd_to_hwirq(d);
struct pxa_gpio_bank *c = gpio_to_pxabank(&pchip->chip, gpio);
+ gpiochip_enable_irq(&pchip->chip, gpio);
+
c->irq_mask |= GPIO_bit(gpio);
update_edge_detect(c);
}
-static struct irq_chip pxa_muxed_gpio_chip = {
+static const struct irq_chip pxa_muxed_gpio_chip = {
.name = "GPIO",
.irq_ack = pxa_ack_muxed_gpio,
.irq_mask = pxa_mask_muxed_gpio,
.irq_unmask = pxa_unmask_muxed_gpio,
.irq_set_type = pxa_gpio_irq_type,
.irq_set_wake = pxa_gpio_set_wake,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int pxa_gpio_nums(struct platform_device *pdev)
@@ -636,9 +642,9 @@ static int pxa_gpio_probe(struct platform_device *pdev)
if (!pxa_last_gpio)
return -EINVAL;
- pchip->irqdomain = irq_domain_add_legacy(pdev->dev.of_node,
- pxa_last_gpio + 1, irq_base,
- 0, &pxa_irq_domain_ops, pchip);
+ pchip->irqdomain = irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node),
+ pxa_last_gpio + 1, irq_base, 0,
+ &pxa_irq_domain_ops, pchip);
if (!pchip->irqdomain)
return -ENOMEM;
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index 01a3b3dac58b..c63352f2f1ec 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -521,7 +521,7 @@ static int rockchip_interrupts_register(struct rockchip_pin_bank *bank)
struct irq_chip_generic *gc;
int ret;
- bank->domain = irq_domain_add_linear(bank->of_node, 32,
+ bank->domain = irq_domain_create_linear(of_fwnode_handle(bank->of_node), 32,
&irq_generic_chip_ops, NULL);
if (!bank->domain) {
dev_warn(bank->dev, "could not init irq domain for bank %s\n",
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 242dad763ac4..3f3ee36bc3cb 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -319,7 +319,7 @@ void __init sa1100_init_gpio(void)
gpiochip_add_data(&sa1100_gpio_chip.chip, NULL);
- sa1100_gpio_irqdomain = irq_domain_add_simple(NULL,
+ sa1100_gpio_irqdomain = irq_domain_create_simple(NULL,
28, IRQ_GPIO0,
&sa1100_gpio_irqdomain_ops, sgc);
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index c2a2c76c1652..6a3c4c625138 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -169,7 +169,7 @@ static int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST,
IRQ_LEVEL | IRQ_NOPROBE);
- sd->id = irq_domain_add_legacy(pdev->dev.of_node, SDV_NUM_PUB_GPIOS,
+ sd->id = irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node), SDV_NUM_PUB_GPIOS,
sd->irq_base, 0, &irq_domain_sdv_ops, sd);
if (!sd->id)
return -ENODEV;
diff --git a/drivers/gpio/gpio-spacemit-k1.c b/drivers/gpio/gpio-spacemit-k1.c
new file mode 100644
index 000000000000..f027066365ff
--- /dev/null
+++ b/drivers/gpio/gpio-spacemit-k1.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
+ * Copyright (C) 2025 Yixun Lan <dlan@gentoo.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+
+/* register offset */
+#define SPACEMIT_GPLR 0x00 /* port level - R */
+#define SPACEMIT_GPDR 0x0c /* port direction - R/W */
+#define SPACEMIT_GPSR 0x18 /* port set - W */
+#define SPACEMIT_GPCR 0x24 /* port clear - W */
+#define SPACEMIT_GRER 0x30 /* port rising edge - R/W */
+#define SPACEMIT_GFER 0x3c /* port falling edge - R/W */
+#define SPACEMIT_GEDR 0x48 /* edge detect status - R/W1C */
+#define SPACEMIT_GSDR 0x54 /* (set) direction - W */
+#define SPACEMIT_GCDR 0x60 /* (clear) direction - W */
+#define SPACEMIT_GSRER 0x6c /* (set) rising edge detect enable - W */
+#define SPACEMIT_GCRER 0x78 /* (clear) rising edge detect enable - W */
+#define SPACEMIT_GSFER 0x84 /* (set) falling edge detect enable - W */
+#define SPACEMIT_GCFER 0x90 /* (clear) falling edge detect enable - W */
+#define SPACEMIT_GAPMASK 0x9c /* interrupt mask, 0=disable, 1=enable - R/W */
+
+#define SPACEMIT_NR_BANKS 4
+#define SPACEMIT_NR_GPIOS_PER_BANK 32
+
+#define to_spacemit_gpio_bank(x) container_of((x), struct spacemit_gpio_bank, gc)
+
+struct spacemit_gpio;
+
+struct spacemit_gpio_bank {
+ struct gpio_chip gc;
+ struct spacemit_gpio *sg;
+ void __iomem *base;
+ u32 irq_mask;
+ u32 irq_rising_edge;
+ u32 irq_falling_edge;
+};
+
+struct spacemit_gpio {
+ struct device *dev;
+ struct spacemit_gpio_bank sgb[SPACEMIT_NR_BANKS];
+};
+
+static u32 spacemit_gpio_bank_index(struct spacemit_gpio_bank *gb)
+{
+ return (u32)(gb - gb->sg->sgb);
+}
+
+static irqreturn_t spacemit_gpio_irq_handler(int irq, void *dev_id)
+{
+ struct spacemit_gpio_bank *gb = dev_id;
+ unsigned long pending;
+ u32 n, gedr;
+
+ gedr = readl(gb->base + SPACEMIT_GEDR);
+ if (!gedr)
+ return IRQ_NONE;
+ writel(gedr, gb->base + SPACEMIT_GEDR);
+
+ pending = gedr & gb->irq_mask;
+ if (!pending)
+ return IRQ_NONE;
+
+ for_each_set_bit(n, &pending, BITS_PER_LONG)
+ handle_nested_irq(irq_find_mapping(gb->gc.irq.domain, n));
+
+ return IRQ_HANDLED;
+}
+
+static void spacemit_gpio_irq_ack(struct irq_data *d)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(irqd_to_hwirq(d)), gb->base + SPACEMIT_GEDR);
+}
+
+static void spacemit_gpio_irq_mask(struct irq_data *d)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(d);
+ u32 bit = BIT(irqd_to_hwirq(d));
+
+ gb->irq_mask &= ~bit;
+ writel(gb->irq_mask, gb->base + SPACEMIT_GAPMASK);
+
+ if (bit & gb->irq_rising_edge)
+ writel(bit, gb->base + SPACEMIT_GCRER);
+
+ if (bit & gb->irq_falling_edge)
+ writel(bit, gb->base + SPACEMIT_GCFER);
+}
+
+static void spacemit_gpio_irq_unmask(struct irq_data *d)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(d);
+ u32 bit = BIT(irqd_to_hwirq(d));
+
+ gb->irq_mask |= bit;
+
+ if (bit & gb->irq_rising_edge)
+ writel(bit, gb->base + SPACEMIT_GSRER);
+
+ if (bit & gb->irq_falling_edge)
+ writel(bit, gb->base + SPACEMIT_GSFER);
+
+ writel(gb->irq_mask, gb->base + SPACEMIT_GAPMASK);
+}
+
+static int spacemit_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(d);
+ u32 bit = BIT(irqd_to_hwirq(d));
+
+ if (type & IRQ_TYPE_EDGE_RISING) {
+ gb->irq_rising_edge |= bit;
+ writel(bit, gb->base + SPACEMIT_GSRER);
+ } else {
+ gb->irq_rising_edge &= ~bit;
+ writel(bit, gb->base + SPACEMIT_GCRER);
+ }
+
+ if (type & IRQ_TYPE_EDGE_FALLING) {
+ gb->irq_falling_edge |= bit;
+ writel(bit, gb->base + SPACEMIT_GSFER);
+ } else {
+ gb->irq_falling_edge &= ~bit;
+ writel(bit, gb->base + SPACEMIT_GCFER);
+ }
+
+ return 0;
+}
+
+static void spacemit_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(data);
+
+ seq_printf(p, "%s-%d", dev_name(gb->gc.parent), spacemit_gpio_bank_index(gb));
+}
+
+static struct irq_chip spacemit_gpio_chip = {
+ .name = "k1-gpio-irqchip",
+ .irq_ack = spacemit_gpio_irq_ack,
+ .irq_mask = spacemit_gpio_irq_mask,
+ .irq_unmask = spacemit_gpio_irq_unmask,
+ .irq_set_type = spacemit_gpio_irq_set_type,
+ .irq_print_chip = spacemit_gpio_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SKIP_SET_WAKE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static bool spacemit_of_node_instance_match(struct gpio_chip *gc, unsigned int i)
+{
+ struct spacemit_gpio_bank *gb = gpiochip_get_data(gc);
+ struct spacemit_gpio *sg = gb->sg;
+
+ if (i >= SPACEMIT_NR_BANKS)
+ return false;
+
+ return (gc == &sg->sgb[i].gc);
+}
+
+static int spacemit_gpio_add_bank(struct spacemit_gpio *sg,
+ void __iomem *regs,
+ int index, int irq)
+{
+ struct spacemit_gpio_bank *gb = &sg->sgb[index];
+ struct gpio_chip *gc = &gb->gc;
+ struct device *dev = sg->dev;
+ struct gpio_irq_chip *girq;
+ void __iomem *dat, *set, *clr, *dirin, *dirout;
+ int ret, bank_base[] = { 0x0, 0x4, 0x8, 0x100 };
+
+ gb->base = regs + bank_base[index];
+
+ dat = gb->base + SPACEMIT_GPLR;
+ set = gb->base + SPACEMIT_GPSR;
+ clr = gb->base + SPACEMIT_GPCR;
+ dirin = gb->base + SPACEMIT_GCDR;
+ dirout = gb->base + SPACEMIT_GSDR;
+
+ /* This registers 32 GPIO lines per bank */
+ ret = bgpio_init(gc, dev, 4, dat, set, clr, dirout, dirin,
+ BGPIOF_UNREADABLE_REG_SET | BGPIOF_UNREADABLE_REG_DIR);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init gpio chip\n");
+
+ gb->sg = sg;
+
+ gc->label = dev_name(dev);
+ gc->request = gpiochip_generic_request;
+ gc->free = gpiochip_generic_free;
+ gc->ngpio = SPACEMIT_NR_GPIOS_PER_BANK;
+ gc->base = -1;
+ gc->of_gpio_n_cells = 3;
+ gc->of_node_instance_match = spacemit_of_node_instance_match;
+
+ girq = &gc->irq;
+ girq->threaded = true;
+ girq->handler = handle_simple_irq;
+
+ gpio_irq_chip_set_chip(girq, &spacemit_gpio_chip);
+
+ /* Disable all interrupts */
+ writel(0, gb->base + SPACEMIT_GAPMASK);
+ /* Disable edge detection */
+ writel(0x0, gb->base + SPACEMIT_GRER);
+ writel(0x0, gb->base + SPACEMIT_GFER);
+ /* Clear any stale edge-detect enables */
+ writel(0xffffffff, gb->base + SPACEMIT_GCRER);
+ writel(0xffffffff, gb->base + SPACEMIT_GCFER);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ spacemit_gpio_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ gb->gc.label, gb);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to register IRQ\n");
+
+ ret = devm_gpiochip_add_data(dev, gc, gb);
+ if (ret)
+ return ret;
+
+ /* Distinguish IRQ domains, for selecting three-cell mode */
+ irq_domain_update_bus_token(girq->domain, DOMAIN_BUS_WIRED);
+
+ return 0;
+}
+
+static int spacemit_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spacemit_gpio *sg;
+ struct clk *core_clk, *bus_clk;
+ void __iomem *regs;
+ int i, irq, ret;
+
+ sg = devm_kzalloc(dev, sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return -ENOMEM;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ sg->dev = dev;
+
+ core_clk = devm_clk_get_enabled(dev, "core");
+ if (IS_ERR(core_clk))
+ return dev_err_probe(dev, PTR_ERR(core_clk), "failed to get clock\n");
+
+ bus_clk = devm_clk_get_enabled(dev, "bus");
+ if (IS_ERR(bus_clk))
+ return dev_err_probe(dev, PTR_ERR(bus_clk), "failed to get bus clock\n");
+
+ for (i = 0; i < SPACEMIT_NR_BANKS; i++) {
+ ret = spacemit_gpio_add_bank(sg, regs, i, irq);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id spacemit_gpio_dt_ids[] = {
+ { .compatible = "spacemit,k1-gpio" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver spacemit_gpio_driver = {
+ .probe = spacemit_gpio_probe,
+ .driver = {
+ .name = "k1-gpio",
+ .of_match_table = spacemit_gpio_dt_ids,
+ },
+};
+module_platform_driver(spacemit_gpio_driver);
+
+MODULE_AUTHOR("Yixun Lan <dlan@gentoo.org>");
+MODULE_DESCRIPTION("GPIO driver for SpacemiT K1 SoC");
+MODULE_LICENSE("GPL");
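A design note on the new driver above: the K1 banks expose separate write-only set/clear registers (GSDR/GCDR for direction, GSRER/GCRER and GSFER/GCFER for the edge-detect enables), so a state change is a single write with no read-modify-write cycle and no lock around the register access. bgpio_init() is handed both direction registers and flagged BGPIOF_UNREADABLE_REG_SET | BGPIOF_UNREADABLE_REG_DIR because neither register can be read back. Illustrative sketch for one line of a bank:

	/* drive line n as output, then revert it to input */
	writel(BIT(n), gb->base + SPACEMIT_GSDR);	/* set direction bit */
	writel(BIT(n), gb->base + SPACEMIT_GCDR);	/* clear direction bit */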
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index b6335cde455f..8cf676fd0a0b 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -183,7 +183,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (ret != 0)
return ret;
- tb10x_gpio->domain = irq_domain_add_linear(np,
+ tb10x_gpio->domain = irq_domain_create_linear(of_fwnode_handle(np),
tb10x_gpio->gc.ngpio,
&irq_generic_chip_ops, NULL);
if (!tb10x_gpio->domain) {
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index fad979797486..cb303a26f4d3 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -103,20 +103,26 @@ static void timbgpio_irq_disable(struct irq_data *d)
{
struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
int offset = d->irq - tgpio->irq_base;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
spin_lock_irqsave(&tgpio->lock, flags);
tgpio->last_ier &= ~(1UL << offset);
iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
spin_unlock_irqrestore(&tgpio->lock, flags);
+
+ gpiochip_disable_irq(&tgpio->gpio, hwirq);
}
static void timbgpio_irq_enable(struct irq_data *d)
{
struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
int offset = d->irq - tgpio->irq_base;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
+ gpiochip_enable_irq(&tgpio->gpio, hwirq);
+
spin_lock_irqsave(&tgpio->lock, flags);
tgpio->last_ier |= 1UL << offset;
iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
@@ -205,11 +211,13 @@ static void timbgpio_irq(struct irq_desc *desc)
iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
}
-static struct irq_chip timbgpio_irqchip = {
+static const struct irq_chip timbgpio_irqchip = {
.name = "GPIO",
.irq_enable = timbgpio_irq_enable,
.irq_disable = timbgpio_irq_disable,
.irq_set_type = timbgpio_irq_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int timbgpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index bcd692229c7c..0d17985a5fdc 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -502,7 +502,6 @@ static void gpio_twl4030_power_off_action(void *data)
static int gpio_twl4030_probe(struct platform_device *pdev)
{
struct twl4030_gpio_platform_data *pdata;
- struct device_node *node = pdev->dev.of_node;
struct gpio_twl4030_priv *priv;
int ret, irq_base;
@@ -524,8 +523,8 @@ static int gpio_twl4030_probe(struct platform_device *pdev)
return irq_base;
}
- irq_domain_add_legacy(node, TWL4030_GPIO_MAX, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node), TWL4030_GPIO_MAX, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
ret = twl4030_sih_setup(&pdev->dev, TWL4030_MODULE_GPIO, irq_base);
if (ret < 0)
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 4dad7ce0c4dc..7de0d5b53d56 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -345,4 +345,6 @@ static struct platform_driver vf610_gpio_driver = {
.probe = vf610_gpio_probe,
};
-builtin_platform_driver(vf610_gpio_driver);
+module_platform_driver(vf610_gpio_driver);
+MODULE_DESCRIPTION("VF610 GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 48b829733b15..b51b1fa726bb 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -103,12 +103,32 @@ static int xgene_gpio_sb_irq_set_type(struct irq_data *d, unsigned int type)
return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}
-static struct irq_chip xgene_gpio_sb_irq_chip = {
+static void xgene_gpio_sb_irq_mask(struct irq_data *d)
+{
+ struct xgene_gpio_sb *priv = irq_data_get_irq_chip_data(d);
+
+ irq_chip_mask_parent(d);
+
+ gpiochip_disable_irq(&priv->gc, d->hwirq);
+}
+
+static void xgene_gpio_sb_irq_unmask(struct irq_data *d)
+{
+ struct xgene_gpio_sb *priv = irq_data_get_irq_chip_data(d);
+
+ gpiochip_enable_irq(&priv->gc, d->hwirq);
+
+ irq_chip_unmask_parent(d);
+}
+
+static const struct irq_chip xgene_gpio_sb_irq_chip = {
.name = "sbgpio",
.irq_eoi = irq_chip_eoi_parent,
- .irq_mask = irq_chip_mask_parent,
- .irq_unmask = irq_chip_unmask_parent,
+ .irq_mask = xgene_gpio_sb_irq_mask,
+ .irq_unmask = xgene_gpio_sb_irq_unmask,
.irq_set_type = xgene_gpio_sb_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi-core.c
index 69caa35c58df..12b24a717e43 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi-core.c
@@ -23,29 +23,6 @@
#include "gpiolib.h"
#include "gpiolib-acpi.h"
-static int run_edge_events_on_boot = -1;
-module_param(run_edge_events_on_boot, int, 0444);
-MODULE_PARM_DESC(run_edge_events_on_boot,
- "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
-
-static char *ignore_wake;
-module_param(ignore_wake, charp, 0444);
-MODULE_PARM_DESC(ignore_wake,
- "controller@pin combos on which to ignore the ACPI wake flag "
- "ignore_wake=controller@pin[,controller@pin[,...]]");
-
-static char *ignore_interrupt;
-module_param(ignore_interrupt, charp, 0444);
-MODULE_PARM_DESC(ignore_interrupt,
- "controller@pin combos on which to ignore interrupt "
- "ignore_interrupt=controller@pin[,controller@pin[,...]]");
-
-struct acpi_gpiolib_dmi_quirk {
- bool no_edge_events_on_boot;
- char *ignore_wake;
- char *ignore_interrupt;
-};
-
/**
* struct acpi_gpio_event - ACPI GPIO event handler data
*
@@ -96,10 +73,10 @@ struct acpi_gpio_chip {
* @adev: reference to ACPI device which consumes GPIO resource
* @flags: GPIO initialization flags
* @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
+ * @wake_capable: wake capability as provided by ACPI
* @pin_config: pin bias as provided by ACPI
* @polarity: interrupt polarity as provided by ACPI
* @triggering: triggering type as provided by ACPI
- * @wake_capable: wake capability as provided by ACPI
* @debounce: debounce timeout as provided by ACPI
* @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
*/
@@ -107,25 +84,14 @@ struct acpi_gpio_info {
struct acpi_device *adev;
enum gpiod_flags flags;
bool gpioint;
+ bool wake_capable;
int pin_config;
int polarity;
int triggering;
- bool wake_capable;
unsigned int debounce;
unsigned int quirks;
};
-/*
- * For GPIO chips which call acpi_gpiochip_request_interrupts() before late_init
- * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
- * late_initcall_sync() handler, so that other builtin drivers can register their
- * OpRegions before the event handlers can run. This list contains GPIO chips
- * for which the acpi_gpiochip_request_irqs() call has been deferred.
- */
-static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
-static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
-static bool acpi_gpio_deferred_req_irqs_done;
-
static int acpi_gpiochip_find(struct gpio_chip *gc, const void *data)
{
/* First check the actual GPIO device */
@@ -268,7 +234,7 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
event->irq_requested = true;
/* Make sure we trigger the initial state of edge-triggered IRQs */
- if (run_edge_events_on_boot &&
+ if (acpi_gpio_need_run_edge_events_on_boot() &&
(event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
value = gpiod_get_raw_value_cansleep(event->desc);
if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
@@ -350,42 +316,6 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
return desc;
}
-static bool acpi_gpio_in_ignore_list(const char *ignore_list, const char *controller_in,
- unsigned int pin_in)
-{
- const char *controller, *pin_str;
- unsigned int pin;
- char *endp;
- int len;
-
- controller = ignore_list;
- while (controller) {
- pin_str = strchr(controller, '@');
- if (!pin_str)
- goto err;
-
- len = pin_str - controller;
- if (len == strlen(controller_in) &&
- strncmp(controller, controller_in, len) == 0) {
- pin = simple_strtoul(pin_str + 1, &endp, 10);
- if (*endp != 0 && *endp != ',')
- goto err;
-
- if (pin == pin_in)
- return true;
- }
-
- controller = strchr(controller, ',');
- if (controller)
- controller++;
- }
-
- return false;
-err:
- pr_err_once("Error: Invalid value for gpiolib_acpi.ignore_...: %s\n", ignore_list);
- return false;
-}
-
static bool acpi_gpio_irq_is_wake(struct device *parent,
const struct acpi_resource_gpio *agpio)
{
@@ -394,7 +324,7 @@ static bool acpi_gpio_irq_is_wake(struct device *parent,
if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
return false;
- if (acpi_gpio_in_ignore_list(ignore_wake, dev_name(parent), pin)) {
+ if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_WAKE, dev_name(parent), pin)) {
dev_info(parent, "Ignoring wakeup on pin %u\n", pin);
return false;
}
@@ -437,7 +367,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
if (!handler)
return AE_OK;
- if (acpi_gpio_in_ignore_list(ignore_interrupt, dev_name(chip->parent), pin)) {
+ if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_INTERRUPT, dev_name(chip->parent), pin)) {
dev_info(chip->parent, "Ignoring interrupt on pin %u\n", pin);
return AE_OK;
}
@@ -525,7 +455,6 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
struct acpi_gpio_chip *acpi_gpio;
acpi_handle handle;
acpi_status status;
- bool defer;
if (!chip->parent || !chip->to_irq)
return;
@@ -544,14 +473,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
acpi_walk_resources(handle, METHOD_NAME__AEI,
acpi_gpiochip_alloc_event, acpi_gpio);
- mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
- defer = !acpi_gpio_deferred_req_irqs_done;
- if (defer)
- list_add(&acpi_gpio->deferred_req_irqs_list_entry,
- &acpi_gpio_deferred_req_irqs_list);
- mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
-
- if (defer)
+ if (acpi_gpio_add_to_deferred_list(&acpi_gpio->deferred_req_irqs_list_entry))
return;
acpi_gpiochip_request_irqs(acpi_gpio);
@@ -583,10 +505,7 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
if (ACPI_FAILURE(status))
return;
- mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
- if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry))
- list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
- mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+ acpi_gpio_remove_from_deferred_list(&acpi_gpio->deferred_req_irqs_list_entry);
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
if (event->irq_requested) {
@@ -604,6 +523,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
}
EXPORT_SYMBOL_GPL(acpi_gpiochip_free_interrupts);
+void __init acpi_gpio_process_deferred_list(struct list_head *list)
+{
+ struct acpi_gpio_chip *acpi_gpio, *tmp;
+
+ list_for_each_entry_safe(acpi_gpio, tmp, list, deferred_req_irqs_list_entry)
+ acpi_gpiochip_request_irqs(acpi_gpio);
+}
+
int acpi_dev_add_driver_gpios(struct acpi_device *adev,
const struct acpi_gpio_mapping *gpios)
{
@@ -653,12 +580,12 @@ static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
for (gm = adev->driver_gpios; gm->name; gm++)
if (!strcmp(name, gm->name) && gm->data && index < gm->size) {
- const struct acpi_gpio_params *par = gm->data + index;
+ const struct acpi_gpio_params *params = gm->data + index;
args->fwnode = acpi_fwnode_handle(adev);
- args->args[0] = par->crs_entry_index;
- args->args[1] = par->line_index;
- args->args[2] = par->active_low;
+ args->args[0] = params->crs_entry_index;
+ args->args[1] = params->line_index;
+ args->args[2] = params->active_low;
args->nargs = 3;
*quirks = gm->quirks;
@@ -743,10 +670,8 @@ static int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
}
struct acpi_gpio_lookup {
- struct acpi_gpio_info info;
- int index;
- u16 pin_index;
- bool active_low;
+ struct acpi_gpio_params params;
+ struct acpi_gpio_info *info;
struct gpio_desc *desc;
int n;
};
@@ -754,6 +679,8 @@ struct acpi_gpio_lookup {
static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
{
struct acpi_gpio_lookup *lookup = data;
+ struct acpi_gpio_params *params = &lookup->params;
+ struct acpi_gpio_info *info = lookup->info;
if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
return 1;
@@ -764,26 +691,26 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
struct gpio_desc *desc;
u16 pin_index;
- if (lookup->info.quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint)
- lookup->index++;
+ if (info->quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint)
+ params->crs_entry_index++;
- if (lookup->n++ != lookup->index)
+ if (lookup->n++ != params->crs_entry_index)
return 1;
- pin_index = lookup->pin_index;
+ pin_index = params->line_index;
if (pin_index >= agpio->pin_table_length)
return 1;
- if (lookup->info.quirks & ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER)
+ if (info->quirks & ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER)
desc = gpio_to_desc(agpio->pin_table[pin_index]);
else
desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
agpio->pin_table[pin_index]);
lookup->desc = desc;
- lookup->info.pin_config = agpio->pin_config;
- lookup->info.debounce = agpio->debounce_timeout;
- lookup->info.gpioint = gpioint;
- lookup->info.wake_capable = acpi_gpio_irq_is_wake(&lookup->info.adev->dev, agpio);
+ info->pin_config = agpio->pin_config;
+ info->debounce = agpio->debounce_timeout;
+ info->gpioint = gpioint;
+ info->wake_capable = acpi_gpio_irq_is_wake(&info->adev->dev, agpio);
/*
* Polarity and triggering are only specified for GpioInt
@@ -792,23 +719,23 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
* - ACPI_ACTIVE_LOW == GPIO_ACTIVE_LOW
* - ACPI_ACTIVE_HIGH == GPIO_ACTIVE_HIGH
*/
- if (lookup->info.gpioint) {
- lookup->info.polarity = agpio->polarity;
- lookup->info.triggering = agpio->triggering;
+ if (info->gpioint) {
+ info->polarity = agpio->polarity;
+ info->triggering = agpio->triggering;
} else {
- lookup->info.polarity = lookup->active_low;
+ info->polarity = params->active_low;
}
- lookup->info.flags = acpi_gpio_to_gpiod_flags(agpio, lookup->info.polarity);
+ info->flags = acpi_gpio_to_gpiod_flags(agpio, info->polarity);
}
return 1;
}
-static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup,
- struct acpi_gpio_info *info)
+static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup)
{
- struct acpi_device *adev = lookup->info.adev;
+ struct acpi_gpio_info *info = lookup->info;
+ struct acpi_device *adev = info->adev;
struct list_head res_list;
int ret;
@@ -825,22 +752,22 @@ static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup,
if (!lookup->desc)
return -ENOENT;
- if (info)
- *info = lookup->info;
return 0;
}
-static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
- const char *propname, int index,
+static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode, const char *propname,
struct acpi_gpio_lookup *lookup)
{
struct fwnode_reference_args args;
+ struct acpi_gpio_params *params = &lookup->params;
+ struct acpi_gpio_info *info = lookup->info;
+ unsigned int index = params->crs_entry_index;
unsigned int quirks = 0;
int ret;
memset(&args, 0, sizeof(args));
- ret = __acpi_node_get_property_reference(fwnode, propname, index, 3,
- &args);
+
+ ret = __acpi_node_get_property_reference(fwnode, propname, index, 3, &args);
if (ret) {
struct acpi_device *adev;
@@ -857,12 +784,12 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
if (args.nargs != 3)
return -EPROTO;
- lookup->index = args.args[0];
- lookup->pin_index = args.args[1];
- lookup->active_low = !!args.args[2];
+ params->crs_entry_index = args.args[0];
+ params->line_index = args.args[1];
+ params->active_low = !!args.args[2];
- lookup->info.adev = to_acpi_device_node(args.fwnode);
- lookup->info.quirks = quirks;
+ info->adev = to_acpi_device_node(args.fwnode);
+ info->quirks = quirks;
return 0;
}
@@ -871,96 +798,83 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
* acpi_get_gpiod_by_index() - get a GPIO descriptor from device resources
* @adev: pointer to an ACPI device to get GPIO from
* @propname: Property name of the GPIO (optional)
- * @index: index of GpioIo/GpioInt resource (starting from %0)
- * @info: info pointer to fill in (optional)
+ * @lookup: pointer to struct acpi_gpio_lookup to fill in
*
- * Function goes through ACPI resources for @adev and based on @index looks
+ * Function goes through ACPI resources for @adev and, based on @lookup.params.crs_entry_index, looks
* up a GpioIo/GpioInt resource, translates it to the Linux GPIO descriptor,
- * and returns it. @index matches GpioIo/GpioInt resources only so if there
- * are total %3 GPIO resources, the index goes from %0 to %2.
+ * and returns it. @lookup.params.crs_entry_index matches GpioIo/GpioInt
+ * resources only, so if there are 3 GPIO resources in total, the index goes
+ * from 0 to 2.
*
* If @propname is specified, the GPIO is looked up using a device property. In
* that case the crs_entry_index in @lookup selects the GPIO entry in the
* property value (in case there are multiple).
*
* Returns:
- * GPIO descriptor to use with Linux generic GPIO API.
- * If the GPIO cannot be translated or there is an error an ERR_PTR is
- * returned.
+ * 0 on success, negative errno on failure.
+ *
+ * On success, @lookup is filled with the GPIO descriptor to use with the
+ * Linux generic GPIO API. If the GPIO cannot be translated, an error is
+ * returned.
*
* Note: if the GPIO resource has multiple entries in the pin list, this
* function only returns the first.
*/
-static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
- const char *propname,
- int index,
- struct acpi_gpio_info *info)
+static int acpi_get_gpiod_by_index(struct acpi_device *adev, const char *propname,
+ struct acpi_gpio_lookup *lookup)
{
- struct acpi_gpio_lookup lookup;
+ struct acpi_gpio_params *params = &lookup->params;
+ struct acpi_gpio_info *info = lookup->info;
int ret;
- memset(&lookup, 0, sizeof(lookup));
- lookup.index = index;
-
if (propname) {
dev_dbg(&adev->dev, "GPIO: looking up %s\n", propname);
- ret = acpi_gpio_property_lookup(acpi_fwnode_handle(adev),
- propname, index, &lookup);
+ ret = acpi_gpio_property_lookup(acpi_fwnode_handle(adev), propname, lookup);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- dev_dbg(&adev->dev, "GPIO: _DSD returned %s %d %u %u\n",
- dev_name(&lookup.info.adev->dev), lookup.index,
- lookup.pin_index, lookup.active_low);
+ dev_dbg(&adev->dev, "GPIO: _DSD returned %s %u %u %u\n",
+ dev_name(&info->adev->dev),
+ params->crs_entry_index, params->line_index, params->active_low);
} else {
- dev_dbg(&adev->dev, "GPIO: looking up %d in _CRS\n", index);
- lookup.info.adev = adev;
+ dev_dbg(&adev->dev, "GPIO: looking up %u in _CRS\n", params->crs_entry_index);
+ info->adev = adev;
}
- ret = acpi_gpio_resource_lookup(&lookup, info);
- return ret ? ERR_PTR(ret) : lookup.desc;
+ return acpi_gpio_resource_lookup(lookup);
}
/**
* acpi_get_gpiod_from_data() - get a GPIO descriptor from ACPI data node
* @fwnode: pointer to an ACPI firmware node to get the GPIO information from
* @propname: Property name of the GPIO
- * @index: index of GpioIo/GpioInt resource (starting from %0)
- * @info: info pointer to fill in (optional)
+ * @lookup: pointer to struct acpi_gpio_lookup to fill in
*
* This function uses the property-based GPIO lookup to get to the GPIO
* resource with the relevant information from a data-only ACPI firmware node
* and uses that to obtain the GPIO descriptor to return.
*
* Returns:
- * GPIO descriptor to use with Linux generic GPIO API.
- * If the GPIO cannot be translated or there is an error an ERR_PTR is
- * returned.
+ * 0 on success, negative errno on failure.
+ *
+ * On success, @lookup is filled with the GPIO descriptor to use with the
+ * Linux generic GPIO API. If the GPIO cannot be translated, an error is
+ * returned.
*/
-static struct gpio_desc *acpi_get_gpiod_from_data(struct fwnode_handle *fwnode,
- const char *propname,
- int index,
- struct acpi_gpio_info *info)
+static int acpi_get_gpiod_from_data(struct fwnode_handle *fwnode, const char *propname,
+ struct acpi_gpio_lookup *lookup)
{
- struct acpi_gpio_lookup lookup;
int ret;
if (!is_acpi_data_node(fwnode))
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
if (!propname)
- return ERR_PTR(-EINVAL);
-
- memset(&lookup, 0, sizeof(lookup));
- lookup.index = index;
+ return -EINVAL;
- ret = acpi_gpio_property_lookup(fwnode, propname, index, &lookup);
+ ret = acpi_gpio_property_lookup(fwnode, propname, lookup);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- ret = acpi_gpio_resource_lookup(&lookup, info);
- return ret ? ERR_PTR(ret) : lookup.desc;
+ return acpi_gpio_resource_lookup(lookup);
}
static bool acpi_can_fallback_to_crs(struct acpi_device *adev,
@@ -982,17 +896,25 @@ __acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int
bool can_fallback, struct acpi_gpio_info *info)
{
struct acpi_device *adev = to_acpi_device_node(fwnode);
+ struct acpi_gpio_lookup lookup;
struct gpio_desc *desc;
char propname[32];
+ int ret;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.params.crs_entry_index = idx;
+ lookup.info = info;
/* Try first from _DSD */
for_each_gpio_property_name(propname, con_id) {
if (adev)
- desc = acpi_get_gpiod_by_index(adev,
- propname, idx, info);
+ ret = acpi_get_gpiod_by_index(adev, propname, &lookup);
else
- desc = acpi_get_gpiod_from_data(fwnode,
- propname, idx, info);
+ ret = acpi_get_gpiod_from_data(fwnode, propname, &lookup);
+ if (ret)
+ continue;
+
+ desc = lookup.desc;
if (PTR_ERR(desc) == -EPROBE_DEFER)
return desc;
@@ -1001,8 +923,13 @@ __acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int
}
/* Then from plain _CRS GPIOs */
- if (can_fallback)
- return acpi_get_gpiod_by_index(adev, NULL, idx, info);
+ if (can_fallback) {
+ ret = acpi_get_gpiod_by_index(adev, NULL, &lookup);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return lookup.desc;
+ }
return ERR_PTR(-ENOENT);
}
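
In short, the lookup rework above inverts the data flow: the caller owns both
the scratch state and the info structure to be filled, and the descriptor comes
back through the lookup rather than as an ERR_PTR. A minimal sketch of the new
calling convention, using names from this patch (idx and propname stand in for
the caller's arguments):

	struct acpi_gpio_info info = {};
	struct acpi_gpio_lookup lookup = {};
	struct gpio_desc *desc;
	int ret;

	lookup.params.crs_entry_index = idx;	/* which GpioIo/GpioInt entry */
	lookup.info = &info;			/* filled in during the resource walk */

	ret = acpi_get_gpiod_by_index(adev, propname, &lookup);
	desc = ret ? ERR_PTR(ret) : lookup.desc;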
@@ -1488,248 +1415,3 @@ int acpi_gpio_count(const struct fwnode_handle *fwnode, const char *con_id)
}
return count ? count : -ENOENT;
}
-
-/* Run deferred acpi_gpiochip_request_irqs() */
-static int __init acpi_gpio_handle_deferred_request_irqs(void)
-{
- struct acpi_gpio_chip *acpi_gpio, *tmp;
-
- mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
- list_for_each_entry_safe(acpi_gpio, tmp,
- &acpi_gpio_deferred_req_irqs_list,
- deferred_req_irqs_list_entry)
- acpi_gpiochip_request_irqs(acpi_gpio);
-
- acpi_gpio_deferred_req_irqs_done = true;
- mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
-
- return 0;
-}
-/* We must use _sync so that this runs after the first deferred_probe run */
-late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
-
-static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
- {
- /*
- * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
- * a non existing micro-USB-B connector which puts the HDMI
- * DDC pins in GPIO mode, breaking HDMI support.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .no_edge_events_on_boot = true,
- },
- },
- {
- /*
- * The Terra Pad 1061 has a micro-USB-B id-pin handler, which
- * instead of controlling the actual micro-USB-B turns the 5V
- * boost for its USB-A connector off. The actual micro-USB-B
- * connector is wired for charging only.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .no_edge_events_on_boot = true,
- },
- },
- {
- /*
- * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC uses an
- * external embedded-controller connected via I2C + an ACPI GPIO
- * event handler on INT33FFC:02 pin 12, causing spurious wakeups.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "INT33FC:02@12",
- },
- },
- {
- /*
- * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
- * external embedded-controller connected via I2C + an ACPI GPIO
- * event handler on INT33FF:01 pin 0, causing spurious wakeups.
- * When suspending by closing the LID, the power to the USB
- * keyboard is turned off, causing INT0002 ACPI events to
- * trigger once the XHCI controller notices the keyboard is
- * gone. So INT0002 events cause spurious wakeups too. Ignoring
- * EC wakes breaks wakeup when opening the lid, the user needs
- * to press the power-button to wakeup the system. The
- * alternative is suspend simply not working, which is worse.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "INT33FF:01@0,INT0002:00@2",
- },
- },
- {
- /*
- * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an
- * external embedded-controller connected via I2C + an ACPI GPIO
- * event handler on INT33FC:02 pin 28, causing spurious wakeups.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
- DMI_MATCH(DMI_BOARD_NAME, "815D"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "INT33FC:02@28",
- },
- },
- {
- /*
- * HP X2 10 models with Cherry Trail SoC + AXP288 PMIC use an
- * external embedded-controller connected via I2C + an ACPI GPIO
- * event handler on INT33FF:01 pin 0, causing spurious wakeups.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
- DMI_MATCH(DMI_BOARD_NAME, "813E"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "INT33FF:01@0",
- },
- },
- {
- /*
- * Interrupt storm caused from edge triggered floating pin
- * Found in BIOS UX325UAZ.300
- * https://bugzilla.kernel.org/show_bug.cgi?id=216208
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UAZ_UM325UAZ"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_interrupt = "AMDI0030:00@18",
- },
- },
- {
- /*
- * Spurious wakeups from TP_ATTN# pin
- * Found in BIOS 1.7.8
- * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
- */
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "ELAN0415:00@9",
- },
- },
- {
- /*
- * Spurious wakeups from TP_ATTN# pin
- * Found in BIOS 1.7.8
- * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
- */
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "ELAN0415:00@9",
- },
- },
- {
- /*
- * Spurious wakeups from TP_ATTN# pin
- * Found in BIOS 1.7.7
- */
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "SYNA1202:00@16",
- },
- },
- {
- /*
- * On the Peaq C1010 2-in-1 INT33FC:00 pin 3 is connected to
- * a "dolby" button. At the ACPI level an _AEI event-handler
- * is connected which sets an ACPI variable to 1 on both
- * edges. This variable can be polled + cleared to 0 using
- * WMI. But since the variable is set on both edges the WMI
- * interface is pretty useless even when polling.
- * So instead the x86-android-tablets code instantiates
- * a gpio-keys platform device for it.
- * Ignore the _AEI handler for the pin, so that it is not busy.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_interrupt = "INT33FC:00@3",
- },
- },
- {
- /*
- * Spurious wakeups from TP_ATTN# pin
- * Found in BIOS 0.35
- * https://gitlab.freedesktop.org/drm/amd/-/issues/3073
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
- DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "PNP0C50:00@8",
- },
- },
- {
- /*
- * Spurious wakeups from GPIO 11
- * Found in BIOS 1.04
- * https://gitlab.freedesktop.org/drm/amd/-/issues/3954
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_interrupt = "AMDI0030:00@11",
- },
- },
- {} /* Terminating entry */
-};
-
-static int __init acpi_gpio_setup_params(void)
-{
- const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
- const struct dmi_system_id *id;
-
- id = dmi_first_match(gpiolib_acpi_quirks);
- if (id)
- quirk = id->driver_data;
-
- if (run_edge_events_on_boot < 0) {
- if (quirk && quirk->no_edge_events_on_boot)
- run_edge_events_on_boot = 0;
- else
- run_edge_events_on_boot = 1;
- }
-
- if (ignore_wake == NULL && quirk && quirk->ignore_wake)
- ignore_wake = quirk->ignore_wake;
-
- if (ignore_interrupt == NULL && quirk && quirk->ignore_interrupt)
- ignore_interrupt = quirk->ignore_interrupt;
-
- return 0;
-}
-
-/* Directly after dmi_setup() which runs as core_initcall() */
-postcore_initcall(acpi_gpio_setup_params);
diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c
new file mode 100644
index 000000000000..219667315b2c
--- /dev/null
+++ b/drivers/gpio/gpiolib-acpi-quirks.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACPI quirks for GPIO ACPI helpers
+ *
+ * Author: Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/dmi.h>
+#include <linux/kstrtox.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/printk.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "gpiolib-acpi.h"
+
+static int run_edge_events_on_boot = -1;
+module_param(run_edge_events_on_boot, int, 0444);
+MODULE_PARM_DESC(run_edge_events_on_boot,
+ "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
+
+static char *ignore_wake;
+module_param(ignore_wake, charp, 0444);
+MODULE_PARM_DESC(ignore_wake,
+ "controller@pin combos on which to ignore the ACPI wake flag "
+ "ignore_wake=controller@pin[,controller@pin[,...]]");
+
+static char *ignore_interrupt;
+module_param(ignore_interrupt, charp, 0444);
+MODULE_PARM_DESC(ignore_interrupt,
+ "controller@pin combos on which to ignore interrupt "
+ "ignore_interrupt=controller@pin[,controller@pin[,...]]");
+
+/*
+ * For GPIO chips which call acpi_gpiochip_request_interrupts() before late_init
+ * (i.e., builtin drivers) we register the ACPI GpioInt IRQ handlers from a
+ * late_initcall_sync() handler, so that other builtin drivers can register their
+ * OpRegions before the event handlers can run. This list contains GPIO chips
+ * for which the acpi_gpiochip_request_irqs() call has been deferred.
+ */
+static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
+static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
+static bool acpi_gpio_deferred_req_irqs_done;
+
+bool acpi_gpio_add_to_deferred_list(struct list_head *list)
+{
+ bool defer;
+
+ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+ defer = !acpi_gpio_deferred_req_irqs_done;
+ if (defer)
+ list_add(list, &acpi_gpio_deferred_req_irqs_list);
+ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
+ return defer;
+}
+
+void acpi_gpio_remove_from_deferred_list(struct list_head *list)
+{
+ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+ if (!list_empty(list))
+ list_del_init(list);
+ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+}
+
+int acpi_gpio_need_run_edge_events_on_boot(void)
+{
+ return run_edge_events_on_boot;
+}
+
+bool acpi_gpio_in_ignore_list(enum acpi_gpio_ignore_list list,
+ const char *controller_in, unsigned int pin_in)
+{
+ const char *ignore_list, *controller, *pin_str;
+ unsigned int pin;
+ char *endp;
+ int len;
+
+ switch (list) {
+ case ACPI_GPIO_IGNORE_WAKE:
+ ignore_list = ignore_wake;
+ break;
+ case ACPI_GPIO_IGNORE_INTERRUPT:
+ ignore_list = ignore_interrupt;
+ break;
+ default:
+ return false;
+ }
+
+ controller = ignore_list;
+ while (controller) {
+ pin_str = strchr(controller, '@');
+ if (!pin_str)
+ goto err;
+
+ len = pin_str - controller;
+ if (len == strlen(controller_in) &&
+ strncmp(controller, controller_in, len) == 0) {
+ pin = simple_strtoul(pin_str + 1, &endp, 10);
+ if (*endp != 0 && *endp != ',')
+ goto err;
+
+ if (pin == pin_in)
+ return true;
+ }
+
+ controller = strchr(controller, ',');
+ if (controller)
+ controller++;
+ }
+
+ return false;
+err:
+ pr_err_once("Error: Invalid value for gpiolib_acpi.ignore_...: %s\n", ignore_list);
+ return false;
+}
+
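
The list format matches the MODULE_PARM_DESC strings above; for example,
booting with the parameter below (module prefix as in the pr_err_once()
message) suppresses the ACPI wake flag on two controller/pin combos, which a
caller then tests one combo at a time (hypothetical call site):

	gpiolib_acpi.ignore_wake=INT33FF:01@0,INT0002:00@2

	/* e.g. before arming a wake IRQ */
	if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_WAKE, "INT33FF:01", 0))
		return false;	/* treat the pin as not wake capable */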
+/* Run deferred acpi_gpiochip_request_irqs() */
+static int __init acpi_gpio_handle_deferred_request_irqs(void)
+{
+ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+ acpi_gpio_process_deferred_list(&acpi_gpio_deferred_req_irqs_list);
+ acpi_gpio_deferred_req_irqs_done = true;
+ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
+ return 0;
+}
+/* We must use _sync so that this runs after the first deferred_probe run */
+late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
+
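
The helpers above and this initcall implement a simple handshake: chips that
register before the flush park themselves on the list, chips that register
after it request their IRQs directly. A sketch of the chip-side pairing,
mirroring acpi_gpiochip_request_interrupts() and
acpi_gpiochip_free_interrupts() earlier in this patch:

	/* registration path */
	if (acpi_gpio_add_to_deferred_list(&acpi_gpio->deferred_req_irqs_list_entry))
		return;	/* flushed later by the late_initcall_sync() above */
	acpi_gpiochip_request_irqs(acpi_gpio);

	/* teardown path */
	acpi_gpio_remove_from_deferred_list(&acpi_gpio->deferred_req_irqs_list_entry);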
+struct acpi_gpiolib_dmi_quirk {
+ bool no_edge_events_on_boot;
+ char *ignore_wake;
+ char *ignore_interrupt;
+};
+
+static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
+ {
+ /*
+ * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
+ * a non-existent micro-USB-B connector which puts the HDMI
+ * DDC pins in GPIO mode, breaking HDMI support.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .no_edge_events_on_boot = true,
+ },
+ },
+ {
+ /*
+ * The Terra Pad 1061 has a micro-USB-B id-pin handler, which
+ * instead of controlling the actual micro-USB-B turns the 5V
+ * boost for its USB-A connector off. The actual micro-USB-B
+ * connector is wired for charging only.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .no_edge_events_on_boot = true,
+ },
+ },
+ {
+ /*
+ * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC uses an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FFC:02 pin 12, causing spurious wakeups.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FC:02@12",
+ },
+ },
+ {
+ /*
+ * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FF:01 pin 0, causing spurious wakeups.
+ * When suspending by closing the LID, the power to the USB
+ * keyboard is turned off, causing INT0002 ACPI events to
+ * trigger once the XHCI controller notices the keyboard is
+ * gone. So INT0002 events cause spurious wakeups too. Ignoring
+ * EC wakes breaks wakeup when opening the lid; the user needs
+ * to press the power button to wake up the system. The
+ * alternative is suspend simply not working, which is worse.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FF:01@0,INT0002:00@2",
+ },
+ },
+ {
+ /*
+ * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FC:02 pin 28, causing spurious wakeups.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ DMI_MATCH(DMI_BOARD_NAME, "815D"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FC:02@28",
+ },
+ },
+ {
+ /*
+ * HP X2 10 models with Cherry Trail SoC + AXP288 PMIC use an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FF:01 pin 0, causing spurious wakeups.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ DMI_MATCH(DMI_BOARD_NAME, "813E"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FF:01@0",
+ },
+ },
+ {
+ /*
+ * Interrupt storm caused by an edge-triggered floating pin
+ * Found in BIOS UX325UAZ.300
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216208
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UAZ_UM325UAZ"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "AMDI0030:00@18",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.8
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "ELAN0415:00@9",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.8
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "ELAN0415:00@9",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.7
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "SYNA1202:00@16",
+ },
+ },
+ {
+ /*
+ * On the Peaq C1010 2-in-1 INT33FC:00 pin 3 is connected to
+ * a "dolby" button. At the ACPI level an _AEI event-handler
+ * is connected which sets an ACPI variable to 1 on both
+ * edges. This variable can be polled + cleared to 0 using
+ * WMI. But since the variable is set on both edges the WMI
+ * interface is pretty useless even when polling.
+ * So instead the x86-android-tablets code instantiates
+ * a gpio-keys platform device for it.
+ * Ignore the _AEI handler for the pin, so that it is not busy.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "INT33FC:00@3",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 0.35
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3073
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "PNP0C50:00@8",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from GPIO 11
+ * Found in BIOS 1.04
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3954
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "AMDI0030:00@11",
+ },
+ },
+ {} /* Terminating entry */
+};
+
+static int __init acpi_gpio_setup_params(void)
+{
+ const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
+ const struct dmi_system_id *id;
+
+ id = dmi_first_match(gpiolib_acpi_quirks);
+ if (id)
+ quirk = id->driver_data;
+
+ if (run_edge_events_on_boot < 0) {
+ if (quirk && quirk->no_edge_events_on_boot)
+ run_edge_events_on_boot = 0;
+ else
+ run_edge_events_on_boot = 1;
+ }
+
+ if (ignore_wake == NULL && quirk && quirk->ignore_wake)
+ ignore_wake = quirk->ignore_wake;
+
+ if (ignore_interrupt == NULL && quirk && quirk->ignore_interrupt)
+ ignore_interrupt = quirk->ignore_interrupt;
+
+ return 0;
+}
+
+/* Directly after dmi_setup() which runs as core_initcall() */
+postcore_initcall(acpi_gpio_setup_params);
diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h
index 7e1c51d04040..a90267470a4e 100644
--- a/drivers/gpio/gpiolib-acpi.h
+++ b/drivers/gpio/gpiolib-acpi.h
@@ -58,4 +58,19 @@ static inline int acpi_gpio_count(const struct fwnode_handle *fwnode,
}
#endif
+void acpi_gpio_process_deferred_list(struct list_head *list);
+
+bool acpi_gpio_add_to_deferred_list(struct list_head *list);
+void acpi_gpio_remove_from_deferred_list(struct list_head *list);
+
+int acpi_gpio_need_run_edge_events_on_boot(void);
+
+enum acpi_gpio_ignore_list {
+ ACPI_GPIO_IGNORE_WAKE,
+ ACPI_GPIO_IGNORE_INTERRUPT,
+};
+
+bool acpi_gpio_in_ignore_list(enum acpi_gpio_ignore_list list,
+ const char *controller_in, unsigned int pin_in);
+
#endif /* GPIOLIB_ACPI_H */
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 107d75558b5a..e6a289fa0f8f 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -1366,9 +1366,6 @@ static long linereq_set_values(struct linereq *lr, void __user *ip)
/* scan requested lines to determine the subset to be set */
for (num_set = 0, i = 0; i < lr->num_lines; i++) {
if (lv.mask & BIT_ULL(i)) {
- /* setting inputs is not allowed */
- if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
- return -EPERM;
/* add to compacted values */
if (lv.bits & BIT_ULL(i))
__set_bit(num_set, vals);
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 120d1ec5af3b..4d5f83b17624 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -6,7 +6,7 @@
* Copyright (c) 2011 John Crispin <john@phrozen.org>
*/
-#include <linux/device.h>
+#include <linux/device/devres.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/gfp.h>
@@ -19,32 +19,14 @@
struct fwnode_handle;
struct lock_class_key;
-static void devm_gpiod_release(struct device *dev, void *res)
+static void devm_gpiod_release(void *desc)
{
- struct gpio_desc **desc = res;
-
- gpiod_put(*desc);
-}
-
-static int devm_gpiod_match(struct device *dev, void *res, void *data)
-{
- struct gpio_desc **this = res, **gpio = data;
-
- return *this == *gpio;
+ gpiod_put(desc);
}
-static void devm_gpiod_release_array(struct device *dev, void *res)
+static void devm_gpiod_release_array(void *descs)
{
- struct gpio_descs **descs = res;
-
- gpiod_put_array(*descs);
-}
-
-static int devm_gpiod_match_array(struct device *dev, void *res, void *data)
-{
- struct gpio_descs **this = res, **gpios = data;
-
- return *this == *gpios;
+ gpiod_put_array(descs);
}
/**
@@ -114,8 +96,8 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
unsigned int idx,
enum gpiod_flags flags)
{
- struct gpio_desc **dr;
struct gpio_desc *desc;
+ int ret;
desc = gpiod_get_index(dev, con_id, idx, flags);
if (IS_ERR(desc))
@@ -126,23 +108,16 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
* already under resource management by this device.
*/
if (flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE) {
- struct devres *dres;
+ bool dres;
- dres = devres_find(dev, devm_gpiod_release,
- devm_gpiod_match, &desc);
+ dres = devm_is_action_added(dev, devm_gpiod_release, desc);
if (dres)
return desc;
}
- dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
- GFP_KERNEL);
- if (!dr) {
- gpiod_put(desc);
- return ERR_PTR(-ENOMEM);
- }
-
- *dr = desc;
- devres_add(dev, dr);
+ ret = devm_add_action_or_reset(dev, devm_gpiod_release, desc);
+ if (ret)
+ return ERR_PTR(ret);
return desc;
}
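
The conversion above replaces a bespoke devres type with the generic devm
action helpers. The underlying pattern, shown with a hypothetical resource and
release callback (devm_add_action_or_reset() runs the action immediately when
registration fails, so that error path needs no manual cleanup):

	static void my_buf_release(void *buf)
	{
		kfree(buf);
	}

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = devm_add_action_or_reset(dev, my_buf_release, buf);
	if (ret)
		return ret;	/* my_buf_release(buf) has already run */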
@@ -171,22 +146,16 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
enum gpiod_flags flags,
const char *label)
{
- struct gpio_desc **dr;
struct gpio_desc *desc;
-
- dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
- GFP_KERNEL);
- if (!dr)
- return ERR_PTR(-ENOMEM);
+ int ret;
desc = gpiod_find_and_request(dev, fwnode, con_id, index, flags, label, false);
- if (IS_ERR(desc)) {
- devres_free(dr);
+ if (IS_ERR(desc))
return desc;
- }
- *dr = desc;
- devres_add(dev, dr);
+ ret = devm_add_action_or_reset(dev, devm_gpiod_release, desc);
+ if (ret)
+ return ERR_PTR(ret);
return desc;
}
@@ -244,22 +213,16 @@ struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
- struct gpio_descs **dr;
struct gpio_descs *descs;
-
- dr = devres_alloc(devm_gpiod_release_array,
- sizeof(struct gpio_descs *), GFP_KERNEL);
- if (!dr)
- return ERR_PTR(-ENOMEM);
+ int ret;
descs = gpiod_get_array(dev, con_id, flags);
- if (IS_ERR(descs)) {
- devres_free(dr);
+ if (IS_ERR(descs))
return descs;
- }
- *dr = descs;
- devres_add(dev, dr);
+ ret = devm_add_action_or_reset(dev, devm_gpiod_release_array, descs);
+ if (ret)
+ return ERR_PTR(ret);
return descs;
}
@@ -307,8 +270,7 @@ EXPORT_SYMBOL_GPL(devm_gpiod_get_array_optional);
*/
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
{
- WARN_ON(devres_release(dev, devm_gpiod_release, devm_gpiod_match,
- &desc));
+ devm_release_action(dev, devm_gpiod_release, desc);
}
EXPORT_SYMBOL_GPL(devm_gpiod_put);
@@ -332,13 +294,13 @@ void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc)
if (IS_ERR_OR_NULL(desc))
return;
- ret = devres_destroy(dev, devm_gpiod_release,
- devm_gpiod_match, &desc);
+
/*
* If the GPIO descriptor is requested as nonexclusive, we
* may call this function several times on the same descriptor
* so it is OK if devm_remove_action_nowarn() returns -ENOENT.
*/
+ ret = devm_remove_action_nowarn(dev, devm_gpiod_release, desc);
if (ret == -ENOENT)
return;
/* Anything else we should warn about */
@@ -357,8 +319,7 @@ EXPORT_SYMBOL_GPL(devm_gpiod_unhinge);
*/
void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs)
{
- WARN_ON(devres_release(dev, devm_gpiod_release_array,
- devm_gpiod_match_array, &descs));
+	devm_release_action(dev, devm_gpiod_release_array, descs);
}
EXPORT_SYMBOL_GPL(devm_gpiod_put_array);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 65f6a7177b78..73ba73b31cb1 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -224,6 +224,15 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
*/
{ "lantiq,pci-xway", "gpio-reset", false },
#endif
+#if IS_ENABLED(CONFIG_REGULATOR_S5M8767)
+ /*
+	 * According to S5M8767, the DVS and DS pins are
+	 * active-high signals. However, exynos5250-spring.dts uses an
+	 * active-low setting.
+ */
+ { "samsung,s5m8767-pmic", "s5m8767,pmic-buck-dvs-gpios", true },
+ { "samsung,s5m8767-pmic", "s5m8767,pmic-buck-ds-gpios", true },
+#endif
#if IS_ENABLED(CONFIG_TOUCHSCREEN_TSC2005)
/*
* DTS for Nokia N900 incorrectly specified "active high"
@@ -1278,3 +1287,11 @@ void of_gpiochip_remove(struct gpio_chip *chip)
{
of_node_put(dev_of_node(&chip->gpiodev->dev));
}
+
+bool of_gpiochip_instance_match(struct gpio_chip *gc, unsigned int index)
+{
+ if (gc->of_node_instance_match)
+ return gc->of_node_instance_match(gc, index);
+
+ return false;
+}
diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h
index 16d6ac8cb156..3eebfac290c5 100644
--- a/drivers/gpio/gpiolib-of.h
+++ b/drivers/gpio/gpiolib-of.h
@@ -22,6 +22,7 @@ struct gpio_desc *of_find_gpio(struct device_node *np,
unsigned long *lookupflags);
int of_gpiochip_add(struct gpio_chip *gc);
void of_gpiochip_remove(struct gpio_chip *gc);
+bool of_gpiochip_instance_match(struct gpio_chip *gc, unsigned int index);
int of_gpio_count(const struct fwnode_handle *fwnode, const char *con_id);
#else
static inline struct gpio_desc *of_find_gpio(struct device_node *np,
@@ -33,6 +34,11 @@ static inline struct gpio_desc *of_find_gpio(struct device_node *np,
}
static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
+static inline bool of_gpiochip_instance_match(struct gpio_chip *gc,
+ unsigned int index)
+{
+ return false;
+}
static inline int of_gpio_count(const struct fwnode_handle *fwnode,
const char *con_id)
{
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 1acfa43bf1ab..4a3aa09dad9d 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -134,17 +134,15 @@ static ssize_t value_store(struct device *dev,
long value;
status = kstrtol(buf, 0, &value);
+ if (status)
+ return status;
guard(mutex)(&data->mutex);
- if (!test_bit(FLAG_IS_OUT, &desc->flags))
- return -EPERM;
-
+ status = gpiod_set_value_cansleep(desc, value);
if (status)
return status;
- gpiod_set_value_cansleep(desc, value);
-
return size;
}
static DEVICE_ATTR_PREALLOC(value, S_IWUSR | S_IRUGO, value_show, value_store);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 113c5d90f2df..fdafa0df1b43 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -266,6 +266,20 @@ struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc)
EXPORT_SYMBOL_GPL(gpiod_to_gpio_device);
/**
+ * gpiod_is_equal() - Check if two GPIO descriptors refer to the same pin.
+ * @desc: Descriptor to compare.
+ * @other: The second descriptor to compare against.
+ *
+ * Returns:
+ * True if the descriptors refer to the same physical pin. False otherwise.
+ */
+bool gpiod_is_equal(struct gpio_desc *desc, struct gpio_desc *other)
+{
+ return desc == other;
+}
+EXPORT_SYMBOL_GPL(gpiod_is_equal);
+
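
A consumer-side example of why this helper exists: gpio_desc is opaque outside
gpiolib, so drivers cannot compare pins themselves (hypothetical driver code):

	if (gpiod_is_equal(priv->reset_gpiod, shared_gpiod))
		dev_dbg(dev, "reset line is shared with the other function\n");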
+/**
* gpio_device_get_base() - Get the base GPIO number allocated by this device
* @gdev: GPIO device
*
@@ -342,6 +356,37 @@ static int gpiochip_find_base_unlocked(u16 ngpio)
}
}
+/*
+ * This descriptor validation needs to be inserted verbatim into each
+ * function taking a descriptor, so we need to use a preprocessor
+ * macro to avoid endless duplication. If the desc is NULL it is an
+ * optional GPIO and calls should just bail out.
+ */
+static int validate_desc(const struct gpio_desc *desc, const char *func)
+{
+ if (!desc)
+ return 0;
+
+ if (IS_ERR(desc)) {
+ pr_warn("%s: invalid GPIO (errorpointer: %pe)\n", func, desc);
+ return PTR_ERR(desc);
+ }
+
+ return 1;
+}
+
+#define VALIDATE_DESC(desc) do { \
+ int __valid = validate_desc(desc, __func__); \
+ if (__valid <= 0) \
+ return __valid; \
+ } while (0)
+
+#define VALIDATE_DESC_VOID(desc) do { \
+ int __valid = validate_desc(desc, __func__); \
+ if (__valid <= 0) \
+ return; \
+ } while (0)
+
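
Usage is unchanged by the move; a hypothetical accessor shows the convention (a
NULL descriptor is an optional GPIO and succeeds as a no-op, an error pointer
propagates its errno):

	int gpiod_frob(struct gpio_desc *desc)
	{
		VALIDATE_DESC(desc);	/* returns 0 for NULL, PTR_ERR() for ERR_PTR */

		/* desc is known valid from here on */
		return 0;
	}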
static int gpiochip_get_direction(struct gpio_chip *gc, unsigned int offset)
{
int ret;
@@ -376,11 +421,8 @@ int gpiod_get_direction(struct gpio_desc *desc)
unsigned int offset;
int ret;
- /*
- * We cannot use VALIDATE_DESC() as we must not return 0 for a NULL
- * descriptor like we usually do.
- */
- if (IS_ERR_OR_NULL(desc))
+ ret = validate_desc(desc, __func__);
+ if (ret <= 0)
return -EINVAL;
CLASS(gpio_chip_guard, guard)(desc);
@@ -880,14 +922,12 @@ static void machine_gpiochip_add(struct gpio_chip *gc)
{
struct gpiod_hog *hog;
- mutex_lock(&gpio_machine_hogs_mutex);
+ guard(mutex)(&gpio_machine_hogs_mutex);
list_for_each_entry(hog, &gpio_machine_hogs, list) {
if (!strcmp(gc->label, hog->chip_label))
gpiochip_machine_hog(gc, hog);
}
-
- mutex_unlock(&gpio_machine_hogs_mutex);
}
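
This and several hunks below make the same mechanical change: paired
mutex_lock()/mutex_unlock() calls become a scope-based guard from
<linux/cleanup.h>, so the lock is released automatically on every return path:

	guard(mutex)(&gpio_machine_hogs_mutex);
	/* critical section; any return from this scope unlocks */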
static void gpiochip_setup_devs(void)
@@ -981,7 +1021,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
struct gpio_device *gdev;
unsigned int desc_index;
int base = 0;
- int ret = 0;
+ int ret;
/* Only allow one set() and one set_multiple(). */
if ((gc->set && gc->set_rv) ||
@@ -1006,11 +1046,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
device_set_node(&gdev->dev, gpiochip_choose_fwnode(gc));
- gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);
- if (gdev->id < 0) {
- ret = gdev->id;
+ ret = ida_alloc(&gpio_ida, GFP_KERNEL);
+ if (ret < 0)
goto err_free_gdev;
- }
+ gdev->id = ret;
ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
if (ret)
@@ -1513,9 +1552,8 @@ static int gpiochip_hierarchy_irq_domain_translate(struct irq_domain *d,
unsigned int *type)
{
/* We support standard DT translation */
- if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
- return irq_domain_translate_twocell(d, fwspec, hwirq, type);
- }
+ if (is_of_node(fwspec->fwnode))
+ return irq_domain_translate_twothreecell(d, fwspec, hwirq, type);
/* This is for board files and others not using DT */
if (is_fwnode_irqchip(fwspec->fwnode)) {
@@ -1817,11 +1855,26 @@ static void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
irq_set_chip_data(irq, NULL);
}
+static int gpiochip_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ struct fwnode_handle *fwnode = fwspec->fwnode;
+ struct gpio_chip *gc = d->host_data;
+ unsigned int index = fwspec->param[0];
+
+ if (fwspec->param_count == 3 && is_of_node(fwnode))
+ return of_gpiochip_instance_match(gc, index);
+
+ /* Fallback for twocells */
+ return (fwnode && (d->fwnode == fwnode) && (d->bus_token == bus_token));
+}
+
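
For three-cell specifiers the first cell selects the chip instance, which the
select() callback delegates to the new of_node_instance_match() hook. A
hypothetical chip-side implementation:

	static bool my_gpio_instance_match(struct gpio_chip *gc, unsigned int index)
	{
		struct my_gpio *priv = gpiochip_get_data(gc);

		return priv->instance == index;
	}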
static const struct irq_domain_ops gpiochip_domain_ops = {
.map = gpiochip_irq_map,
.unmap = gpiochip_irq_unmap,
+ .select = gpiochip_irq_select,
/* Virtually all GPIO irqchips are twocell:ed */
- .xlate = irq_domain_xlate_twocell,
+ .xlate = irq_domain_xlate_twothreecell,
};
static struct irq_domain *gpiochip_simple_create_domain(struct gpio_chip *gc)
@@ -1841,7 +1894,6 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
{
struct irq_domain *domain = gc->irq.domain;
-#ifdef CONFIG_GPIOLIB_IRQCHIP
/*
* Avoid race condition with other code, which tries to lookup
* an IRQ before the irqchip has been properly registered,
@@ -1849,7 +1901,6 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
*/
if (!gc->irq.initialized)
return -EPROBE_DEFER;
-#endif
if (!gpiochip_irqchip_irq_valid(gc, offset))
return -ENXIO;
@@ -2411,37 +2462,6 @@ out_clear_bit:
return ret;
}
-/*
- * This descriptor validation needs to be inserted verbatim into each
- * function taking a descriptor, so we need to use a preprocessor
- * macro to avoid endless duplication. If the desc is NULL it is an
- * optional GPIO and calls should just bail out.
- */
-static int validate_desc(const struct gpio_desc *desc, const char *func)
-{
- if (!desc)
- return 0;
-
- if (IS_ERR(desc)) {
- pr_warn("%s: invalid GPIO (errorpointer)\n", func);
- return PTR_ERR(desc);
- }
-
- return 1;
-}
-
-#define VALIDATE_DESC(desc) do { \
- int __valid = validate_desc(desc, __func__); \
- if (__valid <= 0) \
- return __valid; \
- } while (0)
-
-#define VALIDATE_DESC_VOID(desc) do { \
- int __valid = validate_desc(desc, __func__); \
- if (__valid <= 0) \
- return; \
- } while (0)
-
int gpiod_request(struct gpio_desc *desc, const char *label)
{
int ret = -EPROBE_DEFER;
@@ -3051,7 +3071,7 @@ set_output_flag:
*/
int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
{
- int ret = 0;
+ int ret;
VALIDATE_DESC(desc);
@@ -3084,7 +3104,7 @@ EXPORT_SYMBOL_GPL(gpiod_enable_hw_timestamp_ns);
*/
int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
{
- int ret = 0;
+ int ret;
VALIDATE_DESC(desc);
@@ -3599,6 +3619,9 @@ static int gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
static int gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
{
+ if (unlikely(!test_bit(FLAG_IS_OUT, &desc->flags)))
+ return -EPERM;
+
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
return -ENODEV;
@@ -3670,6 +3693,12 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
if (!can_sleep)
WARN_ON(array_info->gdev->can_sleep);
+ for (i = 0; i < array_size; i++) {
+ if (unlikely(!test_bit(FLAG_IS_OUT,
+ &desc_array[i]->flags)))
+ return -EPERM;
+ }
+
guard(srcu)(&array_info->gdev->srcu);
gc = srcu_dereference(array_info->gdev->chip,
&array_info->gdev->srcu);
@@ -3729,6 +3758,9 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
int hwgpio = gpio_chip_hwgpio(desc);
int value = test_bit(i, value_bitmap);
+ if (unlikely(!test_bit(FLAG_IS_OUT, &desc->flags)))
+ return -EPERM;
+
/*
* Pins applicable for fast input but not for
* fast output processing may have been already
@@ -3950,13 +3982,10 @@ int gpiod_to_irq(const struct gpio_desc *desc)
struct gpio_device *gdev;
struct gpio_chip *gc;
int offset;
+ int ret;
- /*
- * Cannot VALIDATE_DESC() here as gpiod_to_irq() consumer semantics
- * requires this function to not return zero on an invalid descriptor
- * but rather a negative error number.
- */
- if (IS_ERR_OR_NULL(desc))
+ ret = validate_desc(desc, __func__);
+ if (ret <= 0)
return -EINVAL;
gdev = desc->gdev;
@@ -3968,13 +3997,12 @@ int gpiod_to_irq(const struct gpio_desc *desc)
offset = gpio_chip_hwgpio(desc);
if (gc->to_irq) {
- int retirq = gc->to_irq(gc, offset);
+ ret = gc->to_irq(gc, offset);
+ if (ret)
+ return ret;
/* Zero means NO_IRQ */
- if (!retirq)
- return -ENXIO;
-
- return retirq;
+ return -ENXIO;
}
#ifdef CONFIG_GPIOLIB_IRQCHIP
if (gc->irq.chip) {
@@ -4329,12 +4357,10 @@ void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n)
{
unsigned int i;
- mutex_lock(&gpio_lookup_lock);
+ guard(mutex)(&gpio_lookup_lock);
for (i = 0; i < n; i++)
list_add_tail(&tables[i]->list, &gpio_lookup_list);
-
- mutex_unlock(&gpio_lookup_lock);
}
/**
@@ -4393,11 +4419,9 @@ void gpiod_remove_lookup_table(struct gpiod_lookup_table *table)
if (!table)
return;
- mutex_lock(&gpio_lookup_lock);
+ guard(mutex)(&gpio_lookup_lock);
list_del(&table->list);
-
- mutex_unlock(&gpio_lookup_lock);
}
EXPORT_SYMBOL_GPL(gpiod_remove_lookup_table);
@@ -4409,7 +4433,7 @@ void gpiod_add_hogs(struct gpiod_hog *hogs)
{
struct gpiod_hog *hog;
- mutex_lock(&gpio_machine_hogs_mutex);
+ guard(mutex)(&gpio_machine_hogs_mutex);
for (hog = &hogs[0]; hog->chip_label; hog++) {
list_add_tail(&hog->list, &gpio_machine_hogs);
@@ -4423,8 +4447,6 @@ void gpiod_add_hogs(struct gpiod_hog *hogs)
if (gdev)
gpiochip_machine_hog(gpio_device_get_chip(gdev), hog);
}
-
- mutex_unlock(&gpio_machine_hogs_mutex);
}
EXPORT_SYMBOL_GPL(gpiod_add_hogs);
@@ -4432,10 +4454,10 @@ void gpiod_remove_hogs(struct gpiod_hog *hogs)
{
struct gpiod_hog *hog;
- mutex_lock(&gpio_machine_hogs_mutex);
+ guard(mutex)(&gpio_machine_hogs_mutex);
+
for (hog = &hogs[0]; hog->chip_label; hog++)
list_del(&hog->list);
- mutex_unlock(&gpio_machine_hogs_mutex);
}
EXPORT_SYMBOL_GPL(gpiod_remove_hogs);
@@ -5114,8 +5136,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_optional);
*/
void gpiod_put(struct gpio_desc *desc)
{
- if (desc)
- gpiod_free(desc);
+ gpiod_free(desc);
}
EXPORT_SYMBOL_GPL(gpiod_put);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f01925ed8176..f094797f3b2b 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -26,6 +26,11 @@ menuconfig DRM
details. You should also select and configure AGP
(/dev/agpgart) support if it is available for your platform.
+menu "DRM debugging options"
+depends on DRM
+source "drivers/gpu/drm/Kconfig.debug"
+endmenu
+
if DRM
config DRM_MIPI_DBI
@@ -37,65 +42,6 @@ config DRM_MIPI_DSI
bool
depends on DRM
-config DRM_DEBUG_MM
- bool "Insert extra checks and debug info into the DRM range managers"
- default n
- depends on DRM
- depends on STACKTRACE_SUPPORT
- select STACKDEPOT
- help
- Enable allocation tracking of memory manager and leak detection on
- shutdown.
-
- Recommended for driver developers only.
-
- If in doubt, say "N".
-
-config DRM_USE_DYNAMIC_DEBUG
- bool "use dynamic debug to implement drm.debug"
- default n
- depends on BROKEN
- depends on DRM
- depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
- depends on JUMP_LABEL
- help
- Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
- Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
- bytes per callsite, the .data costs can be substantial, and
- are therefore configurable.
-
-config DRM_KUNIT_TEST_HELPERS
- tristate
- depends on DRM && KUNIT
- select DRM_KMS_HELPER
- help
- KUnit Helpers for KMS drivers.
-
-config DRM_KUNIT_TEST
- tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
- depends on DRM && KUNIT && MMU
- select DRM_BUDDY
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HDMI_STATE_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_EXEC
- select DRM_EXPORT_FOR_TESTS if m
- select DRM_GEM_SHMEM_HELPER
- select DRM_KUNIT_TEST_HELPERS
- select DRM_LIB_RANDOM
- select PRIME_NUMBERS
- default KUNIT_ALL_TESTS
- help
- This builds unit tests for DRM. This option is not useful for
- distributions or general kernels, but only for kernel
- developers working on DRM and associated drivers.
-
- For more information on KUnit and unit tests in general,
- please refer to the KUnit documentation in
- Documentation/dev-tools/kunit/.
-
- If in doubt, say "N".
-
config DRM_KMS_HELPER
tristate
depends on DRM
@@ -247,23 +193,6 @@ config DRM_TTM
GPU memory types. Will be enabled automatically if a device driver
uses it.
-config DRM_TTM_KUNIT_TEST
- tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
- default n
- depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
- select DRM_TTM
- select DRM_BUDDY
- select DRM_EXPORT_FOR_TESTS if m
- select DRM_KUNIT_TEST_HELPERS
- default KUNIT_ALL_TESTS
- help
- Enables unit tests for TTM, a GPU memory manager subsystem used
- to manage memory buffers. This option is mostly useful for kernel
- developers. It depends on (UML || COMPILE_TEST) since no other driver
- which uses TTM can be loaded while running the tests.
-
- If in doubt, say "N".
-
config DRM_EXEC
tristate
depends on DRM
@@ -335,6 +264,8 @@ config DRM_SCHED
tristate
depends on DRM
+source "drivers/gpu/drm/sysfb/Kconfig"
+
source "drivers/gpu/drm/arm/Kconfig"
source "drivers/gpu/drm/radeon/Kconfig"
@@ -343,6 +274,8 @@ source "drivers/gpu/drm/amd/amdgpu/Kconfig"
source "drivers/gpu/drm/nouveau/Kconfig"
+source "drivers/gpu/drm/nova/Kconfig"
+
source "drivers/gpu/drm/i915/Kconfig"
source "drivers/gpu/drm/xe/Kconfig"
@@ -454,6 +387,8 @@ source "drivers/gpu/drm/xlnx/Kconfig"
source "drivers/gpu/drm/gud/Kconfig"
+source "drivers/gpu/drm/sitronix/Kconfig"
+
source "drivers/gpu/drm/solomon/Kconfig"
source "drivers/gpu/drm/sprd/Kconfig"
@@ -474,9 +409,6 @@ config DRM_HYPERV
If M is selected the module will be called hyperv_drm.
-config DRM_EXPORT_FOR_TESTS
- bool
-
# Separate option as not all DRM drivers use it
config DRM_PANEL_BACKLIGHT_QUIRKS
tristate
@@ -489,31 +421,6 @@ config DRM_PRIVACY_SCREEN
bool
default n
-config DRM_WERROR
- bool "Compile the drm subsystem with warnings as errors"
- depends on DRM && EXPERT
- depends on !WERROR
- default n
- help
- A kernel build should not cause any compiler warnings, and this
- enables the '-Werror' flag to enforce that rule in the drm subsystem.
-
- The drm subsystem enables more warnings than the kernel default, so
- this config option is disabled by default.
-
- If in doubt, say N.
-
-config DRM_HEADER_TEST
- bool "Ensure DRM headers are self-contained and pass kernel-doc"
- depends on DRM && EXPERT && BROKEN
- default n
- help
- Ensure the DRM subsystem headers both under drivers/gpu/drm and
- include/drm compile, are self-contained, have header guards, and have
- no kernel-doc warnings.
-
- If in doubt, say N.
-
endif
# Separate option because drm_panel_orientation_quirks.c is shared with fbdev
diff --git a/drivers/gpu/drm/Kconfig.debug b/drivers/gpu/drm/Kconfig.debug
new file mode 100644
index 000000000000..fa6ee76f4d3c
--- /dev/null
+++ b/drivers/gpu/drm/Kconfig.debug
@@ -0,0 +1,116 @@
+config DRM_USE_DYNAMIC_DEBUG
+ bool "use dynamic debug to implement drm.debug"
+ default n
+ depends on BROKEN
+ depends on DRM
+ depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
+ depends on JUMP_LABEL
+ help
+ Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
+ Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
+ bytes per callsite, the .data costs can be substantial, and
+ are therefore configurable.
+
+config DRM_WERROR
+ bool "Compile the drm subsystem with warnings as errors"
+ depends on DRM && EXPERT
+ depends on !WERROR
+ default n
+ help
+ A kernel build should not cause any compiler warnings, and this
+ enables the '-Werror' flag to enforce that rule in the drm subsystem.
+
+ The drm subsystem enables more warnings than the kernel default, so
+ this config option is disabled by default.
+
+ If in doubt, say N.
+
+config DRM_HEADER_TEST
+ bool "Ensure DRM headers are self-contained and pass kernel-doc"
+ depends on DRM && EXPERT && BROKEN
+ default n
+ help
+ Ensure the DRM subsystem headers both under drivers/gpu/drm and
+ include/drm compile, are self-contained, have header guards, and have
+ no kernel-doc warnings.
+
+ If in doubt, say N.
+
+config DRM_DEBUG_MM
+ bool "Insert extra checks and debug info into the DRM range managers"
+ default n
+ depends on DRM
+ depends on STACKTRACE_SUPPORT
+ select STACKDEPOT
+ help
+ Enable allocation tracking of memory manager and leak detection on
+ shutdown.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_KUNIT_TEST_HELPERS
+ tristate
+ depends on DRM && KUNIT
+ select DRM_KMS_HELPER
+ help
+ KUnit Helpers for KMS drivers.
+
+config DRM_KUNIT_TEST
+ tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
+ depends on DRM && KUNIT && MMU
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_BUDDY
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HDMI_STATE_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_EXEC
+ select DRM_EXPORT_FOR_TESTS if m
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KUNIT_TEST_HELPERS
+ select DRM_LIB_RANDOM
+ select PRIME_NUMBERS
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for DRM. This option is not useful for
+ distributions or general kernels, but only for kernel
+ developers working on DRM and associated drivers.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+
+ If in doubt, say "N".
+
+config DRM_TTM_KUNIT_TEST
+ tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
+ default n
+ depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
+ select DRM_TTM
+ select DRM_BUDDY
+ select DRM_EXPORT_FOR_TESTS if m
+ select DRM_KUNIT_TEST_HELPERS
+ default KUNIT_ALL_TESTS
+ help
+ Enables unit tests for TTM, a GPU memory manager subsystem used
+ to manage memory buffers. This option is mostly useful for kernel
+ developers. It depends on (UML || COMPILE_TEST) since no other driver
+ which uses TTM can be loaded while running the tests.
+
+ If in doubt, say "N".
+
+config DRM_SCHED_KUNIT_TEST
+ tristate "KUnit tests for the DRM scheduler" if !KUNIT_ALL_TESTS
+ select DRM_SCHED
+ depends on DRM && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Choose this option to build unit tests for the DRM scheduler.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_EXPORT_FOR_TESTS
+ bool
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index ed54a546bbe2..5050ac32bba2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -134,6 +134,7 @@ obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
drm_kms_helper-y := \
drm_atomic_helper.o \
drm_atomic_state_helper.o \
+ drm_bridge_helper.o \
drm_crtc_helper.o \
drm_damage_helper.o \
drm_flip_work.o \
@@ -176,6 +177,7 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VGEM) += vgem/
obj-$(CONFIG_DRM_VKMS) += vkms/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
+obj-$(CONFIG_DRM_NOVA) += nova/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
obj-$(CONFIG_DRM_GMA500) += gma500/
@@ -204,6 +206,7 @@ obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-y += hisilicon/
obj-y += mxsfb/
+obj-y += sysfb/
obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
@@ -219,6 +222,7 @@ obj-$(CONFIG_DRM_TIDSS) += tidss/
obj-y += xlnx/
obj-y += gud/
obj-$(CONFIG_DRM_HYPERV) += hyperv/
+obj-y += sitronix/
obj-y += solomon/
obj-$(CONFIG_DRM_SPRD) += sprd/
obj-$(CONFIG_DRM_LOONGSON) += loongson/
@@ -236,7 +240,7 @@ always-$(CONFIG_DRM_HEADER_TEST) += \
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
cmd_hdrtest = \
$(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \
- $(srctree)/scripts/kernel-doc -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
+ PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
touch $@
$(obj)/%.hdrtest: $(src)/%.h FORCE
diff --git a/drivers/gpu/drm/adp/adp-mipi.c b/drivers/gpu/drm/adp/adp-mipi.c
index ad80542b60ed..2b60128e2c69 100644
--- a/drivers/gpu/drm/adp/adp-mipi.c
+++ b/drivers/gpu/drm/adp/adp-mipi.c
@@ -212,12 +212,13 @@ static const struct mipi_dsi_host_ops adp_dsi_host_ops = {
};
static int adp_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct adp_mipi_drv_private *adp =
container_of(bridge, struct adp_mipi_drv_private, bridge);
- return drm_bridge_attach(bridge->encoder, adp->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, adp->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs adp_dsi_bridge_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index aacc810cabb3..87080c06e5fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -66,7 +66,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o \
amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o \
- amdgpu_cper.o
+ amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
@@ -174,7 +174,10 @@ amdgpu-y += \
amdgpu-y += \
amdgpu_mes.o \
mes_v11_0.o \
- mes_v12_0.o
+ mes_v12_0.o \
+
+# add GFX userqueue support
+amdgpu-y += mes_userqueue.o
# add UVD block
amdgpu-y += \
@@ -253,6 +256,8 @@ amdgpu-y += \
# add amdkfd interfaces
amdgpu-y += amdgpu_amdkfd.o
+# add gfx usermode queue
+amdgpu-y += amdgpu_userq.o
ifneq ($(CONFIG_HSA_AMD),)
AMDKFD_PATH := ../amdkfd
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c3641331d4de..836ea081088a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -113,6 +113,8 @@
#include "amdgpu_xcp.h"
#include "amdgpu_seq64.h"
#include "amdgpu_reg_state.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_eviction_fence.h"
#if defined(CONFIG_DRM_AMD_ISP)
#include "amdgpu_isp.h"
#endif
@@ -228,7 +230,7 @@ extern int amdgpu_force_asic_type;
extern int amdgpu_smartshift_bias;
extern int amdgpu_use_xgmi_p2p;
extern int amdgpu_mtype_local;
-extern bool enforce_isolation;
+extern int amdgpu_enforce_isolation;
#ifdef CONFIG_HSA_AMD
extern int sched_policy;
extern bool debug_evictions;
@@ -266,8 +268,10 @@ extern int amdgpu_umsch_mm_fwlog;
extern int amdgpu_user_partt_mode;
extern int amdgpu_agp;
+extern int amdgpu_rebar;
extern int amdgpu_wbrf;
+extern int amdgpu_user_queue;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
@@ -488,7 +492,6 @@ struct amdgpu_flip_work {
bool async;
};
-
/*
* file private structure
*/
@@ -501,6 +504,11 @@ struct amdgpu_fpriv {
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
+ struct amdgpu_userq_mgr userq_mgr;
+
+ /* Eviction fence infra */
+ struct amdgpu_eviction_fence_mgr evf_mgr;
+
/** GPU partition selection */
uint32_t xcp_id;
};
@@ -512,12 +520,62 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
*/
#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */
+/**
+ * amdgpu_wb - This struct is used for small GPU memory allocation.
+ *
+ * This struct is used to allocate a small amount of GPU memory that can be
+ * used to shadow certain states in memory. This is especially useful for
+ * providing easy CPU access to some states without requiring register access
+ * (e.g., if a block is power gated, reading its registers may be problematic).
+ *
+ * Note: the name "writeback" stems from the fact that many amdgpu
+ * components have some amount of writeback memory, which this struct
+ * originally described.
+ */
struct amdgpu_wb {
+
+ /**
+ * @wb_obj:
+ *
+ * Buffer Object used for the writeback memory.
+ */
struct amdgpu_bo *wb_obj;
+
+ /**
+ * @wb:
+ *
+ * Pointer to the first writeback slot. On the CPU side, a slot can
+ * be accessed directly by using its offset as an index into this
+ * array. On the GPU side, its address is gpu_addr plus the offset.
+ */
volatile uint32_t *wb;
+
+ /**
+ * @gpu_addr:
+ *
+ * Writeback base address in the GPU.
+ */
uint64_t gpu_addr;
- u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
+
+ /**
+ * @num_wb:
+ *
+ * Number of writeback slots reserved for amdgpu.
+ */
+ u32 num_wb;
+
+ /**
+ * @used:
+ *
+ * Tracks which writeback slots are already in use.
+ */
unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
+
+ /**
+ * @lock:
+ *
+ * Protects reads and writes of the @used bitmap.
+ */
spinlock_t lock;
};
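
Read together, the kernel-doc above implies a simple bitmap allocator over these fields. A minimal sketch of reserving a slot (a sketch of the pattern under that reading, not the driver's actual helper) could look like:

    /* Sketch only: reserve one writeback slot under @lock. */
    static int example_wb_get(struct amdgpu_wb *wb, u32 *slot)
    {
            unsigned long flags, offset;

            spin_lock_irqsave(&wb->lock, flags);
            offset = find_first_zero_bit(wb->used, wb->num_wb);
            if (offset >= wb->num_wb) {
                    spin_unlock_irqrestore(&wb->lock, flags);
                    return -ENOMEM;
            }
            __set_bit(offset, wb->used);
            spin_unlock_irqrestore(&wb->lock, flags);

            /* CPU view: wb->wb[offset]; GPU view: wb->gpu_addr + offset * 4 */
            *slot = offset;
            return 0;
    }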
@@ -551,6 +609,7 @@ struct amdgpu_allowed_register_entry {
* are reset depends on the ASIC. Notably doesn't reset IPs
* shared with the CPU on APUs or the memory controllers (so
* VRAM is not lost). Not available on all ASICs.
+ * @AMD_RESET_LINK: Triggers SW-UP link reset on other GPUs
* @AMD_RESET_BACO: BACO (Bus Alive, Chip Off) method powers off and on the card
* but without powering off the PCI bus. Suitable only for
* discrete GPUs.
@@ -568,6 +627,7 @@ enum amd_reset_method {
AMD_RESET_METHOD_MODE0,
AMD_RESET_METHOD_MODE1,
AMD_RESET_METHOD_MODE2,
+ AMD_RESET_METHOD_LINK,
AMD_RESET_METHOD_BACO,
AMD_RESET_METHOD_PCI,
AMD_RESET_METHOD_ON_INIT,
@@ -821,6 +881,11 @@ struct amdgpu_mqd_prop {
uint32_t hqd_queue_priority;
bool allow_tunneling;
bool hqd_active;
+ uint64_t shadow_addr;
+ uint64_t gds_bkup_addr;
+ uint64_t csa_addr;
+ uint64_t fence_address;
+ bool tmz_queue;
};
struct amdgpu_mqd {
@@ -829,6 +894,12 @@ struct amdgpu_mqd {
struct amdgpu_mqd_prop *p);
};
+struct amdgpu_pcie_reset_ctx {
+ bool in_link_reset;
+ bool occurs_dpc;
+ bool audio_suspended;
+};
+
/*
* Custom Init levels could be defined for different situations where a full
* initialization of all hardware blocks are not expected. Sample cases are
@@ -853,6 +924,14 @@ struct amdgpu_init_level {
struct amdgpu_reset_domain;
struct amdgpu_fru_info;
+enum amdgpu_enforce_isolation_mode {
+ AMDGPU_ENFORCE_ISOLATION_DISABLE = 0,
+ AMDGPU_ENFORCE_ISOLATION_ENABLE = 1,
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY = 2,
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3,
+};
+
+
/*
* Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
*/
@@ -1081,6 +1160,13 @@ struct amdgpu_device {
bool enable_uni_mes;
struct amdgpu_mes mes;
struct amdgpu_mqd mqds[AMDGPU_HW_IP_NUM];
+ const struct amdgpu_userq_funcs *userq_funcs[AMDGPU_HW_IP_NUM];
+
+ /* xarray used to retrieve the user queue fence driver reference
+ * in the EOP interrupt handler to signal the particular user
+ * queue fence.
+ */
+ struct xarray userq_xa;
/* df */
struct amdgpu_df df;
@@ -1160,6 +1246,8 @@ struct amdgpu_device {
struct pci_saved_state *pci_state;
pci_channel_state_t pci_channel_state;
+ struct amdgpu_pcie_reset_ctx pcie_reset_ctx;
+
/* Track auto wait count on s_barrier settings */
bool barrier_has_auto_waitcnt;
@@ -1193,10 +1281,11 @@ struct amdgpu_device {
bool debug_enable_ras_aca;
bool debug_exp_resets;
bool debug_disable_gpu_ring_reset;
+ bool debug_vm_userptr;
/* Protection for the following isolation structure */
struct mutex enforce_isolation_mutex;
- bool enforce_isolation[MAX_XCP];
+ enum amdgpu_enforce_isolation_mode enforce_isolation[MAX_XCP];
struct amdgpu_isolation {
void *owner;
struct dma_fence *spearhead;
@@ -1210,6 +1299,10 @@ struct amdgpu_device {
* in KFD: VRAM or GTT.
*/
bool apu_prefer_gtt;
+
+ struct list_head userq_mgr_list;
+ struct mutex userq_mutex;
+ bool userq_halt_for_enforce_isolation;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
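
The userq_xa comment above describes an interrupt-path lookup. A minimal sketch of that store/load pattern (the key scheme and value type here are assumptions for illustration, not taken from the patch):

    /* Producer side: remember a fence-driver reference under some key. */
    static int example_userq_store(struct amdgpu_device *adev,
                                   unsigned long key, void *fence_drv)
    {
            /* userq_xa was initialized with XA_FLAGS_LOCK_IRQ, so the
             * internal xa lock is IRQ-safe against the EOP handler.
             */
            return xa_err(xa_store(&adev->userq_xa, key, fence_drv, GFP_KERNEL));
    }

    /* Consumer side, from the EOP interrupt handler. */
    static void *example_userq_lookup(struct amdgpu_device *adev, unsigned long key)
    {
            return xa_load(&adev->userq_xa, key);
    }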
@@ -1464,6 +1557,7 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 array_size);
int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
+int amdgpu_device_link_reset(struct amdgpu_device *adev);
bool amdgpu_device_supports_atpx(struct drm_device *dev);
bool amdgpu_device_supports_px(struct drm_device *dev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index b4ad163f42a7..3835f2592914 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -120,6 +120,9 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st
for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
RAS_EVENT_LOG(adev, event_id, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
+
+ if (ACA_REG__STATUS__SCRUB(bank->regs[ACA_REG_IDX_STATUS]))
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "hardware error logged by the scrubber\n");
}
static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index d2ec4130a316..260165bbe373 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2559,6 +2559,18 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
if (ret != -EFAULT)
return ret;
+ /* If applications unmap memory before destroying the userptr
+ * from the KFD, trigger a segmentation fault in VM debug mode.
+ */
+ if (amdgpu_ttm_adev(bo->tbo.bdev)->debug_vm_userptr) {
+ pr_err("Pid %d unmapped memory before destroying userptr at GPU addr 0x%llx\n",
+ pid_nr(process_info->pid), mem->va);
+
+ /* Send GPU VM fault to user space */
+ kfd_signal_vm_fault_event_with_userptr(kfd_lookup_process_by_pid(process_info->pid),
+ mem->va);
+ }
+
ret = 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index eb015bdda8a7..c7d32fb216e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -281,6 +281,9 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
case ATOM_DGPU_VRAM_TYPE_GDDR6:
vram_type = AMDGPU_VRAM_TYPE_GDDR6;
break;
+ case ATOM_DGPU_VRAM_TYPE_HBM3E:
+ vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+ break;
default:
vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 68bce6a6d09d..004a6a9d6b9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -252,83 +252,22 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
- case CHIP_TAHITI:
- strcpy(fw_name, "radeon/tahiti_smc.bin");
- break;
- case CHIP_PITCAIRN:
- if ((adev->pdev->revision == 0x81) &&
- ((adev->pdev->device == 0x6810) ||
- (adev->pdev->device == 0x6811))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
- } else {
- strcpy(fw_name, "radeon/pitcairn_smc.bin");
- }
- break;
- case CHIP_VERDE:
- if (((adev->pdev->device == 0x6820) &&
- ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83))) ||
- ((adev->pdev->device == 0x6821) &&
- ((adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87))) ||
- ((adev->pdev->revision == 0x87) &&
- ((adev->pdev->device == 0x6823) ||
- (adev->pdev->device == 0x682b)))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/verde_k_smc.bin");
- } else {
- strcpy(fw_name, "radeon/verde_smc.bin");
- }
- break;
- case CHIP_OLAND:
- if (((adev->pdev->revision == 0x81) &&
- ((adev->pdev->device == 0x6600) ||
- (adev->pdev->device == 0x6604) ||
- (adev->pdev->device == 0x6605) ||
- (adev->pdev->device == 0x6610))) ||
- ((adev->pdev->revision == 0x83) &&
- (adev->pdev->device == 0x6610))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/oland_k_smc.bin");
- } else {
- strcpy(fw_name, "radeon/oland_smc.bin");
- }
- break;
- case CHIP_HAINAN:
- if (((adev->pdev->revision == 0x81) &&
- (adev->pdev->device == 0x6660)) ||
- ((adev->pdev->revision == 0x83) &&
- ((adev->pdev->device == 0x6660) ||
- (adev->pdev->device == 0x6663) ||
- (adev->pdev->device == 0x6665) ||
- (adev->pdev->device == 0x6667)))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/hainan_k_smc.bin");
- } else if ((adev->pdev->revision == 0xc3) &&
- (adev->pdev->device == 0x6665)) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/banks_k_2_smc.bin");
- } else {
- strcpy(fw_name, "radeon/hainan_smc.bin");
- }
- break;
case CHIP_BONAIRE:
if ((adev->pdev->revision == 0x80) ||
(adev->pdev->revision == 0x81) ||
(adev->pdev->device == 0x665f)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/bonaire_k_smc.bin");
+ strscpy(fw_name, "amdgpu/bonaire_k_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/bonaire_smc.bin");
+ strscpy(fw_name, "amdgpu/bonaire_smc.bin");
}
break;
case CHIP_HAWAII:
if (adev->pdev->revision == 0x80) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/hawaii_k_smc.bin");
+ strscpy(fw_name, "amdgpu/hawaii_k_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/hawaii_smc.bin");
+ strscpy(fw_name, "amdgpu/hawaii_smc.bin");
}
break;
case CHIP_TOPAZ:
@@ -338,76 +277,76 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
+ strscpy(fw_name, "amdgpu/topaz_k_smc.bin");
} else
- strcpy(fw_name, "amdgpu/topaz_smc.bin");
+ strscpy(fw_name, "amdgpu/topaz_smc.bin");
break;
case CHIP_TONGA:
if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
+ strscpy(fw_name, "amdgpu/tonga_k_smc.bin");
} else
- strcpy(fw_name, "amdgpu/tonga_smc.bin");
+ strscpy(fw_name, "amdgpu/tonga_smc.bin");
break;
case CHIP_FIJI:
- strcpy(fw_name, "amdgpu/fiji_smc.bin");
+ strscpy(fw_name, "amdgpu/fiji_smc.bin");
break;
case CHIP_POLARIS11:
if (type == CGS_UCODE_ID_SMU) {
if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_k_smc.bin");
} else if (ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_smc.bin");
}
} else if (type == CGS_UCODE_ID_SMU_SK) {
- strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
+ strscpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
}
break;
case CHIP_POLARIS10:
if (type == CGS_UCODE_ID_SMU) {
if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_k_smc.bin");
} else if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_smc.bin");
}
} else if (type == CGS_UCODE_ID_SMU_SK) {
- strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
+ strscpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
}
break;
case CHIP_POLARIS12:
if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris12_k_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris12_smc.bin");
}
break;
case CHIP_VEGAM:
- strcpy(fw_name, "amdgpu/vegam_smc.bin");
+ strscpy(fw_name, "amdgpu/vegam_smc.bin");
break;
case CHIP_VEGA10:
if ((adev->pdev->device == 0x687f) &&
((adev->pdev->revision == 0xc0) ||
(adev->pdev->revision == 0xc1) ||
(adev->pdev->revision == 0xc3)))
- strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
+ strscpy(fw_name, "amdgpu/vega10_acg_smc.bin");
else
- strcpy(fw_name, "amdgpu/vega10_smc.bin");
+ strscpy(fw_name, "amdgpu/vega10_smc.bin");
break;
case CHIP_VEGA12:
- strcpy(fw_name, "amdgpu/vega12_smc.bin");
+ strscpy(fw_name, "amdgpu/vega12_smc.bin");
break;
case CHIP_VEGA20:
- strcpy(fw_name, "amdgpu/vega20_smc.bin");
+ strscpy(fw_name, "amdgpu/vega20_smc.bin");
break;
default:
DRM_ERROR("SMC firmware not supported\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index 360e07a5c7c1..5a234eadae8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -549,7 +549,7 @@ int amdgpu_cper_init(struct amdgpu_device *adev)
{
int r;
- if (!amdgpu_aca_is_enabled(adev))
+ if (!amdgpu_aca_is_enabled(adev) && !amdgpu_sriov_ras_cper_en(adev))
return 0;
r = amdgpu_cper_ring_init(adev);
@@ -568,7 +568,7 @@ int amdgpu_cper_init(struct amdgpu_device *adev)
int amdgpu_cper_fini(struct amdgpu_device *adev)
{
- if (!amdgpu_aca_is_enabled(adev))
+ if (!amdgpu_aca_is_enabled(adev) && !amdgpu_sriov_ras_cper_en(adev))
return 0;
adev->cper.enabled = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 82df06a72ee0..9ea0d9b71f48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -296,7 +296,25 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
num_ibs[i], &p->jobs[i]);
if (ret)
goto free_all_kdata;
- p->jobs[i]->enforce_isolation = p->adev->enforce_isolation[fpriv->xcp_id];
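+ /* map the per-partition isolation mode onto the job's isolation flags */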
+ switch (p->adev->enforce_isolation[fpriv->xcp_id]) {
+ case AMDGPU_ENFORCE_ISOLATION_DISABLE:
+ default:
+ p->jobs[i]->enforce_isolation = false;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_ENABLE:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = true;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ }
}
p->gang_leader = p->jobs[p->gang_leader_idx];
@@ -349,6 +367,10 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
ring = amdgpu_job_ring(job);
ib = &job->ibs[job->num_ibs++];
+ /* submissions to kernel queues are disabled */
+ if (ring->no_user_submission)
+ return -EINVAL;
+
/* MM engine doesn't support user fences */
if (p->uf_bo && ring->funcs->no_user_fence)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a1450f13d963..8e626f50b362 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -2105,6 +2105,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_rap_debugfs_init(adev);
amdgpu_securedisplay_debugfs_init(adev);
amdgpu_fw_attestation_debugfs_init(adev);
+ amdgpu_psp_debugfs_init(adev);
debugfs_create_file("amdgpu_evict_vram", 0400, root, adev,
&amdgpu_evict_vram_fops);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f8b3e04d71ed..4d1b54f58495 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -85,6 +85,7 @@
#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#endif
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
@@ -1680,6 +1681,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
+ if (!amdgpu_rebar)
+ return 0;
+
/* resizing on Dell G5 SE platforms causes problems with runtime pm */
if ((amdgpu_runtime_pm != 0) &&
adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
@@ -1870,6 +1874,35 @@ static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device
return true;
}
+static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
+{
+#if IS_ENABLED(CONFIG_X86)
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ if (!(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 0) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 1)))
+ return false;
+
+ if (c->x86 == 6 &&
+ adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) {
+ switch (c->x86_model) {
+ case VFM_MODEL(INTEL_ALDERLAKE):
+ case VFM_MODEL(INTEL_ALDERLAKE_L):
+ case VFM_MODEL(INTEL_RAPTORLAKE):
+ case VFM_MODEL(INTEL_RAPTORLAKE_P):
+ case VFM_MODEL(INTEL_RAPTORLAKE_S):
+ return true;
+ default:
+ return false;
+ }
+ } else {
+ return false;
+ }
+#else
+ return false;
+#endif
+}
+
/**
* amdgpu_device_should_use_aspm - check if the device should program ASPM
*
@@ -1894,7 +1927,7 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
}
if (adev->flags & AMD_IS_APU)
return false;
- if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
+ if (amdgpu_device_aspm_support_quirk(adev))
return false;
return pcie_aspm_enabled(adev->pdev);
}
@@ -2112,8 +2145,31 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
- for (i = 0; i < MAX_XCP; i++)
- adev->enforce_isolation[i] = !!enforce_isolation;
+ for (i = 0; i < MAX_XCP; i++) {
+ switch (amdgpu_enforce_isolation) {
+ case -1:
+ case 0:
+ default:
+ /* disable */
+ adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
+ break;
+ case 1:
+ /* enable */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE;
+ break;
+ case 2:
+ /* enable legacy mode */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
+ break;
+ case 3:
+ /* enable process isolation only, without submitting the cleaner shader */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
+ break;
+ }
+ }
return 0;
}
@@ -2689,6 +2745,13 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
break;
}
+ /* Check for IP version 9.4.3 with A0 hardware */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
+ !amdgpu_device_get_rev_id(adev)) {
+ dev_err(adev->dev, "Unsupported A0 hardware\n");
+ return -ENODEV; /* unsupported hardware, report "no such device" */
+ }
+
if (amdgpu_has_atpx() &&
(amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) &&
@@ -2701,7 +2764,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
}
-
adev->pm.pp_feature = amdgpu_pp_feature_mask;
if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
@@ -3172,6 +3234,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
* always assumed to be lost.
*/
switch (amdgpu_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_LINK:
case AMD_RESET_METHOD_BACO:
case AMD_RESET_METHOD_MODE1:
return true;
@@ -3455,6 +3518,7 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
amdgpu_amdkfd_suspend(adev, false);
+ amdgpu_userq_suspend(adev);
/* Workaround for ASICs need to disable SMC first */
amdgpu_device_smu_fini_early(adev);
@@ -4307,9 +4371,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_sync_create(&adev->isolation[i].active);
amdgpu_sync_create(&adev->isolation[i].prev);
}
- mutex_init(&adev->gfx.kfd_sch_mutex);
+ mutex_init(&adev->gfx.userq_sch_mutex);
mutex_init(&adev->gfx.workload_profile_mutex);
mutex_init(&adev->vcn.workload_profile_mutex);
+ mutex_init(&adev->userq_mutex);
amdgpu_device_init_apu_flags(adev);
@@ -4329,12 +4394,16 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->virt.rlcg_reg_lock);
spin_lock_init(&adev->wb.lock);
+ xa_init_flags(&adev->userq_xa, XA_FLAGS_LOCK_IRQ);
+
INIT_LIST_HEAD(&adev->reset_list);
INIT_LIST_HEAD(&adev->ras_list);
INIT_LIST_HEAD(&adev->pm.od_kobj_list);
+ INIT_LIST_HEAD(&adev->userq_mgr_list);
+
INIT_DELAYED_WORK(&adev->delayed_init_work,
amdgpu_device_delayed_init_work_handler);
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
@@ -5003,8 +5072,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
amdgpu_device_ip_suspend_phase1(adev);
- if (!adev->in_s0ix)
+ if (!adev->in_s0ix) {
amdgpu_amdkfd_suspend(adev, adev->in_runpm);
+ amdgpu_userq_suspend(adev);
+ }
r = amdgpu_device_evict_resources(adev);
if (r)
@@ -5071,6 +5142,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
if (r)
goto exit;
+
+ r = amdgpu_userq_resume(adev);
+ if (r)
+ goto exit;
}
r = amdgpu_device_ip_late_init(adev);
@@ -5119,9 +5194,6 @@ exit:
}
adev->in_suspend = false;
- if (adev->enable_mes)
- amdgpu_mes_self_test(adev);
-
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
DRM_WARN("smart shift update failed\n");
@@ -5502,6 +5574,29 @@ mode1_reset_failed:
return ret;
}
+int amdgpu_device_link_reset(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ dev_info(adev->dev, "GPU link reset\n");
+
+ if (!adev->pcie_reset_ctx.occurs_dpc)
+ ret = amdgpu_dpm_link_reset(adev);
+
+ if (ret)
+ goto link_reset_failed;
+
+ ret = amdgpu_psp_wait_for_bootloader(adev);
+ if (ret)
+ goto link_reset_failed;
+
+ return 0;
+
+link_reset_failed:
+ dev_err(adev->dev, "GPU link reset failed\n");
+ return ret;
+}
+
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
struct amdgpu_reset_context *reset_context)
{
@@ -5806,6 +5901,7 @@ static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
switch (amdgpu_asic_reset_method(adev)) {
case AMD_RESET_METHOD_MODE1:
+ case AMD_RESET_METHOD_LINK:
adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
break;
case AMD_RESET_METHOD_MODE2:
@@ -5922,94 +6018,42 @@ static int amdgpu_device_health_check(struct list_head *device_list_handle)
return ret;
}
-/**
- * amdgpu_device_gpu_recover - reset the asic and recover scheduler
- *
- * @adev: amdgpu_device pointer
- * @job: which job trigger hang
- * @reset_context: amdgpu reset context pointer
- *
- * Attempt to reset the GPU if it has hung (all asics).
- * Attempt to do soft-reset or full-reset and reinitialize Asic
- * Returns 0 for success or an error on failure.
- */
-
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+static int amdgpu_device_halt_activities(struct amdgpu_device *adev,
struct amdgpu_job *job,
- struct amdgpu_reset_context *reset_context)
+ struct amdgpu_reset_context *reset_context,
+ struct list_head *device_list,
+ struct amdgpu_hive_info *hive,
+ bool need_emergency_restart)
{
- struct list_head device_list, *device_list_handle = NULL;
- bool job_signaled = false;
- struct amdgpu_hive_info *hive = NULL;
+ struct list_head *device_list_handle = NULL;
struct amdgpu_device *tmp_adev = NULL;
int i, r = 0;
- bool need_emergency_restart = false;
- bool audio_suspended = false;
- int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
/*
- * If it reaches here because of hang/timeout and a RAS error is
- * detected at the same time, let RAS recovery take care of it.
- */
- if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
- !amdgpu_sriov_vf(adev) &&
- reset_context->src != AMDGPU_RESET_SRC_RAS) {
- dev_dbg(adev->dev,
- "Gpu recovery from source: %d yielding to RAS error recovery handling",
- reset_context->src);
- return 0;
- }
- /*
- * Special case: RAS triggered and full reset isn't supported
- */
- need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
-
- /*
- * Flush RAM to disk so that after reboot
- * the user can read log and see why the system rebooted.
- */
- if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
- amdgpu_ras_get_context(adev)->reboot) {
- DRM_WARN("Emergency reboot.");
-
- ksys_sync_helper();
- emergency_restart();
- }
-
- dev_info(adev->dev, "GPU %s begin!\n",
- need_emergency_restart ? "jobs stop":"reset");
-
- if (!amdgpu_sriov_vf(adev))
- hive = amdgpu_get_xgmi_hive(adev);
- if (hive)
- mutex_lock(&hive->hive_lock);
-
- reset_context->job = job;
- reset_context->hive = hive;
- /*
* Build list of devices to reset.
* In case we are in XGMI hive mode, resort the device list
* to put adev in the 1st position.
*/
- INIT_LIST_HEAD(&device_list);
if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- list_add_tail(&tmp_adev->reset_list, &device_list);
+ list_add_tail(&tmp_adev->reset_list, device_list);
if (adev->shutdown)
tmp_adev->shutdown = true;
+ if (adev->pcie_reset_ctx.occurs_dpc)
+ tmp_adev->pcie_reset_ctx.in_link_reset = true;
}
- if (!list_is_first(&adev->reset_list, &device_list))
- list_rotate_to_front(&adev->reset_list, &device_list);
- device_list_handle = &device_list;
+ if (!list_is_first(&adev->reset_list, device_list))
+ list_rotate_to_front(&adev->reset_list, device_list);
+ device_list_handle = device_list;
} else {
- list_add_tail(&adev->reset_list, &device_list);
- device_list_handle = &device_list;
+ list_add_tail(&adev->reset_list, device_list);
+ device_list_handle = device_list;
}
- if (!amdgpu_sriov_vf(adev)) {
+ if (!amdgpu_sriov_vf(adev) && (!adev->pcie_reset_ctx.occurs_dpc)) {
r = amdgpu_device_health_check(device_list_handle);
if (r)
- goto end_reset;
+ return r;
}
/* We need to lock reset domain only once both for XGMI and single device */
@@ -6033,7 +6077,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* some audio codec errors.
*/
if (!amdgpu_device_suspend_display_audio(tmp_adev))
- audio_suspended = true;
+ tmp_adev->pcie_reset_ctx.audio_suspended = true;
amdgpu_ras_set_error_query_ready(tmp_adev, false);
@@ -6051,6 +6095,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
/* disable ras on ALL IPs */
if (!need_emergency_restart &&
+ (!adev->pcie_reset_ctx.occurs_dpc) &&
amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);
@@ -6068,24 +6113,24 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
atomic_inc(&tmp_adev->gpu_reset_counter);
}
- if (need_emergency_restart)
- goto skip_sched_resume;
+ return r;
+}
- /*
- * Must check guilty signal here since after this point all old
- * HW fences are force signaled.
- *
- * job->base holds a reference to parent fence
- */
- if (job && dma_fence_is_signaled(&job->hw_fence)) {
- job_signaled = true;
- dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
- goto skip_hw_reset;
- }
+static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
+ int r = 0;
retry: /* Rest of adevs pre asic reset from XGMI hive. */
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
+ if (adev->pcie_reset_ctx.occurs_dpc)
+ tmp_adev->no_hw_access = true;
r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
+ if (adev->pcie_reset_ctx.occurs_dpc)
+ tmp_adev->no_hw_access = false;
/*TODO Should we stop ?*/
if (r) {
dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
@@ -6097,6 +6142,11 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
/* Actual ASIC resets if needed.*/
/* Host driver will handle XGMI hive reset for SRIOV */
if (amdgpu_sriov_vf(adev)) {
+
+ /* Bail out of reset early */
+ if (amdgpu_ras_is_rma(adev))
+ return -ENODEV;
+
if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) {
dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n");
amdgpu_ras_set_fed(adev, true);
@@ -6111,12 +6161,12 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
if (r)
adev->asic_reset_res = r;
} else {
- r = amdgpu_do_asic_reset(device_list_handle, reset_context);
+ r = amdgpu_do_asic_reset(device_list, reset_context);
if (r && r == -EAGAIN)
goto retry;
}
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
/*
* Drop any pending non scheduler resets queued before reset is done.
* Any reset scheduled after this point would be valid. Scheduler resets
@@ -6126,10 +6176,18 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
amdgpu_device_stop_pending_resets(tmp_adev);
}
-skip_hw_reset:
+ return r;
+}
+
+static int amdgpu_device_sched_resume(struct list_head *device_list,
+ struct amdgpu_reset_context *reset_context,
+ bool job_signaled)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int i, r = 0;
/* Post ASIC reset for all devs .*/
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -6165,8 +6223,16 @@ skip_hw_reset:
}
}
-skip_sched_resume:
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ return r;
+}
+
+static void amdgpu_device_gpu_resume(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ bool need_emergency_restart)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
/* unlock kfd: SRIOV would do it separately */
if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
amdgpu_amdkfd_post_reset(tmp_adev);
@@ -6177,18 +6243,114 @@ skip_sched_resume:
if (!adev->kfd.init_complete)
amdgpu_amdkfd_device_init(adev);
- if (audio_suspended)
+ if (tmp_adev->pcie_reset_ctx.audio_suspended)
amdgpu_device_resume_display_audio(tmp_adev);
amdgpu_device_unset_mp1_state(tmp_adev);
amdgpu_ras_set_error_query_ready(tmp_adev, true);
+
}
- tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
+ tmp_adev = list_first_entry(device_list, struct amdgpu_device,
reset_list);
amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+}
+
+
+/**
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu_device pointer
+ * @job: which job trigger hang
+ * @reset_context: amdgpu reset context pointer
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Attempt to do soft-reset or full-reset and reinitialize Asic
+ * Returns 0 for success or an error on failure.
+ */
+
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head device_list;
+ bool job_signaled = false;
+ struct amdgpu_hive_info *hive = NULL;
+ int r = 0;
+ bool need_emergency_restart = false;
+
+ /*
+ * If it reaches here because of hang/timeout and a RAS error is
+ * detected at the same time, let RAS recovery take care of it.
+ */
+ if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
+ !amdgpu_sriov_vf(adev) &&
+ reset_context->src != AMDGPU_RESET_SRC_RAS) {
+ dev_dbg(adev->dev,
+ "Gpu recovery from source: %d yielding to RAS error recovery handling",
+ reset_context->src);
+ return 0;
+ }
+
+ /*
+ * Special case: RAS triggered and full reset isn't supported
+ */
+ need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
+
+ /*
+ * Flush RAM to disk so that after reboot
+ * the user can read log and see why the system rebooted.
+ */
+ if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
+ amdgpu_ras_get_context(adev)->reboot) {
+ DRM_WARN("Emergency reboot.");
+
+ ksys_sync_helper();
+ emergency_restart();
+ }
+
+ dev_info(adev->dev, "GPU %s begin!\n",
+ need_emergency_restart ? "jobs stop":"reset");
+
+ if (!amdgpu_sriov_vf(adev))
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive)
+ mutex_lock(&hive->hive_lock);
+
+ reset_context->job = job;
+ reset_context->hive = hive;
+ INIT_LIST_HEAD(&device_list);
+
+ r = amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
+ hive, need_emergency_restart);
+ if (r)
+ goto end_reset;
+
+ if (need_emergency_restart)
+ goto skip_sched_resume;
+ /*
+ * Must check guilty signal here since after this point all old
+ * HW fences are force signaled.
+ *
+ * job->base holds a reference to parent fence
+ */
+ if (job && dma_fence_is_signaled(&job->hw_fence)) {
+ job_signaled = true;
+ dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
+ goto skip_hw_reset;
+ }
+
+ r = amdgpu_device_asic_reset(adev, &device_list, reset_context);
+ if (r)
+ goto end_reset;
+skip_hw_reset:
+ r = amdgpu_device_sched_resume(&device_list, reset_context, job_signaled);
+ if (r)
+ goto end_reset;
+skip_sched_resume:
+ amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
end_reset:
if (hive) {
mutex_unlock(&hive->hive_lock);
@@ -6572,12 +6734,15 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- int i;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ struct amdgpu_reset_context reset_context;
+ struct list_head device_list;
+ int r = 0;
- DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
+ dev_info(adev->dev, "PCI error: detected callback!!\n");
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
- DRM_WARN("No support for XGMI hive yet...");
+ if (!amdgpu_dpm_is_link_reset_supported(adev)) {
+ dev_warn(adev->dev, "No support for XGMI hive yet...\n");
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -6585,32 +6750,30 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
switch (state) {
case pci_channel_io_normal:
+ dev_info(adev->dev, "pci_channel_io_normal: state(%d)!!\n", state);
return PCI_ERS_RESULT_CAN_RECOVER;
- /* Fatal error, prepare for slot reset */
case pci_channel_io_frozen:
- /*
- * Locking adev->reset_domain->sem will prevent any external access
- * to GPU during PCI error recovery
- */
- amdgpu_device_lock_reset_domain(adev->reset_domain);
- amdgpu_device_set_mp1_state(adev);
-
- /*
- * Block any work scheduling as we do for regular GPU reset
- * for the duration of the recovery
- */
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!amdgpu_ring_sched_ready(ring))
- continue;
-
- drm_sched_stop(&ring->sched, NULL);
+ /* Fatal error, prepare for slot reset */
+ dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state);
+
+ if (hive)
+ mutex_lock(&hive->hive_lock);
+ adev->pcie_reset_ctx.occurs_dpc = true;
+ memset(&reset_context, 0, sizeof(reset_context));
+ INIT_LIST_HEAD(&device_list);
+
+ r = amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
+ hive, false);
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
}
- atomic_inc(&adev->gpu_reset_counter);
+ if (r)
+ return PCI_ERS_RESULT_DISCONNECT;
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
/* Permanent error, prepare for device removal */
+ dev_info(adev->dev, "pci_channel_io_perm_failure: state(%d)!!\n", state);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -6623,8 +6786,10 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
*/
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
- DRM_INFO("PCI error: mmio enabled callback!!\n");
+ dev_info(adev->dev, "PCI error: mmio enabled callback!!\n");
/* TODO - dump whatever for debugging purposes */
@@ -6648,10 +6813,12 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- int r, i;
struct amdgpu_reset_context reset_context;
- u32 memsize;
+ struct amdgpu_device *tmp_adev;
+ struct amdgpu_hive_info *hive;
struct list_head device_list;
+ int r = 0, i;
+ u32 memsize;
/* PCI error slot reset should be skipped During RAS recovery */
if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
@@ -6659,15 +6826,12 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
amdgpu_ras_in_recovery(adev))
return PCI_ERS_RESULT_RECOVERED;
- DRM_INFO("PCI error: slot reset callback!!\n");
+ dev_info(adev->dev, "PCI error: slot reset callback!!\n");
memset(&reset_context, 0, sizeof(reset_context));
- INIT_LIST_HEAD(&device_list);
- list_add_tail(&adev->reset_list, &device_list);
-
/* wait for asic to come out of reset */
- msleep(500);
+ msleep(700);
/* Restore PCI confspace */
amdgpu_device_load_pci_state(pdev);
@@ -6688,26 +6852,40 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
-
- adev->no_hw_access = true;
- r = amdgpu_device_pre_asic_reset(adev, &reset_context);
- adev->no_hw_access = false;
- if (r)
- goto out;
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+ INIT_LIST_HEAD(&device_list);
- r = amdgpu_do_asic_reset(&device_list, &reset_context);
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ mutex_lock(&hive->hive_lock);
+ reset_context.hive = hive;
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ tmp_adev->pcie_reset_ctx.in_link_reset = true;
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ }
+ } else {
+ set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+ list_add_tail(&adev->reset_list, &device_list);
+ }
+ r = amdgpu_device_asic_reset(adev, &device_list, &reset_context);
out:
if (!r) {
if (amdgpu_device_cache_pci_state(adev->pdev))
pci_restore_state(adev->pdev);
-
- DRM_INFO("PCIe error recovery succeeded\n");
+ dev_info(adev->dev, "PCIe error recovery succeeded\n");
} else {
- DRM_ERROR("PCIe error recovery failed, err:%d", r);
- amdgpu_device_unset_mp1_state(adev);
- amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ dev_err(adev->dev, "PCIe error recovery failed, err:%d\n", r);
+ if (hive) {
+ list_for_each_entry(tmp_adev, &device_list, reset_list)
+ amdgpu_device_unset_mp1_state(tmp_adev);
+ amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ }
+ }
+
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
}
return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
@@ -6724,26 +6902,36 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- int i;
-
+ struct list_head device_list;
+ struct amdgpu_hive_info *hive = NULL;
+ struct amdgpu_device *tmp_adev = NULL;
- DRM_INFO("PCI error: resume callback!!\n");
+ dev_info(adev->dev, "PCI error: resume callback!!\n");
/* Only continue execution for the case of pci_channel_io_frozen */
if (adev->pci_channel_state != pci_channel_io_frozen)
return;
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
+ INIT_LIST_HEAD(&device_list);
- if (!amdgpu_ring_sched_ready(ring))
- continue;
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ mutex_lock(&hive->hive_lock);
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ tmp_adev->pcie_reset_ctx.in_link_reset = false;
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ }
+ } else
+ list_add_tail(&adev->reset_list, &device_list);
- drm_sched_start(&ring->sched, 0);
- }
+ amdgpu_device_sched_resume(&device_list, NULL, NULL);
+ amdgpu_device_gpu_resume(adev, &device_list, false);
+ adev->pcie_reset_ctx.occurs_dpc = false;
- amdgpu_device_unset_mp1_state(adev);
- amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+ }
}
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 72c807f5822e..4ddd08ce8885 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -51,6 +51,8 @@
#include "amdgpu_reset.h"
#include "amdgpu_sched.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_userq_fence.h"
#include "../amdxcp/amdgpu_xcp_drv.h"
/*
@@ -123,9 +125,10 @@
* - 3.61.0 - Contains fix for RV/PCO compute queues
* - 3.62.0 - Add AMDGPU_IDS_FLAGS_MODE_PF, AMDGPU_IDS_FLAGS_MODE_VF & AMDGPU_IDS_FLAGS_MODE_PT
* - 3.63.0 - GFX12 display DCC supports 256B max compressed block size
+ * - 3.64.0 - Userq IP support query
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 63
+#define KMS_DRIVER_MINOR 64
#define KMS_DRIVER_PATCHLEVEL 0
/*
@@ -140,6 +143,7 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_ENABLE_EXP_RESETS = BIT(5),
AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6),
AMDGPU_DEBUG_SMU_POOL = BIT(7),
+ AMDGPU_DEBUG_VM_USERPTR = BIT(8),
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -176,7 +180,7 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu;
char *amdgpu_virtual_display;
-bool enforce_isolation;
+int amdgpu_enforce_isolation = -1;
int amdgpu_modeset = -1;
/* Specifies the default granularity for SVM, used in buffer
@@ -238,6 +242,8 @@ int amdgpu_agp = -1; /* auto */
int amdgpu_wbrf = -1;
int amdgpu_damage_clips = -1; /* auto */
int amdgpu_umsch_mm_fwlog;
+int amdgpu_rebar = -1; /* auto */
+int amdgpu_user_queue = -1;
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
@@ -1033,11 +1039,13 @@ module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444);
/**
- * DOC: enforce_isolation (bool)
- * enforce process isolation between graphics and compute via using the same reserved vmid.
+ * DOC: enforce_isolation (int)
+ * enforce process isolation between graphics and compute.
+ * (-1 = auto, 0 = disable, 1 = enable, 2 = enable legacy mode, 3 = enable without cleaner shader)
*/
-module_param(enforce_isolation, bool, 0444);
-MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute . enforce_isolation = on");
+module_param_named(enforce_isolation, amdgpu_enforce_isolation, int, 0444);
+MODULE_PARM_DESC(enforce_isolation,
+"enforce process isolation between graphics and compute. (-1 = auto, 0 = disable, 1 = enable, 2 = enable legacy mode, 3 = enable without cleaner shader)");
/**
* DOC: modeset (int)
@@ -1096,6 +1104,28 @@ MODULE_PARM_DESC(wbrf,
"Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto(default)");
module_param_named(wbrf, amdgpu_wbrf, int, 0444);
+/**
+ * DOC: rebar (int)
+ * Allow BAR resizing. Disable this to prevent the driver from attempting
+ * to resize the BAR if the GPU supports it and there is available MMIO space.
+ * Note that this just prevents the driver from resizing the BAR. The BIOS
+ * may have already resized the BAR at boot time.
+ */
+MODULE_PARM_DESC(rebar, "Resizable BAR (-1 = auto (default), 0 = disable, 1 = enable)");
+module_param_named(rebar, amdgpu_rebar, int, 0444);
+
+/**
+ * DOC: user_queue (int)
+ * Enable user queues on systems that support user queues. Possible values:
+ *
+ * - -1 = auto (ASIC specific default)
+ * - 0 = user queues disabled
+ * - 1 = user queues enabled and kernel queues enabled (if supported)
+ * - 2 = user queues enabled and kernel queues disabled
+ */
+MODULE_PARM_DESC(user_queue, "Enable user queues (-1 = auto (default), 0 = disable, 1 = enable, 2 = enable UQs and disable KQs)");
+module_param_named(user_queue, amdgpu_user_queue, int, 0444);
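
Both new integer options follow the usual amdgpu module-parameter conventions, so (with illustrative values) they can be set on the kernel command line:

    amdgpu.rebar=0 amdgpu.user_queue=2

or at module load time, e.g. modprobe amdgpu user_queue=2.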
+
/* These devices are not supported by amdgpu.
* They are supported by the mach64, r128, radeon drivers
*/
@@ -2244,6 +2274,10 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: use vram for smu pool\n");
adev->pm.smu_debug_mask |= SMU_DEBUG_POOL_USE_VRAM;
}
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_VM_USERPTR) {
+ pr_info("debug: VM mode debug for userptr is enabled\n");
+ adev->debug_vm_userptr = true;
+ }
}
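
Since AMDGPU_DEBUG_VM_USERPTR is BIT(8), the userptr check added to update_invalid_user_pages() earlier in this patch can be turned on with amdgpu.debug_mask=0x100 on the kernel command line (assuming the pre-existing debug_mask module parameter; the value only illustrates the bit position).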
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2700,6 +2734,29 @@ static int amdgpu_runtime_idle_check_display(struct device *dev)
return 0;
}
+static int amdgpu_runtime_idle_check_userq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0;
+
+ mutex_lock(&adev->userq_mutex);
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+done:
+ mutex_unlock(&adev->userq_mutex);
+
+ return ret;
+}
+
static int amdgpu_pmops_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2715,6 +2772,9 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
ret = amdgpu_runtime_idle_check_display(dev);
if (ret)
return ret;
+ ret = amdgpu_runtime_idle_check_userq(dev);
+ if (ret)
+ return ret;
/* wait for all rings to drain before suspending */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -2836,12 +2896,30 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
}
ret = amdgpu_runtime_idle_check_display(dev);
+ if (ret)
+ goto done;
+ ret = amdgpu_runtime_idle_check_userq(dev);
+done:
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
return ret;
}
+static int amdgpu_drm_release(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+
+ if (fpriv) {
+ fpriv->evf_mgr.fd_closing = true;
+ amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
+ amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
+ }
+
+ return drm_release(inode, filp);
+}
+
long amdgpu_drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
@@ -2893,7 +2971,7 @@ static const struct file_operations amdgpu_driver_kms_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.flush = amdgpu_flush,
- .release = drm_release,
+ .release = amdgpu_drm_release,
.unlocked_ioctl = amdgpu_drm_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
@@ -2940,6 +3018,9 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct drm_driver amdgpu_kms_driver = {
@@ -3030,6 +3111,10 @@ static int __init amdgpu_init(void)
if (r)
goto error_fence;
+ r = amdgpu_userq_fence_slab_init();
+ if (r)
+ goto error_fence;
+
DRM_INFO("amdgpu kernel modesetting enabled.\n");
amdgpu_register_atpx_handler();
amdgpu_acpi_detect();
@@ -3061,6 +3146,7 @@ static void __exit amdgpu_exit(void)
amdgpu_acpi_release();
amdgpu_sync_fini();
amdgpu_fence_slab_fini();
+ amdgpu_userq_fence_slab_fini();
mmu_notifier_synchronize();
amdgpu_xcp_drv_release();
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
new file mode 100644
index 000000000000..73b629b5f56f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/sched.h>
+#include <drm/drm_exec.h>
+#include "amdgpu.h"
+
+#define work_to_evf_mgr(w, name) container_of(w, struct amdgpu_eviction_fence_mgr, name)
+#define evf_mgr_to_fpriv(e) container_of(e, struct amdgpu_fpriv, evf_mgr)
+
+static const char *
+amdgpu_eviction_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "amdgpu_eviction_fence";
+}
+
+static const char *
+amdgpu_eviction_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_eviction_fence *ef;
+
+ ef = container_of(f, struct amdgpu_eviction_fence, base);
+ return ef->timeline_name;
+}
+
+int
+amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct drm_exec *exec)
+{
+ struct amdgpu_eviction_fence *old_ef, *new_ef;
+ struct drm_gem_object *obj;
+ unsigned long index;
+ int ret;
+
+ if (evf_mgr->ev_fence &&
+ !dma_fence_is_signaled(&evf_mgr->ev_fence->base))
+ return 0;
+ /*
+ * Steps to replace eviction fence:
+ * * lock all objects in exec (caller)
+ * * create a new eviction fence
+ * * update new eviction fence in evf_mgr
+ * * attach the new eviction fence to BOs
+ * * release the old fence
+ * * unlock the objects (caller)
+ */
+ new_ef = amdgpu_eviction_fence_create(evf_mgr);
+ if (!new_ef) {
+ DRM_ERROR("Failed to create new eviction fence\n");
+ return -ENOMEM;
+ }
+
+ /* Update the eviction fence now */
+ spin_lock(&evf_mgr->ev_fence_lock);
+ old_ef = evf_mgr->ev_fence;
+ evf_mgr->ev_fence = new_ef;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ /* Attach the new fence */
+ drm_exec_for_each_locked_object(exec, index, obj) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ if (!bo)
+ continue;
+ ret = amdgpu_eviction_fence_attach(evf_mgr, bo);
+ if (ret) {
+ DRM_ERROR("Failed to attch new eviction fence\n");
+ goto free_err;
+ }
+ }
+
+ /* Free old fence */
+ if (old_ef)
+ dma_fence_put(&old_ef->base);
+ return 0;
+
+free_err:
+ kfree(new_ef);
+ return ret;
+}
+
+static void
+amdgpu_eviction_fence_suspend_worker(struct work_struct *work)
+{
+ struct amdgpu_eviction_fence_mgr *evf_mgr = work_to_evf_mgr(work, suspend_work.work);
+ struct amdgpu_fpriv *fpriv = evf_mgr_to_fpriv(evf_mgr);
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_eviction_fence *ev_fence;
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ ev_fence = evf_mgr->ev_fence;
+ if (!ev_fence)
+ goto unlock;
+
+ amdgpu_userq_evict(uq_mgr, ev_fence);
+
+unlock:
+ mutex_unlock(&uq_mgr->userq_mutex);
+}
+
+static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f)
+{
+ struct amdgpu_eviction_fence_mgr *evf_mgr;
+ struct amdgpu_eviction_fence *ev_fence;
+
+ if (!f)
+ return true;
+
+ ev_fence = to_ev_fence(f);
+ evf_mgr = ev_fence->evf_mgr;
+
+ schedule_delayed_work(&evf_mgr->suspend_work, 0);
+ return true;
+}
+
+static const struct dma_fence_ops amdgpu_eviction_fence_ops = {
+ .use_64bit_seqno = true,
+ .get_driver_name = amdgpu_eviction_fence_get_driver_name,
+ .get_timeline_name = amdgpu_eviction_fence_get_timeline_name,
+ .enable_signaling = amdgpu_eviction_fence_enable_signaling,
+};
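
Because .enable_signaling schedules the suspend worker, simply waiting on an eviction fence is what drives user-queue eviction; a minimal illustration (not driver code):

    static void example_wait(struct amdgpu_eviction_fence *ev_fence)
    {
            /* enable_signaling runs first and schedules suspend_work,
             * which in turn calls amdgpu_userq_evict() for this fd */
            dma_fence_wait(&ev_fence->base, false);
    }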
+
+void amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_eviction_fence *ev_fence)
+{
+ spin_lock(&evf_mgr->ev_fence_lock);
+ dma_fence_signal(&ev_fence->base);
+ spin_unlock(&evf_mgr->ev_fence_lock);
+}
+
+struct amdgpu_eviction_fence *
+amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+ ev_fence = kzalloc(sizeof(*ev_fence), GFP_KERNEL);
+ if (!ev_fence)
+ return NULL;
+
+ ev_fence->evf_mgr = evf_mgr;
+ get_task_comm(ev_fence->timeline_name, current);
+ spin_lock_init(&ev_fence->lock);
+ dma_fence_init(&ev_fence->base, &amdgpu_eviction_fence_ops,
+ &ev_fence->lock, evf_mgr->ev_fence_ctx,
+ atomic_inc_return(&evf_mgr->ev_fence_seq));
+ return ev_fence;
+}
+
+void amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+ /* Wait for any pending work to execute */
+ flush_delayed_work(&evf_mgr->suspend_work);
+
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ if (!ev_fence)
+ return;
+
+ dma_fence_wait(&ev_fence->base, false);
+
+ /* Last unref of ev_fence */
+ dma_fence_put(&ev_fence->base);
+}
+
+int amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+ struct dma_resv *resv = bo->tbo.base.resv;
+ int ret;
+
+ if (!resv)
+ return 0;
+
+ ret = dma_resv_reserve_fences(resv, 1);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Failed to resv fence space\n");
+ return ret;
+ }
+
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ if (ev_fence)
+ dma_resv_add_fence(resv, &ev_fence->base, DMA_RESV_USAGE_BOOKKEEP);
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ return 0;
+}
+
+void amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo)
+{
+ struct dma_fence *stub = dma_fence_get_stub();
+
+ dma_resv_replace_fences(bo->tbo.base.resv, evf_mgr->ev_fence_ctx,
+ stub, DMA_RESV_USAGE_BOOKKEEP);
+ dma_fence_put(stub);
+}
+
+int amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ /* This needs to be done one time per open */
+ atomic_set(&evf_mgr->ev_fence_seq, 0);
+ evf_mgr->ev_fence_ctx = dma_fence_context_alloc(1);
+ spin_lock_init(&evf_mgr->ev_fence_lock);
+
+ INIT_DELAYED_WORK(&evf_mgr->suspend_work, amdgpu_eviction_fence_suspend_worker);
+ return 0;
+}
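For orientation, the helpers above compose into a per-fd lifecycle roughly like the following sketch (hypothetical caller, not part of this patch; locking and error paths elided):

/* Sketch: expected lifecycle of the eviction fence manager for one open fd. */
static int example_evf_lifecycle(struct amdgpu_fpriv *fpriv, struct amdgpu_bo *bo)
{
	struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
	struct amdgpu_eviction_fence *ev_fence;
	int r;

	amdgpu_eviction_fence_init(evf_mgr);		/* once per open() */

	ev_fence = amdgpu_eviction_fence_create(evf_mgr);
	if (!ev_fence)
		return -ENOMEM;
	evf_mgr->ev_fence = ev_fence;			/* published under ev_fence_lock in practice */

	r = amdgpu_eviction_fence_attach(evf_mgr, bo);	/* adds a BOOKKEEP fence to the BO's resv */
	if (r)
		return r;

	/*
	 * On eviction, enable_signaling() schedules suspend_work, which
	 * suspends the user queues; signal/replace then swap in a new fence.
	 */
	amdgpu_eviction_fence_detach(evf_mgr, bo);
	evf_mgr->fd_closing = true;
	amdgpu_eviction_fence_destroy(evf_mgr);		/* flushes work, drops the last ref */
	return 0;
}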
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h
new file mode 100644
index 000000000000..fcd867b7147d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_EV_FENCE_H_
+#define AMDGPU_EV_FENCE_H_
+
+struct amdgpu_eviction_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+ char timeline_name[TASK_COMM_LEN];
+ struct amdgpu_eviction_fence_mgr *evf_mgr;
+};
+
+struct amdgpu_eviction_fence_mgr {
+ u64 ev_fence_ctx;
+ atomic_t ev_fence_seq;
+ spinlock_t ev_fence_lock;
+ struct amdgpu_eviction_fence *ev_fence;
+ struct delayed_work suspend_work;
+ uint8_t fd_closing;
+};
+
+/* Eviction fence helper functions */
+struct amdgpu_eviction_fence *
+amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+void
+amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+int
+amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo);
+
+void
+amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo);
+
+int
+amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+void
+amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_eviction_fence *ev_fence);
+
+int
+amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct drm_exec *exec);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 69429df09477..2c68118fe9fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,6 +36,7 @@
#include <drm/drm_exec.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_tt.h>
+#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_display.h"
@@ -44,6 +45,114 @@
#include "amdgpu_xgmi.h"
#include "amdgpu_vm.h"
+static int
+amdgpu_gem_add_input_fence(struct drm_file *filp,
+ uint64_t syncobj_handles_array,
+ uint32_t num_syncobj_handles)
+{
+ struct dma_fence *fence;
+ uint32_t *syncobj_handles;
+ int ret, i;
+
+ if (!num_syncobj_handles)
+ return 0;
+
+ syncobj_handles = memdup_user(u64_to_user_ptr(syncobj_handles_array),
+ sizeof(uint32_t) * num_syncobj_handles);
+ if (IS_ERR(syncobj_handles))
+ return PTR_ERR(syncobj_handles);
+
+ for (i = 0; i < num_syncobj_handles; i++) {
+
+ if (!syncobj_handles[i]) {
+ ret = -EINVAL;
+ goto free_memdup;
+ }
+
+ ret = drm_syncobj_find_fence(filp, syncobj_handles[i], 0, 0, &fence);
+ if (ret)
+ goto free_memdup;
+
+ dma_fence_wait(fence, false);
+
+ /* TODO: optimize async handling */
+ dma_fence_put(fence);
+ }
+
+free_memdup:
+ kfree(syncobj_handles);
+ return ret;
+}
+
+static int
+amdgpu_gem_update_timeline_node(struct drm_file *filp,
+ uint32_t syncobj_handle,
+ uint64_t point,
+ struct drm_syncobj **syncobj,
+ struct dma_fence_chain **chain)
+{
+ if (!syncobj_handle)
+ return 0;
+
+ /* Find the sync object */
+ *syncobj = drm_syncobj_find(filp, syncobj_handle);
+ if (!*syncobj)
+ return -ENOENT;
+
+ if (!point)
+ return 0;
+
+ /* Allocate the chain node */
+ *chain = dma_fence_chain_alloc();
+ if (!*chain) {
+ drm_syncobj_put(*syncobj);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+amdgpu_gem_update_bo_mapping(struct drm_file *filp,
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation,
+ uint64_t point,
+ struct dma_fence *fence,
+ struct drm_syncobj *syncobj,
+ struct dma_fence_chain *chain)
+{
+ struct amdgpu_bo *bo = bo_va ? bo_va->base.bo : NULL;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct dma_fence *last_update;
+
+ if (!syncobj)
+ return;
+
+ /* Find the last update fence */
+ switch (operation) {
+ case AMDGPU_VA_OP_MAP:
+ case AMDGPU_VA_OP_REPLACE:
+ if (bo && (bo->tbo.base.resv == vm->root.bo->tbo.base.resv))
+ last_update = vm->last_update;
+ else
+ last_update = bo_va->last_pt_update;
+ break;
+ case AMDGPU_VA_OP_UNMAP:
+ case AMDGPU_VA_OP_CLEAR:
+ last_update = fence;
+ break;
+ default:
+ return;
+ }
+
+ /* Add fence to timeline */
+ if (!point)
+ drm_syncobj_replace_fence(syncobj, last_update);
+ else
+ drm_syncobj_add_point(syncobj, chain, last_update, point);
+}
+
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
@@ -184,6 +293,15 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
bo_va = amdgpu_vm_bo_add(adev, vm, abo);
else
++bo_va->ref_count;
+
+ /* attach gfx eviction fence */
+ r = amdgpu_eviction_fence_attach(&fpriv->evf_mgr, abo);
+ if (r) {
+ DRM_DEBUG_DRIVER("Failed to attach eviction fence to BO\n");
+ amdgpu_bo_unreserve(abo);
+ return r;
+ }
+
amdgpu_bo_unreserve(abo);
/* Validate and add eviction fence to DMABuf imports with dynamic
@@ -247,6 +365,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
goto out_unlock;
}
+ if (!amdgpu_vm_is_bo_always_valid(vm, bo))
+ amdgpu_eviction_fence_detach(&fpriv->evf_mgr, bo);
+
bo_va = amdgpu_vm_bo_find(vm, bo);
if (!bo_va || --bo_va->ref_count)
goto out_unlock;
@@ -321,10 +442,6 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
uint32_t handle, initial_domain;
int r;
- /* reject DOORBELLs until userspace code to use it is available */
- if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL)
- return -EINVAL;
-
/* reject invalid gem flags */
if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
@@ -638,18 +755,23 @@ out:
*
* Update the bo_va directly after setting its address. Errors are not
* vital here, so they are not reported back to userspace.
+ *
+ * Returns the resulting fence if freed BO(s) were cleared from the PT,
+ * otherwise a stub fence in case of error.
*/
-static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_bo_va *bo_va,
- uint32_t operation)
+static struct dma_fence *
+amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation)
{
+ struct dma_fence *fence = dma_fence_get_stub();
int r;
if (!amdgpu_vm_ready(vm))
- return;
+ return fence;
- r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ r = amdgpu_vm_clear_freed(adev, vm, &fence);
if (r)
goto error;
@@ -665,6 +787,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
error:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
+
+ return fence;
}
/**
@@ -713,6 +837,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_bo *abo;
struct amdgpu_bo_va *bo_va;
+ struct drm_syncobj *timeline_syncobj = NULL;
+ struct dma_fence_chain *timeline_chain = NULL;
+ struct dma_fence *fence;
struct drm_exec exec;
uint64_t va_flags;
uint64_t vm_size;
@@ -774,6 +901,12 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
abo = NULL;
}
+ r = amdgpu_gem_add_input_fence(filp,
+ args->input_fence_syncobj_handles,
+ args->num_syncobj_handles);
+ if (r)
+ goto error_put_gobj;
+
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
@@ -802,6 +935,14 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
bo_va = NULL;
}
+ r = amdgpu_gem_update_timeline_node(filp,
+ args->vm_timeline_syncobj_out,
+ args->vm_timeline_point,
+ &timeline_syncobj,
+ &timeline_chain);
+ if (r)
+ goto error;
+
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
@@ -827,12 +968,24 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
default:
break;
}
- if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm)
- amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
- args->operation);
+ if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm) {
+ fence = amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
+ args->operation);
+
+ if (timeline_syncobj)
+ amdgpu_gem_update_bo_mapping(filp, bo_va,
+ args->operation,
+ args->vm_timeline_point,
+ fence, timeline_syncobj,
+ timeline_chain);
+ else
+ dma_fence_put(fence);
+
+ }
error:
drm_exec_fini(&exec);
+error_put_gobj:
drm_gem_object_put(gobj);
return r;
}
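From userspace, the new fields consumed by this ioctl would be filled in roughly as below (an illustrative sketch only; the struct and ioctl names follow the amdgpu uAPI, and the four new field names are taken from the args-> accesses in this hunk):

/* Sketch: VA map with one input syncobj and a timeline point out. */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int example_gem_va_map(int fd, uint32_t bo_handle, uint64_t gpu_va,
			      uint64_t size, uint32_t in_syncobj,
			      uint32_t timeline_syncobj, uint64_t point)
{
	struct drm_amdgpu_gem_va va;

	memset(&va, 0, sizeof(va));
	va.handle = bo_handle;
	va.operation = AMDGPU_VA_OP_MAP;
	va.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
	va.va_address = gpu_va;
	va.offset_in_bo = 0;
	va.map_size = size;
	/* new in this series: wait on these syncobjs before the VA update */
	va.input_fence_syncobj_handles = (uintptr_t)&in_syncobj;
	va.num_syncobj_handles = 1;
	/* new in this series: signal this timeline point once the update lands */
	va.vm_timeline_syncobj_out = timeline_syncobj;
	va.vm_timeline_point = point;

	return ioctl(fd, DRM_IOCTL_AMDGPU_GEM_VA, &va) ? -errno : 0;
}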
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index cf2df7790077..1db1e6ec0184 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -33,6 +33,7 @@
#include "amdgpu_reset.h"
#include "amdgpu_xcp.h"
#include "amdgpu_xgmi.h"
+#include "nvd.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
@@ -74,14 +75,15 @@ bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
}
-int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
- int me, int pipe, int queue)
+static int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
+ int me, int pipe, int queue)
{
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
int bit = 0;
bit += me * adev->gfx.me.num_pipe_per_me
- * adev->gfx.me.num_queue_per_pipe;
- bit += pipe * adev->gfx.me.num_queue_per_pipe;
+ * num_queue_per_pipe;
+ bit += pipe * num_queue_per_pipe;
bit += queue;
return bit;
@@ -238,8 +240,8 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
int i, queue, pipe;
bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
- int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
- adev->gfx.me.num_queue_per_pipe;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
+ int max_queues_per_me = adev->gfx.me.num_pipe_per_me * num_queue_per_pipe;
if (multipipe_policy) {
/* policy: amdgpu owns the first queue per pipe at this stage
@@ -247,9 +249,9 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
for (i = 0; i < max_queues_per_me; i++) {
pipe = i % adev->gfx.me.num_pipe_per_me;
queue = (i / adev->gfx.me.num_pipe_per_me) %
- adev->gfx.me.num_queue_per_pipe;
+ num_queue_per_pipe;
- set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
+ set_bit(pipe * num_queue_per_pipe + queue,
adev->gfx.me.queue_bitmap);
}
} else {
@@ -258,8 +260,9 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
}
/* update the number of active graphics rings */
- adev->gfx.num_gfx_rings =
- bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
+ if (adev->gfx.num_gfx_rings)
+ adev->gfx.num_gfx_rings =
+ bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
@@ -1351,6 +1354,10 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
int mode;
+ /* Only minimal precaution taken to reject requests while in reset. */
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
AMDGPU_XCP_FL_NONE);
@@ -1394,8 +1401,14 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
return -EINVAL;
}
+ /* Don't allow a switch while under reset */
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return -EPERM;
+
ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
+ up_read(&adev->reset_domain->sem);
+
if (ret)
return ret;
@@ -1466,6 +1479,8 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
goto err;
job->enforce_isolation = true;
+ /* always run the cleaner shader */
+ job->run_cleaner_shader = true;
ib = &job->ibs[0];
for (i = 0; i <= ring->funcs->align_mask; ++i)
@@ -1552,6 +1567,9 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
+ if (adev->gfx.disable_kq)
+ return -EPERM;
+
ret = kstrtol(buf, 0, &value);
if (ret)
@@ -1594,7 +1612,8 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
* Provides the sysfs read interface to get the current settings of the 'enforce_isolation'
* feature for each GPU partition. Reading from the 'enforce_isolation'
* sysfs file returns the isolation settings for all partitions, where '0'
- * indicates disabled and '1' indicates enabled.
+ * indicates disabled, '1' indicates enabled, '2' indicates enabled in legacy
+ * mode, and '3' indicates enabled without the cleaner shader.
*
* Return: The number of bytes read from the sysfs file.
*/
@@ -1629,9 +1648,12 @@ static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
* @count: The size of the input data
*
* This function allows control over the 'enforce_isolation' feature, which
- * serializes access to the graphics engine. Writing '1' or '0' to the
- * 'enforce_isolation' sysfs file enables or disables process isolation for
- * each partition. The input should specify the setting for all partitions.
+ * serializes access to the graphics engine. Writing '0', '1', '2' or '3' to
+ * the 'enforce_isolation' sysfs file sets the isolation mode for each
+ * partition: '0' disables isolation, '1' enables isolation with the cleaner
+ * shader, '2' enables legacy isolation without the cleaner shader, and '3'
+ * enables process isolation without submitting the cleaner shader. The input
+ * should specify the setting for all partitions.
*
* Return: The number of bytes written to the sysfs file.
*/
@@ -1668,13 +1690,34 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
return -EINVAL;
for (i = 0; i < num_partitions; i++) {
- if (partition_values[i] != 0 && partition_values[i] != 1)
+ if (partition_values[i] != 0 &&
+ partition_values[i] != 1 &&
+ partition_values[i] != 2 &&
+ partition_values[i] != 3)
return -EINVAL;
}
mutex_lock(&adev->enforce_isolation_mutex);
- for (i = 0; i < num_partitions; i++)
- adev->enforce_isolation[i] = partition_values[i];
+ for (i = 0; i < num_partitions; i++) {
+ switch (partition_values[i]) {
+ case 0:
+ default:
+ adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
+ break;
+ case 1:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE;
+ break;
+ case 2:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
+ break;
+ case 3:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
+ break;
+ }
+ }
mutex_unlock(&adev->enforce_isolation_mutex);
amdgpu_mes_update_enforce_isolation(adev);
@@ -1923,39 +1966,41 @@ void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
bool enable)
{
- mutex_lock(&adev->gfx.kfd_sch_mutex);
+ mutex_lock(&adev->gfx.userq_sch_mutex);
if (enable) {
/* If the count is already 0, it means there's an imbalance bug somewhere.
* Note that the bug may be in a different caller than the one which triggers the
* WARN_ON_ONCE.
*/
- if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0)) {
+ if (WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx] == 0)) {
dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
goto unlock;
}
- adev->gfx.kfd_sch_req_count[idx]--;
+ adev->gfx.userq_sch_req_count[idx]--;
- if (adev->gfx.kfd_sch_req_count[idx] == 0 &&
- adev->gfx.kfd_sch_inactive[idx]) {
+ if (adev->gfx.userq_sch_req_count[idx] == 0 &&
+ adev->gfx.userq_sch_inactive[idx]) {
schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx]));
}
} else {
- if (adev->gfx.kfd_sch_req_count[idx] == 0) {
+ if (adev->gfx.userq_sch_req_count[idx] == 0) {
cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
- if (!adev->gfx.kfd_sch_inactive[idx]) {
- amdgpu_amdkfd_stop_sched(adev, idx);
- adev->gfx.kfd_sch_inactive[idx] = true;
+ if (!adev->gfx.userq_sch_inactive[idx]) {
+ amdgpu_userq_stop_sched_for_enforce_isolation(adev, idx);
+ if (adev->kfd.init_complete)
+ amdgpu_amdkfd_stop_sched(adev, idx);
+ adev->gfx.userq_sch_inactive[idx] = true;
}
}
- adev->gfx.kfd_sch_req_count[idx]++;
+ adev->gfx.userq_sch_req_count[idx]++;
}
unlock:
- mutex_unlock(&adev->gfx.kfd_sch_mutex);
+ mutex_unlock(&adev->gfx.userq_sch_mutex);
}
/**
@@ -2000,12 +2045,13 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
msecs_to_jiffies(1));
} else {
/* Tell KFD to resume the runqueue */
- if (adev->kfd.init_complete) {
- WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]);
- WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]);
+ WARN_ON_ONCE(!adev->gfx.userq_sch_inactive[idx]);
+ WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx]);
+
+ amdgpu_userq_start_sched_for_enforce_isolation(adev, idx);
+ if (adev->kfd.init_complete)
amdgpu_amdkfd_start_sched(adev, idx);
- adev->gfx.kfd_sch_inactive[idx] = false;
- }
+ adev->gfx.userq_sch_inactive[idx] = false;
}
mutex_unlock(&adev->enforce_isolation_mutex);
}
@@ -2029,7 +2075,7 @@ amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
bool wait = false;
mutex_lock(&adev->enforce_isolation_mutex);
- if (adev->enforce_isolation[idx]) {
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
/* set the initial values if nothing is set */
if (!adev->gfx.enforce_isolation_jiffies[idx]) {
adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
@@ -2096,7 +2142,7 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx);
mutex_lock(&adev->enforce_isolation_mutex);
- if (adev->enforce_isolation[idx]) {
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
if (adev->kfd.init_complete)
sched_work = true;
}
@@ -2133,7 +2179,7 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
return;
mutex_lock(&adev->enforce_isolation_mutex);
- if (adev->enforce_isolation[idx]) {
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
if (adev->kfd.init_complete)
sched_work = true;
}
@@ -2217,6 +2263,74 @@ void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring)
schedule_delayed_work(&ring->adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
}
+/**
+ * amdgpu_gfx_csb_preamble_start - Set CSB preamble start
+ *
+ * @buffer: Output buffer that receives the PACKET3 preamble setup.
+ *
+ * Return:
+ * The next free index in the buffer.
+ */
+u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
+{
+ u32 count = 0;
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ buffer[count++] = cpu_to_le32(0x80000000);
+ buffer[count++] = cpu_to_le32(0x80000000);
+
+ return count;
+}
+
+/**
+ * amdgpu_gfx_csb_data_parser - Parse CS data into the CSB buffer
+ *
+ * @adev: amdgpu_device pointer used to get the CS data and other gfx info.
+ * @buffer: Output buffer that receives the SET_CONTEXT_REG packets.
+ * @count: Index at which to start writing into the buffer.
+ *
+ * Return:
+ * The next free index in the buffer.
+ */
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count)
+{
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+ u32 i;
+
+ for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
+
+ for (i = 0; i < ext->reg_count; i++)
+ buffer[count++] = cpu_to_le32(ext->extent[i]);
+ }
+ }
+ }
+
+ return count;
+}
+
+/**
+ * amdgpu_gfx_csb_preamble_end - Set CSB preamble end
+ *
+ * @buffer: Output buffer that receives the PACKET3 preamble end.
+ * @count: Index at which to start writing the preamble end.
+ */
+void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count)
+{
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+ buffer[count++] = cpu_to_le32(0);
+}
+
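Taken together, these three helpers let a per-ASIC get_csb_buffer implementation shrink to something like the sketch below (illustrative only; real implementations may append ASIC-specific context registers before the preamble end):

/* Sketch: composing the CSB helpers into an IP-level get_csb_buffer. */
static void example_get_csb_buffer(struct amdgpu_device *adev,
				   volatile u32 *buffer)
{
	u32 count;

	if (adev->gfx.rlc.cs_data == NULL || buffer == NULL)
		return;

	count = amdgpu_gfx_csb_preamble_start(buffer);
	count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
	/* ASIC-specific SET_CONTEXT_REG writes would go here. */
	amdgpu_gfx_csb_preamble_end(buffer, count);
}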
/*
* debugfs to enable/disable gfx job submission to a specific core.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 87e862188766..08f268dab8f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -170,10 +170,46 @@ struct amdgpu_kiq {
#define AMDGPU_GFX_MAX_SE 4
#define AMDGPU_GFX_MAX_SH_PER_SE 2
+/**
+ * struct amdgpu_rb_config - Configure a single Render Backend (RB)
+ *
+ * Bad RBs are fused off; the driver reads a harvest register to determine
+ * which RB(s) are disabled so it can configure the hardware state to avoid
+ * sending work to them. There are also user harvest registers the driver
+ * can program to disable additional RBs, e.g. for testing purposes.
+ */
struct amdgpu_rb_config {
+ /**
+ * @rb_backend_disable:
+ *
+ * The value captured from register RB_BACKEND_DISABLE indicates if the
+ * RB backend is disabled or not.
+ */
uint32_t rb_backend_disable;
+
+ /**
+ * @user_rb_backend_disable:
+ *
+ * The value captured from register USER_RB_BACKEND_DISABLE indicates
+ * if the User RB backend is disabled or not.
+ */
uint32_t user_rb_backend_disable;
+
+ /**
+ * @raster_config:
+ *
+ * The raster configuration state is split across two registers; this
+ * field holds the first one.
+ */
uint32_t raster_config;
+
+ /**
+ * @raster_config_1:
+ *
+ * The raster configuration state is split across two registers; this
+ * field holds the second one.
+ */
uint32_t raster_config_1;
};
@@ -221,6 +257,13 @@ struct amdgpu_gfx_config {
uint32_t macrotile_mode_array[16];
struct gb_addr_config gb_addr_config_fields;
+
+ /**
+ * @rb_config:
+ *
+ * Matrix that keeps all the Render Backend (color and depth buffer
+ * handling) configuration on the 3D engine.
+ */
struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
/* gfx configure feature */
@@ -305,7 +348,8 @@ struct amdgpu_gfx_funcs {
void (*init_spm_golden)(struct amdgpu_device *adev);
void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable);
int (*get_gfx_shadow_info)(struct amdgpu_device *adev,
- struct amdgpu_gfx_shadow_info *shadow_info);
+ struct amdgpu_gfx_shadow_info *shadow_info,
+ bool skip_check);
enum amdgpu_gfx_partition
(*query_partition_mode)(struct amdgpu_device *adev);
int (*switch_partition_mode)(struct amdgpu_device *adev,
@@ -474,9 +518,9 @@ struct amdgpu_gfx {
bool enable_cleaner_shader;
struct amdgpu_isolation_work enforce_isolation[MAX_XCP];
/* Mutex for synchronizing KFD scheduler operations */
- struct mutex kfd_sch_mutex;
- u64 kfd_sch_req_count[MAX_XCP];
- bool kfd_sch_inactive[MAX_XCP];
+ struct mutex userq_sch_mutex;
+ u64 userq_sch_req_count[MAX_XCP];
+ bool userq_sch_inactive[MAX_XCP];
unsigned long enforce_isolation_jiffies[MAX_XCP];
unsigned long enforce_isolation_time[MAX_XCP];
@@ -484,6 +528,9 @@ struct amdgpu_gfx {
struct delayed_work idle_work;
bool workload_profile_active;
struct mutex workload_profile_mutex;
+
+ bool disable_kq;
+ bool disable_uq;
};
struct amdgpu_gfx_ras_reg_entry {
@@ -503,7 +550,7 @@ struct amdgpu_gfx_ras_mem_id_entry {
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id)))
#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid, xcc_id) ((adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid), (xcc_id)))
#define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev))
-#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si)))
+#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si), false))
/**
* amdgpu_gfx_create_bitmask - create a bitmask
@@ -550,8 +597,6 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
-int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
- int pipe, int queue);
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
@@ -597,6 +642,9 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work);
void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring);
+u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer);
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count);
+void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count);
void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev);
void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index ecb74ccf1d90..6b0fbbb91e57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -1230,6 +1230,10 @@ static ssize_t current_memory_partition_show(
struct amdgpu_device *adev = drm_to_adev(ddev);
enum amdgpu_memory_partition mode;
+ /* Only minimal precaution taken to reject requests while in reset */
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
if ((mode >= ARRAY_SIZE(nps_desc)) ||
(BIT(mode) & AMDGPU_ALL_NPS_MASK) != BIT(mode))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index bd7fc123b8f9..80fa29c26e9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -62,6 +62,9 @@
*/
#define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL
+/* XNACK flags */
+#define AMDGPU_GMC_XNACK_FLAG_CHAIN BIT(0)
+
struct firmware;
enum amdgpu_memory_partition {
@@ -301,6 +304,7 @@ struct amdgpu_gmc {
struct amdgpu_xgmi xgmi;
struct amdgpu_irq_src ecc_irq;
int noretry;
+ uint32_t xnack_flags;
uint32_t vmid0_page_table_block_size;
uint32_t vmid0_page_table_depth;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
index b6cf801939aa..6e02fb9ac2f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
@@ -22,6 +22,7 @@
*/
#include "amdgpu.h"
#include "amdgpu_ras.h"
+#include <uapi/linux/kfd_ioctl.h>
int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev)
{
@@ -46,3 +47,22 @@ int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev)
/* hdp ras follows amdgpu_ras_block_late_init_default for late init */
return 0;
}
+
+void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg) {
+ WREG32((adev->rmmio_remap.reg_offset +
+ KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ /* read back to ensure the flush write has been posted */
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
+ } else {
+ amdgpu_ring_emit_wreg(ring,
+ (adev->rmmio_remap.reg_offset +
+ KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ }
+}
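An IP-level HDP block can then point its function table at this generic helper instead of carrying a local copy; a minimal sketch (the callback name follows the existing struct amdgpu_hdp_funcs layout, assumed here):

/* Sketch: wiring the generic flush into a hypothetical per-IP funcs table. */
static const struct amdgpu_hdp_funcs example_hdp_funcs = {
	.flush_hdp = amdgpu_hdp_generic_flush,
	/* .invalidate_hdp and clock-gating hooks stay IP specific */
};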
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
index 7b8a6152dc8d..4cfd932b7e91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -44,4 +44,6 @@ struct amdgpu_hdp {
};
int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev);
+void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
#endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 2ea98ec60220..802743efa3b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -163,12 +163,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
init_shadow = false;
}
- if (!ring->sched.ready && !ring->is_mes_queue) {
+ if (!ring->sched.ready) {
dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
return -EINVAL;
}
- if (vm && !job->vmid && !ring->is_mes_queue) {
+ if (vm && !job->vmid) {
dev_err(adev->dev, "VM IB without ID\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 4c4e087230ac..5dd78a9cb12d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -576,8 +576,16 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
INIT_LIST_HEAD(&id_mgr->ids_lru);
id_mgr->reserved_use_count = 0;
- /* manage only VMIDs not used by KFD */
- id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
+ /* for GC <10, SDMA uses MMHUB so use first_kfd_vmid for both GC and MM */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0))
+ /* manage only VMIDs not used by KFD */
+ id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
+ else if (AMDGPU_IS_MMHUB0(i) ||
+ AMDGPU_IS_MMHUB1(i))
+ id_mgr->num_ids = 16;
+ else
+ /* manage only VMIDs not used by KFD */
+ id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
/* skip over VMID 0, since it is the system VM */
for (j = 1; j < id_mgr->num_ids; ++j) {
@@ -588,7 +596,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
}
/* alloc a default reserved vmid to enforce isolation */
for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
- if (adev->enforce_isolation[i])
+ if (adev->enforce_isolation[i] != AMDGPU_ENFORCE_ISOLATION_DISABLE)
amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 901f8b12c672..30f16968b578 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_ih.h"
+#include "amdgpu_reset.h"
/**
* amdgpu_ih_ring_init - initialize the IH state
@@ -227,13 +228,23 @@ restart_ih:
ih->rptr &= ih->ptr_mask;
}
- amdgpu_ih_set_rptr(adev, ih);
+ if (!ih->overflow)
+ amdgpu_ih_set_rptr(adev, ih);
+
wake_up_all(&ih->wait_process);
/* make sure wptr hasn't changed while processing */
wptr = amdgpu_ih_get_wptr(adev, ih);
if (wptr != ih->rptr)
- goto restart_ih;
+ if (!ih->overflow)
+ goto restart_ih;
+
+ if (ih->overflow)
+ if (amdgpu_sriov_runtime(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
return IRQ_HANDLED;
}
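The new ih->overflow flag is expected to be set by the IP-specific get_wptr callback when the hardware reports ring overflow; a hedged sketch of that contract follows (the overflow bit position is an assumption for illustration):

/* Sketch: a get_wptr implementation feeding ih->overflow (illustrative only). */
static u32 example_ih_get_wptr(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	u32 wptr = le32_to_cpu(*ih->wptr_cpu);

	if (wptr & BIT(31)) {		/* assumed overflow bit for this sketch */
		ih->overflow = true;	/* amdgpu_ih_process() then skips the rptr update */
		wptr &= ~BIT(31);
	}
	return wptr & ih->ptr_mask;
}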
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index b0a88f92cd82..7f7ea046e209 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -72,6 +72,7 @@ struct amdgpu_ih_ring {
/* For waiting on IH processing at checkpoint. */
wait_queue_head_t wait_process;
uint64_t processed_timestamp;
+ bool overflow;
};
/* return true if time stamp t2 is after t1 with 48bit wrap around */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 19ce4da285e8..13c60cac4261 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -619,6 +619,10 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned int type)
{
+ /* When the RAS threshold is reached, the interrupt source may not be enabled. Return -EINVAL. */
+ if (amdgpu_ras_is_rma(adev))
+ return -EINVAL;
+
if (!adev->irq.installed)
return -ENOENT;
@@ -725,8 +729,8 @@ static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
*/
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
- adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
- &amdgpu_hw_irqdomain_ops, adev);
+ adev->irq.domain = irq_domain_create_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
+ &amdgpu_hw_irqdomain_ops, adev);
if (!adev->irq.domain) {
DRM_ERROR("GPU irq add domain failed\n");
return -ENODEV;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index ce6b9ba967ff..f2c049129661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -78,6 +78,7 @@ struct amdgpu_job {
/* enforce isolation */
bool enforce_isolation;
+ bool run_cleaner_shader;
uint32_t num_ibs;
struct amdgpu_ib ibs[];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 27bfe9c8af06..9fbb04aee97b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -45,6 +45,7 @@
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
#include "amd_pcie.h"
+#include "amdgpu_userq.h"
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
@@ -370,6 +371,26 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
return 0;
}
+static int amdgpu_userq_metadata_info_gfx(struct amdgpu_device *adev,
+ struct drm_amdgpu_info *info,
+ struct drm_amdgpu_info_uq_metadata_gfx *meta)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (adev->gfx.funcs->get_gfx_shadow_info) {
+ struct amdgpu_gfx_shadow_info shadow = {};
+
+ adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow, true);
+ meta->shadow_size = shadow.shadow_size;
+ meta->shadow_alignment = shadow.shadow_alignment;
+ meta->csa_size = shadow.csa_size;
+ meta->csa_alignment = shadow.csa_alignment;
+ ret = 0;
+ }
+
+ return ret;
+}
+
static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
struct drm_amdgpu_info *info,
struct drm_amdgpu_info_hw_ip *result)
@@ -387,7 +408,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_GFX:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- if (adev->gfx.gfx_ring[i].sched.ready)
+ if (adev->gfx.gfx_ring[i].sched.ready &&
+ !adev->gfx.gfx_ring[i].no_user_submission)
++num_rings;
ib_start_alignment = 32;
ib_size_alignment = 32;
@@ -395,7 +417,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_COMPUTE:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- if (adev->gfx.compute_ring[i].sched.ready)
+ if (adev->gfx.compute_ring[i].sched.ready &&
+ !adev->gfx.compute_ring[i].no_user_submission)
++num_rings;
ib_start_alignment = 32;
ib_size_alignment = 32;
@@ -403,7 +426,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_DMA:
type = AMD_IP_BLOCK_TYPE_SDMA;
for (i = 0; i < adev->sdma.num_instances; i++)
- if (adev->sdma.instance[i].ring.sched.ready)
+ if (adev->sdma.instance[i].ring.sched.ready &&
+ !adev->sdma.instance[i].ring.no_user_submission)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
@@ -414,7 +438,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->uvd.harvest_config & (1 << i))
continue;
- if (adev->uvd.inst[i].ring.sched.ready)
+ if (adev->uvd.inst[i].ring.sched.ready &&
+ !adev->uvd.inst[i].ring.no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -423,7 +448,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
for (i = 0; i < adev->vce.num_rings; i++)
- if (adev->vce.ring[i].sched.ready)
+ if (adev->vce.ring[i].sched.ready &&
+ !adev->vce.ring[i].no_user_submission)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
@@ -435,7 +461,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->uvd.num_enc_rings; j++)
- if (adev->uvd.inst[i].ring_enc[j].sched.ready)
+ if (adev->uvd.inst[i].ring_enc[j].sched.ready &&
+ !adev->uvd.inst[i].ring_enc[j].no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -447,7 +474,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->vcn.harvest_config & (1 << i))
continue;
- if (adev->vcn.inst[i].ring_dec.sched.ready)
+ if (adev->vcn.inst[i].ring_dec.sched.ready &&
+ !adev->vcn.inst[i].ring_dec.no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -460,7 +488,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->vcn.inst[i].num_enc_rings; j++)
- if (adev->vcn.inst[i].ring_enc[j].sched.ready)
+ if (adev->vcn.inst[i].ring_enc[j].sched.ready &&
+ !adev->vcn.inst[i].ring_enc[j].no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -475,7 +504,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->jpeg.num_jpeg_rings; j++)
- if (adev->jpeg.inst[i].ring_dec[j].sched.ready)
+ if (adev->jpeg.inst[i].ring_dec[j].sched.ready &&
+ !adev->jpeg.inst[i].ring_dec[j].no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -483,7 +513,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
break;
case AMDGPU_HW_IP_VPE:
type = AMD_IP_BLOCK_TYPE_VPE;
- if (adev->vpe.ring.sched.ready)
+ if (adev->vpe.ring.sched.ready &&
+ !adev->vpe.ring.no_user_submission)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
@@ -978,6 +1009,8 @@ out:
}
}
+ dev_info->userq_ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+
ret = copy_to_user(out, dev_info,
min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
kfree(dev_info);
@@ -1293,6 +1326,22 @@ out:
return copy_to_user(out, &gpuvm_fault,
min((size_t)size, sizeof(gpuvm_fault))) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_UQ_FW_AREAS: {
+ struct drm_amdgpu_info_uq_metadata meta_info = {};
+
+ switch (info->query_hw_ip.type) {
+ case AMDGPU_HW_IP_GFX:
+ ret = amdgpu_userq_metadata_info_gfx(adev, info, &meta_info.gfx);
+ if (ret)
+ return ret;
+
+ ret = copy_to_user(out, &meta_info,
+ min((size_t)size, sizeof(meta_info))) ? -EFAULT : 0;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
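For completeness, the new query would be driven from userspace along these lines (a sketch under the uAPI names visible in this hunk, assuming the same headers as the earlier GEM_VA sketch; error handling elided):

/* Sketch: querying the user-queue fw area sizes for the GFX IP. */
static int example_query_uq_fw_areas(int fd, struct drm_amdgpu_info_uq_metadata *meta)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	memset(meta, 0, sizeof(*meta));
	request.return_pointer = (uintptr_t)meta;
	request.return_size = sizeof(*meta);
	request.query = AMDGPU_INFO_UQ_FW_AREAS;
	request.query_hw_ip.type = AMDGPU_HW_IP_GFX;

	/* on success, meta->gfx.shadow_size / csa_size hold the fw area sizes */
	return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request) ? -errno : 0;
}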
@@ -1376,6 +1425,14 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
mutex_init(&fpriv->bo_list_lock);
idr_init_base(&fpriv->bo_list_handles, 1);
+ r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, file_priv, adev);
+ if (r)
+ DRM_WARN("Can't setup usermode queues, use legacy workload submission only\n");
+
+ r = amdgpu_eviction_fence_init(&fpriv->evf_mgr);
+ if (r)
+ goto error_vm;
+
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);
file_priv->driver_priv = fpriv;
@@ -1445,6 +1502,11 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
amdgpu_bo_unreserve(pd);
}
+ if (!fpriv->evf_mgr.fd_closing) {
+ fpriv->evf_mgr.fd_closing = true;
+ amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
+ amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
+ }
amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
amdgpu_vm_fini(adev, &fpriv->vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index fb212f0a1136..2febb63ab232 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -39,42 +39,6 @@ int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
PAGE_SIZE);
}
-static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
- int ip_type, uint64_t *doorbell_index)
-{
- unsigned int offset, found;
- struct amdgpu_mes *mes = &adev->mes;
-
- if (ip_type == AMDGPU_RING_TYPE_SDMA)
- offset = adev->doorbell_index.sdma_engine[0];
- else
- offset = 0;
-
- found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
- if (found >= mes->num_mes_dbs) {
- DRM_WARN("No doorbell available\n");
- return -ENOSPC;
- }
-
- set_bit(found, mes->doorbell_bitmap);
-
- /* Get the absolute doorbell index on BAR */
- *doorbell_index = mes->db_start_dw_offset + found * 2;
- return 0;
-}
-
-static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
- uint32_t doorbell_index)
-{
- unsigned int old, rel_index;
- struct amdgpu_mes *mes = &adev->mes;
-
- /* Find the relative index of the doorbell in this object */
- rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
- old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
- WARN_ON(!old);
-}
-
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
int i;
@@ -126,7 +90,7 @@ static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
int amdgpu_mes_init(struct amdgpu_device *adev)
{
- int i, r;
+ int i, r, num_pipes;
adev->mes.adev = adev;
@@ -142,19 +106,52 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
adev->mes.vmid_mask_mmhub = 0xffffff00;
- adev->mes.vmid_mask_gfxhub = 0xffffff00;
+ adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xfffffffe : 0xffffff00;
+
+ num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me;
+ if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES)
+ dev_warn(adev->dev, "more gfx pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_GFX_PIPES);
+
+ for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) {
+ if (i >= num_pipes)
+ break;
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
+ IP_VERSION(12, 0, 0))
+ /*
+ * GFX V12 has only one GFX pipe, but 8 queues in it.
+ * GFX pipe 0 queue 0 is used by the kernel queue.
+ * Set GFX pipe 0 queues 1-7 for MES scheduling:
+ * mask = 1111 1110b
+ */
+ adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0xFF : 0xFE;
+ else
+ /*
+ * GFX pipe 0 queue 0 is used by the kernel queue.
+ * Set GFX pipe 0 queue 1 for MES scheduling:
+ * mask = 10b
+ */
+ adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0x3 : 0x2;
+ }
+
+ num_pipes = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec;
+ if (num_pipes > AMDGPU_MES_MAX_COMPUTE_PIPES)
+ dev_warn(adev->dev, "more compute pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_COMPUTE_PIPES);
for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
- if (i >= (adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec))
+ if (i >= num_pipes)
break;
- adev->mes.compute_hqd_mask[i] = 0xc;
+ adev->mes.compute_hqd_mask[i] = adev->gfx.disable_kq ? 0xF : 0xC;
}
- for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
- adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;
+ num_pipes = adev->sdma.num_instances;
+ if (num_pipes > AMDGPU_MES_MAX_SDMA_PIPES)
+ dev_warn(adev->dev, "more SDMA pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_SDMA_PIPES);
for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
- if (i >= adev->sdma.num_instances)
+ if (i >= num_pipes)
break;
adev->mes.sdma_hqd_mask[i] = 0xfc;
}
@@ -240,244 +237,6 @@ void amdgpu_mes_fini(struct amdgpu_device *adev)
mutex_destroy(&adev->mes.mutex_hidden);
}
-static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
-{
- amdgpu_bo_free_kernel(&q->mqd_obj,
- &q->mqd_gpu_addr,
- &q->mqd_cpu_ptr);
-}
-
-int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
- struct amdgpu_vm *vm)
-{
- struct amdgpu_mes_process *process;
- int r;
-
- /* allocate the mes process buffer */
- process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
- if (!process) {
- DRM_ERROR("no more memory to create mes process\n");
- return -ENOMEM;
- }
-
- /* allocate the process context bo and map it */
- r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &process->proc_ctx_bo,
- &process->proc_ctx_gpu_addr,
- &process->proc_ctx_cpu_ptr);
- if (r) {
- DRM_ERROR("failed to allocate process context bo\n");
- goto clean_up_memory;
- }
- memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- /* add the mes process to idr list */
- r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
- GFP_KERNEL);
- if (r < 0) {
- DRM_ERROR("failed to lock pasid=%d\n", pasid);
- goto clean_up_ctx;
- }
-
- INIT_LIST_HEAD(&process->gang_list);
- process->vm = vm;
- process->pasid = pasid;
- process->process_quantum = adev->mes.default_process_quantum;
- process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);
-
- amdgpu_mes_unlock(&adev->mes);
- return 0;
-
-clean_up_ctx:
- amdgpu_mes_unlock(&adev->mes);
- amdgpu_bo_free_kernel(&process->proc_ctx_bo,
- &process->proc_ctx_gpu_addr,
- &process->proc_ctx_cpu_ptr);
-clean_up_memory:
- kfree(process);
- return r;
-}
-
-void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
-{
- struct amdgpu_mes_process *process;
- struct amdgpu_mes_gang *gang, *tmp1;
- struct amdgpu_mes_queue *queue, *tmp2;
- struct mes_remove_queue_input queue_input;
- unsigned long flags;
- int r;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- process = idr_find(&adev->mes.pasid_idr, pasid);
- if (!process) {
- DRM_WARN("pasid %d doesn't exist\n", pasid);
- amdgpu_mes_unlock(&adev->mes);
- return;
- }
-
- /* Remove all queues from hardware */
- list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
- list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
- idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-
- queue_input.doorbell_offset = queue->doorbell_off;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
-
- r = adev->mes.funcs->remove_hw_queue(&adev->mes,
- &queue_input);
- if (r)
- DRM_WARN("failed to remove hardware queue\n");
- }
-
- idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
- }
-
- idr_remove(&adev->mes.pasid_idr, pasid);
- amdgpu_mes_unlock(&adev->mes);
-
- /* free all memory allocated by the process */
- list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
- /* free all queues in the gang */
- list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
- amdgpu_mes_queue_free_mqd(queue);
- list_del(&queue->list);
- kfree(queue);
- }
- amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
- list_del(&gang->list);
- kfree(gang);
-
- }
- amdgpu_bo_free_kernel(&process->proc_ctx_bo,
- &process->proc_ctx_gpu_addr,
- &process->proc_ctx_cpu_ptr);
- kfree(process);
-}
-
-int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
- struct amdgpu_mes_gang_properties *gprops,
- int *gang_id)
-{
- struct amdgpu_mes_process *process;
- struct amdgpu_mes_gang *gang;
- int r;
-
- /* allocate the mes gang buffer */
- gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
- if (!gang) {
- return -ENOMEM;
- }
-
- /* allocate the gang context bo and map it to cpu space */
- r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
- if (r) {
- DRM_ERROR("failed to allocate process context bo\n");
- goto clean_up_mem;
- }
- memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- process = idr_find(&adev->mes.pasid_idr, pasid);
- if (!process) {
- DRM_ERROR("pasid %d doesn't exist\n", pasid);
- r = -EINVAL;
- goto clean_up_ctx;
- }
-
- /* add the mes gang to idr list */
- r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
- GFP_KERNEL);
- if (r < 0) {
- DRM_ERROR("failed to allocate idr for gang\n");
- goto clean_up_ctx;
- }
-
- gang->gang_id = r;
- *gang_id = r;
-
- INIT_LIST_HEAD(&gang->queue_list);
- gang->process = process;
- gang->priority = gprops->priority;
- gang->gang_quantum = gprops->gang_quantum ?
- gprops->gang_quantum : adev->mes.default_gang_quantum;
- gang->global_priority_level = gprops->global_priority_level;
- gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
- list_add_tail(&gang->list, &process->gang_list);
-
- amdgpu_mes_unlock(&adev->mes);
- return 0;
-
-clean_up_ctx:
- amdgpu_mes_unlock(&adev->mes);
- amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
-clean_up_mem:
- kfree(gang);
- return r;
-}
-
-int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
-{
- struct amdgpu_mes_gang *gang;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- gang = idr_find(&adev->mes.gang_id_idr, gang_id);
- if (!gang) {
- DRM_ERROR("gang id %d doesn't exist\n", gang_id);
- amdgpu_mes_unlock(&adev->mes);
- return -EINVAL;
- }
-
- if (!list_empty(&gang->queue_list)) {
- DRM_ERROR("queue list is not empty\n");
- amdgpu_mes_unlock(&adev->mes);
- return -EBUSY;
- }
-
- idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
- list_del(&gang->list);
- amdgpu_mes_unlock(&adev->mes);
-
- amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
-
- kfree(gang);
-
- return 0;
-}
-
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
struct mes_suspend_gang_input input;
@@ -526,304 +285,6 @@ int amdgpu_mes_resume(struct amdgpu_device *adev)
return r;
}
-static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
- struct amdgpu_mes_queue *q,
- struct amdgpu_mes_queue_properties *p)
-{
- struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
- u32 mqd_size = mqd_mgr->mqd_size;
- int r;
-
- r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &q->mqd_obj,
- &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
- if (r) {
- dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
- return r;
- }
- memset(q->mqd_cpu_ptr, 0, mqd_size);
-
- r = amdgpu_bo_reserve(q->mqd_obj, false);
- if (unlikely(r != 0))
- goto clean_up;
-
- return 0;
-
-clean_up:
- amdgpu_bo_free_kernel(&q->mqd_obj,
- &q->mqd_gpu_addr,
- &q->mqd_cpu_ptr);
- return r;
-}
-
-static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
- struct amdgpu_mes_queue *q,
- struct amdgpu_mes_queue_properties *p)
-{
- struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
- struct amdgpu_mqd_prop mqd_prop = {0};
-
- mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
- mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
- mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
- mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
- mqd_prop.queue_size = p->queue_size;
- mqd_prop.use_doorbell = true;
- mqd_prop.doorbell_index = p->doorbell_off;
- mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
- mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
- mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
- mqd_prop.hqd_active = false;
-
- if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
- p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
- mutex_lock(&adev->srbm_mutex);
- amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
- }
-
- mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
-
- if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
- p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
- amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- }
-
- amdgpu_bo_unreserve(q->mqd_obj);
-}
-
-int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
- struct amdgpu_mes_queue_properties *qprops,
- int *queue_id)
-{
- struct amdgpu_mes_queue *queue;
- struct amdgpu_mes_gang *gang;
- struct mes_add_queue_input queue_input;
- unsigned long flags;
- int r;
-
- memset(&queue_input, 0, sizeof(struct mes_add_queue_input));
-
- /* allocate the mes queue buffer */
- queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
- if (!queue) {
- DRM_ERROR("Failed to allocate memory for queue\n");
- return -ENOMEM;
- }
-
- /* Allocate the queue mqd */
- r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
- if (r)
- goto clean_up_memory;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- gang = idr_find(&adev->mes.gang_id_idr, gang_id);
- if (!gang) {
- DRM_ERROR("gang id %d doesn't exist\n", gang_id);
- r = -EINVAL;
- goto clean_up_mqd;
- }
-
- /* add the mes gang to idr list */
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
- r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
- GFP_ATOMIC);
- if (r < 0) {
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- goto clean_up_mqd;
- }
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- *queue_id = queue->queue_id = r;
-
- /* allocate a doorbell index for the queue */
- r = amdgpu_mes_kernel_doorbell_get(adev,
- qprops->queue_type,
- &qprops->doorbell_off);
- if (r)
- goto clean_up_queue_id;
-
- /* initialize the queue mqd */
- amdgpu_mes_queue_init_mqd(adev, queue, qprops);
-
- /* add hw queue to mes */
- queue_input.process_id = gang->process->pasid;
-
- queue_input.page_table_base_addr =
- adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
- adev->gmc.vram_start;
-
- queue_input.process_va_start = 0;
- queue_input.process_va_end =
- (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
- queue_input.process_quantum = gang->process->process_quantum;
- queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
- queue_input.gang_quantum = gang->gang_quantum;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
- queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
- queue_input.gang_global_priority_level = gang->global_priority_level;
- queue_input.doorbell_offset = qprops->doorbell_off;
- queue_input.mqd_addr = queue->mqd_gpu_addr;
- queue_input.wptr_addr = qprops->wptr_gpu_addr;
- queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
- queue_input.queue_type = qprops->queue_type;
- queue_input.paging = qprops->paging;
- queue_input.is_kfd_process = 0;
-
- r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
- if (r) {
- DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
- qprops->doorbell_off);
- goto clean_up_doorbell;
- }
-
- DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
- "queue type=%d, doorbell=0x%llx\n",
- gang->process->pasid, gang_id, qprops->queue_type,
- qprops->doorbell_off);
-
- queue->ring = qprops->ring;
- queue->doorbell_off = qprops->doorbell_off;
- queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
- queue->queue_type = qprops->queue_type;
- queue->paging = qprops->paging;
- queue->gang = gang;
- queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
- list_add_tail(&queue->list, &gang->queue_list);
-
- amdgpu_mes_unlock(&adev->mes);
- return 0;
-
-clean_up_doorbell:
- amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
-clean_up_queue_id:
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
- idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-clean_up_mqd:
- amdgpu_mes_unlock(&adev->mes);
- amdgpu_mes_queue_free_mqd(queue);
-clean_up_memory:
- kfree(queue);
- return r;
-}
-
-int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
-{
- unsigned long flags;
- struct amdgpu_mes_queue *queue;
- struct amdgpu_mes_gang *gang;
- struct mes_remove_queue_input queue_input;
- int r;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- /* remove the mes gang from idr list */
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
-
- queue = idr_find(&adev->mes.queue_id_idr, queue_id);
- if (!queue) {
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- amdgpu_mes_unlock(&adev->mes);
- DRM_ERROR("queue id %d doesn't exist\n", queue_id);
- return -EINVAL;
- }
-
- idr_remove(&adev->mes.queue_id_idr, queue_id);
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-
- DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
- queue->doorbell_off);
-
- gang = queue->gang;
- queue_input.doorbell_offset = queue->doorbell_off;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
-
- r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
- if (r)
- DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
- queue_id);
-
- list_del(&queue->list);
- amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
- amdgpu_mes_unlock(&adev->mes);
-
- amdgpu_mes_queue_free_mqd(queue);
- kfree(queue);
- return 0;
-}
-
-int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
-{
- unsigned long flags;
- struct amdgpu_mes_queue *queue;
- struct amdgpu_mes_gang *gang;
- struct mes_reset_queue_input queue_input;
- int r;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- /* remove the mes gang from idr list */
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
-
- queue = idr_find(&adev->mes.queue_id_idr, queue_id);
- if (!queue) {
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- amdgpu_mes_unlock(&adev->mes);
- DRM_ERROR("queue id %d doesn't exist\n", queue_id);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-
- DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n",
- queue->doorbell_off);
-
- gang = queue->gang;
- queue_input.doorbell_offset = queue->doorbell_off;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
-
- r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
- if (r)
- DRM_ERROR("failed to reset hardware queue, queue id = %d\n",
- queue_id);
-
- amdgpu_mes_unlock(&adev->mes);
-
- return 0;
-}
-
-int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
- int me_id, int pipe_id, int queue_id, int vmid)
-{
- struct mes_reset_queue_input queue_input;
- int r;
-
- queue_input.queue_type = queue_type;
- queue_input.use_mmio = true;
- queue_input.me_id = me_id;
- queue_input.pipe_id = pipe_id;
- queue_input.queue_id = queue_id;
- queue_input.vmid = vmid;
- r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
- if (r)
- DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
- queue_id);
- return r;
-}
-
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
@@ -874,7 +335,7 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
unsigned int vmid,
bool use_mmio)
{
- struct mes_reset_legacy_queue_input queue_input;
+ struct mes_reset_queue_input queue_input;
int r;
memset(&queue_input, 0, sizeof(queue_input));
@@ -888,8 +349,11 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
queue_input.wptr_addr = ring->wptr_gpu_addr;
queue_input.vmid = vmid;
queue_input.use_mmio = use_mmio;
+ queue_input.is_kq = true;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_GFX)
+ queue_input.legacy_gfx = true;
- r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
+ r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
if (r)
DRM_ERROR("failed to reset legacy queue\n");
@@ -905,7 +369,7 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
uint32_t *read_val_ptr;
if (amdgpu_device_wb_get(adev, &addr_offset)) {
- DRM_ERROR("critical bug! too many mes readers\n");
+ dev_err(adev->dev, "critical bug! too many mes readers\n");
goto error;
}
read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
@@ -915,13 +379,13 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
op_input.read_reg.buffer_addr = read_val_gpu_addr;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes rreg is not supported!\n");
+ dev_err(adev->dev, "mes rreg is not supported!\n");
goto error;
}
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to read reg (0x%x)\n", reg);
+ dev_err(adev->dev, "failed to read reg (0x%x)\n", reg);
else
val = *(read_val_ptr);
@@ -942,14 +406,14 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev,
op_input.write_reg.reg_value = val;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes wreg is not supported!\n");
+ dev_err(adev->dev, "mes wreg is not supported!\n");
r = -EINVAL;
goto error;
}
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to write reg (0x%x)\n", reg);
+ dev_err(adev->dev, "failed to write reg (0x%x)\n", reg);
error:
return r;
@@ -969,14 +433,14 @@ int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
op_input.wrm_reg.mask = mask;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
+ dev_err(adev->dev, "mes reg_write_reg_wait is not supported!\n");
r = -EINVAL;
goto error;
}
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to reg_write_reg_wait\n");
+ dev_err(adev->dev, "failed to reg_write_reg_wait\n");
error:
return r;
@@ -994,14 +458,14 @@ int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
op_input.wrm_reg.mask = mask;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes reg wait is not supported!\n");
+ dev_err(adev->dev, "mes reg wait is not supported!\n");
r = -EINVAL;
goto error;
}
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to reg_write_reg_wait\n");
+ dev_err(adev->dev, "failed to reg_write_reg_wait\n");
error:
return r;
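The rreg/wreg/reg_wait helpers above all share one pattern: build a misc-op input, bail out with -EINVAL if the backend does not provide the misc_op hook, then dispatch through the function table. A standalone model of that pattern, assuming nothing beyond what the hunks show; all demo_* names are illustrative stand-ins:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct demo_mes;

struct demo_op {
	uint32_t reg;
	uint32_t val;
};

struct demo_mes_funcs {
	int (*misc_op)(struct demo_mes *mes, const struct demo_op *op);
};

struct demo_mes {
	const struct demo_mes_funcs *funcs;
};

/* every wrapper checks for the hook first and fails with -EINVAL */
static int demo_mes_wreg(struct demo_mes *mes, uint32_t reg, uint32_t val)
{
	struct demo_op op = { .reg = reg, .val = val };

	if (!mes->funcs->misc_op) {
		fprintf(stderr, "mes wreg is not supported!\n");
		return -EINVAL;
	}
	return mes->funcs->misc_op(mes, &op);
}

static int demo_backend_op(struct demo_mes *mes, const struct demo_op *op)
{
	(void)mes;
	printf("write 0x%x to reg 0x%x\n", op->val, op->reg);
	return 0;
}

int main(void)
{
	const struct demo_mes_funcs funcs = { .misc_op = demo_backend_op };
	struct demo_mes mes = { .funcs = &funcs };

	return demo_mes_wreg(&mes, 0x1234, 0xdeadbeef) ? 1 : 0;
}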
@@ -1075,25 +539,6 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
return r;
}
-static void
-amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_mes_queue_properties *props)
-{
- props->queue_type = ring->funcs->type;
- props->hqd_base_gpu_addr = ring->gpu_addr;
- props->rptr_gpu_addr = ring->rptr_gpu_addr;
- props->wptr_gpu_addr = ring->wptr_gpu_addr;
- props->wptr_mc_addr =
- ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
- props->queue_size = ring->ring_size;
- props->eop_gpu_addr = ring->eop_gpu_addr;
- props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
- props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
- props->paging = false;
- props->ring = ring;
-}
-
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng) \
do { \
if (id_offs < AMDGPU_MES_CTX_MAX_OFFS) \
@@ -1130,453 +575,12 @@ int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
return -EINVAL;
}
-int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
- int queue_type, int idx,
- struct amdgpu_mes_ctx_data *ctx_data,
- struct amdgpu_ring **out)
-{
- struct amdgpu_ring *ring;
- struct amdgpu_mes_gang *gang;
- struct amdgpu_mes_queue_properties qprops = {0};
- int r, queue_id, pasid;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
- gang = idr_find(&adev->mes.gang_id_idr, gang_id);
- if (!gang) {
- DRM_ERROR("gang id %d doesn't exist\n", gang_id);
- amdgpu_mes_unlock(&adev->mes);
- return -EINVAL;
- }
- pasid = gang->process->pasid;
-
- ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
- if (!ring) {
- amdgpu_mes_unlock(&adev->mes);
- return -ENOMEM;
- }
-
- ring->ring_obj = NULL;
- ring->use_doorbell = true;
- ring->is_mes_queue = true;
- ring->mes_ctx = ctx_data;
- ring->idx = idx;
- ring->no_scheduler = true;
-
- if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
- int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- compute[ring->idx].mec_hpd);
- ring->eop_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- }
-
- switch (queue_type) {
- case AMDGPU_RING_TYPE_GFX:
- ring->funcs = adev->gfx.gfx_ring[0].funcs;
- ring->me = adev->gfx.gfx_ring[0].me;
- ring->pipe = adev->gfx.gfx_ring[0].pipe;
- break;
- case AMDGPU_RING_TYPE_COMPUTE:
- ring->funcs = adev->gfx.compute_ring[0].funcs;
- ring->me = adev->gfx.compute_ring[0].me;
- ring->pipe = adev->gfx.compute_ring[0].pipe;
- break;
- case AMDGPU_RING_TYPE_SDMA:
- ring->funcs = adev->sdma.instance[0].ring.funcs;
- break;
- default:
- BUG();
- }
-
- r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
- if (r) {
- amdgpu_mes_unlock(&adev->mes);
- goto clean_up_memory;
- }
-
- amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
-
- dma_fence_wait(gang->process->vm->last_update, false);
- dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
- amdgpu_mes_unlock(&adev->mes);
-
- r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
- if (r)
- goto clean_up_ring;
-
- ring->hw_queue_id = queue_id;
- ring->doorbell_index = qprops.doorbell_off;
-
- if (queue_type == AMDGPU_RING_TYPE_GFX)
- sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
- else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
- sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
- queue_id);
- else if (queue_type == AMDGPU_RING_TYPE_SDMA)
- sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
- queue_id);
- else
- BUG();
-
- *out = ring;
- return 0;
-
-clean_up_ring:
- amdgpu_ring_fini(ring);
-clean_up_memory:
- kfree(ring);
- return r;
-}
-
-void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring)
- return;
-
- amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
- timer_delete_sync(&ring->fence_drv.fallback_timer);
- amdgpu_ring_fini(ring);
- kfree(ring);
-}
-
uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
enum amdgpu_mes_priority_level prio)
{
return adev->mes.aggregated_doorbells[prio];
}
-int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- int r;
-
- r = amdgpu_bo_create_kernel(adev,
- sizeof(struct amdgpu_mes_ctx_meta_data),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &ctx_data->meta_data_obj,
- &ctx_data->meta_data_mc_addr,
- &ctx_data->meta_data_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
- return r;
- }
-
- if (!ctx_data->meta_data_obj)
- return -ENOMEM;
-
- memset(ctx_data->meta_data_ptr, 0,
- sizeof(struct amdgpu_mes_ctx_meta_data));
-
- return 0;
-}
-
-void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
-{
- if (ctx_data->meta_data_obj)
- amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
- &ctx_data->meta_data_mc_addr,
- &ctx_data->meta_data_ptr);
-}
-
-int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- struct amdgpu_bo_va *bo_va;
- struct amdgpu_sync sync;
- struct drm_exec exec;
- int r;
-
- amdgpu_sync_create(&sync);
-
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- r = drm_exec_lock_obj(&exec,
- &ctx_data->meta_data_obj->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto error_fini_exec;
-
- r = amdgpu_vm_lock_pd(vm, &exec, 0);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto error_fini_exec;
- }
-
- bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
- if (!bo_va) {
- DRM_ERROR("failed to create bo_va for meta data BO\n");
- r = -ENOMEM;
- goto error_fini_exec;
- }
-
- r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
- sizeof(struct amdgpu_mes_ctx_meta_data),
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
-
- if (r) {
- DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
- goto error_del_bo_va;
- }
-
- r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r) {
- DRM_ERROR("failed to do vm_bo_update on meta data\n");
- goto error_del_bo_va;
- }
- amdgpu_sync_fence(&sync, bo_va->last_pt_update, GFP_KERNEL);
-
- r = amdgpu_vm_update_pdes(adev, vm, false);
- if (r) {
- DRM_ERROR("failed to update pdes on meta data\n");
- goto error_del_bo_va;
- }
- amdgpu_sync_fence(&sync, vm->last_update, GFP_KERNEL);
-
- amdgpu_sync_wait(&sync, false);
- drm_exec_fini(&exec);
-
- amdgpu_sync_free(&sync);
- ctx_data->meta_data_va = bo_va;
- return 0;
-
-error_del_bo_va:
- amdgpu_vm_bo_del(adev, bo_va);
-
-error_fini_exec:
- drm_exec_fini(&exec);
- amdgpu_sync_free(&sync);
- return r;
-}
-
-int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
- struct amdgpu_bo *bo = ctx_data->meta_data_obj;
- struct amdgpu_vm *vm = bo_va->base.vm;
- struct dma_fence *fence;
- struct drm_exec exec;
- long r;
-
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- r = drm_exec_lock_obj(&exec,
- &ctx_data->meta_data_obj->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto out_unlock;
-
- r = amdgpu_vm_lock_pd(vm, &exec, 0);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto out_unlock;
- }
-
- amdgpu_vm_bo_del(adev, bo_va);
- if (!amdgpu_vm_ready(vm))
- goto out_unlock;
-
- r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
- &fence);
- if (r)
- goto out_unlock;
- if (fence) {
- amdgpu_bo_fence(bo, fence, true);
- fence = NULL;
- }
-
- r = amdgpu_vm_clear_freed(adev, vm, &fence);
- if (r || !fence)
- goto out_unlock;
-
- dma_fence_wait(fence, false);
- amdgpu_bo_fence(bo, fence, true);
- dma_fence_put(fence);
-
-out_unlock:
- if (unlikely(r < 0))
- dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
- drm_exec_fini(&exec);
-
- return r;
-}
-
-static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
- int pasid, int *gang_id,
- int queue_type, int num_queue,
- struct amdgpu_ring **added_rings,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- struct amdgpu_ring *ring;
- struct amdgpu_mes_gang_properties gprops = {0};
- int r, j;
-
- /* create a gang for the process */
- gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
- gprops.gang_quantum = adev->mes.default_gang_quantum;
- gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
- gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
- gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
-
- r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
- if (r) {
- DRM_ERROR("failed to add gang\n");
- return r;
- }
-
- /* create queues for the gang */
- for (j = 0; j < num_queue; j++) {
- r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
- ctx_data, &ring);
- if (r) {
- DRM_ERROR("failed to add ring\n");
- break;
- }
-
- DRM_INFO("ring %s was added\n", ring->name);
- added_rings[j] = ring;
- }
-
- return 0;
-}
-
-static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
-{
- struct amdgpu_ring *ring;
- int i, r;
-
- for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
- ring = added_rings[i];
- if (!ring)
- continue;
-
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
-
- r = amdgpu_ring_test_ib(ring, 1000 * 10);
- if (r) {
- DRM_DEV_ERROR(ring->adev->dev,
- "ring %s ib test failed (%d)\n",
- ring->name, r);
- return r;
- } else
- DRM_INFO("ring %s ib test pass\n", ring->name);
- }
-
- return 0;
-}
-
-int amdgpu_mes_self_test(struct amdgpu_device *adev)
-{
- struct amdgpu_vm *vm = NULL;
- struct amdgpu_mes_ctx_data ctx_data = {0};
- struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
- int gang_ids[3] = {0};
- int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
- { AMDGPU_RING_TYPE_COMPUTE, 1 },
- { AMDGPU_RING_TYPE_SDMA, 1} };
- int i, r, pasid, k = 0;
-
- pasid = amdgpu_pasid_alloc(16);
- if (pasid < 0) {
- dev_warn(adev->dev, "No more PASIDs available!");
- pasid = 0;
- }
-
- vm = kzalloc(sizeof(*vm), GFP_KERNEL);
- if (!vm) {
- r = -ENOMEM;
- goto error_pasid;
- }
-
- r = amdgpu_vm_init(adev, vm, -1);
- if (r) {
- DRM_ERROR("failed to initialize vm\n");
- goto error_pasid;
- }
-
- r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
- if (r) {
- DRM_ERROR("failed to alloc ctx meta data\n");
- goto error_fini;
- }
-
- ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
- r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
- if (r) {
- DRM_ERROR("failed to map ctx meta data\n");
- goto error_vm;
- }
-
- r = amdgpu_mes_create_process(adev, pasid, vm);
- if (r) {
- DRM_ERROR("failed to create MES process\n");
- goto error_vm;
- }
-
- for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
- /* On GFX v10.3, fw hasn't supported to map sdma queue. */
- if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
- IP_VERSION(10, 3, 0) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) <
- IP_VERSION(11, 0, 0) &&
- queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
- continue;
-
- r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
- &gang_ids[i],
- queue_types[i][0],
- queue_types[i][1],
- &added_rings[k],
- &ctx_data);
- if (r)
- goto error_queues;
-
- k += queue_types[i][1];
- }
-
- /* start ring test and ib test for MES queues */
- amdgpu_mes_test_queues(added_rings);
-
-error_queues:
- /* remove all queues */
- for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
- if (!added_rings[i])
- continue;
- amdgpu_mes_remove_ring(adev, added_rings[i]);
- }
-
- for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
- if (!gang_ids[i])
- continue;
- amdgpu_mes_remove_gang(adev, gang_ids[i]);
- }
-
- amdgpu_mes_destroy_process(adev, pasid);
-
-error_vm:
- amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);
-
-error_fini:
- amdgpu_vm_fini(adev, vm);
-
-error_pasid:
- if (pasid)
- amdgpu_pasid_free(pasid);
-
- amdgpu_mes_ctx_free_meta_data(&ctx_data);
- kfree(vm);
- return 0;
-}
-
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
const struct mes_firmware_header_v1_0 *mes_hdr;
@@ -1705,7 +709,7 @@ int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
mutex_lock(&adev->enforce_isolation_mutex);
for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
- if (adev->enforce_isolation[i])
+ if (adev->enforce_isolation[i] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
else
r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index da2c9a8cb3e0..a41f65b4f733 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -111,8 +111,8 @@ struct amdgpu_mes {
uint32_t vmid_mask_gfxhub;
uint32_t vmid_mask_mmhub;
- uint32_t compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES];
uint32_t gfx_hqd_mask[AMDGPU_MES_MAX_GFX_PIPES];
+ uint32_t compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES];
uint32_t sdma_hqd_mask[AMDGPU_MES_MAX_SDMA_PIPES];
uint32_t aggregated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS];
uint32_t sch_ctx_offs[AMDGPU_MAX_MES_PIPES];
@@ -149,19 +149,6 @@ struct amdgpu_mes {
};
-struct amdgpu_mes_process {
- int pasid;
- struct amdgpu_vm *vm;
- uint64_t pd_gpu_addr;
- struct amdgpu_bo *proc_ctx_bo;
- uint64_t proc_ctx_gpu_addr;
- void *proc_ctx_cpu_ptr;
- uint64_t process_quantum;
- struct list_head gang_list;
- uint32_t doorbell_index;
- struct mutex doorbell_lock;
-};
-
struct amdgpu_mes_gang {
int gang_id;
int priority;
@@ -248,18 +235,6 @@ struct mes_remove_queue_input {
uint64_t gang_context_addr;
};
-struct mes_reset_queue_input {
- uint32_t doorbell_offset;
- uint64_t gang_context_addr;
- bool use_mmio;
- uint32_t queue_type;
- uint32_t me_id;
- uint32_t pipe_id;
- uint32_t queue_id;
- uint32_t xcc_id;
- uint32_t vmid;
-};
-
struct mes_map_legacy_queue_input {
uint32_t queue_type;
uint32_t doorbell_offset;
@@ -291,7 +266,7 @@ struct mes_resume_gang_input {
uint64_t gang_context_addr;
};
-struct mes_reset_legacy_queue_input {
+struct mes_reset_queue_input {
uint32_t queue_type;
uint32_t doorbell_offset;
bool use_mmio;
@@ -301,6 +276,8 @@ struct mes_reset_legacy_queue_input {
uint64_t mqd_addr;
uint64_t wptr_addr;
uint32_t vmid;
+ bool legacy_gfx;
+ bool is_kq;
};
enum mes_misc_opcode {
@@ -388,9 +365,6 @@ struct amdgpu_mes_funcs {
int (*misc_op)(struct amdgpu_mes *mes,
struct mes_misc_op_input *input);
- int (*reset_legacy_queue)(struct amdgpu_mes *mes,
- struct mes_reset_legacy_queue_input *input);
-
int (*reset_hw_queue)(struct amdgpu_mes *mes,
struct mes_reset_queue_input *input);
};
@@ -404,26 +378,9 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
int amdgpu_mes_init(struct amdgpu_device *adev);
void amdgpu_mes_fini(struct amdgpu_device *adev);
-int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
- struct amdgpu_vm *vm);
-void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid);
-
-int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
- struct amdgpu_mes_gang_properties *gprops,
- int *gang_id);
-int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id);
-
int amdgpu_mes_suspend(struct amdgpu_device *adev);
int amdgpu_mes_resume(struct amdgpu_device *adev);
-int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
- struct amdgpu_mes_queue_properties *qprops,
- int *queue_id);
-int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id);
-int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id);
-int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
- int me_id, int pipe_id, int queue_id, int vmid);
-
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
@@ -451,27 +408,10 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
bool trap_en);
int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
uint64_t process_context_addr);
-int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
- int queue_type, int idx,
- struct amdgpu_mes_ctx_data *ctx_data,
- struct amdgpu_ring **out);
-void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
- struct amdgpu_ring *ring);
uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
enum amdgpu_mes_priority_level prio);
-int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data);
-void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data);
-int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_mes_ctx_data *ctx_data);
-int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data);
-
-int amdgpu_mes_self_test(struct amdgpu_device *adev);
-
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev);
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 0b9987781f76..73403744331a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1044,7 +1044,8 @@ static const char * const amdgpu_vram_names[] = {
"GDDR6",
"DDR5",
"LPDDR4",
- "LPDDR5"
+ "LPDDR5",
+ "HBM3E"
};
/**
@@ -1644,7 +1645,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);
-
+	/* Dump the GEM object's reservation fences */
+ if (dma_resv_trylock(bo->tbo.base.resv)) {
+ dma_resv_describe(bo->tbo.base.resv, m);
+ dma_resv_unlock(bo->tbo.base.resv);
+ }
seq_puts(m, "\n");
return size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index df5d5dbd7f0f..e6f0b035e20b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2214,7 +2214,8 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
!psp->securedisplay_context.context.bin_desc.start_addr) {
- dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
+ dev_info(psp->adev->dev,
+ "SECUREDISPLAY: optional securedisplay ta ucode is not available\n");
return 0;
}
@@ -4185,6 +4186,110 @@ const struct attribute_group amdgpu_flash_attr_group = {
.is_visible = amdgpu_flash_attr_is_visible,
};
+#if defined(CONFIG_DEBUG_FS)
+static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet;
+ int ret;
+
+	/* serialize open() calls on this file */
+ if (!mutex_trylock(&adev->psp.mutex))
+ return -EBUSY;
+
+ /*
+	 * Make sure only one userspace process is alive for dumping so that
+	 * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE * 2 is consumed;
+	 * consider the case where one process tries opening the file while
+	 * another has already proceeded to read or release. This also removes
+	 * the need for a mutex in the read() and release() callbacks.
+ */
+ if (adev->psp.spirom_dump_trip) {
+ mutex_unlock(&adev->psp.mutex);
+ return -EBUSY;
+ }
+
+ bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
+ if (!bo_triplet) {
+ mutex_unlock(&adev->psp.mutex);
+ return -ENOMEM;
+ }
+
+ ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
+ AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &bo_triplet->bo,
+ &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+ if (ret)
+ goto rel_trip;
+
+ ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
+ if (ret)
+ goto rel_bo;
+
+ adev->psp.spirom_dump_trip = bo_triplet;
+ mutex_unlock(&adev->psp.mutex);
+ return 0;
+rel_bo:
+ amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+rel_trip:
+ kfree(bo_triplet);
+ mutex_unlock(&adev->psp.mutex);
+	dev_err(adev->dev, "IFWI dump failed, err = %d\n", ret);
+ return ret;
+}
+
+static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
+ loff_t *pos)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
+
+ if (!bo_triplet)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf,
+ size,
+ pos, bo_triplet->cpu_addr,
+ AMD_VBIOS_FILE_MAX_SIZE_B * 2);
+}
+
+static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
+
+ if (bo_triplet) {
+ amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+ kfree(bo_triplet);
+ }
+
+ adev->psp.spirom_dump_trip = NULL;
+ return 0;
+}
+
+static const struct file_operations psp_dump_spirom_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = psp_read_spirom_debugfs_open,
+ .read = psp_read_spirom_debugfs_read,
+ .release = psp_read_spirom_debugfs_release,
+ .llseek = default_llseek,
+};
+#endif
+
+void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+
+ debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
+ adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
+#endif
+}
+
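The debugfs node added above does the expensive work in open() (allocate the GTT buffer, ask the PSP to dump the SPI ROM) and hands the result out through simple_read_from_buffer(). A hedged userspace sketch of consuming it; the DRI minor number (0 here) and the node's availability on the running kernel are assumptions about the target system:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/psp_spirom_dump";
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);      /* open() kicks off the PSP dump */
	if (fd < 0) {
		perror("open");         /* EBUSY while another dump is live */
		return EXIT_FAILURE;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);                      /* release() frees the GTT buffer */
	return EXIT_SUCCESS;
}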
const struct amd_ip_funcs psp_ip_funcs = {
.name = "psp",
.early_init = psp_early_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 8d5acc415d38..428adc7f741d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -39,6 +39,18 @@
#define PSP_TMR_ALIGNMENT 0x100000
#define PSP_FW_NAME_LEN 0x24
+/* VBIOS gfl defines */
+#define MBOX_READY_MASK 0x80000000
+#define MBOX_STATUS_MASK 0x0000FFFF
+#define MBOX_COMMAND_MASK 0x00FF0000
+#define MBOX_READY_FLAG 0x80000000
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3
+#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4
+#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO 0xf
+#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI 0x10
+#define C2PMSG_CMD_SPI_GET_FLASH_IMAGE 0x11
+
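The mailbox defines above carve one 32-bit C2PMSG register into a ready flag, a command field, and a status field. A runnable demo of how such fields are typically extracted, reusing the masks verbatim; the register value itself is made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define DEMO_MBOX_READY_FLAG    0x80000000u
#define DEMO_MBOX_STATUS_MASK   0x0000FFFFu
#define DEMO_MBOX_COMMAND_MASK  0x00FF0000u

int main(void)
{
	uint32_t reg = 0x80040000u;  /* hypothetical C2PMSG readback */

	printf("ready=%u cmd=0x%x status=0x%x\n",
	       !!(reg & DEMO_MBOX_READY_FLAG),
	       (reg & DEMO_MBOX_COMMAND_MASK) >> 16,
	       reg & DEMO_MBOX_STATUS_MASK);
	return 0;
}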
extern const struct attribute_group amdgpu_flash_attr_group;
enum psp_shared_mem_size {
@@ -107,6 +119,7 @@ enum psp_reg_prog_id {
PSP_REG_IH_RB_CNTL = 0, /* register IH_RB_CNTL */
PSP_REG_IH_RB_CNTL_RING1 = 1, /* register IH_RB_CNTL_RING1 */
PSP_REG_IH_RB_CNTL_RING2 = 2, /* register IH_RB_CNTL_RING2 */
+ PSP_REG_MMHUB_L1_TLB_CNTL = 25,
PSP_REG_LAST
};
@@ -137,11 +150,14 @@ struct psp_funcs {
int (*load_usbc_pd_fw)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
+ int (*dump_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*vbflash_stat)(struct psp_context *psp);
int (*fatal_error_recovery_quirk)(struct psp_context *psp);
bool (*get_ras_capability)(struct psp_context *psp);
bool (*is_aux_sos_load_required)(struct psp_context *psp);
bool (*is_reload_needed)(struct psp_context *psp);
+ int (*reg_program_no_ring)(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id);
};
struct ta_funcs {
@@ -319,6 +335,14 @@ struct psp_runtime_scpm_entry {
enum psp_runtime_scpm_authentication scpm_status;
};
+#if defined(CONFIG_DEBUG_FS)
+struct spirom_bo {
+ struct amdgpu_bo *bo;
+ uint64_t mc_addr;
+ void *cpu_addr;
+};
+#endif
+
struct psp_context {
struct amdgpu_device *adev;
struct psp_ring km_ring;
@@ -406,6 +430,9 @@ struct psp_context {
char *vbflash_tmp_buf;
size_t vbflash_image_size;
bool vbflash_done;
+#if defined(CONFIG_DEBUG_FS)
+ struct spirom_bo *spirom_dump_trip;
+#endif
};
struct amdgpu_psp_funcs {
@@ -464,6 +491,10 @@ struct amdgpu_psp_funcs {
((psp)->funcs->update_spirom ? \
(psp)->funcs->update_spirom((psp), fw_pri_mc_addr) : -EINVAL)
+#define psp_dump_spirom(psp, fw_pri_mc_addr) \
+ ((psp)->funcs->dump_spirom ? \
+ (psp)->funcs->dump_spirom((psp), fw_pri_mc_addr) : -EINVAL)
+
#define psp_vbflash_status(psp) \
((psp)->funcs->vbflash_stat ? \
(psp)->funcs->vbflash_stat((psp)) : -EINVAL)
@@ -475,6 +506,10 @@ struct amdgpu_psp_funcs {
#define psp_is_aux_sos_load_required(psp) \
((psp)->funcs->is_aux_sos_load_required ? (psp)->funcs->is_aux_sos_load_required((psp)) : 0)
+#define psp_reg_program_no_ring(psp, val, id) \
+ ((psp)->funcs->reg_program_no_ring ? \
+ (psp)->funcs->reg_program_no_ring((psp), val, id) : -EINVAL)
+
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
@@ -569,5 +604,9 @@ bool amdgpu_psp_get_ras_capability(struct psp_context *psp);
int psp_config_sq_perfmon(struct psp_context *psp, uint32_t xcp_id,
bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable);
bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev);
+int amdgpu_psp_reg_program_no_ring(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id);
+void amdgpu_psp_debugfs_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 443409d4f4b0..dc07936d2fcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1498,6 +1498,9 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
!amdgpu_ras_get_aca_debug_mode(adev))
return -EOPNOTSUPP;
+ if (amdgpu_sriov_vf(adev))
+ return -EOPNOTSUPP;
+
/* skip ras error reset in gpu reset */
if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
((smu_funcs && smu_funcs->set_debug_mode) ||
@@ -2161,7 +2164,7 @@ void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
/* Fatal error events are handled on host side */
if (amdgpu_sriov_vf(adev))
return;
- /**
+ /*
* If the current interrupt is caused by a non-fatal RAS error, skip
* check for fatal error. For fatal errors, FED status of all devices
* in XGMI hive gets set when the first device gets fatal error
@@ -2886,6 +2889,7 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
return -EINVAL;
}
+
return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
adev->umc.retire_unit);
}
@@ -2900,7 +2904,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
&adev->psp.ras_context.ras->eeprom_control;
enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
int ret = 0;
- uint32_t i;
+ uint32_t i = 0;
if (!con || !con->eh_data || !bps || pages <= 0)
return 0;
@@ -2921,34 +2925,36 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
mutex_lock(&con->recovery_lock);
if (from_rom) {
- for (i = 0; i < pages; i++) {
- if (control->ras_num_recs - i >= adev->umc.retire_unit) {
- if ((bps[i].address == bps[i + 1].address) &&
- (bps[i].mem_channel == bps[i + 1].mem_channel)) {
- //deal with retire_unit records a time
- ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
- &bps[i], &err_data, nps);
- if (ret)
- goto free;
- i += (adev->umc.retire_unit - 1);
+		/* there are no PA recs in V3, so skip PA rec processing */
+ if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+ for (i = 0; i < pages; i++) {
+ if (control->ras_num_recs - i >= adev->umc.retire_unit) {
+ if ((bps[i].address == bps[i + 1].address) &&
+ (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+ /* deal with retire_unit records a time */
+ ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
+ &bps[i], &err_data, nps);
+ if (ret)
+ control->ras_num_bad_pages -= adev->umc.retire_unit;
+ i += (adev->umc.retire_unit - 1);
+ } else {
+ break;
+ }
} else {
break;
}
- } else {
- break;
}
}
for (; i < pages; i++) {
ret = __amdgpu_ras_convert_rec_from_rom(adev,
&bps[i], &err_data, nps);
if (ret)
- goto free;
+ control->ras_num_bad_pages -= adev->umc.retire_unit;
}
} else {
ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
}
-free:
if (from_rom)
kfree(err_data.err_addr);
mutex_unlock(&con->recovery_lock);
@@ -3037,21 +3043,28 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
dev_err(adev->dev, "Failed to load EEPROM table records!");
} else {
if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
- for (i = 0; i < control->ras_num_recs; i++) {
- if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
- if ((bps[i].address == bps[i + 1].address) &&
- (bps[i].mem_channel == bps[i + 1].mem_channel)) {
- control->ras_num_pa_recs += adev->umc.retire_unit;
- i += (adev->umc.retire_unit - 1);
+		/*
+		 * In V3 there are no PA recs, and some records (when
+		 * address == 0) may be misparsed as PA recs, so add a
+		 * version check to avoid that.
+		 */
+ if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+ for (i = 0; i < control->ras_num_recs; i++) {
+ if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
+ if ((bps[i].address == bps[i + 1].address) &&
+ (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+ control->ras_num_pa_recs += adev->umc.retire_unit;
+ i += (adev->umc.retire_unit - 1);
+ } else {
+ control->ras_num_mca_recs +=
+ (control->ras_num_recs - i);
+ break;
+ }
} else {
- control->ras_num_mca_recs +=
- (control->ras_num_recs - i);
+ control->ras_num_mca_recs += (control->ras_num_recs - i);
break;
}
- } else {
- control->ras_num_mca_recs += (control->ras_num_recs - i);
- break;
}
+ } else {
+ control->ras_num_mca_recs = control->ras_num_recs;
}
}
@@ -3460,6 +3473,10 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
control->ras_num_pa_recs = control->ras_num_recs;
+ if (adev->umc.ras &&
+ adev->umc.ras->get_retire_flip_bits)
+ adev->umc.ras->get_retire_flip_bits(adev);
+
if (control->ras_num_recs) {
ret = amdgpu_ras_load_bad_pages(adev);
if (ret)
@@ -3793,10 +3810,12 @@ init_ras_enabled_flag:
adev->ras_hw_enabled & amdgpu_ras_mask;
/* aca is disabled by default except for psp v13_0_6/v13_0_12/v13_0_14 */
- adev->aca.is_enabled =
- (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
- amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
- amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->aca.is_enabled =
+ (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
+ }
/* bad page feature is not applicable to specific app platform */
if (adev->gmc.is_app_apu &&
@@ -4479,8 +4498,11 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
u64 event_id;
- if (amdgpu_ras_mark_ras_event(adev, type))
+ if (amdgpu_ras_mark_ras_event(adev, type)) {
+ dev_err(adev->dev,
+ "uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n");
return;
+ }
event_id = amdgpu_ras_acquire_event_id(adev, type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 0ea7cfaf3587..2c58e09e56f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -418,6 +418,7 @@ static void amdgpu_ras_set_eeprom_table_version(struct amdgpu_ras_eeprom_control
hdr->version = RAS_TABLE_VER_V2_1;
return;
case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 5, 0):
hdr->version = RAS_TABLE_VER_V3;
return;
default:
@@ -1392,17 +1393,39 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
__decode_table_header_from_buf(hdr, buf);
- if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ if (hdr->header != RAS_TABLE_HDR_VAL &&
+ hdr->header != RAS_TABLE_HDR_BAD) {
+ dev_info(adev->dev, "Creating a new EEPROM table");
+ return amdgpu_ras_eeprom_reset_table(control);
+ }
+
+ switch (hdr->version) {
+ case RAS_TABLE_VER_V2_1:
+ case RAS_TABLE_VER_V3:
control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr);
control->ras_record_offset = RAS_RECORD_START_V2_1;
control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
- } else {
+ break;
+ case RAS_TABLE_VER_V1:
control->ras_num_recs = RAS_NUM_RECS(hdr);
control->ras_record_offset = RAS_RECORD_START;
control->ras_max_record_count = RAS_MAX_RECORD_COUNT;
+ break;
+ default:
+ dev_err(adev->dev,
+ "RAS header invalid, unsupported version: %u",
+ hdr->version);
+ return -EINVAL;
}
- control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
+ if (control->ras_num_recs > control->ras_max_record_count) {
+ dev_err(adev->dev,
+			"RAS header invalid, records in header: %u, max allowed: %u",
+ control->ras_num_recs, control->ras_max_record_count);
+ return -EINVAL;
+ }
+
+ control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
control->ras_num_mca_recs = 0;
control->ras_num_pa_recs = 0;
return 0;
@@ -1413,7 +1436,7 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
struct amdgpu_device *adev = to_amdgpu_device(control);
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- int res;
+ int res = 0;
if (!__is_ras_eeprom_supported(adev))
return 0;
@@ -1494,10 +1517,6 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
"User defined threshold is set, runtime service will be halt when threshold is reached\n");
}
}
- } else {
- DRM_INFO("Creating a new EEPROM table");
-
- res = amdgpu_ras_eeprom_reset_table(control);
}
return res < 0 ? res : 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 59acdbfe28d8..426834806fbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -187,14 +187,10 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
}
#define amdgpu_ring_get_gpu_addr(ring, offset) \
- (ring->is_mes_queue ? \
- (ring->mes_ctx->meta_data_gpu_addr + offset) : \
- (ring->adev->wb.gpu_addr + offset * 4))
+ (ring->adev->wb.gpu_addr + offset * 4)
#define amdgpu_ring_get_cpu_addr(ring, offset) \
- (ring->is_mes_queue ? \
- (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
- (&ring->adev->wb.wb[offset]))
+ (&ring->adev->wb.wb[offset])
/**
* amdgpu_ring_init - init driver ring struct.
@@ -243,57 +239,42 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->sched_score = sched_score;
ring->vmid_wait = dma_fence_get_stub();
- if (!ring->is_mes_queue) {
- ring->idx = adev->num_rings++;
- adev->rings[ring->idx] = ring;
- }
+ ring->idx = adev->num_rings++;
+ adev->rings[ring->idx] = ring;
r = amdgpu_fence_driver_init_ring(ring);
if (r)
return r;
}
- if (ring->is_mes_queue) {
- ring->rptr_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_RPTR_OFFS);
- ring->wptr_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_WPTR_OFFS);
- ring->fence_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_FENCE_OFFS);
- ring->trail_fence_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_TRAIL_FENCE_OFFS);
- ring->cond_exe_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_COND_EXE_OFFS);
- } else {
- r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->fence_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->fence_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
+ return r;
}
ring->fence_gpu_addr =
@@ -353,18 +334,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->cached_rptr = 0;
/* Allocate ring buffer */
- if (ring->is_mes_queue) {
- int offset = 0;
-
- BUG_ON(ring->ring_size > PAGE_SIZE*4);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_RING_OFFS);
- ring->gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ring->ring = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- amdgpu_ring_clear_ring(ring);
-
- } else if (ring->ring_obj == NULL) {
+ if (ring->ring_obj == NULL) {
r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&ring->ring_obj,
@@ -401,32 +371,26 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
/* Not to finish a ring which is not initialized */
- if (!(ring->adev) ||
- (!ring->is_mes_queue && !(ring->adev->rings[ring->idx])))
+ if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
return;
ring->sched.ready = false;
- if (!ring->is_mes_queue) {
- amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
- amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
- amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
- amdgpu_device_wb_free(ring->adev, ring->fence_offs);
+ amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
+ amdgpu_device_wb_free(ring->adev, ring->fence_offs);
- amdgpu_bo_free_kernel(&ring->ring_obj,
- &ring->gpu_addr,
- (void **)&ring->ring);
- } else {
- kfree(ring->fence_drv.fences);
- }
+ amdgpu_bo_free_kernel(&ring->ring_obj,
+ &ring->gpu_addr,
+ (void **)&ring->ring);
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
ring->me = 0;
- if (!ring->is_mes_queue)
- ring->adev->rings[ring->idx] = NULL;
+ ring->adev->rings[ring->idx] = NULL;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index bb2b66385223..b95b47110769 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -164,8 +164,24 @@ void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
+ /**
+ * @type:
+ *
+ * GFX, Compute, SDMA, UVD, VCE, VCN, VPE, KIQ, MES, UMSCH, and CPER
+ * use ring buffers. The type field just identifies which component the
+ * ring buffer is associated with.
+ */
enum amdgpu_ring_type type;
uint32_t align_mask;
+
+ /**
+ * @nop:
+ *
+	 * Every block in amdgpu has its own no-op instruction (e.g., GFX 10
+ * uses PACKET3(PACKET3_NOP, 0x3FFF), VCN 5 uses VCN_ENC_CMD_NO_OP,
+ * etc). This field receives the specific no-op for the component
+ * that initializes the ring.
+ */
u32 nop;
bool support_64bit_ptrs;
bool no_user_fence;
@@ -241,6 +257,9 @@ struct amdgpu_ring_funcs {
bool (*is_guilty)(struct amdgpu_ring *ring);
};
+/**
+ * struct amdgpu_ring - Holds ring information
+ */
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
@@ -252,13 +271,61 @@ struct amdgpu_ring {
unsigned rptr_offs;
u64 rptr_gpu_addr;
volatile u32 *rptr_cpu_addr;
+
+ /**
+ * @wptr:
+ *
+	 * This is part of the ring buffer implementation and represents the
+	 * write pointer. The wptr marks how far the host has written into
+	 * the ring.
+ */
u64 wptr;
+
+ /**
+ * @wptr_old:
+ *
+ * Before update wptr with the new value, usually the old value is
+	 * Before wptr is updated with a new value, the previous value is
+	 * usually stored in wptr_old.
u64 wptr_old;
unsigned ring_size;
+
+ /**
+ * @max_dw:
+ *
+ * Maximum number of DWords for ring allocation. This information is
+ * provided at the ring initialization time, and each IP block can
+ * specify a specific value. Check places that invoke
+ * amdgpu_ring_init() to see the maximum size per block.
+ */
unsigned max_dw;
+
+ /**
+ * @count_dw:
+ *
+	 * This value starts at the maximum number of DWords supported by the
+	 * ring and is updated as the ring is manipulated.
+ */
int count_dw;
uint64_t gpu_addr;
+
+ /**
+ * @ptr_mask:
+ *
+ * Some IPs provide support for 64-bit pointers and others for 32-bit
+ * only; this behavior is component-specific and defined by the field
+ * support_64bit_ptr. If the IP block supports 64-bits, the mask
+	 * 0xffffffffffffffff is set; otherwise, this value falls back to
+	 * buf_mask. Notice that this field is used to keep wptr within a
+	 * valid range.
+ */
uint64_t ptr_mask;
+
+ /**
+ * @buf_mask:
+ *
+	 * Buffer mask is a value used to keep the wptr count within the ring
+	 * buffer bounds. It is set at ring buffer initialization time and is
+	 * defined as (ring_size / 4) - 1.
+ */
uint32_t buf_mask;
u32 idx;
u32 xcc_id;
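The buf_mask arithmetic the kernel-doc above describes is easy to check standalone: with ring_size bytes and 4-byte DWords, (ring_size / 4) - 1 is an all-ones mask that wraps any write-pointer value back inside the buffer. A tiny runnable illustration (the concrete sizes are arbitrary examples):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ring_size = 4096;                 /* bytes */
	uint32_t buf_mask = (ring_size / 4) - 1;   /* 0x3ff for 1024 DWords */
	uint64_t wptr = 1030;                      /* past the end... */

	printf("wrapped wptr = %llu\n",
	       (unsigned long long)(wptr & buf_mask));  /* ...wraps to 6 */
	return 0;
}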
@@ -276,6 +343,13 @@ struct amdgpu_ring {
bool use_pollmem;
unsigned wptr_offs;
u64 wptr_gpu_addr;
+
+ /**
+ * @wptr_cpu_addr:
+ *
+ * This is the CPU address pointer in the writeback slot. This is used
+ * to commit changes to the GPU.
+ */
volatile u32 *wptr_cpu_addr;
unsigned fence_offs;
u64 fence_gpu_addr;
@@ -297,20 +371,15 @@ struct amdgpu_ring {
struct dma_fence *vmid_wait;
bool has_compute_vm_bug;
bool no_scheduler;
+ bool no_user_submission;
int hw_prio;
unsigned num_hw_submission;
atomic_t *sched_score;
- /* used for mes */
- bool is_mes_queue;
- uint32_t hw_queue_id;
- struct amdgpu_mes_ctx_data *mes_ctx;
-
bool is_sw_ring;
unsigned int entry_index;
/* store the cached rptr to restore after reset */
uint64_t cached_rptr;
-
};
#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
@@ -435,15 +504,6 @@ static inline void amdgpu_ring_patch_cond_exec(struct amdgpu_ring *ring,
ring->ring[offset] = cur - offset;
}
-#define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset) \
- (ring->is_mes_queue && ring->mes_ctx ? \
- (ring->mes_ctx->meta_data_gpu_addr + offset) : 0)
-
-#define amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset) \
- (ring->is_mes_queue && ring->mes_ctx ? \
- (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
- NULL)
-
int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index fce22d3f816b..c210625be220 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -237,6 +237,20 @@ struct amdgpu_rlc_funcs {
void (*unset_safe_mode)(struct amdgpu_device *adev, int xcc_id);
int (*init)(struct amdgpu_device *adev);
u32 (*get_csb_size)(struct amdgpu_device *adev);
+
+ /**
+ * @get_csb_buffer: Get the clear state to be put into the hardware.
+ *
+	 * The parameter adev is used to get the CS data and other gfx info,
+	 * and buffer is the RLC CS pointer.
+	 *
+	 * Sometimes, user space submits a request to clear the state in the
+	 * command buffer; this function provides the clear state that gets put
+ * into the hardware. Note that the driver programs Clear State
+ * Indirect Buffer (CSB) explicitly when it sets up the kernel rings,
+ * and it also provides a pointer to it which is used by the firmware
+ * to load the clear state in some cases.
+ */
void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
int (*get_cp_table_num)(struct amdgpu_device *adev);
int (*resume)(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 529c9696c2f3..6716ac281c49 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -26,6 +26,8 @@
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
+#include "gc/gc_10_1_0_offset.h"
+#include "gc/gc_10_3_0_sh_mask.h"
#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA reside in the 3rd page of CSA */
@@ -76,22 +78,14 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp)
return 0;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
+ r = amdgpu_sdma_get_index_from_ring(ring, &index);
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- sdma[ring->idx].sdma_meta_data);
- csa_mc_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- } else {
- r = amdgpu_sdma_get_index_from_ring(ring, &index);
-
- if (r || index > 31)
- csa_mc_addr = 0;
- else
- csa_mc_addr = amdgpu_csa_vaddr(adev) +
- AMDGPU_CSA_SDMA_OFFSET +
- index * AMDGPU_CSA_SDMA_SIZE;
- }
+ if (r || index > 31)
+ csa_mc_addr = 0;
+ else
+ csa_mc_addr = amdgpu_csa_vaddr(adev) +
+ AMDGPU_CSA_SDMA_OFFSET +
+ index * AMDGPU_CSA_SDMA_SIZE;
return csa_mc_addr;
}
@@ -537,28 +531,38 @@ bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_rin
return false;
}
-/**
- * amdgpu_sdma_register_on_reset_callbacks - Register SDMA reset callbacks
- * @funcs: Pointer to the callback structure containing pre_reset and post_reset functions
- *
- * This function allows KFD and AMDGPU to register their own callbacks for handling
- * pre-reset and post-reset operations for engine reset. These are needed because engine
- * reset will stop all queues on that engine.
- */
-void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct sdma_on_reset_funcs *funcs)
+static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id)
{
- if (!funcs)
- return;
-
- /* Ensure the reset_callback_list is initialized */
- if (!adev->sdma.reset_callback_list.next) {
- INIT_LIST_HEAD(&adev->sdma.reset_callback_list);
+ struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
+ int r = -EOPNOTSUPP;
+
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(4, 4, 2):
+ case IP_VERSION(4, 4, 4):
+ case IP_VERSION(4, 4, 5):
+ /* For SDMA 4.x, use the existing DPM interface for backward compatibility */
+ r = amdgpu_dpm_reset_sdma(adev, 1 << instance_id);
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 5):
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 2):
+ case IP_VERSION(5, 2, 4):
+ case IP_VERSION(5, 2, 5):
+ case IP_VERSION(5, 2, 6):
+ case IP_VERSION(5, 2, 3):
+ case IP_VERSION(5, 2, 1):
+ case IP_VERSION(5, 2, 7):
+ if (sdma_instance->funcs->soft_reset_kernel_queue)
+ r = sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id);
+ break;
+ default:
+ break;
}
- /* Initialize the list node in the callback structure */
- INIT_LIST_HEAD(&funcs->list);
- /* Add the callback structure to the global list */
- list_add_tail(&funcs->list, &adev->sdma.reset_callback_list);
+ return r;
}
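The new amdgpu_sdma_soft_reset() above replaces the old callback list with a plain version dispatch: SDMA 4.4.x keeps the DPM interface, SDMA 5.x uses the per-instance soft_reset_kernel_queue hook when one is installed, and everything else is -EOPNOTSUPP. A standalone model of that shape, with all demo_* names invented for illustration:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct demo_sdma_funcs {
	int (*soft_reset_kernel_queue)(uint32_t instance_id);
};

static int demo_sdma5_soft_reset(uint32_t instance_id)
{
	printf("SDMA5-style soft reset of instance %u\n", instance_id);
	return 0;
}

/* models the switch above: v4 via DPM, v5 via the instance hook */
static int demo_soft_reset(uint32_t ip_major, uint32_t instance_id,
			   const struct demo_sdma_funcs *funcs)
{
	switch (ip_major) {
	case 4:
		printf("DPM reset, mask 0x%x\n", 1u << instance_id);
		return 0;
	case 5:
		if (funcs->soft_reset_kernel_queue)
			return funcs->soft_reset_kernel_queue(instance_id);
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

int main(void)
{
	const struct demo_sdma_funcs funcs = {
		.soft_reset_kernel_queue = demo_sdma5_soft_reset,
	};

	return demo_soft_reset(5, 0, &funcs) ? 1 : 0;
}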
/**
@@ -566,16 +570,10 @@ void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct
* @adev: Pointer to the AMDGPU device
* @instance_id: ID of the SDMA engine instance to reset
*
- * This function performs the following steps:
- * 1. Calls all registered pre_reset callbacks to allow KFD and AMDGPU to save their state.
- * 2. Resets the specified SDMA engine instance.
- * 3. Calls all registered post_reset callbacks to allow KFD and AMDGPU to restore their state.
- *
* Returns: 0 on success, or a negative error code on failure.
*/
int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
{
- struct sdma_on_reset_funcs *funcs;
int ret = 0;
struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
struct amdgpu_ring *gfx_ring = &sdma_instance->ring;
@@ -597,38 +595,18 @@ int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
page_sched_stopped = true;
}
- /* Invoke all registered pre_reset callbacks */
- list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) {
- if (funcs->pre_reset) {
- ret = funcs->pre_reset(adev, instance_id);
- if (ret) {
- dev_err(adev->dev,
- "beforeReset callback failed for instance %u: %d\n",
- instance_id, ret);
- goto exit;
- }
- }
- }
+ if (sdma_instance->funcs->stop_kernel_queue)
+ sdma_instance->funcs->stop_kernel_queue(gfx_ring);
/* Perform the SDMA reset for the specified instance */
- ret = amdgpu_dpm_reset_sdma(adev, 1 << instance_id);
+ ret = amdgpu_sdma_soft_reset(adev, instance_id);
if (ret) {
dev_err(adev->dev, "Failed to reset SDMA instance %u\n", instance_id);
goto exit;
}
- /* Invoke all registered post_reset callbacks */
- list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) {
- if (funcs->post_reset) {
- ret = funcs->post_reset(adev, instance_id);
- if (ret) {
- dev_err(adev->dev,
- "afterReset callback failed for instance %u: %d\n",
- instance_id, ret);
- goto exit;
- }
- }
- }
+ if (sdma_instance->funcs->start_kernel_queue)
+ sdma_instance->funcs->start_kernel_queue(gfx_ring);
exit:
/* Restart the scheduler's work queue for the GFX and page rings
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 47d56fd0589f..5605921212f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -50,6 +50,12 @@ enum amdgpu_sdma_irq {
#define NUM_SDMA(x) hweight32(x)
+struct amdgpu_sdma_funcs {
+ int (*stop_kernel_queue)(struct amdgpu_ring *ring);
+ int (*start_kernel_queue)(struct amdgpu_ring *ring);
+ int (*soft_reset_kernel_queue)(struct amdgpu_device *adev, u32 instance_id);
+};
+
struct amdgpu_sdma_instance {
/* SDMA firmware */
const struct firmware *fw;
@@ -68,7 +74,7 @@ struct amdgpu_sdma_instance {
/* track guilty state of GFX and PAGE queues */
bool gfx_guilty;
bool page_guilty;
-
+ const struct amdgpu_sdma_funcs *funcs;
};
enum amdgpu_sdma_ras_memory_id {
@@ -103,13 +109,6 @@ struct amdgpu_sdma_ras {
struct amdgpu_ras_block_object ras_block;
};
-struct sdma_on_reset_funcs {
- int (*pre_reset)(struct amdgpu_device *adev, uint32_t instance_id);
- int (*post_reset)(struct amdgpu_device *adev, uint32_t instance_id);
- /* Linked list node to store this structure in a list; */
- struct list_head list;
-};
-
struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
struct amdgpu_irq_src trap_irq;
@@ -131,6 +130,8 @@ struct amdgpu_sdma {
uint32_t *ip_dump;
uint32_t supported_reset;
struct list_head reset_callback_list;
+ bool no_user_submission;
+ bool disable_uq;
};
/*
@@ -170,7 +171,6 @@ struct amdgpu_buffer_funcs {
uint32_t byte_count);
};
-void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct sdma_on_reset_funcs *funcs);
int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id);
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
index e22cb2b5cd92..3939761be31c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
@@ -45,7 +45,11 @@
*/
static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
{
- return AMDGPU_VA_RESERVED_SEQ64_START(adev);
+ u64 addr = AMDGPU_VA_RESERVED_SEQ64_START(adev);
+
+ addr = amdgpu_gmc_sign_extend(addr);
+
+ return addr;
}
/**
@@ -63,9 +67,9 @@ static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va)
{
+ u64 seq64_addr, va_flags;
struct amdgpu_bo *bo;
struct drm_exec exec;
- u64 seq64_addr;
int r;
bo = adev->seq64.sbo;
@@ -88,9 +92,11 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
goto error;
}
- seq64_addr = amdgpu_seq64_get_va_base(adev);
+ seq64_addr = amdgpu_seq64_get_va_base(adev) & AMDGPU_GMC_HOLE_MASK;
+
+ va_flags = amdgpu_gem_va_map_flags(adev, AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC);
r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE,
- AMDGPU_PTE_READABLE);
+ va_flags);
if (r) {
DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
amdgpu_vm_bo_del(adev, *bo_va);
@@ -156,6 +162,7 @@ error:
*
* @adev: amdgpu_device pointer
* @va: VA to access the seq in process address space
+ * @gpu_addr: GPU address to access the seq
* @cpu_addr: CPU address to access the seq
*
* Alloc a 64 bit memory from seq64 pool.
@@ -163,7 +170,8 @@ error:
* Returns:
* 0 on success or a negative error code on failure
*/
-int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr)
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va,
+ u64 *gpu_addr, u64 **cpu_addr)
{
unsigned long bit_pos;
@@ -172,7 +180,12 @@ int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr)
return -ENOSPC;
__set_bit(bit_pos, adev->seq64.used);
+
*va = bit_pos * sizeof(u64) + amdgpu_seq64_get_va_base(adev);
+
+ if (gpu_addr)
+ *gpu_addr = bit_pos * sizeof(u64) + adev->seq64.gpu_addr;
+
*cpu_addr = bit_pos + adev->seq64.cpu_base_addr;
return 0;
@@ -233,7 +246,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev)
*/
r = amdgpu_bo_create_kernel(adev, AMDGPU_VA_RESERVED_SEQ64_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &adev->seq64.sbo, NULL,
+ &adev->seq64.sbo, &adev->seq64.gpu_addr,
(void **)&adev->seq64.cpu_base_addr);
if (r) {
dev_warn(adev->dev, "(%d) create seq64 failed\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
index 4203b2ab318d..26a249aaaee1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
@@ -32,13 +32,14 @@
struct amdgpu_seq64 {
struct amdgpu_bo *sbo;
u32 num_sem;
+ u64 gpu_addr;
u64 *cpu_base_addr;
DECLARE_BITMAP(used, AMDGPU_MAX_SEQ64_SLOTS);
};
void amdgpu_seq64_fini(struct amdgpu_device *adev);
int amdgpu_seq64_init(struct amdgpu_device *adev);
-int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, u64 **cpu_addr);
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 *gpu_addr, u64 **cpu_addr);
void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr);
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va);
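With the extra out-parameter, a caller gets three views of the slot: the sign-extended VA for the process mapping, the kernel GTT address, and a CPU pointer. A minimal usage sketch, mirroring the fence driver added later in this series:

u64 va, gpu_addr;
u64 *cpu_addr;
int r;

r = amdgpu_seq64_alloc(adev, &va, &gpu_addr, &cpu_addr);
if (r)
	return r;

/* va and gpu_addr address the same u64 slot that cpu_addr maps */
*cpu_addr = 0;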
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 5576ed0b508f..d6ae9974c952 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -249,9 +249,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
if (resv == NULL)
return -EINVAL;
-
- /* TODO: Use DMA_RESV_USAGE_READ here */
- dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) {
+ /* Implicitly sync only to KERNEL, WRITE and READ */
+ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, f) {
dma_fence_chain_for_each(f, f) {
struct dma_fence *tmp = dma_fence_chain_contained(f);
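Reviewer note: dma_resv usage classes are ordered KERNEL < WRITE < READ < BOOKKEEP and the iterator visits every fence at or below the requested class, so this hunk keeps implicit sync against kernel, write and read fences while skipping BOOKKEEP-only ones such as the user-queue fences added later in this series. A minimal sketch of that assumption (process_fence() is a hypothetical consumer):

dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, f)
	process_fence(f);	/* never sees BOOKKEEP-only fences */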
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 53b71e9d8076..9c5df35f05b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -2081,6 +2081,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
amdgpu_vram_mgr_fini(adev);
amdgpu_gtt_mgr_fini(adev);
amdgpu_preempt_mgr_fini(adev);
+ amdgpu_doorbell_fini(adev);
+
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 3d9e9fdc10b4..4a72c2bbd49e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -767,6 +767,7 @@ FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
FW_VERSION_ATTR(mes_fw_version, 0444, mes.sched_version & AMDGPU_MES_VERSION_MASK);
FW_VERSION_ATTR(mes_kiq_fw_version, 0444, mes.kiq_version & AMDGPU_MES_VERSION_MASK);
+FW_VERSION_ATTR(pldm_fw_version, 0444, firmware.pldm_version);
static struct attribute *fw_attrs[] = {
&dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr,
@@ -781,7 +782,7 @@ static struct attribute *fw_attrs[] = {
&dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
&dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr,
&dev_attr_mes_fw_version.attr, &dev_attr_mes_kiq_fw_version.attr,
- NULL
+ &dev_attr_pldm_fw_version.attr, NULL
};
#define to_dev_attr(x) container_of(x, struct device_attribute, attr)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 4eedd92f000b..9e89c3487be5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -25,6 +25,8 @@
#include "amdgpu_socbb.h"
+#define RS64_FW_UC_START_ADDR_LO 0x3000
+
struct common_firmware_header {
uint32_t size_bytes; /* size of the entire header+image(s) in bytes */
uint32_t header_size_bytes; /* size of just the header in bytes */
@@ -600,6 +602,7 @@ struct amdgpu_firmware {
void *fw_buf_ptr;
uint64_t fw_buf_mc;
+ uint32_t pldm_version;
};
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 0a1ef95b2866..8c6e55b5b967 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -529,6 +529,7 @@ int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
pfns[i] = err_data.err_addr[i].retired_page;
}
ret = i;
+ adev->umc.err_addr_cnt = err_data.err_addr_cnt;
out:
kfree(err_data.err_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 857693bcd8d4..29ce6b1d214a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -78,6 +78,18 @@
#define UMC_NPS_SHIFT 40
#define UMC_NPS_MASK 0xffULL
+/* three column bits and one row bit in MCA address flip
+ * in bad page retirement
+ */
+#define RETIRE_FLIP_BITS_NUM 4
+
+struct amdgpu_umc_flip_bits {
+ uint32_t flip_bits_in_pa[RETIRE_FLIP_BITS_NUM];
+ uint32_t flip_row_bit;
+ uint32_t r13_in_pa;
+ uint32_t bit_num;
+};
+
typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst,
uint32_t umc_inst, uint32_t ch_inst, void *data);
@@ -100,6 +112,7 @@ struct amdgpu_umc_ras {
bool dump_addr);
uint32_t (*get_die_id_from_pa)(struct amdgpu_device *adev,
uint64_t mca_addr, uint64_t retired_page);
+ void (*get_retire_flip_bits)(struct amdgpu_device *adev);
};
struct amdgpu_umc_funcs {
@@ -130,6 +143,10 @@ struct amdgpu_umc {
/* active mask for umc node instance */
unsigned long active_mask;
+
+ struct amdgpu_umc_flip_bits flip_bits;
+
+ unsigned long err_addr_cnt;
};
int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
new file mode 100644
index 000000000000..295e7186e156
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -0,0 +1,924 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drm_auth.h>
+#include <drm/drm_exec.h>
+#include <linux/pm_runtime.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_userq_fence.h"
+
+u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
+{
+ int i;
+ u32 userq_ip_mask = 0;
+
+ for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
+ if (adev->userq_funcs[i])
+ userq_ip_mask |= (1 << i);
+ }
+
+ return userq_ip_mask;
+}
+
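+/*
+ * Queue state machine used by the map/unmap helpers below:
+ * UNMAPPED <-> MAPPED on a successful map/unmap, while any failed
+ * transition parks the queue in HUNG.
+ */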
+static int
+amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
+ r = userq_funcs->unmap(uq_mgr, queue);
+ if (r)
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ else
+ queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
+ }
+ return r;
+}
+
+static int
+amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
+ r = userq_funcs->map(uq_mgr, queue);
+ if (r)
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ else
+ queue->state = AMDGPU_USERQ_STATE_MAPPED;
+ }
+ return r;
+}
+
+static void
+amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct dma_fence *f = queue->last_fence;
+ int ret;
+
+ if (f && !dma_fence_is_signaled(f)) {
+ ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
+ if (ret <= 0)
+ drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
+ f->context, f->seqno);
+ }
+}
+
+static void
+amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue,
+ int queue_id)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
+
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ amdgpu_userq_fence_driver_free(queue);
+ idr_remove(&uq_mgr->userq_idr, queue_id);
+ kfree(queue);
+}
+
+int
+amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id;
+ int ret = 0;
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ /* Count the queues currently mapped for this process */
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id)
+ ret += queue->state == AMDGPU_USERQ_STATE_MAPPED;
+
+ mutex_unlock(&uq_mgr->userq_mutex);
+ return ret;
+}
+
+static struct amdgpu_usermode_queue *
+amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
+{
+ return idr_find(&uq_mgr->userq_idr, qid);
+}
+
+void
+amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+retry:
+ /* Flush any pending resume work to create ev_fence */
+ flush_delayed_work(&uq_mgr->resume_work);
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+ if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
+ mutex_unlock(&uq_mgr->userq_mutex);
+ /*
+ * Looks like there was no pending resume work,
+ * add one now to create a valid eviction fence
+ */
+ schedule_delayed_work(&uq_mgr->resume_work, 0);
+ goto retry;
+ }
+}
+
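+/*
+ * Kernel-owned, GTT-backed objects used by a user queue (MQD, FW space,
+ * wptr): allocated page-aligned, GART-bound, CPU-mapped and zeroed
+ * before first use.
+ */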
+int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj,
+ int size)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_bo_param bp;
+ int r;
+
+ memset(&bp, 0, sizeof(bp));
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+ bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ bp.type = ttm_bo_type_kernel;
+ bp.size = size;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(userq_obj->obj, true);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
+ goto free_obj;
+ }
+
+ r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
+ goto unresv;
+ }
+
+ r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
+ goto unresv;
+ }
+
+ userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
+ amdgpu_bo_unreserve(userq_obj->obj);
+ memset(userq_obj->cpu_ptr, 0, size);
+ return 0;
+
+unresv:
+ amdgpu_bo_unreserve(userq_obj->obj);
+
+free_obj:
+ amdgpu_bo_unref(&userq_obj->obj);
+ return r;
+}
+
+void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj)
+{
+ amdgpu_bo_kunmap(userq_obj->obj);
+ amdgpu_bo_unref(&userq_obj->obj);
+}
+
+uint64_t
+amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_db_info *db_info,
+ struct drm_file *filp)
+{
+ uint64_t index;
+ struct drm_gem_object *gobj;
+ struct amdgpu_userq_obj *db_obj = db_info->db_obj;
+ int r, db_size;
+
+ gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
+ if (gobj == NULL) {
+ drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
+ return -EINVAL;
+ }
+
+ db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+ drm_gem_object_put(gobj);
+
+ r = amdgpu_bo_reserve(db_obj->obj, true);
+ if (r) {
+ drm_file_err(uq_mgr->file, "[Usermode queues] Failed to reserve doorbell object\n");
+ goto unref_bo;
+ }
+
+ /* Pin the BO before generating the index, unpin in queue destroy */
+ r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
+ if (r) {
+ drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
+ goto unresv_bo;
+ }
+
+ switch (db_info->queue_type) {
+ case AMDGPU_HW_IP_GFX:
+ case AMDGPU_HW_IP_COMPUTE:
+ case AMDGPU_HW_IP_DMA:
+ db_size = sizeof(u64);
+ break;
+
+ case AMDGPU_HW_IP_VCN_ENC:
+ db_size = sizeof(u32);
+ db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1;
+ break;
+
+ case AMDGPU_HW_IP_VPE:
+ db_size = sizeof(u32);
+ db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1;
+ break;
+
+ default:
+ drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not supported\n",
+ db_info->queue_type);
+ r = -EINVAL;
+ goto unpin_bo;
+ }
+
+ index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
+ db_info->doorbell_offset, db_size);
+ drm_dbg_driver(adev_to_drm(uq_mgr->adev),
+ "[Usermode queues] doorbell index=%lld\n", index);
+ amdgpu_bo_unreserve(db_obj->obj);
+ return index;
+
+unpin_bo:
+ amdgpu_bo_unpin(db_obj->obj);
+unresv_bo:
+ amdgpu_bo_unreserve(db_obj->obj);
+unref_bo:
+ amdgpu_bo_unref(&db_obj->obj);
+ return r;
+}
+
+static int
+amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_usermode_queue *queue;
+ int r = 0;
+
+ cancel_delayed_work_sync(&uq_mgr->resume_work);
+ mutex_lock(&uq_mgr->userq_mutex);
+
+ queue = amdgpu_userq_find(uq_mgr, queue_id);
+ if (!queue) {
+ drm_dbg_driver(adev_to_drm(uq_mgr->adev), "Invalid queue id to destroy\n");
+ mutex_unlock(&uq_mgr->userq_mutex);
+ return -EINVAL;
+ }
+ amdgpu_userq_wait_for_last_fence(uq_mgr, queue);
+ r = amdgpu_bo_reserve(queue->db_obj.obj, true);
+ if (!r) {
+ amdgpu_bo_unpin(queue->db_obj.obj);
+ amdgpu_bo_unreserve(queue->db_obj.obj);
+ }
+ amdgpu_bo_unref(&queue->db_obj.obj);
+ r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+ amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
+ mutex_unlock(&uq_mgr->userq_mutex);
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
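+/*
+ * Priorities below HIGH are open to everyone; HIGH and above require
+ * CAP_SYS_NICE or DRM master status.
+ */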
+static int amdgpu_userq_priority_permit(struct drm_file *filp,
+ int priority)
+{
+ if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
+ return 0;
+
+ if (capable(CAP_SYS_NICE))
+ return 0;
+
+ if (drm_is_current_master(filp))
+ return 0;
+
+ return -EACCES;
+}
+
+static int
+amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *uq_funcs;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_db_info db_info;
+ bool skip_map_queue;
+ uint64_t index;
+ int qid, r = 0;
+ int priority =
+ (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
+ AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
+
+ /* Usermode queues are only supported for GFX, Compute and SDMA IPs for now */
+ if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
+ args->in.ip_type != AMDGPU_HW_IP_DMA &&
+ args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
+ drm_file_err(uq_mgr->file, "Usermode queue doesn't support IP type %u\n",
+ args->in.ip_type);
+ return -EINVAL;
+ }
+
+ r = amdgpu_userq_priority_permit(filp, priority);
+ if (r)
+ return r;
+
+ if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
+ (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
+ (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
+ !amdgpu_is_tmz(adev)) {
+ drm_file_err(uq_mgr->file, "Secure only supported on GFX/Compute queues\n");
+ return -EINVAL;
+ }
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ /*
+ * We may be creating a new queue while the other queues under this
+ * uq_mgr are suspended, so if any resume work is pending, wait for it
+ * to complete.
+ *
+ * This also guarantees a valid eviction fence is ready to be used.
+ */
+ mutex_lock(&adev->userq_mutex);
+ amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
+
+ uq_funcs = adev->userq_funcs[args->in.ip_type];
+ if (!uq_funcs) {
+ drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
+ args->in.ip_type);
+ r = -EINVAL;
+ goto unlock;
+ }
+
+ queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
+ if (!queue) {
+ drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
+ r = -ENOMEM;
+ goto unlock;
+ }
+ queue->doorbell_handle = args->in.doorbell_handle;
+ queue->queue_type = args->in.ip_type;
+ queue->vm = &fpriv->vm;
+ queue->priority = priority;
+
+ db_info.queue_type = queue->queue_type;
+ db_info.doorbell_handle = queue->doorbell_handle;
+ db_info.db_obj = &queue->db_obj;
+ db_info.doorbell_offset = args->in.doorbell_offset;
+
+ /* Convert relative doorbell offset into absolute doorbell index */
+ index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
+ if (index == (uint64_t)-EINVAL) {
+ drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
+ kfree(queue);
+ r = -EINVAL;
+ goto unlock;
+ }
+
+ queue->doorbell_index = index;
+ xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
+ r = amdgpu_userq_fence_driver_alloc(adev, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
+ goto unlock;
+ }
+
+ r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to create Queue\n");
+ amdgpu_userq_fence_driver_free(queue);
+ kfree(queue);
+ goto unlock;
+ }
+
+ qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
+ if (qid < 0) {
+ drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
+ amdgpu_userq_fence_driver_free(queue);
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ kfree(queue);
+ r = -ENOMEM;
+ goto unlock;
+ }
+
+ /* don't map the queue if scheduling is halted */
+ if (adev->userq_halt_for_enforce_isolation &&
+ ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
+ skip_map_queue = true;
+ else
+ skip_map_queue = false;
+ if (!skip_map_queue) {
+ r = amdgpu_userq_map_helper(uq_mgr, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to map Queue\n");
+ idr_remove(&uq_mgr->userq_idr, qid);
+ amdgpu_userq_fence_driver_free(queue);
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ kfree(queue);
+ goto unlock;
+ }
+ }
+
+ args->out.queue_id = qid;
+
+unlock:
+ mutex_unlock(&uq_mgr->userq_mutex);
+ mutex_unlock(&adev->userq_mutex);
+
+ return r;
+}
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ union drm_amdgpu_userq *args = data;
+ int r;
+
+ switch (args->in.op) {
+ case AMDGPU_USERQ_OP_CREATE:
+ if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
+ AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
+ return -EINVAL;
+ r = amdgpu_userq_create(filp, args);
+ if (r)
+ drm_file_err(filp, "Failed to create usermode queue\n");
+ break;
+
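+ /* FREE consumes only queue_id; every other input field must be zero */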
+ case AMDGPU_USERQ_OP_FREE:
+ if (args->in.ip_type ||
+ args->in.doorbell_handle ||
+ args->in.doorbell_offset ||
+ args->in.flags ||
+ args->in.queue_va ||
+ args->in.queue_size ||
+ args->in.rptr_va ||
+ args->in.wptr_va ||
+ args->in.mqd ||
+ args->in.mqd_size)
+ return -EINVAL;
+ r = amdgpu_userq_destroy(filp, args->in.queue_id);
+ if (r)
+ drm_file_err(filp, "Failed to destroy usermode queue\n");
+ break;
+
+ default:
+ drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op);
+ return -EINVAL;
+ }
+
+ return r;
+}
+
+static int
+amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id;
+ int ret = 0, r;
+
+ /* Resume all the queues for this process */
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_map_helper(uq_mgr, queue);
+ if (r)
+ ret = r;
+ }
+
+ if (ret)
+ drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
+ return ret;
+}
+
+static int
+amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ int ret;
+
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ DRM_ERROR("Fail to validate\n");
+
+ return ret;
+}
+
+static int
+amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_bo_va *bo_va;
+ struct ww_acquire_ctx *ticket;
+ struct drm_exec exec;
+ struct amdgpu_bo *bo;
+ struct dma_resv *resv;
+ bool clear, unlock;
+ int ret = 0;
+
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ ret = amdgpu_vm_lock_pd(vm, &exec, 2);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret)) {
+ drm_file_err(uq_mgr->file, "Failed to lock PD\n");
+ goto unlock_all;
+ }
+
+ /* Lock the done list */
+ list_for_each_entry(bo_va, &vm->done, base.vm_status) {
+ bo = bo_va->base.bo;
+ if (!bo)
+ continue;
+
+ ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
+ }
+ }
+
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->moved)) {
+ bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
+ base.vm_status);
+ spin_unlock(&vm->status_lock);
+
+ /* Per VM BOs never need to be cleared in the page tables */
+ ret = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (ret)
+ goto unlock_all;
+ spin_lock(&vm->status_lock);
+ }
+
+ ticket = &exec.ticket;
+ while (!list_empty(&vm->invalidated)) {
+ bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
+ base.vm_status);
+ resv = bo_va->base.bo->tbo.base.resv;
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_va->base.bo;
+ ret = amdgpu_userq_validate_vm_bo(NULL, bo);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to validate BO\n");
+ goto unlock_all;
+ }
+
+ /* Try to reserve the BO to avoid clearing its ptes */
+ if (!adev->debug_vm && dma_resv_trylock(resv)) {
+ clear = false;
+ unlock = true;
+ /* The caller is already holding the reservation lock */
+ } else if (dma_resv_locking_ctx(resv) == ticket) {
+ clear = false;
+ unlock = false;
+ /* Somebody else is using the BO right now */
+ } else {
+ clear = true;
+ unlock = false;
+ }
+
+ ret = amdgpu_vm_bo_update(adev, bo_va, clear);
+
+ if (unlock)
+ dma_resv_unlock(resv);
+ if (ret)
+ goto unlock_all;
+
+ spin_lock(&vm->status_lock);
+ }
+ spin_unlock(&vm->status_lock);
+
+ ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
+ if (ret)
+ drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
+
+unlock_all:
+ drm_exec_fini(&exec);
+ return ret;
+}
+
+static void amdgpu_userq_restore_worker(struct work_struct *work)
+{
+ struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ int ret;
+
+ flush_work(&fpriv->evf_mgr.suspend_work.work);
+
+ mutex_lock(&uq_mgr->userq_mutex);
+
+ ret = amdgpu_userq_validate_bos(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
+ goto unlock;
+ }
+
+ ret = amdgpu_userq_restore_all(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&uq_mgr->userq_mutex);
+}
+
+static int
+amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id;
+ int ret = 0, r;
+
+ /* Try to unmap all the queues in this process ctx */
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+ if (r)
+ ret = r;
+ }
+
+ if (ret)
+ drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n");
+ return ret;
+}
+
+static int
+amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id, ret;
+
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ struct dma_fence *f = queue->last_fence;
+
+ if (!f || dma_fence_is_signaled(f))
+ continue;
+ ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
+ if (ret <= 0) {
+ drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
+ f->context, f->seqno);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+void
+amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence *ev_fence)
+{
+ int ret;
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
+
+ /* Wait for any pending userqueue fence work to finish */
+ ret = amdgpu_userq_wait_for_signal(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Not evicting userqueue, timeout waiting for work\n");
+ return;
+ }
+
+ ret = amdgpu_userq_evict_all(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to evict userqueue\n");
+ return;
+ }
+
+ /* Signal current eviction fence */
+ amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
+
+ if (evf_mgr->fd_closing) {
+ cancel_delayed_work_sync(&uq_mgr->resume_work);
+ return;
+ }
+
+ /* Schedule a resume work */
+ schedule_delayed_work(&uq_mgr->resume_work, 0);
+}
+
+int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
+ struct amdgpu_device *adev)
+{
+ mutex_init(&userq_mgr->userq_mutex);
+ idr_init_base(&userq_mgr->userq_idr, 1);
+ userq_mgr->adev = adev;
+ userq_mgr->file = file_priv;
+
+ mutex_lock(&adev->userq_mutex);
+ list_add(&userq_mgr->list, &adev->userq_mgr_list);
+ mutex_unlock(&adev->userq_mutex);
+
+ INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
+ return 0;
+}
+
+void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
+{
+ struct amdgpu_device *adev = userq_mgr->adev;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ uint32_t queue_id;
+
+ cancel_delayed_work_sync(&userq_mgr->resume_work);
+
+ mutex_lock(&adev->userq_mutex);
+ mutex_lock(&userq_mgr->userq_mutex);
+ idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
+ amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
+ amdgpu_userq_unmap_helper(userq_mgr, queue);
+ amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
+ }
+
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ if (uqm == userq_mgr) {
+ list_del(&uqm->list);
+ break;
+ }
+ }
+ idr_destroy(&userq_mgr->userq_idr);
+ mutex_unlock(&userq_mgr->userq_mutex);
+ mutex_unlock(&adev->userq_mutex);
+ mutex_destroy(&userq_mgr->userq_mutex);
+}
+
+int amdgpu_userq_suspend(struct amdgpu_device *adev)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ if (!ip_mask)
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ cancel_delayed_work_sync(&uqm->resume_work);
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_unmap_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
+
+int amdgpu_userq_resume(struct amdgpu_device *adev)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ if (!ip_mask)
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_map_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
+
+int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ /* only need to stop gfx/compute */
+ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ if (adev->userq_halt_for_enforce_isolation)
+ dev_warn(adev->dev, "userq scheduling already stopped!\n");
+ adev->userq_halt_for_enforce_isolation = true;
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ cancel_delayed_work_sync(&uqm->resume_work);
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ r = amdgpu_userq_unmap_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
+
+int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ /* only need to start gfx/compute */
+ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ if (!adev->userq_halt_for_enforce_isolation)
+ dev_warn(adev->dev, "userq scheduling already started!\n");
+ adev->userq_halt_for_enforce_isolation = false;
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ r = amdgpu_userq_map_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
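The IP-specific backends plug in through adev->userq_funcs[]; a hypothetical registration sketch (the table name and init hook are illustrative, the real hookup lives in the IP version files rather than in this file):

extern const struct amdgpu_userq_funcs userq_mes_funcs;	/* hypothetical */

static void example_userq_register(struct amdgpu_device *adev)
{
	adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
	adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
}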
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
new file mode 100644
index 000000000000..ec040c2fd6c9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_USERQ_H_
+#define AMDGPU_USERQ_H_
+#include "amdgpu_eviction_fence.h"
+
+#define AMDGPU_MAX_USERQ_COUNT 512
+
+#define to_ev_fence(f) container_of(f, struct amdgpu_eviction_fence, base)
+#define uq_mgr_to_fpriv(u) container_of(u, struct amdgpu_fpriv, userq_mgr)
+#define work_to_uq_mgr(w, name) container_of(w, struct amdgpu_userq_mgr, name)
+
+enum amdgpu_userq_state {
+ AMDGPU_USERQ_STATE_UNMAPPED = 0,
+ AMDGPU_USERQ_STATE_MAPPED,
+ AMDGPU_USERQ_STATE_PREEMPTED,
+ AMDGPU_USERQ_STATE_HUNG,
+};
+
+struct amdgpu_mqd_prop;
+
+struct amdgpu_userq_obj {
+ void *cpu_ptr;
+ uint64_t gpu_addr;
+ struct amdgpu_bo *obj;
+};
+
+struct amdgpu_usermode_queue {
+ int queue_type;
+ enum amdgpu_userq_state state;
+ uint64_t doorbell_handle;
+ uint64_t doorbell_index;
+ uint64_t flags;
+ struct amdgpu_mqd_prop *userq_prop;
+ struct amdgpu_userq_mgr *userq_mgr;
+ struct amdgpu_vm *vm;
+ struct amdgpu_userq_obj mqd;
+ struct amdgpu_userq_obj db_obj;
+ struct amdgpu_userq_obj fw_obj;
+ struct amdgpu_userq_obj wptr_obj;
+ struct xarray fence_drv_xa;
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct dma_fence *last_fence;
+ u32 xcp_id;
+ int priority;
+};
+
+struct amdgpu_userq_funcs {
+ int (*mqd_create)(struct amdgpu_userq_mgr *uq_mgr,
+ struct drm_amdgpu_userq_in *args,
+ struct amdgpu_usermode_queue *queue);
+ void (*mqd_destroy)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *uq);
+ int (*unmap)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*map)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+};
+
+/* Usermode queues for gfx */
+struct amdgpu_userq_mgr {
+ struct idr userq_idr;
+ struct mutex userq_mutex;
+ struct amdgpu_device *adev;
+ struct delayed_work resume_work;
+ struct list_head list;
+ struct drm_file *file;
+};
+
+struct amdgpu_db_info {
+ uint64_t doorbell_handle;
+ uint32_t queue_type;
+ uint32_t doorbell_offset;
+ struct amdgpu_userq_obj *db_obj;
+};
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
+ struct amdgpu_device *adev);
+
+void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr);
+
+int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj,
+ int size);
+
+void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj);
+
+void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence *ev_fence);
+
+int amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr);
+
+void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr,
+ struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+uint64_t amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_db_info *db_info,
+ struct drm_file *filp);
+
+u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev);
+
+int amdgpu_userq_suspend(struct amdgpu_device *adev);
+int amdgpu_userq_resume(struct amdgpu_device *adev);
+
+int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx);
+int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx);
+
+#endif
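For completeness, a rough userspace-side sketch of creating a queue through the new ioctl; the field names follow the drm_amdgpu_userq uapi used above, while the buffer/doorbell setup is elided and the handles and VAs are assumed to exist already:

union drm_amdgpu_userq args = {0};

args.in.op = AMDGPU_USERQ_OP_CREATE;
args.in.ip_type = AMDGPU_HW_IP_GFX;
args.in.doorbell_handle = doorbell_handle;	/* GEM handle of a doorbell BO */
args.in.doorbell_offset = 0;
args.in.queue_va = queue_va;			/* ring buffer VA, mapped beforehand */
args.in.queue_size = queue_size;
args.in.rptr_va = rptr_va;
args.in.wptr_va = wptr_va;

if (drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ, &args))
	return -errno;

queue_id = args.out.queue_id;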
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
new file mode 100644
index 000000000000..fc4d0d42e223
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -0,0 +1,968 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/dma-fence-unwrap.h>
+
+#include <drm/drm_exec.h>
+#include <drm/drm_syncobj.h>
+
+#include "amdgpu.h"
+#include "amdgpu_userq_fence.h"
+
+static const struct dma_fence_ops amdgpu_userq_fence_ops;
+static struct kmem_cache *amdgpu_userq_fence_slab;
+
+int amdgpu_userq_fence_slab_init(void)
+{
+ amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
+ sizeof(struct amdgpu_userq_fence),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!amdgpu_userq_fence_slab)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void amdgpu_userq_fence_slab_fini(void)
+{
+ rcu_barrier();
+ kmem_cache_destroy(amdgpu_userq_fence_slab);
+}
+
+static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
+{
+ if (!f || f->ops != &amdgpu_userq_fence_ops)
+ return NULL;
+
+ return container_of(f, struct amdgpu_userq_fence, base);
+}
+
+static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ return le64_to_cpu(*fence_drv->cpu_addr);
+}
+
+int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *userq)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ unsigned long flags;
+ int r;
+
+ fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
+ if (!fence_drv)
+ return -ENOMEM;
+
+ /* Acquire seq64 memory */
+ r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
+ &fence_drv->cpu_addr);
+ if (r)
+ goto free_fence_drv;
+
+ memset(fence_drv->cpu_addr, 0, sizeof(u64));
+
+ kref_init(&fence_drv->refcount);
+ INIT_LIST_HEAD(&fence_drv->fences);
+ spin_lock_init(&fence_drv->fence_list_lock);
+
+ fence_drv->adev = adev;
+ fence_drv->context = dma_fence_context_alloc(1);
+ get_task_comm(fence_drv->timeline_name, current);
+
+ xa_lock_irqsave(&adev->userq_xa, flags);
+ r = xa_err(__xa_store(&adev->userq_xa, userq->doorbell_index,
+ fence_drv, GFP_KERNEL));
+ xa_unlock_irqrestore(&adev->userq_xa, flags);
+ if (r)
+ goto free_seq64;
+
+ userq->fence_drv = fence_drv;
+
+ return 0;
+
+free_seq64:
+ amdgpu_seq64_free(adev, fence_drv->va);
+free_fence_drv:
+ kfree(fence_drv);
+
+ return r;
+}
+
+static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ unsigned long index;
+
+ if (xa_empty(xa))
+ return;
+
+ xa_lock(xa);
+ xa_for_each(xa, index, fence_drv) {
+ __xa_erase(xa, index);
+ amdgpu_userq_fence_driver_put(fence_drv);
+ }
+
+ xa_unlock(xa);
+}
+
+void
+amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
+{
+ amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
+ xa_destroy(&userq->fence_drv_xa);
+ /* Drop the fence_drv reference held by user queue */
+ amdgpu_userq_fence_driver_put(userq->fence_drv);
+}
+
+void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ struct amdgpu_userq_fence *userq_fence, *tmp;
+ struct dma_fence *fence;
+ u64 rptr;
+ int i;
+
+ if (!fence_drv)
+ return;
+
+ rptr = amdgpu_userq_fence_read(fence_drv);
+
+ spin_lock(&fence_drv->fence_list_lock);
+ list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
+ fence = &userq_fence->base;
+
+ if (rptr < fence->seqno)
+ break;
+
+ dma_fence_signal(fence);
+
+ for (i = 0; i < userq_fence->fence_drv_array_count; i++)
+ amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
+
+ list_del(&userq_fence->link);
+ dma_fence_put(fence);
+ }
+ spin_unlock(&fence_drv->fence_list_lock);
+}
+
+void amdgpu_userq_fence_driver_destroy(struct kref *ref)
+{
+ struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
+ struct amdgpu_userq_fence_driver,
+ refcount);
+ struct amdgpu_userq_fence_driver *xa_fence_drv;
+ struct amdgpu_device *adev = fence_drv->adev;
+ struct amdgpu_userq_fence *fence, *tmp;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long index, flags;
+ struct dma_fence *f;
+
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+ list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
+ f = &fence->base;
+
+ if (!dma_fence_is_signaled(f)) {
+ dma_fence_set_error(f, -ECANCELED);
+ dma_fence_signal(f);
+ }
+
+ list_del(&fence->link);
+ dma_fence_put(f);
+ }
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+
+ xa_lock_irqsave(xa, flags);
+ xa_for_each(xa, index, xa_fence_drv)
+ if (xa_fence_drv == fence_drv)
+ __xa_erase(xa, index);
+ xa_unlock_irqrestore(xa, flags);
+
+ /* Free seq64 memory */
+ amdgpu_seq64_free(adev, fence_drv->va);
+ kfree(fence_drv);
+}
+
+void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ kref_get(&fence_drv->refcount);
+}
+
+void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
+}
+
+static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
+{
+ *userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
+ return *userq_fence ? 0 : -ENOMEM;
+}
+
+static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
+ struct amdgpu_userq_fence *userq_fence,
+ u64 seq, struct dma_fence **f)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct dma_fence *fence;
+ unsigned long flags;
+
+ fence_drv = userq->fence_drv;
+ if (!fence_drv)
+ return -EINVAL;
+
+ spin_lock_init(&userq_fence->lock);
+ INIT_LIST_HEAD(&userq_fence->link);
+ fence = &userq_fence->base;
+ userq_fence->fence_drv = fence_drv;
+
+ dma_fence_init(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
+ fence_drv->context, seq);
+
+ amdgpu_userq_fence_driver_get(fence_drv);
+ dma_fence_get(fence);
+
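+ /*
+ * Adopt any fence drivers stashed in fence_drv_xa into this fence's
+ * array; their references are dropped once the fence signals.
+ */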
+ if (!xa_empty(&userq->fence_drv_xa)) {
+ struct amdgpu_userq_fence_driver *stored_fence_drv;
+ unsigned long index, count = 0;
+ int i = 0;
+
+ xa_lock(&userq->fence_drv_xa);
+ xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
+ count++;
+
+ userq_fence->fence_drv_array =
+ kvmalloc_array(count,
+ sizeof(struct amdgpu_userq_fence_driver *),
+ GFP_ATOMIC);
+
+ if (userq_fence->fence_drv_array) {
+ xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
+ userq_fence->fence_drv_array[i] = stored_fence_drv;
+ __xa_erase(&userq->fence_drv_xa, index);
+ i++;
+ }
+ }
+
+ userq_fence->fence_drv_array_count = i;
+ xa_unlock(&userq->fence_drv_xa);
+ } else {
+ userq_fence->fence_drv_array = NULL;
+ userq_fence->fence_drv_array_count = 0;
+ }
+
+ /* Check if hardware has already processed the job */
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+ if (!dma_fence_is_signaled_locked(fence))
+ list_add_tail(&userq_fence->link, &fence_drv->fences);
+ else
+ dma_fence_put(fence);
+
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+
+ *f = fence;
+
+ return 0;
+}
+
+static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
+{
+ return "amdgpu_userq_fence";
+}
+
+static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+
+ return fence->fence_drv->timeline_name;
+}
+
+static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
+{
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ u64 rptr, wptr;
+
+ rptr = amdgpu_userq_fence_read(fence_drv);
+ wptr = fence->base.seqno;
+
+ return rptr >= wptr;
+}
+
+static void amdgpu_userq_fence_free(struct rcu_head *rcu)
+{
+ struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
+ struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
+ struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;
+
+ /* Release the fence driver reference */
+ amdgpu_userq_fence_driver_put(fence_drv);
+
+ kvfree(userq_fence->fence_drv_array);
+ kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
+}
+
+static void amdgpu_userq_fence_release(struct dma_fence *f)
+{
+ call_rcu(&f->rcu, amdgpu_userq_fence_free);
+}
+
+static const struct dma_fence_ops amdgpu_userq_fence_ops = {
+ .use_64bit_seqno = true,
+ .get_driver_name = amdgpu_userq_fence_get_driver_name,
+ .get_timeline_name = amdgpu_userq_fence_get_timeline_name,
+ .signaled = amdgpu_userq_fence_signaled,
+ .release = amdgpu_userq_fence_release,
+};
+
+/**
+ * amdgpu_userq_fence_read_wptr - Read the userq wptr value
+ *
+ * @queue: user mode queue structure pointer
+ * @wptr: output location for the write pointer value
+ *
+ * Read the wptr value from the userq's MQD. The userq signal IOCTL
+ * creates a dma_fence for the shared buffers that signals once the
+ * RPTR value written to the seq64 memory reaches the WPTR.
+ *
+ * Returns 0 on success (the wptr value is stored in @wptr), error
+ * on failure.
+ */
+static int amdgpu_userq_fence_read_wptr(struct amdgpu_usermode_queue *queue,
+ u64 *wptr)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ struct amdgpu_bo *bo;
+ u64 addr, *ptr;
+ int r;
+
+ r = amdgpu_bo_reserve(queue->vm->root.bo, false);
+ if (r)
+ return r;
+
+ addr = queue->userq_prop->wptr_gpu_addr;
+ addr &= AMDGPU_GMC_HOLE_MASK;
+
+ mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
+ if (!mapping) {
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
+ return -EINVAL;
+ }
+
+ bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ DRM_ERROR("Failed to reserve userqueue wptr bo");
+ return r;
+ }
+
+ r = amdgpu_bo_kmap(bo, (void **)&ptr);
+ if (r) {
+ DRM_ERROR("Failed mapping the userqueue wptr bo");
+ goto map_error;
+ }
+
+ *wptr = le64_to_cpu(*ptr);
+
+ amdgpu_bo_kunmap(bo);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return 0;
+
+map_error:
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return r;
+}
+
+static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
+{
+ dma_fence_put(fence);
+}
+
+int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
+ struct drm_amdgpu_userq_signal *args = data;
+ struct drm_gem_object **gobj_write = NULL;
+ struct drm_gem_object **gobj_read = NULL;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_fence *userq_fence;
+ struct drm_syncobj **syncobj = NULL;
+ u32 *bo_handles_write, num_write_bo_handles;
+ u32 *syncobj_handles, num_syncobj_handles;
+ u32 *bo_handles_read, num_read_bo_handles;
+ int r, i, entry, rentry, wentry;
+ struct dma_fence *fence;
+ struct drm_exec exec;
+ u64 wptr;
+
+ num_syncobj_handles = args->num_syncobj_handles;
+ syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
+ sizeof(u32) * num_syncobj_handles);
+ if (IS_ERR(syncobj_handles))
+ return PTR_ERR(syncobj_handles);
+
+ /* Array of pointers to the looked up syncobjs */
+ syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
+ if (!syncobj) {
+ r = -ENOMEM;
+ goto free_syncobj_handles;
+ }
+
+ for (entry = 0; entry < num_syncobj_handles; entry++) {
+ syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
+ if (!syncobj[entry]) {
+ r = -ENOENT;
+ goto free_syncobj;
+ }
+ }
+
+ num_read_bo_handles = args->num_bo_read_handles;
+ bo_handles_read = memdup_user(u64_to_user_ptr(args->bo_read_handles),
+ sizeof(u32) * num_read_bo_handles);
+ if (IS_ERR(bo_handles_read)) {
+ r = PTR_ERR(bo_handles_read);
+ goto free_syncobj;
+ }
+
+ /* Array of pointers to the GEM read objects */
+ gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
+ if (!gobj_read) {
+ r = -ENOMEM;
+ goto free_bo_handles_read;
+ }
+
+ for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
+ gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
+ if (!gobj_read[rentry]) {
+ r = -ENOENT;
+ goto put_gobj_read;
+ }
+ }
+
+ num_write_bo_handles = args->num_bo_write_handles;
+ bo_handles_write = memdup_user(u64_to_user_ptr(args->bo_write_handles),
+ sizeof(u32) * num_write_bo_handles);
+ if (IS_ERR(bo_handles_write)) {
+ r = PTR_ERR(bo_handles_write);
+ goto put_gobj_read;
+ }
+
+ /* Array of pointers to the GEM write objects */
+ gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
+ if (!gobj_write) {
+ r = -ENOMEM;
+ goto free_bo_handles_write;
+ }
+
+ for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
+ gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
+ if (!gobj_write[wentry]) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+ }
+
+ /* Retrieve the user queue */
+ queue = idr_find(&userq_mgr->userq_idr, args->queue_id);
+ if (!queue) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+
+ r = amdgpu_userq_fence_read_wptr(queue, &wptr);
+ if (r)
+ goto put_gobj_write;
+
+ r = amdgpu_userq_fence_alloc(&userq_fence);
+ if (r)
+ goto put_gobj_write;
+
+ /* Being here means the queue is active; make sure the eviction fence is valid */
+ amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
+
+ /* Create a new fence */
+ r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
+ if (r) {
+ mutex_unlock(&userq_mgr->userq_mutex);
+ kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
+ goto put_gobj_write;
+ }
+
+ dma_fence_put(queue->last_fence);
+ queue->last_fence = dma_fence_get(fence);
+ mutex_unlock(&userq_mgr->userq_mutex);
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
+ (num_read_bo_handles + num_write_bo_handles));
+
+ /* Lock all BOs with retry handling */
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ amdgpu_userq_fence_cleanup(fence);
+ goto exec_fini;
+ }
+
+ r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ amdgpu_userq_fence_cleanup(fence);
+ goto exec_fini;
+ }
+ }
+
+ for (i = 0; i < num_read_bo_handles; i++) {
+ if (!gobj_read || !gobj_read[i]->resv)
+ continue;
+
+ dma_resv_add_fence(gobj_read[i]->resv, fence,
+ DMA_RESV_USAGE_READ);
+ }
+
+ for (i = 0; i < num_write_bo_handles; i++) {
+ if (!gobj_write || !gobj_write[i]->resv)
+ continue;
+
+ dma_resv_add_fence(gobj_write[i]->resv, fence,
+ DMA_RESV_USAGE_WRITE);
+ }
+
+ /* Add the created fence to syncobj/BO's */
+ for (i = 0; i < num_syncobj_handles; i++)
+ drm_syncobj_replace_fence(syncobj[i], fence);
+
+ /* drop the reference acquired in fence creation function */
+ dma_fence_put(fence);
+
+exec_fini:
+ drm_exec_fini(&exec);
+put_gobj_write:
+ while (wentry-- > 0)
+ drm_gem_object_put(gobj_write[wentry]);
+ kfree(gobj_write);
+free_bo_handles_write:
+ kfree(bo_handles_write);
+put_gobj_read:
+ while (rentry-- > 0)
+ drm_gem_object_put(gobj_read[rentry]);
+ kfree(gobj_read);
+free_bo_handles_read:
+ kfree(bo_handles_read);
+free_syncobj:
+ while (entry-- > 0)
+ if (syncobj[entry])
+ drm_syncobj_put(syncobj[entry]);
+ kfree(syncobj);
+free_syncobj_handles:
+ kfree(syncobj_handles);
+
+ return r;
+}
+
+int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ u32 *syncobj_handles, *timeline_points, *timeline_handles, *bo_handles_read, *bo_handles_write;
+ u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
+ struct drm_amdgpu_userq_fence_info *fence_info = NULL;
+ struct drm_amdgpu_userq_wait *wait_info = data;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_usermode_queue *waitq;
+ struct drm_gem_object **gobj_write;
+ struct drm_gem_object **gobj_read;
+ struct dma_fence **fences = NULL;
+ u16 num_points, num_fences = 0;
+ int r, i, rentry, wentry, cnt;
+ struct drm_exec exec;
+
+ num_read_bo_handles = wait_info->num_bo_read_handles;
+ bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
+ sizeof(u32) * num_read_bo_handles);
+ if (IS_ERR(bo_handles_read))
+ return PTR_ERR(bo_handles_read);
+
+ num_write_bo_handles = wait_info->num_bo_write_handles;
+ bo_handles_write = memdup_user(u64_to_user_ptr(wait_info->bo_write_handles),
+ sizeof(u32) * num_write_bo_handles);
+ if (IS_ERR(bo_handles_write)) {
+ r = PTR_ERR(bo_handles_write);
+ goto free_bo_handles_read;
+ }
+
+ num_syncobj = wait_info->num_syncobj_handles;
+ syncobj_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_handles),
+ sizeof(u32) * num_syncobj);
+ if (IS_ERR(syncobj_handles)) {
+ r = PTR_ERR(syncobj_handles);
+ goto free_bo_handles_write;
+ }
+
+ num_points = wait_info->num_syncobj_timeline_handles;
+ timeline_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_handles),
+ sizeof(u32) * num_points);
+ if (IS_ERR(timeline_handles)) {
+ r = PTR_ERR(timeline_handles);
+ goto free_syncobj_handles;
+ }
+
+ timeline_points = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_points),
+ sizeof(u32) * num_points);
+ if (IS_ERR(timeline_points)) {
+ r = PTR_ERR(timeline_points);
+ goto free_timeline_handles;
+ }
+
+ gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
+ if (!gobj_read) {
+ r = -ENOMEM;
+ goto free_timeline_points;
+ }
+
+ for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
+ gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
+ if (!gobj_read[rentry]) {
+ r = -ENOENT;
+ goto put_gobj_read;
+ }
+ }
+
+ gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
+ if (!gobj_write) {
+ r = -ENOMEM;
+ goto put_gobj_read;
+ }
+
+ for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
+ gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
+ if (!gobj_write[wentry]) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+ }
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
+ (num_read_bo_handles + num_write_bo_handles));
+
+ /* Lock all BOs with retry handling */
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ drm_exec_fini(&exec);
+ goto put_gobj_write;
+ }
+
+ r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ drm_exec_fini(&exec);
+ goto put_gobj_write;
+ }
+ }
+
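+	/*
+	 * Two-pass interface: a first call with num_fences == 0 only returns
+	 * the fence count; a second call with a sized array retrieves the
+	 * fence info.
+	 */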
+ if (!wait_info->num_fences) {
+ if (num_points) {
+ struct dma_fence_unwrap iter;
+ struct dma_fence *fence;
+ struct dma_fence *f;
+
+ for (i = 0; i < num_points; i++) {
+ r = drm_syncobj_find_fence(filp, timeline_handles[i],
+ timeline_points[i],
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto exec_fini;
+
+ dma_fence_unwrap_for_each(f, &iter, fence)
+ num_fences++;
+
+ dma_fence_put(fence);
+ }
+ }
+
+		/* Count the syncobj fences */
+ for (i = 0; i < num_syncobj; i++) {
+ struct dma_fence *fence;
+
+ r = drm_syncobj_find_fence(filp, syncobj_handles[i],
+ 0,
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto exec_fini;
+
+ num_fences++;
+ dma_fence_put(fence);
+ }
+
+		/* Count the fences attached to the GEM objects */
+ for (i = 0; i < num_read_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
+ DMA_RESV_USAGE_READ, fence)
+ num_fences++;
+ }
+
+ for (i = 0; i < num_write_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
+ DMA_RESV_USAGE_WRITE, fence)
+ num_fences++;
+ }
+
+ /*
+ * Passing num_fences = 0 means that userspace doesn't want to
+ * retrieve userq_fence_info. If num_fences = 0 we skip filling
+ * userq_fence_info and return the actual number of fences on
+		 * wait_info->num_fences.
+ */
+ wait_info->num_fences = num_fences;
+ } else {
+ /* Array of fence info */
+ fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL);
+ if (!fence_info) {
+ r = -ENOMEM;
+ goto exec_fini;
+ }
+
+ /* Array of fences */
+ fences = kmalloc_array(wait_info->num_fences, sizeof(*fences), GFP_KERNEL);
+ if (!fences) {
+ r = -ENOMEM;
+ goto free_fence_info;
+ }
+
+		/* Collect the fences attached to the GEM read objects */
+ for (i = 0; i < num_read_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
+ DMA_RESV_USAGE_READ, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ dma_fence_get(fence);
+ }
+ }
+
+		/* Collect the fences attached to the GEM write objects */
+ for (i = 0; i < num_write_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
+ DMA_RESV_USAGE_WRITE, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ dma_fence_get(fence);
+ }
+ }
+
+ if (num_points) {
+ struct dma_fence_unwrap iter;
+ struct dma_fence *fence;
+ struct dma_fence *f;
+
+ for (i = 0; i < num_points; i++) {
+ r = drm_syncobj_find_fence(filp, timeline_handles[i],
+ timeline_points[i],
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto free_fences;
+
+ dma_fence_unwrap_for_each(f, &iter, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ dma_fence_get(f);
+ fences[num_fences++] = f;
+ }
+
+ dma_fence_put(fence);
+ }
+ }
+
+		/* Collect the syncobj fences */
+ for (i = 0; i < num_syncobj; i++) {
+ struct dma_fence *fence;
+
+ r = drm_syncobj_find_fence(filp, syncobj_handles[i],
+ 0,
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto free_fences;
+
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ }
+
+ /*
+ * Keep only the latest fences to reduce the number of values
+ * given back to userspace.
+ */
+ num_fences = dma_fence_dedup_array(fences, num_fences);
+
+ waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id);
+ if (!waitq) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
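+		/*
+		 * Convert each userq fence into a (GPU VA, value) pair for
+		 * userspace to poll on; fences from other drivers are simply
+		 * waited on here.
+		 */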
+ for (i = 0, cnt = 0; i < num_fences; i++) {
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct amdgpu_userq_fence *userq_fence;
+ u32 index;
+
+ userq_fence = to_amdgpu_userq_fence(fences[i]);
+ if (!userq_fence) {
+ /*
+			 * For fences from other drivers, just waiting for
+			 * them to signal is good enough for now.
+ */
+ r = dma_fence_wait(fences[i], true);
+ if (r) {
+ dma_fence_put(fences[i]);
+ goto free_fences;
+ }
+
+ dma_fence_put(fences[i]);
+ continue;
+ }
+
+ fence_drv = userq_fence->fence_drv;
+ /*
+		 * We need to make sure the user queue releases its references
+		 * to the fence drivers at some point before queue destruction.
+		 * Otherwise, we would keep accumulating those references until
+		 * we have no more space left and crash.
+ */
+ r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
+ xa_limit_32b, GFP_KERNEL);
+ if (r)
+ goto free_fences;
+
+ amdgpu_userq_fence_driver_get(fence_drv);
+
+		/* Store the fence's GPU VA and seqno value */
+ fence_info[cnt].va = fence_drv->va;
+ fence_info[cnt].value = fences[i]->seqno;
+
+ dma_fence_put(fences[i]);
+ /* Increment the actual userq fence count */
+ cnt++;
+ }
+
+ wait_info->num_fences = cnt;
+ /* Copy userq fence info to user space */
+ if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
+ fence_info, wait_info->num_fences * sizeof(*fence_info))) {
+ r = -EFAULT;
+ goto free_fences;
+ }
+
+ kfree(fences);
+ kfree(fence_info);
+ }
+
+ drm_exec_fini(&exec);
+ for (i = 0; i < num_read_bo_handles; i++)
+ drm_gem_object_put(gobj_read[i]);
+ kfree(gobj_read);
+
+ for (i = 0; i < num_write_bo_handles; i++)
+ drm_gem_object_put(gobj_write[i]);
+ kfree(gobj_write);
+
+ kfree(timeline_points);
+ kfree(timeline_handles);
+ kfree(syncobj_handles);
+ kfree(bo_handles_write);
+ kfree(bo_handles_read);
+
+ return 0;
+
+free_fences:
+ while (num_fences-- > 0)
+ dma_fence_put(fences[num_fences]);
+ kfree(fences);
+free_fence_info:
+ kfree(fence_info);
+exec_fini:
+ drm_exec_fini(&exec);
+put_gobj_write:
+ while (wentry-- > 0)
+ drm_gem_object_put(gobj_write[wentry]);
+ kfree(gobj_write);
+put_gobj_read:
+ while (rentry-- > 0)
+ drm_gem_object_put(gobj_read[rentry]);
+ kfree(gobj_read);
+free_timeline_points:
+ kfree(timeline_points);
+free_timeline_handles:
+ kfree(timeline_handles);
+free_syncobj_handles:
+ kfree(syncobj_handles);
+free_bo_handles_write:
+ kfree(bo_handles_write);
+free_bo_handles_read:
+ kfree(bo_handles_read);
+
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
new file mode 100644
index 000000000000..97a125ab8a78
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_USERQ_FENCE_H__
+#define __AMDGPU_USERQ_FENCE_H__
+
+#include <linux/types.h>
+
+#include "amdgpu_userq.h"
+
+struct amdgpu_userq_fence {
+ struct dma_fence base;
+ /*
+ * This lock is necessary to synchronize the
+ * userqueue dma fence operations.
+ */
+ spinlock_t lock;
+ struct list_head link;
+ unsigned long fence_drv_array_count;
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct amdgpu_userq_fence_driver **fence_drv_array;
+};
+
+struct amdgpu_userq_fence_driver {
+ struct kref refcount;
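+	/* GPU VA of the 64-bit fence value that userspace polls on */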
+ u64 va;
+ u64 gpu_addr;
+ u64 *cpu_addr;
+ u64 context;
+ /*
+	 * This lock is necessary to synchronize the access
+ * to the fences list by the fence driver.
+ */
+ spinlock_t fence_list_lock;
+ struct list_head fences;
+ struct amdgpu_device *adev;
+ char timeline_name[TASK_COMM_LEN];
+};
+
+int amdgpu_userq_fence_slab_init(void);
+void amdgpu_userq_fence_slab_fini(void);
+
+void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv);
+int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *userq);
+void amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq);
+void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_destroy(struct kref *ref);
+int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 1991dd3d1056..c8885c3d54b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -353,9 +353,9 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
- /* err_event_athub will corrupt VCPU buffer, so we need to
+ /* err_event_athub and dpc recovery will corrupt VCPU buffer, so we need to
* restore fw data and clear buffer in amdgpu_vcn_resume() */
- if (in_ras_intr)
+ if (in_ras_intr || adev->pcie_reset_ctx.in_link_reset)
return 0;
return amdgpu_vcn_save_vcpu_bo_inst(adev, i);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 0bb8cbe0dcc0..13f0cdeb59c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -1323,6 +1323,9 @@ static int amdgpu_virt_req_ras_err_count_internal(struct amdgpu_device *adev, bo
{
struct amdgpu_virt *virt = &adev->virt;
+ if (!virt->ops || !virt->ops->req_ras_err_count)
+ return -EOPNOTSUPP;
+
	/* Host allows 15 RAS telemetry requests per 60 seconds. After that, the Host
	 * will ignore incoming guest messages. Ratelimit the guest messages to
	 * prevent guest self-DoS.
@@ -1378,14 +1381,16 @@ amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev,
used_size = host_telemetry->header.used_size;
if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
- return 0;
+ return -EINVAL;
cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL);
if (!cper_dump)
return -ENOMEM;
- if (checksum != amd_sriov_msg_checksum(cper_dump, used_size, 0, 0))
+ if (checksum != amd_sriov_msg_checksum(cper_dump, used_size, 0, 0)) {
+ ret = -EINVAL;
goto out;
+ }
*more = cper_dump->more;
@@ -1425,7 +1430,7 @@ static int amdgpu_virt_req_ras_cper_dump_internal(struct amdgpu_device *adev)
int ret = 0;
uint32_t more = 0;
- if (!amdgpu_sriov_ras_cper_en(adev))
+ if (!virt->ops || !virt->ops->req_ras_cper_dump)
return -EOPNOTSUPP;
do {
@@ -1434,7 +1439,7 @@ static int amdgpu_virt_req_ras_cper_dump_internal(struct amdgpu_device *adev)
adev, virt->fw_reserve.ras_telemetry, &more);
else
ret = 0;
- } while (more);
+ } while (more && !ret);
return ret;
}
@@ -1444,6 +1449,9 @@ int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update)
struct amdgpu_virt *virt = &adev->virt;
int ret = 0;
+ if (!amdgpu_sriov_ras_cper_en(adev))
+ return -EOPNOTSUPP;
+
if ((__ratelimit(&virt->ras.ras_cper_dump_rs) || force_update) &&
down_read_trylock(&adev->reset_domain->sem)) {
mutex_lock(&virt->ras.ras_telemetry_mutex);
@@ -1480,3 +1488,16 @@ bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
return true;
}
+
+/**
+ * amdgpu_virt_request_bad_pages() - request bad pages
+ * @adev: amdgpu device.
+ *
+ * Send a command to the GPU hypervisor to write new bad pages into the
+ * shared PF2VF region.
+ */
+void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (virt->ops && virt->ops->req_bad_pages)
+ virt->ops->req_bad_pages(adev);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index df03dba67ab8..577c6194db78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -97,6 +97,7 @@ struct amdgpu_virt_ops {
bool (*rcvd_ras_intr)(struct amdgpu_device *adev);
int (*req_ras_err_count)(struct amdgpu_device *adev);
int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr);
+ int (*req_bad_pages)(struct amdgpu_device *adev);
};
/*
@@ -146,11 +147,13 @@ enum AMDGIM_FEATURE_FLAG {
enum AMDGIM_REG_ACCESS_FLAG {
/* Use PSP to program IH_RB_CNTL */
- AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0),
+ AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0),
/* Use RLC to program MMHUB regs */
- AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
+ AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
/* Use RLC to program GC regs */
- AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
+ AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
+	/* Use PSP to program L1_TLB_CNTL */
+ AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN = (1 << 3),
};
struct amdgim_pf2vf_info_v1 {
@@ -260,7 +263,10 @@ struct amdgpu_virt {
uint32_t reg_val_offs;
struct amdgpu_irq_src ack_irq;
struct amdgpu_irq_src rcv_irq;
+
struct work_struct flr_work;
+ struct work_struct bad_pages_work;
+
struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops;
struct amdgpu_vf_error_buffer vf_errors;
@@ -330,6 +336,10 @@ struct amdgpu_video_codec_info;
(amdgpu_sriov_vf((adev)) && \
((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN)))
+#define amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN)))
+
#define amdgpu_sriov_rlcg_error_report_enabled(adev) \
(amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))
@@ -423,4 +433,5 @@ int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update)
int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
enum amdgpu_ras_block block);
+void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ce52b4d75e94..3911c78f8282 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -787,7 +787,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
ring->funcs->emit_wreg;
- cleaner_shader_needed = adev->gfx.enable_cleaner_shader &&
+ cleaner_shader_needed = job->run_cleaner_shader &&
+ adev->gfx.enable_cleaner_shader &&
ring->funcs->emit_cleaner_shader && job->base.s_fence &&
&job->base.s_fence->scheduled == isolation->spearhead;
@@ -817,7 +818,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
- if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
+ if (ring->funcs->emit_gds_switch &&
gds_switch_needed) {
amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
job->gds_size, job->gws_base,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index 23b6f7a4aa4a..b03c3895897b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -709,10 +709,10 @@ void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
struct amdgpu_xcp_cfg *xcp_cfg;
int i;
- if (!adev->xcp_mgr)
+ if (!adev->xcp_mgr || !adev->xcp_mgr->xcp_cfg)
return;
- xcp_cfg = adev->xcp_mgr->xcp_cfg;
+ xcp_cfg = adev->xcp_mgr->xcp_cfg;
for (i = 0; i < xcp_cfg->num_res; i++) {
xcp_res = &xcp_cfg->xcp_res[i];
kobject_put(&xcp_res->kobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 477424472bbe..f51ef4cf16e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -296,15 +296,27 @@ static const struct amdgpu_pcs_ras_field xgmi3x16_pcs_ras_fields[] = {
static u32 xgmi_v6_4_get_link_status(struct amdgpu_device *adev, int global_link_num)
{
- const u32 smnpcs_xgmi3x16_pcs_state_hist1 = 0x11a00070;
- const int xgmi_inst = 2;
- u32 link_inst;
+ const u32 smn_xgmi_6_4_pcs_state_hist1[2] = { 0x11a00070, 0x11b00070 };
+ const u32 smn_xgmi_6_4_1_pcs_state_hist1[2] = { 0x12100070,
+ 0x11b00070 };
+ u32 i, n;
u64 addr;
- link_inst = global_link_num % xgmi_inst;
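+	/*
+	 * global_link_num % n selects the PCS instance within a die and
+	 * global_link_num / n selects the die via extended SMN addressing.
+	 */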
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ n = ARRAY_SIZE(smn_xgmi_6_4_pcs_state_hist1);
+ addr = smn_xgmi_6_4_pcs_state_hist1[global_link_num % n];
+ break;
+ case IP_VERSION(6, 4, 1):
+ n = ARRAY_SIZE(smn_xgmi_6_4_1_pcs_state_hist1);
+ addr = smn_xgmi_6_4_1_pcs_state_hist1[global_link_num % n];
+ break;
+ default:
+ return U32_MAX;
+ }
- addr = (smnpcs_xgmi3x16_pcs_state_hist1 | (link_inst << 20)) +
- adev->asic_funcs->encode_ext_smn_addressing(global_link_num / xgmi_inst);
+ i = global_link_num / n;
+ addr += adev->asic_funcs->encode_ext_smn_addressing(i);
return RREG32_PCIE_EXT(addr);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index d6ac2652f0ac..92ca13097aaa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -109,10 +109,11 @@ union amd_sriov_msg_feature_flags {
union amd_sriov_reg_access_flags {
struct {
- uint32_t vf_reg_access_ih : 1;
- uint32_t vf_reg_access_mmhub : 1;
- uint32_t vf_reg_access_gc : 1;
- uint32_t reserved : 29;
+ uint32_t vf_reg_access_ih : 1;
+ uint32_t vf_reg_access_mmhub : 1;
+ uint32_t vf_reg_access_gc : 1;
+ uint32_t vf_reg_access_l1_tlb_cntl : 1;
+ uint32_t reserved : 28;
} flags;
uint32_t all;
};
@@ -330,6 +331,7 @@ enum amd_sriov_mailbox_request_message {
MB_REQ_MSG_RAS_POISON = 202,
MB_REQ_RAS_ERROR_COUNT = 203,
MB_REQ_RAS_CPER_DUMP = 204,
+ MB_REQ_RAS_BAD_PAGES = 205,
};
/* mailbox message send from host to guest */
@@ -347,6 +349,9 @@ enum amd_sriov_mailbox_response_message {
MB_RES_MSG_GPU_RMA = 10,
MB_RES_MSG_RAS_ERROR_COUNT_READY = 11,
MB_REQ_RAS_CPER_DUMP_READY = 14,
+ MB_RES_MSG_RAS_BAD_PAGES_READY = 15,
+ MB_RES_MSG_RAS_BAD_PAGES_NOTIFICATION = 16,
+ MB_RES_MSG_UNRECOV_ERR_NOTIFICATION = 17,
MB_RES_MSG_TEXT_MESSAGE = 255
};
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index ae071985f26e..1c083304ae77 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -448,53 +448,71 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x
return 0;
}
-static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
- int mode,
- struct amdgpu_xcp_cfg *xcp_cfg)
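+/*
+ * Map a compute partition (PX) mode to the number of XCPs it creates
+ * and the set of NPS memory modes it is compatible with.
+ */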
+static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
+ int px_mode, int *num_xcp,
+ uint16_t *nps_modes)
{
struct amdgpu_device *adev = xcp_mgr->adev;
- int max_res[AMDGPU_XCP_RES_MAX] = {};
- bool res_lt_xcp;
- int num_xcp, i;
- u16 nps_modes;
- if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
+ if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode)))
return -EINVAL;
- max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
- max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
- max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
- max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;
-
- switch (mode) {
+ switch (px_mode) {
case AMDGPU_SPX_PARTITION_MODE:
- num_xcp = 1;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
+ *num_xcp = 1;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
break;
case AMDGPU_DPX_PARTITION_MODE:
- num_xcp = 2;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS2_PARTITION_MODE);
+ *num_xcp = 2;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
case AMDGPU_TPX_PARTITION_MODE:
- num_xcp = 3;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS4_PARTITION_MODE);
+ *num_xcp = 3;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
break;
case AMDGPU_QPX_PARTITION_MODE:
- num_xcp = 4;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS4_PARTITION_MODE);
+ *num_xcp = 4;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
break;
case AMDGPU_CPX_PARTITION_MODE:
- num_xcp = NUM_XCC(adev->gfx.xcc_mask);
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS4_PARTITION_MODE);
+ *num_xcp = NUM_XCC(adev->gfx.xcc_mask);
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ if (amdgpu_sriov_vf(adev))
+ *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
default:
return -EINVAL;
}
+ return 0;
+}
+
+static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
+ int mode,
+ struct amdgpu_xcp_cfg *xcp_cfg)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ int max_res[AMDGPU_XCP_RES_MAX] = {};
+ bool res_lt_xcp;
+ int num_xcp, i, r;
+ u16 nps_modes;
+
+ if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
+ return -EINVAL;
+
+ max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
+ max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
+ max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
+ max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;
+
+ r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp, &nps_modes);
+ if (r)
+ return r;
+
xcp_cfg->compatible_nps_modes =
(adev->gmc.supported_nps_modes & nps_modes);
xcp_cfg->num_res = ARRAY_SIZE(max_res);
@@ -543,30 +561,31 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
enum amdgpu_gfx_partition mode)
{
struct amdgpu_device *adev = xcp_mgr->adev;
- int num_xcc, num_xccs_per_xcp;
+ int num_xcc, num_xccs_per_xcp, r;
+ int num_xcp, nps_mode;
+ u16 supp_nps_modes;
+ bool comp_mode;
+
+ nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp,
+ &supp_nps_modes);
+ if (r)
+ return false;
+ comp_mode = !!(BIT(nps_mode) & supp_nps_modes);
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
switch (mode) {
case AMDGPU_SPX_PARTITION_MODE:
- return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
+ return comp_mode && num_xcc > 0;
case AMDGPU_DPX_PARTITION_MODE:
- return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0;
+ return comp_mode && (num_xcc % 4) == 0;
case AMDGPU_TPX_PARTITION_MODE:
- return (adev->gmc.num_mem_partitions == 1 ||
- adev->gmc.num_mem_partitions == 3) &&
- ((num_xcc % 3) == 0);
+ return comp_mode && ((num_xcc % 3) == 0);
case AMDGPU_QPX_PARTITION_MODE:
num_xccs_per_xcp = num_xcc / 4;
- return (adev->gmc.num_mem_partitions == 1 ||
- adev->gmc.num_mem_partitions == 4) &&
- (num_xccs_per_xcp >= 2);
+ return comp_mode && (num_xccs_per_xcp >= 2);
case AMDGPU_CPX_PARTITION_MODE:
- /* (num_xcc > 1) because 1 XCC is considered SPX, not CPX.
- * (num_xcc % adev->gmc.num_mem_partitions) == 0 because
- * num_compute_partitions can't be less than num_mem_partitions
- */
- return ((num_xcc > 1) &&
- (num_xcc % adev->gmc.num_mem_partitions) == 0);
+ return comp_mode && (num_xcc > 1);
default:
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 81d195d366ce..427b073de2fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -1444,6 +1444,7 @@ static void atom_get_vbios_pn(struct atom_context *ctx)
if (vbios_str == NULL)
vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1;
}
+ OPTIMIZER_HIDE_VAR(vbios_str);
if (vbios_str != NULL && *vbios_str == 0)
vbios_str++;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 521b9faab180..492813ab1b54 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -458,8 +458,8 @@ bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connect
u8 link_status[DP_LINK_STATUS_SIZE];
struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv;
- if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, link_status)
- <= 0)
+ if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux,
+ link_status) < 0)
return false;
if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
@@ -616,7 +616,7 @@ amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_i
drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
@@ -681,7 +681,7 @@ amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_i
drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 508cea965983..9e8715b4739d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -56,6 +56,8 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block);
+u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
+
MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
@@ -67,9 +69,6 @@ MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");
-u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
-
-
static void cik_sdma_free_microcode(struct amdgpu_device *adev)
{
int i;
@@ -993,14 +992,9 @@ static int cik_sdma_sw_fini(struct amdgpu_ip_block *ip_block)
static int cik_sdma_hw_init(struct amdgpu_ip_block *ip_block)
{
- int r;
struct amdgpu_device *adev = ip_block->adev;
- r = cik_sdma_start(adev);
- if (r)
- return r;
-
- return r;
+ return cik_sdma_start(adev);
}
static int cik_sdma_hw_fini(struct amdgpu_ip_block *ip_block)
@@ -1040,14 +1034,10 @@ static bool cik_sdma_is_idle(struct amdgpu_ip_block *ip_block)
static int cik_sdma_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- u32 tmp;
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
- SRBM_STATUS2__SDMA1_BUSY_MASK);
-
- if (!tmp)
+ if (cik_sdma_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 279288365940..8aca4f2734f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -60,9 +60,6 @@
#define AUD5_REGISTER_OFFSET (0x179d - 0x1780)
#define AUD6_REGISTER_OFFSET (0x17a4 - 0x1780)
-#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
-#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
-
#define PIPEID(x) ((x) << 0)
#define MEID(x) ((x) << 2)
#define VMID(x) ((x) << 4)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index df401aded662..bf7c22f81cda 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3075,7 +3075,7 @@ static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return 0;
}
@@ -3227,7 +3227,7 @@ static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 80f01c3989cd..47e05783c4a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3206,7 +3206,7 @@ static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return 0;
}
@@ -3358,7 +3358,7 @@ static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
@@ -3488,8 +3488,7 @@ static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.set_powergating_state = dce_v11_0_set_powergating_state,
};
-static void
-dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
+static void dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 255c70959343..276c025c4c03 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -287,7 +287,7 @@ static void dce_v6_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
@@ -412,7 +412,7 @@ static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
{
if (!render)
WREG32(mmVGA_RENDER_CONTROL,
- RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
+ RREG32(mmVGA_RENDER_CONTROL) & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK);
}
static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
@@ -1011,16 +1011,16 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
/* select wm A */
arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
tmp = arb_control3;
- tmp &= ~LATENCY_WATERMARK_MASK(3);
- tmp |= LATENCY_WATERMARK_MASK(1);
+ tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
+ tmp |= (1 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
/* select wm B */
tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
- tmp &= ~LATENCY_WATERMARK_MASK(3);
- tmp |= LATENCY_WATERMARK_MASK(2);
+ tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
+ tmp |= (2 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
@@ -1089,7 +1089,7 @@ static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
}
WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
- DC_LB_MEMORY_CONFIG(tmp));
+ (tmp << DC_LB_MEMORY_SPLIT__DC_LB_MEMORY_CONFIG__SHIFT));
WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
(buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
@@ -1306,6 +1306,7 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ u32 offset;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
@@ -1327,6 +1328,11 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
+ if (!dig || !dig->afmt || !dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->pin->offset;
+
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
@@ -1348,7 +1354,7 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
return;
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
- u32 tmp = 0;
+ u32 value = 0;
u8 stereo_freqs = 0;
int max_channels = -1;
int j;
@@ -1358,12 +1364,12 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (sad->format == eld_reg_to_type[i][1]) {
if (sad->channels > max_channels) {
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- MAX_CHANNELS, sad->channels);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- DESCRIPTOR_BYTE_2, sad->byte2);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES, sad->freq);
+ value = (sad->channels <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
+ (sad->byte2 <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
+ (sad->freq <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
max_channels = sad->channels;
}
@@ -1374,13 +1380,13 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
}
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
+ value |= (stereo_freqs <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);
+
+ WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
}
kfree(sads);
-
}
static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
@@ -1886,7 +1892,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_bo *abo;
uint64_t fb_location, tiling_flags;
uint32_t fb_format, fb_pitch_pixels, pipe_config;
- u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
+ u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
@@ -1926,76 +1932,76 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
switch (target_fb->format->format) {
case DRM_FORMAT_C8:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
- GRPH_FORMAT(GRPH_FORMAT_INDEXED));
+ fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
break;
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_BGRX5551:
case DRM_FORMAT_BGRA5551:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_RGB565:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB565));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_BGRA1010102:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
- fb_swap = (GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) |
- GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+ fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
+ (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
default:
@@ -2013,18 +2019,18 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
- fb_format |= GRPH_NUM_BANKS(num_banks);
- fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
- fb_format |= GRPH_TILE_SPLIT(tile_split);
- fb_format |= GRPH_BANK_WIDTH(bankw);
- fb_format |= GRPH_BANK_HEIGHT(bankh);
- fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
+ fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
+ fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
+ fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
+ fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
+ fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
+ fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
- fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
+ fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
}
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
- fb_format |= GRPH_PIPE_CONFIG(pipe_config);
+ fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
dce_v6_0_vga_enable(crtc, false);
@@ -2040,7 +2046,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
(u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
+ (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
@@ -2108,14 +2114,13 @@ static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
- INTERLEAVE_EN);
+ DATA_FORMAT__INTERLEAVE_EN_MASK);
else
WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
}
static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
{
-
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -2125,15 +2130,15 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
- (0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
+ ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
+ (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
- (0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
+ ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
+ (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
@@ -2160,19 +2165,19 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
}
WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
- (0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
- ICON_DEGAMMA_MODE(0) |
- (0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
+ ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__ICON_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
- (0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
+ ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
+ (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
- (0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
+ ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
+ (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
- (0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
+ ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
+ (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
/* XXX match this to the depth of the crtc fmt block, move to modeset? */
WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
@@ -2267,8 +2272,6 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
(CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
-
-
}
static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
@@ -2285,7 +2288,6 @@ static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
CUR_CONTROL__CURSOR_EN_MASK |
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
(CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
-
}
static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
@@ -2596,7 +2598,6 @@ static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
@@ -2669,7 +2670,7 @@ static void dce_v6_0_panic_flush(struct drm_plane *plane)
/* Disable DC tiling */
fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
- fb_format &= ~GRPH_ARRAY_MODE(0x7);
+ fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
}
@@ -2745,7 +2746,6 @@ static int dce_v6_0_early_init(struct amdgpu_ip_block *ip_block)
static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- bool ret;
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -2789,8 +2789,7 @@ static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
- if (ret)
+ if (amdgpu_atombios_get_connector_info_from_object_table(adev))
amdgpu_display_print_display_setup(adev_to_drm(adev));
else
return -EINVAL;
@@ -2986,12 +2985,12 @@ static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
interrupt_mask = RREG32(mmINT_MASK + reg_block);
- interrupt_mask &= ~VBLANK_INT_MASK;
+ interrupt_mask &= ~INT_MASK__VBLANK_INT_MASK;
WREG32(mmINT_MASK + reg_block, interrupt_mask);
break;
case AMDGPU_IRQ_STATE_ENABLE:
interrupt_mask = RREG32(mmINT_MASK + reg_block);
- interrupt_mask |= VBLANK_INT_MASK;
+ interrupt_mask |= INT_MASK__VBLANK_INT_MASK;
WREG32(mmINT_MASK + reg_block, interrupt_mask);
break;
default:
@@ -3006,28 +3005,28 @@ static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
}
-static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
+static int dce_v6_0_set_hpd_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
- unsigned type,
+ unsigned hpd,
enum amdgpu_interrupt_state state)
{
u32 dc_hpd_int_cntl;
- if (type >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", type);
+ if (hpd >= adev->mode_info.num_hpd) {
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return 0;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
- dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
- WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
- dc_hpd_int_cntl |= DC_HPDx_INT_EN;
- WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl);
break;
default:
break;
@@ -3036,7 +3035,7 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
+static int dce_v6_0_set_crtc_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3096,7 +3095,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
switch (entry->src_data[0]) {
case 0: /* vblank */
if (disp_int & interrupt_status_offsets[crtc].vblank)
- WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
+ WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_STATUS__VBLANK_ACK_MASK);
else
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
@@ -3107,7 +3106,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
break;
case 1: /* vline */
if (disp_int & interrupt_status_offsets[crtc].vline)
- WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
+ WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_STATUS__VLINE_ACK_MASK);
else
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
@@ -3121,7 +3120,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
return 0;
}
-static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
+static int dce_v6_0_set_pageflip_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3172,7 +3171,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
- if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
@@ -3249,12 +3248,10 @@ static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.set_powergating_state = dce_v6_0_set_powergating_state,
};
-static void
-dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
+static void dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
@@ -3274,7 +3271,6 @@ dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
{
-
struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -3314,7 +3310,6 @@ static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
{
-
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -3325,7 +3320,6 @@ static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
{
-
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig;
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
@@ -3541,17 +3535,17 @@ static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
- .set = dce_v6_0_set_crtc_interrupt_state,
+ .set = dce_v6_0_set_crtc_irq_state,
.process = dce_v6_0_crtc_irq,
};
static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
- .set = dce_v6_0_set_pageflip_interrupt_state,
+ .set = dce_v6_0_set_pageflip_irq_state,
.process = dce_v6_0_pageflip_irq,
};
static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
- .set = dce_v6_0_set_hpd_interrupt_state,
+ .set = dce_v6_0_set_hpd_irq_state,
.process = dce_v6_0_hpd_irq,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 07358546581f..e62ccf9eb73d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -271,7 +271,7 @@ static void dce_v8_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
@@ -3021,7 +3021,7 @@ static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
}
}
-static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
+static int dce_v8_0_set_hpd_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3029,7 +3029,7 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
u32 dc_hpd_int_cntl;
if (type >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", type);
+ DRM_DEBUG("invalid hpd %d\n", type);
return 0;
}
@@ -3051,7 +3051,7 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
+static int dce_v8_0_set_crtc_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3136,7 +3136,7 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
return 0;
}
-static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
+static int dce_v8_0_set_pageflip_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3547,17 +3547,17 @@ static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
- .set = dce_v8_0_set_crtc_interrupt_state,
+ .set = dce_v8_0_set_crtc_irq_state,
.process = dce_v8_0_crtc_irq,
};
static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
- .set = dce_v8_0_set_pageflip_interrupt_state,
+ .set = dce_v8_0_set_pageflip_irq_state,
.process = dce_v8_0_pageflip_irq,
};
static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
- .set = dce_v8_0_set_hpd_interrupt_state,
+ .set = dce_v8_0_set_hpd_irq_state,
.process = dce_v8_0_hpd_irq,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 23e6a05359c2..75ea071744eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -368,11 +368,6 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_10_1[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_DEBUG_INST_ADDR),
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_LX6_CORE_PDEBUG_INST),
/* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME2_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_MES_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE0),
@@ -421,7 +416,16 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_10[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_WG_STATE_OFFSET),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_STATUS)
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_STATUS),
+	/* cp header registers; each read returns the next FIFO entry */
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
};
static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_10[] = {
@@ -448,7 +452,32 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_10[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_MQD_BASE_ADDR),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_MQD_BASE_ADDR_HI),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI)
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI),
+	/* gfx header registers; each read returns the next FIFO entry */
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
};
static const struct soc15_reg_golden golden_settings_gc_10_1[] = {
@@ -4296,9 +4325,7 @@ static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
int ctx_reg_offset;
if (adev->gfx.rlc.cs_data == NULL)
@@ -4306,39 +4333,15 @@ static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
-
- ctx_reg_offset =
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
+ ctx_reg_offset = SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
buffer[count++] = cpu_to_le32(ctx_reg_offset);
buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
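For reference, a plausible shape of the shared CSB helpers this hunk switches to, reconstructed from the open-coded logic removed above; the real definitions live elsewhere in amdgpu and may differ in detail:

	static u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
	{
		u32 count = 0;

		buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
		buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
		buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
		buffer[count++] = cpu_to_le32(0x80000000);
		buffer[count++] = cpu_to_le32(0x80000000);

		return count;
	}

	static u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev,
					      volatile u32 *buffer, u32 count)
	{
		const struct cs_section_def *sect;
		const struct cs_extent_def *ext;
		u32 i;

		for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
			for (ext = sect->section; ext->extent != NULL; ++ext) {
				if (sect->id != SECT_CONTEXT)
					return count;
				buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG,
								      ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
							      PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			}
		}

		return count;
	}

	static void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count)
	{
		buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
		buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
		buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
		buffer[count++] = cpu_to_le32(0);
	}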
static void gfx_v10_0_rlc_fini(struct amdgpu_device *adev)
@@ -4752,6 +4755,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
int i, j, k, r, ring_id = 0;
int xcc_id = 0;
struct amdgpu_device *adev = ip_block->adev;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
@@ -4763,7 +4767,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(10, 1, 4):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 8;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
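The split introduced here is deliberate: adev->gfx.me.num_queue_per_pipe now advertises the hardware queue count per gfx pipe (8 on 10.1.x, 2 on 10.3.7 below) so the extra slots are available to user queues, while the new local num_queue_per_pipe pins kernel gfx ring setup at one queue per pipe. In sketch form:

	/* Sketch of the two counts; values as in the surrounding hunks.
	 * The gfx ring setup loop later in this patch iterates over the
	 * local count, not adev->gfx.me.num_queue_per_pipe. */
	adev->gfx.me.num_queue_per_pipe = 8;	/* hardware / user-queue view */
	num_queue_per_pipe = 1;			/* kernel gfx queues (KGQ) only */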
@@ -4778,7 +4782,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(10, 3, 7):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 2;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 2;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 4;
@@ -4800,7 +4804,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->gfx.cleaner_shader_size = sizeof(gfx_10_1_10_cleaner_shader_hex);
if (adev->gfx.me_fw_version >= 101 &&
adev->gfx.pfp_fw_version >= 158 &&
- adev->gfx.mec_fw_version >= 152) {
+ adev->gfx.mec_fw_version >= 151) {
adev->gfx.enable_cleaner_shader = true;
r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
if (r) {
@@ -4810,7 +4814,9 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
}
break;
case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex;
@@ -4826,6 +4832,34 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
break;
+ case IP_VERSION(10, 3, 6):
+ adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_10_3_0_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 14 &&
+ adev->gfx.pfp_fw_version >= 17 &&
+ adev->gfx.mec_fw_version >= 24) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ case IP_VERSION(10, 3, 7):
+ adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_10_3_0_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 4 &&
+ adev->gfx.pfp_fw_version >= 9 &&
+ adev->gfx.mec_fw_version >= 12) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
@@ -4886,7 +4920,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
/* set up the gfx ring */
for (i = 0; i < adev->gfx.me.num_me; i++) {
- for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
+ for (j = 0; j < num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
continue;
@@ -9645,9 +9679,14 @@ static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printe
for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p, "%-50s \t 0x%08x\n",
- gc_cp_reg_list_10[reg].reg_name,
- adev->gfx.ip_dump_compute_queues[index + reg]);
+ if (i && gc_cp_reg_list_10[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ "mmCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues[index + reg]);
+ else
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_10[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues[index + reg]);
}
index += reg_count;
}
@@ -9708,9 +9747,13 @@ static void gfx_v10_ip_dump(struct amdgpu_ip_block *ip_block)
nv_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0);
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues[index + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET(
- gc_cp_reg_list_10[reg]));
+ if (i && gc_cp_reg_list_10[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET(
+ gc_cp_reg_list_10[reg]));
}
index += reg_count;
}
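Both the print and dump changes above apply the same substitution; a hypothetical helper capturing it (illustrative only, not part of the patch):

	/* The shared compute register list only names the ME1 header-dump
	 * register; when dumping the second MEC (i != 0), redirect the read
	 * to the ME2 bank instead. */
	static u32 gfx_v10_header_dump_offset(const struct amdgpu_hwip_reg_entry *entry,
					      int mec_instance)
	{
		if (mec_instance && entry->reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
			return SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME2_HEADER_DUMP);
		return SOC15_REG_ENTRY_OFFSET(*entry);
	}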
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 2a5c2a1ae3c7..afd6d59164bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -48,6 +48,8 @@
#include "gfx_v11_0_3.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
#define GFX11_NUM_GFX_RINGS 1
#define GFX11_MEC_HPD_SIZE 2048
@@ -177,9 +179,13 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_11_0[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR),
SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
/* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
@@ -230,7 +236,16 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_11[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
};
static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = {
@@ -259,7 +274,24 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
};
static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
@@ -580,33 +612,18 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t padding, offset;
-
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- padding = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
-
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
- *cpu_ptr = cpu_to_le32(0xCAFEDEAD);
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r)
- return r;
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
+ return r;
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
- cpu_ptr = &adev->wb.wb[index];
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ cpu_ptr = &adev->wb.wb[index];
- r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err1;
- }
+ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
@@ -633,12 +650,10 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
else
r = -EINVAL;
err2:
- if (!ring->is_mes_queue)
- amdgpu_ib_free(&ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -833,9 +848,7 @@ static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
int ctx_reg_offset;
if (adev->gfx.rlc.cs_data == NULL)
@@ -843,39 +856,15 @@ static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
- ctx_reg_offset =
- SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
+ ctx_reg_offset = SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
buffer[count++] = cpu_to_le32(ctx_reg_offset);
buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)
@@ -1056,14 +1045,21 @@ static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
#define MQD_FWWORKAREA_SIZE 484
#define MQD_FWWORKAREA_ALIGNMENT 256
-static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
+static void gfx_v11_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev,
struct amdgpu_gfx_shadow_info *shadow_info)
{
- if (adev->gfx.cp_gfx_shadow) {
- shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
- shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
- shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
- shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+ shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
+ shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
+ shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
+ shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+}
+
+static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info,
+ bool skip_check)
+{
+ if (adev->gfx.cp_gfx_shadow || skip_check) {
+ gfx_v11_0_get_gfx_shadow_info_nocheck(adev, shadow_info);
return 0;
} else {
memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
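The new skip_check parameter lets callers fetch the shadow and CSA sizing before adev->gfx.cp_gfx_shadow has been validated, which the user-queue paths need when allocating buffers. A hedged usage sketch (caller context is illustrative):

	/* Hypothetical caller sizing user-queue shadow/CSA buffers. */
	struct amdgpu_gfx_shadow_info info;
	u32 shadow_size, csa_size;

	if (!gfx_v11_0_get_gfx_shadow_info(adev, &info, true)) {
		shadow_size = ALIGN(info.shadow_size, info.shadow_alignment);
		csa_size = ALIGN(info.csa_size, info.csa_alignment);
	}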
@@ -1136,6 +1132,10 @@ static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
+ if (adev->gfx.disable_kq) {
+ ring->no_scheduler = true;
+ ring->no_user_submission = true;
+ }
if (!ring_id)
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
@@ -1568,24 +1568,18 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- int i, j, k, r, ring_id = 0;
+ int i, j, k, r, ring_id;
int xcc_id = 0;
struct amdgpu_device *adev = ip_block->adev;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
- adev->gfx.me.num_me = 1;
- adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
- adev->gfx.mec.num_mec = 1;
- adev->gfx.mec.num_pipe_per_mec = 4;
- adev->gfx.mec.num_queue_per_pipe = 4;
- break;
- case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
@@ -1593,7 +1587,7 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 5, 3):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 2;
adev->gfx.mec.num_mec = 1;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 4;
@@ -1612,6 +1606,35 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
+ if (!adev->gfx.disable_uq &&
+ adev->gfx.me_fw_version >= 2390 &&
+ adev->gfx.pfp_fw_version >= 2530 &&
+ adev->gfx.mec_fw_version >= 2600 &&
+ adev->mes.fw_version[0] >= 120) {
+ adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
+ adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
+ }
+ break;
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
+ /* add firmware version checks here */
+ if (0 && !adev->gfx.disable_uq) {
+ adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
+ adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
if (adev->gfx.me_fw_version >= 2280 &&
@@ -1640,6 +1663,34 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
break;
+ case IP_VERSION(11, 5, 2):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 12 &&
+ adev->gfx.pfp_fw_version >= 15 &&
+ adev->gfx.mec_fw_version >= 15) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ case IP_VERSION(11, 5, 3):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 7 &&
+ adev->gfx.pfp_fw_version >= 8 &&
+ adev->gfx.mec_fw_version >= 8) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
@@ -1701,37 +1752,42 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* set up the gfx ring */
- for (i = 0; i < adev->gfx.me.num_me; i++) {
- for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
- if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
- continue;
-
- r = gfx_v11_0_gfx_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
- ring_id++;
+ if (adev->gfx.num_gfx_rings) {
+ ring_id = 0;
+ /* set up the gfx ring */
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
+ if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
+ continue;
+
+ r = gfx_v11_0_gfx_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
+ ring_id++;
+ }
}
}
}
- ring_id = 0;
- /* set up the compute queues - allocate horizontally across pipes */
- for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
- for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
- k, j))
- continue;
+ if (adev->gfx.num_compute_rings) {
+ ring_id = 0;
+ /* set up the compute queues - allocate horizontally across pipes */
+ for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
+ for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
+ k, j))
+ continue;
- r = gfx_v11_0_compute_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
+ r = gfx_v11_0_compute_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
- ring_id++;
+ ring_id++;
+ }
}
}
}
@@ -4061,6 +4117,8 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -4081,6 +4139,16 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
/* active the queue */
mqd->cp_gfx_hqd_active = 1;
+ /* set gfx UQ items */
+ mqd->shadow_base_lo = lower_32_bits(prop->shadow_addr);
+ mqd->shadow_base_hi = upper_32_bits(prop->shadow_addr);
+ mqd->gds_bkup_base_lo = lower_32_bits(prop->gds_bkup_addr);
+ mqd->gds_bkup_base_hi = upper_32_bits(prop->gds_bkup_addr);
+ mqd->fw_work_area_base_lo = lower_32_bits(prop->csa_addr);
+ mqd->fw_work_area_base_hi = upper_32_bits(prop->csa_addr);
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -4205,6 +4273,8 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
prop->allow_tunneling);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
/* set the wb address whether it's enabled or not */
@@ -4256,6 +4326,10 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_active = prop->hqd_active;
+ /* set UQ fence address */
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -4509,11 +4583,23 @@ static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
return r;
}
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
+ if (adev->gfx.disable_kq) {
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ /* we don't want to set ring->ready */
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ return r;
+ }
+ if (amdgpu_async_gfx_ring)
+ amdgpu_gfx_disable_kgq(adev, 0);
+ } else {
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
}
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
@@ -4722,6 +4808,49 @@ static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+static int gfx_v11_0_set_userq_eop_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int m, p, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_GFX]) {
+ for (m = 0; m < adev->gfx.me.num_me; m++) {
+ for (p = 0; p < adev->gfx.me.num_pipe_per_me; p++) {
+ irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_COMPUTE]) {
+ for (m = 0; m < adev->gfx.mec.num_mec; ++m) {
+ for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) {
+ irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ + (m * adev->gfx.mec.num_pipe_per_mec)
+ + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ return 0;
+}
+
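The irq_type arithmetic above flattens (mec, pipe) into the linear compute EOP interrupt space; with num_pipe_per_mec == 4 as set in sw_init, MEC1 pipes land on offsets 0..3 and MEC2 pipes on 4..7:

	/* Worked example of the mapping (m = mec index, p = pipe index). */
	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		   + (m * adev->gfx.mec.num_pipe_per_mec)	/* 4 per MEC */
		   + p;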
static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -4731,9 +4860,11 @@ static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
+ gfx_v11_0_set_userq_eop_interrupts(adev, false);
if (!adev->no_hw_access) {
- if (amdgpu_async_gfx_ring) {
+ if (amdgpu_async_gfx_ring &&
+ !adev->gfx.disable_kq) {
if (amdgpu_gfx_disable_kgq(adev, 0))
DRM_ERROR("KGQ disable failed\n");
}
@@ -5059,11 +5190,36 @@ static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = true;
+ break;
+ case 1:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = false;
+ break;
+ case 2:
+ adev->gfx.disable_kq = true;
+ adev->gfx.disable_uq = false;
+ break;
+ }
+
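For quick reference, the mapping the amdgpu_user_queue switch above implements (values straight from the switch; -1 is the auto default):

	/*
	 *  amdgpu_user_queue | kernel queues (KQ) | user queues (UQ)
	 *  ------------------+--------------------+-----------------
	 *   -1 / 0 (default) | enabled            | disabled
	 *    1               | enabled            | enabled
	 *    2               | disabled           | enabled
	 */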
adev->gfx.funcs = &gfx_v11_0_gfx_funcs;
- adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
- adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
- AMDGPU_MAX_COMPUTE_RINGS);
+ if (adev->gfx.disable_kq) {
+ /* We need one GFX ring temporarily to set up
+ * the clear state.
+ */
+ adev->gfx.num_gfx_rings = 1;
+ adev->gfx.num_compute_rings = 0;
+ } else {
+ adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
+ adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
+ AMDGPU_MAX_COMPUTE_RINGS);
+ }
gfx_v11_0_set_kiq_pm4_funcs(adev);
gfx_v11_0_set_ring_funcs(adev);
@@ -5094,6 +5250,11 @@ static int gfx_v11_0_late_init(struct amdgpu_ip_block *ip_block)
r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
if (r)
return r;
+
+ r = gfx_v11_0_set_userq_eop_interrupts(adev, true);
+ if (r)
+ return r;
+
return 0;
}
@@ -5691,10 +5852,6 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
(!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
}
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x400000;
-
amdgpu_ring_write(ring, header);
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
@@ -5714,10 +5871,6 @@ static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x40000000;
-
/* Currently, there is a high possibility to get wave ID mismatch
* between ME and GDS, leading to a hw deadlock, because ME generates
* different wave IDs than the GDS expects. This situation happens
@@ -5775,8 +5928,7 @@ static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
amdgpu_ring_write(ring, upper_32_bits(seq));
- amdgpu_ring_write(ring, ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
+ amdgpu_ring_write(ring, 0);
}
static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
@@ -5804,10 +5956,7 @@ static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- if (ring->is_mes_queue)
- gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
- else
- amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* compute doesn't have PFP */
if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
@@ -6036,28 +6185,13 @@ static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
void *de_payload_cpu_addr;
int cnt;
- if (ring->is_mes_queue) {
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v10_gfx_meta_data, de_payload);
- de_payload_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- de_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gds_backup) +
- offsetof(struct v10_gfx_meta_data, de_payload);
- gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- } else {
- offset = offsetof(struct v10_gfx_meta_data, de_payload);
- de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
- de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
+ offset = offsetof(struct v10_gfx_meta_data, de_payload);
+ de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+ de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
- gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
- AMDGPU_CSA_SIZE - adev->gds.gds_size,
- PAGE_SIZE);
- }
+ gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
+ AMDGPU_CSA_SIZE - adev->gds.gds_size,
+ PAGE_SIZE);
de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
@@ -6296,25 +6430,23 @@ static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- int i;
+ u32 doorbell_offset = entry->src_data[0];
u8 me_id, pipe_id, queue_id;
struct amdgpu_ring *ring;
- uint32_t mes_queue_id = entry->src_data[0];
+ int i;
DRM_DEBUG("IH: CP EOP\n");
- if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
- struct amdgpu_mes_queue *queue;
-
- mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
+ if (adev->enable_mes && doorbell_offset) {
+ struct amdgpu_userq_fence_driver *fence_drv = NULL;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long flags;
- spin_lock(&adev->mes.queue_id_lock);
- queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
- if (queue) {
- DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
- amdgpu_fence_process(queue->ring);
- }
- spin_unlock(&adev->mes.queue_id_lock);
+ xa_lock_irqsave(xa, flags);
+ fence_drv = xa_load(xa, doorbell_offset);
+ if (fence_drv)
+ amdgpu_userq_fence_driver_process(fence_drv);
+ xa_unlock_irqrestore(xa, flags);
} else {
me_id = (entry->ring_id & 0x0c) >> 2;
pipe_id = (entry->ring_id & 0x03) >> 0;
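The lookup above assumes user-queue creation publishes each queue's fence driver in adev->userq_xa keyed by doorbell offset. A sketch of that producer side, with illustrative names (the real registration lives in the userq fence code):

	/* Hypothetical registration at user-queue create time. */
	r = xa_err(xa_store_irq(&adev->userq_xa, queue->doorbell_index,
				fence_drv, GFP_KERNEL));
	if (r)
		return r;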
@@ -6481,27 +6613,29 @@ static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
pipe_id = (entry->ring_id & 0x03) >> 0;
queue_id = (entry->ring_id & 0x70) >> 4;
- switch (me_id) {
- case 0:
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
- }
- break;
- case 1:
- case 2:
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
+ if (!adev->gfx.disable_kq) {
+ switch (me_id) {
+ case 0:
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ default:
+ BUG();
+ break;
}
- break;
- default:
- BUG();
- break;
}
}
@@ -6609,6 +6743,69 @@ static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
+static bool gfx_v11_pipe_reset_support(struct amdgpu_device *adev)
+{
+ /* Disable the pipe reset until the CPFW fully supports it. */
+ dev_warn_once(adev->dev, "The CPFW doesn't support pipe reset yet.\n");
+ return false;
+}
+
+static int gfx_v11_reset_gfx_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
+ int r;
+
+ if (!gfx_v11_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v11_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 0);
+ break;
+ default:
+ break;
+ }
+
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, clean_pipe);
+
+ r = (RREG32(SOC15_REG_OFFSET(GC, 0, regCP_GFX_RS64_INSTR_PNTR1)) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v11_0_unset_safe_mode(adev, 0);
+
+ dev_info(adev->dev, "The ring %s pipe reset to the ME firmware start PC %s\n", ring->name,
+ r == 0 ? "succeeded" : "failed");
+ /* FIXME: Sometimes the driver can't cache the ME firmware start PC correctly,
+ * so the pipe reset status relies on the later gfx ring test result.
+ */
+ return 0;
+}
+
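On the success check in gfx_v11_reset_gfx_pipe above: after the assert/deassert pulse the RS64 instruction pointer should sit back at the firmware entry point, so a zero difference is read as success. Annotated, under the assumption that INSTR_PNTR is dword-granular (hence the << 2):

	r = (RREG32(SOC15_REG_OFFSET(GC, 0, regCP_GFX_RS64_INSTR_PNTR1)) << 2) -
	    RS64_FW_UC_START_ADDR_LO;	/* 0 => pipe restarted at FW entry */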
static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -6618,8 +6815,13 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return -EINVAL;
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
- if (r)
- return r;
+ if (r) {
+ dev_warn(adev->dev, "reset via MES failed (%d), trying pipe reset\n", r);
+ r = gfx_v11_reset_gfx_pipe(ring);
+ if (r)
+ return r;
+ }
r = gfx_v11_0_kgq_init_queue(ring, true);
if (r) {
@@ -6636,6 +6838,136 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return amdgpu_ring_test_ring(ring);
}
+static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
+ int r;
+
+ if (!gfx_v11_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v11_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ reset_pipe = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
+ clean_pipe = reset_pipe;
+
+ if (adev->gfx.rs64_enable) {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, clean_pipe);
+ r = (RREG32_SOC15(GC, 0, regCP_MEC_RS64_INSTR_PNTR) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ } else {
+ if (ring->me == 1) {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ /* mec1 fw pc: CP_MEC1_INSTR_PNTR */
+ } else {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ /* mec2 fw pc: CP_MEC2_INSTR_PNTR */
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, clean_pipe);
+ r = RREG32(SOC15_REG_OFFSET(GC, 0, regCP_MEC1_INSTR_PNTR));
+ }
+
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v11_0_unset_safe_mode(adev, 0);
+
+ dev_info(adev->dev, "The ring %s pipe reset to the MEC firmware start PC %s\n", ring->name,
+ r == 0 ? "succeeded" : "failed");
+ /* FIXME: Sometimes the driver can't cache the MEC firmware start PC correctly,
+ * so the pipe reset status relies on the compute ring test result.
+ */
+ return 0;
+}
+
static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -6646,8 +6978,10 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
- dev_err(adev->dev, "reset via MMIO failed %d\n", r);
- return r;
+ dev_warn(adev->dev, "failed (%d) to reset kcq, trying pipe reset\n", r);
+ r = gfx_v11_0_reset_compute_pipe(ring);
+ if (r)
+ return r;
}
r = gfx_v11_0_kcq_init_queue(ring, true);
@@ -6693,9 +7027,14 @@ static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printe
for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p, "%-50s \t 0x%08x\n",
- gc_cp_reg_list_11[reg].reg_name,
- adev->gfx.ip_dump_compute_queues[index + reg]);
+ if (i && gc_cp_reg_list_11[reg].reg_offset == regCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ "regCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues[index + reg]);
+ else
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_11[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues[index + reg]);
}
index += reg_count;
}
@@ -6755,9 +7094,16 @@ static void gfx_v11_ip_dump(struct amdgpu_ip_block *ip_block)
/* ME0 is for GFX so start from 1 for CP */
soc21_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0);
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues[index + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET(
- gc_cp_reg_list_11[reg]));
+ if (i &&
+ gc_cp_reg_list_11[reg].reg_offset ==
+ regCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, 0,
+ regCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET(
+ gc_cp_reg_list_11[reg]));
}
index += reg_count;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 62a257a4a3e9..f09d96bfee16 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -44,6 +44,8 @@
#include "gfx_v12_0.h"
#include "nbif_v6_3_1.h"
#include "mes_v12_0.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
#define GFX12_NUM_GFX_RINGS 1
#define GFX12_MEC_HPD_SIZE 2048
@@ -133,11 +135,14 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR0),
SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR1),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_RS64_INSTR_PNTR),
-
/* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
@@ -186,7 +191,16 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_12[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
};
static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = {
@@ -215,7 +229,24 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
};
static const struct soc15_reg_golden golden_settings_gc_12_0_rev0[] = {
@@ -475,33 +506,18 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t padding, offset;
-
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- padding = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
-
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
- *cpu_ptr = cpu_to_le32(0xCAFEDEAD);
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r)
- return r;
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
+ return r;
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
- cpu_ptr = &adev->wb.wb[index];
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ cpu_ptr = &adev->wb.wb[index];
- r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
- goto err1;
- }
+ r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
+ goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
@@ -528,12 +544,10 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
else
r = -EINVAL;
err2:
- if (!ring->is_mes_queue)
- amdgpu_ib_free(&ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -881,6 +895,34 @@ static void gfx_v12_0_select_me_pipe_q(struct amdgpu_device *adev,
soc24_grbm_select(adev, me, pipe, q, vm);
}
+/* all sizes are in bytes */
+#define MQD_SHADOW_BASE_SIZE 73728
+#define MQD_SHADOW_BASE_ALIGNMENT 256
+#define MQD_FWWORKAREA_SIZE 484
+#define MQD_FWWORKAREA_ALIGNMENT 256
+
+static void gfx_v12_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info)
+{
+ shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
+ shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
+ shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
+ shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+}
+
+static int gfx_v12_0_get_gfx_shadow_info(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info,
+ bool skip_check)
+{
+ if (adev->gfx.cp_gfx_shadow || skip_check) {
+ gfx_v12_0_get_gfx_shadow_info_nocheck(adev, shadow_info);
+ return 0;
+ }
+
+ memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
+ return -EINVAL;
+}
+
static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v12_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v12_0_select_se_sh,
@@ -889,6 +931,7 @@ static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
.read_wave_vgprs = &gfx_v12_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v12_0_select_me_pipe_q,
.update_perfmon_mgcg = &gfx_v12_0_update_perf_clk,
+ .get_gfx_shadow_info = &gfx_v12_0_get_gfx_shadow_info,
};
static int gfx_v12_0_gpu_early_init(struct amdgpu_device *adev)
@@ -1346,6 +1389,7 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
unsigned num_compute_rings;
int xcc_id = 0;
struct amdgpu_device *adev = ip_block->adev;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
@@ -1354,7 +1398,7 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(12, 0, 1):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 8;
adev->gfx.mec.num_mec = 1;
adev->gfx.mec.num_pipe_per_mec = 2;
adev->gfx.mec.num_queue_per_pipe = 4;
@@ -1372,6 +1416,22 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
+ if (!adev->gfx.disable_uq &&
+ adev->gfx.me_fw_version >= 2780 &&
+ adev->gfx.pfp_fw_version >= 2840 &&
+ adev->gfx.mec_fw_version >= 3050 &&
+ adev->mes.fw_version[0] >= 123) {
+ adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
+ adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
if (adev->gfx.me_fw_version >= 2480 &&
adev->gfx.pfp_fw_version >= 2530 &&
adev->gfx.mec_fw_version >= 2680 &&
@@ -1383,11 +1443,13 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
break;
}
- /* recalculate compute rings to use based on hardware configuration */
- num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
- adev->gfx.mec.num_queue_per_pipe) / 2;
- adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
- num_compute_rings);
+ if (adev->gfx.num_compute_rings) {
+ /* recalculate compute rings to use based on hardware configuration */
+ num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe) / 2;
+ adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
+ num_compute_rings);
+ }
/* EOP Event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
@@ -1433,37 +1495,41 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* set up the gfx ring */
- for (i = 0; i < adev->gfx.me.num_me; i++) {
- for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
- if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
- continue;
-
- r = gfx_v12_0_gfx_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
- ring_id++;
+ if (adev->gfx.num_gfx_rings) {
+ /* set up the gfx ring */
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
+ if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
+ continue;
+
+ r = gfx_v12_0_gfx_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
+ ring_id++;
+ }
}
}
}
- ring_id = 0;
- /* set up the compute queues - allocate horizontally across pipes */
- for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
- for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev,
- 0, i, k, j))
- continue;
+ if (adev->gfx.num_compute_rings) {
+ ring_id = 0;
+ /* set up the compute queues - allocate horizontally across pipes */
+ for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
+ for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev,
+ 0, i, k, j))
+ continue;
- r = gfx_v12_0_compute_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
+ r = gfx_v12_0_compute_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
- ring_id++;
+ ring_id++;
+ }
}
}
}
@@ -2948,6 +3014,8 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -2968,6 +3036,14 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
/* active the queue */
mqd->cp_gfx_hqd_active = 1;
+ /* set gfx UQ items */
+ mqd->shadow_base_lo = lower_32_bits(prop->shadow_addr);
+ mqd->shadow_base_hi = upper_32_bits(prop->shadow_addr);
+ mqd->fw_work_area_base_lo = lower_32_bits(prop->csa_addr);
+ mqd->fw_work_area_base_hi = upper_32_bits(prop->csa_addr);
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -3091,6 +3167,8 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
/* set the wb address whether it's enabled or not */
@@ -3142,6 +3220,10 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_active = prop->hqd_active;
+ /* set UQ fence address */
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -3600,6 +3682,49 @@ static int gfx_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+static int gfx_v12_0_set_userq_eop_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int m, p, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_GFX]) {
+ for (m = 0; m < adev->gfx.me.num_me; m++) {
+ for (p = 0; p < adev->gfx.me.num_pipe_per_me; p++) {
+ irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_COMPUTE]) {
+ for (m = 0; m < adev->gfx.mec.num_mec; ++m) {
+ for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) {
+ irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ + (m * adev->gfx.mec.num_pipe_per_mec)
+ + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -3610,6 +3735,7 @@ static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
+ gfx_v12_0_set_userq_eop_interrupts(adev, false);
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
@@ -3698,11 +3824,33 @@ static int gfx_v12_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = true;
+ break;
+ case 1:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = false;
+ break;
+ case 2:
+ adev->gfx.disable_kq = true;
+ adev->gfx.disable_uq = false;
+ break;
+ }
+
adev->gfx.funcs = &gfx_v12_0_gfx_funcs;
- adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS;
- adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
- AMDGPU_MAX_COMPUTE_RINGS);
+ if (adev->gfx.disable_kq) {
+ adev->gfx.num_gfx_rings = 0;
+ adev->gfx.num_compute_rings = 0;
+ } else {
+ adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS;
+ adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
+ AMDGPU_MAX_COMPUTE_RINGS);
+ }
gfx_v12_0_set_kiq_pm4_funcs(adev);
gfx_v12_0_set_ring_funcs(adev);
@@ -3733,6 +3881,10 @@ static int gfx_v12_0_late_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = gfx_v12_0_set_userq_eop_interrupts(adev, true);
+ if (r)
+ return r;
+
return 0;
}
@@ -4172,45 +4324,17 @@ static u64 gfx_v12_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
static void gfx_v12_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size;
- uint64_t wptr_tmp;
-
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- ring->hw_prio);
-
- wptr_tmp = ring->wptr & ring->buf_mask;
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
- *wptr_saved = wptr_tmp;
- /* assume doorbell always being used by mes mapped queue */
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- } else {
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- if (*is_queue_unmap)
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- }
+ if (ring->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
+ ring->wptr);
+ WDOORBELL64(ring->doorbell_index, ring->wptr);
} else {
- if (ring->use_doorbell) {
- /* XXX check if swapping is necessary on BE */
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr);
- WDOORBELL64(ring->doorbell_index, ring->wptr);
- } else {
- WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
- lower_32_bits(ring->wptr));
- WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
- upper_32_bits(ring->wptr));
- }
+ WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
+ lower_32_bits(ring->wptr));
+ WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
+ upper_32_bits(ring->wptr));
}
}
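With the MES special-casing gone, the doorbell path reduces to "mirror the write pointer to a CPU-visible shadow, then ring the doorbell". A minimal userspace model of that ordering, assuming the shadow exists so firmware can recover the wptr; the doorbell pointer is a stand-in for WDOORBELL64:

	#include <stdatomic.h>
	#include <stdint.h>

	struct fake_ring {
		_Atomic uint64_t wptr_shadow;	/* models *ring->wptr_cpu_addr */
		volatile uint64_t *doorbell;	/* models the WDOORBELL64 target */
	};

	static void ring_set_wptr(struct fake_ring *r, uint64_t wptr)
	{
		atomic_store(&r->wptr_shadow, wptr);	/* publish shadow first */
		*r->doorbell = wptr;			/* then notify the hardware */
	}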
@@ -4235,42 +4359,14 @@ static u64 gfx_v12_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
static void gfx_v12_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size;
- uint64_t wptr_tmp;
-
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- ring->hw_prio);
-
- wptr_tmp = ring->wptr & ring->buf_mask;
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
- *wptr_saved = wptr_tmp;
- /* assume doorbell always used by mes mapped queue */
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- } else {
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- if (*is_queue_unmap)
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- }
+ /* XXX check if swapping is necessary on BE */
+ if (ring->use_doorbell) {
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
+ ring->wptr);
+ WDOORBELL64(ring->doorbell_index, ring->wptr);
} else {
- /* XXX check if swapping is necessary on BE */
- if (ring->use_doorbell) {
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr);
- WDOORBELL64(ring->doorbell_index, ring->wptr);
- } else {
- BUG(); /* only DOORBELL method supported on gfx12 now */
- }
+ BUG(); /* only DOORBELL method supported on gfx12 now */
}
}
@@ -4317,10 +4413,6 @@ static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
control |= ib->length_dw | (vmid << 24);
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x400000;
-
amdgpu_ring_write(ring, header);
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
@@ -4340,10 +4432,6 @@ static void gfx_v12_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x40000000;
-
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
@@ -4383,8 +4471,7 @@ static void gfx_v12_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
amdgpu_ring_write(ring, upper_32_bits(seq));
- amdgpu_ring_write(ring, ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
+ amdgpu_ring_write(ring, 0);
}
static void gfx_v12_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
@@ -4412,10 +4499,7 @@ static void gfx_v12_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
static void gfx_v12_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- if (ring->is_mes_queue)
- gfx_v12_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
- else
- amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* compute doesn't have PFP */
if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
@@ -4749,25 +4833,23 @@ static int gfx_v12_0_eop_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- int i;
+ u32 doorbell_offset = entry->src_data[0];
u8 me_id, pipe_id, queue_id;
struct amdgpu_ring *ring;
- uint32_t mes_queue_id = entry->src_data[0];
+ int i;
DRM_DEBUG("IH: CP EOP\n");
- if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
- struct amdgpu_mes_queue *queue;
+ if (adev->enable_mes && doorbell_offset) {
+ struct amdgpu_userq_fence_driver *fence_drv = NULL;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long flags;
- mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
-
- spin_lock(&adev->mes.queue_id_lock);
- queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
- if (queue) {
- DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
- amdgpu_fence_process(queue->ring);
- }
- spin_unlock(&adev->mes.queue_id_lock);
+ xa_lock_irqsave(xa, flags);
+ fence_drv = xa_load(xa, doorbell_offset);
+ if (fence_drv)
+ amdgpu_userq_fence_driver_process(fence_drv);
+ xa_unlock_irqrestore(xa, flags);
} else {
me_id = (entry->ring_id & 0x0c) >> 2;
pipe_id = (entry->ring_id & 0x03) >> 0;
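The xa_load() lookup above implies a registration side that this hunk doesn't show. A hedged sketch, assuming the fence driver is published in adev->userq_xa keyed by the queue's doorbell offset when a user queue is created; the helper name is hypothetical, while the xarray calls are the stock <linux/xarray.h> API:

	/* Hypothetical registration pairing with the xa_load() above. */
	static int userq_publish_fence_drv(struct amdgpu_device *adev,
					   u32 doorbell_offset,
					   struct amdgpu_userq_fence_driver *fence_drv)
	{
		return xa_err(xa_store_irq(&adev->userq_xa, doorbell_offset,
					   fence_drv, GFP_KERNEL));
	}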
@@ -4934,27 +5016,29 @@ static void gfx_v12_0_handle_priv_fault(struct amdgpu_device *adev,
pipe_id = (entry->ring_id & 0x03) >> 0;
queue_id = (entry->ring_id & 0x70) >> 4;
- switch (me_id) {
- case 0:
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
- }
- break;
- case 1:
- case 2:
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
+ if (!adev->gfx.disable_kq) {
+ switch (me_id) {
+ case 0:
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ default:
+ BUG();
+ break;
}
- break;
- default:
- BUG();
- break;
}
}
@@ -5160,6 +5244,69 @@ static void gfx_v12_ip_dump(struct amdgpu_ip_block *ip_block)
amdgpu_gfx_off_ctrl(adev, true);
}
+static bool gfx_v12_pipe_reset_support(struct amdgpu_device *adev)
+{
+ /* Disable pipe reset until the CP firmware fully supports it. */
+ dev_warn_once(adev->dev, "The CP firmware doesn't support pipe reset yet.\n");
+ return false;
+}
+
+static int gfx_v12_reset_gfx_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
+ int r;
+
+ if (!gfx_v12_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v12_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 0);
+ break;
+ default:
+ break;
+ }
+
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, clean_pipe);
+
+ r = (RREG32(SOC15_REG_OFFSET(GC, 0, regCP_GFX_RS64_INSTR_PNTR1)) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ soc24_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v12_0_unset_safe_mode(adev, 0);
+
+ dev_info(adev->dev, "The ring %s pipe reset: %s\n", ring->name,
+ r == 0 ? "successfully" : "failed");
+ /* Sometimes the ME start pc counter can't cache correctly, so the
+ * PC check only as a reference and pipe reset result rely on the
+ * later ring test.
+ */
+ return 0;
+}
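The reset/clean pair above leans on REG_SET_FIELD(), which clears a field's bits and then ORs in the new value at the field's shift. A standalone model with a made-up one-bit field:

	#include <stdint.h>
	#include <stdio.h>

	#define PFP_PIPE0_RESET_MASK	0x00000001u	/* illustrative mask/shift */
	#define PFP_PIPE0_RESET__SHIFT	0

	static uint32_t reg_set_field(uint32_t reg, uint32_t mask, uint32_t shift,
				      uint32_t val)
	{
		return (reg & ~mask) | ((val << shift) & mask);
	}

	int main(void)
	{
		uint32_t reset_pipe = reg_set_field(0, PFP_PIPE0_RESET_MASK,
						    PFP_PIPE0_RESET__SHIFT, 1);
		uint32_t clean_pipe = reg_set_field(reset_pipe, PFP_PIPE0_RESET_MASK,
						    PFP_PIPE0_RESET__SHIFT, 0);

		printf("reset=0x%08x clean=0x%08x\n", reset_pipe, clean_pipe);
		return 0;
	}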
+
static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -5170,8 +5317,10 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
if (r) {
- dev_err(adev->dev, "reset via MES failed %d\n", r);
- return r;
+ dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r);
+ r = gfx_v12_reset_gfx_pipe(ring);
+ if (r)
+ return r;
}
r = gfx_v12_0_kgq_init_queue(ring, true);
@@ -5189,6 +5338,89 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return amdgpu_ring_test_ring(ring);
}
+static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
+ int r = 0;
+
+ if (!gfx_v12_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v12_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ reset_pipe = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
+ clean_pipe = reset_pipe;
+
+ if (adev->gfx.rs64_enable) {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, clean_pipe);
+ r = (RREG32_SOC15(GC, 0, regCP_MEC_RS64_INSTR_PNTR) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ } else {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, clean_pipe);
+ /* No F32 MEC instruction pointer register was found; assume the
+ * driver never runs the MEC in F32 mode.
+ */
+ }
+
+ soc24_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v12_0_unset_safe_mode(adev, 0);
+
+ dev_info(adev->dev, "The ring %s pipe resets: %s\n", ring->name,
+ r == 0 ? "successfully" : "failed");
+ /* Need the ring test to verify the pipe reset result.*/
+ return 0;
+}
+
static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -5199,8 +5431,10 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
- dev_err(adev->dev, "reset via MMIO failed %d\n", r);
- return r;
+ dev_warn(adev->dev, "fail(%d) to reset kcq and try pipe reset\n", r);
+ r = gfx_v12_0_reset_compute_pipe(ring);
+ if (r)
+ return r;
}
r = gfx_v12_0_kcq_init_queue(ring, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 13fbee46417a..70d7a1f434c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -53,6 +53,9 @@
#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
+#define GFX6_NUM_GFX_RINGS 1
+#define GFX6_NUM_COMPUTE_RINGS 2
+
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);
@@ -1732,10 +1735,14 @@ static void gfx_v6_0_constants_init(struct amdgpu_device *adev)
gfx_v6_0_get_cu_info(adev);
gfx_v6_0_config_init(adev);
- WREG32(mmCP_QUEUE_THRESHOLDS, ((0x16 << CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT) |
- (0x2b << CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT)));
- WREG32(mmCP_MEQ_THRESHOLDS, (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
- (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
+ WREG32(mmCP_QUEUE_THRESHOLDS,
+ ((0x16 << CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT) |
+ (0x2b << CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT)));
+
+ /* set HW defaults for 3D engine */
+ WREG32(mmCP_MEQ_THRESHOLDS,
+ (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
+ (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
sx_debug_1 = RREG32(mmSX_DEBUG_1);
WREG32(mmSX_DEBUG_1, sx_debug_1);
@@ -2851,44 +2858,21 @@ static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
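The new amdgpu_gfx_csb_preamble_start()/amdgpu_gfx_csb_preamble_end() helpers are not shown in this diff; the sketch below reconstructs them from the open-coded sequences deleted above (the _sketch suffix marks them as inferred, not the actual amdgpu_gfx.c bodies):

	static u32 amdgpu_gfx_csb_preamble_start_sketch(volatile u32 *buffer)
	{
		u32 count = 0;

		buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
		buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
		buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
		buffer[count++] = cpu_to_le32(0x80000000);
		buffer[count++] = cpu_to_le32(0x80000000);

		return count;
	}

	static void amdgpu_gfx_csb_preamble_end_sketch(volatile u32 *buffer, u32 count)
	{
		buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
		buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
		buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
		buffer[count++] = cpu_to_le32(0);
	}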
static void gfx_v6_0_init_pg(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 8181bd0e4f18..da0534ff1271 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -55,6 +55,9 @@
#define GFX7_NUM_GFX_RINGS 1
#define GFX7_MEC_HPD_SIZE 2048
+#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
+#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
+
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
@@ -3882,67 +3885,22 @@ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- buffer[count++] = cpu_to_le32(0x16000012);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_KAVERI:
- buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_KABINI:
- case CHIP_MULLINS:
- buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_HAWAII:
- buffer[count++] = cpu_to_le32(0x3a00161a);
- buffer[count++] = cpu_to_le32(0x0000002e);
- break;
- default:
- buffer[count++] = cpu_to_le32(0x00000000);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- }
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index bfedd487efc5..5ee2237d8ee8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1223,48 +1223,22 @@ out:
static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
- PACKET3_SET_CONTEXT_REG_START);
+ buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d7db4cb907ae..d377a7c57d5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -225,17 +225,36 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_9[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_SAFE_MODE),
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_INT_STAT),
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_GENERAL_6),
- /* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME2_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE0),
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE1),
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE2),
- SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE3)
+ SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE3),
+ /* packet headers */
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP)
};
static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9[] = {
@@ -277,6 +296,14 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_LO),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_HI),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_GFX_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP)
};
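Listing the same *_HEADER_DUMP offset eight times is presumably deliberate: each MMIO read of these CP registers appears to pop the next header dword from an internal FIFO, so eight entries capture eight dwords. A hedged sketch of an equivalent dump loop (the FIFO-on-read behavior is an assumption, and the destination buffer is illustrative):

	u32 dump[8];	/* destination buffer, illustration only */

	for (int n = 0; n < 8; n++)
		dump[n] = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_HEADER_DUMP));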
enum ta_ras_gfx_subblock {
@@ -1624,42 +1651,16 @@ static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
@@ -5441,16 +5442,8 @@ static void gfx_v9_0_ring_patch_ce_meta(struct amdgpu_ring *ring,
payload_size = sizeof(struct v9_ce_ib_state);
- if (ring->is_mes_queue) {
- payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
- } else {
- payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
- }
+ payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
+ ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, payload_size);
@@ -5473,16 +5466,8 @@ static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring,
payload_size = sizeof(struct v9_de_ib_state);
- if (ring->is_mes_queue) {
- payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
- } else {
- payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
- }
+ payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
+ de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
((struct v9_de_ib_state *)de_payload_cpu_addr)->ib_completion_status =
IB_COMPLETION_STATUS_PREEMPTED;
@@ -5672,19 +5657,9 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
- if (ring->is_mes_queue) {
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ce_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- } else {
- offset = offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
- ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
- }
+ offset = offsetof(struct v9_gfx_meta_data, ce_payload);
+ ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+ ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
@@ -5770,28 +5745,13 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bo
void *de_payload_cpu_addr;
int cnt;
- if (ring->is_mes_queue) {
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- de_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gds_backup) +
- offsetof(struct v9_gfx_meta_data, de_payload);
- gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- } else {
- offset = offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
- de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
+ offset = offsetof(struct v9_gfx_meta_data, de_payload);
+ de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+ de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
- gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
- AMDGPU_CSA_SIZE - adev->gds.gds_size,
- PAGE_SIZE);
- }
+ gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
+ AMDGPU_CSA_SIZE - adev->gds.gds_size,
+ PAGE_SIZE);
if (usegds) {
de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
@@ -7339,9 +7299,14 @@ static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer
for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p, "%-50s \t 0x%08x\n",
- gc_cp_reg_list_9[reg].reg_name,
- adev->gfx.ip_dump_compute_queues[index + reg]);
+ if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ "mmCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues[index + reg]);
+ else
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_9[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues[index + reg]);
}
index += reg_count;
}
@@ -7378,9 +7343,13 @@ static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block)
soc15_grbm_select(adev, 1 + i, j, k, 0, 0);
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues[index + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET(
- gc_cp_reg_list_9[reg]));
+ if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET(
+ gc_cp_reg_list_9[reg]));
}
index += reg_count;
}
@@ -7394,8 +7363,14 @@ static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block)
static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
/* Emit the cleaner shader */
- amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ else
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER_9_0, 0));
+
amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index d81449f9d822..c48cd47b531f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -1547,7 +1547,7 @@ static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev,
{
uint32_t bank, way, mem;
static const char * const vml2_way_str[] = { "BIGK", "4K" };
- static const char * const utcl2_rounter_str[] = { "VMC", "APT" };
+ static const char * const utcl2_router_str[] = { "VMC", "APT" };
mem = instance % blk->num_mem_blocks;
way = (instance / blk->num_mem_blocks) % blk->num_ways;
@@ -1568,7 +1568,7 @@ static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev,
dev_info(
adev->dev,
"GFX SubBlock UTCL2_ROUTER_IFIF%d_GROUP0_%s, SED %d, DED %d\n",
- bank, utcl2_rounter_str[mem], sec_cnt, ded_cnt);
+ bank, utcl2_router_str[mem], sec_cnt, ded_cnt);
break;
case ATC_L2_CACHE_2M:
dev_info(
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 53fbf6ca7cdb..c233edf60569 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -105,9 +105,6 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
- /* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME2_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
@@ -154,6 +151,14 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
};
struct amdgpu_gfx_ras gfx_v9_4_3_ras;
@@ -1148,6 +1153,12 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
break;
+ case IP_VERSION(9, 5, 0):
+ if (adev->gfx.mec_fw_version >= 21) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
+ }
+ break;
default:
break;
}
@@ -1262,6 +1273,22 @@ static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
}
}
+/* For ASICs that need the xnack chain and whose MEC firmware supports it,
+ * set the SQ_CONFIG1 DISABLE_XNACK_CHECK_IN_RETRY_DISABLE bit and inform
+ * KFD to set the xnack_chain bit in SET_RESOURCES.
+ */
+static void gfx_v9_4_3_xcc_init_sq(struct amdgpu_device *adev, int xcc_id)
+{
+ uint32_t data;
+
+ if (!(adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN))
+ return;
+
+ data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1);
+ data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1, data);
+}
+
static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
int xcc_id)
{
@@ -1306,6 +1333,7 @@ static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
+ gfx_v9_4_3_xcc_init_sq(adev, xcc_id);
}
static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
@@ -1318,6 +1346,20 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
adev->gfx.config.db_debug2 =
RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ /* ToDo: GC 9.4.4 */
+ case IP_VERSION(9, 4, 3):
+ if (adev->gfx.mec_fw_version >= 184)
+ adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
+ break;
+ case IP_VERSION(9, 5, 0):
+ if (adev->gfx.mec_fw_version >= 23)
+ adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
+ break;
+ default:
+ break;
+ }
+
for (i = 0; i < num_xcc; i++)
gfx_v9_4_3_xcc_constants_init(adev, i);
}
@@ -3447,9 +3489,7 @@ static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me,
static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev)
{
- /*TODO: Need check gfx9.4.4 mec fw whether supports pipe reset as well.*/
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
- adev->gfx.mec_fw_version >= 0x0000009b)
+ if (!!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE))
return true;
else
dev_warn_once(adev->dev, "Please use the latest MEC version to see whether support pipe reset\n");
@@ -4558,12 +4598,21 @@ static void gfx_v9_4_3_ip_print(struct amdgpu_ip_block *ip_block, struct drm_pri
"\nxcc:%d mec:%d, pipe:%d, queue:%d\n",
xcc_id, i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p,
- "%-50s \t 0x%08x\n",
- gc_cp_reg_list_9_4_3[reg].reg_name,
- adev->gfx.ip_dump_compute_queues
- [xcc_offset + inst_offset +
- reg]);
+ if (i && gc_cp_reg_list_9_4_3[reg].reg_offset ==
+ regCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p,
+ "%-50s \t 0x%08x\n",
+ "regCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset + inst_offset +
+ reg]);
+ else
+ drm_printf(p,
+ "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_9_4_3[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset + inst_offset +
+ reg]);
}
inst_offset += reg_count;
}
@@ -4612,12 +4661,20 @@ static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
GET_INST(GC, xcc_id));
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues
- [xcc_offset +
- inst_offset + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(
- gc_cp_reg_list_9_4_3[reg],
- GET_INST(GC, xcc_id)));
+ if (i && gc_cp_reg_list_9_4_3[reg].reg_offset ==
+ regCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset +
+ inst_offset + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id),
+ regCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset +
+ inst_offset + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(
+ gc_cp_reg_list_9_4_3[reg],
+ GET_INST(GC, xcc_id)));
}
inst_offset += reg_count;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 809b3a882d0d..a3e2787501f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -428,10 +428,6 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
- /* MES fw manages IH_VMID_x_LUT updating */
- if (ring->is_mes_queue)
- return;
-
if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index fec9a007533a..72211409227b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -393,10 +393,6 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
- /* MES fw manages IH_VMID_x_LUT updating */
- if (ring->is_mes_queue)
- return;
-
if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
else
@@ -844,7 +840,7 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
- adev->vm_manager.first_kfd_vmid = 8;
+ adev->vm_manager.first_kfd_vmid = adev->gfx.disable_kq ? 1 : 8;
amdgpu_vm_manager_init(adev);
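This applies to the gmc_v11 and gmc_v12 hunks alike: the context comment above still describes only the kernel-queue layout; with disable_kq set there are no kernel gfx/compute queues, so KFD can take the whole non-reserved range:

	/* VMID 0 is reserved for the kernel driver's own mappings.
	 *   disable_kq == false: gfx/compute use VMIDs 1-7, KFD uses 8-15
	 *   disable_kq == true:  KFD gets VMIDs 1-15
	 */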
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index c6f290704d47..b645d3e6a6c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -413,10 +413,6 @@ static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
- /* MES fw manages IH_VMID_x_LUT updating */
- if (ring->is_mes_queue)
- return;
-
if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
else
@@ -820,7 +816,7 @@ static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
- adev->vm_manager.first_kfd_vmid = 8;
+ adev->vm_manager.first_kfd_vmid = adev->gfx.disable_kq ? 1 : 8;
amdgpu_vm_manager_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index a992e79d9581..8030fcd64210 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -249,7 +249,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
/* disable VGA render */
tmp = RREG32(mmVGA_RENDER_CONTROL);
- tmp &= ~VGA_VSTATUS_CNTL;
+ tmp &= VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK;
WREG32(mmVGA_RENDER_CONTROL, tmp);
}
/* Update configuration */
@@ -627,17 +627,16 @@ static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
"write" : "read", block, mc_client, mc_id);
}
-/*
static const u32 mc_cg_registers[] = {
- MC_HUB_MISC_HUB_CG,
- MC_HUB_MISC_SIP_CG,
- MC_HUB_MISC_VM_CG,
- MC_XPB_CLK_GAT,
- ATC_MISC_CG,
- MC_CITF_MISC_WR_CG,
- MC_CITF_MISC_RD_CG,
- MC_CITF_MISC_VM_CG,
- VM_L2_CG,
+ mmMC_HUB_MISC_HUB_CG,
+ mmMC_HUB_MISC_SIP_CG,
+ mmMC_HUB_MISC_VM_CG,
+ mmMC_XPB_CLK_GAT,
+ mmATC_MISC_CG,
+ mmMC_CITF_MISC_WR_CG,
+ mmMC_CITF_MISC_RD_CG,
+ mmMC_CITF_MISC_VM_CG,
+ mmVM_L2_CG,
};
static const u32 mc_cg_ls_en[] = {
@@ -672,7 +671,7 @@ static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
data |= mc_cg_ls_en[i];
else
data &= ~mc_cg_ls_en[i];
@@ -689,7 +688,7 @@ static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
data |= mc_cg_en[i];
else
data &= ~mc_cg_en[i];
@@ -705,7 +704,7 @@ static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
orig = data = RREG32_PCIE(ixPCIE_CNTL2);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
@@ -728,7 +727,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
else
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
@@ -744,7 +743,7 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
orig = data = RREG32(mmHDP_MEM_POWER_LS);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
else
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
@@ -752,7 +751,6 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
if (orig != data)
WREG32(mmHDP_MEM_POWER_LS, data);
}
-*/
static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
@@ -1098,6 +1096,20 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
static int gmc_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
+ struct amdgpu_device *adev = ip_block->adev;
+ bool gate = false;
+
+ if (state == AMD_CG_STATE_GATE)
+ gate = true;
+
+ if (!(adev->flags & AMD_IS_APU)) {
+ gmc_v6_0_enable_mc_mgcg(adev, gate);
+ gmc_v6_0_enable_mc_ls(adev, gate);
+ }
+ gmc_v6_0_enable_bif_mgls(adev, gate);
+ gmc_v6_0_enable_hdp_mgcg(adev, gate);
+ gmc_v6_0_enable_hdp_ls(adev, gate);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 83e39f16044a..a8d5795084fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1157,17 +1157,10 @@ static bool gmc_v7_0_is_idle(struct amdgpu_ip_block *ip_block)
static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned int i;
- u32 tmp;
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
- SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
- SRBM_STATUS__MCC_BUSY_MASK |
- SRBM_STATUS__MCD_BUSY_MASK |
- SRBM_STATUS__VMC_BUSY_MASK);
- if (!tmp)
+ if (gmc_v7_0_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 5effe8327d29..282197f4ffb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1213,10 +1213,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
if (uncached) {
mtype = MTYPE_UC;
} else if (ext_coherent) {
- if (gc_ip_version == IP_VERSION(9, 5, 0) || adev->rev_id)
- mtype = is_local ? MTYPE_CC : MTYPE_UC;
- else
- mtype = MTYPE_UC;
+ mtype = is_local ? MTYPE_CC : MTYPE_UC;
} else if (adev->flags & AMD_IS_APU) {
mtype = is_local ? mtype_local : MTYPE_NC;
} else {
@@ -1336,7 +1333,7 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
mtype_local = MTYPE_CC;
*flags = AMDGPU_PTE_MTYPE_VG10(*flags, mtype_local);
- } else if (adev->rev_id) {
+ } else {
/* MTYPE_UC case */
*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
}
@@ -1505,7 +1502,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM;
adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
- adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
adev->umc.ras = &umc_v12_0_ras;
break;
@@ -2075,6 +2071,9 @@ static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
adev->gmc.vram_width = 128 * 64;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
}
static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
@@ -2411,13 +2410,6 @@ static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
adev->gmc.flush_tlb_needs_extra_type_2 =
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) &&
adev->gmc.xgmi.num_physical_nodes;
- /*
- * TODO: This workaround is badly documented and had a buggy
- * implementation. We should probably verify what we do here.
- */
- adev->gmc.flush_tlb_needs_extra_type_0 =
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
- adev->rev_id == 0;
/* The sequence of these two function calls matters.*/
gmc_v9_0_init_golden_registers(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index cbbeadeb53f7..e6c0d86d3486 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -36,22 +36,6 @@
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
-static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- /* We just need to read back a register to post the write.
- * Reading back the remapped register causes problems on
- * some platforms so just read back the memory size register.
- */
- if (adev->nbio.funcs->get_memsize)
- adev->nbio.funcs->get_memsize(adev);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
@@ -185,7 +169,7 @@ struct amdgpu_hdp_ras hdp_v4_0_ras = {
};
const struct amdgpu_hdp_funcs hdp_v4_0_funcs = {
- .flush_hdp = hdp_v4_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.invalidate_hdp = hdp_v4_0_invalidate_hdp,
.update_clock_gating = hdp_v4_0_update_clock_gating,
.get_clock_gating_state = hdp_v4_0_get_clockgating_state,
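amdgpu_hdp_generic_flush() itself is outside this diff, but the four per-IP routines it replaces were byte-for-byte identical, so its body can be reconstructed with reasonable confidence; treat the sketch below as inferred rather than quoted:

	static void amdgpu_hdp_generic_flush_sketch(struct amdgpu_device *adev,
						    struct amdgpu_ring *ring)
	{
		if (!ring || !ring->funcs->emit_wreg) {
			WREG32((adev->rmmio_remap.reg_offset +
				KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
			/* Post the write with a harmless readback; the remapped
			 * register is avoided since reading it back misbehaves
			 * on some platforms. */
			if (adev->nbio.funcs->get_memsize)
				adev->nbio.funcs->get_memsize(adev);
		} else {
			amdgpu_ring_emit_wreg(ring,
					      (adev->rmmio_remap.reg_offset +
					       KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
					      0);
		}
	}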
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
index 086a647308df..8bc001dc9f63 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -27,22 +27,6 @@
#include "hdp/hdp_5_0_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
-static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- /* We just need to read back a register to post the write.
- * Reading back the remapped register causes problems on
- * some platforms so just read back the memory size register.
- */
- if (adev->nbio.funcs->get_memsize)
- adev->nbio.funcs->get_memsize(adev);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
@@ -222,7 +206,7 @@ static void hdp_v5_0_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_hdp_funcs hdp_v5_0_funcs = {
- .flush_hdp = hdp_v5_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.invalidate_hdp = hdp_v5_0_invalidate_hdp,
.update_clock_gating = hdp_v5_0_update_clock_gating,
.get_clock_gating_state = hdp_v5_0_get_clockgating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
index 6ccd31c8bc69..ec20daf4272c 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
@@ -30,22 +30,6 @@
#define regHDP_CLK_CNTL_V6_1 0xd5
#define regHDP_CLK_CNTL_V6_1_BASE_IDX 0
-static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- /* We just need to read back a register to post the write.
- * Reading back the remapped register causes problems on
- * some platforms so just read back the memory size register.
- */
- if (adev->nbio.funcs->get_memsize)
- adev->nbio.funcs->get_memsize(adev);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,
bool enable)
{
@@ -154,7 +138,7 @@ static void hdp_v6_0_get_clockgating_state(struct amdgpu_device *adev,
}
const struct amdgpu_hdp_funcs hdp_v6_0_funcs = {
- .flush_hdp = hdp_v6_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.update_clock_gating = hdp_v6_0_update_clock_gating,
.get_clock_gating_state = hdp_v6_0_get_clockgating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
index 2c9239a22f39..ed1debc03507 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
@@ -27,22 +27,6 @@
#include "hdp/hdp_7_0_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
-static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- /* We just need to read back a register to post the write.
- * Reading back the remapped register causes problems on
- * some platforms so just read back the memory size register.
- */
- if (adev->nbio.funcs->get_memsize)
- adev->nbio.funcs->get_memsize(adev);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v7_0_update_clock_gating(struct amdgpu_device *adev,
bool enable)
{
@@ -142,7 +126,7 @@ static void hdp_v7_0_get_clockgating_state(struct amdgpu_device *adev,
}
const struct amdgpu_hdp_funcs hdp_v7_0_funcs = {
- .flush_hdp = hdp_v7_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.update_clock_gating = hdp_v7_0_update_clock_gating,
.get_clock_gating_state = hdp_v7_0_get_clockgating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index eb4185dcbd1d..5900b560b7de 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -349,6 +349,7 @@ static int ih_v6_0_irq_init(struct amdgpu_device *adev)
if (ret)
return ret;
}
+ ih[i]->overflow = false;
}
/* update doorbell range for ih ring 0 */
@@ -446,7 +447,10 @@ static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev,
wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ if (!amdgpu_sriov_vf(adev))
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ else
+ ih->overflow = true;
/* When a ring buffer overflow happens, start parsing interrupts
* from the last vector that was not overwritten (wptr + 32). Hopefully
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
index 218e16b68f1d..cb94bd71300f 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -28,11 +28,13 @@
#include "soc15d.h"
#include "jpeg_v4_0_3.h"
#include "jpeg_v5_0_1.h"
+#include "mmsch_v5_0.h"
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
+static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
@@ -156,21 +158,16 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
- ring->use_doorbell = false;
+ ring->use_doorbell = true;
ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id);
if (!amdgpu_sriov_vf(adev)) {
ring->doorbell_index =
(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
1 + j + 11 * jpeg_inst;
} else {
- if (j < 4)
- ring->doorbell_index =
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 4 + j + 32 * jpeg_inst;
- else
- ring->doorbell_index =
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 8 + j + 32 * jpeg_inst;
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 2 + j + 32 * jpeg_inst;
}
sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
@@ -237,7 +234,10 @@ static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
int i, j, r, jpeg_inst;
if (amdgpu_sriov_vf(adev)) {
- /* jpeg_v5_0_1_start_sriov(adev); */
+ r = jpeg_v5_0_1_start_sriov(adev);
+ if (r)
+ return r;
+
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
@@ -264,7 +264,7 @@ static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->jpeg.inst[i].ring_dec[j];
if (ring->use_doorbell)
WREG32_SOC15_OFFSET(VCN, GET_INST(VCN, i), regVCN_JPEG_DB_CTRL,
- (ring->pipe ? (ring->pipe - 0x15) : 0),
+ ring->pipe,
ring->doorbell_index <<
VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
VCN_JPEG_DB_CTRL__EN_MASK);
@@ -291,8 +291,10 @@ static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
cancel_delayed_work_sync(&adev->jpeg.idle_work);
- if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
- ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ if (!amdgpu_sriov_vf(adev)) {
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
+ ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ }
return ret;
}
@@ -422,6 +424,119 @@ static void jpeg_v5_0_1_init_jrbc(struct amdgpu_ring *ring)
reg_offset);
}
+static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ uint64_t ctx_addr;
+ uint32_t param, resp, expected;
+ uint32_t tmp, timeout;
+
+ struct amdgpu_mm_table *table = &adev->virt.mm_table;
+ uint32_t *table_loc;
+ uint32_t table_size;
+ uint32_t size, size_dw, item_offset;
+ uint32_t init_status;
+ int i, j, jpeg_inst;
+
+ struct mmsch_v5_0_cmd_direct_write
+ direct_wt = { {0} };
+ struct mmsch_v5_0_cmd_end end = { {0} };
+ struct mmsch_v5_0_init_header header;
+
+ direct_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_WRITE;
+ end.cmd_header.command_type =
+ MMSCH_COMMAND__END;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ jpeg_inst = GET_INST(JPEG, i);
+
+ memset(&header, 0, sizeof(struct mmsch_v5_0_init_header));
+ header.version = MMSCH_VERSION;
+ header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2;
+
+ table_loc = (uint32_t *)table->cpu_addr;
+ table_loc += header.total_size;
+
+ item_offset = header.total_size;
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; j++) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ table_size = 0;
+
+ tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW);
+ MMSCH_V5_0_INSERT_DIRECT_WT(tmp, lower_32_bits(ring->gpu_addr));
+ tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH);
+ MMSCH_V5_0_INSERT_DIRECT_WT(tmp, upper_32_bits(ring->gpu_addr));
+ tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JRBC_RB_SIZE);
+ MMSCH_V5_0_INSERT_DIRECT_WT(tmp, ring->ring_size / 4);
+
+ if (j < 5) {
+ header.mjpegdec0[j].table_offset = item_offset;
+ header.mjpegdec0[j].init_status = 0;
+ header.mjpegdec0[j].table_size = table_size;
+ } else {
+ header.mjpegdec1[j - 5].table_offset = item_offset;
+ header.mjpegdec1[j - 5].init_status = 0;
+ header.mjpegdec1[j - 5].table_size = table_size;
+ }
+ header.total_size += table_size;
+ item_offset += table_size;
+ }
+
+ MMSCH_V5_0_INSERT_END();
+
+ /* send init table to MMSCH */
+ size = sizeof(struct mmsch_v5_0_init_header);
+ table_loc = (uint32_t *)table->cpu_addr;
+ memcpy((void *)table_loc, &header, size);
+
+ ctx_addr = table->gpu_addr;
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
+
+ tmp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID);
+ tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID, tmp);
+
+ size = header.total_size;
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_SIZE, size);
+
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP, 0);
+
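+ /* Notify MMSCH via the host mailbox, then poll the response register
+ * for MMSCH_VF_MAILBOX_RESP__OK.
+ */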
+ param = 0x00000001;
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_HOST, param);
+ tmp = 0;
+ timeout = 1000;
+ resp = 0;
+ expected = MMSCH_VF_MAILBOX_RESP__OK;
+ init_status =
+ ((struct mmsch_v5_0_init_header *)(table_loc))->mjpegdec0[i].init_status;
+ while (resp != expected) {
+ resp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP);
+
+ if (resp != 0)
+ break;
+ udelay(10);
+ tmp = tmp + 10;
+ if (tmp >= timeout) {
+ DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec waiting for regMMSCH_VF_MAILBOX_RESP (expected=0x%08x, readback=0x%08x)\n",
+ tmp, expected, resp);
+ return -EBUSY;
+ }
+ }
+ if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE &&
+ init_status != MMSCH_VF_ENGINE_STATUS__PASS)
+ DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for jpeg: %x\n",
+ resp, init_status);
+
+ }
+ return 0;
+}
+
/**
* jpeg_v5_0_1_start - start JPEG block
*
@@ -581,6 +696,11 @@ static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
struct amdgpu_device *adev = ip_block->adev;
int ret;
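+ /* VFs have no power-gating control; report ungated and return */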
+ if (amdgpu_sriov_vf(adev)) {
+ adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
if (state == adev->jpeg.cur_state)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
new file mode 100644
index 000000000000..d6f50b13e2ba
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
+
+#define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE
+#define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE
+
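+/* Bind a GTT BO into the GART so the MES firmware can address it, and
+ * take an extra reference so the mapping outlives the caller.
+ */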
+static int
+mes_userq_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
+{
+ int ret;
+
+ ret = amdgpu_bo_reserve(bo, true);
+ if (ret) {
+ DRM_ERROR("Failed to reserve bo. ret %d\n", ret);
+ goto err_reserve_bo_failed;
+ }
+
+ ret = amdgpu_ttm_alloc_gart(&bo->tbo);
+ if (ret) {
+ DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
+ goto err_map_bo_gart_failed;
+ }
+
+ amdgpu_bo_unreserve(bo);
+ bo = amdgpu_bo_ref(bo);
+
+ return 0;
+
+err_map_bo_gart_failed:
+ amdgpu_bo_unreserve(bo);
+err_reserve_bo_failed:
+ return ret;
+}
+
+static int
+mes_userq_create_wptr_mapping(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue,
+ uint64_t wptr)
+{
+ struct amdgpu_bo_va_mapping *wptr_mapping;
+ struct amdgpu_vm *wptr_vm;
+ struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj;
+ int ret;
+
+ wptr_vm = queue->vm;
+ ret = amdgpu_bo_reserve(wptr_vm->root.bo, false);
+ if (ret)
+ return ret;
+
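+ /* Resolve the user-supplied wptr GPU VA to its backing BO via the VM
+ * mapping tree; the BO must not span more than one page.
+ */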
+ wptr &= AMDGPU_GMC_HOLE_MASK;
+ wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);
+ amdgpu_bo_unreserve(wptr_vm->root.bo);
+ if (!wptr_mapping) {
+ DRM_ERROR("Failed to lookup wptr bo\n");
+ return -EINVAL;
+ }
+
+ wptr_obj->obj = wptr_mapping->bo_va->base.bo;
+ if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
+ DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n");
+ return -EINVAL;
+ }
+
+ ret = mes_userq_map_gtt_bo_to_gart(wptr_obj->obj);
+ if (ret) {
+ DRM_ERROR("Failed to map wptr bo to GART\n");
+ return ret;
+ }
+
+ queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset_no_check(wptr_obj->obj);
+ return 0;
+}
+
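+/* Translate UAPI queue-priority flags into MES priority levels; anything
+ * unrecognized falls back to normal.
+ */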
+static int convert_to_mes_priority(int priority)
+{
+ switch (priority) {
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW:
+ default:
+ return AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW:
+ return AMDGPU_MES_PRIORITY_LEVEL_LOW;
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH:
+ return AMDGPU_MES_PRIORITY_LEVEL_MEDIUM;
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH:
+ return AMDGPU_MES_PRIORITY_LEVEL_HIGH;
+ }
+}
+
+static int mes_userq_map(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ struct amdgpu_mqd_prop *userq_props = queue->userq_prop;
+ struct mes_add_queue_input queue_input;
+ int r;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
+
+ queue_input.process_va_start = 0;
+ queue_input.process_va_end = adev->vm_manager.max_pfn - 1;
+
+ /* set process quantum to 10 ms and gang quantum to 1 ms as default */
+ queue_input.process_quantum = 100000;
+ queue_input.gang_quantum = 10000;
+ queue_input.paging = false;
+
+ queue_input.process_context_addr = ctx->gpu_addr;
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+ queue_input.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
+ queue_input.gang_global_priority_level = convert_to_mes_priority(queue->priority);
+
+ queue_input.process_id = queue->vm->pasid;
+ queue_input.queue_type = queue->queue_type;
+ queue_input.mqd_addr = queue->mqd.gpu_addr;
+ queue_input.wptr_addr = userq_props->wptr_gpu_addr;
+ queue_input.queue_size = userq_props->queue_size >> 2;
+ queue_input.doorbell_offset = userq_props->doorbell_index;
+ queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo);
+ queue_input.wptr_mc_addr = queue->wptr_obj.gpu_addr;
+
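+ /* add_hw_queue is issued under the MES lock to serialize access to
+ * the scheduler firmware.
+ */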
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r) {
+ DRM_ERROR("Failed to map queue in HW, err (%d)\n", r);
+ return r;
+ }
+
+ DRM_DEBUG_DRIVER("Queue (doorbell:%d) mapped successfully\n", userq_props->doorbell_index);
+ return 0;
+}
+
+static int mes_userq_unmap(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct mes_remove_queue_input queue_input;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ int r;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
+ queue_input.doorbell_offset = queue->doorbell_index;
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ DRM_ERROR("Failed to unmap queue in HW, err (%d)\n", r);
+ return r;
+}
+
+static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue,
+ struct drm_amdgpu_userq_in *mqd_user)
+{
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ int r, size;
+
+ /*
+ * The FW expects at least one page allocated each for the process
+ * ctx and the gang ctx; create a single object covering both.
+ */
+ size = AMDGPU_USERQ_PROC_CTX_SZ + AMDGPU_USERQ_GANG_CTX_SZ;
+ r = amdgpu_userq_create_object(uq_mgr, ctx, size);
+ if (r) {
+ DRM_ERROR("Failed to allocate ctx space bo for userqueue, err:%d\n", r);
+ return r;
+ }
+
+ return 0;
+}
+
+static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
+ struct drm_amdgpu_userq_in *args_in,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
+ struct drm_amdgpu_userq_in *mqd_user = args_in;
+ struct amdgpu_mqd_prop *userq_props;
+ int r;
+
+ /* Structure to initialize MQD for userqueue using generic MQD init function */
+ userq_props = kzalloc(sizeof(struct amdgpu_mqd_prop), GFP_KERNEL);
+ if (!userq_props) {
+ DRM_ERROR("Failed to allocate memory for userq_props\n");
+ return -ENOMEM;
+ }
+
+ if (!mqd_user->wptr_va || !mqd_user->rptr_va ||
+ !mqd_user->queue_va || mqd_user->queue_size == 0) {
+ DRM_ERROR("Invalid MQD parameters for userqueue\n");
+ r = -EINVAL;
+ goto free_props;
+ }
+
+ r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);
+ if (r) {
+ DRM_ERROR("Failed to create MQD object for userqueue\n");
+ goto free_props;
+ }
+
+ /* Initialize the MQD BO with user given values */
+ userq_props->wptr_gpu_addr = mqd_user->wptr_va;
+ userq_props->rptr_gpu_addr = mqd_user->rptr_va;
+ userq_props->queue_size = mqd_user->queue_size;
+ userq_props->hqd_base_gpu_addr = mqd_user->queue_va;
+ userq_props->mqd_gpu_addr = queue->mqd.gpu_addr;
+ userq_props->use_doorbell = true;
+ userq_props->doorbell_index = queue->doorbell_index;
+ userq_props->fence_address = queue->fence_drv->gpu_addr;
+
+ if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
+ struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
+
+ if (mqd_user->mqd_size != sizeof(*compute_mqd)) {
+ DRM_ERROR("Invalid compute IP MQD size\n");
+ r = -EINVAL;
+ goto free_mqd;
+ }
+
+ compute_mqd = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
+ if (IS_ERR(compute_mqd)) {
+ DRM_ERROR("Failed to read user MQD\n");
+ r = -ENOMEM;
+ goto free_mqd;
+ }
+
+ userq_props->eop_gpu_addr = compute_mqd->eop_va;
+ userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
+ userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
+ userq_props->hqd_active = false;
+ userq_props->tmz_queue =
+ mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+ kfree(compute_mqd);
+ } else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
+ struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
+
+ if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
+ DRM_ERROR("Invalid GFX MQD\n");
+ r = -EINVAL;
+ goto free_mqd;
+ }
+
+ mqd_gfx_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
+ if (IS_ERR(mqd_gfx_v11)) {
+ DRM_ERROR("Failed to read user MQD\n");
+ r = -ENOMEM;
+ goto free_mqd;
+ }
+
+ userq_props->shadow_addr = mqd_gfx_v11->shadow_va;
+ userq_props->csa_addr = mqd_gfx_v11->csa_va;
+ userq_props->tmz_queue =
+ mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+ kfree(mqd_gfx_v11);
+ } else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
+ struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
+
+ if (mqd_user->mqd_size != sizeof(*mqd_sdma_v11) || !mqd_user->mqd) {
+ DRM_ERROR("Invalid SDMA MQD\n");
+ r = -EINVAL;
+ goto free_mqd;
+ }
+
+ mqd_sdma_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
+ if (IS_ERR(mqd_sdma_v11)) {
+ DRM_ERROR("Failed to read sdma user MQD\n");
+ r = -ENOMEM;
+ goto free_mqd;
+ }
+
+ userq_props->csa_addr = mqd_sdma_v11->csa_va;
+ kfree(mqd_sdma_v11);
+ }
+
+ queue->userq_prop = userq_props;
+
+ r = mqd_hw_default->init_mqd(adev, (void *)queue->mqd.cpu_ptr, userq_props);
+ if (r) {
+ DRM_ERROR("Failed to initialize MQD for userqueue\n");
+ goto free_mqd;
+ }
+
+ /* Create BO for FW operations */
+ r = mes_userq_create_ctx_space(uq_mgr, queue, mqd_user);
+ if (r) {
+ DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);
+ goto free_mqd;
+ }
+
+ /* FW expects WPTR BOs to be mapped into GART */
+ r = mes_userq_create_wptr_mapping(uq_mgr, queue, userq_props->wptr_gpu_addr);
+ if (r) {
+ DRM_ERROR("Failed to create WPTR mapping\n");
+ goto free_ctx;
+ }
+
+ return 0;
+
+free_ctx:
+ amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
+
+free_mqd:
+ amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
+
+free_props:
+ kfree(userq_props);
+
+ return r;
+}
+
+static void
+mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
+ kfree(queue->userq_prop);
+ amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
+}
+
+const struct amdgpu_userq_funcs userq_mes_funcs = {
+ .mqd_create = mes_userq_mqd_create,
+ .mqd_destroy = mes_userq_mqd_destroy,
+ .unmap = mes_userq_unmap,
+ .map = mes_userq_map,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.h
index 7ac87ef26aec..090ae8897770 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2023 Red Hat Inc.
+ * Copyright 2024 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,27 +19,12 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ *
*/
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ad102_ofa = {
- .sclass = {
- { -1, -1, NVC9FA_VIDEO_OFA },
- {}
- }
-};
-int
-ad102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_ofa_new(&ad102_ofa, device, type, inst, pengine);
+#ifndef MES_USERQ_H
+#define MES_USERQ_H
+#include "amdgpu_userq.h"
- return -ENODEV;
-}
+extern const struct amdgpu_userq_funcs userq_mes_funcs;
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index ef9538fbbf53..c9eba537de09 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -287,6 +287,23 @@ static int convert_to_mes_queue_type(int queue_type)
return -1;
}
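+/* Map AMDGPU_MES_PRIORITY_LEVEL_* onto the AMD_PRIORITY_LEVEL_* values
+ * used by the MES firmware API.
+ */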
+static int convert_to_mes_priority_level(int priority_level)
+{
+ switch (priority_level) {
+ case AMDGPU_MES_PRIORITY_LEVEL_LOW:
+ return AMD_PRIORITY_LEVEL_LOW;
+ case AMDGPU_MES_PRIORITY_LEVEL_NORMAL:
+ default:
+ return AMD_PRIORITY_LEVEL_NORMAL;
+ case AMDGPU_MES_PRIORITY_LEVEL_MEDIUM:
+ return AMD_PRIORITY_LEVEL_MEDIUM;
+ case AMDGPU_MES_PRIORITY_LEVEL_HIGH:
+ return AMD_PRIORITY_LEVEL_HIGH;
+ case AMDGPU_MES_PRIORITY_LEVEL_REALTIME:
+ return AMD_PRIORITY_LEVEL_REALTIME;
+ }
+}
+
static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
struct mes_add_queue_input *input)
{
@@ -310,9 +327,9 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.gang_quantum = input->gang_quantum;
mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
mes_add_queue_pkt.inprocess_gang_priority =
- input->inprocess_gang_priority;
+ convert_to_mes_priority_level(input->inprocess_gang_priority);
mes_add_queue_pkt.gang_global_priority_level =
- input->gang_global_priority_level;
+ convert_to_mes_priority_level(input->gang_global_priority_level);
mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_add_queue_pkt.mqd_addr = input->mqd_addr;
@@ -458,31 +475,6 @@ static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ
return r;
}
-static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
- struct mes_reset_queue_input *input)
-{
- if (input->use_mmio)
- return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
- input->me_id, input->pipe_id,
- input->queue_id, input->vmid);
-
- union MESAPI__RESET mes_reset_queue_pkt;
-
- memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
-
- mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
- mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
- mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
-
- mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
- mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
- /*mes_reset_queue_pkt.reset_queue_only = 1;*/
-
- return mes_v11_0_submit_pkt_and_poll_completion(mes,
- &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
- offsetof(union MESAPI__REMOVE_QUEUE, api_status));
-}
-
static int mes_v11_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
@@ -649,7 +641,7 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
break;
case MES_MISC_OP_CHANGE_CONFIG:
if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) {
- dev_err(mes->adev->dev, "MES FW versoin must be larger than 0x63 to support limit single process feature.\n");
+ dev_err(mes->adev->dev, "MES FW version must be larger than 0x63 to support the limit single process feature.\n");
return -EINVAL;
}
misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG;
@@ -694,7 +686,8 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes->compute_hqd_mask[i];
for (i = 0; i < MAX_GFX_PIPES; i++)
- mes_set_hw_res_pkt.gfx_hqd_mask[i] = mes->gfx_hqd_mask[i];
+ mes_set_hw_res_pkt.gfx_hqd_mask[i] =
+ mes->gfx_hqd_mask[i];
for (i = 0; i < MAX_SDMA_PIPES; i++)
mes_set_hw_res_pkt.sdma_hqd_mask[i] = mes->sdma_hqd_mask[i];
@@ -723,7 +716,7 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes->event_log_gpu_addr;
}
- if (enforce_isolation)
+ if (adev->enforce_isolation[0] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
mes_set_hw_res_pkt.limit_single_process = 1;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
@@ -753,8 +746,8 @@ static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes)
offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
}
-static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes,
- struct mes_reset_legacy_queue_input *input)
+static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input)
{
union MESAPI__RESET mes_reset_queue_pkt;
@@ -772,7 +765,7 @@ static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes,
mes_reset_queue_pkt.queue_type =
convert_to_mes_queue_type(input->queue_type);
- if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) {
+ if (input->legacy_gfx) {
mes_reset_queue_pkt.reset_legacy_gfx = 1;
mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
mes_reset_queue_pkt.queue_id_lp = input->queue_id;
@@ -798,7 +791,6 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.suspend_gang = mes_v11_0_suspend_gang,
.resume_gang = mes_v11_0_resume_gang,
.misc_op = mes_v11_0_misc_op,
- .reset_legacy_queue = mes_v11_0_reset_legacy_queue,
.reset_hw_queue = mes_v11_0_reset_hw_queue,
};
@@ -1701,22 +1693,10 @@ static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int mes_v11_0_late_init(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- /* it's only intended for use in mes_self_test case, not for s0ix and reset */
- if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
- (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3)))
- amdgpu_mes_self_test(adev);
-
- return 0;
-}
-
static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
.name = "mes_v11_0",
.early_init = mes_v11_0_early_init,
- .late_init = mes_v11_0_late_init,
+ .late_init = NULL,
.sw_init = mes_v11_0_sw_init,
.sw_fini = mes_v11_0_sw_fini,
.hw_init = mes_v11_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index e6ab617b9a40..b4f17332d466 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -274,6 +274,23 @@ static int convert_to_mes_queue_type(int queue_type)
return -1;
}
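+/* Map AMDGPU_MES_PRIORITY_LEVEL_* onto the AMD_PRIORITY_LEVEL_* values
+ * used by the MES firmware API.
+ */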
+static int convert_to_mes_priority_level(int priority_level)
+{
+ switch (priority_level) {
+ case AMDGPU_MES_PRIORITY_LEVEL_LOW:
+ return AMD_PRIORITY_LEVEL_LOW;
+ case AMDGPU_MES_PRIORITY_LEVEL_NORMAL:
+ default:
+ return AMD_PRIORITY_LEVEL_NORMAL;
+ case AMDGPU_MES_PRIORITY_LEVEL_MEDIUM:
+ return AMD_PRIORITY_LEVEL_MEDIUM;
+ case AMDGPU_MES_PRIORITY_LEVEL_HIGH:
+ return AMD_PRIORITY_LEVEL_HIGH;
+ case AMDGPU_MES_PRIORITY_LEVEL_REALTIME:
+ return AMD_PRIORITY_LEVEL_REALTIME;
+ }
+}
+
static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes,
struct mes_add_queue_input *input)
{
@@ -297,9 +314,9 @@ static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.gang_quantum = input->gang_quantum;
mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
mes_add_queue_pkt.inprocess_gang_priority =
- input->inprocess_gang_priority;
+ convert_to_mes_priority_level(input->inprocess_gang_priority);
mes_add_queue_pkt.gang_global_priority_level =
- input->gang_global_priority_level;
+ convert_to_mes_priority_level(input->gang_global_priority_level);
mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_add_queue_pkt.mqd_addr = input->mqd_addr;
@@ -477,32 +494,6 @@ static int mes_v12_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ
return r;
}
-static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
- struct mes_reset_queue_input *input)
-{
- union MESAPI__RESET mes_reset_queue_pkt;
- int pipe;
-
- memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
-
- mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
- mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
- mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
-
- mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
- mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
- /*mes_reset_queue_pkt.reset_queue_only = 1;*/
-
- if (mes->adev->enable_uni_mes)
- pipe = AMDGPU_MES_KIQ_PIPE;
- else
- pipe = AMDGPU_MES_SCHED_PIPE;
-
- return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
- &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
- offsetof(union MESAPI__REMOVE_QUEUE, api_status));
-}
-
static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
@@ -762,7 +753,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
pipe * (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE);
}
- if (enforce_isolation)
+ if (adev->enforce_isolation[0] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
mes_set_hw_res_pkt.limit_single_process = 1;
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
@@ -845,8 +836,8 @@ static void mes_v12_0_enable_unmapped_doorbell_handling(
WREG32_SOC15(GC, 0, regCP_UNMAPPED_DOORBELL, data);
}
-static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
- struct mes_reset_legacy_queue_input *input)
+static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input)
{
union MESAPI__RESET mes_reset_queue_pkt;
int pipe;
@@ -865,7 +856,7 @@ static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
mes_reset_queue_pkt.queue_type =
convert_to_mes_queue_type(input->queue_type);
- if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) {
+ if (input->legacy_gfx) {
mes_reset_queue_pkt.reset_legacy_gfx = 1;
mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
mes_reset_queue_pkt.queue_id_lp = input->queue_id;
@@ -878,7 +869,7 @@ static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
}
- if (mes->adev->enable_uni_mes)
+ if (input->is_kq)
pipe = AMDGPU_MES_KIQ_PIPE;
else
pipe = AMDGPU_MES_SCHED_PIPE;
@@ -896,7 +887,6 @@ static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.suspend_gang = mes_v12_0_suspend_gang,
.resume_gang = mes_v12_0_resume_gang,
.misc_op = mes_v12_0_misc_op,
- .reset_legacy_queue = mes_v12_0_reset_legacy_queue,
.reset_hw_queue = mes_v12_0_reset_hw_queue,
};
@@ -1811,21 +1801,10 @@ static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int mes_v12_0_late_init(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- /* it's only intended for use in mes_self_test case, not for s0ix and reset */
- if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend)
- amdgpu_mes_self_test(adev);
-
- return 0;
-}
-
static const struct amd_ip_funcs mes_v12_0_ip_funcs = {
.name = "mes_v12_0",
.early_init = mes_v12_0_early_init,
- .late_init = mes_v12_0_late_init,
+ .late_init = NULL,
.sw_init = mes_v12_0_sw_init,
.sw_fini = mes_v12_0_sw_fini,
.hw_init = mes_v12_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index 84cde1239ee4..76167fadb292 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -30,6 +30,7 @@
#include "soc15_common.h"
#include "soc15.h"
#include "amdgpu_ras.h"
+#include "amdgpu_psp.h"
#define regVM_L2_CNTL3_DEFAULT 0x80100007
#define regVM_L2_CNTL4_DEFAULT 0x000000c1
@@ -192,10 +193,8 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
uint32_t tmp, inst_mask;
int i;
- /* Setup TLB control */
- inst_mask = adev->aid_mask;
- for_each_inst(i, inst_mask) {
- tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
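+ /* The VF cannot write MC_VM_MX_L1_TLB_CNTL directly here; hand the
+ * value to the PSP, which broadcasts it to all instances.
+ */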
+ if (amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev)) {
+ tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
1);
@@ -209,7 +208,26 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
- WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ psp_reg_program_no_ring(&adev->psp, tmp, PSP_REG_MMHUB_L1_TLB_CNTL);
+ } else {
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
+
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
+ 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_ACCESS_MODE, 3);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ MTYPE, MTYPE_UC);/* XXX for emulation. */
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
+
+ WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ }
}
}
@@ -221,6 +239,9 @@ static void mmhub_v1_8_init_snoop_override_regs(struct amdgpu_device *adev)
uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
inst_mask = adev->aid_mask;
for_each_inst(i, inst_mask) {
for (j = 0; j < 5; j++) { /* DAGB instances */
@@ -454,6 +475,30 @@ static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
return 0;
}
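+/* Disable the L1 TLB on all MMHUB instances, going through the PSP when
+ * direct register access is restricted.
+ */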
+static void mmhub_v1_8_disable_l1_tlb(struct amdgpu_device *adev)
+{
+ u32 tmp;
+ u32 i, inst_mask;
+
+ if (amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev)) {
+ tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 0);
+ psp_reg_program_no_ring(&adev->psp, tmp, PSP_REG_MMHUB_L1_TLB_CNTL);
+ } else {
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
+ 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 0);
+ WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ }
+ }
+}
+
static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub;
@@ -467,15 +512,6 @@ static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
for (i = 0; i < 16; i++)
WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL,
i * hub->ctx_distance, 0);
-
- /* Setup TLB control */
- tmp = RREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
- 0);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
- ENABLE_ADVANCED_DRIVER_MODEL, 0);
- WREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL, tmp);
-
if (!amdgpu_sriov_vf(adev)) {
/* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, j, regVM_L2_CNTL);
@@ -485,6 +521,8 @@ static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, j, regVM_L2_CNTL3, 0);
}
}
+
+ mmhub_v1_8_disable_l1_tlb(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h
new file mode 100644
index 000000000000..6f749814929f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MMSCH_V5_0_H__
+#define __MMSCH_V5_0_H__
+
+#include "amdgpu_vcn.h"
+
+#define MMSCH_VERSION_MAJOR 5
+#define MMSCH_VERSION_MINOR 0
+#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
+
+#define RB_ENABLED (1 << 0)
+#define RB4_ENABLED (1 << 1)
+
+#define MMSCH_VF_ENGINE_STATUS__PASS 0x1
+
+#define MMSCH_VF_MAILBOX_RESP__OK 0x1
+#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2
+#define MMSCH_VF_MAILBOX_RESP__FAILED 0x3
+#define MMSCH_VF_MAILBOX_RESP__FAILED_SMALL_CTX_SIZE 0x4
+#define MMSCH_VF_MAILBOX_RESP__UNKNOWN_CMD 0x5
+
+enum mmsch_v5_0_command_type {
+ MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
+ MMSCH_COMMAND__DIRECT_REG_POLLING = 2,
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3,
+ MMSCH_COMMAND__INDIRECT_REG_WRITE = 8,
+ MMSCH_COMMAND__END = 0xf
+};
+
+struct mmsch_v5_0_table_info {
+ uint32_t init_status;
+ uint32_t table_offset;
+ uint32_t table_size;
+};
+
+struct mmsch_v5_0_init_header {
+ uint32_t version;
+ uint32_t total_size;
+ struct mmsch_v5_0_table_info vcn0;
+ struct mmsch_v5_0_table_info mjpegdec0[5];
+ struct mmsch_v5_0_table_info mjpegdec1[5];
+};
+
+struct mmsch_v5_0_cmd_direct_reg_header {
+ uint32_t reg_offset : 28;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v5_0_cmd_indirect_reg_header {
+ uint32_t reg_offset : 20;
+ uint32_t reg_idx_space : 8;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v5_0_cmd_direct_write {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+struct mmsch_v5_0_cmd_direct_read_modify_write {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+ uint32_t write_data;
+ uint32_t mask_value;
+};
+
+struct mmsch_v5_0_cmd_direct_polling {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+ uint32_t mask_value;
+ uint32_t wait_value;
+};
+
+struct mmsch_v5_0_cmd_end {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+};
+
+struct mmsch_v5_0_cmd_indirect_write {
+ struct mmsch_v5_0_cmd_indirect_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
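+/* These helpers append one command to the init table and advance the
+ * caller's table_loc and table_size cursors, which must be in scope.
+ */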
+#define MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
+ size = sizeof(struct mmsch_v5_0_cmd_direct_read_modify_write); \
+ size_dw = size / 4; \
+ direct_rd_mod_wt.cmd_header.reg_offset = reg; \
+ direct_rd_mod_wt.mask_value = mask; \
+ direct_rd_mod_wt.write_data = data; \
+ memcpy((void *)table_loc, &direct_rd_mod_wt, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V5_0_INSERT_DIRECT_WT(reg, value) { \
+ size = sizeof(struct mmsch_v5_0_cmd_direct_write); \
+ size_dw = size / 4; \
+ direct_wt.cmd_header.reg_offset = reg; \
+ direct_wt.reg_value = value; \
+ memcpy((void *)table_loc, &direct_wt, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V5_0_INSERT_DIRECT_POLL(reg, mask, wait) { \
+ size = sizeof(struct mmsch_v5_0_cmd_direct_polling); \
+ size_dw = size / 4; \
+ direct_poll.cmd_header.reg_offset = reg; \
+ direct_poll.mask_value = mask; \
+ direct_poll.wait_value = wait; \
+ memcpy((void *)table_loc, &direct_poll, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V5_0_INSERT_END() { \
+ size = sizeof(struct mmsch_v5_0_cmd_end); \
+ size_dw = size / 4; \
+ memcpy((void *)table_loc, &end, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index f5411b798e11..48101a34e049 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -274,6 +274,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+ struct amdgpu_reset_context reset_context = { 0 };
amdgpu_virt_fini_data_exchange(adev);
@@ -281,8 +282,6 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
if (amdgpu_device_should_recover_gpu(adev)
&& (!amdgpu_device_has_job_running(adev) ||
adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)) {
- struct amdgpu_reset_context reset_context;
- memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
@@ -293,6 +292,19 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
}
}
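+/* The host reported new RAS bad pages: quiesce data exchange, fetch the
+ * updated bad-page list, then bring data exchange back up.
+ */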
+static void xgpu_ai_mailbox_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_request_bad_pages(adev);
+ amdgpu_virt_init_data_exchange(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -312,26 +324,42 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
- case IDH_FLR_NOTIFICATION:
+ case IDH_RAS_BAD_PAGES_NOTIFICATION:
+ xgpu_ai_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.bad_pages_work);
+ break;
+ case IDH_UNRECOV_ERR_NOTIFICATION:
+ xgpu_ai_mailbox_send_ack(adev);
+ ras->is_rma = true;
+ dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
if (amdgpu_sriov_runtime(adev))
WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
- &adev->virt.flr_work),
- "Failed to queue work! at %s",
- __func__);
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
break;
- case IDH_QUERY_ALIVE:
- xgpu_ai_mailbox_send_ack(adev);
- break;
- /* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
- * it byfar since that polling thread will handle it,
- * other msg like flr complete is not handled here.
- */
- case IDH_CLR_MSG_BUF:
- case IDH_FLR_NOTIFICATION_CMPL:
- case IDH_READY_TO_ACCESS_GPU:
- default:
+ case IDH_FLR_NOTIFICATION:
+ if (amdgpu_sriov_runtime(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
+ break;
+ case IDH_QUERY_ALIVE:
+ xgpu_ai_mailbox_send_ack(adev);
+ break;
+ /* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
+ * ignore it here; that polling thread will handle it. Other messages,
+ * such as FLR complete, are not handled here either.
+ */
+ case IDH_CLR_MSG_BUF:
+ case IDH_FLR_NOTIFICATION_CMPL:
+ case IDH_READY_TO_ACCESS_GPU:
+ default:
break;
}
@@ -387,6 +415,7 @@ int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
+ INIT_WORK(&adev->virt.bad_pages_work, xgpu_ai_mailbox_bad_pages_work);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index ed57cbc150af..874b9f8f9804 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -40,6 +40,7 @@ enum idh_request {
IDH_LOG_VF_ERROR = 200,
IDH_READY_TO_RESET = 201,
IDH_RAS_POISON = 202,
+ IDH_REQ_RAS_BAD_PAGES = 205,
};
enum idh_event {
@@ -54,6 +55,9 @@ enum idh_event {
IDH_RAS_POISON_READY,
IDH_PF_SOFT_FLR_NOTIFICATION,
IDH_RAS_ERROR_DETECTED,
+ IDH_RAS_BAD_PAGES_READY = 15,
+ IDH_RAS_BAD_PAGES_NOTIFICATION = 16,
+ IDH_UNRECOV_ERR_NOTIFICATION = 17,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 5aadf24cb202..f6d8597452ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -67,6 +67,8 @@ static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
if (reg == IDH_FAIL)
r = -EINVAL;
+ else if (reg == IDH_UNRECOV_ERR_NOTIFICATION)
+ r = -ENODEV;
else if (reg != event)
return -ENOENT;
@@ -103,6 +105,7 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
int r;
uint64_t timeout, now;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
now = (uint64_t)ktime_to_ms(ktime_get());
timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;
@@ -110,8 +113,16 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
do {
r = xgpu_nv_mailbox_rcv_msg(adev, event);
if (!r) {
- dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n", event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
+ dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n",
+ event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
return 0;
+ } else if (r == -ENODEV) {
+ if (!amdgpu_ras_is_rma(adev)) {
+ ras->is_rma = true;
+ dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
+ }
+ return r;
}
msleep(10);
@@ -166,6 +177,10 @@ static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
enum idh_event event = -1;
send_request:
+
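+ /* Once the VF is flagged unrecoverable, stop issuing host requests */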
+ if (amdgpu_ras_is_rma(adev))
+ return -ENODEV;
+
xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);
switch (req) {
@@ -187,6 +202,9 @@ send_request:
case IDH_REQ_RAS_CPER_DUMP:
event = IDH_RAS_CPER_DUMP_READY;
break;
+ case IDH_REQ_RAS_BAD_PAGES:
+ event = IDH_RAS_BAD_PAGES_READY;
+ break;
default:
break;
}
@@ -320,6 +338,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+ struct amdgpu_reset_context reset_context = { 0 };
amdgpu_virt_fini_data_exchange(adev);
@@ -330,8 +349,6 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
- struct amdgpu_reset_context reset_context;
- memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
@@ -342,6 +359,19 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
}
}
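+/* The host reported new RAS bad pages: quiesce data exchange, fetch the
+ * updated bad-page list, then bring data exchange back up.
+ */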
+static void xgpu_nv_mailbox_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_request_bad_pages(adev);
+ amdgpu_virt_init_data_exchange(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -364,8 +394,27 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
+ case IDH_RAS_BAD_PAGES_NOTIFICATION:
+ xgpu_nv_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.bad_pages_work);
+ break;
+ case IDH_UNRECOV_ERR_NOTIFICATION:
+ xgpu_nv_mailbox_send_ack(adev);
+ if (!amdgpu_ras_is_rma(adev)) {
+ ras->is_rma = true;
+ dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
+ }
+
+ if (amdgpu_sriov_runtime(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
+ break;
case IDH_FLR_NOTIFICATION:
if (amdgpu_sriov_runtime(adev))
WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
@@ -436,6 +485,7 @@ int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
+ INIT_WORK(&adev->virt.bad_pages_work, xgpu_nv_mailbox_bad_pages_work);
return 0;
}
@@ -480,6 +530,11 @@ static int xgpu_nv_req_ras_cper_dump(struct amdgpu_device *adev, u64 vf_rptr)
adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0);
}
+static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev)
+{
+ return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES);
+}
+
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@@ -492,4 +547,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
.req_ras_err_count = xgpu_nv_req_ras_err_count,
.req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
+ .req_bad_pages = xgpu_nv_req_ras_bad_pages,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 72c9fceb9d79..5808689562cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -42,6 +42,7 @@ enum idh_request {
IDH_RAS_POISON = 202,
IDH_REQ_RAS_ERROR_COUNT = 203,
IDH_REQ_RAS_CPER_DUMP = 204,
+ IDH_REQ_RAS_BAD_PAGES = 205,
};
enum idh_event {
@@ -58,6 +59,9 @@ enum idh_event {
IDH_RAS_ERROR_DETECTED,
IDH_RAS_ERROR_COUNT_READY = 11,
IDH_RAS_CPER_DUMP_READY = 14,
+ IDH_RAS_BAD_PAGES_READY = 15,
+ IDH_RAS_BAD_PAGES_NOTIFICATION = 16,
+ IDH_UNRECOV_ERR_NOTIFICATION = 17,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index f23cb79110d6..a376f072700d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -177,8 +177,12 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
{
u32 doorbell_range = 0, doorbell_ctrl = 0;
u32 aid_id = instance;
+ u32 range_size;
if (use_doorbell) {
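+ /* GC 9.5.0 uses a larger VCN doorbell range (0xb) than earlier parts (0x9) */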
+ range_size = (amdgpu_ip_version(adev, GC_HWIP, 0) ==
+ IP_VERSION(9, 5, 0)) ?
+ 0xb : 0x9;
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_OFFSET_ENTRY,
@@ -186,7 +190,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_SIZE_ENTRY,
- 0x9);
+ range_size);
if (aid_id)
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
@@ -204,7 +208,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x4);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
- S2A_DOORBELL_PORT1_RANGE_SIZE, 0x9);
+ S2A_DOORBELL_PORT1_RANGE_SIZE, range_size);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x4);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index fcd708eae75c..80153f837470 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -34,9 +34,6 @@
#include "sdma0/sdma0_4_0_offset.h"
#include "nbio/nbio_7_4_offset.h"
-#include "oss/osssys_4_0_offset.h"
-#include "oss/osssys_4_0_sh_mask.h"
-
MODULE_FIRMWARE("amdgpu/renoir_asd.bin");
MODULE_FIRMWARE("amdgpu/renoir_ta.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_asd.bin");
@@ -99,9 +96,6 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
0x80000000, 0x80000000, false);
@@ -138,8 +132,6 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
0, true);
@@ -147,37 +139,6 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
return ret;
}
-static void psp_v12_0_reroute_ih(struct psp_context *psp)
-{
- struct amdgpu_device *adev = psp->adev;
- uint32_t tmp;
-
- /* Change IH ring for VMC */
- tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
-
- mdelay(20);
- psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
-
- /* Change IH ring for UMC */
- tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
-
- mdelay(20);
- psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
-}
-
static int psp_v12_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -186,49 +147,23 @@ static int psp_v12_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
- psp_v12_0_reroute_ih(psp);
-
- if (amdgpu_sriov_vf(psp->adev)) {
- /* Write low address of the ring to C2PMSG_102 */
- psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
- /* Write high address of the ring to C2PMSG_103 */
- psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);
-
- /* Write the ring initialization command to C2PMSG_101 */
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
- GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
-
- } else {
- /* Write low address of the ring to C2PMSG_69 */
- psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
- /* Write high address of the ring to C2PMSG_70 */
- psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
- /* Write size of ring to C2PMSG_71 */
- psp_ring_reg = ring->ring_size;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
- /* Write the ring initialization command to C2PMSG_64 */
- psp_ring_reg = ring_type;
- psp_ring_reg = psp_ring_reg << 16;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
- }
+ /* Write low address of the ring to C2PMSG_69 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_70 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
+ /* Write size of ring to C2PMSG_71 */
+ psp_ring_reg = ring->ring_size;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
+ /* Write the ring initialization command to C2PMSG_64 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
return ret;
}
@@ -247,9 +182,6 @@ static int psp_v12_0_ring_stop(struct psp_context *psp,
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
GFX_CTRL_CMD_ID_DESTROY_RINGS);
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
/* Wait for response flag (bit 31) */
if (amdgpu_sriov_vf(adev))
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index afdf8ce3b4c5..df612fd9cc50 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -71,20 +71,13 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_4_ta.bin");
/* Retry times for vmbx ready wait */
#define PSP_VMBX_POLLING_LIMIT 3000
-/* VBIOS gfl defines */
-#define MBOX_READY_MASK 0x80000000
-#define MBOX_STATUS_MASK 0x0000FFFF
-#define MBOX_COMMAND_MASK 0x00FF0000
-#define MBOX_READY_FLAG 0x80000000
-#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2
-#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3
-#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4
-
/* memory training timeout define */
#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
#define regMP1_PUB_SCRATCH0 0x3b10090
+#define PSP13_BL_STATUS_SIZE 100
+
static int psp_v13_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -151,6 +144,32 @@ static bool psp_v13_0_is_sos_alive(struct psp_context *psp)
return sol_reg != 0x0;
}
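+/* Log the bootloader status register (C2PMSG_92) of every AID to help
+ * diagnose stalls while waiting for the bootloader.
+ */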
+static void psp_v13_0_bootloader_print_status(struct psp_context *psp,
+ const char *msg)
+{
+ struct amdgpu_device *adev = psp->adev;
+ u32 bl_status_reg;
+ char bl_status_msg[PSP13_BL_STATUS_SIZE];
+ int i, at;
+
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
+ at = 0;
+ for_each_inst(i, adev->aid_mask) {
+ bl_status_reg =
+ (SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_92)
+ << 2) +
+ adev->asic_funcs->encode_ext_smn_addressing(i);
+ at += snprintf(bl_status_msg + at,
+ PSP13_BL_STATUS_SIZE - at,
+ " status(%02i): 0x%08x", i,
+ RREG32_PCIE_EXT(bl_status_reg));
+ }
+ dev_info(adev->dev, "%s - %s", msg, bl_status_msg);
+ }
+}
+
static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -196,6 +215,9 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
if (ret == 0)
return 0;
+ if (retry_loop && !(retry_loop % 10))
+ psp_v13_0_bootloader_print_status(
+ psp, "Waiting for bootloader completion");
}
return ret;
@@ -710,7 +732,8 @@ static int psp_v13_0_exec_spi_cmd(struct psp_context *psp, int cmd)
/* Ring the doorbell */
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_73, 1);
- if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE)
+ if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE ||
+ cmd == C2PMSG_CMD_SPI_GET_FLASH_IMAGE)
ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
else
@@ -766,6 +789,37 @@ static int psp_v13_0_update_spirom(struct psp_context *psp,
return 0;
}
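+/* Read the SPI ROM back into the buffer at fw_pri_mc_addr: program the
+ * destination address in two halves, then issue GET_FLASH_IMAGE.
+ */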
+static int psp_v13_0_dump_spirom(struct psp_context *psp,
+ uint64_t fw_pri_mc_addr)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+
+ /* Confirm PSP is ready to start */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ if (ret) {
+ dev_err(adev->dev, "PSP not ready to start processing, ret = %d", ret);
+ return ret;
+ }
+
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_116, lower_32_bits(fw_pri_mc_addr));
+
+ ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO);
+ if (ret)
+ return ret;
+
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_116, upper_32_bits(fw_pri_mc_addr));
+
+ ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI);
+ if (ret)
+ return ret;
+
+ ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_FLASH_IMAGE);
+
+ return ret;
+}
+
static int psp_v13_0_vbflash_status(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -858,6 +912,25 @@ static bool psp_v13_0_is_reload_needed(struct psp_context *psp)
return false;
}
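+/* Program a PSP-managed register without using the PSP ring: command in
+ * C2PMSG_101, register id in C2PMSG_102, value in C2PMSG_103; bit 31 of
+ * C2PMSG_101 signals completion.
+ */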
+static int psp_v13_0_reg_program_no_ring(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret = -EOPNOTSUPP;
+
+ /* PSP will broadcast the value to all instances */
+ if (amdgpu_sriov_vf(adev)) {
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_GBR_IH_SET);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, id);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_103, val);
+
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ }
+
+ return ret;
+}
+
static const struct psp_funcs psp_v13_0_funcs = {
.init_microcode = psp_v13_0_init_microcode,
.wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state,
@@ -879,11 +952,13 @@ static const struct psp_funcs psp_v13_0_funcs = {
.load_usbc_pd_fw = psp_v13_0_load_usbc_pd_fw,
.read_usbc_pd_fw = psp_v13_0_read_usbc_pd_fw,
.update_spirom = psp_v13_0_update_spirom,
+ .dump_spirom = psp_v13_0_dump_spirom,
.vbflash_stat = psp_v13_0_vbflash_status,
.fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk,
.get_ras_capability = psp_v13_0_get_ras_capability,
.is_aux_sos_load_required = psp_v13_0_is_aux_sos_load_required,
.is_reload_needed = psp_v13_0_is_reload_needed,
+ .reg_program_no_ring = psp_v13_0_reg_program_no_ring,
};
void psp_v13_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 688a720bbbbd..9c169112a5e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -106,8 +106,9 @@ static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
-static void sdma_v4_4_2_set_engine_reset_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev);
+static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring);
+static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring);
static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
u32 instance, u32 offset)
@@ -1333,6 +1334,11 @@ static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
}
}
+static const struct amdgpu_sdma_funcs sdma_v4_4_2_sdma_funcs = {
+ .stop_kernel_queue = &sdma_v4_4_2_stop_queue,
+ .start_kernel_queue = &sdma_v4_4_2_restore_queue,
+};
+
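
This per-instance funcs table replaces the old callback registration: rather than sdma_v4_4_2_set_engine_reset_funcs() hanging pre/post hooks off the device, each SDMA instance now points at its stop/start implementations and the common reset code consults them. A condensed sketch of the core-side flow these callbacks plug into (simplified; the real amdgpu_sdma_reset_engine() also handles locking and error unwinding):

    /* Sketch, assuming stop/start bracket an IP-specific soft reset. */
    static int sdma_reset_engine_sketch(struct amdgpu_device *adev, u32 inst)
    {
            struct amdgpu_sdma_instance *sdma = &adev->sdma.instance[inst];
            int r = 0;

            if (sdma->funcs && sdma->funcs->stop_kernel_queue) {
                    r = sdma->funcs->stop_kernel_queue(&sdma->ring);
                    if (r)
                            return r;
            }

            /* IP-specific engine soft reset goes here */

            if (sdma->funcs && sdma->funcs->start_kernel_queue)
                    r = sdma->funcs->start_kernel_queue(&sdma->ring);

            return r;
    }
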
static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -1351,8 +1357,6 @@ static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
sdma_v4_4_2_set_vm_pte_funcs(adev);
sdma_v4_4_2_set_irq_funcs(adev);
sdma_v4_4_2_set_ras_funcs(adev);
- sdma_v4_4_2_set_engine_reset_funcs(adev);
-
return 0;
}
@@ -1447,6 +1451,7 @@ static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
/* Initialize guilty flags for GFX and PAGE queues */
adev->sdma.instance[i].gfx_guilty = false;
adev->sdma.instance[i].page_guilty = false;
+ adev->sdma.instance[i].funcs = &sdma_v4_4_2_sdma_funcs;
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
@@ -1678,11 +1683,12 @@ static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
-static int sdma_v4_4_2_stop_queue(struct amdgpu_device *adev, uint32_t instance_id)
+static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+ u32 instance_id = GET_INST(SDMA0, ring->me);
u32 inst_mask;
uint64_t rptr;
- struct amdgpu_ring *ring = &adev->sdma.instance[instance_id].ring;
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -1715,11 +1721,11 @@ static int sdma_v4_4_2_stop_queue(struct amdgpu_device *adev, uint32_t instance_
return 0;
}
-static int sdma_v4_4_2_restore_queue(struct amdgpu_device *adev, uint32_t instance_id)
+static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
{
- int i;
+ struct amdgpu_device *adev = ring->adev;
u32 inst_mask;
- struct amdgpu_ring *ring = &adev->sdma.instance[instance_id].ring;
+ int i;
inst_mask = 1 << ring->me;
udelay(50);
@@ -1739,16 +1745,6 @@ static int sdma_v4_4_2_restore_queue(struct amdgpu_device *adev, uint32_t instan
return sdma_v4_4_2_inst_start(adev, inst_mask, true);
}
-static struct sdma_on_reset_funcs sdma_v4_4_2_engine_reset_funcs = {
- .pre_reset = sdma_v4_4_2_stop_queue,
- .post_reset = sdma_v4_4_2_restore_queue,
-};
-
-static void sdma_v4_4_2_set_engine_reset_funcs(struct amdgpu_device *adev)
-{
- amdgpu_sdma_register_on_reset_callbacks(adev, &sdma_v4_4_2_engine_reset_funcs);
-}
-
static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -2373,7 +2369,9 @@ static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
case IP_VERSION(9, 5, 0):
- /*TODO: enable the queue reset flag until fw supported */
+ if ((adev->gfx.mec_fw_version >= 0xf) && amdgpu_dpm_reset_sdma_is_supported(adev))
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 0dce59f4f6e2..9505ae96fbec 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -112,6 +112,8 @@ static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);
+static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring);
+static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring);
static const struct soc15_reg_golden golden_settings_sdma_5[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
@@ -369,67 +371,36 @@ static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;
DRM_DEBUG("Setting write pointer\n");
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- AMDGPU_MES_PRIORITY_LEVEL_NORMAL);
-
+ if (ring->use_doorbell) {
+ DRM_DEBUG("Using doorbell -- "
+ "wptr_offs == 0x%08x "
+ "lower_32_bits(ring->wptr) << 2 == 0x%08x "
+ "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
+ ring->wptr_offs,
+ lower_32_bits(ring->wptr << 2),
+ upper_32_bits(ring->wptr << 2));
+ /* XXX check if swapping is necessary on BE */
atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
ring->wptr << 2);
- *wptr_saved = ring->wptr << 2;
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
-
- if (*is_queue_unmap)
- WDOORBELL64(aggregated_db_index,
- ring->wptr << 2);
- }
+ DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
+ ring->doorbell_index, ring->wptr << 2);
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
- if (ring->use_doorbell) {
- DRM_DEBUG("Using doorbell -- "
- "wptr_offs == 0x%08x "
- "lower_32_bits(ring->wptr) << 2 == 0x%08x "
- "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
- ring->wptr_offs,
- lower_32_bits(ring->wptr << 2),
- upper_32_bits(ring->wptr << 2));
- /* XXX check if swapping is necessary on BE */
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("Not using doorbell -- "
- "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
- "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
- ring->me,
- lower_32_bits(ring->wptr << 2),
- ring->me,
- upper_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
- ring->me, mmSDMA0_GFX_RB_WPTR),
- lower_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
- ring->me, mmSDMA0_GFX_RB_WPTR_HI),
- upper_32_bits(ring->wptr << 2));
- }
+ DRM_DEBUG("Not using doorbell -- "
+ "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
+ "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
+ ring->me,
+ lower_32_bits(ring->wptr << 2),
+ ring->me,
+ upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
+ ring->me, mmSDMA0_GFX_RB_WPTR),
+ lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
+ ring->me, mmSDMA0_GFX_RB_WPTR_HI),
+ upper_32_bits(ring->wptr << 2));
}
}
@@ -575,11 +546,9 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if (flags & AMDGPU_FENCE_FLAG_INT) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -588,15 +557,15 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
* sdma_v5_0_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
- *
+ * @inst_mask: mask of dma engine instances to be disabled
* Stop the gfx async dma ring buffers (NAVI10).
*/
-static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
+static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask)
{
u32 rb_cntl, ib_cntl;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ for_each_inst(i, inst_mask) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
@@ -688,9 +657,11 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl;
int i;
+ uint32_t inst_mask;
+ inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
if (!enable) {
- sdma_v5_0_gfx_stop(adev);
+ sdma_v5_0_gfx_stop(adev, inst_mask);
sdma_v5_0_rlc_stop(adev);
}
@@ -1046,33 +1017,22 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 20);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1085,10 +1045,7 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -1100,8 +1057,7 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
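
With the is_mes_queue branch gone, every ring and IB test follows one writeback-slot pattern: reserve a 32-bit slot, seed a sentinel, let the engine overwrite it, poll, free. The skeleton, condensed from the function above (submission details omitted):

    u32 index, tmp;
    u64 gpu_addr;
    int r;

    r = amdgpu_device_wb_get(adev, &index);        /* reserve a wb slot */
    if (r)
            return r;
    gpu_addr = adev->wb.gpu_addr + (index * 4);
    adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);  /* seed the sentinel */
    /* ... submit a packet that writes 0xDEADBEEF to gpu_addr, poll ... */
    tmp = le32_to_cpu(adev->wb.wb[index]);
    amdgpu_device_wb_free(adev, index);            /* always release it */
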
@@ -1124,38 +1080,24 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256,
- AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -1183,10 +1125,7 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1197,8 +1136,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1389,6 +1327,36 @@ static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
+static int sdma_v5_0_soft_reset_engine(struct amdgpu_device *adev, u32 instance_id)
+{
+ u32 grbm_soft_reset;
+ u32 tmp;
+
+ grbm_soft_reset = REG_SET_FIELD(0,
+ GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
+ 1);
+ grbm_soft_reset <<= instance_id;
+
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ return 0;
+}
+
+static const struct amdgpu_sdma_funcs sdma_v5_0_sdma_funcs = {
+ .stop_kernel_queue = &sdma_v5_0_stop_queue,
+ .start_kernel_queue = &sdma_v5_0_restore_queue,
+ .soft_reset_kernel_queue = &sdma_v5_0_soft_reset_engine,
+};
+
static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -1431,6 +1399,7 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
for (i = 0; i < adev->sdma.num_instances; i++) {
+ adev->sdma.instance[i].funcs = &sdma_v5_0_sdma_funcs;
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
@@ -1572,32 +1541,25 @@ static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block)
static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
- int i, j, r;
- u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
+ u32 inst_id = ring->me;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ return amdgpu_sdma_reset_engine(adev, inst_id);
+}
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
- }
+static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring)
+{
+ u32 f32_cntl, freeze, cntl, stat1_reg;
+ struct amdgpu_device *adev = ring->adev;
+ int i, j, r = 0;
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
+ if (amdgpu_sriov_vf(adev))
return -EINVAL;
- }
+ i = ring->me;
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* stop queue */
- ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
-
- rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ sdma_v5_0_gfx_stop(adev, 1 << i);
/* engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */
freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
@@ -1628,30 +1590,25 @@ static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
+err0:
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
- /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */
- preempt = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
- preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);
-
- soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;
-
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
-
- udelay(50);
-
- soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 inst_id = ring->me;
+ u32 freeze;
+ int r;
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* unfreeze*/
- freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ freeze = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE));
freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+ WREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE), freeze);
- r = sdma_v5_0_gfx_resume_instance(adev, i, true);
-
-err0:
+ r = sdma_v5_0_gfx_resume_instance(adev, inst_id, true);
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return r;
}
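
Note how the monolithic reset_queue was split: reset_queue now only delegates to amdgpu_sdma_reset_engine(), while stop_queue and restore_queue each take and drop RLC safe mode themselves, so the common reset path can drive either half independently. Both halves follow the same bracket:

    amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
    /* ... touch FREEZE / HALT / RB and IB enables for this instance ... */
    amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
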
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 2b39a03ff0c1..a6e612b4a892 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -113,6 +113,8 @@ static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev);
+static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring);
+static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring);
static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
{
@@ -394,11 +396,9 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if ((flags & AMDGPU_FENCE_FLAG_INT)) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -407,15 +407,15 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
* sdma_v5_2_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
- *
+ * @inst_mask: mask of dma engine instances to be disabled
* Stop the gfx async dma ring buffers.
*/
-static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
+static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask)
{
u32 rb_cntl, ib_cntl;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ for_each_inst(i, inst_mask) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
@@ -506,9 +506,11 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl;
int i;
+ uint32_t inst_mask;
+ inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
if (!enable) {
- sdma_v5_2_gfx_stop(adev);
+ sdma_v5_2_gfx_stop(adev, inst_mask);
sdma_v5_2_rlc_stop(adev);
}
@@ -761,37 +763,49 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
return 0;
}
-static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block)
+static int sdma_v5_2_soft_reset_engine(struct amdgpu_device *adev, u32 instance_id)
{
- struct amdgpu_device *adev = ip_block->adev;
u32 grbm_soft_reset;
u32 tmp;
- int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- grbm_soft_reset = REG_SET_FIELD(0,
- GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
- 1);
- grbm_soft_reset <<= i;
+ grbm_soft_reset = REG_SET_FIELD(0,
+ GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
+ 1);
+ grbm_soft_reset <<= instance_id;
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ return 0;
+}
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sdma_v5_2_soft_reset_engine(adev, i);
udelay(50);
}
return 0;
}
+static const struct amdgpu_sdma_funcs sdma_v5_2_sdma_funcs = {
+ .stop_kernel_queue = &sdma_v5_2_stop_queue,
+ .start_kernel_queue = &sdma_v5_2_restore_queue,
+ .soft_reset_kernel_queue = &sdma_v5_2_soft_reset_engine,
+};
+
/**
* sdma_v5_2_start - setup and start the async dma engines
*
@@ -903,33 +917,22 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 20);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -942,10 +945,7 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -957,8 +957,7 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -981,37 +980,23 @@ static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -1039,10 +1024,7 @@ static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1053,8 +1035,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1337,6 +1318,7 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
+ adev->sdma.instance[i].funcs = &sdma_v5_2_sdma_funcs;
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
@@ -1472,32 +1454,25 @@ static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
- int i, j, r;
- u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
+ u32 inst_id = ring->me;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ return amdgpu_sdma_reset_engine(adev, inst_id);
+}
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
- }
+static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring)
+{
+ u32 f32_cntl, freeze, cntl, stat1_reg;
+ struct amdgpu_device *adev = ring->adev;
+ int i, j, r = 0;
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
+ if (amdgpu_sriov_vf(adev))
return -EINVAL;
- }
+ i = ring->me;
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* stop queue */
- ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
-
- rb_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ sdma_v5_2_gfx_stop(adev, 1 << i);
/*engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */
freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
@@ -1530,31 +1505,26 @@ static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
- /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */
- preempt = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
- preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);
-
- soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;
-
-
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
-
- udelay(50);
-
- soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);
+err0:
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 inst_id = ring->me;
+ u32 freeze;
+ int r;
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* unfreeze and unhalt */
- freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ freeze = RREG32(sdma_v5_2_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE));
freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+ WREG32(sdma_v5_2_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE), freeze);
- r = sdma_v5_2_gfx_resume_instance(adev, i, true);
+ r = sdma_v5_2_gfx_resume_instance(adev, inst_id, true);
-err0:
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index c214c3d2149b..da5b5d64f137 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -43,6 +43,7 @@
#include "sdma_common.h"
#include "sdma_v6_0.h"
#include "v11_structs.h"
+#include "mes_userqueue.h"
MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
@@ -376,11 +377,9 @@ static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if (flags & AMDGPU_FENCE_FLAG_INT) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -891,6 +890,9 @@ static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd,
m->sdmax_rlcx_rb_aql_cntl = regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT;
m->sdmax_rlcx_dummy_reg = regSDMA0_QUEUE0_DUMMY_REG_DEFAULT;
+ m->sdmax_rlcx_csa_addr_lo = lower_32_bits(prop->csa_addr);
+ m->sdmax_rlcx_csa_addr_hi = upper_32_bits(prop->csa_addr);
+
return 0;
}
@@ -917,33 +919,22 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -956,10 +947,7 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -971,8 +959,7 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -995,37 +982,23 @@ static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
@@ -1053,10 +1026,7 @@ static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1067,8 +1037,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1300,6 +1269,23 @@ static int sdma_v6_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int r;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = true;
+ break;
+ case 1:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = false;
+ break;
+ case 2:
+ adev->sdma.no_user_submission = true;
+ adev->sdma.disable_uq = false;
+ break;
+ }
+
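
The same policy switch appears again in sdma_v7_0 below. For reference, the mapping it encodes (restated as a comment, not new driver logic):

    /* amdgpu_user_queue   no_user_submission   disable_uq   effect
     *   -1 / 0 (auto)           false              true      kernel queues only
     *        1                  false              false     kernel + user queues
     *        2                  true               false     user queues only
     */
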
r = amdgpu_sdma_init_microcode(adev, 0, true);
if (r)
return r;
@@ -1334,6 +1320,7 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->me = i;
+ ring->no_user_submission = adev->sdma.no_user_submission;
DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
@@ -1376,6 +1363,10 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
else
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+ /* add firmware version checks here */
+ if (0 && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
if (r)
return r;
@@ -1399,11 +1390,39 @@ static int sdma_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int sdma_v6_0_set_userq_trap_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int i, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_DMA]) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ irq_type = AMDGPU_SDMA_IRQ_INSTANCE0 + i;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->sdma.trap_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->sdma.trap_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
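
amdgpu_irq_get() and amdgpu_irq_put() are reference counted, so this helper has to be called symmetrically: hw_init() takes one trap-interrupt reference per SDMA instance when user-queue support is active, and hw_fini() drops it again, as the hunks below show. The pairing, in sketch form:

    /* hw_init(): enable trap interrupts for user queues */
    r = sdma_v6_0_set_userq_trap_interrupts(adev, true);

    /* hw_fini(): drop the same references */
    sdma_v6_0_set_userq_trap_interrupts(adev, false);
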
static int sdma_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
- return sdma_v6_0_start(adev);
+ r = sdma_v6_0_start(adev);
+ if (r)
+ return r;
+
+ return sdma_v6_0_set_userq_trap_interrupts(adev, true);
}
static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
@@ -1415,6 +1434,7 @@ static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
sdma_v6_0_ctxempty_int_enable(adev, false);
sdma_v6_0_enable(adev, false);
+ sdma_v6_0_set_userq_trap_interrupts(adev, false);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index b2706221df99..befe013b11a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -42,6 +42,7 @@
#include "sdma_common.h"
#include "sdma_v7_0.h"
#include "v12_structs.h"
+#include "mes_userqueue.h"
MODULE_FIRMWARE("amdgpu/sdma_7_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin");
@@ -204,66 +205,39 @@ static uint64_t sdma_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
static void sdma_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;
DRM_DEBUG("Setting write pointer\n");
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- ring->hw_prio);
-
+ if (ring->use_doorbell) {
+ DRM_DEBUG("Using doorbell -- "
+ "wptr_offs == 0x%08x "
+ "lower_32_bits(ring->wptr) << 2 == 0x%08x "
+ "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
+ ring->wptr_offs,
+ lower_32_bits(ring->wptr << 2),
+ upper_32_bits(ring->wptr << 2));
+ /* XXX check if swapping is necessary on BE */
atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
ring->wptr << 2);
- *wptr_saved = ring->wptr << 2;
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- }
+ DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
+ ring->doorbell_index, ring->wptr << 2);
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
- if (ring->use_doorbell) {
- DRM_DEBUG("Using doorbell -- "
- "wptr_offs == 0x%08x "
- "lower_32_bits(ring->wptr) << 2 == 0x%08x "
- "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
- ring->wptr_offs,
- lower_32_bits(ring->wptr << 2),
- upper_32_bits(ring->wptr << 2));
- /* XXX check if swapping is necessary on BE */
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("Not using doorbell -- "
- "regSDMA%i_GFX_RB_WPTR == 0x%08x "
- "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
- ring->me,
- lower_32_bits(ring->wptr << 2),
- ring->me,
- upper_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
- ring->me,
- regSDMA0_QUEUE0_RB_WPTR),
- lower_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
- ring->me,
- regSDMA0_QUEUE0_RB_WPTR_HI),
- upper_32_bits(ring->wptr << 2));
- }
+ DRM_DEBUG("Not using doorbell -- "
+ "regSDMA%i_GFX_RB_WPTR == 0x%08x "
+ "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
+ ring->me,
+ lower_32_bits(ring->wptr << 2),
+ ring->me,
+ upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
+ ring->me,
+ regSDMA0_QUEUE0_RB_WPTR),
+ lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
+ ring->me,
+ regSDMA0_QUEUE0_RB_WPTR_HI),
+ upper_32_bits(ring->wptr << 2));
}
}
@@ -407,11 +381,9 @@ static void sdma_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if (flags & AMDGPU_FENCE_FLAG_INT) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -935,6 +907,9 @@ static int sdma_v7_0_mqd_init(struct amdgpu_device *adev, void *mqd,
m->sdmax_rlcx_rb_aql_cntl = 0x4000; //regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT;
m->sdmax_rlcx_dummy_reg = 0xf; //regSDMA0_QUEUE0_DUMMY_REG_DEFAULT;
+ m->sdmax_rlcx_csa_addr_lo = lower_32_bits(prop->csa_addr);
+ m->sdmax_rlcx_csa_addr_hi = upper_32_bits(prop->csa_addr);
+
return 0;
}
@@ -961,33 +936,22 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1000,10 +964,7 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -1015,8 +976,7 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1039,37 +999,23 @@ static int sdma_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
@@ -1097,10 +1043,7 @@ static int sdma_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1111,8 +1054,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1312,6 +1254,23 @@ static int sdma_v7_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int r;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = true;
+ break;
+ case 1:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = false;
+ break;
+ case 2:
+ adev->sdma.no_user_submission = true;
+ adev->sdma.disable_uq = false;
+ break;
+ }
+
r = amdgpu_sdma_init_microcode(adev, 0, true);
if (r) {
DRM_ERROR("Failed to init sdma firmware!\n");
@@ -1347,6 +1306,7 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->me = i;
+ ring->no_user_submission = adev->sdma.no_user_submission;
DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
@@ -1378,6 +1338,10 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
else
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+ /* add firmware version checks here */
+ if (0 && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+
return r;
}
@@ -1400,11 +1364,39 @@ static int sdma_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int sdma_v7_0_set_userq_trap_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int i, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_DMA]) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ irq_type = AMDGPU_SDMA_IRQ_INSTANCE0 + i;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->sdma.trap_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->sdma.trap_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
static int sdma_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
- return sdma_v7_0_start(adev);
+ r = sdma_v7_0_start(adev);
+ if (r)
+ return r;
+
+ return sdma_v7_0_set_userq_trap_interrupts(adev, true);
}
static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
@@ -1416,6 +1408,7 @@ static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
sdma_v7_0_ctx_switch_enable(adev, false);
sdma_v7_0_enable(adev, false);
+ sdma_v7_0_set_userq_trap_interrupts(adev, false);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 2247f6a94858..e0f139de7991 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -35,6 +35,7 @@
#include "amdgpu_vce.h"
#include "atom.h"
#include "amd_pcie.h"
+
#include "si_dpm.h"
#include "sid.h"
#include "si_ih.h"
@@ -44,17 +45,30 @@
#include "dce_v6_0.h"
#include "si.h"
#include "uvd_v3_1.h"
-#include "amdgpu_vkms.h"
+
+#include "uvd/uvd_4_0_d.h"
+
+#include "smu/smu_6_0_d.h"
+#include "smu/smu_6_0_sh_mask.h"
+
#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
+
#include "gmc/gmc_6_0_d.h"
+#include"gmc/gmc_6_0_sh_mask.h"
+
#include "dce/dce_6_0_d.h"
-#include "uvd/uvd_4_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+
#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
+#include "si_enums.h"
#include "amdgpu_dm.h"
+#include "amdgpu_vkms.h"
static const u32 tahiti_golden_registers[] =
{
@@ -1071,8 +1085,8 @@ static u32 si_smc_rreg(struct amdgpu_device *adev, u32 reg)
u32 r;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(SMC_IND_INDEX_0, (reg));
- r = RREG32(SMC_IND_DATA_0);
+ WREG32(mmSMC_IND_INDEX_0, (reg));
+ r = RREG32(mmSMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return r;
}
@@ -1082,8 +1096,8 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(SMC_IND_INDEX_0, (reg));
- WREG32(SMC_IND_DATA_0, (v));
+ WREG32(mmSMC_IND_INDEX_0, (reg));
+ WREG32(mmSMC_IND_DATA_0, (v));
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
@@ -1110,20 +1124,20 @@ static void si_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
}
static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
- {GRBM_STATUS},
+ {mmGRBM_STATUS},
{mmGRBM_STATUS2},
{mmGRBM_STATUS_SE0},
{mmGRBM_STATUS_SE1},
{mmSRBM_STATUS},
{mmSRBM_STATUS2},
- {DMA_STATUS_REG + DMA0_REGISTER_OFFSET},
- {DMA_STATUS_REG + DMA1_REGISTER_OFFSET},
+ {mmDMA_STATUS_REG + DMA0_REGISTER_OFFSET},
+ {mmDMA_STATUS_REG + DMA1_REGISTER_OFFSET},
{mmCP_STAT},
{mmCP_STALLED_STAT1},
{mmCP_STALLED_STAT2},
{mmCP_STALLED_STAT3},
- {GB_ADDR_CONFIG},
- {MC_ARB_RAMCFG},
+ {mmGB_ADDR_CONFIG},
+ {mmMC_ARB_RAMCFG},
{mmGB_TILE_MODE0},
{mmGB_TILE_MODE1},
{mmGB_TILE_MODE2},
@@ -1156,7 +1170,7 @@ static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
{mmGB_TILE_MODE29},
{mmGB_TILE_MODE30},
{mmGB_TILE_MODE31},
- {CC_RB_BACKEND_DISABLE, true},
+ {mmCC_RB_BACKEND_DISABLE, true},
{mmGC_USER_RB_BACKEND_DISABLE, true},
{mmPA_SC_RASTER_CONFIG, true},
};
@@ -1264,37 +1278,37 @@ static bool si_read_disabled_bios(struct amdgpu_device *adev)
u32 rom_cntl;
bool r;
- bus_cntl = RREG32(R600_BUS_CNTL);
+ bus_cntl = RREG32(mmBUS_CNTL);
if (adev->mode_info.num_crtc) {
- d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
- d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
- vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ d1vga_control = RREG32(mmD1VGA_CONTROL);
+ d2vga_control = RREG32(mmD2VGA_CONTROL);
+ vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
}
rom_cntl = RREG32(R600_ROM_CNTL);
/* enable the rom */
- WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+ WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
if (adev->mode_info.num_crtc) {
/* Disable VGA mode */
- WREG32(AVIVO_D1VGA_CONTROL,
- (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
- AVIVO_DVGA_CONTROL_TIMING_SELECT)));
- WREG32(AVIVO_D2VGA_CONTROL,
- (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
- AVIVO_DVGA_CONTROL_TIMING_SELECT)));
- WREG32(VGA_RENDER_CONTROL,
- (vga_render_control & C_000300_VGA_VSTATUS_CNTL));
+ WREG32(mmD1VGA_CONTROL,
+ (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
+ D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
+ WREG32(mmD2VGA_CONTROL,
+ (d2vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
+ D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
+ WREG32(mmVGA_RENDER_CONTROL,
+ (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
}
WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
r = amdgpu_read_bios(adev);
/* restore regs */
- WREG32(R600_BUS_CNTL, bus_cntl);
+ WREG32(mmBUS_CNTL, bus_cntl);
if (adev->mode_info.num_crtc) {
- WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
- WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
- WREG32(VGA_RENDER_CONTROL, vga_render_control);
+ WREG32(mmD1VGA_CONTROL, d1vga_control);
+ WREG32(mmD2VGA_CONTROL, d2vga_control);
+ WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
}
WREG32(R600_ROM_CNTL, rom_cntl);
return r;
@@ -1331,23 +1345,24 @@ static void si_set_clk_bypass_mode(struct amdgpu_device *adev)
{
u32 tmp, i;
- tmp = RREG32(CG_SPLL_FUNC_CNTL);
- tmp |= SPLL_BYPASS_EN;
- WREG32(CG_SPLL_FUNC_CNTL, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL);
+ tmp |= CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL, tmp);
- tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
- tmp |= SPLL_CTLREQ_CHG;
- WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL_2);
+ tmp |= CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL_2, tmp);
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
+ if (RREG32(mmCG_SPLL_STATUS) & CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK)
break;
udelay(1);
}
- tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
- tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
- WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL_2);
+ tmp &= ~(CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK |
+ CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK);
+ WREG32(mmCG_SPLL_FUNC_CNTL_2, tmp);
tmp = RREG32(MPLL_CNTL_MODE);
tmp &= ~MPLL_MCLK_SEL;
@@ -1358,21 +1373,21 @@ static void si_spll_powerdown(struct amdgpu_device *adev)
{
u32 tmp;
- tmp = RREG32(SPLL_CNTL_MODE);
- tmp |= SPLL_SW_DIR_CONTROL;
- WREG32(SPLL_CNTL_MODE, tmp);
+ tmp = RREG32(mmSPLL_CNTL_MODE);
+ tmp |= SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK;
+ WREG32(mmSPLL_CNTL_MODE, tmp);
- tmp = RREG32(CG_SPLL_FUNC_CNTL);
- tmp |= SPLL_RESET;
- WREG32(CG_SPLL_FUNC_CNTL, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL);
+ tmp |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL, tmp);
- tmp = RREG32(CG_SPLL_FUNC_CNTL);
- tmp |= SPLL_SLEEP;
- WREG32(CG_SPLL_FUNC_CNTL, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL);
+ tmp |= CG_SPLL_FUNC_CNTL__SPLL_SLEEP_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL, tmp);
- tmp = RREG32(SPLL_CNTL_MODE);
- tmp &= ~SPLL_SW_DIR_CONTROL;
- WREG32(SPLL_CNTL_MODE, tmp);
+ tmp = RREG32(mmSPLL_CNTL_MODE);
+ tmp &= ~SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK;
+ WREG32(mmSPLL_CNTL_MODE, tmp);
}
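
The churn above is mechanical: hand-rolled register constants give way to the generated headers' mmREG / ixREG offsets and REG__FIELD_MASK / REG__FIELD__SHIFT definitions. Every converted site reduces to the same read-modify-write idiom (field taken from the hunk above; the generated sh_mask headers define both the _MASK and __SHIFT forms):

    u32 tmp;

    tmp = RREG32(mmCG_SPLL_FUNC_CNTL);                 /* read            */
    tmp &= ~CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;        /* clear the field */
    tmp |= 1 << CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT;  /* set the field   */
    WREG32(mmCG_SPLL_FUNC_CNTL, tmp);                  /* write back      */
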
static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
@@ -1454,14 +1469,14 @@ static void si_vga_set_state(struct amdgpu_device *adev, bool state)
{
uint32_t temp;
- temp = RREG32(CONFIG_CNTL);
+ temp = RREG32(mmCONFIG_CNTL);
if (!state) {
temp &= ~(1<<0);
temp |= (1<<1);
} else {
temp &= ~(1<<1);
}
- WREG32(CONFIG_CNTL, temp);
+ WREG32(mmCONFIG_CNTL, temp);
}
static u32 si_get_xclk(struct amdgpu_device *adev)
@@ -1469,12 +1484,12 @@ static u32 si_get_xclk(struct amdgpu_device *adev)
u32 reference_clock = adev->clock.spll.reference_freq;
u32 tmp;
- tmp = RREG32(CG_CLKPIN_CNTL_2);
- if (tmp & MUX_TCLK_TO_XCLK)
+ tmp = RREG32(mmCG_CLKPIN_CNTL_2);
+ if (tmp & CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK)
return TCLK;
- tmp = RREG32(CG_CLKPIN_CNTL);
- if (tmp & XTALIN_DIVIDE)
+ tmp = RREG32(mmCG_CLKPIN_CNTL);
+ if (tmp & CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK)
return reference_clock / 4;
return reference_clock;
@@ -1519,9 +1534,9 @@ static int si_get_pcie_lanes(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return 0;
- link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
- switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
+ switch ((link_width_cntl & PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT) {
case LC_LINK_WIDTH_X1:
return 1;
case LC_LINK_WIDTH_X2:
@@ -1568,13 +1583,13 @@ static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes)
return;
}
- link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- link_width_cntl &= ~LC_LINK_WIDTH_MASK;
- link_width_cntl |= mask << LC_LINK_WIDTH_SHIFT;
- link_width_cntl |= (LC_RECONFIG_NOW |
- LC_RECONFIG_ARC_MISSING_ESCAPE);
+ link_width_cntl = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK;
+ link_width_cntl |= mask << PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT;
+ link_width_cntl |= (PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK |
+ PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_ARC_MISSING_ESCAPE_MASK);
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
static void si_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
@@ -2018,7 +2033,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
static uint32_t si_get_rev_id(struct amdgpu_device *adev)
{
- return (RREG32(CC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
+ return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}
@@ -2239,9 +2254,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
return;
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
- LC_CURRENT_DATA_RATE_SHIFT;
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL);
+ current_data_rate = (speed_cntl & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >>
+ PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
if (current_data_rate == 2) {
DRM_INFO("PCIE gen 3 link speeds already enabled\n");
@@ -2268,17 +2283,17 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
- tmp = RREG32_PCIE(PCIE_LC_STATUS1);
- max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
- current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
+ tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
+ max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >> PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH__SHIFT;
+ current_lw = (tmp & PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH_MASK) >> PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH__SHIFT;
if (current_lw < max_lw) {
- tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- if (tmp & LC_RENEGOTIATION_SUPPORT) {
- tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
- tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
- tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
+ if (tmp & PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT_MASK) {
+ tmp &= ~(PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK | PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS_MASK);
+ tmp |= (max_lw << PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT);
+ tmp |= PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT_MASK | PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN_MASK | PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL, tmp);
}
}
@@ -2301,13 +2316,13 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
PCI_EXP_LNKCTL2,
&gpu_cfg2);
- tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
- tmp |= LC_SET_QUIESCE;
- WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_CNTL4);
+ tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL4, tmp);
- tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
- tmp |= LC_REDO_EQ;
- WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_CNTL4);
+ tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL4, tmp);
mdelay(100);
@@ -2333,16 +2348,16 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
(PCI_EXP_LNKCTL2_ENTER_COMP |
PCI_EXP_LNKCTL2_TX_MARGIN));
- tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
- tmp &= ~LC_SET_QUIESCE;
- WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_CNTL4);
+ tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL4, tmp);
}
}
}
- speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
- speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ speed_cntl |= PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE_MASK | PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE_MASK;
+ speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL, speed_cntl);
tmp16 = 0;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -2354,13 +2369,13 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL2,
PCI_EXP_LNKCTL2_TLS, tmp16);
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL);
+ speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL, speed_cntl);
for (i = 0; i < adev->usec_timeout; i++) {
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK) == 0)
break;
udelay(1);
}
@@ -2418,121 +2433,121 @@ static void si_program_aspm(struct amdgpu_device *adev)
if (!amdgpu_device_should_use_aspm(adev))
return;
- orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
- data &= ~LC_XMIT_N_FTS_MASK;
- data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_N_FTS_CNTL);
+ data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
+ data |= (0x24 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT) | PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_N_FTS_CNTL, data);
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
- data |= LC_GO_TO_RECOVERY;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL3, data);
- orig = data = RREG32_PCIE(PCIE_P_CNTL);
- data |= P_IGNORE_EDB_ERR;
+ orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
+ data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
if (orig != data)
- WREG32_PCIE(PCIE_P_CNTL, data);
+ WREG32_PCIE(ixPCIE_P_CNTL, data);
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
- data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
- data |= LC_PMI_TO_L1_DIS;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL);
+ data &= ~(PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK | PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK);
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (!disable_l0s)
- data |= LC_L0S_INACTIVITY(7);
+ data |= (7 << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT);
if (!disable_l1) {
- data |= LC_L1_INACTIVITY(7);
- data &= ~LC_PMI_TO_L1_DIS;
+ data |= (7 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT);
+ data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL, data);
if (!disable_plloff_in_l1) {
bool clk_req_support;
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
- data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
- data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ orig = data = si_pif_phy0_rreg(adev, ixPB0_PIF_PWRDOWN_0);
+ data &= ~(PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK | PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) | (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_0, data);
+ si_pif_phy0_wreg(adev, ixPB0_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_1);
- data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
- data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ orig = data = si_pif_phy0_rreg(adev, ixPB0_PIF_PWRDOWN_1);
+ data &= ~(PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK | PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) | (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_1, data);
+ si_pif_phy0_wreg(adev, ixPB0_PIF_PWRDOWN_1, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_0);
- data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
- data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ orig = data = si_pif_phy1_rreg(adev, ixPB1_PIF_PWRDOWN_0);
+ data &= ~(PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK | PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) | (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_0, data);
+ si_pif_phy1_wreg(adev, ixPB1_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_1);
- data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
- data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ orig = data = si_pif_phy1_rreg(adev, ixPB1_PIF_PWRDOWN_1);
+ data &= ~(PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK | PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) | (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
+ si_pif_phy1_wreg(adev, ixPB1_PIF_PWRDOWN_1, data);
if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
- data &= ~PLL_RAMP_UP_TIME_0_MASK;
+ orig = data = si_pif_phy0_rreg(adev, ixPB0_PIF_PWRDOWN_0);
+ data &= ~PB0_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_0, data);
+ si_pif_phy0_wreg(adev, ixPB0_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_1);
- data &= ~PLL_RAMP_UP_TIME_1_MASK;
+ orig = data = si_pif_phy0_rreg(adev, ixPB0_PIF_PWRDOWN_1);
+ data &= ~PB0_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_1, data);
+ si_pif_phy0_wreg(adev, ixPB0_PIF_PWRDOWN_1, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_2);
- data &= ~PLL_RAMP_UP_TIME_2_MASK;
+ orig = data = si_pif_phy0_rreg(adev, ixPB0_PIF_PWRDOWN_2);
+ data &= ~PB0_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_2, data);
+ si_pif_phy0_wreg(adev, ixPB0_PIF_PWRDOWN_2, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_3);
- data &= ~PLL_RAMP_UP_TIME_3_MASK;
+ orig = data = si_pif_phy0_rreg(adev, ixPB0_PIF_PWRDOWN_3);
+ data &= ~PB0_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_3, data);
+ si_pif_phy0_wreg(adev, ixPB0_PIF_PWRDOWN_3, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_0);
- data &= ~PLL_RAMP_UP_TIME_0_MASK;
+ orig = data = si_pif_phy1_rreg(adev, ixPB1_PIF_PWRDOWN_0);
+ data &= ~PB1_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_0, data);
+ si_pif_phy1_wreg(adev, ixPB1_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_1);
- data &= ~PLL_RAMP_UP_TIME_1_MASK;
+ orig = data = si_pif_phy1_rreg(adev, ixPB1_PIF_PWRDOWN_1);
+ data &= ~PB1_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
+ si_pif_phy1_wreg(adev, ixPB1_PIF_PWRDOWN_1, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_2);
- data &= ~PLL_RAMP_UP_TIME_2_MASK;
+ orig = data = si_pif_phy1_rreg(adev, ixPB1_PIF_PWRDOWN_2);
+ data &= ~PB1_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_2, data);
+ si_pif_phy1_wreg(adev, ixPB1_PIF_PWRDOWN_2, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_3);
- data &= ~PLL_RAMP_UP_TIME_3_MASK;
+ orig = data = si_pif_phy1_rreg(adev, ixPB1_PIF_PWRDOWN_3);
+ data &= ~PB1_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_3, data);
+ si_pif_phy1_wreg(adev, ixPB1_PIF_PWRDOWN_3, data);
}
- orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- data &= ~LC_DYN_LANES_PWR_STATE_MASK;
- data |= LC_DYN_LANES_PWR_STATE(3);
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
+ data &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
+ data |= (3 << PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE__SHIFT);
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
- data &= ~LS2_EXIT_TIME_MASK;
+ orig = data = si_pif_phy0_rreg(adev, ixPB0_PIF_CNTL);
+ data &= ~PB0_PIF_CNTL__LS2_EXIT_TIME_MASK;
if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
- data |= LS2_EXIT_TIME(5);
+ data |= (5 << PB0_PIF_CNTL__LS2_EXIT_TIME__SHIFT);
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
+ si_pif_phy0_wreg(adev, ixPB0_PIF_CNTL, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
- data &= ~LS2_EXIT_TIME_MASK;
+ orig = data = si_pif_phy1_rreg(adev, ixPB1_PIF_CNTL);
+ data &= ~PB1_PIF_CNTL__LS2_EXIT_TIME_MASK;
if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
- data |= LS2_EXIT_TIME(5);
+ data |= (5 << PB1_PIF_CNTL__LS2_EXIT_TIME__SHIFT);
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
+ si_pif_phy1_wreg(adev, ixPB1_PIF_CNTL, data);
if (!disable_clkreq &&
!pci_is_root_bus(adev->pdev->bus)) {
@@ -2548,64 +2563,64 @@ static void si_program_aspm(struct amdgpu_device *adev)
}
if (clk_req_support) {
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
- data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL2);
+ data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK | PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL2, data);
- orig = data = RREG32(THM_CLK_CNTL);
- data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
- data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
+ orig = data = RREG32(mmTHM_CLK_CNTL);
+ data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
+ data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) | (1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
if (orig != data)
- WREG32(THM_CLK_CNTL, data);
+ WREG32(mmTHM_CLK_CNTL, data);
- orig = data = RREG32(MISC_CLK_CNTL);
- data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
- data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
+ orig = data = RREG32(mmMISC_CLK_CNTL);
+ data &= ~(MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK | MISC_CLK_CNTL__ZCLK_SEL_MASK);
+ data |= (1 << MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT) | (1 << MISC_CLK_CNTL__ZCLK_SEL__SHIFT);
if (orig != data)
- WREG32(MISC_CLK_CNTL, data);
+ WREG32(mmMISC_CLK_CNTL, data);
- orig = data = RREG32(CG_CLKPIN_CNTL);
- data &= ~BCLK_AS_XCLK;
+ orig = data = RREG32(mmCG_CLKPIN_CNTL);
+ data &= ~CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK;
if (orig != data)
- WREG32(CG_CLKPIN_CNTL, data);
+ WREG32(mmCG_CLKPIN_CNTL, data);
- orig = data = RREG32(CG_CLKPIN_CNTL_2);
- data &= ~FORCE_BIF_REFCLK_EN;
+ orig = data = RREG32(mmCG_CLKPIN_CNTL_2);
+ data &= ~CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK;
if (orig != data)
- WREG32(CG_CLKPIN_CNTL_2, data);
+ WREG32(mmCG_CLKPIN_CNTL_2, data);
- orig = data = RREG32(MPLL_BYPASSCLK_SEL);
- data &= ~MPLL_CLKOUT_SEL_MASK;
- data |= MPLL_CLKOUT_SEL(4);
+ orig = data = RREG32(mmMPLL_BYPASSCLK_SEL);
+ data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
+ data |= 4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT;
if (orig != data)
- WREG32(MPLL_BYPASSCLK_SEL, data);
+ WREG32(mmMPLL_BYPASSCLK_SEL, data);
- orig = data = RREG32(SPLL_CNTL_MODE);
- data &= ~SPLL_REFCLK_SEL_MASK;
+ orig = data = RREG32(mmSPLL_CNTL_MODE);
+ data &= ~SPLL_CNTL_MODE__SPLL_REFCLK_SEL_MASK;
if (orig != data)
- WREG32(SPLL_CNTL_MODE, data);
+ WREG32(mmSPLL_CNTL_MODE, data);
}
}
} else {
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL, data);
}
- orig = data = RREG32_PCIE(PCIE_CNTL2);
- data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
+ orig = data = RREG32_PCIE(ixPCIE_CNTL2);
+ data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK | PCIE_CNTL2__MST_MEM_LS_EN_MASK | PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
if (orig != data)
- WREG32_PCIE(PCIE_CNTL2, data);
+ WREG32_PCIE(ixPCIE_CNTL2, data);
if (!disable_l0s) {
- data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
- if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
- data = RREG32_PCIE(PCIE_LC_STATUS1);
- if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
- data &= ~LC_L0S_INACTIVITY_MASK;
+ data = RREG32_PCIE_PORT(ixPCIE_LC_N_FTS_CNTL);
+ if ((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) {
+ data = RREG32_PCIE(ixPCIE_LC_STATUS1);
+ if ((data & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK) && (data & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK)) {
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL, data);
}
}
}
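[Note: every si.c hunk above applies the same mechanical conversion, from the
old bare sid.h names to the generated register headers, where each register
gets an mm/ix offset and each field gets REG__FIELD_MASK and REG__FIELD__SHIFT
constants. A minimal sketch of the idiom follows; the constant values are
invented for illustration, the real ones live in the *_d.h/*_sh_mask.h headers
such as the oss/oss_1_0_sh_mask.h included in the si_dma.c hunk below:

	/* Illustrative values only, not the real header contents. */
	#define ixPCIE_LC_LINK_WIDTH_CNTL                      0xa2
	#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK    0x00000007
	#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT  0

	static unsigned int set_link_width_field(unsigned int reg, unsigned int w)
	{
		/* Replaces old helpers like LC_LINK_WIDTH(x) and the bare
		 * LC_LINK_WIDTH_MASK: clear the field, shift the value in. */
		reg &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK;
		reg |= w << PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT;
		return reg;
	}
]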
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index e2089c8da71b..7f18e4875287 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -27,6 +27,8 @@
#include "si.h"
#include "sid.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
DMA0_REGISTER_OFFSET,
@@ -38,17 +40,31 @@ static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
+/**
+ * si_dma_ring_get_rptr - get the current read pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current rptr from the hardware (SI).
+ */
static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
return *ring->rptr_cpu_addr;
}
+/**
+ * si_dma_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware (SI).
+ */
static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+ return (RREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}
static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
@@ -56,7 +72,7 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
+ WREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
}
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
@@ -117,9 +133,9 @@ static void si_dma_stop(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
/* dma0 */
- rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
- rb_cntl &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+ rb_cntl = RREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i]);
+ rb_cntl &= ~DMA_GFX_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
}
}
@@ -133,44 +149,44 @@ static int si_dma_start(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
- WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
+ WREG32(mmDMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
+ WREG32(mmDMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
- rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+ rb_cntl |= DMA_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK | DMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
#endif
- WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+ WREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
/* Initialize the ring buffer's read and write pointers */
- WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
- WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmDMA_GFX_RB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[i], 0);
rptr_addr = ring->rptr_gpu_addr;
- WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
- WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
+ WREG32(mmDMA_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
+ WREG32(mmDMA_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
- rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+ rb_cntl |= DMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;
- WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
+ WREG32(mmDMA_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
/* enable DMA IBs */
- ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+ ib_cntl = DMA_GFX_IB_CNTL__IB_ENABLE_MASK | DMA_GFX_IB_CNTL__CMD_VMID_FORCE_MASK;
#ifdef __BIG_ENDIAN
- ib_cntl |= DMA_IB_SWAP_ENABLE;
+ ib_cntl |= DMA_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
#endif
- WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);
+ WREG32(mmDMA_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
- dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
- dma_cntl &= ~CTXEMPTY_INT_ENABLE;
- WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);
+ dma_cntl = RREG32(mmDMA_CNTL + sdma_offsets[i]);
+ dma_cntl &= ~DMA_CNTL__CTXEMPTY_INT_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + sdma_offsets[i], dma_cntl);
ring->wptr = 0;
- WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
- WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
+ WREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+ WREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_GFX_RB_CNTL__RB_ENABLE_MASK);
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -461,7 +477,7 @@ static int si_dma_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- adev->sdma.num_instances = 2;
+ adev->sdma.num_instances = SDMA_MAX_INSTANCE;
si_dma_set_ring_funcs(adev);
si_dma_set_buffer_funcs(adev);
@@ -545,9 +561,9 @@ static bool si_dma_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- u32 tmp = RREG32(SRBM_STATUS2);
+ u32 tmp = RREG32(mmSRBM_STATUS2);
- if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
+ if (tmp & (SRBM_STATUS2__DMA_BUSY_MASK | SRBM_STATUS2__DMA1_BUSY_MASK))
return false;
return true;
@@ -583,14 +599,14 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
- sdma_cntl &= ~TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET);
+ sdma_cntl &= ~DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
- sdma_cntl |= TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET);
+ sdma_cntl |= DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
@@ -599,14 +615,14 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
- sdma_cntl &= ~TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET);
+ sdma_cntl &= ~DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
- sdma_cntl |= TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET);
+ sdma_cntl |= DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
@@ -645,11 +661,11 @@ static int si_dma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
offset = DMA0_REGISTER_OFFSET;
else
offset = DMA1_REGISTER_OFFSET;
- orig = data = RREG32(DMA_POWER_CNTL + offset);
- data &= ~MEM_POWER_OVERRIDE;
+ orig = data = RREG32(mmDMA_POWER_CNTL + offset);
+ data &= ~DMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (data != orig)
- WREG32(DMA_POWER_CNTL + offset, data);
- WREG32(DMA_CLK_CTRL + offset, 0x00000100);
+ WREG32(mmDMA_POWER_CNTL + offset, data);
+ WREG32(mmDMA_CLK_CTRL + offset, 0x00000100);
}
} else {
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -657,15 +673,15 @@ static int si_dma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
offset = DMA0_REGISTER_OFFSET;
else
offset = DMA1_REGISTER_OFFSET;
- orig = data = RREG32(DMA_POWER_CNTL + offset);
- data |= MEM_POWER_OVERRIDE;
+ orig = data = RREG32(mmDMA_POWER_CNTL + offset);
+ data |= DMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (data != orig)
- WREG32(DMA_POWER_CNTL + offset, data);
+ WREG32(mmDMA_POWER_CNTL + offset, data);
- orig = data = RREG32(DMA_CLK_CTRL + offset);
+ orig = data = RREG32(mmDMA_CLK_CTRL + offset);
data = 0xff000000;
if (data != orig)
- WREG32(DMA_CLK_CTRL + offset, data);
+ WREG32(mmDMA_CLK_CTRL + offset, data);
}
}
@@ -679,11 +695,11 @@ static int si_dma_set_powergating_state(struct amdgpu_ip_block *ip_block,
struct amdgpu_device *adev = ip_block->adev;
- WREG32(DMA_PGFSM_WRITE, 0x00002000);
- WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
+ WREG32(mmDMA_PGFSM_WRITE, 0x00002000);
+ WREG32(mmDMA_PGFSM_CONFIG, 0x100010ff);
for (tmp = 0; tmp < 5; tmp++)
- WREG32(DMA_PGFSM_WRITE, 0);
+ WREG32(mmDMA_PGFSM_WRITE, 0);
return 0;
}
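[Note: the rptr/wptr hunks above all hinge on one unit conversion: the
DMA_GFX_RB_WPTR register holds a byte offset whose bits [1:0] are always
zero, while amdgpu_ring tracks pointers in dwords. A standalone worked
example of the masking arithmetic, with an invented register value:

	#include <stdio.h>

	int main(void)
	{
		unsigned int reg  = 0x00000128;            /* byte offset as read from HW */
		unsigned int wptr = (reg & 0x3fffc) >> 2;  /* keep bits [17:2], bytes -> dwords */
		unsigned int back = (wptr << 2) & 0x3fffc; /* dwords -> byte offset again */

		/* prints dwords=0x4a bytes=0x128 */
		printf("dwords=0x%x bytes=0x%x\n", wptr, back);
		return 0;
	}
]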
diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h
index d656ef1fa6e1..6da65778292b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_enums.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h
@@ -23,115 +23,15 @@
#ifndef SI_ENUMS_H
#define SI_ENUMS_H
-#define VBLANK_INT_MASK (1 << 0)
-#define DC_HPDx_INT_EN (1 << 16)
-#define VBLANK_ACK (1 << 4)
-#define VLINE_ACK (1 << 4)
-
-#define CURSOR_WIDTH 64
-#define CURSOR_HEIGHT 64
-
-#define VGA_VSTATUS_CNTL 0xFFFCFFFF
#define PRIORITY_MARK_MASK 0x7fff
#define PRIORITY_OFF (1 << 16)
#define PRIORITY_ALWAYS_ON (1 << 20)
-#define INTERLEAVE_EN (1 << 0)
-
-#define LATENCY_WATERMARK_MASK(x) ((x) << 16)
-#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
-#define ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
-
-#define GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
-#define GRPH_ENDIAN_NONE 0
-#define GRPH_ENDIAN_8IN16 1
-#define GRPH_ENDIAN_8IN32 2
-#define GRPH_ENDIAN_8IN64 3
-#define GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
-#define GRPH_RED_SEL_R 0
-#define GRPH_RED_SEL_G 1
-#define GRPH_RED_SEL_B 2
-#define GRPH_RED_SEL_A 3
-#define GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
-#define GRPH_GREEN_SEL_G 0
-#define GRPH_GREEN_SEL_B 1
-#define GRPH_GREEN_SEL_A 2
-#define GRPH_GREEN_SEL_R 3
-#define GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
-#define GRPH_BLUE_SEL_B 0
-#define GRPH_BLUE_SEL_A 1
-#define GRPH_BLUE_SEL_R 2
-#define GRPH_BLUE_SEL_G 3
-#define GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
-#define GRPH_ALPHA_SEL_A 0
-#define GRPH_ALPHA_SEL_R 1
-#define GRPH_ALPHA_SEL_G 2
-#define GRPH_ALPHA_SEL_B 3
-
-#define GRPH_DEPTH(x) (((x) & 0x3) << 0)
-#define GRPH_DEPTH_8BPP 0
-#define GRPH_DEPTH_16BPP 1
-#define GRPH_DEPTH_32BPP 2
-
-#define GRPH_FORMAT(x) (((x) & 0x7) << 8)
-#define GRPH_FORMAT_INDEXED 0
-#define GRPH_FORMAT_ARGB1555 0
-#define GRPH_FORMAT_ARGB565 1
-#define GRPH_FORMAT_ARGB4444 2
-#define GRPH_FORMAT_AI88 3
-#define GRPH_FORMAT_MONO16 4
-#define GRPH_FORMAT_BGRA5551 5
-#define GRPH_FORMAT_ARGB8888 0
-#define GRPH_FORMAT_ARGB2101010 1
-#define GRPH_FORMAT_32BPP_DIG 2
-#define GRPH_FORMAT_8B_ARGB2101010 3
-#define GRPH_FORMAT_BGRA1010102 4
-#define GRPH_FORMAT_8B_BGRA1010102 5
-#define GRPH_FORMAT_RGB111110 6
-#define GRPH_FORMAT_BGR101111 7
-
-#define GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
-#define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
-#define GRPH_ARRAY_LINEAR_GENERAL 0
-#define GRPH_ARRAY_LINEAR_ALIGNED 1
-#define GRPH_ARRAY_1D_TILED_THIN1 2
-#define GRPH_ARRAY_2D_TILED_THIN1 4
-#define GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
-#define GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
-#define GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
-#define GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
-#define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
-#define GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
-
-#define CURSOR_EN (1 << 0)
-#define CURSOR_MODE(x) (((x) & 0x3) << 8)
-#define CURSOR_MONO 0
-#define CURSOR_24_1 1
-#define CURSOR_24_8_PRE_MULT 2
-#define CURSOR_24_8_UNPRE_MULT 3
-#define CURSOR_2X_MAGNIFY (1 << 16)
-#define CURSOR_FORCE_MC_ON (1 << 20)
-#define CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
-#define CURSOR_URGENT_ALWAYS 0
-#define CURSOR_URGENT_1_8 1
-#define CURSOR_URGENT_1_4 2
-#define CURSOR_URGENT_3_8 3
-#define CURSOR_URGENT_1_2 4
-#define CURSOR_UPDATE_PENDING (1 << 0)
-#define CURSOR_UPDATE_TAKEN (1 << 1)
-#define CURSOR_UPDATE_LOCK (1 << 16)
-#define CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
-
-
-#define ES_AND_GS_AUTO 3
-#define RADEON_PACKET_TYPE3 3
-#define CE_PARTITION_BASE 3
-#define BUF_SWAP_32BIT (2 << 16)
#define GFX_POWER_STATUS (1 << 1)
#define GFX_CLOCK_STATUS (1 << 2)
#define GFX_LS_STATUS (1 << 3)
-#define RLC_BUSY_STATUS (1 << 0)
+#define RLC_BUSY_STATUS (1 << 0)
#define RLC_PUD(x) ((x) << 0)
#define RLC_PUD_MASK (0xff << 0)
#define RLC_PDD(x) ((x) << 8)
@@ -140,140 +40,8 @@
#define RLC_TTPD_MASK (0xff << 16)
#define RLC_MSD(x) ((x) << 24)
#define RLC_MSD_MASK (0xff << 24)
-#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
-#define WRITE_DATA_DST_SEL(x) ((x) << 8)
-#define EVENT_TYPE(x) ((x) << 0)
-#define EVENT_INDEX(x) ((x) << 8)
-#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
-#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
-#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
-#define GFX6_NUM_GFX_RINGS 1
-#define GFX6_NUM_COMPUTE_RINGS 2
#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
-#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
- (((op) & 0xFF) << 8) | \
- ((n) & 0x3FFF) << 16)
-#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
-#define PACKET3_NOP 0x10
-#define PACKET3_SET_BASE 0x11
-#define PACKET3_BASE_INDEX(x) ((x) << 0)
-#define PACKET3_CLEAR_STATE 0x12
-#define PACKET3_INDEX_BUFFER_SIZE 0x13
-#define PACKET3_DISPATCH_DIRECT 0x15
-#define PACKET3_DISPATCH_INDIRECT 0x16
-#define PACKET3_ALLOC_GDS 0x1B
-#define PACKET3_WRITE_GDS_RAM 0x1C
-#define PACKET3_ATOMIC_GDS 0x1D
-#define PACKET3_ATOMIC 0x1E
-#define PACKET3_OCCLUSION_QUERY 0x1F
-#define PACKET3_SET_PREDICATION 0x20
-#define PACKET3_REG_RMW 0x21
-#define PACKET3_COND_EXEC 0x22
-#define PACKET3_PRED_EXEC 0x23
-#define PACKET3_DRAW_INDIRECT 0x24
-#define PACKET3_DRAW_INDEX_INDIRECT 0x25
-#define PACKET3_INDEX_BASE 0x26
-#define PACKET3_DRAW_INDEX_2 0x27
-#define PACKET3_CONTEXT_CONTROL 0x28
-#define PACKET3_INDEX_TYPE 0x2A
-#define PACKET3_DRAW_INDIRECT_MULTI 0x2C
-#define PACKET3_DRAW_INDEX_AUTO 0x2D
-#define PACKET3_DRAW_INDEX_IMMD 0x2E
-#define PACKET3_NUM_INSTANCES 0x2F
-#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
-#define PACKET3_INDIRECT_BUFFER_CONST 0x31
-#define PACKET3_INDIRECT_BUFFER 0x3F
-#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
-#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
-#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
-#define PACKET3_WRITE_DATA 0x37
-#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
-#define PACKET3_MEM_SEMAPHORE 0x39
-#define PACKET3_MPEG_INDEX 0x3A
-#define PACKET3_COPY_DW 0x3B
-#define PACKET3_WAIT_REG_MEM 0x3C
-#define PACKET3_MEM_WRITE 0x3D
-#define PACKET3_COPY_DATA 0x40
-#define PACKET3_CP_DMA 0x41
-# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
-# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
-# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
-# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
-# define PACKET3_CP_DMA_DIS_WC (1 << 21)
-# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
-# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
-# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
-# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
-# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
-# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
-# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30)
-#define PACKET3_PFP_SYNC_ME 0x42
-#define PACKET3_SURFACE_SYNC 0x43
-# define PACKET3_DEST_BASE_0_ENA (1 << 0)
-# define PACKET3_DEST_BASE_1_ENA (1 << 1)
-# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
-# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
-# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
-# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
-# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
-# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
-# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
-# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
-# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
-# define PACKET3_DEST_BASE_2_ENA (1 << 19)
-# define PACKET3_DEST_BASE_3_ENA (1 << 21)
-# define PACKET3_TCL1_ACTION_ENA (1 << 22)
-# define PACKET3_TC_ACTION_ENA (1 << 23)
-# define PACKET3_CB_ACTION_ENA (1 << 25)
-# define PACKET3_DB_ACTION_ENA (1 << 26)
-# define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
-# define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
-#define PACKET3_ME_INITIALIZE 0x44
-#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
-#define PACKET3_COND_WRITE 0x45
-#define PACKET3_EVENT_WRITE 0x46
-#define PACKET3_EVENT_WRITE_EOP 0x47
-#define PACKET3_EVENT_WRITE_EOS 0x48
-#define PACKET3_PREAMBLE_CNTL 0x4A
-# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
-# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
-#define PACKET3_ONE_REG_WRITE 0x57
-#define PACKET3_LOAD_CONFIG_REG 0x5F
-#define PACKET3_LOAD_CONTEXT_REG 0x60
-#define PACKET3_LOAD_SH_REG 0x61
-#define PACKET3_SET_CONFIG_REG 0x68
-#define PACKET3_SET_CONFIG_REG_START 0x00002000
-#define PACKET3_SET_CONFIG_REG_END 0x00002c00
-#define PACKET3_SET_CONTEXT_REG 0x69
-#define PACKET3_SET_CONTEXT_REG_START 0x000a000
-#define PACKET3_SET_CONTEXT_REG_END 0x000a400
-#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
-#define PACKET3_SET_RESOURCE_INDIRECT 0x74
-#define PACKET3_SET_SH_REG 0x76
-#define PACKET3_SET_SH_REG_START 0x00002c00
-#define PACKET3_SET_SH_REG_END 0x00003000
-#define PACKET3_SET_SH_REG_OFFSET 0x77
-#define PACKET3_ME_WRITE 0x7A
-#define PACKET3_SCRATCH_RAM_WRITE 0x7D
-#define PACKET3_SCRATCH_RAM_READ 0x7E
-#define PACKET3_CE_WRITE 0x7F
-#define PACKET3_LOAD_CONST_RAM 0x80
-#define PACKET3_WRITE_CONST_RAM 0x81
-#define PACKET3_WRITE_CONST_RAM_OFFSET 0x82
-#define PACKET3_DUMP_CONST_RAM 0x83
-#define PACKET3_INCREMENT_CE_COUNTER 0x84
-#define PACKET3_INCREMENT_DE_COUNTER 0x85
-#define PACKET3_WAIT_ON_CE_COUNTER 0x86
-#define PACKET3_WAIT_ON_DE_COUNTER 0x87
-#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
-#define PACKET3_SET_CE_DE_COUNTERS 0x89
-#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
-#define PACKET3_SWITCH_BUFFER 0x8B
-#define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
-#define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
-#define PACKET3_SEM_SEL_WAIT (0x7 << 29)
-
#endif
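[Note: the PACKET3 macros removed from si_enums.h above encode a type-3 CP
packet header: the packet type in bits [31:30], a dword count field in
[29:16], and the opcode in [15:8]. A standalone example of the encoding; the
3u literal avoids the signed-shift overflow that the kernel macro tolerates:

	#include <stdio.h>

	#define RADEON_PACKET_TYPE3  3u
	#define PACKET3(op, n)  ((RADEON_PACKET_TYPE3 << 30) | \
	                         (((op) & 0xFF) << 8) |        \
	                         ((n) & 0x3FFF) << 16)
	#define PACKET3_WRITE_DATA  0x37

	int main(void)
	{
		/* WRITE_DATA header with count 3 -> prints 0xc0033700 */
		printf("0x%08x\n", PACKET3(PACKET3_WRITE_DATA, 3));
		return 0;
	}
]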
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 5c38e1fb1dca..1df00f8a2406 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -27,6 +27,7 @@
#include "amdgpu_ih.h"
#include "sid.h"
#include "si_ih.h"
+
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
@@ -213,7 +214,7 @@ static int si_ih_resume(struct amdgpu_ip_block *ip_block)
static bool si_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- u32 tmp = RREG32(SRBM_STATUS);
+ u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
return false;
@@ -239,23 +240,23 @@ static int si_ih_soft_reset(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
- u32 tmp = RREG32(SRBM_STATUS);
+ u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
if (srbm_soft_reset) {
- tmp = RREG32(SRBM_SOFT_RESET);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
+ dev_info(adev->dev, "mmSRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
}
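[Note: the si_ih_soft_reset() hunk above only renames registers; the reset
handshake itself is unchanged. Its shape, as a sketch with a hypothetical
helper name (the read-back after each write posts the write before the
delay):

	static void srbm_toggle_reset(struct amdgpu_device *adev, u32 bits)
	{
		u32 tmp = RREG32(mmSRBM_SOFT_RESET);

		tmp |= bits;                      /* assert the selected resets */
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);  /* read back to post the write */
		udelay(50);

		tmp &= ~bits;                     /* release the resets */
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);
		udelay(50);
	}
]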
diff --git a/drivers/gpu/drm/amd/amdgpu/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h
index cbf232f5235b..cbd4f8951cfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/sid.h
+++ b/drivers/gpu/drm/amd/amdgpu/sid.h
@@ -24,43 +24,12 @@
#ifndef SI_H
#define SI_H
-#define TAHITI_RB_BITMAP_WIDTH_PER_SH 2
-
-#define SI_MAX_SH_GPRS 256
-#define SI_MAX_TEMP_GPRS 16
-#define SI_MAX_SH_THREADS 256
-#define SI_MAX_SH_STACK_ENTRIES 4096
-#define SI_MAX_FRC_EOV_CNT 16384
-#define SI_MAX_BACKENDS 8
-#define SI_MAX_BACKENDS_MASK 0xFF
-#define SI_MAX_BACKENDS_PER_SE_MASK 0x0F
-#define SI_MAX_SIMDS 12
-#define SI_MAX_SIMDS_MASK 0x0FFF
-#define SI_MAX_SIMDS_PER_SE_MASK 0x00FF
-#define SI_MAX_PIPES 8
-#define SI_MAX_PIPES_MASK 0xFF
-#define SI_MAX_PIPES_PER_SIMD_MASK 0x3F
-#define SI_MAX_LDS_NUM 0xFFFF
-#define SI_MAX_TCC 16
-#define SI_MAX_TCC_MASK 0xFFFF
#define SI_MAX_CTLACKS_ASSERTION_WAIT 100
-/* SMC IND accessor regs */
-#define SMC_IND_INDEX_0 0x80
-#define SMC_IND_DATA_0 0x81
-
-#define SMC_IND_ACCESS_CNTL 0x8A
-# define AUTO_INCREMENT_IND_0 (1 << 0)
-#define SMC_MESSAGE_0 0x8B
-#define SMC_RESP_0 0x8C
-
/* CG IND registers are accessed via SMC indirect space + SMC_CG_IND_START */
#define SMC_CG_IND_START 0xc0030000
#define SMC_CG_IND_END 0xc0040000
-#define CG_CGTT_LOCAL_0 0x400
-#define CG_CGTT_LOCAL_1 0x401
-
/* SMC IND registers */
#define SMC_SYSCON_RESET_CNTL 0x80000000
# define RST_REG (1 << 0)
@@ -68,9 +37,6 @@
# define CK_DISABLE (1 << 0)
# define CKEN (1 << 24)
-#define VGA_HDP_CONTROL 0xCA
-#define VGA_MEMORY_DISABLE (1 << 4)
-
#define DCCG_DISP_SLOW_SELECT_REG 0x13F
#define DCCG_DISP1_SLOW_SELECT(x) ((x) << 0)
#define DCCG_DISP1_SLOW_SELECT_MASK (7 << 0)
@@ -79,47 +45,6 @@
#define DCCG_DISP2_SLOW_SELECT_MASK (7 << 4)
#define DCCG_DISP2_SLOW_SELECT_SHIFT 4
-#define CG_SPLL_FUNC_CNTL 0x180
-#define SPLL_RESET (1 << 0)
-#define SPLL_SLEEP (1 << 1)
-#define SPLL_BYPASS_EN (1 << 3)
-#define SPLL_REF_DIV(x) ((x) << 4)
-#define SPLL_REF_DIV_MASK (0x3f << 4)
-#define SPLL_PDIV_A(x) ((x) << 20)
-#define SPLL_PDIV_A_MASK (0x7f << 20)
-#define SPLL_PDIV_A_SHIFT 20
-#define CG_SPLL_FUNC_CNTL_2 0x181
-#define SCLK_MUX_SEL(x) ((x) << 0)
-#define SCLK_MUX_SEL_MASK (0x1ff << 0)
-#define SPLL_CTLREQ_CHG (1 << 23)
-#define SCLK_MUX_UPDATE (1 << 26)
-#define CG_SPLL_FUNC_CNTL_3 0x182
-#define SPLL_FB_DIV(x) ((x) << 0)
-#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
-#define SPLL_FB_DIV_SHIFT 0
-#define SPLL_DITHEN (1 << 28)
-#define CG_SPLL_FUNC_CNTL_4 0x183
-
-#define SPLL_STATUS 0x185
-#define SPLL_CHG_STATUS (1 << 1)
-#define SPLL_CNTL_MODE 0x186
-#define SPLL_SW_DIR_CONTROL (1 << 0)
-# define SPLL_REFCLK_SEL(x) ((x) << 26)
-# define SPLL_REFCLK_SEL_MASK (3 << 26)
-
-#define CG_SPLL_SPREAD_SPECTRUM 0x188
-#define SSEN (1 << 0)
-#define CLK_S(x) ((x) << 4)
-#define CLK_S_MASK (0xfff << 4)
-#define CLK_S_SHIFT 4
-#define CG_SPLL_SPREAD_SPECTRUM_2 0x189
-#define CLK_V(x) ((x) << 0)
-#define CLK_V_MASK (0x3ffffff << 0)
-#define CLK_V_SHIFT 0
-
-#define CG_SPLL_AUTOSCALE_CNTL 0x18b
-# define AUTOSCALE_ON_SS_CLEAR (1 << 9)
-
/* discrete uvd clocks */
#define CG_UPLL_FUNC_CNTL 0x18d
# define UPLL_RESET_MASK 0x00000001
@@ -149,317 +74,13 @@
#define CG_UPLL_SPREAD_SPECTRUM 0x194
# define SSEN_MASK 0x00000001
-#define MPLL_BYPASSCLK_SEL 0x197
-# define MPLL_CLKOUT_SEL(x) ((x) << 8)
-# define MPLL_CLKOUT_SEL_MASK 0xFF00
-
-#define CG_CLKPIN_CNTL 0x198
-# define XTALIN_DIVIDE (1 << 1)
-# define BCLK_AS_XCLK (1 << 2)
-#define CG_CLKPIN_CNTL_2 0x199
-# define FORCE_BIF_REFCLK_EN (1 << 3)
-# define MUX_TCLK_TO_XCLK (1 << 8)
-
-#define THM_CLK_CNTL 0x19b
-# define CMON_CLK_SEL(x) ((x) << 0)
-# define CMON_CLK_SEL_MASK 0xFF
-# define TMON_CLK_SEL(x) ((x) << 8)
-# define TMON_CLK_SEL_MASK 0xFF00
-#define MISC_CLK_CNTL 0x19c
-# define DEEP_SLEEP_CLK_SEL(x) ((x) << 0)
-# define DEEP_SLEEP_CLK_SEL_MASK 0xFF
-# define ZCLK_SEL(x) ((x) << 8)
-# define ZCLK_SEL_MASK 0xFF00
-
-#define CG_THERMAL_CTRL 0x1c0
-#define DPM_EVENT_SRC(x) ((x) << 0)
-#define DPM_EVENT_SRC_MASK (7 << 0)
-#define DIG_THERM_DPM(x) ((x) << 14)
-#define DIG_THERM_DPM_MASK 0x003FC000
-#define DIG_THERM_DPM_SHIFT 14
-#define CG_THERMAL_STATUS 0x1c1
-#define FDO_PWM_DUTY(x) ((x) << 9)
-#define FDO_PWM_DUTY_MASK (0xff << 9)
-#define FDO_PWM_DUTY_SHIFT 9
-#define CG_THERMAL_INT 0x1c2
-#define DIG_THERM_INTH(x) ((x) << 8)
-#define DIG_THERM_INTH_MASK 0x0000FF00
-#define DIG_THERM_INTH_SHIFT 8
-#define DIG_THERM_INTL(x) ((x) << 16)
-#define DIG_THERM_INTL_MASK 0x00FF0000
-#define DIG_THERM_INTL_SHIFT 16
-#define THERM_INT_MASK_HIGH (1 << 24)
-#define THERM_INT_MASK_LOW (1 << 25)
-
-#define CG_MULT_THERMAL_CTRL 0x1c4
-#define TEMP_SEL(x) ((x) << 20)
-#define TEMP_SEL_MASK (0xff << 20)
-#define TEMP_SEL_SHIFT 20
-#define CG_MULT_THERMAL_STATUS 0x1c5
-#define ASIC_MAX_TEMP(x) ((x) << 0)
-#define ASIC_MAX_TEMP_MASK 0x000001ff
-#define ASIC_MAX_TEMP_SHIFT 0
-#define CTF_TEMP(x) ((x) << 9)
-#define CTF_TEMP_MASK 0x0003fe00
-#define CTF_TEMP_SHIFT 9
-
-#define CG_FDO_CTRL0 0x1d5
-#define FDO_STATIC_DUTY(x) ((x) << 0)
-#define FDO_STATIC_DUTY_MASK 0x000000FF
-#define FDO_STATIC_DUTY_SHIFT 0
-#define CG_FDO_CTRL1 0x1d6
-#define FMAX_DUTY100(x) ((x) << 0)
-#define FMAX_DUTY100_MASK 0x000000FF
-#define FMAX_DUTY100_SHIFT 0
-#define CG_FDO_CTRL2 0x1d7
-#define TMIN(x) ((x) << 0)
-#define TMIN_MASK 0x000000FF
-#define TMIN_SHIFT 0
-#define FDO_PWM_MODE(x) ((x) << 11)
-#define FDO_PWM_MODE_MASK (7 << 11)
-#define FDO_PWM_MODE_SHIFT 11
-#define TACH_PWM_RESP_RATE(x) ((x) << 25)
-#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
-#define TACH_PWM_RESP_RATE_SHIFT 25
-
-#define CG_TACH_CTRL 0x1dc
-# define EDGE_PER_REV(x) ((x) << 0)
-# define EDGE_PER_REV_MASK (0x7 << 0)
-# define EDGE_PER_REV_SHIFT 0
-# define TARGET_PERIOD(x) ((x) << 3)
-# define TARGET_PERIOD_MASK 0xfffffff8
-# define TARGET_PERIOD_SHIFT 3
-#define CG_TACH_STATUS 0x1dd
-# define TACH_PERIOD(x) ((x) << 0)
-# define TACH_PERIOD_MASK 0xffffffff
-# define TACH_PERIOD_SHIFT 0
-
-#define GENERAL_PWRMGT 0x1e0
-# define GLOBAL_PWRMGT_EN (1 << 0)
-# define STATIC_PM_EN (1 << 1)
-# define THERMAL_PROTECTION_DIS (1 << 2)
-# define THERMAL_PROTECTION_TYPE (1 << 3)
-# define SW_SMIO_INDEX(x) ((x) << 6)
-# define SW_SMIO_INDEX_MASK (1 << 6)
-# define SW_SMIO_INDEX_SHIFT 6
-# define VOLT_PWRMGT_EN (1 << 10)
-# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
-#define CG_TPC 0x1e1
-#define SCLK_PWRMGT_CNTL 0x1e2
-# define SCLK_PWRMGT_OFF (1 << 0)
-# define SCLK_LOW_D1 (1 << 1)
-# define FIR_RESET (1 << 4)
-# define FIR_FORCE_TREND_SEL (1 << 5)
-# define FIR_TREND_MODE (1 << 6)
-# define DYN_GFX_CLK_OFF_EN (1 << 7)
-# define GFX_CLK_FORCE_ON (1 << 8)
-# define GFX_CLK_REQUEST_OFF (1 << 9)
-# define GFX_CLK_FORCE_OFF (1 << 10)
-# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
-# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
-# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
-# define DYN_LIGHT_SLEEP_EN (1 << 14)
-
-#define TARGET_AND_CURRENT_PROFILE_INDEX 0x1e6
-# define CURRENT_STATE_INDEX_MASK (0xf << 4)
-# define CURRENT_STATE_INDEX_SHIFT 4
-
-#define CG_FTV 0x1ef
-
-#define CG_FFCT_0 0x1f0
-# define UTC_0(x) ((x) << 0)
-# define UTC_0_MASK (0x3ff << 0)
-# define DTC_0(x) ((x) << 10)
-# define DTC_0_MASK (0x3ff << 10)
-
-#define CG_BSP 0x1ff
-# define BSP(x) ((x) << 0)
-# define BSP_MASK (0xffff << 0)
-# define BSU(x) ((x) << 16)
-# define BSU_MASK (0xf << 16)
-#define CG_AT 0x200
-# define CG_R(x) ((x) << 0)
-# define CG_R_MASK (0xffff << 0)
-# define CG_L(x) ((x) << 16)
-# define CG_L_MASK (0xffff << 16)
-
-#define CG_GIT 0x201
-# define CG_GICST(x) ((x) << 0)
-# define CG_GICST_MASK (0xffff << 0)
-# define CG_GIPOT(x) ((x) << 16)
-# define CG_GIPOT_MASK (0xffff << 16)
-
-#define CG_SSP 0x203
-# define SST(x) ((x) << 0)
-# define SST_MASK (0xffff << 0)
-# define SSTU(x) ((x) << 16)
-# define SSTU_MASK (0xf << 16)
-
-#define CG_DISPLAY_GAP_CNTL 0x20a
-# define DISP1_GAP(x) ((x) << 0)
-# define DISP1_GAP_MASK (3 << 0)
-# define DISP2_GAP(x) ((x) << 2)
-# define DISP2_GAP_MASK (3 << 2)
-# define VBI_TIMER_COUNT(x) ((x) << 4)
-# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
-# define VBI_TIMER_UNIT(x) ((x) << 20)
-# define VBI_TIMER_UNIT_MASK (7 << 20)
-# define DISP1_GAP_MCHG(x) ((x) << 24)
-# define DISP1_GAP_MCHG_MASK (3 << 24)
-# define DISP2_GAP_MCHG(x) ((x) << 26)
-# define DISP2_GAP_MCHG_MASK (3 << 26)
-
-#define CG_ULV_CONTROL 0x21e
-#define CG_ULV_PARAMETER 0x21f
-
-#define SMC_SCRATCH0 0x221
-
-#define CG_CAC_CTRL 0x22e
-# define CAC_WINDOW(x) ((x) << 0)
-# define CAC_WINDOW_MASK 0x00ffffff
-
-#define DMIF_ADDR_CONFIG 0x2F5
-
-#define DMIF_ADDR_CALC 0x300
-
-#define PIPE0_DMIF_BUFFER_CONTROL 0x0328
-# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
-# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
-
-#define SRBM_STATUS 0x394
-#define GRBM_RQ_PENDING (1 << 5)
-#define VMC_BUSY (1 << 8)
-#define MCB_BUSY (1 << 9)
-#define MCB_NON_DISPLAY_BUSY (1 << 10)
-#define MCC_BUSY (1 << 11)
-#define MCD_BUSY (1 << 12)
-#define SEM_BUSY (1 << 14)
-#define IH_BUSY (1 << 17)
-
-#define SRBM_SOFT_RESET 0x398
-#define SOFT_RESET_BIF (1 << 1)
-#define SOFT_RESET_DC (1 << 5)
-#define SOFT_RESET_DMA1 (1 << 6)
-#define SOFT_RESET_GRBM (1 << 8)
-#define SOFT_RESET_HDP (1 << 9)
-#define SOFT_RESET_IH (1 << 10)
-#define SOFT_RESET_MC (1 << 11)
-#define SOFT_RESET_ROM (1 << 14)
-#define SOFT_RESET_SEM (1 << 15)
-#define SOFT_RESET_VMC (1 << 17)
-#define SOFT_RESET_DMA (1 << 20)
-#define SOFT_RESET_TST (1 << 21)
-#define SOFT_RESET_REGBB (1 << 22)
-#define SOFT_RESET_ORB (1 << 23)
-
-#define CC_SYS_RB_BACKEND_DISABLE 0x3A0
-#define GC_USER_SYS_RB_BACKEND_DISABLE 0x3A1
-
-#define SRBM_READ_ERROR 0x3A6
-#define SRBM_INT_CNTL 0x3A8
-#define SRBM_INT_ACK 0x3AA
-
-#define SRBM_STATUS2 0x3B1
-#define DMA_BUSY (1 << 5)
-#define DMA1_BUSY (1 << 6)
-
-#define VM_L2_CNTL 0x500
-#define ENABLE_L2_CACHE (1 << 0)
-#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
-#define L2_CACHE_PTE_ENDIAN_SWAP_MODE(x) ((x) << 2)
-#define L2_CACHE_PDE_ENDIAN_SWAP_MODE(x) ((x) << 4)
-#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
-#define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10)
-#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 15)
-#define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 19)
-#define VM_L2_CNTL2 0x501
-#define INVALIDATE_ALL_L1_TLBS (1 << 0)
-#define INVALIDATE_L2_CACHE (1 << 1)
-#define INVALIDATE_CACHE_MODE(x) ((x) << 26)
-#define INVALIDATE_PTE_AND_PDE_CACHES 0
-#define INVALIDATE_ONLY_PTE_CACHES 1
-#define INVALIDATE_ONLY_PDE_CACHES 2
-#define VM_L2_CNTL3 0x502
-#define BANK_SELECT(x) ((x) << 0)
-#define L2_CACHE_UPDATE_MODE(x) ((x) << 6)
-#define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15)
-#define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20)
-#define VM_L2_STATUS 0x503
-#define L2_BUSY (1 << 0)
-#define VM_CONTEXT0_CNTL 0x504
-#define ENABLE_CONTEXT (1 << 0)
-#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
-#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
-#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
-#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
-#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
-#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
-#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
-#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
-#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
-#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
-#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
-#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
-#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
-#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
-#define VM_CONTEXT1_CNTL 0x505
-#define VM_CONTEXT0_CNTL2 0x50C
-#define VM_CONTEXT1_CNTL2 0x50D
-#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR 0x50E
-#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR 0x50F
-#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR 0x510
-#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR 0x511
-#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR 0x512
-#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR 0x513
-#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x514
-#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x515
-
-#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x53f
-#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x537
-#define PROTECTIONS_MASK (0xf << 0)
-#define PROTECTIONS_SHIFT 0
- /* bit 0: range
- * bit 1: pde0
- * bit 2: valid
- * bit 3: read
- * bit 4: write
- */
-#define MEMORY_CLIENT_ID_MASK (0xff << 12)
-#define MEMORY_CLIENT_ID_SHIFT 12
-#define MEMORY_CLIENT_RW_MASK (1 << 24)
-#define MEMORY_CLIENT_RW_SHIFT 24
-#define FAULT_VMID_MASK (0xf << 25)
-#define FAULT_VMID_SHIFT 25
-
#define VM_INVALIDATE_REQUEST 0x51E
#define VM_INVALIDATE_RESPONSE 0x51F
-#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x546
-#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x547
-
-#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x54F
-#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR 0x550
-#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR 0x551
-#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR 0x552
-#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR 0x553
-#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR 0x554
-#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR 0x555
-#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR 0x556
-#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x557
-#define VM_CONTEXT1_PAGE_TABLE_START_ADDR 0x558
-
-#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x55F
-#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x560
-
#define VM_L2_CG 0x570
#define MC_CG_ENABLE (1 << 18)
#define MC_LS_ENABLE (1 << 19)
-#define MC_SHARED_CHMAP 0x801
-#define NOOFCHAN_SHIFT 12
-#define NOOFCHAN_MASK 0x0000f000
-#define MC_SHARED_CHREMAP 0x802
-
#define MC_VM_FB_LOCATION 0x809
#define MC_VM_AGP_TOP 0x80A
#define MC_VM_AGP_BOT 0x80B
@@ -491,21 +112,6 @@
#define MC_CITF_MISC_WR_CG 0x993
#define MC_CITF_MISC_VM_CG 0x994
-#define MC_ARB_RAMCFG 0x9D8
-#define NOOFBANK_SHIFT 0
-#define NOOFBANK_MASK 0x00000003
-#define NOOFRANK_SHIFT 2
-#define NOOFRANK_MASK 0x00000004
-#define NOOFROWS_SHIFT 3
-#define NOOFROWS_MASK 0x00000038
-#define NOOFCOLS_SHIFT 6
-#define NOOFCOLS_MASK 0x000000C0
-#define CHANSIZE_SHIFT 8
-#define CHANSIZE_MASK 0x00000100
-#define CHANSIZE_OVERRIDE (1 << 11)
-#define NOOFGROUPS_SHIFT 12
-#define NOOFGROUPS_MASK 0x00001000
-
#define MC_ARB_DRAM_TIMING 0x9DD
#define MC_ARB_DRAM_TIMING2 0x9DE
@@ -631,20 +237,6 @@
#define CLKS(x) ((x) << 0)
#define CLKS_MASK (0xfff << 0)
-#define HDP_HOST_PATH_CNTL 0xB00
-#define CLOCK_GATING_DIS (1 << 23)
-#define HDP_NONSURFACE_BASE 0xB01
-#define HDP_NONSURFACE_INFO 0xB02
-#define HDP_NONSURFACE_SIZE 0xB03
-
-#define HDP_DEBUG0 0xBCC
-
-#define HDP_ADDR_CONFIG 0xBD2
-#define HDP_MISC_CNTL 0xBD3
-#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
-#define HDP_MEM_POWER_LS 0xBD4
-#define HDP_LS_ENABLE (1 << 0)
-
#define ATC_MISC_CG 0xCD4
#define IH_RB_CNTL 0xF80
@@ -674,8 +266,6 @@
# define MC_WR_CLEAN_CNT(x) ((x) << 20)
# define MC_VMID(x) ((x) << 25)
-#define CONFIG_MEMSIZE 0x150A
-
#define INTERRUPT_CNTL 0x151A
# define IH_DUMMY_RD_OVERRIDE (1 << 0)
# define IH_DUMMY_RD_EN (1 << 1)
@@ -683,486 +273,22 @@
# define GEN_IH_INT_EN (1 << 8)
#define INTERRUPT_CNTL2 0x151B
-#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x1520
-
-#define BIF_FB_EN 0x1524
-#define FB_READ_EN (1 << 0)
-#define FB_WRITE_EN (1 << 1)
-
-#define HDP_REG_COHERENCY_FLUSH_CNTL 0x1528
-
-/* DCE6 ELD audio interface */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x28 /* LPCM */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x29 /* AC3 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x2A /* MPEG1 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x2B /* MP3 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x2C /* MPEG2 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x2D /* AAC */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x2E /* DTS */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x2F /* ATRAC */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x30 /* one bit audio - leave at 0 (default) */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x31 /* Dolby Digital */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x32 /* DTS-HD */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x33 /* MAT-MLP */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x34 /* DTS */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x35 /* WMA Pro */
-# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
-/* max channels minus one. 7 = 8 channels */
-# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
-# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
-# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
-/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
- * bit0 = 32 kHz
- * bit1 = 44.1 kHz
- * bit2 = 48 kHz
- * bit3 = 88.2 kHz
- * bit4 = 96 kHz
- * bit5 = 176.4 kHz
- * bit6 = 192 kHz
- */
-
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x37
-# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
-# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
-/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
- * 0 = invalid
- * x = legal delay value
- * 255 = sync not supported
- */
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x38
-# define HBR_CAPABLE (1 << 0) /* enabled by default */
-
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x3a
-# define MANUFACTURER_ID(x) (((x) & 0xffff) << 0)
-# define PRODUCT_ID(x) (((x) & 0xffff) << 16)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x3b
-# define SINK_DESCRIPTION_LEN(x) (((x) & 0xff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x3c
-# define PORT_ID0(x) (((x) & 0xffffffff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x3d
-# define PORT_ID1(x) (((x) & 0xffffffff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x3e
-# define DESCRIPTION0(x) (((x) & 0xff) << 0)
-# define DESCRIPTION1(x) (((x) & 0xff) << 8)
-# define DESCRIPTION2(x) (((x) & 0xff) << 16)
-# define DESCRIPTION3(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x3f
-# define DESCRIPTION4(x) (((x) & 0xff) << 0)
-# define DESCRIPTION5(x) (((x) & 0xff) << 8)
-# define DESCRIPTION6(x) (((x) & 0xff) << 16)
-# define DESCRIPTION7(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x40
-# define DESCRIPTION8(x) (((x) & 0xff) << 0)
-# define DESCRIPTION9(x) (((x) & 0xff) << 8)
-# define DESCRIPTION10(x) (((x) & 0xff) << 16)
-# define DESCRIPTION11(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x41
-# define DESCRIPTION12(x) (((x) & 0xff) << 0)
-# define DESCRIPTION13(x) (((x) & 0xff) << 8)
-# define DESCRIPTION14(x) (((x) & 0xff) << 16)
-# define DESCRIPTION15(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x42
-# define DESCRIPTION16(x) (((x) & 0xff) << 0)
-# define DESCRIPTION17(x) (((x) & 0xff) << 8)
-
-#define AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x54
-# define AUDIO_ENABLED (1 << 31)
-
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
-#define PORT_CONNECTIVITY_MASK (3 << 30)
-#define PORT_CONNECTIVITY_SHIFT 30
-
-#define DC_LB_MEMORY_SPLIT 0x1AC3
-#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
-
-#define PRIORITY_A_CNT 0x1AC6
-#define PRIORITY_MARK_MASK 0x7fff
-#define PRIORITY_OFF (1 << 16)
-#define PRIORITY_ALWAYS_ON (1 << 20)
-#define PRIORITY_B_CNT 0x1AC7
-
-#define DPG_PIPE_ARBITRATION_CONTROL3 0x1B32
-# define LATENCY_WATERMARK_MASK(x) ((x) << 16)
-#define DPG_PIPE_LATENCY_CONTROL 0x1B33
-# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
-# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
-
-/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
-#define VLINE_STATUS 0x1AEE
-# define VLINE_OCCURRED (1 << 0)
-# define VLINE_ACK (1 << 4)
-# define VLINE_STAT (1 << 12)
-# define VLINE_INTERRUPT (1 << 16)
-# define VLINE_INTERRUPT_TYPE (1 << 17)
-/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
-#define VBLANK_STATUS 0x1AEF
-# define VBLANK_OCCURRED (1 << 0)
-# define VBLANK_ACK (1 << 4)
-# define VBLANK_STAT (1 << 12)
-# define VBLANK_INTERRUPT (1 << 16)
-# define VBLANK_INTERRUPT_TYPE (1 << 17)
-
-/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
-#define INT_MASK 0x1AD0
-# define VBLANK_INT_MASK (1 << 0)
-# define VLINE_INT_MASK (1 << 4)
-
-#define DISP_INTERRUPT_STATUS 0x183D
-# define LB_D1_VLINE_INTERRUPT (1 << 2)
-# define LB_D1_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD1_INTERRUPT (1 << 17)
-# define DC_HPD1_RX_INTERRUPT (1 << 18)
-# define DACA_AUTODETECT_INTERRUPT (1 << 22)
-# define DACB_AUTODETECT_INTERRUPT (1 << 23)
-# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
-# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
-#define DISP_INTERRUPT_STATUS_CONTINUE 0x183E
-# define LB_D2_VLINE_INTERRUPT (1 << 2)
-# define LB_D2_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD2_INTERRUPT (1 << 17)
-# define DC_HPD2_RX_INTERRUPT (1 << 18)
-# define DISP_TIMER_INTERRUPT (1 << 24)
-#define DISP_INTERRUPT_STATUS_CONTINUE2 0x183F
-# define LB_D3_VLINE_INTERRUPT (1 << 2)
-# define LB_D3_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD3_INTERRUPT (1 << 17)
-# define DC_HPD3_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE3 0x1840
-# define LB_D4_VLINE_INTERRUPT (1 << 2)
-# define LB_D4_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD4_INTERRUPT (1 << 17)
-# define DC_HPD4_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE4 0x1853
-# define LB_D5_VLINE_INTERRUPT (1 << 2)
-# define LB_D5_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD5_INTERRUPT (1 << 17)
-# define DC_HPD5_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE5 0x1854
-# define LB_D6_VLINE_INTERRUPT (1 << 2)
-# define LB_D6_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD6_INTERRUPT (1 << 17)
-# define DC_HPD6_RX_INTERRUPT (1 << 18)
-
-/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
-#define GRPH_INT_STATUS 0x1A16
-# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
-# define GRPH_PFLIP_INT_CLEAR (1 << 8)
-/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
-#define GRPH_INT_CONTROL 0x1A17
-# define GRPH_PFLIP_INT_MASK (1 << 0)
-# define GRPH_PFLIP_INT_TYPE (1 << 8)
-
-#define DAC_AUTODETECT_INT_CONTROL 0x19F2
-
-#define DC_HPD1_INT_STATUS 0x1807
-#define DC_HPD2_INT_STATUS 0x180A
-#define DC_HPD3_INT_STATUS 0x180D
-#define DC_HPD4_INT_STATUS 0x1810
-#define DC_HPD5_INT_STATUS 0x1813
-#define DC_HPD6_INT_STATUS 0x1816
-# define DC_HPDx_INT_STATUS (1 << 0)
-# define DC_HPDx_SENSE (1 << 1)
-# define DC_HPDx_RX_INT_STATUS (1 << 8)
-
-#define DC_HPD1_INT_CONTROL 0x1808
-#define DC_HPD2_INT_CONTROL 0x180B
-#define DC_HPD3_INT_CONTROL 0x180E
-#define DC_HPD4_INT_CONTROL 0x1811
-#define DC_HPD5_INT_CONTROL 0x1814
-#define DC_HPD6_INT_CONTROL 0x1817
-# define DC_HPDx_INT_ACK (1 << 0)
-# define DC_HPDx_INT_POLARITY (1 << 8)
-# define DC_HPDx_INT_EN (1 << 16)
-# define DC_HPDx_RX_INT_ACK (1 << 20)
-# define DC_HPDx_RX_INT_EN (1 << 24)
-
-#define DC_HPD1_CONTROL 0x1809
-#define DC_HPD2_CONTROL 0x180C
-#define DC_HPD3_CONTROL 0x180F
-#define DC_HPD4_CONTROL 0x1812
-#define DC_HPD5_CONTROL 0x1815
-#define DC_HPD6_CONTROL 0x1818
-# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
-# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
-# define DC_HPDx_EN (1 << 28)
-
-#define DPG_PIPE_STUTTER_CONTROL 0x1B35
-# define STUTTER_ENABLE (1 << 0)
-
-/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
-#define CRTC_STATUS_FRAME_COUNT 0x1BA6
-
-/* Audio clocks */
-#define DCCG_AUDIO_DTO0_PHASE 0x05b0
-#define DCCG_AUDIO_DTO0_MODULE 0x05b4
-#define DCCG_AUDIO_DTO1_PHASE 0x05c0
-#define DCCG_AUDIO_DTO1_MODULE 0x05c4
-
-#define GRBM_CNTL 0x2000
-#define GRBM_READ_TIMEOUT(x) ((x) << 0)
-
-#define GRBM_STATUS2 0x2002
-#define RLC_RQ_PENDING (1 << 0)
-#define RLC_BUSY (1 << 8)
-#define TC_BUSY (1 << 9)
-
-#define GRBM_STATUS 0x2004
-#define CMDFIFO_AVAIL_MASK 0x0000000F
-#define RING2_RQ_PENDING (1 << 4)
-#define SRBM_RQ_PENDING (1 << 5)
-#define RING1_RQ_PENDING (1 << 6)
-#define CF_RQ_PENDING (1 << 7)
-#define PF_RQ_PENDING (1 << 8)
-#define GDS_DMA_RQ_PENDING (1 << 9)
-#define GRBM_EE_BUSY (1 << 10)
-#define DB_CLEAN (1 << 12)
-#define CB_CLEAN (1 << 13)
-#define TA_BUSY (1 << 14)
-#define GDS_BUSY (1 << 15)
-#define VGT_BUSY (1 << 17)
-#define IA_BUSY_NO_DMA (1 << 18)
-#define IA_BUSY (1 << 19)
-#define SX_BUSY (1 << 20)
-#define SPI_BUSY (1 << 22)
-#define BCI_BUSY (1 << 23)
-#define SC_BUSY (1 << 24)
-#define PA_BUSY (1 << 25)
-#define DB_BUSY (1 << 26)
-#define CP_COHERENCY_BUSY (1 << 28)
-#define CP_BUSY (1 << 29)
-#define CB_BUSY (1 << 30)
-#define GUI_ACTIVE (1 << 31)
-#define GRBM_STATUS_SE0 0x2005
-#define GRBM_STATUS_SE1 0x2006
-#define SE_DB_CLEAN (1 << 1)
-#define SE_CB_CLEAN (1 << 2)
-#define SE_BCI_BUSY (1 << 22)
-#define SE_VGT_BUSY (1 << 23)
-#define SE_PA_BUSY (1 << 24)
-#define SE_TA_BUSY (1 << 25)
-#define SE_SX_BUSY (1 << 26)
-#define SE_SPI_BUSY (1 << 27)
-#define SE_SC_BUSY (1 << 29)
-#define SE_DB_BUSY (1 << 30)
-#define SE_CB_BUSY (1 << 31)
-
-#define GRBM_INT_CNTL 0x2018
-# define RDERR_INT_ENABLE (1 << 0)
-# define GUI_IDLE_INT_ENABLE (1 << 19)
-
-#define CP_STRMOUT_CNTL 0x213F
-#define SCRATCH_REG0 0x2140
-#define SCRATCH_REG1 0x2141
-#define SCRATCH_REG2 0x2142
-#define SCRATCH_REG3 0x2143
-#define SCRATCH_REG4 0x2144
-#define SCRATCH_REG5 0x2145
-#define SCRATCH_REG6 0x2146
-#define SCRATCH_REG7 0x2147
-
-#define SCRATCH_UMSK 0x2150
-#define SCRATCH_ADDR 0x2151
-
-#define CP_SEM_WAIT_TIMER 0x216F
-
-#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x2172
-
-#define CP_ME_CNTL 0x21B6
-#define CP_CE_HALT (1 << 24)
-#define CP_PFP_HALT (1 << 26)
-#define CP_ME_HALT (1 << 28)
-
-#define CP_COHER_CNTL2 0x217A
-
-#define CP_RB2_RPTR 0x21BE
-#define CP_RB1_RPTR 0x21BF
-#define CP_RB0_RPTR 0x21C0
-#define CP_RB_WPTR_DELAY 0x21C1
-
-#define CP_QUEUE_THRESHOLDS 0x21D8
-#define ROQ_IB1_START(x) ((x) << 0)
-#define ROQ_IB2_START(x) ((x) << 8)
-#define CP_MEQ_THRESHOLDS 0x21D9
-#define MEQ1_START(x) ((x) << 0)
-#define MEQ2_START(x) ((x) << 8)
-
-#define CP_PERFMON_CNTL 0x21FF
-
#define VGT_VTX_VECT_EJECT_REG 0x222C
-
#define VGT_ESGS_RING_SIZE 0x2232
#define VGT_GSVS_RING_SIZE 0x2233
-
#define VGT_GS_VERTEX_REUSE 0x2235
-
#define VGT_PRIMITIVE_TYPE 0x2256
#define VGT_INDEX_TYPE 0x2257
-
#define VGT_NUM_INDICES 0x225C
#define VGT_NUM_INSTANCES 0x225D
-
#define VGT_TF_RING_SIZE 0x2262
-
#define VGT_HS_OFFCHIP_PARAM 0x226C
-
#define VGT_TF_MEMORY_BASE 0x226E
-#define PA_CL_ENHANCE 0x2285
-#define CLIP_VTX_REORDER_ENA (1 << 0)
-#define NUM_CLIP_SEQ(x) ((x) << 1)
-
-#define PA_SU_LINE_STIPPLE_VALUE 0x2298
-
-#define PA_SC_LINE_STIPPLE_STATE 0x22C4
-
-#define PA_SC_FORCE_EOV_MAX_CNTS 0x22C9
-#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
-#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
-
-#define PA_SC_FIFO_SIZE 0x22F3
-#define SC_FRONTEND_PRIM_FIFO_SIZE(x) ((x) << 0)
-#define SC_BACKEND_PRIM_FIFO_SIZE(x) ((x) << 6)
-#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 15)
-#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 23)
-
#define PA_SC_ENHANCE 0x22FC
-#define SQ_CONFIG 0x2300
-
-#define SQC_CACHES 0x2302
-
-#define SQ_POWER_THROTTLE 0x2396
-#define MIN_POWER(x) ((x) << 0)
-#define MIN_POWER_MASK (0x3fff << 0)
-#define MIN_POWER_SHIFT 0
-#define MAX_POWER(x) ((x) << 16)
-#define MAX_POWER_MASK (0x3fff << 16)
-#define MAX_POWER_SHIFT 0
-#define SQ_POWER_THROTTLE2 0x2397
-#define MAX_POWER_DELTA(x) ((x) << 0)
-#define MAX_POWER_DELTA_MASK (0x3fff << 0)
-#define MAX_POWER_DELTA_SHIFT 0
-#define STI_SIZE(x) ((x) << 16)
-#define STI_SIZE_MASK (0x3ff << 16)
-#define STI_SIZE_SHIFT 16
-#define LTI_RATIO(x) ((x) << 27)
-#define LTI_RATIO_MASK (0xf << 27)
-#define LTI_RATIO_SHIFT 27
-
-#define SX_DEBUG_1 0x2418
-
-#define SPI_STATIC_THREAD_MGMT_1 0x2438
-#define SPI_STATIC_THREAD_MGMT_2 0x2439
-#define SPI_STATIC_THREAD_MGMT_3 0x243A
-#define SPI_PS_MAX_WAVE_ID 0x243B
-
-#define SPI_CONFIG_CNTL 0x2440
-
-#define SPI_CONFIG_CNTL_1 0x244F
-#define VTX_DONE_DELAY(x) ((x) << 0)
-#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
-
-#define CGTS_TCC_DISABLE 0x2452
-#define CGTS_USER_TCC_DISABLE 0x2453
-#define TCC_DISABLE_MASK 0xFFFF0000
-#define TCC_DISABLE_SHIFT 16
-#define CGTS_SM_CTRL_REG 0x2454
-#define OVERRIDE (1 << 21)
-#define LS_OVERRIDE (1 << 22)
-
-#define SPI_LB_CU_MASK 0x24D5
-
#define TA_CNTL_AUX 0x2542
-#define CC_RB_BACKEND_DISABLE 0x263D
-#define BACKEND_DISABLE(x) ((x) << 16)
-#define GB_ADDR_CONFIG 0x263E
-#define NUM_PIPES(x) ((x) << 0)
-#define NUM_PIPES_MASK 0x00000007
-#define NUM_PIPES_SHIFT 0
-#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
-#define PIPE_INTERLEAVE_SIZE_MASK 0x00000070
-#define PIPE_INTERLEAVE_SIZE_SHIFT 4
-#define NUM_SHADER_ENGINES(x) ((x) << 12)
-#define NUM_SHADER_ENGINES_MASK 0x00003000
-#define NUM_SHADER_ENGINES_SHIFT 12
-#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
-#define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000
-#define SHADER_ENGINE_TILE_SIZE_SHIFT 16
-#define NUM_GPUS(x) ((x) << 20)
-#define NUM_GPUS_MASK 0x00700000
-#define NUM_GPUS_SHIFT 20
-#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
-#define MULTI_GPU_TILE_SIZE_MASK 0x03000000
-#define MULTI_GPU_TILE_SIZE_SHIFT 24
-#define ROW_SIZE(x) ((x) << 28)
-#define ROW_SIZE_MASK 0x30000000
-#define ROW_SIZE_SHIFT 28
-
-#define CB_PERFCOUNTER0_SELECT0 0x2688
-#define CB_PERFCOUNTER0_SELECT1 0x2689
-#define CB_PERFCOUNTER1_SELECT0 0x268A
-#define CB_PERFCOUNTER1_SELECT1 0x268B
-#define CB_PERFCOUNTER2_SELECT0 0x268C
-#define CB_PERFCOUNTER2_SELECT1 0x268D
-#define CB_PERFCOUNTER3_SELECT0 0x268E
-#define CB_PERFCOUNTER3_SELECT1 0x268F
-
-#define CB_CGTT_SCLK_CTRL 0x2698
-
-#define TCP_CHAN_STEER_LO 0x2B03
-#define TCP_CHAN_STEER_HI 0x2B94
-
-#define CP_RB0_BASE 0x3040
-#define CP_RB0_CNTL 0x3041
-#define RB_BUFSZ(x) ((x) << 0)
-#define RB_BLKSZ(x) ((x) << 8)
-#define BUF_SWAP_32BIT (2 << 16)
-#define RB_NO_UPDATE (1 << 27)
-#define RB_RPTR_WR_ENA (1 << 31)
-
-#define CP_RB0_RPTR_ADDR 0x3043
-#define CP_RB0_RPTR_ADDR_HI 0x3044
-#define CP_RB0_WPTR 0x3045
-
-#define CP_PFP_UCODE_ADDR 0x3054
-#define CP_PFP_UCODE_DATA 0x3055
-#define CP_ME_RAM_RADDR 0x3056
-#define CP_ME_RAM_WADDR 0x3057
-#define CP_ME_RAM_DATA 0x3058
-
-#define CP_CE_UCODE_ADDR 0x305A
-#define CP_CE_UCODE_DATA 0x305B
-
-#define CP_RB1_BASE 0x3060
-#define CP_RB1_CNTL 0x3061
-#define CP_RB1_RPTR_ADDR 0x3062
-#define CP_RB1_RPTR_ADDR_HI 0x3063
-#define CP_RB1_WPTR 0x3064
-#define CP_RB2_BASE 0x3065
-#define CP_RB2_CNTL 0x3066
-#define CP_RB2_RPTR_ADDR 0x3067
-#define CP_RB2_RPTR_ADDR_HI 0x3068
-#define CP_RB2_WPTR 0x3069
-#define CP_INT_CNTL_RING0 0x306A
-#define CP_INT_CNTL_RING1 0x306B
-#define CP_INT_CNTL_RING2 0x306C
-# define CNTX_BUSY_INT_ENABLE (1 << 19)
-# define CNTX_EMPTY_INT_ENABLE (1 << 20)
-# define WAIT_MEM_SEM_INT_ENABLE (1 << 21)
-# define TIME_STAMP_INT_ENABLE (1 << 26)
-# define CP_RINGID2_INT_ENABLE (1 << 29)
-# define CP_RINGID1_INT_ENABLE (1 << 30)
-# define CP_RINGID0_INT_ENABLE (1 << 31)
-#define CP_INT_STATUS_RING0 0x306D
-#define CP_INT_STATUS_RING1 0x306E
-#define CP_INT_STATUS_RING2 0x306F
-# define WAIT_MEM_SEM_INT_STAT (1 << 21)
-# define TIME_STAMP_INT_STAT (1 << 26)
-# define CP_RINGID2_INT_STAT (1 << 29)
-# define CP_RINGID1_INT_STAT (1 << 30)
-# define CP_RINGID0_INT_STAT (1 << 31)
-
// #define PA_SC_RASTER_CONFIG 0xA0D4
# define RB_XSEL2(x) ((x) << 4)
# define RB_XSEL2_MASK (0x3 << 4)
@@ -1185,171 +311,14 @@
# define SE_YSEL(x) ((x) << 28)
# define SE_YSEL_MASK (0x3 << 28)
-/* PIF PHY0 registers idx/data 0x8/0xc */
-#define PB0_PIF_CNTL 0x10
-# define LS2_EXIT_TIME(x) ((x) << 17)
-# define LS2_EXIT_TIME_MASK (0x7 << 17)
-# define LS2_EXIT_TIME_SHIFT 17
-#define PB0_PIF_PAIRING 0x11
-# define MULTI_PIF (1 << 25)
-#define PB0_PIF_PWRDOWN_0 0x12
-# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
-# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_0_SHIFT 24
-#define PB0_PIF_PWRDOWN_1 0x13
-# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
-# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_1_SHIFT 24
-
-#define PB0_PIF_PWRDOWN_2 0x17
-# define PLL_POWER_STATE_IN_TXS2_2(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_2_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_2_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_2(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_2_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_2_SHIFT 10
-# define PLL_RAMP_UP_TIME_2(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_2_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_2_SHIFT 24
-#define PB0_PIF_PWRDOWN_3 0x18
-# define PLL_POWER_STATE_IN_TXS2_3(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_3_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_3_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_3(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_3_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_3_SHIFT 10
-# define PLL_RAMP_UP_TIME_3(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_3_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_3_SHIFT 24
-/* PIF PHY1 registers idx/data 0x10/0x14 */
-#define PB1_PIF_CNTL 0x10
-#define PB1_PIF_PAIRING 0x11
-#define PB1_PIF_PWRDOWN_0 0x12
-#define PB1_PIF_PWRDOWN_1 0x13
-
-#define PB1_PIF_PWRDOWN_2 0x17
-#define PB1_PIF_PWRDOWN_3 0x18
-/* PCIE registers idx/data 0x30/0x34 */
-#define PCIE_CNTL2 0x1c /* PCIE */
-# define SLV_MEM_LS_EN (1 << 16)
-# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
-# define MST_MEM_LS_EN (1 << 18)
-# define REPLAY_MEM_LS_EN (1 << 19)
-#define PCIE_LC_STATUS1 0x28 /* PCIE */
-# define LC_REVERSE_RCVR (1 << 0)
-# define LC_REVERSE_XMIT (1 << 1)
-# define LC_OPERATING_LINK_WIDTH_MASK (0x7 << 2)
-# define LC_OPERATING_LINK_WIDTH_SHIFT 2
-# define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5)
-# define LC_DETECTED_LINK_WIDTH_SHIFT 5
-
-#define PCIE_P_CNTL 0x40 /* PCIE */
-# define P_IGNORE_EDB_ERR (1 << 6)
-
/* PCIE PORT registers idx/data 0x38/0x3c */
-#define PCIE_LC_CNTL 0xa0
-# define LC_L0S_INACTIVITY(x) ((x) << 8)
-# define LC_L0S_INACTIVITY_MASK (0xf << 8)
-# define LC_L0S_INACTIVITY_SHIFT 8
-# define LC_L1_INACTIVITY(x) ((x) << 12)
-# define LC_L1_INACTIVITY_MASK (0xf << 12)
-# define LC_L1_INACTIVITY_SHIFT 12
-# define LC_PMI_TO_L1_DIS (1 << 16)
-# define LC_ASPM_TO_L1_DIS (1 << 24)
-#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
-# define LC_LINK_WIDTH_SHIFT 0
-# define LC_LINK_WIDTH_MASK 0x7
+// #define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
# define LC_LINK_WIDTH_X0 0
# define LC_LINK_WIDTH_X1 1
# define LC_LINK_WIDTH_X2 2
# define LC_LINK_WIDTH_X4 3
# define LC_LINK_WIDTH_X8 4
# define LC_LINK_WIDTH_X16 6
-# define LC_LINK_WIDTH_RD_SHIFT 4
-# define LC_LINK_WIDTH_RD_MASK 0x70
-# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
-# define LC_RECONFIG_NOW (1 << 8)
-# define LC_RENEGOTIATION_SUPPORT (1 << 9)
-# define LC_RENEGOTIATE_EN (1 << 10)
-# define LC_SHORT_RECONFIG_EN (1 << 11)
-# define LC_UPCONFIGURE_SUPPORT (1 << 12)
-# define LC_UPCONFIGURE_DIS (1 << 13)
-# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
-# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
-# define LC_DYN_LANES_PWR_STATE_SHIFT 21
-#define PCIE_LC_N_FTS_CNTL 0xa3 /* PCIE_P */
-# define LC_XMIT_N_FTS(x) ((x) << 0)
-# define LC_XMIT_N_FTS_MASK (0xff << 0)
-# define LC_XMIT_N_FTS_SHIFT 0
-# define LC_XMIT_N_FTS_OVERRIDE_EN (1 << 8)
-# define LC_N_FTS_MASK (0xff << 24)
-#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
-# define LC_GEN2_EN_STRAP (1 << 0)
-# define LC_GEN3_EN_STRAP (1 << 1)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 2)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_MASK (0x3 << 3)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT 3
-# define LC_FORCE_EN_SW_SPEED_CHANGE (1 << 5)
-# define LC_FORCE_DIS_SW_SPEED_CHANGE (1 << 6)
-# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 7)
-# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 8)
-# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 9)
-# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 10)
-# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 10
-# define LC_CURRENT_DATA_RATE_MASK (0x3 << 13) /* 0/1/2 = gen1/2/3 */
-# define LC_CURRENT_DATA_RATE_SHIFT 13
-# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 16)
-# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 18)
-# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 19)
-# define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20)
-# define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21)
-
-#define PCIE_LC_CNTL2 0xb1
-# define LC_ALLOW_PDWN_IN_L1 (1 << 17)
-# define LC_ALLOW_PDWN_IN_L23 (1 << 18)
-
-#define PCIE_LC_CNTL3 0xb5 /* PCIE_P */
-# define LC_GO_TO_RECOVERY (1 << 30)
-#define PCIE_LC_CNTL4 0xb6 /* PCIE_P */
-# define LC_REDO_EQ (1 << 5)
-# define LC_SET_QUIESCE (1 << 13)
-
-/*
- * UVD
- */
-#define UVD_UDEC_ADDR_CONFIG 0x3bd3
-#define UVD_UDEC_DB_ADDR_CONFIG 0x3bd4
-#define UVD_UDEC_DBW_ADDR_CONFIG 0x3bd5
-#define UVD_RBC_RB_RPTR 0x3da4
-#define UVD_RBC_RB_WPTR 0x3da5
-#define UVD_STATUS 0x3daf
-
-#define UVD_CGC_CTRL 0x3dc2
-# define DCM (1 << 0)
-# define CG_DT(x) ((x) << 2)
-# define CG_DT_MASK (0xf << 2)
-# define CLK_OD(x) ((x) << 6)
-# define CLK_OD_MASK (0x1f << 6)
-
- /* UVD CTX indirect */
-#define UVD_CGC_MEM_CTRL 0xC0
-#define UVD_CGC_CTRL2 0xC1
-# define DYN_OR_EN (1 << 0)
-# define DYN_RR_EN (1 << 1)
-# define G_DIV_ID(x) ((x) << 2)
-# define G_DIV_ID_MASK (0x7 << 2)
/*
* PM4
@@ -1583,45 +552,7 @@
/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
#define DMA1_REGISTER_OFFSET 0x200 /* not a register */
-
-#define DMA_RB_CNTL 0x3400
-# define DMA_RB_ENABLE (1 << 0)
-# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
-# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
-# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
-# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
-# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
-#define DMA_RB_BASE 0x3401
-#define DMA_RB_RPTR 0x3402
-#define DMA_RB_WPTR 0x3403
-
-#define DMA_RB_RPTR_ADDR_HI 0x3407
-#define DMA_RB_RPTR_ADDR_LO 0x3408
-
-#define DMA_IB_CNTL 0x3409
-# define DMA_IB_ENABLE (1 << 0)
-# define DMA_IB_SWAP_ENABLE (1 << 4)
-# define CMD_VMID_FORCE (1 << 31)
-#define DMA_IB_RPTR 0x340a
-#define DMA_CNTL 0x340b
-# define TRAP_ENABLE (1 << 0)
-# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
-# define SEM_WAIT_INT_ENABLE (1 << 2)
-# define DATA_SWAP_ENABLE (1 << 3)
-# define FENCE_SWAP_ENABLE (1 << 4)
-# define CTXEMPTY_INT_ENABLE (1 << 28)
-#define DMA_STATUS_REG 0x340d
-# define DMA_IDLE (1 << 0)
-#define DMA_TILING_CONFIG 0x342e
-
-#define DMA_POWER_CNTL 0x342f
-# define MEM_POWER_OVERRIDE (1 << 8)
-#define DMA_CLK_CTRL 0x3430
-
-#define DMA_PG 0x3435
-# define PG_CNTL_ENABLE (1 << 0)
-#define DMA_PGFSM_CONFIG 0x3436
-#define DMA_PGFSM_WRITE 0x3437
+#define SDMA_MAX_INSTANCE 2
#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
(((b) & 0x1) << 26) | \
@@ -1650,6 +581,7 @@
#define DMA_PACKET_POLL_REG_MEM 0xe
#define DMA_PACKET_NOP 0xf
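For orientation, DMA_PACKET() packs the opcode into bits [31:28] of the ring header dword; the b/t/s flags and a count take the lower bits (the macro's tail is elided in the hunk above, so the exact field widths are assumed from the usual sid.h layout). A minimal sketch, not part of the patch:

#include <linux/types.h>

/* Hedged example: compose an ASYNC DMA NOP header dword. */
static u32 sdma_nop_header(void)
{
	/* (DMA_PACKET_NOP & 0xF) << 28 == 0xF0000000 */
	return DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}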
+/* VCE */
#define VCE_STATUS 0x20004
#define VCE_VCPU_CNTL 0x20014
#define VCE_CLK_EN (1 << 0)
@@ -1726,378 +658,118 @@
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
-#define AMDGPU_MM_INDEX 0x0000
-#define AMDGPU_MM_DATA 0x0001
-
-#define VERDE_NUM_CRTC 6
-#define BLACKOUT_MODE_MASK 0x00000007
-#define VGA_RENDER_CONTROL 0xC0
-#define R_000300_VGA_RENDER_CONTROL 0xC0
-#define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF
-#define EVERGREEN_CRTC_STATUS 0x1BA3
-#define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_POSITION 0x1BA4
-/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
-#define EVERGREEN_CRTC_V_BLANK_START_END 0x1b8d
-#define EVERGREEN_CRTC_CONTROL 0x1b9c
-#define EVERGREEN_CRTC_MASTER_EN (1 << 0)
-#define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
-#define EVERGREEN_CRTC_BLANK_CONTROL 0x1b9d
-#define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
-#define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x1ba8
-#define EVERGREEN_CRTC_UPDATE_LOCK 0x1bb5
-#define EVERGREEN_MASTER_UPDATE_LOCK 0x1bbd
-#define EVERGREEN_MASTER_UPDATE_MODE 0x1bbe
-#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x1a04
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x1a05
-#define EVERGREEN_GRPH_UPDATE 0x1a11
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0xc4
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0xc9
-#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
-
-#define EVERGREEN_DATA_FORMAT 0x1ac0
-# define EVERGREEN_INTERLEAVE_EN (1 << 0)
-
-#define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL (0 << 20)
-#define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED (1 << 20)
-#define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1 (2 << 20)
-#define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1 (4 << 20)
-
-#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a45
-#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1845
-
-#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1847
-#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a47
-
-#define R600_D1GRPH_SWAP_CONTROL 0x1843
-#define R600_D1GRPH_SWAP_ENDIAN_NONE (0 << 0)
-#define R600_D1GRPH_SWAP_ENDIAN_16BIT (1 << 0)
-#define R600_D1GRPH_SWAP_ENDIAN_32BIT (2 << 0)
-#define R600_D1GRPH_SWAP_ENDIAN_64BIT (3 << 0)
-
-#define AVIVO_D1VGA_CONTROL 0x00cc
-# define AVIVO_DVGA_CONTROL_MODE_ENABLE (1 << 0)
-# define AVIVO_DVGA_CONTROL_TIMING_SELECT (1 << 8)
-# define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1 << 9)
-# define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1 << 10)
-# define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1 << 16)
-# define AVIVO_DVGA_CONTROL_ROTATE (1 << 24)
-#define AVIVO_D2VGA_CONTROL 0x00ce
-
-#define R600_BUS_CNTL 0x1508
-# define R600_BIOS_ROM_DIS (1 << 1)
+
#define R600_ROM_CNTL 0x580
# define R600_SCK_OVERWRITE (1 << 1)
# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
# define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK (0xf << 28)
-#define FMT_BIT_DEPTH_CONTROL 0x1bf2
-#define FMT_TRUNCATE_EN (1 << 0)
-#define FMT_TRUNCATE_DEPTH (1 << 4)
-#define FMT_SPATIAL_DITHER_EN (1 << 8)
-#define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
-#define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
-#define FMT_FRAME_RANDOM_ENABLE (1 << 13)
-#define FMT_RGB_RANDOM_ENABLE (1 << 14)
-#define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
-#define FMT_TEMPORAL_DITHER_EN (1 << 16)
-#define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
-#define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
-#define FMT_TEMPORAL_LEVEL (1 << 24)
-#define FMT_TEMPORAL_DITHER_RESET (1 << 25)
-#define FMT_25FRC_SEL(x) ((x) << 26)
-#define FMT_50FRC_SEL(x) ((x) << 28)
-#define FMT_75FRC_SEL(x) ((x) << 30)
-
-#define EVERGREEN_DC_LUT_CONTROL 0x1a80
-#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE 0x1a81
-#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN 0x1a82
-#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED 0x1a83
-#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE 0x1a84
-#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN 0x1a85
-#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED 0x1a86
-#define EVERGREEN_DC_LUT_30_COLOR 0x1a7c
-#define EVERGREEN_DC_LUT_RW_INDEX 0x1a79
-#define EVERGREEN_DC_LUT_WRITE_EN_MASK 0x1a7e
-#define EVERGREEN_DC_LUT_RW_MODE 0x1a78
-
-#define EVERGREEN_GRPH_ENABLE 0x1a00
-#define EVERGREEN_GRPH_CONTROL 0x1a01
-#define EVERGREEN_GRPH_DEPTH(x) (((x) & 0x3) << 0)
-#define EVERGREEN_GRPH_DEPTH_8BPP 0
-#define EVERGREEN_GRPH_DEPTH_16BPP 1
-#define EVERGREEN_GRPH_DEPTH_32BPP 2
-#define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
-#define EVERGREEN_ADDR_SURF_2_BANK 0
-#define EVERGREEN_ADDR_SURF_4_BANK 1
-#define EVERGREEN_ADDR_SURF_8_BANK 2
-#define EVERGREEN_ADDR_SURF_16_BANK 3
-#define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
-#define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
-#define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
-
-#define EVERGREEN_GRPH_FORMAT_INDEXED 0
-#define EVERGREEN_GRPH_FORMAT_ARGB1555 0
-#define EVERGREEN_GRPH_FORMAT_ARGB565 1
-#define EVERGREEN_GRPH_FORMAT_ARGB4444 2
-#define EVERGREEN_GRPH_FORMAT_AI88 3
-#define EVERGREEN_GRPH_FORMAT_MONO16 4
-#define EVERGREEN_GRPH_FORMAT_BGRA5551 5
+#define GRPH_ARRAY_LINEAR_GENERAL 0
+#define GRPH_ARRAY_LINEAR_ALIGNED 1
+#define GRPH_ARRAY_1D_TILED_THIN1 2
+#define GRPH_ARRAY_2D_TILED_THIN1 4
+
+#define ES_AND_GS_AUTO 3
+#define BUF_SWAP_32BIT (2 << 16)
+
+#define GRPH_DEPTH_8BPP 0
+#define GRPH_DEPTH_16BPP 1
+#define GRPH_DEPTH_32BPP 2
+
+/* 8 BPP */
+#define GRPH_FORMAT_INDEXED 0
+
+/* 16 BPP */
+#define GRPH_FORMAT_ARGB1555 0
+#define GRPH_FORMAT_ARGB565 1
+#define GRPH_FORMAT_ARGB4444 2
+#define GRPH_FORMAT_AI88 3
+#define GRPH_FORMAT_MONO16 4
+#define GRPH_FORMAT_BGRA5551 5
/* 32 BPP */
-#define EVERGREEN_GRPH_FORMAT_ARGB8888 0
-#define EVERGREEN_GRPH_FORMAT_ARGB2101010 1
-#define EVERGREEN_GRPH_FORMAT_32BPP_DIG 2
-#define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010 3
-#define EVERGREEN_GRPH_FORMAT_BGRA1010102 4
-#define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
-#define EVERGREEN_GRPH_FORMAT_RGB111110 6
-#define EVERGREEN_GRPH_FORMAT_BGR101111 7
-#define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
-#define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
-#define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
-#define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
-#define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
-#define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
-#define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2
-#define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
-
-#define EVERGREEN_GRPH_SWAP_CONTROL 0x1a03
-#define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
-# define EVERGREEN_GRPH_ENDIAN_NONE 0
-# define EVERGREEN_GRPH_ENDIAN_8IN16 1
-# define EVERGREEN_GRPH_ENDIAN_8IN32 2
-# define EVERGREEN_GRPH_ENDIAN_8IN64 3
-#define EVERGREEN_GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
-# define EVERGREEN_GRPH_RED_SEL_R 0
-# define EVERGREEN_GRPH_RED_SEL_G 1
-# define EVERGREEN_GRPH_RED_SEL_B 2
-# define EVERGREEN_GRPH_RED_SEL_A 3
-#define EVERGREEN_GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
-# define EVERGREEN_GRPH_GREEN_SEL_G 0
-# define EVERGREEN_GRPH_GREEN_SEL_B 1
-# define EVERGREEN_GRPH_GREEN_SEL_A 2
-# define EVERGREEN_GRPH_GREEN_SEL_R 3
-#define EVERGREEN_GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
-# define EVERGREEN_GRPH_BLUE_SEL_B 0
-# define EVERGREEN_GRPH_BLUE_SEL_A 1
-# define EVERGREEN_GRPH_BLUE_SEL_R 2
-# define EVERGREEN_GRPH_BLUE_SEL_G 3
-#define EVERGREEN_GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
-# define EVERGREEN_GRPH_ALPHA_SEL_A 0
-# define EVERGREEN_GRPH_ALPHA_SEL_R 1
-# define EVERGREEN_GRPH_ALPHA_SEL_G 2
-# define EVERGREEN_GRPH_ALPHA_SEL_B 3
-
-#define EVERGREEN_D3VGA_CONTROL 0xf8
-#define EVERGREEN_D4VGA_CONTROL 0xf9
-#define EVERGREEN_D5VGA_CONTROL 0xfa
-#define EVERGREEN_D6VGA_CONTROL 0xfb
-
-#define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK 0xffffff00
-
-#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL 0x1a02
-#define EVERGREEN_LUT_10BIT_BYPASS_EN (1 << 8)
-
-#define EVERGREEN_GRPH_PITCH 0x1a06
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
-#define EVERGREEN_GRPH_SURFACE_OFFSET_X 0x1a09
-#define EVERGREEN_GRPH_SURFACE_OFFSET_Y 0x1a0a
-#define EVERGREEN_GRPH_X_START 0x1a0b
-#define EVERGREEN_GRPH_Y_START 0x1a0c
-#define EVERGREEN_GRPH_X_END 0x1a0d
-#define EVERGREEN_GRPH_Y_END 0x1a0e
-#define EVERGREEN_GRPH_UPDATE 0x1a11
-#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
-#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
-#define EVERGREEN_GRPH_FLIP_CONTROL 0x1a12
-#define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
-
-#define EVERGREEN_VIEWPORT_START 0x1b5c
-#define EVERGREEN_VIEWPORT_SIZE 0x1b5d
-#define EVERGREEN_DESKTOP_HEIGHT 0x1ac1
-
-/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
-#define EVERGREEN_CUR_CONTROL 0x1a66
-# define EVERGREEN_CURSOR_EN (1 << 0)
-# define EVERGREEN_CURSOR_MODE(x) (((x) & 0x3) << 8)
-# define EVERGREEN_CURSOR_MONO 0
-# define EVERGREEN_CURSOR_24_1 1
-# define EVERGREEN_CURSOR_24_8_PRE_MULT 2
-# define EVERGREEN_CURSOR_24_8_UNPRE_MULT 3
-# define EVERGREEN_CURSOR_2X_MAGNIFY (1 << 16)
-# define EVERGREEN_CURSOR_FORCE_MC_ON (1 << 20)
-# define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
-# define EVERGREEN_CURSOR_URGENT_ALWAYS 0
-# define EVERGREEN_CURSOR_URGENT_1_8 1
-# define EVERGREEN_CURSOR_URGENT_1_4 2
-# define EVERGREEN_CURSOR_URGENT_3_8 3
-# define EVERGREEN_CURSOR_URGENT_1_2 4
-#define EVERGREEN_CUR_SURFACE_ADDRESS 0x1a67
-# define EVERGREEN_CUR_SURFACE_ADDRESS_MASK 0xfffff000
-#define EVERGREEN_CUR_SIZE 0x1a68
-#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH 0x1a69
-#define EVERGREEN_CUR_POSITION 0x1a6a
-#define EVERGREEN_CUR_HOT_SPOT 0x1a6b
-#define EVERGREEN_CUR_COLOR1 0x1a6c
-#define EVERGREEN_CUR_COLOR2 0x1a6d
-#define EVERGREEN_CUR_UPDATE 0x1a6e
-# define EVERGREEN_CURSOR_UPDATE_PENDING (1 << 0)
-# define EVERGREEN_CURSOR_UPDATE_TAKEN (1 << 1)
-# define EVERGREEN_CURSOR_UPDATE_LOCK (1 << 16)
-# define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
-
-
-#define NI_INPUT_CSC_CONTROL 0x1a35
-# define NI_INPUT_CSC_GRPH_MODE(x) (((x) & 0x3) << 0)
-# define NI_INPUT_CSC_BYPASS 0
-# define NI_INPUT_CSC_PROG_COEFF 1
-# define NI_INPUT_CSC_PROG_SHARED_MATRIXA 2
-# define NI_INPUT_CSC_OVL_MODE(x) (((x) & 0x3) << 4)
-
-#define NI_OUTPUT_CSC_CONTROL 0x1a3c
-# define NI_OUTPUT_CSC_GRPH_MODE(x) (((x) & 0x7) << 0)
-# define NI_OUTPUT_CSC_BYPASS 0
-# define NI_OUTPUT_CSC_TV_RGB 1
-# define NI_OUTPUT_CSC_YCBCR_601 2
-# define NI_OUTPUT_CSC_YCBCR_709 3
-# define NI_OUTPUT_CSC_PROG_COEFF 4
-# define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB 5
-# define NI_OUTPUT_CSC_OVL_MODE(x) (((x) & 0x7) << 4)
-
-#define NI_DEGAMMA_CONTROL 0x1a58
-# define NI_GRPH_DEGAMMA_MODE(x) (((x) & 0x3) << 0)
-# define NI_DEGAMMA_BYPASS 0
-# define NI_DEGAMMA_SRGB_24 1
-# define NI_DEGAMMA_XVYCC_222 2
-# define NI_OVL_DEGAMMA_MODE(x) (((x) & 0x3) << 4)
-# define NI_ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
-# define NI_CURSOR_DEGAMMA_MODE(x) (((x) & 0x3) << 12)
-
-#define NI_GAMUT_REMAP_CONTROL 0x1a59
-# define NI_GRPH_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 0)
-# define NI_GAMUT_REMAP_BYPASS 0
-# define NI_GAMUT_REMAP_PROG_COEFF 1
-# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA 2
-# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB 3
-# define NI_OVL_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 4)
-
-#define NI_REGAMMA_CONTROL 0x1aa0
-# define NI_GRPH_REGAMMA_MODE(x) (((x) & 0x7) << 0)
-# define NI_REGAMMA_BYPASS 0
-# define NI_REGAMMA_SRGB_24 1
-# define NI_REGAMMA_XVYCC_222 2
-# define NI_REGAMMA_PROG_A 3
-# define NI_REGAMMA_PROG_B 4
-# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4)
-
-
-#define NI_PRESCALE_GRPH_CONTROL 0x1a2d
-# define NI_GRPH_PRESCALE_BYPASS (1 << 4)
-
-#define NI_PRESCALE_OVL_CONTROL 0x1a31
-# define NI_OVL_PRESCALE_BYPASS (1 << 4)
-
-#define NI_INPUT_GAMMA_CONTROL 0x1a10
-# define NI_GRPH_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 0)
-# define NI_INPUT_GAMMA_USE_LUT 0
-# define NI_INPUT_GAMMA_BYPASS 1
-# define NI_INPUT_GAMMA_SRGB_24 2
-# define NI_INPUT_GAMMA_XVYCC_222 3
-# define NI_OVL_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 4)
-
-#define BLACKOUT_MODE_MASK 0x00000007
-#define VGA_RENDER_CONTROL 0xC0
-#define R_000300_VGA_RENDER_CONTROL 0xC0
-#define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF
-#define EVERGREEN_CRTC_STATUS 0x1BA3
-#define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_POSITION 0x1BA4
-/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
-#define EVERGREEN_CRTC_V_BLANK_START_END 0x1b8d
-#define EVERGREEN_CRTC_CONTROL 0x1b9c
-# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
-# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
-#define EVERGREEN_CRTC_BLANK_CONTROL 0x1b9d
-# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
-# define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x1ba8
-#define EVERGREEN_CRTC_UPDATE_LOCK 0x1bb5
-#define EVERGREEN_MASTER_UPDATE_LOCK 0x1bbd
-#define EVERGREEN_MASTER_UPDATE_MODE 0x1bbe
-#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x1a04
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x1a05
-#define EVERGREEN_GRPH_UPDATE 0x1a11
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0xc4
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0xc9
-#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
-
-#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10
-#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
-#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80
-#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
-#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x400
-#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
-#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x2000
-#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xd
-#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10000
-#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
-#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80000
-#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x13
-
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID_MASK 0x1e000000
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID__SHIFT 0x19
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS_MASK 0xff
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS__SHIFT 0x0
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID_MASK 0xff000
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID__SHIFT 0xc
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW_MASK 0x1000000
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW__SHIFT 0x18
-
-#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE_MASK 0x7
-#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE__SHIFT 0x0
-
-#define mmBIF_FB_EN__xxFB_READ_EN_MASK 0x1
-#define mmBIF_FB_EN__xxFB_READ_EN__SHIFT 0x0
-#define mmBIF_FB_EN__xxFB_WRITE_EN_MASK 0x2
-#define mmBIF_FB_EN__xxFB_WRITE_EN__SHIFT 0x1
-
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC_MASK 0x20000
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC__SHIFT 0x11
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC_MASK 0x800
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC__SHIFT 0xb
+#define GRPH_FORMAT_ARGB8888 0
+#define GRPH_FORMAT_ARGB2101010 1
+#define GRPH_FORMAT_32BPP_DIG 2
+#define GRPH_FORMAT_8B_ARGB2101010 3
+#define GRPH_FORMAT_BGRA1010102 4
+#define GRPH_FORMAT_8B_BGRA1010102 5
+#define GRPH_FORMAT_RGB111110 6
+#define GRPH_FORMAT_BGR101111 7
+
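Note that the GRPH_FORMAT_* values are only meaningful relative to the programmed depth: format 0 means INDEXED at 8 bpp, ARGB1555 at 16 bpp and ARGB8888 at 32 bpp. A hedged sketch of composing a control word, reusing the bit positions that the removed EVERGREEN_GRPH_DEPTH() ([1:0]) and EVERGREEN_GRPH_FORMAT() ([10:8]) macros encoded:

#include <linux/types.h>

/* Illustrative only: 32bpp ARGB8888 under the assumed EVERGREEN
 * field layout (depth in bits [1:0], format in bits [10:8]).
 */
static u32 grph_control_argb8888(void)
{
	return ((GRPH_DEPTH_32BPP & 0x3) << 0) |
	       ((GRPH_FORMAT_ARGB8888 & 0x7) << 8);
}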
+#define GRPH_ENDIAN_NONE 0
+#define GRPH_ENDIAN_8IN16 1
+#define GRPH_ENDIAN_8IN32 2
+#define GRPH_ENDIAN_8IN64 3
+#define GRPH_RED_SEL_R 0
+#define GRPH_RED_SEL_G 1
+#define GRPH_RED_SEL_B 2
+#define GRPH_RED_SEL_A 3
+
+#define GRPH_GREEN_SEL_G 0
+#define GRPH_GREEN_SEL_B 1
+#define GRPH_GREEN_SEL_A 2
+#define GRPH_GREEN_SEL_R 3
+
+#define GRPH_BLUE_SEL_B 0
+#define GRPH_BLUE_SEL_A 1
+#define GRPH_BLUE_SEL_R 2
+#define GRPH_BLUE_SEL_G 3
+
+#define GRPH_ALPHA_SEL_A 0
+#define GRPH_ALPHA_SEL_R 1
+#define GRPH_ALPHA_SEL_G 2
+#define GRPH_ALPHA_SEL_B 3
+
+/* CUR_CONTROL */
+# define CURSOR_MONO 0
+# define CURSOR_24_1 1
+# define CURSOR_24_8_PRE_MULT 2
+# define CURSOR_24_8_UNPRE_MULT 3
+# define CURSOR_URGENT_ALWAYS 0
+# define CURSOR_URGENT_1_8 1
+# define CURSOR_URGENT_1_4 2
+# define CURSOR_URGENT_3_8 3
+# define CURSOR_URGENT_1_2 4
+
+/* INPUT_CSC_CONTROL */
+# define INPUT_CSC_BYPASS 0
+# define INPUT_CSC_PROG_COEFF 1
+# define INPUT_CSC_PROG_SHARED_MATRIXA 2
+
+/* OUTPUT_CSC_CONTROL */
+# define OUTPUT_CSC_BYPASS 0
+# define OUTPUT_CSC_TV_RGB 1
+# define OUTPUT_CSC_YCBCR_601 2
+# define OUTPUT_CSC_YCBCR_709 3
+# define OUTPUT_CSC_PROG_COEFF 4
+# define OUTPUT_CSC_PROG_SHARED_MATRIXB 5
+
+/* DEGAMMA_CONTROL */
+# define DEGAMMA_BYPASS 0
+# define DEGAMMA_SRGB_24 1
+# define DEGAMMA_XVYCC_222 2
+
+/* GAMUT_REMAP_CONTROL */
+# define GAMUT_REMAP_BYPASS 0
+# define GAMUT_REMAP_PROG_COEFF 1
+# define GAMUT_REMAP_PROG_SHARED_MATRIXA 2
+# define GAMUT_REMAP_PROG_SHARED_MATRIXB 3
+
+/* REGAMMA_CONTROL */
+# define REGAMMA_BYPASS 0
+# define REGAMMA_SRGB_24 1
+# define REGAMMA_XVYCC_222 2
+# define REGAMMA_PROG_A 3
+# define REGAMMA_PROG_B 4
+
+/* INPUT_GAMMA_CONTROL */
+# define INPUT_GAMMA_USE_LUT 0
+# define INPUT_GAMMA_BYPASS 1
+# define INPUT_GAMMA_SRGB_24 2
+# define INPUT_GAMMA_XVYCC_222 3
#define MC_SEQ_MISC0__MT__MASK 0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
@@ -2113,20 +785,9 @@
#define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
#define PACKET3_SEM_SEL_WAIT (0x7 << 29)
-#define CONFIG_CNTL 0x1509
-#define CC_DRM_ID_STRAPS 0X1559
#define AMDGPU_PCIE_INDEX 0xc
#define AMDGPU_PCIE_DATA 0xd
-#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0x3411
-#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0x3412
-#define DMA_MODE 0x342f
-#define DMA_RB_RPTR_ADDR_HI 0x3407
-#define DMA_RB_RPTR_ADDR_LO 0x3408
-#define DMA_BUSY_MASK 0x20
-#define DMA1_BUSY_MASK 0X40
-#define SDMA_MAX_INSTANCE 2
-
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
#define PCIE_PORT_INDEX 0xe
@@ -2136,8 +797,6 @@
#define EVERGREEN_PIF_PHY1_INDEX 0x10
#define EVERGREEN_PIF_PHY1_DATA 0x14
-#define MC_VM_FB_OFFSET 0x81a
-
/* Discrete VCE clocks */
#define CG_VCEPLL_FUNC_CNTL 0xc0030600
#define VCEPLL_RESET_MASK 0x00000001
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 659eab9b90be..c457be3a3c56 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -584,6 +584,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
* Enable triggering of GPU reset only if specified
* by module parameter.
*/
+ if (adev->pcie_reset_ctx.in_link_reset)
+ return AMD_RESET_METHOD_LINK;
if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5)
return AMD_RESET_METHOD_MODE2;
else if (!(adev->flags & AMD_IS_APU))
@@ -640,6 +642,9 @@ asic_reset:
case AMD_RESET_METHOD_MODE2:
dev_info(adev->dev, "MODE2 reset\n");
return amdgpu_dpm_mode2_reset(adev);
+ case AMD_RESET_METHOD_LINK:
+ dev_info(adev->dev, "Link reset\n");
+ return amdgpu_device_link_reset(adev);
default:
dev_info(adev->dev, "MODE1 reset\n");
return amdgpu_device_mode1_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index a5000c171c02..cf93fa477674 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -552,6 +552,11 @@
# define PACKET3_QUERY_STATUS_DOORBELL_OFFSET(x) ((x) << 2)
# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25)
+#define PACKET3_RUN_CLEANER_SHADER_9_0 0xD7
+/* 1. header
+ * 2. RESERVED [31:0]
+ */
+
#define PACKET3_RUN_CLEANER_SHADER 0xD2
/* 1. header
* 2. RESERVED [31:0]
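Per the layout comments, both cleaner-shader opcodes describe a two-dword packet: the PACKET3 header plus one reserved dword. A hedged sketch of an emitter, mirroring how the existing PACKET3_RUN_CLEANER_SHADER is used (driver ring helpers assumed):

/* Illustrative sketch, not part of the patch. */
static void emit_run_cleaner_shader_9_0(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER_9_0, 0));
	amdgpu_ring_write(ring, 0);	/* RESERVED [31:0] */
}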
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 0e404c074975..e590cbdd8de9 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -174,19 +174,76 @@ static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
umc_v12_0_reset_error_count(adev);
}
+static void umc_v12_0_get_retire_flip_bits(struct amdgpu_device *adev)
+{
+ enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
+ uint32_t vram_type = adev->gmc.vram_type;
+ struct amdgpu_umc_flip_bits *flip_bits = &(adev->umc.flip_bits);
+
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+
+ /* default setting */
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_C2_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C3_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_C4_BIT;
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R13_BIT;
+ flip_bits->flip_row_bit = 13;
+ flip_bits->bit_num = 4;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R13_BIT;
+
+ if (nps == AMDGPU_NPS2_PARTITION_MODE) {
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH5_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C2_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B1_BIT;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R12_BIT;
+ } else if (nps == AMDGPU_NPS4_PARTITION_MODE) {
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH4_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_CH5_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B0_BIT;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R11_BIT;
+ }
+
+ switch (vram_type) {
+ case AMDGPU_VRAM_TYPE_HBM:
+ /* other nps modes are taken as nps1 */
+ if (nps == AMDGPU_NPS2_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+ else if (nps == AMDGPU_NPS4_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+
+ break;
+ case AMDGPU_VRAM_TYPE_HBM3E:
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+ flip_bits->flip_row_bit = 12;
+
+ if (nps == AMDGPU_NPS2_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+ else if (nps == AMDGPU_NPS4_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R10_BIT;
+
+ break;
+ default:
+ dev_warn(adev->dev,
+ "Unknown HBM type, set RAS retire flip bits to the value in NPS1 mode.\n");
+ break;
+ }
+
+ adev->umc.retire_unit = 0x1 << flip_bits->bit_num;
+}
+
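With the NPS1/HBM defaults above, bit_num is 4, so retire_unit is 1 << 4 = 16: one bad page expands to sixteen candidate physical addresses, produced by sweeping a 4-bit counter across the C2/C3/C4/R13 positions. A hedged sketch of that expansion, with the same loop shape as the conversion routine below:

#include <linux/types.h>

/* Illustrative only: enumerate every retire-unit alias of base_pa,
 * which must already have its flip bits cleared; out needs
 * 1 << bit_num entries.
 */
static void expand_retire_unit(u64 base_pa, const u32 *flip_bits,
			       u32 bit_num, u64 *out)
{
	u64 column;
	u32 i;

	for (column = 0; column < (1ULL << bit_num); column++) {
		u64 pa = base_pa;

		for (i = 0; i < bit_num; i++)
			pa |= ((column >> i) & 0x1ULL) << flip_bits[i];
		out[column] = pa;
	}
}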
static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
struct ta_ras_query_address_input *addr_in,
struct ta_ras_query_address_output *addr_out,
bool dump_addr)
{
- uint32_t col, col_lower, row, row_lower, bank;
+ uint32_t col, col_lower, row, row_lower, row_high, bank;
uint32_t channel_index = 0, umc_inst = 0;
- uint32_t i, loop_bits[UMC_V12_0_RETIRE_LOOP_BITS];
+ uint32_t i, bit_num, retire_unit, *flip_bits;
uint64_t soc_pa, column, err_addr;
struct ta_ras_query_address_output addr_out_tmp;
struct ta_ras_query_address_output *paddr_out;
- enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
int ret = 0;
if (!addr_out)
@@ -211,46 +268,46 @@ static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
umc_inst = addr_in->ma.umc_inst;
}
- loop_bits[0] = UMC_V12_0_PA_C2_BIT;
- loop_bits[1] = UMC_V12_0_PA_C3_BIT;
- loop_bits[2] = UMC_V12_0_PA_C4_BIT;
- loop_bits[3] = UMC_V12_0_PA_R13_BIT;
-
- if (adev->gmc.gmc_funcs->query_mem_partition_mode)
- nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
-
- /* other nps modes are taken as nps1 */
- if (nps == AMDGPU_NPS4_PARTITION_MODE) {
- loop_bits[0] = UMC_V12_0_PA_CH4_BIT;
- loop_bits[1] = UMC_V12_0_PA_CH5_BIT;
- loop_bits[2] = UMC_V12_0_PA_B0_BIT;
- loop_bits[3] = UMC_V12_0_PA_R11_BIT;
- }
+ flip_bits = adev->umc.flip_bits.flip_bits_in_pa;
+ bit_num = adev->umc.flip_bits.bit_num;
+ retire_unit = adev->umc.retire_unit;
soc_pa = paddr_out->pa.pa;
channel_index = paddr_out->pa.channel_idx;
/* clear loop bits in soc physical address */
- for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
- soc_pa &= ~BIT_ULL(loop_bits[i]);
+ for (i = 0; i < bit_num; i++)
+ soc_pa &= ~BIT_ULL(flip_bits[i]);
paddr_out->pa.pa = soc_pa;
/* get column bit 0 and 1 in mca address */
col_lower = (err_addr >> 1) & 0x3ULL;
- /* MA_R13_BIT will be handled later */
+ /* extra row bit will be handled later */
row_lower = (err_addr >> UMC_V12_0_MA_R0_BIT) & 0x1fffULL;
+ row_lower &= ~BIT_ULL(adev->umc.flip_bits.flip_row_bit);
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 5, 0)) {
+ row_high = (soc_pa >> adev->umc.flip_bits.r13_in_pa) & 0x3ULL;
+ /* Each channel covers 2.25GB; in the MCA address to PA
+ * conversion, [R14 R13] is only translated when those two bits
+ * read 0x3, so take them from the PA instead of the MCA address.
+ */
+ row_lower |= (row_high << 13);
+ }
if (!err_data && !dump_addr)
goto out;
/* loop for all possibilities of retired bits */
- for (column = 0; column < UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; column++) {
+ for (column = 0; column < retire_unit; column++) {
soc_pa = paddr_out->pa.pa;
- for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
- soc_pa |= (((column >> i) & 0x1ULL) << loop_bits[i]);
+ for (i = 0; i < bit_num; i++)
+ soc_pa |= (((column >> i) & 0x1ULL) << flip_bits[i]);
col = ((column & 0x7) << 2) | col_lower;
- /* add row bit 13 */
- row = ((column >> 3) << 13) | row_lower;
+ /* handle extra row bit */
+ if (bit_num == RETIRE_FLIP_BITS_NUM)
+ row = ((column >> 3) << adev->umc.flip_bits.flip_row_bit) |
+ row_lower;
if (dump_addr)
dev_info(adev->dev,
@@ -428,8 +485,12 @@ static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank
bank->regs[ACA_REG_IDX_ADDR]);
ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
- count = ext_error_code == 0 ?
- ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;
+ if (umc_v12_0_is_deferred_error(adev, status))
+ count = ext_error_code == 0 ?
+ adev->umc.err_addr_cnt / adev->umc.retire_unit : 1ULL;
+ else
+ count = ext_error_code == 0 ?
+ ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;
return aca_error_cache_log_bank_error(handle, &info, err_type, count);
}
@@ -469,8 +530,7 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
uint64_t err_addr, pa_addr = 0;
struct ras_ecc_err *ecc_err;
struct ta_ras_query_address_output addr_out;
- enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
- uint32_t shift_bit = UMC_V12_0_PA_C4_BIT;
+ uint32_t shift_bit = adev->umc.flip_bits.flip_bits_in_pa[2];
int count, ret, i;
hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
@@ -515,11 +575,6 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
ecc_err->pa_pfn = pa_addr >> AMDGPU_GPU_PAGE_SHIFT;
ecc_err->channel_idx = addr_out.pa.channel_idx;
- if (adev->gmc.gmc_funcs->query_mem_partition_mode)
- nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
- if (nps == AMDGPU_NPS4_PARTITION_MODE)
- shift_bit = UMC_V12_0_PA_B0_BIT;
-
/* If converted pa_pfn is 0, use pa C4 pfn. */
if (!ecc_err->pa_pfn)
ecc_err->pa_pfn = BIT_ULL(shift_bit) >> AMDGPU_GPU_PAGE_SHIFT;
@@ -665,5 +720,6 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
.update_ecc_status = umc_v12_0_update_ecc_status,
.convert_ras_err_addr = umc_v12_0_convert_error_address,
.get_die_id_from_pa = umc_v12_0_get_die_id,
+ .get_retire_flip_bits = umc_v12_0_get_retire_flip_bits,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
index 9298018d938f..63b7e7254526 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
@@ -55,8 +55,6 @@
#define UMC_V12_0_NA_MAP_PA_NUM 8
/* R13 bit shift should be considered, double the number */
#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2)
-/* C2, C3, C4, R13, four bits in MCA address are looped in retirement */
-#define UMC_V12_0_RETIRE_LOOP_BITS 4
/* column bits in SOC physical address */
#define UMC_V12_0_PA_C2_BIT 15
@@ -64,13 +62,16 @@
#define UMC_V12_0_PA_C4_BIT 21
/* row bits in SOC physical address */
#define UMC_V12_0_PA_R0_BIT 22
+#define UMC_V12_0_PA_R10_BIT 32
#define UMC_V12_0_PA_R11_BIT 33
+#define UMC_V12_0_PA_R12_BIT 34
#define UMC_V12_0_PA_R13_BIT 35
/* channel bit in SOC physical address */
#define UMC_V12_0_PA_CH4_BIT 12
#define UMC_V12_0_PA_CH5_BIT 13
/* bank bit in SOC physical address */
#define UMC_V12_0_PA_B0_BIT 19
+#define UMC_V12_0_PA_B1_BIT 20
/* row bits in MCA address */
#define UMC_V12_0_MA_R0_BIT 10
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 1f777c125b00..8fff470bce87 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -239,9 +239,9 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
}
- /* TODO: Add queue reset mask when FW fully supports it */
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
@@ -1947,6 +1947,20 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
return 0;
}
+static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+ return -EOPNOTSUPP;
+
+ vcn_v4_0_stop(vinst);
+ vcn_v4_0_start(vinst);
+
+ return amdgpu_ring_test_helper(ring);
+}
+
static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1976,6 +1990,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v4_0_ring_reset,
};
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 012f6ea928ec..712e1fba33ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -288,6 +288,31 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int vcn_v4_0_3_hw_init_inst(struct amdgpu_vcn_inst *vinst)
+{
+ int vcn_inst;
+ struct amdgpu_device *adev = vinst->adev;
+ struct amdgpu_ring *ring;
+ int inst_idx = vinst->inst;
+
+ vcn_inst = GET_INST(VCN, inst_idx);
+ ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+ if (ring->use_doorbell) {
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst,
+ adev->vcn.inst[inst_idx].aid_id);
+
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ /* Read DB_CTRL to flush the write DB_CTRL command. */
+ RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
+ }
+
+ return 0;
+}
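The RREG32_SOC15() read-back at the end is the standard idiom for flushing a posted MMIO write before the doorbell is relied on; the general pattern, with a hypothetical helper name:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical helper illustrating the read-back flush used above. */
static inline void mmio_write_flushed(void __iomem *reg, u32 val)
{
	writel(val, reg);	/* MMIO writes may be posted */
	(void)readl(reg);	/* the read forces the write to complete */
}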
+
/**
* vcn_v4_0_3_hw_init - start and test VCN block
*
@@ -299,7 +324,8 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
- int i, r, vcn_inst;
+ struct amdgpu_vcn_inst *vinst;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
r = vcn_v4_0_3_start_sriov(adev);
@@ -322,28 +348,9 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
struct amdgpu_vcn4_fw_shared *fw_shared;
- vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
-
- if (ring->use_doorbell) {
- adev->nbio.funcs->vcn_doorbell_range(
- adev, ring->use_doorbell,
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 9 * vcn_inst,
- adev->vcn.inst[i].aid_id);
-
- WREG32_SOC15(
- VCN, GET_INST(VCN, ring->me),
- regVCN_RB1_DB_CTRL,
- ring->doorbell_index
- << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
-
- /* Read DB_CTRL to flush the write DB_CTRL command. */
- RREG32_SOC15(
- VCN, GET_INST(VCN, ring->me),
- regVCN_RB1_DB_CTRL);
- }
+ vinst = &adev->vcn.inst[i];
+ vcn_v4_0_3_hw_init_inst(vinst);
/* Re-init fw_shared when RAS fatal error occurred */
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
@@ -1564,6 +1571,37 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ int r = 0;
+ int vcn_inst;
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ if (amdgpu_sriov_vf(ring->adev))
+ return -EOPNOTSUPP;
+
+ if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+ return -EOPNOTSUPP;
+
+ vcn_inst = GET_INST(VCN, ring->me);
+ r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);
+
+ if (r) {
+ DRM_DEV_ERROR(adev->dev, "VCN reset failed: %d\n", r);
+ return r;
+ }
+
+ /* This flag is never set on a VF; RRMT is assumed to be always disabled */
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
+ adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
+ vcn_v4_0_3_hw_init_inst(vinst);
+ vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[ring->me].indirect_sram);
+ r = amdgpu_ring_test_helper(ring);
+
+ return r;
+}
+
static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1592,6 +1630,7 @@ static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
.emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v4_0_3_ring_reset,
};
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index f11df9c2ec13..a09f9a2dd471 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -208,6 +208,10 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+ fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
+ fw_shared->drm_key_wa.method =
+ AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
+
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
@@ -215,6 +219,13 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_5_pause_dpg_mode;
}
+ adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -1444,6 +1455,20 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v4_0_5_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+ return -EOPNOTSUPP;
+
+ vcn_v4_0_5_stop(vinst);
+ vcn_v4_0_5_start(vinst);
+
+ return amdgpu_ring_test_helper(ring);
+}
+
static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1471,6 +1496,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v4_0_5_ring_reset,
};
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index b90da3d3e140..27dcc6f37a73 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -196,9 +196,9 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[i].pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
}
- /* TODO: Add queue reset mask when FW fully supports it */
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
vcn_v5_0_0_alloc_ip_dump(adev);
@@ -1172,6 +1172,20 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+ return -EOPNOTSUPP;
+
+ vcn_v5_0_0_stop(vinst);
+ vcn_v5_0_0_start(vinst);
+
+ return amdgpu_ring_test_helper(ring);
+}
+
static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1199,6 +1213,7 @@ static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v5_0_0_ring_reset,
};
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index e0e84ef7f568..8e843011703c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -30,6 +30,7 @@
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
#include "vcn_v4_0_3.h"
+#include "mmsch_v5_0.h"
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
@@ -39,6 +40,7 @@
#include <drm/drm_drv.h>
+static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
@@ -126,7 +128,14 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->vcn.inst[i].ring_enc[0];
ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst;
+ if (!amdgpu_sriov_vf(adev))
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 11 * vcn_inst;
+ else
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 32 * vcn_inst;
ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
@@ -143,6 +152,12 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_alloc_mm_table(adev);
+ if (r)
+ return r;
+ }
+
vcn_v5_0_0_alloc_ip_dump(adev);
return amdgpu_vcn_sysfs_reset_mask_init(adev);
@@ -172,6 +187,9 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
drm_dev_exit(idx);
}
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_free_mm_table(adev);
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_suspend(adev, i);
if (r)
@@ -204,24 +222,38 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
int i, r, vcn_inst;
- if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
- adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- vcn_inst = GET_INST(VCN, i);
- ring = &adev->vcn.inst[i].ring_enc[0];
-
- if (ring->use_doorbell)
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 9 * vcn_inst),
- adev->vcn.inst[i].aid_id);
-
- /* Re-init fw_shared, if required */
- vcn_v5_0_1_fw_shared_init(adev, i);
-
- r = amdgpu_ring_test_helper(ring);
+ if (amdgpu_sriov_vf(adev)) {
+ r = vcn_v5_0_1_start_sriov(adev);
if (r)
return r;
+
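+ /* MMSCH has brought up the rings; reset host-side ring state and mark them schedulable */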
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ vcn_v5_0_1_unified_ring_set_wptr(ring);
+ ring->sched.ready = true;
+ }
+ } else {
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
+ adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ vcn_inst = GET_INST(VCN, i);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ if (ring->use_doorbell)
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 11 * vcn_inst),
+ adev->vcn.inst[i].aid_id);
+
+ /* Re-init fw_shared, if required */
+ vcn_v5_0_1_fw_shared_init(adev, i);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
}
return 0;
@@ -663,6 +695,195 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
return 0;
}
+static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev)
+{
+ int i, vcn_inst;
+ struct amdgpu_ring *ring_enc;
+ uint64_t cache_addr;
+ uint64_t rb_enc_addr;
+ uint64_t ctx_addr;
+ uint32_t param, resp, expected;
+ uint32_t offset, cache_size;
+ uint32_t tmp, timeout;
+
+ struct amdgpu_mm_table *table = &adev->virt.mm_table;
+ uint32_t *table_loc;
+ uint32_t table_size;
+ uint32_t size, size_dw;
+ uint32_t init_status;
+ uint32_t enabled_vcn;
+
+ struct mmsch_v5_0_cmd_direct_write
+ direct_wt = { {0} };
+ struct mmsch_v5_0_cmd_direct_read_modify_write
+ direct_rd_mod_wt = { {0} };
+ struct mmsch_v5_0_cmd_end end = { {0} };
+ struct mmsch_v5_0_init_header header;
+
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+
+ direct_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_WRITE;
+ direct_rd_mod_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+ end.cmd_header.command_type = MMSCH_COMMAND__END;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ vcn_inst = GET_INST(VCN, i);
+
+ vcn_v5_0_1_fw_shared_init(adev, vcn_inst);
+
+ memset(&header, 0, sizeof(struct mmsch_v5_0_init_header));
+ header.version = MMSCH_VERSION;
+ header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2;
+
+ table_loc = (uint32_t *)table->cpu_addr;
+ table_loc += header.total_size;
+
+ table_size = 0;
+
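+ /* Build this instance's MMSCH init table: firmware cache layout, ring buffer and fw-shared registers */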
+ MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
+ ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
+
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
+
+ offset = 0;
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET0), 0);
+ } else {
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].gpu_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].gpu_addr));
+ offset = cache_size;
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_SIZE0),
+ cache_size);
+
+ cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset;
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(cache_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET1), 0);
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE);
+
+ cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE;
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(cache_addr));
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET2), 0);
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE);
+
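+ /* Describe the encode ring to the firmware through the shared rb_setup block */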
+ fw_shared = adev->vcn.inst[vcn_inst].fw_shared.cpu_addr;
+ rb_setup = &fw_shared->rb_setup;
+
+ ring_enc = &adev->vcn.inst[vcn_inst].ring_enc[0];
+ ring_enc->wptr = 0;
+ rb_enc_addr = ring_enc->gpu_addr;
+
+ rb_setup->is_rb_enabled_flags |= RB_ENABLED;
+ rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
+ rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
+ rb_setup->rb_size = ring_enc->ring_size / 4;
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+ MMSCH_V5_0_INSERT_END();
+
+ header.vcn0.init_status = 0;
+ header.vcn0.table_offset = header.total_size;
+ header.vcn0.table_size = table_size;
+ header.total_size += table_size;
+
+ /* Send init table to mmsch */
+ size = sizeof(struct mmsch_v5_0_init_header);
+ table_loc = (uint32_t *)table->cpu_addr;
+ memcpy((void *)table_loc, &header, size);
+
+ ctx_addr = table->gpu_addr;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID);
+ tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID, tmp);
+
+ size = header.total_size;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_SIZE, size);
+
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP, 0);
+
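+ /* Kick MMSCH via the host mailbox and poll for a response (10 us steps, up to 1000 us) */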
+ param = 0x00000001;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_HOST, param);
+ tmp = 0;
+ timeout = 1000;
+ resp = 0;
+ expected = MMSCH_VF_MAILBOX_RESP__OK;
+ while (resp != expected) {
+ resp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP);
+ if (resp != 0)
+ break;
+
+ udelay(10);
+ tmp = tmp + 10;
+ if (tmp >= timeout) {
+ DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec waiting for regMMSCH_VF_MAILBOX_RESP (expected=0x%08x, readback=0x%08x)\n",
+ tmp, expected, resp);
+ return -EBUSY;
+ }
+ }
+
+ enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
+ init_status = ((struct mmsch_v5_0_init_header *)(table_loc))->vcn0.init_status;
+ if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
+ && init_status != MMSCH_VF_ENGINE_STATUS__PASS) {
+ DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for VCN%x: 0x%x\n",
+ resp, enabled_vcn, init_status);
+ }
+ }
+
+ return 0;
+}
+
/**
* vcn_v5_0_1_start - VCN start
*
@@ -1103,8 +1324,18 @@ static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
enum amd_powergating_state state)
{
+ struct amdgpu_device *adev = vinst->adev;
int ret = 0;
+ /* for SRIOV, guest should not control VCN Power-gating
+ * MMSCH FW should control Power-gating and clock-gating
+ * guest should avoid touching CGC and PG
+ */
+ if (amdgpu_sriov_vf(adev)) {
+ vinst->cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
if (state == vinst->cur_state)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index faa0dd75dd6d..85846fd08ce4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -350,6 +350,7 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
if (ret)
return ret;
}
+ ih[i]->overflow = false;
}
if (!amdgpu_sriov_vf(adev))
@@ -437,7 +438,10 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ if (!amdgpu_sriov_vf(adev))
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ else
+ ih->overflow = true;
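+ /* A VF does not clear RB_OVERFLOW itself; record the overflow so IH clients can compensate */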
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 32). Hopefully
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index 4a5a0a4e00f2..9bde2c64540f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -27,6 +27,16 @@
#include "kfd_priv.h"
static struct dentry *debugfs_root;
+static struct dentry *debugfs_proc;
+static struct list_head procs;
+
+struct debugfs_proc_entry {
+ struct list_head list;
+ struct dentry *proc_dentry;
+ pid_t pid;
+};
+
+#define MAX_DEBUGFS_FILENAME_LEN 32
static int kfd_debugfs_open(struct inode *inode, struct file *file)
{
@@ -92,6 +102,8 @@ static const struct file_operations kfd_debugfs_hang_hws_fops = {
void kfd_debugfs_init(void)
{
debugfs_root = debugfs_create_dir("kfd", NULL);
+ debugfs_proc = debugfs_create_dir("proc", debugfs_root);
+ INIT_LIST_HEAD(&procs);
debugfs_create_file("mqds", S_IFREG | 0444, debugfs_root,
kfd_debugfs_mqds_by_process, &kfd_debugfs_fops);
@@ -107,5 +119,69 @@ void kfd_debugfs_init(void)
void kfd_debugfs_fini(void)
{
+ debugfs_remove_recursive(debugfs_proc);
debugfs_remove_recursive(debugfs_root);
}
+
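+/* Expose each process device's PASID as a read-only debugfs file */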
+static ssize_t kfd_debugfs_pasid_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct kfd_process_device *pdd = file_inode(file)->i_private;
+ char tmp[32];
+ int len;
+
+ len = snprintf(tmp, sizeof(tmp), "%u\n", pdd->pasid);
+
+ return simple_read_from_buffer(buf, count, ppos, tmp, len);
+}
+
+static const struct file_operations kfd_debugfs_pasid_fops = {
+ .owner = THIS_MODULE,
+ .read = kfd_debugfs_pasid_read,
+};
+
+void kfd_debugfs_add_process(struct kfd_process *p)
+{
+ int i;
+ char name[MAX_DEBUGFS_FILENAME_LEN];
+ struct debugfs_proc_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return;
+
+ list_add(&entry->list, &procs);
+ entry->pid = p->lead_thread->pid;
+ snprintf(name, MAX_DEBUGFS_FILENAME_LEN, "%d",
+ (int)entry->pid);
+ entry->proc_dentry = debugfs_create_dir(name, debugfs_proc);
+
+ /* Create debugfs files for each GPU:
+ * - proc/<pid>/pasid_<gpuid>
+ */
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ snprintf(name, MAX_DEBUGFS_FILENAME_LEN, "pasid_%u",
+ pdd->dev->id);
+ debugfs_create_file((const char *)name, S_IFREG | 0444,
+ entry->proc_dentry, pdd,
+ &kfd_debugfs_pasid_fops);
+ }
+}
+
+void kfd_debugfs_remove_process(struct kfd_process *p)
+{
+ struct debugfs_proc_entry *entry, *next;
+
+ mutex_lock(&kfd_processes_mutex);
+ list_for_each_entry_safe(entry, next, &procs, list) {
+ if (entry->pid != p->lead_thread->pid)
+ continue;
+
+ debugfs_remove_recursive(entry->proc_dentry);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ mutex_unlock(&kfd_processes_mutex);
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index b9c82be6ce13..bf0854bd5555 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -352,11 +352,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &aldebaran_kfd2kgd;
break;
case IP_VERSION(9, 4, 3):
- gfx_target_version = adev->rev_id >= 1 ? 90402
- : adev->flags & AMD_IS_APU ? 90400
- : 90401;
- f2g = &gc_9_4_3_kfd2kgd;
- break;
case IP_VERSION(9, 4, 4):
gfx_target_version = 90402;
f2g = &gc_9_4_3_kfd2kgd;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index c610e172a2b8..76359c6a3f3a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1576,8 +1576,9 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
int bit;
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
- dev_err(dev, "No more SDMA queue to allocate\n");
+ if (bitmap_empty(dqm->sdma_bitmap, get_num_sdma_queues(dqm))) {
+ dev_warn(dev, "No more SDMA queues to allocate (%d total queues)\n",
+ get_num_sdma_queues(dqm));
return -ENOMEM;
}
@@ -1602,8 +1603,9 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
q->properties.sdma_queue_id = q->sdma_id /
kfd_get_num_sdma_engines(dqm->dev);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
- dev_err(dev, "No more XGMI SDMA queue to allocate\n");
+ if (bitmap_empty(dqm->xgmi_sdma_bitmap, get_num_xgmi_sdma_queues(dqm))) {
+ dev_warn(dev, "No more XGMI SDMA queues to allocate (%d total queues)\n",
+ get_num_xgmi_sdma_queues(dqm));
return -ENOMEM;
}
if (restore_sdma_id) {
@@ -1662,8 +1664,8 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
}
if (!free_bit_found) {
- dev_err(dev, "No more SDMA queue to allocate for target ID %i\n",
- q->properties.sdma_engine_id);
+ dev_warn(dev, "No more SDMA queues to allocate for target ID %i (%d total queues)\n",
+ q->properties.sdma_engine_id, num_queues);
return -ENOMEM;
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index fecdb6794075..e54e708ed82d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -1177,6 +1177,25 @@ void kfd_signal_hw_exception_event(u32 pasid)
kfd_unref_process(p);
}
+void kfd_signal_vm_fault_event_with_userptr(struct kfd_process *p, uint64_t gpu_va)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_hsa_memory_exception_data exception_data;
+ int i;
+
+ memset(&exception_data, 0, sizeof(exception_data));
+ exception_data.va = gpu_va;
+ exception_data.failure.NotPresent = 1;
+
+ /* Send a VM segfault event to every KFD process device */
+ for (i = 0; i < p->n_pdds; i++) {
+ pdd = p->pdds[i];
+ exception_data.gpu_id = pdd->user_gpu_id;
+ kfd_evict_process_device(pdd);
+ kfd_signal_vm_fault_event(pdd, NULL, &exception_data);
+ }
+}
+
void kfd_signal_vm_fault_event(struct kfd_process_device *pdd,
struct kfd_vm_fault_info *info,
struct kfd_hsa_memory_exception_data *data)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
index 37b69fe0ede3..3e1ad8974797 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
@@ -168,14 +168,14 @@ static bool event_interrupt_isr_v10(struct kfd_node *dev,
client_id != SOC15_IH_CLIENTID_SE3SH)
return false;
- pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
- client_id, source_id, vmid, pasid);
- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5], data[6], data[7]);
+ dev_dbg(dev->adev->dev,
+ "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
+ dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+ data[0], data[1], data[2], data[3], data[4], data[5], data[6],
+ data[7]);
- /* If there is no valid PASID, it's likely a bug */
- if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
+ if (pasid == 0)
return 0;
/* Interrupt types we care about: various signals and faults.
@@ -217,37 +217,66 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
switch (encoding) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf0_full %d, ttrac_buf1_full %d, ttrace_utc_err %d\n",
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_AUTO_CTXID1,
- SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- WLT),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE_BUF0_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE_BUF1_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE_UTC_ERROR));
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_AUTO_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ WLT),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF0_FULL),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF1_FULL),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_UTC_ERROR));
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
- pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SA_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SIMD_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- WGP_ID));
+ dev_dbg_ratelimited(
+ dev->adev->dev,
+ "sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SA_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID));
if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK) {
if (kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
@@ -259,21 +288,37 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
sq_intr_err_type = REG_GET_FIELD(context_id0, KFD_CTXID0,
ERR_TYPE);
- pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SA_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SIMD_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- WGP_ID),
+ dev_warn_ratelimited(
+ dev->adev->dev,
+ "sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SA_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID),
sq_intr_err_type);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index c5f97e6e36ff..2788a52714d1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -148,44 +148,69 @@ enum SQ_INTERRUPT_ERROR_TYPE {
#define KFD_CTXID0_DOORBELL_ID(ctxid0) ((ctxid0) & \
KFD_CTXID0_DOORBELL_ID_MASK)
-static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
+static void print_sq_intr_info_auto(struct kfd_node *dev, uint32_t context_id0,
+ uint32_t context_id1)
{
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: auto, ttrace %d, wlt %d, ttrace_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, WLT),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE_BUF_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, REG_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, CMD_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, HOST_CMD_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, HOST_REG_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, IMMED_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE_UTC_ERROR));
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF_FULL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ REG_TIMESTAMP),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ CMD_TIMESTAMP),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ HOST_CMD_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ HOST_REG_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ IMMED_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_UTC_ERROR));
}
-static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
+static void print_sq_intr_info_inst(struct kfd_node *dev, uint32_t context_id0,
+ uint32_t context_id1)
{
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: inst, data 0x%08x, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, SH_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SH_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, WAVE_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, SIMD_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, WGP_ID));
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SIMD_ID),
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID));
}
-static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1)
+static void print_sq_intr_info_error(struct kfd_node *dev, uint32_t context_id0,
+ uint32_t context_id1)
{
- pr_warn_ratelimited(
+ dev_warn_ratelimited(
+ dev->adev->dev,
"sq_intr: error, detail 0x%08x, type %d, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, DETAIL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, SH_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, SIMD_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, WGP_ID));
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ DETAIL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ TYPE),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ SH_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ PRIV),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1,
+ SIMD_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1,
+ WGP_ID));
}
static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
@@ -255,14 +280,14 @@ static bool event_interrupt_isr_v11(struct kfd_node *dev,
(context_id0 & AMDGPU_FENCE_MES_QUEUE_FLAG))
return false;
- pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
- client_id, source_id, vmid, pasid);
- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5], data[6], data[7]);
+ dev_dbg(dev->adev->dev,
+ "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
+ dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+ data[0], data[1], data[2], data[3], data[4], data[5], data[6],
+ data[7]);
- /* If there is no valid PASID, it's likely a bug */
- if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
+ if (pasid == 0)
return false;
/* Interrupt types we care about: various signals and faults.
@@ -353,10 +378,10 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
switch (sq_int_enc) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
- print_sq_intr_info_auto(context_id0, context_id1);
+ print_sq_intr_info_auto(dev, context_id0, context_id1);
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
- print_sq_intr_info_inst(context_id0, context_id1);
+ print_sq_intr_info_inst(dev, context_id0, context_id1);
sq_int_priv = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV);
if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid,
@@ -366,7 +391,7 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
return;
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
- print_sq_intr_info_error(context_id0, context_id1);
+ print_sq_intr_info_error(dev, context_id0, context_id1);
sq_int_errtype = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE);
if (sq_int_errtype != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index b8a91bf4ef30..4ceb251312a6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -314,11 +314,12 @@ static bool event_interrupt_isr_v9(struct kfd_node *dev,
& ~pasid_mask) | pasid);
}
- pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
- client_id, source_id, vmid, pasid);
- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5], data[6], data[7]);
+ dev_dbg(dev->adev->dev,
+ "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
+ dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+ data[0], data[1], data[2], data[3], data[4], data[5], data[6],
+ data[7]);
/* If there is no valid PASID, it's likely a bug */
if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
@@ -379,28 +380,82 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING);
switch (encoding) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, WLT),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_BUF_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, REG_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, CMD_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_CMD_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_REG_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, IMMED_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR));
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ THREAD_TRACE),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ WLT),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ THREAD_TRACE_BUF_FULL),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ REG_TIMESTAMP),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ CMD_TIMESTAMP),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ HOST_CMD_OVERFLOW),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ HOST_REG_OVERFLOW),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ IMMED_OVERFLOW),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ THREAD_TRACE_UTC_ERROR));
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
- pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
+ dev_dbg_ratelimited(
+ dev->adev->dev,
+ "sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SH_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ CU_ID),
sq_int_data);
if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK) {
if (kfd_set_dbg_ev_from_interrupt(dev, pasid,
@@ -412,14 +467,37 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
- pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
+ dev_warn_ratelimited(
+ dev->adev->dev,
+ "sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SH_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ CU_ID),
sq_intr_err);
if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 80320a6c8854..97933d2a3803 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -495,6 +495,10 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
m->sdma_engine_id = q->sdma_engine_id;
m->sdma_queue_id = q->sdma_queue_id;
m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
+ /* Allow context switching inside the IB so a massive buffer of
+ * long-running SDMA commands can't starve other processes
+ */
+ m->sdmax_rlcx_ib_cntl |= SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK;
q->is_active = QUEUE_IS_ACTIVE(*q);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 271c567242ab..b1a6eb349bb3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -31,6 +31,7 @@
#define OVER_SUBSCRIPTION_PROCESS_COUNT (1 << 0)
#define OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT (1 << 1)
#define OVER_SUBSCRIPTION_GWS_QUEUE_COUNT (1 << 2)
+#define OVER_SUBSCRIPTION_XNACK_CONFLICT (1 << 3)
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
unsigned int buffer_size_bytes)
@@ -44,7 +45,8 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int *rlib_size,
- int *over_subscription)
+ int *over_subscription,
+ int xnack_conflict)
{
unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
unsigned int map_queue_size;
@@ -73,6 +75,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
*over_subscription |= OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT;
if (gws_queue_count > 1)
*over_subscription |= OVER_SUBSCRIPTION_GWS_QUEUE_COUNT;
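+ /* Mixing XNACK on/off processes forces an oversubscribed runlist when the chain flag is set */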
+ if (xnack_conflict && (node->adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN))
+ *over_subscription |= OVER_SUBSCRIPTION_XNACK_CONFLICT;
if (*over_subscription)
dev_dbg(dev, "Over subscribed runlist\n");
@@ -96,7 +100,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
unsigned int **rl_buffer,
uint64_t *rl_gpu_buffer,
unsigned int *rl_buffer_size,
- int *is_over_subscription)
+ int *is_over_subscription,
+ int xnack_conflict)
{
struct kfd_node *node = pm->dqm->dev;
struct device *dev = node->adev->dev;
@@ -105,7 +110,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
if (WARN_ON(pm->allocated))
return -EINVAL;
- pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
+ pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription,
+ xnack_conflict);
mutex_lock(&pm->lock);
@@ -142,11 +148,27 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
struct queue *q;
struct kernel_queue *kq;
int is_over_subscription;
+ int xnack_enabled = -1;
+ bool xnack_conflict = false;
rl_wptr = retval = processes_mapped = 0;
+ /* Check if processes set different xnack modes */
+ list_for_each_entry(cur, queues, list) {
+ qpd = cur->qpd;
+ if (xnack_enabled < 0)
+ /* First process */
+ xnack_enabled = qpd->pqm->process->xnack_enabled;
+ else if (qpd->pqm->process->xnack_enabled != xnack_enabled) {
+ /* Found a process with a different xnack mode */
+ xnack_conflict = true;
+ break;
+ }
+ }
+
retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
- &alloc_size_bytes, &is_over_subscription);
+ &alloc_size_bytes, &is_over_subscription,
+ xnack_conflict);
if (retval)
return retval;
@@ -156,9 +178,13 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
dev_dbg(dev, "Building runlist ib process count: %d queues count %d\n",
pm->dqm->processes_count, pm->dqm->active_queue_count);
+build_runlist_ib:
/* build the run list ib packet */
list_for_each_entry(cur, queues, list) {
qpd = cur->qpd;
+ /* group processes with the same xnack mode together */
+ if (qpd->pqm->process->xnack_enabled != xnack_enabled)
+ continue;
/* build map process packet */
if (processes_mapped >= pm->dqm->processes_count) {
dev_dbg(dev, "Not enough space left in runlist IB\n");
@@ -215,18 +241,26 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
alloc_size_bytes);
}
}
+ if (xnack_conflict) {
+ /* pick up processes with the other xnack mode */
+ xnack_enabled = !xnack_enabled;
+ xnack_conflict = false;
+ goto build_runlist_ib;
+ }
dev_dbg(dev, "Finished map process and queues to runlist\n");
if (is_over_subscription) {
if (!pm->is_over_subscription)
- dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s. Expect reduced ROCm performance.\n",
- is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ?
- " too many processes." : "",
- is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ?
- " too many queues." : "",
- is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ?
- " multiple processes using cooperative launch." : "");
+ dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s%s. Expect reduced ROCm performance.\n",
+ is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ?
+ " too many processes" : "",
+ is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ?
+ " too many queues" : "",
+ is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ?
+ " multiple processes using cooperative launch" : "",
+ is_over_subscription & OVER_SUBSCRIPTION_XNACK_CONFLICT ?
+ " xnack on/off processes mixed on gfx9" : "");
retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
*rl_gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 2893fd5e5d00..8fa6489b6f5d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -43,7 +43,7 @@ static int pm_map_process_v9(struct packet_manager *pm,
memset(buffer, 0, sizeof(struct pm4_mes_map_process));
packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
sizeof(struct pm4_mes_map_process));
- if (adev->enforce_isolation[kfd->node_id])
+ if (adev->enforce_isolation[kfd->node_id] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
packet->bitfields2.exec_cleaner_shader = 1;
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
@@ -102,7 +102,8 @@ static int pm_map_process_aldebaran(struct packet_manager *pm,
memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran));
packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
sizeof(struct pm4_mes_map_process_aldebaran));
- if (adev->enforce_isolation[knode->node_id])
+ if (adev->enforce_isolation[knode->node_id] ==
+ AMDGPU_ENFORCE_ISOLATION_ENABLE)
packet->bitfields2.exec_cleaner_shader = 1;
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
@@ -165,9 +166,9 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
* hws_max_conc_proc has been done in
* kgd2kfd_device_init().
*/
- concurrent_proc_cnt = adev->enforce_isolation[kfd->node_id] ?
- 1 : min(pm->dqm->processes_count,
- kfd->max_proc_per_quantum);
+ concurrent_proc_cnt = (adev->enforce_isolation[kfd->node_id] ==
+ AMDGPU_ENFORCE_ISOLATION_ENABLE) ?
+ 1 : min(pm->dqm->processes_count, kfd->max_proc_per_quantum);
packet = (struct pm4_mes_runlist *)buffer;
@@ -202,6 +203,8 @@ static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer,
queue_type__mes_set_resources__hsa_interface_queue_hiq;
packet->bitfields2.vmid_mask = res->vmid_mask;
packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
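+ /* With the XNACK chain workaround active, let the HWS check queues' retry-disable setting */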
+ if (pm->dqm->dev->adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN)
+ packet->bitfields2.enb_xnack_retry_disable_check = 1;
packet->bitfields7.oac_mask = res->oac_mask;
packet->bitfields8.gds_heap_base = res->gds_heap_base;
packet->bitfields8.gds_heap_size = res->gds_heap_size;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
index cd8611401a66..e356a207d03c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
@@ -63,7 +63,8 @@ struct pm4_mes_set_resources {
struct {
uint32_t vmid_mask:16;
uint32_t unmap_latency:8;
- uint32_t reserved1:5;
+ uint32_t reserved1:4;
+ uint32_t enb_xnack_retry_disable_check:1;
enum mes_set_resources_queue_type_enum queue_type:3;
} bitfields2;
uint32_t ordinal2;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index f6aedf69c644..d221c58dccc3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1507,6 +1507,8 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
int kfd_get_num_events(struct kfd_process *p);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
+void kfd_signal_vm_fault_event_with_userptr(struct kfd_process *p, uint64_t gpu_va);
+
void kfd_signal_vm_fault_event(struct kfd_process_device *pdd,
struct kfd_vm_fault_info *info,
struct kfd_hsa_memory_exception_data *data);
@@ -1581,10 +1583,15 @@ int kfd_debugfs_hang_hws(struct kfd_node *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);
+void kfd_debugfs_add_process(struct kfd_process *p);
+void kfd_debugfs_remove_process(struct kfd_process *p);
+
#else
static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}
+static inline void kfd_debugfs_add_process(struct kfd_process *p) {}
+static inline void kfd_debugfs_remove_process(struct kfd_process *p) {}
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 7c0c24732481..722ac1662bdc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -900,6 +900,8 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
kfd_procfs_add_sysfs_files(process);
kfd_procfs_add_sysfs_counters(process);
+ kfd_debugfs_add_process(process);
+
init_waitqueue_head(&process->wait_irq_drain);
}
out:
@@ -1054,6 +1056,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
+ kfd_smi_event_process(pdd, false);
+
pr_debug("Releasing pdd (topology id %d, for pid %d)\n",
pdd->dev->id, p->lead_thread->pid);
kfd_process_device_destroy_cwsr_dgpu(pdd);
@@ -1174,6 +1178,7 @@ static void kfd_process_wq_release(struct work_struct *work)
dma_fence_signal(ef);
kfd_process_remove_sysfs(p);
+ kfd_debugfs_remove_process(p);
kfd_process_kunmap_signal_bo(p);
kfd_process_free_outstanding_kfd_bos(p);
@@ -1715,6 +1720,8 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
pdd->pasid = avm->pasid;
pdd->drm_file = drm_file;
+ kfd_smi_event_process(pdd, true);
+
return 0;
err_get_pasid:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 7eb370b68159..6d5fa57d4a23 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -451,8 +451,15 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("process pid %d DQM create queue type %d failed. ret %d\n",
- pqm->process->lead_thread->pid, type, retval);
+ if ((type == KFD_QUEUE_TYPE_SDMA ||
+ type == KFD_QUEUE_TYPE_SDMA_XGMI ||
+ type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) &&
+ retval == -ENOMEM)
+ pr_warn("process pid %d DQM create queue type %d failed. ret %d\n",
+ pqm->process->lead_thread->pid, type, retval);
+ else
+ pr_err("process pid %d DQM create queue type %d failed. ret %d\n",
+ pqm->process->lead_thread->pid, type, retval);
goto err_create_queue;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index 4afff7094caf..a65c67cf56ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -402,7 +402,7 @@ static u32 kfd_get_vgpr_size_per_cu(u32 gfxv)
{
u32 vgpr_size = 0x40000;
- if ((gfxv / 100 * 100) == 90400 || /* GFX_VERSION_AQUA_VANJARAM */
+ if (gfxv == 90402 || /* GFX_VERSION_AQUA_VANJARAM */
gfxv == 90010 || /* GFX_VERSION_ALDEBARAN */
gfxv == 90008 || /* GFX_VERSION_ARCTURUS */
gfxv == 90500)
@@ -462,7 +462,7 @@ void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev)
if (gfxv == 80002) /* GFX_VERSION_TONGA */
props->eop_buffer_size = 0x8000;
- else if ((gfxv / 100 * 100) == 90400) /* GFX_VERSION_AQUA_VANJARAM */
+ else if (gfxv == 90402) /* GFX_VERSION_AQUA_VANJARAM */
props->eop_buffer_size = 4096;
else if (gfxv >= 80000)
props->eop_buffer_size = 4096;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index 9b8169761ec5..83d9384ac815 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -163,10 +163,9 @@ static int kfd_smi_ev_release(struct inode *inode, struct file *filep)
static bool kfd_smi_ev_enabled(pid_t pid, struct kfd_smi_client *client,
unsigned int event)
{
- uint64_t all = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS);
uint64_t events = READ_ONCE(client->events);
- if (pid && client->pid != pid && !(client->suser && (events & all)))
+ if (pid && client->pid != pid && !client->suser)
return false;
return events & KFD_SMI_EVENT_MASK_FROM_INDEX(event);
@@ -345,6 +344,27 @@ void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
pid, address, last - address + 1, node->id, trigger));
}
+void kfd_smi_event_process(struct kfd_process_device *pdd, bool start)
+{
+ struct amdgpu_task_info *task_info;
+ struct amdgpu_vm *avm;
+
+ if (!pdd->drm_priv)
+ return;
+
+ avm = drm_priv_to_vm(pdd->drm_priv);
+ task_info = amdgpu_vm_get_task_info_vm(avm);
+
+ if (task_info) {
+ kfd_smi_event_add(0, pdd->dev,
+ start ? KFD_SMI_EVENT_PROCESS_START :
+ KFD_SMI_EVENT_PROCESS_END,
+ KFD_EVENT_FMT_PROCESS(task_info->pid,
+ task_info->task_name));
+ amdgpu_vm_put_task_info(task_info);
+ }
+}
+
int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd)
{
struct kfd_smi_client *client;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
index 503bff13d815..bb4d72b57387 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
@@ -53,4 +53,5 @@ void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm);
void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
unsigned long address, unsigned long last,
uint32_t trigger);
+void kfd_smi_event_process(struct kfd_process_device *pdd, bool start);
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 100717a98ec1..72be6e152e88 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1245,8 +1245,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
case IP_VERSION(9, 4, 4):
case IP_VERSION(9, 5, 0):
if (ext_coherent)
- mtype_local = (gc_ip_version < IP_VERSION(9, 5, 0) && !node->adev->rev_id) ?
- AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_CC;
+ mtype_local = AMDGPU_VM_MTYPE_CC;
else
mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 9bbee484d57c..baa2374acdeb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1267,34 +1267,41 @@ static void kfd_set_recommended_sdma_engines(struct kfd_topology_device *to_dev,
{
struct kfd_node *gpu = outbound_link->gpu;
struct amdgpu_device *adev = gpu->adev;
- int num_xgmi_nodes = adev->gmc.xgmi.num_physical_nodes;
+ unsigned int num_xgmi_nodes = adev->gmc.xgmi.num_physical_nodes;
+ unsigned int num_xgmi_sdma_engines = kfd_get_num_xgmi_sdma_engines(gpu);
+ unsigned int num_sdma_engines = kfd_get_num_sdma_engines(gpu);
+ uint32_t sdma_eng_id_mask = (1 << num_sdma_engines) - 1;
+ uint32_t xgmi_sdma_eng_id_mask =
+ ((1 << num_xgmi_sdma_engines) - 1) << num_sdma_engines;
+
bool support_rec_eng = !amdgpu_sriov_vf(adev) && to_dev->gpu &&
adev->aid_mask && num_xgmi_nodes && gpu->kfd->num_nodes == 1 &&
- kfd_get_num_xgmi_sdma_engines(gpu) >= 14 &&
- (!(adev->flags & AMD_IS_APU) && num_xgmi_nodes == 8);
+ num_xgmi_sdma_engines >= 6 && (!(adev->flags & AMD_IS_APU) &&
+ num_xgmi_nodes == 8);
if (support_rec_eng) {
int src_socket_id = adev->gmc.xgmi.physical_node_id;
int dst_socket_id = to_dev->gpu->adev->gmc.xgmi.physical_node_id;
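+ /* Halve the mapped engine index when only 6 XGMI SDMA engines are present */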
+ unsigned int reshift = num_xgmi_sdma_engines == 6 ? 1 : 0;
outbound_link->rec_sdma_eng_id_mask =
- 1 << rec_sdma_eng_map[src_socket_id][dst_socket_id];
+ 1 << (rec_sdma_eng_map[src_socket_id][dst_socket_id] >> reshift);
inbound_link->rec_sdma_eng_id_mask =
- 1 << rec_sdma_eng_map[dst_socket_id][src_socket_id];
- } else {
- int num_sdma_eng = kfd_get_num_sdma_engines(gpu);
- int i, eng_offset = 0;
+ 1 << (rec_sdma_eng_map[dst_socket_id][src_socket_id] >> reshift);
- if (outbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
- kfd_get_num_xgmi_sdma_engines(gpu) && to_dev->gpu) {
- eng_offset = num_sdma_eng;
- num_sdma_eng = kfd_get_num_xgmi_sdma_engines(gpu);
- }
+ /* If the recommended engine is not an XGMI engine, fall back to the full XGMI mask */
+ if (outbound_link->rec_sdma_eng_id_mask & sdma_eng_id_mask)
+ outbound_link->rec_sdma_eng_id_mask = xgmi_sdma_eng_id_mask;
+ if (inbound_link->rec_sdma_eng_id_mask & sdma_eng_id_mask)
+ inbound_link->rec_sdma_eng_id_mask = xgmi_sdma_eng_id_mask;
- for (i = 0; i < num_sdma_eng; i++) {
- outbound_link->rec_sdma_eng_id_mask |= (1 << (i + eng_offset));
- inbound_link->rec_sdma_eng_id_mask |= (1 << (i + eng_offset));
- }
+ } else {
+ uint32_t engine_mask = (outbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
+ num_xgmi_sdma_engines && to_dev->gpu) ? xgmi_sdma_eng_id_mask :
+ sdma_eng_id_mask;
+
+ outbound_link->rec_sdma_eng_id_mask = engine_mask;
+ inbound_link->rec_sdma_eng_id_mask = engine_mask;
}
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index ab2a97e354da..7329b8cc2576 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -38,6 +38,7 @@ AMDGPUDM = \
amdgpu_dm_pp_smu.o \
amdgpu_dm_psr.o \
amdgpu_dm_replay.o \
+ amdgpu_dm_quirks.o \
amdgpu_dm_wb.o
ifdef CONFIG_DRM_AMD_DC_FP
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a187cdb43e7e..742b10881112 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -80,7 +80,6 @@
#include <linux/power_supply.h>
#include <linux/firmware.h>
#include <linux/component.h>
-#include <linux/dmi.h>
#include <linux/sort.h>
#include <drm/display/drm_dp_mst_helper.h>
@@ -115,6 +114,8 @@
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
+static_assert(AMDGPU_DMUB_NOTIFICATION_MAX == DMUB_NOTIFICATION_MAX, "AMDGPU_DMUB_NOTIFICATION_MAX mismatch");
+
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
@@ -280,7 +281,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
acrtc = adev->mode_info.crtcs[crtc];
if (!acrtc->dm_irq_params.stream) {
- DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
crtc);
return 0;
}
@@ -301,7 +302,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
acrtc = adev->mode_info.crtcs[crtc];
if (!acrtc->dm_irq_params.stream) {
- DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
crtc);
return 0;
}
@@ -757,6 +758,29 @@ static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
complete(&adev->dm.dmub_aux_transfer_done);
}
+static void dmub_aux_fused_io_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
+{
+ if (!adev || !notify) {
+ ASSERT(false);
+ return;
+ }
+
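+ /* Route the reply to the waiter registered for this DDC line */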
+ const struct dmub_cmd_fused_request *req = &notify->fused_request;
+ const uint8_t ddc_line = req->u.aux.ddc_line;
+
+ if (ddc_line >= ARRAY_SIZE(adev->dm.fused_io)) {
+ ASSERT(false);
+ return;
+ }
+
+ struct fused_io_sync *sync = &adev->dm.fused_io[ddc_line];
+
+ static_assert(sizeof(*req) <= sizeof(sync->reply_data), "Size mismatch");
+ memcpy(sync->reply_data, req, sizeof(*req));
+ complete(&sync->replied);
+}
+
/**
* dmub_hpd_callback - DMUB HPD interrupt processing callback.
* @adev: amdgpu_device pointer
@@ -780,18 +804,18 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
return;
if (notify == NULL) {
- DRM_ERROR("DMUB HPD callback notification was NULL");
+ drm_err(adev_to_drm(adev), "DMUB HPD callback notification was NULL");
return;
}
if (notify->link_index > adev->dm.dc->link_count) {
- DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
+ drm_err(adev_to_drm(adev), "DMUB HPD index (%u) is abnormal", notify->link_index);
return;
}
/* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */
if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
- DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n");
+ drm_info(adev_to_drm(adev), "Skip DMUB HPD IRQ callback in suspend/resume\n");
return;
}
@@ -808,11 +832,11 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
aconnector = to_amdgpu_dm_connector(connector);
if (link && aconnector->dc_link == link) {
if (notify->type == DMUB_NOTIFICATION_HPD)
- DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+ drm_info(adev_to_drm(adev), "DMUB HPD IRQ callback: link_index=%u\n", link_index);
else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
- DRM_INFO("DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
+ drm_info(adev_to_drm(adev), "DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
else
- DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
+ drm_warn(adev_to_drm(adev), "DMUB Unknown HPD callback type %d, link_index=%u\n",
notify->type, link_index);
hpd_aconnector = aconnector;
@@ -824,7 +848,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
if (hpd_aconnector) {
if (notify->type == DMUB_NOTIFICATION_HPD) {
if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG))
- DRM_WARN("DMUB reported hpd status unchanged. link_index=%u\n", link_index);
+ drm_warn(adev_to_drm(adev), "DMUB reported hpd status unchanged. link_index=%u\n", link_index);
handle_hpd_irq_helper(hpd_aconnector);
} else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) {
handle_hpd_rx_irq(hpd_aconnector);
@@ -843,7 +867,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
static void dmub_hpd_sense_callback(struct amdgpu_device *adev,
struct dmub_notification *notify)
{
- DRM_DEBUG_DRIVER("DMUB HPD SENSE callback.\n");
+ drm_dbg_driver(adev_to_drm(adev), "DMUB HPD SENSE callback.\n");
}
/**
@@ -879,7 +903,7 @@ static void dm_handle_hpd_work(struct work_struct *work)
dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
if (!dmub_hpd_wrk->dmub_notify) {
- DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
+ drm_err(adev_to_drm(dmub_hpd_wrk->adev), "dmub_hpd_wrk dmub_notify is NULL");
return;
}
@@ -893,6 +917,30 @@ static void dm_handle_hpd_work(struct work_struct *work)
}
+static const char *dmub_notification_type_str(enum dmub_notification_type e)
+{
+ switch (e) {
+ case DMUB_NOTIFICATION_NO_DATA:
+ return "NO_DATA";
+ case DMUB_NOTIFICATION_AUX_REPLY:
+ return "AUX_REPLY";
+ case DMUB_NOTIFICATION_HPD:
+ return "HPD";
+ case DMUB_NOTIFICATION_HPD_IRQ:
+ return "HPD_IRQ";
+ case DMUB_NOTIFICATION_SET_CONFIG_REPLY:
+ return "SET_CONFIG_REPLY";
+ case DMUB_NOTIFICATION_DPIA_NOTIFICATION:
+ return "DPIA_NOTIFICATION";
+ case DMUB_NOTIFICATION_HPD_SENSE_NOTIFY:
+ return "HPD_SENSE_NOTIFY";
+ case DMUB_NOTIFICATION_FUSED_IO:
+ return "FUSED_IO";
+ default:
+ return "<unknown>";
+ }
+}
+
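The switch replaces the fixed event_type[] table removed in a later hunk: a table lookup silently reads past its end once new enum values are appended (DMUB_NOTIFICATION_FUSED_IO had no entry there), and the table also carried a stale "SET_CONFIGC_REPLY" spelling. Usage stays a one-liner, e.g.:

	/* Resolves any enum value, including ones added later; anything
	 * out of range falls through to "<unknown>" instead of reading
	 * past the end of a table. */
	drm_warn(adev_to_drm(adev), "DMUB notification skipped: type=%s\n",
		 dmub_notification_type_str(notify.type));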
#define DMUB_TRACE_MAX_READ 64
/**
* dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
@@ -910,22 +958,13 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
struct dmcub_trace_buf_entry entry = { 0 };
u32 count = 0;
struct dmub_hpd_work *dmub_hpd_wrk;
- static const char *const event_type[] = {
- "NO_DATA",
- "AUX_REPLY",
- "HPD",
- "HPD_IRQ",
- "SET_CONFIGC_REPLY",
- "DPIA_NOTIFICATION",
- "HPD_SENSE_NOTIFY",
- };
do {
if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
entry.param0, entry.param1);
- DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+ drm_dbg_driver(adev_to_drm(adev), "trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
entry.trace_code, entry.tick_count, entry.param0, entry.param1);
} else
break;
@@ -935,7 +974,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
} while (count <= DMUB_TRACE_MAX_READ);
if (count > DMUB_TRACE_MAX_READ)
- DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
+ drm_dbg_driver(adev_to_drm(adev), "Warning : count > DMUB_TRACE_MAX_READ");
if (dc_enable_dmub_notifications(adev->dm.dc) &&
irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
@@ -943,25 +982,25 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
do {
dc_stat_get_dmub_notification(adev->dm.dc, &notify);
if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
- DRM_ERROR("DM: notify type %d invalid!", notify.type);
+ drm_err(adev_to_drm(adev), "DM: notify type %d invalid!", notify.type);
continue;
}
if (!dm->dmub_callback[notify.type]) {
- DRM_WARN("DMUB notification skipped due to no handler: type=%s\n",
- event_type[notify.type]);
+ drm_warn(adev_to_drm(adev), "DMUB notification skipped due to no handler: type=%s\n",
+ dmub_notification_type_str(notify.type));
continue;
}
if (dm->dmub_thread_offload[notify.type] == true) {
dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
if (!dmub_hpd_wrk) {
- DRM_ERROR("Failed to allocate dmub_hpd_wrk");
+ drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk");
return;
}
dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
GFP_ATOMIC);
if (!dmub_hpd_wrk->dmub_notify) {
kfree(dmub_hpd_wrk);
- DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
+ drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk->dmub_notify");
return;
}
INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
@@ -1019,10 +1058,10 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
&compressor->gpu_addr, &compressor->cpu_addr);
if (r)
- DRM_ERROR("DM: Failed to initialize FBC\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize FBC\n");
else {
adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
- DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
+ drm_info(adev_to_drm(adev), "DM: FBC alloc %lu\n", max_size*4);
}
}
@@ -1187,13 +1226,13 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
return 0;
if (!fb_info) {
- DRM_ERROR("No framebuffer info for DMUB service.\n");
+ drm_err(adev_to_drm(adev), "No framebuffer info for DMUB service.\n");
return -EINVAL;
}
if (!dmub_fw) {
/* Firmware required for DMUB support. */
- DRM_ERROR("No firmware provided for DMUB.\n");
+ drm_err(adev_to_drm(adev), "No firmware provided for DMUB.\n");
return -EINVAL;
}
@@ -1203,19 +1242,19 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error checking HW support for DMUB: %d\n", status);
return -EINVAL;
}
if (!has_hw_support) {
- DRM_INFO("DMUB unsupported on ASIC\n");
+ drm_info(adev_to_drm(adev), "DMUB unsupported on ASIC\n");
return 0;
}
/* Reset DMCUB if it was previously running - before we overwrite its memory. */
status = dmub_srv_hw_reset(dmub_srv);
if (status != DMUB_STATUS_OK)
- DRM_WARN("Error resetting DMUB HW: %d\n", status);
+ drm_warn(adev_to_drm(adev), "Error resetting DMUB HW: %d\n", status);
hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
@@ -1298,6 +1337,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
case IP_VERSION(3, 5, 1):
case IP_VERSION(3, 6, 0):
hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
+ hw_params.lower_hbr3_phy_ssc = true;
break;
default:
break;
@@ -1305,14 +1345,14 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
status = dmub_srv_hw_init(dmub_srv, &hw_params);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error initializing DMUB HW: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error initializing DMUB HW: %d\n", status);
return -EINVAL;
}
/* Wait for firmware load to finish. */
status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
if (status != DMUB_STATUS_OK)
- DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
/* Init DMCU and ABM if available. */
if (dmcu && abm) {
@@ -1323,11 +1363,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
if (!adev->dm.dc->ctx->dmub_srv)
adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
if (!adev->dm.dc->ctx->dmub_srv) {
- DRM_ERROR("Couldn't allocate DC DMUB server!\n");
+ drm_err(adev_to_drm(adev), "Couldn't allocate DC DMUB server!\n");
return -ENOMEM;
}
- DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
+ drm_info(adev_to_drm(adev), "DMUB hardware initialized: version=0x%08X\n",
adev->dm.dmcub_fw_version);
/* Keeping sanity checks off if
@@ -1370,18 +1410,18 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
status = dmub_srv_is_hw_init(dmub_srv, &init);
if (status != DMUB_STATUS_OK)
- DRM_WARN("DMUB hardware init check failed: %d\n", status);
+ drm_warn(adev_to_drm(adev), "DMUB hardware init check failed: %d\n", status);
if (status == DMUB_STATUS_OK && init) {
/* Wait for firmware load to finish. */
status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
if (status != DMUB_STATUS_OK)
- DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
} else {
/* Perform the full hardware initialization. */
r = dm_dmub_hw_init(adev);
if (r)
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
}
}
@@ -1491,18 +1531,18 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
aconnector = offload_work->offload_wq->aconnector;
+ adev = offload_work->adev;
if (!aconnector) {
- DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
+ drm_err(adev_to_drm(adev), "Can't retrieve aconnector in hpd_rx_irq_offload_work");
goto skip;
}
- adev = drm_to_adev(aconnector->base.dev);
dc_link = aconnector->dc_link;
mutex_lock(&aconnector->hpd_lock);
if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
mutex_unlock(&aconnector->hpd_lock);
if (new_connection_type == dc_connection_none)
@@ -1571,8 +1611,9 @@ skip:
}
-static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct amdgpu_device *adev)
{
+ struct dc *dc = adev->dm.dc;
int max_caps = dc->caps.max_links;
int i = 0;
struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
@@ -1588,7 +1629,7 @@ static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct
create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
if (hpd_rx_offload_wq[i].wq == NULL) {
- DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
+ drm_err(adev_to_drm(adev), "create amdgpu_dm_hpd_rx_offload_wq fail!");
goto out_err;
}
@@ -1637,153 +1678,6 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
return false;
}
-struct amdgpu_dm_quirks {
- bool aux_hpd_discon;
- bool support_edp0_on_dp1;
-};
-
-static struct amdgpu_dm_quirks quirk_entries = {
- .aux_hpd_discon = false,
- .support_edp0_on_dp1 = false
-};
-
-static int edp0_on_dp1_callback(const struct dmi_system_id *id)
-{
- quirk_entries.support_edp0_on_dp1 = true;
- return 0;
-}
-
-static int aux_hpd_discon_callback(const struct dmi_system_id *id)
-{
- quirk_entries.aux_hpd_discon = true;
- return 0;
-}
-
-static const struct dmi_system_id dmi_quirk_table[] = {
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
- },
- },
- {
- .callback = edp0_on_dp1_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
- },
- },
- {
- .callback = edp0_on_dp1_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"),
- },
- },
- {
- .callback = edp0_on_dp1_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
- },
- },
- {
- .callback = edp0_on_dp1_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"),
- },
- },
- {
- .callback = edp0_on_dp1_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"),
- },
- },
- {}
- /* TODO: refactor this from a fixed table to a dynamic option */
-};
-
-static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_data *init_data)
-{
- int dmi_id;
- struct drm_device *dev = dm->ddev;
-
- dm->aux_hpd_discon_quirk = false;
- init_data->flags.support_edp0_on_dp1 = false;
-
- dmi_id = dmi_check_system(dmi_quirk_table);
-
- if (!dmi_id)
- return;
-
- if (quirk_entries.aux_hpd_discon) {
- dm->aux_hpd_discon_quirk = true;
- drm_info(dev, "aux_hpd_discon_quirk attached\n");
- }
- if (quirk_entries.support_edp0_on_dp1) {
- init_data->flags.support_edp0_on_dp1 = true;
- drm_info(dev, "support_edp0_on_dp1 attached\n");
- }
-}
void*
dm_allocate_gpu_mem(
@@ -1959,7 +1853,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
mutex_init(&adev->dm.audio_lock);
if (amdgpu_dm_irq_init(adev)) {
- DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to initialize DM IRQ support.\n");
goto error;
}
@@ -2070,7 +1964,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
init_data.num_virtual_links = 1;
- retrieve_dmi_info(&adev->dm, &init_data);
+ retrieve_dmi_info(&adev->dm);
+ if (adev->dm.edp0_on_dp1_quirk)
+ init_data.flags.support_edp0_on_dp1 = true;
if (adev->dm.bb_from_dmub)
init_data.bb_from_dmub = adev->dm.bb_from_dmub;
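The DMI quirk table and retrieve_dmi_info() move out of this file (their removal is in an earlier hunk), and the helper now publishes both quirks on the display manager instead of writing into init_data directly. An illustrative reconstruction of the relocated helper, inferred from the removed version and this call site — not the moved code itself:

void retrieve_dmi_info(struct amdgpu_display_manager *dm)
{
	dm->aux_hpd_discon_quirk = false;
	dm->edp0_on_dp1_quirk = false;

	if (!dmi_check_system(dmi_quirk_table))
		return;

	if (quirk_entries.aux_hpd_discon) {
		dm->aux_hpd_discon_quirk = true;
		drm_info(dm->ddev, "aux_hpd_discon_quirk attached\n");
	}
	if (quirk_entries.support_edp0_on_dp1) {
		dm->edp0_on_dp1_quirk = true;
		drm_info(dm->ddev, "support_edp0_on_dp1 attached\n");
	}
}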
@@ -2081,10 +1977,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.dc = dc_create(&init_data);
if (adev->dm.dc) {
- DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
+ drm_info(adev_to_drm(adev), "Display Core v%s initialized on %s\n", DC_VER,
dce_version_to_string(adev->dm.dc->ctx->dce_version));
} else {
- DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
+ drm_info(adev_to_drm(adev), "Display Core failed to initialize with v%s!\n", DC_VER);
goto error;
}
@@ -2118,25 +2014,31 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.dc->debug.using_dml21 = true;
}
+ if (amdgpu_dc_debug_mask & DC_HDCP_LC_FORCE_FW_ENABLE)
+ adev->dm.dc->debug.hdcp_lc_force_fw_enable = true;
+
+ if (amdgpu_dc_debug_mask & DC_HDCP_LC_ENABLE_SW_FALLBACK)
+ adev->dm.dc->debug.hdcp_lc_enable_sw_fallback = true;
+
adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
adev->dm.dc->debug.ignore_cable_id = true;
if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
- DRM_INFO("DP-HDMI FRL PCON supported\n");
+ drm_info(adev_to_drm(adev), "DP-HDMI FRL PCON supported\n");
r = dm_dmub_hw_init(adev);
if (r) {
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
goto error;
}
dc_hardware_init(adev->dm.dc);
- adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
+ adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
if (!adev->dm.hpd_rx_offload_wq) {
- DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd rx offload workqueue.\n");
goto error;
}
@@ -2151,10 +2053,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"amdgpu: failed to initialize freesync_module.\n");
} else
- DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
+ drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done %p.\n",
adev->dm.freesync_module);
amdgpu_dm_init_color_mod();
@@ -2163,7 +2065,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.vblank_control_workqueue =
create_singlethread_workqueue("dm_vblank_control_workqueue");
if (!adev->dm.vblank_control_workqueue)
- DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to initialize vblank_workqueue.\n");
}
if (adev->dm.dc->caps.ips_support &&
@@ -2174,9 +2076,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
if (!adev->dm.hdcp_workqueue)
- DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to initialize hdcp_workqueue.\n");
else
- DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
+ drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
dc_init_callbacks(adev->dm.dc, &init_params);
}
@@ -2184,20 +2086,29 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_completion(&adev->dm.dmub_aux_transfer_done);
adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
if (!adev->dm.dmub_notify) {
- DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
+ drm_info(adev_to_drm(adev), "amdgpu: fail to allocate adev->dm.dmub_notify");
goto error;
}
adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
if (!adev->dm.delayed_hpd_wq) {
- DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd offload workqueue.\n");
goto error;
}
amdgpu_dm_outbox_init(adev);
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
dmub_aux_setconfig_callback, false)) {
- DRM_ERROR("amdgpu: fail to register dmub aux callback");
+ drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub aux callback");
+ goto error;
+ }
+
+ for (size_t i = 0; i < ARRAY_SIZE(adev->dm.fused_io); i++)
+ init_completion(&adev->dm.fused_io[i].replied);
+
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_FUSED_IO,
+ dmub_aux_fused_io_callback, false)) {
+ drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub fused io callback");
goto error;
}
/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
@@ -2214,7 +2125,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
if (amdgpu_dm_initialize_drm_device(adev)) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"amdgpu: failed to initialize sw for display support.\n");
goto error;
}
@@ -2229,7 +2140,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"amdgpu: failed to initialize sw for display support.\n");
goto error;
}
@@ -2237,14 +2148,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
amdgpu_dm_crtc_secure_display_create_contexts(adev);
if (!adev->dm.secure_display_ctx.crtc_ctx)
- DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to initialize secure display contexts.\n");
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1))
adev->dm.secure_display_ctx.support_mul_roi = true;
#endif
- DRM_DEBUG_DRIVER("KMS initialized.\n");
+ drm_dbg_driver(adev_to_drm(adev), "KMS initialized.\n");
return 0;
error:
@@ -2417,7 +2328,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
default:
break;
}
- DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
+ drm_err(adev_to_drm(adev), "Unsupported ASIC type: 0x%X\n", adev->asic_type);
return -EINVAL;
}
@@ -2435,7 +2346,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
}
if (r) {
- dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
+ drm_err(adev_to_drm(adev), "amdgpu_dm: Can't validate firmware \"%s\"\n",
fw_name_dmcu);
amdgpu_ucode_release(&adev->dm.fw_dmcu);
return r;
@@ -2560,7 +2471,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
- DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+ drm_info(adev_to_drm(adev), "Loading DMUB firmware via PSP: version=0x%08X\n",
adev->dm.dmcub_fw_version);
}
@@ -2569,7 +2480,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
dmub_srv = adev->dm.dmub_srv;
if (!dmub_srv) {
- DRM_ERROR("Failed to allocate DMUB service!\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate DMUB service!\n");
return -ENOMEM;
}
@@ -2582,7 +2493,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
/* Create the DMUB service. */
status = dmub_srv_create(dmub_srv, &create_params);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error creating DMUB service: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error creating DMUB service: %d\n", status);
return -EINVAL;
}
@@ -2607,7 +2518,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
&region_info);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error calculating DMUB region info: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error calculating DMUB region info: %d\n", status);
return -EINVAL;
}
@@ -2636,14 +2547,14 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
fb_info = adev->dm.dmub_fb_info;
if (!fb_info) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"Failed to allocate framebuffer info for DMUB service!\n");
return -ENOMEM;
}
status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error calculating DMUB FB info: %d\n", status);
return -EINVAL;
}
@@ -2660,7 +2571,7 @@ static int dm_sw_init(struct amdgpu_ip_block *ip_block)
adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
if (!adev->dm.cgs_device) {
- DRM_ERROR("amdgpu: failed to create cgs device.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to create cgs device.\n");
return -EINVAL;
}
@@ -2966,7 +2877,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
ret = amdgpu_dpm_write_watermarks_table(adev);
if (ret) {
- DRM_ERROR("Failed to update WMTABLE!\n");
+ drm_err(adev_to_drm(adev), "Failed to update WMTABLE!\n");
return ret;
}
@@ -2984,13 +2895,13 @@ static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
if (oem_ddc_service) {
oem_i2c = create_i2c(oem_ddc_service, true);
if (!oem_i2c) {
- dev_info(adev->dev, "Failed to create oem i2c adapter data\n");
+ drm_info(adev_to_drm(adev), "Failed to create oem i2c adapter data\n");
return -ENOMEM;
}
r = i2c_add_adapter(&oem_i2c->base);
if (r) {
- dev_info(adev->dev, "Failed to register oem i2c\n");
+ drm_info(adev_to_drm(adev), "Failed to register oem i2c\n");
kfree(oem_i2c);
return r;
}
@@ -3033,7 +2944,7 @@ static int dm_hw_init(struct amdgpu_ip_block *ip_block)
r = dm_oem_i2c_hw_init(adev);
if (r)
- dev_info(adev->dev, "Failed to add OEM i2c bus\n");
+ drm_info(adev_to_drm(adev), "Failed to add OEM i2c bus\n");
return 0;
}
@@ -3076,7 +2987,7 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
if (rc)
- DRM_WARN("Failed to %s pflip interrupts\n",
+ drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n",
enable ? "enable" : "disable");
if (enable) {
@@ -3086,14 +2997,14 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
if (rc)
- DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
+ drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
/* During gpu-reset we disable and then enable vblank irq, so
* don't use amdgpu_irq_get/put() to avoid refcount change.
*/
if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
- DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
+ drm_warn(adev_to_drm(adev), "Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
}
}
@@ -3923,20 +3834,21 @@ static void handle_hpd_irq(void *param)
}
-static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+static void schedule_hpd_rx_offload_work(struct amdgpu_device *adev, struct hpd_rx_irq_offload_work_queue *offload_wq,
union hpd_irq_data hpd_irq_data)
{
struct hpd_rx_irq_offload_work *offload_work =
kzalloc(sizeof(*offload_work), GFP_KERNEL);
if (!offload_work) {
- DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate hpd_rx_irq_offload_work.\n");
return;
}
INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
offload_work->data = hpd_irq_data;
offload_work->offload_wq = offload_wq;
+ offload_work->adev = adev;
queue_work(offload_wq->wq, &offload_work->work);
DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
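Carrying adev in the work item lets dm_handle_hpd_rx_offload_work() log through drm_err() even on the early path where aconnector is NULL (see the hunk above, where the adev assignment moves ahead of that check). Assumed shape of the work item after this change — the first three members mirror the existing structure; adev is the addition this hunk relies on:

struct hpd_rx_irq_offload_work {
	struct work_struct work;
	struct hpd_rx_irq_offload_work_queue *offload_wq;
	union hpd_irq_data data;
	struct amdgpu_device *adev;	/* new: for device-aware logging */
};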
@@ -3978,7 +3890,7 @@ static void handle_hpd_rx_irq(void *param)
goto out;
if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
- schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
goto out;
}
@@ -4000,7 +3912,7 @@ static void handle_hpd_rx_irq(void *param)
spin_unlock(&offload_wq->offload_lock);
if (!skip)
- schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
goto out;
}
@@ -4017,7 +3929,7 @@ static void handle_hpd_rx_irq(void *param)
spin_unlock(&offload_wq->offload_lock);
if (!skip)
- schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
goto out;
}
@@ -4027,7 +3939,7 @@ out:
if (result && !is_mst_root_connector) {
/* Downstream Port status changed. */
if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(dc_link);
@@ -4090,19 +4002,19 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd callback");
return -EINVAL;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ,
dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd callback");
return -EINVAL;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
dmub_hpd_sense_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd sense callback");
+ drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd sense callback");
return -EINVAL;
}
}
@@ -4123,7 +4035,7 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_HPD1 ||
int_params.irq_source > DC_IRQ_SOURCE_HPD6) {
- DRM_ERROR("Failed to register hpd irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register hpd irq!\n");
return -EINVAL;
}
@@ -4141,7 +4053,7 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_HPD1RX ||
int_params.irq_source > DC_IRQ_SOURCE_HPD6RX) {
- DRM_ERROR("Failed to register hpd rx irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register hpd rx irq!\n");
return -EINVAL;
}
@@ -4183,7 +4095,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
if (r) {
- DRM_ERROR("Failed to add crtc irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
return r;
}
@@ -4194,7 +4106,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
- DRM_ERROR("Failed to register vblank irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
return -EINVAL;
}
@@ -4213,7 +4125,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
if (r) {
- DRM_ERROR("Failed to add page flip irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
return r;
}
@@ -4224,7 +4136,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
- DRM_ERROR("Failed to register pflip irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
return -EINVAL;
}
@@ -4242,7 +4154,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, client_id,
VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r) {
- DRM_ERROR("Failed to add hpd irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
return r;
}
@@ -4284,7 +4196,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
if (r) {
- DRM_ERROR("Failed to add crtc irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
return r;
}
@@ -4295,7 +4207,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
- DRM_ERROR("Failed to register vblank irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
return -EINVAL;
}
@@ -4313,7 +4225,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
if (r) {
- DRM_ERROR("Failed to add vupdate irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n");
return r;
}
@@ -4324,7 +4236,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 ||
int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) {
- DRM_ERROR("Failed to register vupdate irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n");
return -EINVAL;
}
@@ -4343,7 +4255,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
if (r) {
- DRM_ERROR("Failed to add page flip irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
return r;
}
@@ -4354,7 +4266,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
- DRM_ERROR("Failed to register pflip irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
return -EINVAL;
}
@@ -4372,7 +4284,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, client_id,
VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r) {
- DRM_ERROR("Failed to add hpd irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
return r;
}
@@ -4422,7 +4334,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
if (r) {
- DRM_ERROR("Failed to add crtc irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
return r;
}
@@ -4433,7 +4345,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
- DRM_ERROR("Failed to register vblank irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
return -EINVAL;
}
@@ -4454,7 +4366,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
vrtl_int_srcid[i], &adev->vline0_irq);
if (r) {
- DRM_ERROR("Failed to add vline0 irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add vline0 irq id!\n");
return r;
}
@@ -4465,7 +4377,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_DC1_VLINE0 ||
int_params.irq_source > DC_IRQ_SOURCE_DC6_VLINE0) {
- DRM_ERROR("Failed to register vline0 irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vline0 irq!\n");
return -EINVAL;
}
@@ -4493,7 +4405,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
if (r) {
- DRM_ERROR("Failed to add vupdate irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n");
return r;
}
@@ -4504,7 +4416,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 ||
int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) {
- DRM_ERROR("Failed to register vupdate irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n");
return -EINVAL;
}
@@ -4524,7 +4436,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
i++) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
if (r) {
- DRM_ERROR("Failed to add page flip irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
return r;
}
@@ -4535,7 +4447,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
- DRM_ERROR("Failed to register pflip irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
return -EINVAL;
}
@@ -4553,7 +4465,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
&adev->hpd_irq);
if (r) {
- DRM_ERROR("Failed to add hpd irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
return r;
}
@@ -4575,7 +4487,7 @@ static int register_outbox_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
&adev->dmub_outbox_irq);
if (r) {
- DRM_ERROR("Failed to add outbox irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add outbox irq id!\n");
return r;
}
@@ -4807,41 +4719,54 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
return 1;
}
-static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
- uint32_t brightness)
+static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps,
+ uint32_t *brightness)
{
- unsigned int min, max;
u8 prev_signal = 0, prev_lum = 0;
+ int i = 0;
- if (!get_brightness_range(caps, &min, &max))
- return brightness;
-
- for (int i = 0; i < caps->data_points; i++) {
- u8 signal, lum;
+ if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
+ return;
- if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
- break;
+ if (!caps->data_points)
+ return;
- signal = caps->luminance_data[i].input_signal;
- lum = caps->luminance_data[i].luminance;
+ /* start at the midpoint to run fewer interpolation steps when the target lies in the upper half */
+ if (caps->luminance_data[caps->data_points/2].input_signal < *brightness)
+ i = caps->data_points/2;
+ do {
+ u8 signal = caps->luminance_data[i].input_signal;
+ u8 lum = caps->luminance_data[i].luminance;
/*
* brightness == signal: luminance is percent numerator
* brightness < signal: interpolate between previous and current luminance numerator
* brightness > signal: find next data point
*/
- if (brightness < signal)
- lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
- (brightness - prev_signal),
- signal - prev_signal);
- else if (brightness > signal) {
+ if (*brightness > signal) {
prev_signal = signal;
prev_lum = lum;
+ i++;
continue;
}
- brightness = DIV_ROUND_CLOSEST(lum * brightness, 101);
- break;
- }
+ if (*brightness < signal)
+ lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
+ (*brightness - prev_signal),
+ signal - prev_signal);
+ *brightness = DIV_ROUND_CLOSEST(lum * *brightness, 101);
+ return;
+ } while (i < caps->data_points);
+}
+
+static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
+ uint32_t brightness)
+{
+ unsigned int min, max;
+
+ if (!get_brightness_range(caps, &min, &max))
+ return brightness;
+
+ convert_custom_brightness(caps, &brightness);
// Rescale 0..255 to min..max
return min + DIV_ROUND_CLOSEST((max - min) * brightness,
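A worked pass through convert_custom_brightness(), assuming a two-point curve of (input_signal 50, luminance 40) and (100, 80) and a user brightness of 75 — the point values are made up for illustration:

	/*
	 * i = 0: signal 50,  75 > 50  -> prev = (50, 40), advance
	 * i = 1: signal 100, 75 < 100 -> interpolate:
	 *        lum = 40 + DIV_ROUND_CLOSEST((80 - 40) * (75 - 50), 100 - 50)
	 *            = 40 + 20 = 60
	 *        brightness = DIV_ROUND_CLOSEST(60 * 75, 101) = 45
	 */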
@@ -5020,10 +4945,10 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
dm->brightness[aconnector->bl_idx] = props.brightness;
if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
- DRM_ERROR("DM: Backlight registration failed!\n");
+ drm_err(drm, "DM: Backlight registration failed!\n");
dm->backlight_dev[aconnector->bl_idx] = NULL;
} else
- DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
+ drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name);
}
static int initialize_plane(struct amdgpu_display_manager *dm,
@@ -5037,7 +4962,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
if (!plane) {
- DRM_ERROR("KMS: Failed to allocate plane\n");
+ drm_err(adev_to_drm(dm->adev), "KMS: Failed to allocate plane\n");
return -ENOMEM;
}
plane->type = plane_type;
@@ -5055,7 +4980,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
if (ret) {
- DRM_ERROR("KMS: Failed to initialize plane\n");
+ drm_err(adev_to_drm(dm->adev), "KMS: Failed to initialize plane\n");
kfree(plane);
return ret;
}
@@ -5124,14 +5049,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize mode config\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize mode config\n");
return -EINVAL;
}
/* There is one primary plane per CRTC */
primary_planes = dm->dc->caps.max_streams;
if (primary_planes > AMDGPU_MAX_PLANES) {
- DRM_ERROR("DM: Plane nums out of 6 planes\n");
+ drm_err(adev_to_drm(adev), "DM: Plane nums out of 6 planes\n");
return -EINVAL;
}
@@ -5144,7 +5069,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (initialize_plane(dm, mode_info, i,
DRM_PLANE_TYPE_PRIMARY, plane)) {
- DRM_ERROR("KMS: Failed to initialize primary plane\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize primary plane\n");
goto fail;
}
}
@@ -5176,14 +5101,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (initialize_plane(dm, NULL, primary_planes + i,
DRM_PLANE_TYPE_OVERLAY, plane)) {
- DRM_ERROR("KMS: Failed to initialize overlay plane\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize overlay plane\n");
goto fail;
}
}
for (i = 0; i < dm->dc->caps.max_streams; i++)
if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
- DRM_ERROR("KMS: Failed to initialize crtc\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize crtc\n");
goto fail;
}
@@ -5203,7 +5128,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
if (register_outbox_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
@@ -5253,7 +5178,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
if (link_cnt > MAX_LINKS) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"KMS: Cannot support more than %d display indexes\n",
MAX_LINKS);
goto fail;
@@ -5269,12 +5194,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
if (!wbcon) {
- DRM_ERROR("KMS: Failed to allocate writeback connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to allocate writeback connector\n");
continue;
}
if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
- DRM_ERROR("KMS: Failed to initialize writeback connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize writeback connector\n");
kfree(wbcon);
continue;
}
@@ -5294,12 +5219,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
- DRM_ERROR("KMS: Failed to initialize encoder\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize encoder\n");
goto fail;
}
if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
- DRM_ERROR("KMS: Failed to initialize connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize connector\n");
goto fail;
}
@@ -5308,7 +5233,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
aconnector;
if (!dc_link_detect_connection_type(link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(link);
@@ -5330,8 +5255,15 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (amdgpu_dm_set_replay_caps(link, aconnector))
psr_feature_enabled = false;
- if (psr_feature_enabled)
+ if (psr_feature_enabled) {
amdgpu_dm_set_psr_caps(link);
+ drm_info(adev_to_drm(adev), "PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
+ link->psr_settings.psr_feature_enabled,
+ link->psr_settings.psr_version,
+ link->dpcd_caps.psr_info.psr_version,
+ link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
+ link->dpcd_caps.psr_info.psr2_su_y_granularity_cap);
+ }
}
}
amdgpu_set_panel_orientation(&aconnector->base);
@@ -5345,7 +5277,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_VERDE:
case CHIP_OLAND:
if (dce60_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
@@ -5367,7 +5299,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_VEGA12:
case CHIP_VEGA20:
if (dce110_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
@@ -5395,12 +5327,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
if (dcn10_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
default:
- DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
+ drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%X\n",
amdgpu_ip_version(adev, DCE_HWIP, 0));
goto fail;
}
@@ -5561,7 +5493,7 @@ static int dm_early_init(struct amdgpu_ip_block *ip_block)
/* if there is no object header, skip DM */
if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
- dev_info(adev->dev, "No object header, skipping DM\n");
+ drm_info(adev_to_drm(adev), "No object header, skipping DM\n");
return -ENOENT;
}
@@ -5673,7 +5605,7 @@ static int dm_early_init(struct amdgpu_ip_block *ip_block)
adev->mode_info.num_dig = 4;
break;
default:
- DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
+ drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%x\n",
amdgpu_ip_version(adev, DCE_HWIP, 0));
return -EINVAL;
}
@@ -5822,7 +5754,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
break;
default:
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"Unsupported screen format %p4cc\n",
&fb->format->format);
return -EINVAL;
@@ -6343,6 +6275,7 @@ static void fill_stream_properties_from_drm_display_mode(
struct amdgpu_dm_connector *aconnector = NULL;
struct hdmi_vendor_infoframe hv_frame;
struct hdmi_avi_infoframe avi_frame;
+ ssize_t err;
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
aconnector = to_amdgpu_dm_connector(connector);
@@ -6389,9 +6322,17 @@ static void fill_stream_properties_from_drm_display_mode(
}
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
- drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame,
+ (struct drm_connector *)connector,
+ mode_in);
+ if (err < 0)
+ drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd \n", connector->name, err);
timing_out->vic = avi_frame.video_code;
- drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
+ err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame,
+ (struct drm_connector *)connector,
+ mode_in);
+ if (err < 0)
+ drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd \n", connector->name, err);
timing_out->hdmi_vic = hv_frame.vic;
}
@@ -6516,7 +6457,7 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
}
static struct dc_sink *
-create_fake_sink(struct dc_link *link)
+create_fake_sink(struct drm_device *dev, struct dc_link *link)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct dc_sink *sink = NULL;
@@ -6526,7 +6467,7 @@ create_fake_sink(struct dc_link *link)
sink = dc_sink_create(&sink_init_data);
if (!sink) {
- DRM_ERROR("Failed to create sink!\n");
+ drm_err(dev, "Failed to create sink!\n");
return NULL;
}
sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
@@ -6659,7 +6600,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
m_pref = list_first_entry_or_null(
&aconnector->base.modes, struct drm_display_mode, head);
if (!m_pref) {
- DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
+ drm_dbg_driver(aconnector->base.dev, "No preferred mode found in EDID\n");
return NULL;
}
}
@@ -6834,7 +6775,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from SST RX\n",
+ drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from SST RX\n",
__func__, drm_connector->name);
}
} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
@@ -6854,7 +6795,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
+ drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
__func__, drm_connector->name);
}
}
@@ -6882,6 +6823,7 @@ create_stream_for_sink(struct drm_connector *connector,
const struct dc_stream_state *old_stream,
int requested_bpc)
{
+ struct drm_device *dev = connector->dev;
struct amdgpu_dm_connector *aconnector = NULL;
struct drm_display_mode *preferred_mode = NULL;
const struct drm_connector_state *con_state = &dm_state->base;
@@ -6904,11 +6846,6 @@ create_stream_for_sink(struct drm_connector *connector,
drm_mode_init(&mode, drm_mode);
memset(&saved_mode, 0, sizeof(saved_mode));
- if (connector == NULL) {
- DRM_ERROR("connector is NULL!\n");
- return stream;
- }
-
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
aconnector = NULL;
aconnector = to_amdgpu_dm_connector(connector);
@@ -6923,7 +6860,7 @@ create_stream_for_sink(struct drm_connector *connector,
}
if (!aconnector || !aconnector->dc_sink) {
- sink = create_fake_sink(link);
+ sink = create_fake_sink(dev, link);
if (!sink)
return stream;
@@ -6935,7 +6872,7 @@ create_stream_for_sink(struct drm_connector *connector,
stream = dc_create_stream_for_sink(sink);
if (stream == NULL) {
- DRM_ERROR("Failed to create stream for sink!\n");
+ drm_err(dev, "Failed to create stream for sink!\n");
goto finish;
}
@@ -6967,7 +6904,7 @@ create_stream_for_sink(struct drm_connector *connector,
* case, we call set mode ourselves to restore the previous mode
* and the modelist may not be filled in time.
*/
- DRM_DEBUG_DRIVER("No preferred mode found\n");
+ drm_dbg_driver(dev, "No preferred mode found\n");
} else if (aconnector) {
recalculate_timing = amdgpu_freesync_vid_mode &&
is_freesync_video_mode(&mode, aconnector);
@@ -7417,6 +7354,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
const struct drm_edid *drm_edid;
struct i2c_adapter *ddc;
+ struct drm_device *dev = connector->dev;
if (dc_link && dc_link->aux_mode)
ddc = &aconnector->dm_dp_aux.aux.ddc;
@@ -7426,7 +7364,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
- DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ drm_err(dev, "No EDID found on connector: %s.\n", connector->name);
return;
}
@@ -7485,7 +7423,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
- DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ drm_err(connector->dev, "No EDID found on connector: %s.\n", connector->name);
return;
}
@@ -7619,7 +7557,7 @@ create_validate_stream_for_sink(struct drm_connector *connector,
dm_state, old_stream,
requested_bpc);
if (stream == NULL) {
- DRM_ERROR("Failed to create stream for sink!\n");
+ drm_err(adev_to_drm(adev), "Failed to create stream for sink!\n");
break;
}
@@ -7694,7 +7632,7 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
aconnector->base.force != DRM_FORCE_ON) {
- DRM_ERROR("dc_sink is NULL!\n");
+ drm_err(connector->dev, "dc_sink is NULL!\n");
goto fail;
}
@@ -8602,7 +8540,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
i2c = create_i2c(link->ddc, false);
if (!i2c) {
- DRM_ERROR("Failed to create i2c adapter data\n");
+ drm_err(adev_to_drm(dm->adev), "Failed to create i2c adapter data\n");
return -ENOMEM;
}
@@ -8610,7 +8548,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
res = i2c_add_adapter(&i2c->base);
if (res) {
- DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
+ drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index);
goto out_free;
}
@@ -8624,7 +8562,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
&i2c->base);
if (res) {
- DRM_ERROR("connector_init failed\n");
+ drm_err(adev_to_drm(dm->adev), "connector_init failed\n");
aconnector->connector_id = -1;
goto out_free;
}
@@ -9114,7 +9052,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
*/
WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
- DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
+ drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR off->on: Get vblank ref\n",
__func__, new_state->base.crtc->base.id);
} else if (old_vrr_active && !new_vrr_active) {
/* Transition VRR active -> inactive:
@@ -9122,7 +9060,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
*/
WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
drm_crtc_vblank_put(new_state->base.crtc);
- DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
+ drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR on->off: Drop vblank ref\n",
__func__, new_state->base.crtc->base.id);
}
}
@@ -9209,13 +9147,13 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane,
if (crtc_state->stream) {
if (!dc_stream_set_cursor_attributes(crtc_state->stream,
&attributes))
- DRM_ERROR("DC failed to set cursor attributes\n");
+ drm_err(adev_to_drm(adev), "DC failed to set cursor attributes\n");
update->cursor_attributes = &crtc_state->stream->cursor_attributes;
if (!dc_stream_set_cursor_position(crtc_state->stream,
&position))
- DRM_ERROR("DC failed to set cursor position\n");
+ drm_err(adev_to_drm(adev), "DC failed to set cursor position\n");
update->cursor_position = &crtc_state->stream->cursor_position;
}
@@ -9466,7 +9404,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].surface = dc_plane;
if (!bundle->surface_updates[planes_count].surface) {
- DRM_ERROR("No surface for CRTC: id=%d\n",
+ drm_err(dev, "No surface for CRTC: id=%d\n",
acrtc_attach->crtc_id);
continue;
}
@@ -9982,20 +9920,20 @@ static void dm_set_writeback(struct amdgpu_display_manager *dm,
wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
if (!wb_info) {
- DRM_ERROR("Failed to allocate wb_info\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate wb_info\n");
return;
}
acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
if (!acrtc) {
- DRM_ERROR("no amdgpu_crtc found\n");
+ drm_err(adev_to_drm(adev), "no amdgpu_crtc found\n");
kfree(wb_info);
return;
}
afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
if (!afb) {
- DRM_ERROR("No amdgpu_framebuffer found\n");
+ drm_err(adev_to_drm(adev), "No amdgpu_framebuffer found\n");
kfree(wb_info);
return;
}
@@ -10216,7 +10154,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
enable_encryption = true;
- DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+ drm_info(adev_to_drm(adev), "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
if (aconnector->dc_link)
hdcp_update_display(
@@ -10308,7 +10246,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
*/
dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
if (!dummy_updates) {
- DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n");
continue;
}
for (j = 0; j < status->plane_count; j++)
@@ -10516,16 +10454,20 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
*/
conn_state = drm_atomic_get_connector_state(state, connector);
- ret = PTR_ERR_OR_ZERO(conn_state);
- if (ret)
+ /* Check for error in getting connector state */
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
goto out;
+ }
/* Attach crtc to drm_atomic_state*/
crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
- ret = PTR_ERR_OR_ZERO(crtc_state);
- if (ret)
+ /* Check for error in getting crtc state */
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
goto out;
+ }
/* force a restore */
crtc_state->mode_changed = true;
@@ -10533,9 +10475,11 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
/* Attach plane to drm_atomic_state */
plane_state = drm_atomic_get_plane_state(state, plane);
- ret = PTR_ERR_OR_ZERO(plane_state);
- if (ret)
+ /* Check for error in getting plane state */
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
goto out;
+ }
/* Call commit internally with the state we just constructed */
ret = drm_atomic_commit(state);
@@ -10543,7 +10487,7 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
out:
drm_atomic_state_put(state);
if (ret)
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_err(ddev, "Restoring old state failed with %i\n", ret);
return ret;
}
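The explicit IS_ERR()/PTR_ERR() checks behave exactly like the PTR_ERR_OR_ZERO() pattern they replace — drm_atomic_get_connector_state() and friends return either a valid pointer or an ERR_PTR(), never NULL — but they keep the error path visible at each call site. For reference, PTR_ERR_OR_ZERO() in include/linux/err.h is essentially:

static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	else
		return 0;
}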
@@ -10627,7 +10571,7 @@ static int do_aquire_global_lock(struct drm_device *dev,
&commit->flip_done, 10*HZ);
if (ret == 0)
- DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+ drm_err(dev, "[CRTC:%d:%s] hw_done or flip_done timed out\n",
crtc->base.id, crtc->name);
drm_crtc_commit_put(commit);
@@ -10743,6 +10687,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
struct dm_atomic_state *dm_state = NULL;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
struct dc_stream_state *new_stream;
+ struct amdgpu_device *adev = dm->adev;
int ret = 0;
/*
@@ -10772,8 +10717,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
drm_old_conn_state = drm_atomic_get_old_connector_state(state,
connector);
- if (IS_ERR(drm_new_conn_state)) {
- ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
+ if (WARN_ON(!drm_new_conn_state)) {
+ ret = -EINVAL;
goto fail;
}
@@ -10796,7 +10741,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
*/
if (!new_stream) {
- DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ drm_dbg_driver(adev_to_drm(adev), "%s: Failed to create new stream for crtc %d\n",
__func__, acrtc->base.base.id);
ret = -ENOMEM;
goto fail;
@@ -10834,7 +10779,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
new_crtc_state->mode_changed = false;
- DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
+ drm_dbg_driver(adev_to_drm(adev), "Mode change not required, setting mode_changed to %d",
new_crtc_state->mode_changed);
}
}
@@ -10872,7 +10817,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
is_timing_unchanged_for_freesync(new_crtc_state,
old_crtc_state)) {
new_crtc_state->mode_changed = false;
- DRM_DEBUG_DRIVER(
+ drm_dbg_driver(adev_to_drm(adev),
"Mode change not required for front porch change, setting mode_changed to %d",
new_crtc_state->mode_changed);
@@ -10893,7 +10838,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (ret)
goto fail;
- DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
+ drm_dbg_driver(adev_to_drm(adev), "Disabling DRM crtc: %d\n",
crtc->base.id);
/* i.e. reset mode */
@@ -11746,7 +11691,7 @@ static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
old_plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(new_plane_state) || IS_ERR(old_plane_state)) {
- DRM_ERROR("Failed to get plane state for plane %s\n", plane->name);
+ drm_err(dev, "Failed to get plane state for plane %s\n", plane->name);
return false;
}
@@ -12315,7 +12260,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
if (!res) {
- DRM_ERROR("EDID CEA parser failed\n");
+ drm_err(adev_to_drm(dm->adev), "EDID CEA parser failed\n");
return false;
}
@@ -12323,7 +12268,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
if (output->type == DMUB_CMD__EDID_CEA_ACK) {
if (!output->ack.success) {
- DRM_ERROR("EDID CEA ack failed at offset %d\n",
+ drm_err(adev_to_drm(dm->adev), "EDID CEA ack failed at offset %d\n",
output->ack.offset);
}
} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
@@ -12335,7 +12280,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
} else {
- DRM_WARN("Unknown EDID CEA parser results\n");
+ drm_warn(adev_to_drm(dm->adev), "Unknown EDID CEA parser results\n");
return false;
}
@@ -12551,7 +12496,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
if (!connector->state) {
- DRM_ERROR("%s - Connector has no state", __func__);
+ drm_err(adev_to_drm(adev), "%s - Connector has no state", __func__);
goto update;
}
@@ -12736,7 +12681,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
}
if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
- DRM_ERROR("wait_for_completion_timeout timeout!");
+ drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!");
*operation_result = AUX_RET_ERROR_TIMEOUT;
goto out;
}
@@ -12747,11 +12692,11 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
* lead to this error. We can ignore this for now.
*/
if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) {
- DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
+ drm_warn(adev_to_drm(adev), "DPIA AUX failed on 0x%x(%d), error %d\n",
payload->address, payload->length,
p_notify->result);
}
- *operation_result = AUX_RET_ERROR_INVALID_REPLY;
+ *operation_result = p_notify->result;
goto out;
}
@@ -12774,6 +12719,79 @@ out:
return ret;
}
+static void abort_fused_io(
+ struct dc_context *ctx,
+ const struct dmub_cmd_fused_request *request
+)
+{
+ union dmub_rb_cmd command = { 0 };
+ struct dmub_rb_cmd_fused_io *io = &command.fused_io;
+
+ io->header.type = DMUB_CMD__FUSED_IO;
+ io->header.sub_type = DMUB_CMD__FUSED_IO_ABORT;
+ io->header.payload_bytes = sizeof(*io) - sizeof(io->header);
+ io->request = *request;
+ dm_execute_dmub_cmd(ctx, &command, DM_DMUB_WAIT_TYPE_NO_WAIT);
+}
+
+static bool execute_fused_io(
+ struct amdgpu_device *dev,
+ struct dc_context *ctx,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+)
+{
+ const uint8_t ddc_line = commands[0].fused_io.request.u.aux.ddc_line;
+
+ if (ddc_line >= ARRAY_SIZE(dev->dm.fused_io))
+ return false;
+
+ struct fused_io_sync *sync = &dev->dm.fused_io[ddc_line];
+ struct dmub_rb_cmd_fused_io *first = &commands[0].fused_io;
+ const bool result = dm_execute_dmub_cmd_list(ctx, count, commands, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
+ && first->header.ret_status
+ && first->request.status == FUSED_REQUEST_STATUS_SUCCESS;
+
+ if (!result)
+ return false;
+
+ while (wait_for_completion_timeout(&sync->replied, usecs_to_jiffies(timeout_us))) {
+ reinit_completion(&sync->replied);
+
+ struct dmub_cmd_fused_request *reply = (struct dmub_cmd_fused_request *) sync->reply_data;
+
+ static_assert(sizeof(*reply) <= sizeof(sync->reply_data), "Size mismatch");
+
+ if (reply->identifier == first->request.identifier) {
+ first->request = *reply;
+ return true;
+ }
+ }
+
+ reinit_completion(&sync->replied);
+ first->request.status = FUSED_REQUEST_STATUS_TIMEOUT;
+ abort_fused_io(ctx, &first->request);
+ return false;
+}
+
+bool amdgpu_dm_execute_fused_io(
+ struct amdgpu_device *dev,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us)
+{
+ struct amdgpu_display_manager *dm = &dev->dm;
+
+ mutex_lock(&dm->dpia_aux_lock);
+
+ const bool result = execute_fused_io(dev, link->ctx, commands, count, timeout_us);
+
+ mutex_unlock(&dm->dpia_aux_lock);
+ return result;
+}
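+
A sketch of the completion-based reply matching that execute_fused_io() implements above. struct reply_sync and its fields are illustrative stand-ins for the driver's fused_io_sync (the completion is presumably signaled by the DMUB notification path, which is not shown in these hunks), but wait_for_completion_timeout(), reinit_completion() and usecs_to_jiffies() are the real kernel primitives; this is a kernel-style fragment, not buildable outside a kernel tree:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct reply_sync {			/* illustrative stand-in */
	struct completion replied;	/* signaled when a reply lands */
	u64 reply_identifier;		/* identifier of the last reply */
};

/* Keep waiting while replies arrive in time, re-arm the completion
 * after every wakeup, and accept only the reply whose identifier
 * matches the request that was issued; stale replies are skipped. */
static bool wait_for_matching_reply(struct reply_sync *sync, u64 want,
				    u32 timeout_us)
{
	while (wait_for_completion_timeout(&sync->replied,
					   usecs_to_jiffies(timeout_us))) {
		reinit_completion(&sync->replied);
		if (sync->reply_identifier == want)
			return true;
	}
	reinit_completion(&sync->replied);
	return false;	/* timed out: the caller then sends an abort */
}
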
+
int amdgpu_dm_process_dmub_set_config_sync(
struct dc_context *ctx,
unsigned int link_index,
@@ -12792,7 +12810,7 @@ int amdgpu_dm_process_dmub_set_config_sync(
ret = 0;
*operation_result = adev->dm.dmub_notify->sc_status;
} else {
- DRM_ERROR("wait_for_completion_timeout timeout!");
+ drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!");
ret = -1;
*operation_result = SET_CONFIG_UNKNOWN_ERROR;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 385faaca6e26..d7d92f9911e4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -50,7 +50,7 @@
#define AMDGPU_DM_MAX_NUM_EDP 2
-#define AMDGPU_DMUB_NOTIFICATION_MAX 7
+#define AMDGPU_DMUB_NOTIFICATION_MAX 8
#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A
#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40
@@ -81,6 +81,7 @@ struct amdgpu_bo;
struct dmub_srv;
struct dc_plane_state;
struct dmub_notification;
+struct dmub_cmd_fused_request;
struct amd_vsdb_block {
unsigned char ieee_id[3];
@@ -276,6 +277,10 @@ struct hpd_rx_irq_offload_work {
* @offload_wq: offload work queue that this work is queued to
*/
struct hpd_rx_irq_offload_work_queue *offload_wq;
+ /**
+ * @adev: amdgpu_device pointer
+ */
+ struct amdgpu_device *adev;
};
/**
@@ -614,6 +619,13 @@ struct amdgpu_display_manager {
bool aux_hpd_discon_quirk;
/**
+ * @edp0_on_dp1_quirk:
+ *
+ * quirk for platforms that put edp0 on DP1.
+ */
+ bool edp0_on_dp1_quirk;
+
+ /**
* @dpia_aux_lock:
*
* Guards access to DPIA AUX
@@ -633,6 +645,16 @@ struct amdgpu_display_manager {
* OEM i2c bus
*/
struct amdgpu_i2c_adapter *oem_i2c;
+
+ /**
+ * @fused_io:
+ *
+ * dmub fused io interface
+ */
+ struct fused_io_sync {
+ struct completion replied;
+ char reply_data[0x40]; // Cannot include dmub_cmd here
+ } fused_io[8];
};
enum dsc_clock_force_state {
@@ -1012,6 +1034,14 @@ extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index,
struct aux_payload *payload, enum aux_return_code_type *operation_result);
+bool amdgpu_dm_execute_fused_io(
+ struct amdgpu_device *dev,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+);
+
int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index,
struct set_config_cmd_payload *payload, enum set_config_status *operation_result);
@@ -1045,4 +1075,6 @@ void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector);
void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector);
int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector);
+void retrieve_dmi_info(struct amdgpu_display_manager *dm);
+
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 8f22ad966543..c16962256514 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -26,6 +26,7 @@
#include "amdgpu_dm_hdcp.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
+#include "dc_fused_io.h"
#include "dm_helpers.h"
#include <drm/display/drm_hdcp_helper.h>
#include "hdcp_psp.h"
@@ -76,6 +77,34 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
}
+static bool lp_atomic_write_poll_read_i2c(
+ void *handle,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ struct dc_link *link = handle;
+
+ return dm_atomic_write_poll_read_i2c(link, write, poll, read, poll_timeout_us, poll_mask_msb);
+}
+
+static bool lp_atomic_write_poll_read_aux(
+ void *handle,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ struct dc_link *link = handle;
+
+ return dm_atomic_write_poll_read_aux(link, write, poll, read, poll_timeout_us, poll_mask_msb);
+}
+
static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
{
struct ta_hdcp_shared_memory *hdcp_cmd;
@@ -732,7 +761,10 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
- hdcp_work[i].hdcp.config.psp.handle = &adev->psp;
+ struct mod_hdcp_config *config = &hdcp_work[i].hdcp.config;
+ struct mod_hdcp_ddc_funcs *ddc_funcs = &config->ddc.funcs;
+
+ config->psp.handle = &adev->psp;
if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
dc->ctx->dce_version == DCN_VERSION_3_14 ||
dc->ctx->dce_version == DCN_VERSION_3_15 ||
@@ -740,12 +772,22 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
dc->ctx->dce_version == DCN_VERSION_3_51 ||
dc->ctx->dce_version == DCN_VERSION_3_6 ||
dc->ctx->dce_version == DCN_VERSION_3_16)
- hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1;
- hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
- hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
- hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
- hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd;
- hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd;
+ config->psp.caps.dtm_v3_supported = 1;
+ config->ddc.handle = dc_get_link_at_index(dc, i);
+
+ ddc_funcs->write_i2c = lp_write_i2c;
+ ddc_funcs->read_i2c = lp_read_i2c;
+ ddc_funcs->write_dpcd = lp_write_dpcd;
+ ddc_funcs->read_dpcd = lp_read_dpcd;
+
+ config->debug.lc_enable_sw_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
+ if (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable) {
+ ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c;
+ ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux;
+ } else {
+ ddc_funcs->atomic_write_poll_read_i2c = NULL;
+ ddc_funcs->atomic_write_poll_read_aux = NULL;
+ }
memset(hdcp_work[i].aconnector, 0,
sizeof(struct amdgpu_dm_connector *) *
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 1395a748d726..d4395b92fb85 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -630,6 +630,19 @@ bool dm_helpers_submit_i2c(
return result;
}
+bool dm_helpers_execute_fused_io(
+ struct dc_context *ctx,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+)
+{
+ struct amdgpu_device *dev = ctx->driver_context;
+
+ return amdgpu_dm_execute_fused_io(dev, link, commands, count, timeout_us);
+}
+
static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
bool is_write_cmd,
unsigned char cmd,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 5cdbc86ef8f5..25e8befbcc47 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1739,16 +1739,17 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
struct dc_dsc_bw_range *bw_range)
{
struct dc_dsc_policy dsc_policy = {0};
+ bool is_dsc_possible;
dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link));
- dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
- stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
- dsc_policy.min_target_bpp * 16,
- dsc_policy.max_target_bpp * 16,
- &stream->sink->dsc_caps.dsc_dec_caps,
- &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);
-
- return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
+ is_dsc_possible = dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
+ stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
+ dsc_policy.min_target_bpp * 16,
+ dsc_policy.max_target_bpp * 16,
+ &stream->sink->dsc_caps.dsc_dec_caps,
+ &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);
+
+ return is_dsc_possible;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 3e0f45f1711c..b7c6e8d13435 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -948,13 +948,13 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
r = amdgpu_bo_reserve(rbo, true);
if (r) {
- dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+ drm_err(adev_to_drm(adev), "fail to reserve bo (%d)\n", r);
return r;
}
r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
if (r) {
- dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
+ drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r);
goto error_unlock;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index e140b7a04d72..f984cb0cb889 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -87,14 +87,6 @@ void amdgpu_dm_set_psr_caps(struct dc_link *link)
link->psr_settings.psr_feature_enabled = true;
}
-
- DRM_INFO("PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
- link->psr_settings.psr_feature_enabled,
- link->psr_settings.psr_version,
- link->dpcd_caps.psr_info.psr_version,
- link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
- link->dpcd_caps.psr_info.psr2_su_y_granularity_cap);
-
}
/*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c
new file mode 100644
index 000000000000..1da07ebf9217
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/dmi.h>
+
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+
+struct amdgpu_dm_quirks {
+ bool aux_hpd_discon;
+ bool support_edp0_on_dp1;
+};
+
+static struct amdgpu_dm_quirks quirk_entries = {
+ .aux_hpd_discon = false,
+ .support_edp0_on_dp1 = false
+};
+
+static int edp0_on_dp1_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.support_edp0_on_dp1 = true;
+ return 0;
+}
+
+static int aux_hpd_discon_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.aux_hpd_discon = true;
+ return 0;
+}
+
+static const struct dmi_system_id dmi_quirk_table[] = {
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"),
+ },
+ },
+ {}
+ /* TODO: refactor this from a fixed table to a dynamic option */
+};
+
+void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+{
+ struct drm_device *dev = dm->ddev;
+ int dmi_id;
+
+ dm->aux_hpd_discon_quirk = false;
+ dm->edp0_on_dp1_quirk = false;
+
+ dmi_id = dmi_check_system(dmi_quirk_table);
+
+ if (!dmi_id)
+ return;
+
+ if (quirk_entries.aux_hpd_discon) {
+ dm->aux_hpd_discon_quirk = true;
+ drm_info(dev, "aux_hpd_discon_quirk attached\n");
+ }
+ if (quirk_entries.support_edp0_on_dp1) {
+ dm->edp0_on_dp1_quirk = true;
+ drm_info(dev, "support_edp0_on_dp1 attached\n");
+ }
+}
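+
The quirk table above follows the standard DMI matching pattern. A hedged sketch of that pattern with placeholder vendor and product strings: dmi_check_system() and DMI_MATCH() are the real <linux/dmi.h> interfaces, while the flag, table and function names are hypothetical:

#include <linux/dmi.h>
#include <linux/printk.h>

static bool example_quirk;	/* hypothetical flag, like quirk_entries */

static int example_callback(const struct dmi_system_id *id)
{
	example_quirk = true;
	return 0;	/* non-zero would stop walking the table */
}

static const struct dmi_system_id example_table[] = {
	{
		.callback = example_callback,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Some Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Some Product"),
		},
	},
	{}	/* zeroed sentinel terminates the table */
};

/* dmi_check_system() runs the callback of every entry whose DMI_MATCH
 * fields all match the running platform and returns the match count,
 * which is why retrieve_dmi_info() can bail out early when it is 0. */
static void apply_example_quirks(void)
{
	if (dmi_check_system(example_table) && example_quirk)
		pr_info("example quirk attached\n");
}
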
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
index 0d5fefb0f591..d9527c05fc87 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
@@ -102,13 +102,13 @@ static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector
r = amdgpu_bo_reserve(rbo, true);
if (r) {
- dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+ drm_err(adev_to_drm(adev), "fail to reserve bo (%d)\n", r);
return r;
}
r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
if (r) {
- dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
+ drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r);
goto error_unlock;
}
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 3e1f5b689718..3c9ecea7eebc 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -53,31 +53,30 @@ DC_LIBS += hdcp
ifdef CONFIG_DRM_AMD_DC_FP
DC_LIBS += sspl
-DC_SPL_TRANS += dc_spl_translate.o
+AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/, dc_spl_translate.o)
endif
AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
include $(AMD_DC)
-DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
-dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o dc_state.o
+FILES =
+FILES += dc_dmub_srv.o
+FILES += dc_edid_parser.o
+FILES += dc_fused_io.o
+FILES += dc_helper.o
+FILES += core/dc.o
+FILES += core/dc_debug.o
+FILES += core/dc_hw_sequencer.o
+FILES += core/dc_link_enc_cfg.o
+FILES += core/dc_link_exports.o
+FILES += core/dc_resource.o
+FILES += core/dc_sink.o
+FILES += core/dc_stat.o
+FILES += core/dc_state.o
+FILES += core/dc_stream.o
+FILES += core/dc_surface.o
+FILES += core/dc_vm_helper.o
+
+AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/, $(FILES))
-DISPLAY_CORE += dc_vm_helper.o
-
-AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE))
-
-AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o)
-
-AMD_DC_SPL_TRANS = $(addprefix $(AMDDALPATH)/dc/,$(DC_SPL_TRANS))
-
-AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
-AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
-
-DC_DMUB += dc_dmub_srv.o
-DC_EDID += dc_edid_parser.o
-AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
-AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID))
-AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID)
-
-AMD_DISPLAY_FILES += $(AMD_DC_SPL_TRANS)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 88d3f9d7dd55..452206b5095e 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -51,8 +51,6 @@ static inline unsigned long long complete_integer_division_u64(
{
unsigned long long result;
- ASSERT(divisor);
-
result = div64_u64_rem(dividend, divisor, remainder);
return result;
@@ -213,9 +211,6 @@ struct fixed31_32 dc_fixpt_recip(struct fixed31_32 arg)
* @note
* Good idea to use Newton's method
*/
-
- ASSERT(arg.value);
-
return dc_fixpt_from_fraction(
dc_fixpt_one.value,
arg.value);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 3bacf470f7c5..67f08495b7e6 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -2384,10 +2384,10 @@ static enum bp_result get_integrated_info_v8(
}
/*
- * get_integrated_info_v8
+ * get_integrated_info_v9
*
* @brief
- * Get V8 integrated BIOS information
+ * Get V9 integrated BIOS information
*
* @param
* bios_parser *bp - [in]BIOS parser handler to get master data table
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index 19897fa52e7e..d82a52319088 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -142,17 +142,3 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di
return actual_dispclk_set_mhz * 1000;
}
-
-int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
-{
- int actual_dprefclk_set_mhz = -1;
-
- actual_dprefclk_set_mhz = rv1_vbios_smu_send_msg_with_param(
- clk_mgr,
- VBIOSSMC_MSG_SetDprefclkFreq,
- khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
-
- /* TODO: add code for programing DP DTO, currently this is down by command table */
-
- return actual_dprefclk_set_mhz * 1000;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h
index 083cb3158859..81d7c912549c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h
@@ -27,6 +27,5 @@
#define DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_
int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
-int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr);
#endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 23b390245b5d..5a633333dbb5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -164,20 +164,6 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis
return actual_dispclk_set_mhz * 1000;
}
-int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
-{
- int actual_dprefclk_set_mhz = -1;
-
- actual_dprefclk_set_mhz = rn_vbios_smu_send_msg_with_param(
- clk_mgr,
- VBIOSSMC_MSG_SetDprefclkFreq,
- khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
-
- /* TODO: add code for programing DP DTO, currently this is down by command table */
-
- return actual_dprefclk_set_mhz * 1000;
-}
-
int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz)
{
int actual_dcfclk_set_mhz = -1;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
index 1ce19d875358..f76fad87f0e1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
@@ -30,7 +30,6 @@ enum dcn_pwr_state;
int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr);
int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
-int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr);
int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz);
int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz);
void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
index 2d14346b680e..478b4d6a3544 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
@@ -49,12 +49,9 @@ static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E0000
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000, 0x04040000 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
+
+#define CTX clk_mgr->base.ctx
+#define IND_REG(offset) offset
#define regBIF_BX_PF2_RSMU_INDEX 0x0000
#define regBIF_BX_PF2_RSMU_INDEX_BASE_IDX 1
@@ -67,9 +64,6 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
#define FN(reg_name, field) \
FD(reg_name##__##field)
-#define REG_NBIO(reg_name) \
- (NBIO_BASE.instance[0].segment[regBIF_BX_PF2_ ## reg_name ## _BASE_IDX] + regBIF_BX_PF2_ ## reg_name)
-
#undef DC_LOGGER
#define DC_LOGGER \
CTX->logger
@@ -77,6 +71,13 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
#define mmMP1_C2PMSG_3 0x3B1050C
+#define reg__MP1_C2PMSG_3_MASK (0xFFFFFFFF)
+#define reg__MP1_C2PMSG_3__SHIFT (0)
+
+
+#define data_reg_name__MP1_C2PMSG_3_MASK (0xFFFFFFFF)
+#define data_reg_name__MP1_C2PMSG_3__SHIFT (0)
+
#define VBIOSSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
#define VBIOSSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version
#define VBIOSSMC_MSG_Spare0 0x03 ///< Spare0
@@ -153,12 +154,10 @@ static int dcn315_smu_send_msg_with_param(
for (i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) {
/* Trigger the message transaction by writing the message ID */
- generic_write_indirect_reg(CTX,
- REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
- mmMP1_C2PMSG_3, msg_id);
- read_back_data = generic_read_indirect_reg(CTX,
- REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
- mmMP1_C2PMSG_3);
+ IX_REG_SET_SYNC(mmMP1_C2PMSG_3, 0,
+ MP1_C2PMSG_3, msg_id);
+ IX_REG_GET_SYNC(mmMP1_C2PMSG_3,
+ MP1_C2PMSG_3, &read_back_data);
if (read_back_data == msg_id)
break;
udelay(2);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
index 6a6ae618650b..4607eff07253 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
@@ -65,6 +65,7 @@
#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
#define mmCLK5_spll_field_8 0x1B04B
+#define mmCLK6_spll_field_8 0x1B24B
#define mmDENTIST_DISPCLK_CNTL 0x0124
#define regDENTIST_DISPCLK_CNTL 0x0064
#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 142de8938d7c..bb1ac12a2b09 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -90,6 +90,7 @@
#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
#define mmCLK5_spll_field_8 0x1B24B
+#define mmCLK6_spll_field_8 0x1B24B
#define mmDENTIST_DISPCLK_CNTL 0x0124
#define regDENTIST_DISPCLK_CNTL 0x0064
#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
@@ -116,6 +117,7 @@
#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+#define CLK6_spll_field_8__spll_ssc_en_MASK 0x00002000L
#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
#undef FN
@@ -596,7 +598,11 @@ static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
uint32_t ssc_enable;
- ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK;
+ if (clk_mgr_base->ctx->dce_version == DCN_VERSION_3_51) {
+ ssc_enable = REG_READ(CLK6_spll_field_8) & CLK6_spll_field_8__spll_ssc_en_MASK;
+ } else {
+ ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK;
+ }
return ssc_enable != 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
index f6f0e6a33001..604d256cb47a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
@@ -84,8 +84,8 @@
#define VBIOSSMC_MSG_AllowZstatesEntry 0x15
#define VBIOSSMC_MSG_DisallowZstatesEntry 0x16
#define VBIOSSMC_MSG_SetDtbClk 0x17
-#define VBIOSSMC_MSG_DispPsrEntry 0x18 ///< Display PSR entry, DMU
-#define VBIOSSMC_MSG_DispPsrExit 0x19 ///< Display PSR exit, DMU
+#define VBIOSSMC_MSG_DispIPS2Entry 0x18 ///< Display IPS2 entry, DMU
+#define VBIOSSMC_MSG_DispIPS2Exit 0x19 ///< Display IPS2 exit, DMU
#define VBIOSSMC_MSG_DisableLSdma 0x1A ///< Disable LSDMA; only sent by VBIOS
#define VBIOSSMC_MSG_DpControllerPhyStatus 0x1B ///< Inform PMFW about the pre conditions for turning SLDO2 on/off . bit[0]==1 precondition is met, bit[1-2] are for DPPHY number
#define VBIOSSMC_MSG_QueryIPS2Support 0x1C ///< Return 1: support; else not supported
@@ -475,7 +475,7 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
retv = dcn35_smu_send_msg_with_param(
clk_mgr,
- VBIOSSMC_MSG_DispPsrExit,
+ VBIOSSMC_MSG_DispIPS2Exit,
0);
smu_print("%s: smu_exit_low_power_state return = %d\n", __func__, retv);
return retv;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index ba4ce8a63158..56d011a1323c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -36,7 +36,9 @@
#include "resource.h"
#include "dc_state.h"
#include "dc_state_priv.h"
+#include "dc_plane.h"
#include "dc_plane_priv.h"
+#include "dc_stream_priv.h"
#include "gpio_service_interface.h"
#include "clk_mgr.h"
@@ -1195,6 +1197,12 @@ static void apply_ctx_interdependent_lock(struct dc *dc,
static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
+ if (dc->debug.visual_confirm & VISUAL_CONFIRM_EXPLICIT) {
+ memcpy(&pipe_ctx->visual_confirm_color, &pipe_ctx->plane_state->visual_confirm_color,
+ sizeof(pipe_ctx->visual_confirm_color));
+ return;
+ }
+
if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));
@@ -1228,6 +1236,51 @@ static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *conte
}
}
+void dc_get_visual_confirm_for_stream(
+ struct dc *dc,
+ struct dc_stream_state *stream_state,
+ struct tg_color *color)
+{
+ struct dc_stream_status *stream_status = dc_stream_get_status(stream_state);
+ struct pipe_ctx *pipe_ctx;
+ int i;
+ struct dc_plane_state *plane_state = NULL;
+
+ if (!stream_status)
+ return;
+
+ switch (dc->debug.visual_confirm) {
+ case VISUAL_CONFIRM_DISABLE:
+ return;
+ case VISUAL_CONFIRM_PSR:
+ case VISUAL_CONFIRM_FAMS:
+ pipe_ctx = dc_stream_get_pipe_ctx(stream_state);
+ if (!pipe_ctx)
+ return;
+ dc_dmub_srv_get_visual_confirm_color_cmd(dc, pipe_ctx);
+ memcpy(color, &dc->ctx->dmub_srv->dmub->visual_confirm_color, sizeof(struct tg_color));
+ return;
+
+ default:
+ /* find plane with highest layer_index */
+ for (i = 0; i < stream_status->plane_count; i++) {
+ if (stream_status->plane_states[i]->visible)
+ plane_state = stream_status->plane_states[i];
+ }
+ if (!plane_state)
+ return;
+ /* find pipe that contains plane with highest layer index */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state == plane_state) {
+ memcpy(color, &pipe->visual_confirm_color, sizeof(struct tg_color));
+ return;
+ }
+ }
+ }
+}
+
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
@@ -2056,6 +2109,18 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.enable_accelerated_mode(dc, context);
}
+ if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ // Only delay the OTG master for a given config
+ if (resource_is_pipe_type(pipe, OTG_MASTER)) {
+ // dc_commit_state_no_check is always a full update
+ dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, pipe, false);
+ break;
+ }
+ }
+ }
+
if (context->stream_count > get_seamless_boot_stream_count(context) ||
context->stream_count == 0)
dc->hwss.prepare_bandwidth(dc, context);
@@ -2120,6 +2185,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (dc->hwss.program_front_end_for_ctx) {
dc->hwss.interdependent_update_lock(dc, context, true);
dc->hwss.program_front_end_for_ctx(dc, context);
+
+ if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe);
+ }
+ }
+
dc->hwss.interdependent_update_lock(dc, context, false);
dc->hwss.post_unlock_program_front_end(dc, context);
}
@@ -2261,11 +2334,15 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
for (i = 0; i < params->stream_count; i++) {
struct dc_stream_state *stream = params->streams[i];
struct dc_stream_status *status = dc_stream_get_status(stream);
+ struct dc_sink *sink = stream->sink;
/* revalidate streams */
- res = dc_validate_stream(dc, stream);
- if (res != DC_OK)
- return res;
+ if (!dc_is_virtual_signal(sink->sink_signal)) {
+ res = dc_validate_stream(dc, stream);
+ if (res != DC_OK)
+ return res;
+ }
+
dc_stream_log(dc, stream);
@@ -2818,7 +2895,7 @@ static enum surface_update_type check_update_surfaces_for_stream(
int i;
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
- if (dc->idle_optimizations_allowed)
+ if (dc->idle_optimizations_allowed || dc_can_clear_cursor_limit(dc))
overall_type = UPDATE_TYPE_FULL;
if (stream_status == NULL || stream_status->plane_count != surface_count)
@@ -3223,7 +3300,7 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (dsc_validate_context) {
stream->timing.dsc_cfg = *update->dsc_config;
stream->timing.flags.DSC = enable_dsc;
- if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true) != DC_OK) {
stream->timing.dsc_cfg = old_dsc_cfg;
stream->timing.flags.DSC = old_dsc_enabled;
update->dsc_config = NULL;
@@ -3252,7 +3329,7 @@ static void backup_planes_and_stream_state(
return;
for (i = 0; i < status->plane_count; i++) {
- scratch->plane_states[i] = *status->plane_states[i];
+ dc_plane_copy_config(&scratch->plane_states[i], status->plane_states[i]);
}
scratch->stream_state = *stream;
}
@@ -3268,10 +3345,7 @@ static void restore_planes_and_stream_state(
return;
for (i = 0; i < status->plane_count; i++) {
- /* refcount will always be valid, restore everything else */
- struct kref refcount = status->plane_states[i]->refcount;
- *status->plane_states[i] = scratch->plane_states[i];
- status->plane_states[i]->refcount = refcount;
+ dc_plane_copy_config(status->plane_states[i], &scratch->plane_states[i]);
}
*stream = scratch->stream_state;
}
@@ -3448,7 +3522,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
if (update_type == UPDATE_TYPE_FULL) {
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) {
BREAK_TO_DEBUGGER();
goto fail;
}
@@ -4002,6 +4076,7 @@ static void commit_planes_for_stream(struct dc *dc,
&context->res_ctx,
stream);
ASSERT(top_pipe_to_program != NULL);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -4052,6 +4127,9 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.wait_for_dcc_meta_propagation(dc, top_pipe_to_program);
}
+ if (dc->hwseq->funcs.wait_for_pipe_update_if_needed)
+ dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type == UPDATE_TYPE_FAST);
+
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
@@ -4172,12 +4250,6 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type == UPDATE_TYPE_FAST)
continue;
- ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
- if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
- /*turn off triple buffer for full update*/
- dc->hwss.program_triplebuffer(
- dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
- }
stream_status =
stream_get_status(context, pipe_ctx->stream);
@@ -4186,8 +4258,37 @@ static void commit_planes_for_stream(struct dc *dc,
dc, pipe_ctx->stream, stream_status->plane_count, context);
}
}
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ /* Full FE update */
+ if (update_type == UPDATE_TYPE_FAST)
+ continue;
+
+ ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
+ /* turn off triple buffer for full update */
+ dc->hwss.program_triplebuffer(
+ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ }
+ }
+
if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
dc->hwss.program_front_end_for_ctx(dc, context);
+
+ // Pipe stays busy until a given frame and line number
+ if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe && update_type == UPDATE_TYPE_FULL) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe_ctx);
+ }
+ }
+
if (dc->debug.validate_dml_output) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
@@ -4527,7 +4628,7 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
backup_and_set_minimal_pipe_split_policy(dc, base_context, policy);
/* commit minimal state */
- if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false) == DC_OK) {
/* prevent underflow and corruption when reconfiguring pipes */
force_vsync_flip_in_minimal_transition_context(minimal_transition_context);
} else {
@@ -4962,6 +5063,9 @@ static bool full_update_required(struct dc *dc,
if (dc->idle_optimizations_allowed)
return true;
+ if (dc_can_clear_cursor_limit(dc))
+ return true;
+
return false;
}
@@ -5047,7 +5151,7 @@ static bool update_planes_and_stream_v1(struct dc *dc,
copy_stream_update_to_stream(dc, context, stream, stream_update);
if (update_type >= UPDATE_TYPE_FULL) {
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) {
DC_ERROR("Mode validation failed for stream update!\n");
dc_state_release(context);
return false;
@@ -6191,15 +6295,22 @@ bool dc_abm_save_restore(
void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
{
unsigned int i;
- bool subvp_sw_cursor_req = false;
+ unsigned int max_cursor_size = dc->caps.max_cursor_size;
+ unsigned int stream_cursor_size;
- for (i = 0; i < dc->current_state->stream_count; i++) {
- if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i]) && !dc->current_state->streams[i]->hw_cursor_req) {
- subvp_sw_cursor_req = true;
- break;
+ if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) {
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ stream_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc,
+ dc->current_state,
+ dc->current_state->streams[i]);
+
+ if (stream_cursor_size < max_cursor_size) {
+ max_cursor_size = stream_cursor_size;
+ }
}
}
- properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size;
+
+ properties->cursor_size_limit = max_cursor_size;
}
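
A worked example of the limit computed above (the numbers are hypothetical): the limit starts at dc->caps.max_cursor_size and is clamped down to the smallest per-stream value returned by the get_max_hw_cursor_size() hook.

unsigned int limit = 256;			/* dc->caps.max_cursor_size */
unsigned int per_stream[] = { 256, 64 };	/* hypothetical hook results */
unsigned int i;

for (i = 0; i < 2; i++)
	if (per_stream[i] < limit)
		limit = per_stream[i];
/* limit == 64: a 64x64 ARGB cursor needs 64 * 64 * 4 = 16384 bytes,
 * which matches the SRAM bound dc_stream_check_cursor_attributes()
 * enforces via max_cursor_size * max_cursor_size * 4. */
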
/**
@@ -6265,3 +6376,27 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
else
return 0;
}
+
+bool dc_is_cursor_limit_pending(struct dc *dc)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc_stream_is_cursor_limit_pending(dc, dc->current_state->streams[i]))
+ return true;
+ }
+
+ return false;
+}
+
+bool dc_can_clear_cursor_limit(struct dc *dc)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc_state_can_clear_stream_cursor_subvp_limit(dc->current_state->streams[i], dc->current_state))
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 650e89825968..7551d0a3fe82 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -266,6 +266,8 @@ char *dc_status_to_str(enum dc_status status)
return "Fail dp payload allocation";
case DC_FAIL_DP_LINK_BANDWIDTH:
return "Insufficient DP link bandwidth";
+ case DC_FAIL_HW_CURSOR_SUPPORT:
+ return "HW Cursor not supported";
case DC_ERROR_UNEXPECTED:
return "Unexpected error";
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 55b32dfbfdd6..7014b8d000bb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -697,7 +697,7 @@ void get_fams2_visual_confirm_color(
void hwss_build_fast_sequence(struct dc *dc,
struct dc_dmub_cmd *dc_dmub_cmd,
unsigned int dmub_cmd_count,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
unsigned int *num_steps,
struct pipe_ctx *pipe_ctx,
struct dc_stream_status *stream_status,
@@ -896,7 +896,7 @@ void hwss_build_fast_sequence(struct dc *dc,
}
void hwss_execute_sequence(struct dc *dc,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
int num_steps)
{
unsigned int i;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 313a32248cd7..3da25bd8b578 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1342,32 +1342,6 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
data->viewport_c.y += src.y / vpc_div;
}
-static bool is_subvp_high_refresh_candidate(struct dc_stream_state *stream)
-{
- uint32_t refresh_rate;
- struct dc *dc = stream->ctx->dc;
-
- refresh_rate = (stream->timing.pix_clk_100hz * (uint64_t)100 +
- stream->timing.v_total * stream->timing.h_total - (uint64_t)1);
- refresh_rate = div_u64(refresh_rate, stream->timing.v_total);
- refresh_rate = div_u64(refresh_rate, stream->timing.h_total);
-
- /* If there's any stream that fits the SubVP high refresh criteria,
- * we must return true. This is because cursor updates are asynchronous
- * with full updates, so we could transition into a SubVP config and
- * remain in HW cursor mode if there's no cursor update which will
- * then cause corruption.
- */
- if ((refresh_rate >= 120 && refresh_rate <= 175 &&
- stream->timing.v_addressable >= 1080 &&
- stream->timing.v_addressable <= 2160) &&
- (dc->current_state->stream_count > 1 ||
- (dc->current_state->stream_count == 1 && !stream->allow_freesync)))
- return true;
-
- return false;
-}
-
static enum controller_dp_test_pattern convert_dp_to_controller_test_pattern(
enum dp_test_pattern test_pattern)
{
@@ -3937,6 +3911,10 @@ enum dc_status resource_map_pool_resources(
if (!dc->link_srv->dp_decide_link_settings(stream,
&pipe_ctx->link_config.dp_link_settings))
return DC_FAIL_DP_LINK_BANDWIDTH;
+
+ dc->link_srv->dp_decide_tunnel_settings(stream,
+ &pipe_ctx->link_config.dp_tunnel_settings);
+
if (dc->link_srv->dp_get_encoding_format(
&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
pipe_ctx->stream_res.hpo_dp_stream_enc =
@@ -4259,6 +4237,11 @@ enum dc_status dc_validate_with_context(struct dc *dc,
}
}
+ /* clear subvp cursor limitations */
+ for (i = 0; i < context->stream_count; i++) {
+ dc_state_set_stream_subvp_cursor_limit(context->streams[i], context, false);
+ }
+
res = dc_validate_global_state(dc, context, fast_validate);
/* calculate pixel rate divider after deciding pixel clock & odm combine */
@@ -4385,8 +4368,7 @@ enum dc_status dc_validate_global_state(
result = resource_build_scaling_params_for_context(dc, new_ctx);
if (result == DC_OK)
- if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
- result = DC_FAIL_BANDWIDTH_VALIDATE;
+ result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate);
return result;
}
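
This is the recurring change across dc.c and dc_resource.c: validate_bandwidth() now returns enum dc_status instead of bool, so callers compare against DC_OK and can propagate the specific failure (such as the new DC_FAIL_HW_CURSOR_SUPPORT) instead of collapsing everything into DC_FAIL_BANDWIDTH_VALIDATE. A fragment of the new calling convention, using the names from the hunk above:

enum dc_status res;

res = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate);
if (res != DC_OK)
	return res;	/* the caller sees the real failure reason */
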
@@ -5538,23 +5520,17 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
return DC_OK;
}
-bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream)
+struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx)
{
- if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream))
- return true;
- if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 &&
- ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
- return true;
- else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 1080 &&
- ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
- return true;
-
- return false;
+ return &pipe_ctx->plane_res.scl_data.dscl_prog_data;
}
-struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx)
+static bool resource_allocate_mcache(struct dc_state *context, const struct dc_mcache_params *mcache_params)
{
- return &pipe_ctx->plane_res.scl_data.dscl_prog_data;
+ if (context->clk_mgr->ctx->dc->res_pool->funcs->program_mcache_pipe_config)
+ context->clk_mgr->ctx->dc->res_pool->funcs->program_mcache_pipe_config(context, mcache_params);
+
+ return true;
}
void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options)
@@ -5576,6 +5552,7 @@ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuratio
dml2_options->callbacks.get_stream_status = &dc_state_get_stream_status;
dml2_options->callbacks.get_stream_from_id = &dc_state_get_stream_from_id;
dml2_options->callbacks.get_max_flickerless_instant_vtotal_increase = &dc_stream_get_max_flickerless_instant_vtotal_increase;
+ dml2_options->callbacks.allocate_mcache = &resource_allocate_mcache;
dml2_options->svp_pstate.callbacks.dc = dc;
dml2_options->svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 1b2cce127981..4db7383720fd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -22,6 +22,7 @@
* Authors: AMD
*
*/
+#include "dc_types.h"
#include "core_types.h"
#include "core_status.h"
#include "dc_state.h"
@@ -812,8 +813,12 @@ enum dc_status dc_state_add_phantom_stream(const struct dc *dc,
if (phantom_stream_status) {
phantom_stream_status->mall_stream_config.type = SUBVP_PHANTOM;
phantom_stream_status->mall_stream_config.paired_stream = main_stream;
+ phantom_stream_status->mall_stream_config.subvp_limit_cursor_size = false;
+ phantom_stream_status->mall_stream_config.cursor_size_limit_subvp = false;
}
+ dc_state_set_stream_subvp_cursor_limit(main_stream, state, true);
+
return res;
}
@@ -939,13 +944,20 @@ void dc_state_release_phantom_streams_and_planes(
const struct dc *dc,
struct dc_state *state)
{
+ unsigned int phantom_count;
+ struct dc_stream_state *phantom_streams[MAX_PHANTOM_PIPES];
+ struct dc_plane_state *phantom_planes[MAX_PHANTOM_PIPES];
int i;
- for (i = 0; i < state->phantom_stream_count; i++)
- dc_state_release_phantom_stream(dc, state, state->phantom_streams[i]);
+ phantom_count = state->phantom_stream_count;
+ memcpy(phantom_streams, state->phantom_streams, sizeof(struct dc_stream_state *) * MAX_PHANTOM_PIPES);
+ for (i = 0; i < phantom_count; i++)
+ dc_state_release_phantom_stream(dc, state, phantom_streams[i]);
- for (i = 0; i < state->phantom_plane_count; i++)
- dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]);
+ phantom_count = state->phantom_plane_count;
+ memcpy(phantom_planes, state->phantom_planes, sizeof(struct dc_plane_state *) * MAX_PHANTOM_PIPES);
+ for (i = 0; i < phantom_count; i++)
+ dc_state_release_phantom_plane(dc, state, phantom_planes[i]);
}
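
The hunk above snapshots the phantom arrays before releasing them because dc_state_release_phantom_stream() and dc_state_release_phantom_plane() remove entries from the very arrays being walked; iterating the live array would skip elements. A generic sketch of that snapshot-then-release pattern, with all names illustrative:

#include <string.h>

#define MAX_ITEMS 8	/* illustrative bound, like MAX_PHANTOM_PIPES */

struct item;

struct item_list {
	struct item *items[MAX_ITEMS];
	unsigned int count;
};

static void release_all(struct item_list *list,
			void (*release)(struct item_list *, struct item *))
{
	struct item *snapshot[MAX_ITEMS];
	unsigned int i, n = list->count;

	/* release() removes entries from list->items as it runs, so
	 * walk a stable copy instead of the live array. */
	memcpy(snapshot, list->items, sizeof(snapshot));
	for (i = 0; i < n; i++)
		release(list, snapshot[i]);
}
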
struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id)
@@ -977,3 +989,94 @@ bool dc_state_is_fams2_in_use(
return is_fams2_in_use;
}
+
+void dc_state_set_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit)
+{
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ stream_status->mall_stream_config.subvp_limit_cursor_size = limit;
+ }
+}
+
+bool dc_state_get_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state)
+{
+ bool limit = false;
+
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ limit = stream_status->mall_stream_config.subvp_limit_cursor_size;
+ }
+
+ return limit;
+}
+
+void dc_state_set_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit)
+{
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ stream_status->mall_stream_config.cursor_size_limit_subvp = limit;
+ }
+}
+
+bool dc_state_get_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state)
+{
+ bool limit = false;
+
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ limit = stream_status->mall_stream_config.cursor_size_limit_subvp;
+ }
+
+ return limit;
+}
+
+bool dc_state_can_clear_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state)
+{
+ bool can_clear_limit = false;
+
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ can_clear_limit = dc_state_get_stream_cursor_subvp_limit(stream, state) &&
+ (stream_status->mall_stream_config.type == SUBVP_PHANTOM ||
+ stream->hw_cursor_req ||
+ !stream_status->mall_stream_config.subvp_limit_cursor_size ||
+ !stream->cursor_position.enable ||
+ dc_stream_check_cursor_attributes(stream, state, &stream->cursor_attributes));
+ }
+
+ return can_clear_limit;
+}
+
+bool dc_state_is_subvp_in_use(struct dc_state *state)
+{
+ uint32_t i;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (dc_state_get_stream_subvp_type(state, state->streams[i]) != SUBVP_NONE)
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 0478dd856d8c..b883fb24fa12 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -265,13 +265,16 @@ void program_cursor_attributes(
}
/*
- * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
+ * dc_stream_check_cursor_attributes() - Check validity of cursor attributes and surface address
*/
-bool dc_stream_set_cursor_attributes(
- struct dc_stream_state *stream,
+bool dc_stream_check_cursor_attributes(
+ const struct dc_stream_state *stream,
+ struct dc_state *state,
const struct dc_cursor_attributes *attributes)
{
- struct dc *dc;
+ const struct dc *dc;
+
+ unsigned int max_cursor_size;
if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
@@ -289,24 +292,38 @@ bool dc_stream_set_cursor_attributes(
dc = stream->ctx->dc;
- /* SubVP is not compatible with HW cursor larger than 64 x 64 x 4.
- * Therefore, if cursor is greater than 64 x 64 x 4, fallback to SW cursor in the following case:
- * 1. If the config is a candidate for SubVP high refresh (both single an dual display configs)
- * 2. If not subvp high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz
- * 3. If not subvp high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz
+ /* SubVP is not compatible with HW cursor larger than what can fit in cursor SRAM.
+ * Therefore, if cursor is greater than this, fall back to SW cursor.
*/
- if (dc->debug.allow_sw_cursor_fallback &&
- attributes->height * attributes->width * 4 > 16384 &&
- !stream->hw_cursor_req) {
- if (check_subvp_sw_cursor_fallback_req(dc, stream))
+ if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) {
+ max_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc, state, stream);
+ max_cursor_size = max_cursor_size * max_cursor_size * 4;
+
+ if (attributes->height * attributes->width * 4 > max_cursor_size) {
return false;
+ }
}
- stream->cursor_attributes = *attributes;
-
return true;
}
+/*
+ * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
+ */
+bool dc_stream_set_cursor_attributes(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_attributes *attributes)
+{
+ bool result = false;
+
+ if (dc_stream_check_cursor_attributes(stream, stream->ctx->dc->current_state, attributes)) {
+ stream->cursor_attributes = *attributes;
+ result = true;
+ }
+
+ return result;
+}
+
bool dc_stream_program_cursor_attributes(
struct dc_stream_state *stream,
const struct dc_cursor_attributes *attributes)
@@ -552,6 +569,14 @@ bool dc_stream_fc_disable_writeback(struct dc *dc,
return true;
}
+/**
+ * dc_stream_remove_writeback() - Disables writeback and removes writeback info.
+ * @dc: Display core control structure.
+ * @stream: Display core stream state.
+ * @dwb_pipe_inst: Display writeback pipe.
+ *
+ * Return: true on success, false otherwise.
+ */
bool dc_stream_remove_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst)
@@ -1109,3 +1134,26 @@ unsigned int dc_stream_get_max_flickerless_instant_vtotal_increase(struct dc_str
return dc_stream_get_max_flickerless_instant_vtotal_delta(stream, is_gaming, false);
}
+
+bool dc_stream_is_cursor_limit_pending(struct dc *dc, struct dc_stream_state *stream)
+{
+ bool is_limit_pending = false;
+
+ if (dc->current_state)
+ is_limit_pending = dc_state_get_stream_cursor_subvp_limit(stream, dc->current_state);
+
+ return is_limit_pending;
+}
+
+bool dc_stream_can_clear_cursor_limit(struct dc *dc, struct dc_stream_state *stream)
+{
+ bool can_clear_limit = false;
+
+ if (dc->current_state)
+ can_clear_limit = dc_state_get_stream_cursor_subvp_limit(stream, dc->current_state) &&
+ (stream->hw_cursor_req ||
+ !stream->cursor_position.enable ||
+ dc_stream_check_cursor_attributes(stream, dc->current_state, &stream->cursor_attributes));
+
+ return can_clear_limit;
+}
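Editor's note: the refactor above splits validation from mutation — dc_stream_check_cursor_attributes() is a side-effect-free predicate against a given dc_state, and dc_stream_set_cursor_attributes() commits only when the check passes. A hedged usage sketch; the 64x64 cursor and use_sw_cursor() helper are illustrative, not part of the patch:

/* Sketch: validate-before-commit, mirroring the split above. */
static void try_set_cursor(struct dc *dc, struct dc_stream_state *stream)
{
	struct dc_cursor_attributes attrs = {0};

	attrs.width = 64;    /* illustrative 64x64 cursor */
	attrs.height = 64;

	if (!dc_stream_check_cursor_attributes(stream, dc->current_state, &attrs)) {
		/* too large for cursor SRAM with SubVP active */
		use_sw_cursor(stream);  /* hypothetical SW-cursor fallback */
		return;
	}
	if (dc_stream_set_cursor_attributes(stream, &attrs))
		dc_stream_program_cursor_attributes(stream, &attrs);
}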
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index e6fcc21bb9bc..922f23557f5d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -109,7 +109,8 @@ struct dc_plane_state *dc_create_plane_state(const struct dc *dc)
*****************************************************************************
*/
const struct dc_plane_status *dc_plane_get_status(
- const struct dc_plane_state *plane_state)
+ const struct dc_plane_state *plane_state,
+ union dc_plane_status_update_flags flags)
{
const struct dc_plane_status *plane_status;
struct dc *dc;
@@ -136,7 +137,7 @@ const struct dc_plane_status *dc_plane_get_status(
if (pipe_ctx->plane_state != plane_state)
continue;
- if (pipe_ctx->plane_state)
+ if (pipe_ctx->plane_state && flags.bits.address)
pipe_ctx->plane_state->status.is_flip_pending = false;
break;
@@ -151,7 +152,8 @@ const struct dc_plane_status *dc_plane_get_status(
if (pipe_ctx->plane_state != plane_state)
continue;
- dc->hwss.update_pending_status(pipe_ctx);
+ if (flags.bits.address)
+ dc->hwss.update_pending_status(pipe_ctx);
}
return plane_status;
@@ -294,3 +296,17 @@ void dc_plane_force_dcc_and_tiling_disable(struct dc_plane_state *plane_state,
dc->hwss.clear_surface_dcc_and_tiling(pipe_ctx, plane_state, clear_tiling);
}
}
+
+void dc_plane_copy_config(struct dc_plane_state *dst, const struct dc_plane_state *src)
+{
+ struct kref temp_refcount;
+
+ /* backup persistent info */
+ memcpy(&temp_refcount, &dst->refcount, sizeof(struct kref));
+
+ /* copy all configuration information */
+ memcpy(dst, src, sizeof(struct dc_plane_state));
+
+ /* restore persistent info */
+ memcpy(&dst->refcount, &temp_refcount, sizeof(struct kref));
+}
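Editor's note: dc_plane_copy_config() above copies the whole struct but must not clobber the plane's kref, so it saves the refcount, memcpy's everything, then restores it. The same save/copy/restore pattern in a self-contained sketch (names are stand-ins):

#include <string.h>

struct refcount { int count; };          /* stand-in for struct kref */

struct object {
	struct refcount refcount;        /* persistent: must survive copy */
	int config_a;                    /* configuration payload */
	int config_b;
};

static void object_copy_config(struct object *dst, const struct object *src)
{
	struct refcount saved = dst->refcount;   /* back up persistent info */

	memcpy(dst, src, sizeof(*dst));          /* copy every field */
	dst->refcount = saved;                   /* restore persistent info */
}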
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 7c2ee0526926..1d917be36fc4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -53,7 +53,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.325"
+#define DC_VER "3.2.334"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -249,6 +249,7 @@ struct dc_caps {
uint32_t i2c_speed_in_khz_hdcp;
uint32_t dmdata_alloc_size;
unsigned int max_cursor_size;
+ unsigned int max_buffered_cursor_size;
unsigned int max_video_width;
/*
* max video plane width that can be safely assumed to be always
@@ -282,6 +283,7 @@ struct dc_caps {
bool edp_dsc_support;
bool vbios_lttpr_aware;
bool vbios_lttpr_enable;
+ bool fused_io_supported;
uint32_t max_otg_num;
uint32_t max_cab_allocation_bytes;
uint32_t cache_line_size;
@@ -447,6 +449,7 @@ struct dc_config {
bool enable_windowed_mpo_odm;
bool forceHBR2CP2520; // Used for switching between test patterns TPS4 and CP2520
uint32_t allow_edp_hotplug_detection;
+ bool skip_riommu_prefetch_wa;
bool clamp_min_dcfclk;
uint64_t vblank_alignment_dto_params;
uint8_t vblank_alignment_max_frame_time_diff;
@@ -496,6 +499,7 @@ enum visual_confirm {
VISUAL_CONFIRM_HW_CURSOR = 20,
VISUAL_CONFIRM_VABC = 21,
VISUAL_CONFIRM_DCC = 22,
+ VISUAL_CONFIRM_EXPLICIT = 0x80000000,
};
enum dc_psr_power_opts {
@@ -902,6 +906,9 @@ struct dc_debug_options {
bool voltage_align_fclk;
bool disable_min_fclk;
+ bool hdcp_lc_force_fw_enable;
+ bool hdcp_lc_enable_sw_fallback;
+
bool disable_dfs_bypass;
bool disable_dpp_power_gate;
bool disable_hubp_power_gate;
@@ -1418,6 +1425,171 @@ struct dc_scratch_space {
struct dc_stream_state stream_state;
};
+/*
+ * A link contains one or more sinks and their connected status.
+ * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
+ */
+ struct dc_link {
+ struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
+ unsigned int sink_count;
+ struct dc_sink *local_sink;
+ unsigned int link_index;
+ enum dc_connection_type type;
+ enum signal_type connector_signal;
+ enum dc_irq_source irq_source_hpd;
+ enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
+ enum dc_irq_source irq_source_read_request;/* Read Request */
+
+ bool is_hpd_filter_disabled;
+ bool dp_ss_off;
+
+ /**
+ * @link_state_valid:
+ *
+ * If there is no link and local sink, this variable should be set to
+ * false. Otherwise, it should be set to true; usually, the function
+ * core_link_enable_stream sets this field to true.
+ */
+ bool link_state_valid;
+ bool aux_access_disabled;
+ bool sync_lt_in_progress;
+ bool skip_stream_reenable;
+ bool is_internal_display;
+ /** @todo Rename. Flag an endpoint as having a programmable mapping to a DIG encoder. */
+ bool is_dig_mapping_flexible;
+ bool hpd_status; /* HPD status of link without physical HPD pin. */
+ bool is_hpd_pending; /* Indicates a new received hpd */
+
+ /* USB4 DPIA links skip verifying link cap, instead performing the fallback method
+ * for every link training. This is incompatible with DP LL compliance automation,
+ * which expects the same link settings to be used every retry on a link loss.
+ * This flag is used to skip the fallback when link loss occurs during automation.
+ */
+ bool skip_fallback_on_link_loss;
+
+ bool edp_sink_present;
+
+ struct dp_trace dp_trace;
+
+ /* caps is the same as reported_link_cap. link_training uses
+ * reported_link_cap. Will clean up. TODO
+ */
+ struct dc_link_settings reported_link_cap;
+ struct dc_link_settings verified_link_cap;
+ struct dc_link_settings cur_link_settings;
+ struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
+ struct dc_link_settings preferred_link_setting;
+ /* preferred_training_settings are override values that
+ * come from DM. DM is responsible for the memory
+ * management of the override pointers.
+ */
+ struct dc_link_training_overrides preferred_training_settings;
+ struct dp_audio_test_data audio_test_data;
+
+ uint8_t ddc_hw_inst;
+
+ uint8_t hpd_src;
+
+ uint8_t link_enc_hw_inst;
+ /* DIG link encoder ID. Used as index in link encoder resource pool.
+ * For links with fixed mapping to DIG, this is not changed after dc_link
+ * object creation.
+ */
+ enum engine_id eng_id;
+ enum engine_id dpia_preferred_eng_id;
+
+ bool test_pattern_enabled;
+ /* Pending/Current test pattern are only used to perform and track
+ * FIXED_VS retimer test pattern/lane adjustment override state.
+ * Pending allows link HWSS to differentiate PHY vs non-PHY pattern,
+ * to perform specific lane adjust overrides before setting certain
+ * PHY test patterns. In cases when lane adjust and set test pattern
+ * calls are not performed atomically (i.e. performing link training),
+ * pending_test_pattern will be invalid or contain a non-PHY test pattern
+ * and current_test_pattern will contain required context for any future
+ * set pattern/set lane adjust to transition between override state(s).
+ */
+ enum dp_test_pattern current_test_pattern;
+ enum dp_test_pattern pending_test_pattern;
+
+ union compliance_test_state compliance_test_state;
+
+ void *priv;
+
+ struct ddc_service *ddc;
+
+ enum dp_panel_mode panel_mode;
+ bool aux_mode;
+
+ /* Private to DC core */
+
+ const struct dc *dc;
+
+ struct dc_context *ctx;
+
+ struct panel_cntl *panel_cntl;
+ struct link_encoder *link_enc;
+ struct graphics_object_id link_id;
+ /* Endpoint type distinguishes display endpoints which do not have entries
+ * in the BIOS connector table from those that do. Helps when tracking link
+ * encoder to display endpoint assignments.
+ */
+ enum display_endpoint_type ep_type;
+ union ddi_channel_mapping ddi_channel_mapping;
+ struct connector_device_tag_info device_tag;
+ struct dpcd_caps dpcd_caps;
+ uint32_t dongle_max_pix_clk;
+ unsigned short chip_caps;
+ unsigned int dpcd_sink_count;
+ struct hdcp_caps hdcp_caps;
+ enum edp_revision edp_revision;
+ union dpcd_sink_ext_caps dpcd_sink_ext_caps;
+
+ struct psr_settings psr_settings;
+ struct replay_settings replay_settings;
+
+ /* Drive settings read from integrated info table */
+ struct dc_lane_settings bios_forced_drive_settings;
+
+ /* Vendor specific LTTPR workaround variables */
+ uint8_t vendor_specific_lttpr_link_rate_wa;
+ bool apply_vendor_specific_lttpr_link_rate_wa;
+
+ /* MST record stream using this link */
+ struct link_flags {
+ bool dp_keep_receiver_powered;
+ bool dp_skip_DID2;
+ bool dp_skip_reset_segment;
+ bool dp_skip_fs_144hz;
+ bool dp_mot_reset_segment;
+ /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
+ bool dpia_mst_dsc_always_on;
+ /* Forced DPIA into TBT3 compatibility mode. */
+ bool dpia_forced_tbt3_mode;
+ bool dongle_mode_timing_override;
+ bool blank_stream_on_ocs_change;
+ bool read_dpcd204h_on_irq_hpd;
+ bool force_dp_ffe_preset;
+ } wa_flags;
+ union dc_dp_ffe_preset forced_dp_ffe_preset;
+ struct link_mst_stream_allocation_table mst_stream_alloc_table;
+
+ struct dc_link_status link_status;
+ struct dprx_states dprx_states;
+
+ struct gpio *hpd_gpio;
+ enum dc_link_fec_state fec_state;
+ bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
+
+ struct dc_panel_config panel_config;
+ struct phy_state phy_state;
+ uint32_t phy_transition_bitmask;
+ // BW ALLOCATION USB4 ONLY
+ struct dc_dpia_bw_alloc dpia_bw_alloc_config;
+ bool skip_implict_edp_power_control;
+ enum backlight_control_type backlight_control_type;
+};
+
struct dc {
struct dc_debug_options debug;
struct dc_versions versions;
@@ -1485,6 +1657,7 @@ struct dc {
struct dc_scratch_space current_state;
struct dc_scratch_space new_state;
struct dc_stream_state temp_stream; // Used so we don't need to allocate stream on the stack
+ struct dc_link temp_link;
bool pipes_to_unlock_first[MAX_PIPES]; /* Any of the pipes indicated here should be unlocked first */
} scratch;
@@ -1651,170 +1824,6 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
const enum dc_link_encoding_format link_encoding);
/* Link Interfaces */
-/*
- * A link contains one or more sinks and their connected status.
- * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
- */
-struct dc_link {
- struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
- unsigned int sink_count;
- struct dc_sink *local_sink;
- unsigned int link_index;
- enum dc_connection_type type;
- enum signal_type connector_signal;
- enum dc_irq_source irq_source_hpd;
- enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
-
- bool is_hpd_filter_disabled;
- bool dp_ss_off;
-
- /**
- * @link_state_valid:
- *
- * If there is no link and local sink, this variable should be set to
- * false. Otherwise, it should be set to true; usually, the function
- * core_link_enable_stream sets this field to true.
- */
- bool link_state_valid;
- bool aux_access_disabled;
- bool sync_lt_in_progress;
- bool skip_stream_reenable;
- bool is_internal_display;
- /** @todo Rename. Flag an endpoint as having a programmable mapping to a DIG encoder. */
- bool is_dig_mapping_flexible;
- bool hpd_status; /* HPD status of link without physical HPD pin. */
- bool is_hpd_pending; /* Indicates a new received hpd */
-
- /* USB4 DPIA links skip verifying link cap, instead performing the fallback method
- * for every link training. This is incompatible with DP LL compliance automation,
- * which expects the same link settings to be used every retry on a link loss.
- * This flag is used to skip the fallback when link loss occurs during automation.
- */
- bool skip_fallback_on_link_loss;
-
- bool edp_sink_present;
-
- struct dp_trace dp_trace;
-
- /* caps is the same as reported_link_cap. link_traing use
- * reported_link_cap. Will clean up. TODO
- */
- struct dc_link_settings reported_link_cap;
- struct dc_link_settings verified_link_cap;
- struct dc_link_settings cur_link_settings;
- struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
- struct dc_link_settings preferred_link_setting;
- /* preferred_training_settings are override values that
- * come from DM. DM is responsible for the memory
- * management of the override pointers.
- */
- struct dc_link_training_overrides preferred_training_settings;
- struct dp_audio_test_data audio_test_data;
-
- uint8_t ddc_hw_inst;
-
- uint8_t hpd_src;
-
- uint8_t link_enc_hw_inst;
- /* DIG link encoder ID. Used as index in link encoder resource pool.
- * For links with fixed mapping to DIG, this is not changed after dc_link
- * object creation.
- */
- enum engine_id eng_id;
- enum engine_id dpia_preferred_eng_id;
-
- bool test_pattern_enabled;
- /* Pending/Current test pattern are only used to perform and track
- * FIXED_VS retimer test pattern/lane adjustment override state.
- * Pending allows link HWSS to differentiate PHY vs non-PHY pattern,
- * to perform specific lane adjust overrides before setting certain
- * PHY test patterns. In cases when lane adjust and set test pattern
- * calls are not performed atomically (i.e. performing link training),
- * pending_test_pattern will be invalid or contain a non-PHY test pattern
- * and current_test_pattern will contain required context for any future
- * set pattern/set lane adjust to transition between override state(s).
- * */
- enum dp_test_pattern current_test_pattern;
- enum dp_test_pattern pending_test_pattern;
-
- union compliance_test_state compliance_test_state;
-
- void *priv;
-
- struct ddc_service *ddc;
-
- enum dp_panel_mode panel_mode;
- bool aux_mode;
-
- /* Private to DC core */
-
- const struct dc *dc;
-
- struct dc_context *ctx;
-
- struct panel_cntl *panel_cntl;
- struct link_encoder *link_enc;
- struct graphics_object_id link_id;
- /* Endpoint type distinguishes display endpoints which do not have entries
- * in the BIOS connector table from those that do. Helps when tracking link
- * encoder to display endpoint assignments.
- */
- enum display_endpoint_type ep_type;
- union ddi_channel_mapping ddi_channel_mapping;
- struct connector_device_tag_info device_tag;
- struct dpcd_caps dpcd_caps;
- uint32_t dongle_max_pix_clk;
- unsigned short chip_caps;
- unsigned int dpcd_sink_count;
- struct hdcp_caps hdcp_caps;
- enum edp_revision edp_revision;
- union dpcd_sink_ext_caps dpcd_sink_ext_caps;
-
- struct psr_settings psr_settings;
- struct replay_settings replay_settings;
-
- /* Drive settings read from integrated info table */
- struct dc_lane_settings bios_forced_drive_settings;
-
- /* Vendor specific LTTPR workaround variables */
- uint8_t vendor_specific_lttpr_link_rate_wa;
- bool apply_vendor_specific_lttpr_link_rate_wa;
-
- /* MST record stream using this link */
- struct link_flags {
- bool dp_keep_receiver_powered;
- bool dp_skip_DID2;
- bool dp_skip_reset_segment;
- bool dp_skip_fs_144hz;
- bool dp_mot_reset_segment;
- /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
- bool dpia_mst_dsc_always_on;
- /* Forced DPIA into TBT3 compatibility mode. */
- bool dpia_forced_tbt3_mode;
- bool dongle_mode_timing_override;
- bool blank_stream_on_ocs_change;
- bool read_dpcd204h_on_irq_hpd;
- bool force_dp_ffe_preset;
- } wa_flags;
- union dc_dp_ffe_preset forced_dp_ffe_preset;
- struct link_mst_stream_allocation_table mst_stream_alloc_table;
-
- struct dc_link_status link_status;
- struct dprx_states dprx_states;
-
- struct gpio *hpd_gpio;
- enum dc_link_fec_state fec_state;
- bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
-
- struct dc_panel_config panel_config;
- struct phy_state phy_state;
- uint32_t phy_transition_bitmask;
- // BW ALLOCATON USB4 ONLY
- struct dc_dpia_bw_alloc dpia_bw_alloc_config;
- bool skip_implict_edp_power_control;
- enum backlight_control_type backlight_control_type;
-};
-
/* Return an enumerated dc_link.
* dc_link order is constant and determined at
* boot time. They cannot be created or destroyed.
@@ -2589,10 +2598,18 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context);
/* DSC Interfaces */
#include "dc_dsc.h"
+void dc_get_visual_confirm_for_stream(
+ struct dc *dc,
+ struct dc_stream_state *stream_state,
+ struct tg_color *color);
+
/* Disable acc mode Interfaces */
void dc_disable_accelerated_mode(struct dc *dc);
bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream);
+bool dc_is_cursor_limit_pending(struct dc *dc);
+bool dc_can_clear_cursor_limit(struct dc *dc);
+
#endif /* DC_INTERFACE_H_ */
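Editor's note: moving the struct dc_link definition above struct dc is not cosmetic — the new scratch member temp_link embeds the link by value, and C requires a complete type at the point of embedding (a pointer member would only need a forward declaration). A minimal illustration of the rule:

/* Sketch: complete-type requirement for by-value embedding. */
struct link;                 /* forward declaration */

struct dc_bad {
	struct link *link_ptr;   /* OK: pointers need only a forward decl */
	/* struct link temp;        ERROR: incomplete type here */
};

struct link { int index; };  /* full definition */

struct dc_good {
	struct link temp;        /* OK: struct link is complete by now */
};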
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 614e03bfd598..afbcf866520e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -39,6 +39,7 @@
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
+#define GPINT_RETRY_NUM 20
static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
struct dmub_srv *dmub)
@@ -70,20 +71,28 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
}
}
-void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
+bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv)
{
- struct dmub_srv *dmub = dc_dmub_srv->dmub;
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ struct dmub_srv *dmub;
+ struct dc_context *dc_ctx;
enum dmub_status status;
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ dc_ctx = dc_dmub_srv->ctx;
+ dmub = dc_dmub_srv->dmub;
+
do {
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ status = dmub_srv_wait_for_pending(dmub, 100000);
} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
if (status != DMUB_STATUS_OK) {
DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
}
+
+ return status == DMUB_STATUS_OK;
}
void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
@@ -126,7 +135,49 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv,
}
}
-bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+static bool dc_dmub_srv_reg_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+ unsigned int count,
+ union dmub_rb_cmd *cmd_list)
+{
+ struct dc_context *dc_ctx;
+ struct dmub_srv *dmub;
+ enum dmub_status status = DMUB_STATUS_OK;
+ int i;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ dc_ctx = dc_dmub_srv->ctx;
+ dmub = dc_dmub_srv->dmub;
+
+ for (i = 0 ; i < count; i++) {
+ /* confirm no messages pending */
+ do {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
+
+ /* queue command */
+ if (status == DMUB_STATUS_OK)
+ status = dmub_srv_reg_cmd_execute(dmub, &cmd_list[i]);
+
+ /* check for errors */
+ if (status != DMUB_STATUS_OK) {
+ break;
+ }
+ }
+
+ if (status != DMUB_STATUS_OK) {
+ if (status != DMUB_STATUS_POWER_STATE_D3) {
+ DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
+ return false;
+ }
+
+ return true;
+}
+
+static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
unsigned int count,
union dmub_rb_cmd *cmd_list)
{
@@ -143,20 +194,25 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
for (i = 0 ; i < count; i++) {
// Queue command
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+ if (!cmd_list[i].cmd_common.header.multi_cmd_pending ||
+ dmub_rb_num_free(&dmub->inbox1.rb) >= count - i) {
+ status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
+ } else {
+ status = DMUB_STATUS_QUEUE_FULL;
+ }
if (status == DMUB_STATUS_QUEUE_FULL) {
/* Execute and wait for queue to become empty again. */
- status = dmub_srv_cmd_execute(dmub);
+ status = dmub_srv_fb_cmd_execute(dmub);
if (status == DMUB_STATUS_POWER_STATE_D3)
return false;
do {
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ status = dmub_srv_wait_for_inbox_free(dmub, 100000, count - i);
} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
/* Requeue the command. */
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+ status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
}
if (status != DMUB_STATUS_OK) {
@@ -168,7 +224,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
}
}
- status = dmub_srv_cmd_execute(dmub);
+ status = dmub_srv_fb_cmd_execute(dmub);
if (status != DMUB_STATUS_OK) {
if (status != DMUB_STATUS_POWER_STATE_D3) {
DC_ERROR("Error starting DMUB execution: status=%d\n", status);
@@ -180,6 +236,26 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
return true;
}
+bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+ unsigned int count,
+ union dmub_rb_cmd *cmd_list)
+{
+ bool res = false;
+
+ if (dc_dmub_srv && dc_dmub_srv->dmub) {
+ if (dc_dmub_srv->dmub->inbox_type == DMUB_CMD_INTERFACE_REG) {
+ res = dc_dmub_srv_reg_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
+ } else {
+ res = dc_dmub_srv_fb_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
+ }
+
+ if (res)
+ res = dmub_srv_update_inbox_status(dc_dmub_srv->dmub) == DMUB_STATUS_OK;
+ }
+
+ return res;
+}
+
bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
enum dm_dmub_wait_type wait_type,
union dmub_rb_cmd *cmd_list)
@@ -202,7 +278,8 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
if (!dmub->debug.timeout_info.timeout_occured) {
dmub->debug.timeout_info.timeout_occured = true;
- dmub->debug.timeout_info.timeout_cmd = *cmd_list;
+ if (cmd_list)
+ dmub->debug.timeout_info.timeout_cmd = *cmd_list;
dmub->debug.timeout_info.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
}
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
@@ -210,8 +287,9 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
}
// Copy data back from ring buffer into command
- if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
- dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
+ if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY && cmd_list) {
+ dmub_srv_cmd_get_response(dc_dmub_srv->dmub, cmd_list);
+ }
}
return true;
@@ -224,74 +302,10 @@ bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd
bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
{
- struct dc_context *dc_ctx;
- struct dmub_srv *dmub;
- enum dmub_status status;
- int i;
-
- if (!dc_dmub_srv || !dc_dmub_srv->dmub)
- return false;
-
- dc_ctx = dc_dmub_srv->ctx;
- dmub = dc_dmub_srv->dmub;
-
- for (i = 0 ; i < count; i++) {
- // Queue command
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
-
- if (status == DMUB_STATUS_QUEUE_FULL) {
- /* Execute and wait for queue to become empty again. */
- status = dmub_srv_cmd_execute(dmub);
- if (status == DMUB_STATUS_POWER_STATE_D3)
- return false;
-
- status = dmub_srv_wait_for_idle(dmub, 100000);
- if (status != DMUB_STATUS_OK)
- return false;
-
- /* Requeue the command. */
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
- }
-
- if (status != DMUB_STATUS_OK) {
- if (status != DMUB_STATUS_POWER_STATE_D3) {
- DC_ERROR("Error queueing DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- }
- return false;
- }
- }
-
- status = dmub_srv_cmd_execute(dmub);
- if (status != DMUB_STATUS_OK) {
- if (status != DMUB_STATUS_POWER_STATE_D3) {
- DC_ERROR("Error starting DMUB execution: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- }
+ if (!dc_dmub_srv_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list))
return false;
- }
- // Wait for DMUB to process command
- if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
- if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
- do {
- status = dmub_srv_wait_for_idle(dmub, 100000);
- } while (status != DMUB_STATUS_OK);
- } else
- status = dmub_srv_wait_for_idle(dmub, 100000);
-
- if (status != DMUB_STATUS_OK) {
- DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- return false;
- }
-
- // Copy data back from ring buffer into command
- if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
- dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
- }
-
- return true;
+ return dc_dmub_srv_wait_for_idle(dc_dmub_srv, wait_type, cmd_list);
}
bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
@@ -1243,7 +1257,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dc_dmub_srv_wait_for_idle(dc->ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);
memset(&new_signals, 0, sizeof(new_signals));
@@ -1355,14 +1369,15 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit)
udelay(dc->debug.ips2_eval_delay_us);
- if (ips_fw->signals.bits.ips2_commit) {
- DC_LOG_IPS(
- "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)",
- ips_fw->signals.bits.ips1_commit,
- ips_fw->signals.bits.ips2_commit);
+ DC_LOG_IPS(
+ "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
- // Tell PMFW to exit low power state
- dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+ // Tell PMFW to exit low power state
+ dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+
+ if (ips_fw->signals.bits.ips2_commit) {
DC_LOG_IPS(
"wait IPS2 entry delay (ips1_commit=%u ips2_commit=%u)",
@@ -1400,7 +1415,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
- dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
+ dmub_srv_sync_inboxes(dc->ctx->dmub_srv->dmub);
}
}
@@ -1654,7 +1669,8 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
/* fill in generic command header */
global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- global_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ global_cmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
if (enable) {
/* send global configuration parameters */
@@ -1673,11 +1689,13 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
/* configure command header */
stream_base_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
stream_base_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- stream_base_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_base_cmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
stream_base_cmd->header.multi_cmd_pending = 1;
stream_sub_state_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
stream_sub_state_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- stream_sub_state_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_sub_state_cmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
stream_sub_state_cmd->header.multi_cmd_pending = 1;
/* copy stream static base state */
memcpy(&stream_base_cmd->config,
@@ -1723,7 +1741,8 @@ void dc_dmub_srv_fams2_drr_update(struct dc *dc,
cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num;
cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger;
- cmd.fams2_drr_update.header.payload_bytes = sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);
+ cmd.fams2_drr_update.header.payload_bytes =
+ sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
@@ -1759,7 +1778,8 @@ void dc_dmub_srv_fams2_passthrough_flip(
/* build command header */
cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP;
- cmds[num_cmds].fams2_flip.header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2_flip);
+ cmds[num_cmds].fams2_flip.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2_flip) - sizeof(struct dmub_cmd_header);
/* for chaining multiple commands, all but last command should set to 1 */
cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1;
@@ -1869,11 +1889,14 @@ void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struc
if (command_code == DMUB_GPINT__INVALID_COMMAND)
return;
- // send gpint commands and wait for ack
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
- (uint16_t)(output->ips_mode),
- &output->residency_percent, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->residency_percent = 0;
+ for (i = 0; i < GPINT_RETRY_NUM; i++) {
+ // false could mean GPINT timeout, in which case we should retry
+ if (dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
+ (uint16_t)(output->ips_mode), &output->residency_percent,
+ DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ break;
+ udelay(100);
+ }
if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER,
(uint16_t)(output->ips_mode),
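Editor's note: the residency query above now retries a failed GPINT up to GPINT_RETRY_NUM times with a 100 us delay, since a false return can mean a transient timeout rather than a hard error. The shape of the pattern, with a hypothetical try_once() and a stand-in for udelay():

#include <stdbool.h>

#define RETRY_NUM      20
#define RETRY_DELAY_US 100

bool try_once(void);             /* hypothetical op that may time out */
void delay_us(unsigned int us);  /* stand-in for udelay() */

static bool try_with_retries(void)
{
	int i;

	for (i = 0; i < RETRY_NUM; i++) {
		if (try_once())
			return true;     /* success: stop retrying */
		delay_us(RETRY_DELAY_US);
	}
	return false;                    /* exhausted all attempts */
}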
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index a636f4c3f01d..ada5c2fb2db3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -58,7 +58,7 @@ struct dc_dmub_srv {
bool needs_idle_wake;
};
-void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
+bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv);
bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 77c87ad57220..0bad8304ccf6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -159,6 +159,11 @@ struct dc_link_settings {
uint8_t link_rate_set;
};
+struct dc_tunnel_settings {
+ bool should_enable_dp_tunneling;
+ bool should_use_dp_bw_allocation;
+};
+
union dc_dp_ffe_preset {
struct {
uint8_t level : 4;
@@ -943,10 +948,20 @@ union dpia_info {
uint8_t raw;
};
+/* DPCD[0xE0020] USB4_DRIVER_BW_CAPABILITY register. */
+union usb4_driver_bw_cap {
+ struct {
+ uint8_t rsvd :7;
+ uint8_t driver_bw_alloc_support :1;
+ } bits;
+ uint8_t raw;
+};
+
/* DP Tunneling over USB4 */
struct dpcd_usb4_dp_tunneling_info {
union dp_tun_cap_support dp_tun_cap;
union dpia_info dpia_info;
+ union usb4_driver_bw_cap driver_bw_cap;
uint8_t usb4_driver_id;
uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN];
};
@@ -1486,5 +1501,11 @@ struct dp_trace {
# ifndef DP_TUNNELING_BW_ALLOC_CAP_CHANGED
# define DP_TUNNELING_BW_ALLOC_CAP_CHANGED (1 << 3)
# endif
+# ifndef DPTX_BW_ALLOC_UNMASK_IRQ
+# define DPTX_BW_ALLOC_UNMASK_IRQ (1 << 6)
+# endif
+# ifndef DPTX_BW_ALLOC_MODE_ENABLE
+# define DPTX_BW_ALLOC_MODE_ENABLE (1 << 7)
+# endif
#endif /* DC_DP_TYPES_H */
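Editor's note: union usb4_driver_bw_cap follows DC's usual DPCD idiom — read the raw register byte into .raw, then test decoded fields through .bits (bitfield layout is implementation-defined in general, but consistent within this codebase). A hedged sketch; dpcd_read_byte() is a hypothetical one-byte read helper:

#include <stdint.h>

union usb4_driver_bw_cap {
	struct {
		uint8_t rsvd                    : 7;
		uint8_t driver_bw_alloc_support : 1;
	} bits;
	uint8_t raw;
};

uint8_t dpcd_read_byte(uint32_t address);   /* hypothetical */

static int driver_bw_alloc_supported(void)
{
	union usb4_driver_bw_cap cap;

	cap.raw = dpcd_read_byte(0xE0020);      /* USB4_DRIVER_BW_CAPABILITY */
	return cap.bits.driver_bw_alloc_support;
}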
diff --git a/drivers/gpu/drm/amd/display/dc/dc_fused_io.c b/drivers/gpu/drm/amd/display/dc/dc_fused_io.c
new file mode 100644
index 000000000000..fee69642fb93
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_fused_io.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "dc_fused_io.h"
+
+#include "dm_helpers.h"
+#include "gpio.h"
+
+static bool op_i2c_convert(
+ union dmub_rb_cmd *cmd,
+ const struct mod_hdcp_atomic_op_i2c *op,
+ enum dmub_cmd_fused_request_type type,
+ uint32_t ddc_line,
+ bool over_aux
+)
+{
+ struct dmub_cmd_fused_request *req = &cmd->fused_io.request;
+ struct dmub_cmd_fused_request_location_i2c *loc = &req->u.i2c;
+
+ if (!op || op->size > sizeof(req->buffer))
+ return false;
+
+ req->type = type;
+ loc->is_aux = false;
+ loc->ddc_line = ddc_line;
+ loc->over_aux = over_aux;
+ loc->address = op->address;
+ loc->offset = op->offset;
+ loc->length = op->size;
+ memcpy(req->buffer, op->data, op->size);
+
+ return true;
+}
+
+static bool op_aux_convert(
+ union dmub_rb_cmd *cmd,
+ const struct mod_hdcp_atomic_op_aux *op,
+ enum dmub_cmd_fused_request_type type,
+ uint32_t ddc_line
+)
+{
+ struct dmub_cmd_fused_request *req = &cmd->fused_io.request;
+ struct dmub_cmd_fused_request_location_aux *loc = &req->u.aux;
+
+ if (!op || op->size > sizeof(req->buffer))
+ return false;
+
+ req->type = type;
+ loc->is_aux = true;
+ loc->ddc_line = ddc_line;
+ loc->address = op->address;
+ loc->length = op->size;
+ memcpy(req->buffer, op->data, op->size);
+
+ return true;
+}
+
+static bool atomic_write_poll_read(
+ struct dc_link *link,
+ union dmub_rb_cmd commands[3],
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ const uint8_t count = 3;
+ const uint32_t timeout_per_request_us = 10000;
+ const uint32_t timeout_per_aux_transaction_us = 10000;
+ uint64_t timeout_us = 0;
+
+ commands[1].fused_io.request.poll_mask_msb = poll_mask_msb;
+ commands[1].fused_io.request.timeout_us = poll_timeout_us;
+
+ for (uint8_t i = 0; i < count; i++) {
+ struct dmub_rb_cmd_fused_io *io = &commands[i].fused_io;
+
+ io->header.type = DMUB_CMD__FUSED_IO;
+ io->header.sub_type = DMUB_CMD__FUSED_IO_EXECUTE;
+ io->header.multi_cmd_pending = i != count - 1;
+ io->header.payload_bytes = sizeof(commands[i].fused_io) - sizeof(io->header);
+
+ timeout_us += timeout_per_request_us + io->request.timeout_us;
+ if (!io->request.timeout_us && io->request.u.aux.is_aux)
+ timeout_us += timeout_per_aux_transaction_us * (io->request.u.aux.length / 16);
+ }
+
+ if (!dm_helpers_execute_fused_io(link->ctx, link, commands, count, timeout_us))
+ return false;
+
+ return commands[0].fused_io.request.status == FUSED_REQUEST_STATUS_SUCCESS;
+}
+
+bool dm_atomic_write_poll_read_i2c(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ if (!link)
+ return false;
+
+ const bool over_aux = false;
+ const uint32_t ddc_line = link->ddc->ddc_pin->pin_data->en;
+
+ union dmub_rb_cmd commands[3] = { 0 };
+ const bool converted = op_i2c_convert(&commands[0], write, FUSED_REQUEST_WRITE, ddc_line, over_aux)
+ && op_i2c_convert(&commands[1], poll, FUSED_REQUEST_POLL, ddc_line, over_aux)
+ && op_i2c_convert(&commands[2], read, FUSED_REQUEST_READ, ddc_line, over_aux);
+
+ if (!converted)
+ return false;
+
+ const bool result = atomic_write_poll_read(link, commands, poll_timeout_us, poll_mask_msb);
+
+ memcpy(read->data, commands[0].fused_io.request.buffer, read->size);
+ return result;
+}
+
+bool dm_atomic_write_poll_read_aux(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ if (!link)
+ return false;
+
+ const uint32_t ddc_line = link->ddc->ddc_pin->pin_data->en;
+ union dmub_rb_cmd commands[3] = { 0 };
+ const bool converted = op_aux_convert(&commands[0], write, FUSED_REQUEST_WRITE, ddc_line)
+ && op_aux_convert(&commands[1], poll, FUSED_REQUEST_POLL, ddc_line)
+ && op_aux_convert(&commands[2], read, FUSED_REQUEST_READ, ddc_line);
+
+ if (!converted)
+ return false;
+
+ const bool result = atomic_write_poll_read(link, commands, poll_timeout_us, poll_mask_msb);
+
+ memcpy(read->data, commands[0].fused_io.request.buffer, read->size);
+ return result;
+}
+
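Editor's note: a hedged caller's view of the new fused-IO helpers — three operations are packed into one DMUB submission, with multi_cmd_pending chaining all but the last command. Field names are taken from the conversion helpers above; addresses, offsets, buffers, and the poll mask are illustrative only:

/* Sketch: one atomic write/poll/read transaction over I2C. */
static void example_hdcp_i2c(struct dc_link *link)
{
	uint8_t tx[2] = { 0x01, 0x02 };
	uint8_t poll_buf[1] = { 0 };
	uint8_t rx[4] = { 0 };

	struct mod_hdcp_atomic_op_i2c write = {
		.data = tx, .size = sizeof(tx), .address = 0x3A, .offset = 0x00,
	};
	struct mod_hdcp_atomic_op_i2c poll = {
		.data = poll_buf, .size = sizeof(poll_buf), .address = 0x3A, .offset = 0x10,
	};
	struct mod_hdcp_atomic_op_i2c read = {
		.data = rx, .size = sizeof(rx), .address = 0x3A, .offset = 0x20,
	};

	/* Poll up to 10 ms on the MSB selected by mask 0x80, then read back. */
	if (dm_atomic_write_poll_read_i2c(link, &write, &poll, &read, 10000, 0x80))
		consume_reply(rx, sizeof(rx));   /* hypothetical consumer */
}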
diff --git a/drivers/gpu/drm/amd/display/dc/dc_fused_io.h b/drivers/gpu/drm/amd/display/dc/dc_fused_io.h
new file mode 100644
index 000000000000..c74917240985
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_fused_io.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ */
+
+#ifndef __DC_FUSED_IO_H__
+#define __DC_FUSED_IO_H__
+
+#include "dc.h"
+#include "mod_hdcp.h"
+
+bool dm_atomic_write_poll_read_i2c(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+);
+
+bool dm_atomic_write_poll_read_aux(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+);
+
+#endif // __DC_FUSED_IO_H__
+
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 8f077e15b4f0..7217de258851 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -682,13 +682,19 @@ void reg_sequence_wait_done(const struct dc_context *ctx)
if (offload &&
ctx->dc->debug.dmub_offload_enabled &&
!ctx->dc->debug.dmcub_emulation) {
- dc_dmub_srv_wait_idle(ctx->dmub_srv);
+ dc_dmub_srv_wait_for_idle(ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);
}
}
char *dce_version_to_string(const int version)
{
switch (version) {
+ case DCE_VERSION_6_0:
+ return "DCE 6.0";
+ case DCE_VERSION_6_1:
+ return "DCE 6.1";
+ case DCE_VERSION_6_4:
+ return "DCE 6.4";
case DCE_VERSION_8_0:
return "DCE 8.0";
case DCE_VERSION_8_1:
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
index e9413685ed4f..14feb843e694 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_plane.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h
@@ -28,13 +28,24 @@
#include "dc_hw_types.h"
+union dc_plane_status_update_flags {
+ struct {
+ uint32_t address : 1;
+ } bits;
+ uint32_t raw;
+};
+
struct dc_plane_state *dc_create_plane_state(const struct dc *dc);
const struct dc_plane_status *dc_plane_get_status(
- const struct dc_plane_state *plane_state);
+ const struct dc_plane_state *plane_state,
+ union dc_plane_status_update_flags flags);
void dc_plane_state_retain(struct dc_plane_state *plane_state);
void dc_plane_state_release(struct dc_plane_state *plane_state);
void dc_plane_force_dcc_and_tiling_disable(struct dc_plane_state *plane_state,
bool clear_tiling);
+
+void dc_plane_copy_config(struct dc_plane_state *dst, const struct dc_plane_state *src);
+
#endif /* _DC_PLANE_H_ */
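Editor's note: callers of dc_plane_get_status() now declare what they are polling for — only flags.bits.address = 1 clears is_flip_pending and runs hwss.update_pending_status(). A hedged sketch of the two call shapes; flip_done() is a hypothetical completion hook:

/* Sketch: flag-gated status query. */
static void poll_plane(struct dc_plane_state *plane_state)
{
	union dc_plane_status_update_flags flags = { .raw = 0 };
	const struct dc_plane_status *status;

	/* passive read: no pending-flip bookkeeping is touched */
	status = dc_plane_get_status(plane_state, flags);

	/* flip polling: clear is_flip_pending once the address update lands */
	flags.bits.address = 1;
	status = dc_plane_get_status(plane_state, flags);
	if (status && !status->is_flip_pending)
		flip_done(plane_state);    /* hypothetical */
}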
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
index 1a12ef579ff4..1d9bae56ff6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
@@ -105,4 +105,24 @@ bool dc_state_is_fams2_in_use(
const struct dc *dc,
const struct dc_state *state);
+
+void dc_state_set_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit);
+
+bool dc_state_get_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state);
+
+void dc_state_set_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit);
+
+bool dc_state_get_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state);
+
+bool dc_state_can_clear_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state);
+
+bool dc_state_is_subvp_in_use(struct dc_state *state);
+
#endif /* _DC_STATE_PRIV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index e0bfddaa23e3..341d2ffb64b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -44,6 +44,8 @@ struct mall_stream_config {
*/
enum mall_stream_type type;
struct dc_stream_state *paired_stream; // master / slave stream
+ bool subvp_limit_cursor_size; /* stream has/is using SubVP, which limits HW cursor size */
+ bool cursor_size_limit_subvp; /* stream is using hw cursor config preventing subvp */
};
struct dc_stream_status {
@@ -503,6 +505,11 @@ void program_cursor_position(
struct dc *dc,
struct dc_stream_state *stream);
+bool dc_stream_check_cursor_attributes(
+ const struct dc_stream_state *stream,
+ struct dc_state *state,
+ const struct dc_cursor_attributes *attributes);
+
bool dc_stream_set_cursor_attributes(
struct dc_stream_state *stream,
const struct dc_cursor_attributes *attributes);
@@ -579,4 +586,8 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
struct dc_stream_state *stream,
struct dc_surface_update *srf_updates,
struct dc_state *context);
+
+bool dc_stream_is_cursor_limit_pending(struct dc *dc, struct dc_stream_state *stream);
+bool dc_stream_can_clear_cursor_limit(struct dc *dc, struct dc_stream_state *stream);
+
#endif /* DC_STREAM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 83ffaae9f439..a4cd0eb39a3a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -210,6 +210,7 @@ struct dc_edid_caps {
bool edid_hdmi;
bool hdr_supported;
+ bool rr_capable;
struct dc_panel_patch panel_patch;
};
@@ -1089,7 +1090,8 @@ union replay_low_refresh_rate_enable_options {
struct {
//BIT[0-3]: Replay Low Hz Support control
unsigned int ENABLE_LOW_RR_SUPPORT :1;
- unsigned int RESERVED_1_3 :3;
+ unsigned int SKIP_ASIC_CHECK :1;
+ unsigned int RESERVED_2_3 :2;
//BIT[4-15]: Replay Low Hz Enable Scenarios
unsigned int ENABLE_STATIC_SCREEN :1;
unsigned int ENABLE_FULL_SCREEN_VIDEO :1;
@@ -1129,6 +1131,10 @@ struct replay_config {
union replay_low_refresh_rate_enable_options low_rr_enable_options;
/* Replay coasting vtotal is within low refresh rate range. */
bool low_rr_activated;
+ /* Replay low refresh rate supported */
+ bool low_rr_supported;
+ /* Replay Video Conferencing Optimization Enabled */
+ bool replay_video_conferencing_optimization_enabled;
};
/* Replay feature flags*/
@@ -1249,6 +1255,7 @@ enum dc_cm2_gpu_mem_layout {
enum dc_cm2_gpu_mem_pixel_component_order {
DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA,
+ DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA
};
enum dc_cm2_gpu_mem_format {
@@ -1270,7 +1277,8 @@ struct dc_cm2_gpu_mem_format_parameters {
enum dc_cm2_gpu_mem_size {
DC_CM2_GPU_MEM_SIZE_171717,
- DC_CM2_GPU_MEM_SIZE_TRANSFORMED
+ DC_CM2_GPU_MEM_SIZE_333333,
+ DC_CM2_GPU_MEM_SIZE_TRANSFORMED,
};
struct dc_cm2_gpu_mem_parameters {
@@ -1279,6 +1287,7 @@ struct dc_cm2_gpu_mem_parameters {
struct dc_cm2_gpu_mem_format_parameters format_params;
enum dc_cm2_gpu_mem_pixel_component_order component_order;
enum dc_cm2_gpu_mem_size size;
+ uint16_t bit_depth;
};
enum dc_cm2_transfer_func_source {
@@ -1302,6 +1311,10 @@ struct dc_cm2_func_luts {
const struct dc_3dlut *lut3d_func;
struct dc_cm2_gpu_mem_parameters gpu_mem_params;
};
+ bool rmcm_3dlut_shaper_select;
+ bool mpc_3dlut_enable;
+ bool rmcm_3dlut_enable;
+ bool mpc_mcm_post_blend;
} lut3d_data;
const struct dc_transfer_func *lut1d_func;
};
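Editor's note: the low-refresh-rate options union above keeps its layout stable by carving SKIP_ASIC_CHECK out of the old RESERVED_1_3 field; the control-nibble widths must still sum to 4 so the scenario bits in BIT[4-15] keep their positions. A hedged compile-time check of that bit budget:

/* Sketch: bit budget for the low_rr enable options (C11 static_assert). */
#include <assert.h>

enum {
	LOW_RR_SUPPORT_BITS  = 1,   /* ENABLE_LOW_RR_SUPPORT */
	SKIP_ASIC_CHECK_BITS = 1,   /* SKIP_ASIC_CHECK (new) */
	RESERVED_2_3_BITS    = 2,   /* shrunk from 3 to 2    */
};

static_assert(LOW_RR_SUPPORT_BITS + SKIP_ASIC_CHECK_BITS +
	      RESERVED_2_3_BITS == 4,
	      "control nibble must stay 4 bits wide");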
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
index b363f5360818..58c84f555c0f 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
@@ -391,6 +391,7 @@ static void dccg35_set_dppclk_rcg(struct dccg *dccg,
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && enable)
return;
@@ -411,6 +412,8 @@ static void dccg35_set_dppclk_rcg(struct dccg *dccg,
BREAK_TO_DEBUGGER();
break;
}
+ //DC_LOG_DEBUG("%s: inst(%d) DPPCLK rcg_disable: %d\n", __func__, inst, enable ? 0 : 1);
+
}
static void dccg35_set_dpstreamclk_rcg(
@@ -1035,6 +1038,7 @@ static void dccg35_enable_dpp_clk_new(
DPPCLK0_DTO_MODULO, 0xFF);
}
+
static void dccg35_disable_dpp_clk_new(
struct dccg *dccg,
int inst)
@@ -1112,30 +1116,24 @@ static void dcn35_set_dppclk_enable(struct dccg *dccg,
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
switch (dpp_inst) {
case 0:
REG_UPDATE(DPPCLK_CTRL, DPPCLK0_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
break;
case 1:
REG_UPDATE(DPPCLK_CTRL, DPPCLK1_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable);
break;
case 2:
REG_UPDATE(DPPCLK_CTRL, DPPCLK2_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable);
break;
case 3:
REG_UPDATE(DPPCLK_CTRL, DPPCLK3_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable);
break;
default:
break;
}
+ //DC_LOG_DEBUG("%s: dpp_inst(%d) DPPCLK_EN = %d\n", __func__, dpp_inst, enable);
}
@@ -1163,14 +1161,18 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst,
ASSERT(false);
phase = 0xff;
}
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, false);
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, phase,
DPPCLK0_DTO_MODULO, modulo);
dcn35_set_dppclk_enable(dccg, dpp_inst, true);
- } else
+ } else {
dcn35_set_dppclk_enable(dccg, dpp_inst, false);
+ /*we have this in hwss: disable_plane*/
+ //dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
+ }
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
@@ -1182,6 +1184,7 @@ static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg,
if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
return;
+
switch (dpp_inst) {
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
@@ -1198,6 +1201,8 @@ static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg,
default:
break;
}
+ //DC_LOG_DEBUG("%s: dpp_inst(%d) rcg: %d\n", __func__, dpp_inst, enable);
+
}
static void dccg35_get_pixel_rate_div(
@@ -1521,28 +1526,30 @@ static void dccg35_set_physymclk_root_clock_gating(
switch (phy_inst) {
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 1:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 2:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 3:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 4:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
default:
BREAK_TO_DEBUGGER();
return;
}
+ //DC_LOG_DEBUG("%s: dpp_inst(%d) PHYESYMCLK_ROOT_GATE_DISABLE:\n", __func__, phy_inst, enable ? 0 : 1);
+
}
static void dccg35_set_physymclk(
@@ -1643,6 +1650,8 @@ static void dccg35_dpp_root_clock_control(
return;
if (clock_on) {
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, false);
+
/* turn off the DTO and leave phase/modulo at max */
dcn35_set_dppclk_enable(dccg, dpp_inst, 1);
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
@@ -1654,6 +1663,8 @@ static void dccg35_dpp_root_clock_control(
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, 0,
DPPCLK0_DTO_MODULO, 1);
+ /*we have this in hwss: disable_plane*/
+ //dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
}
dccg->dpp_clock_gated[dpp_inst] = !clock_on;
@@ -1771,36 +1782,40 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst)
//Disable DTO
switch (inst) {
case 0:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK0_DTO_PARAM,
DSCCLK0_DTO_PHASE, 0,
DSCCLK0_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
break;
case 1:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK1_DTO_PARAM,
DSCCLK1_DTO_PHASE, 0,
DSCCLK1_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
break;
case 2:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK2_DTO_PARAM,
DSCCLK2_DTO_PHASE, 0,
DSCCLK2_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
break;
case 3:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK3_DTO_PARAM,
DSCCLK3_DTO_PHASE, 0,
DSCCLK3_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
break;
default:
BREAK_TO_DEBUGGER();
@@ -1813,9 +1828,6 @@ static void dccg35_disable_dscclk(struct dccg *dccg,
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- return;
-
switch (inst) {
case 0:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 0);
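Editor's note: the reordering in dccg35_update_dpp_dto() and dccg35_enable_dscclk() follows one rule — take the clock out of root-clock gating before touching its DTO registers, since writes to a gated block may not land. A hedged sketch of the ordering; the helpers are stand-ins for the REG_UPDATE sequences above:

#include <stdbool.h>

void rcg_set(int inst, bool gate);       /* hypothetical: root clock gating */
void dto_program(int inst, int phase, int modulo);
void clk_enable(int inst, bool on);

static void update_clock(int inst, int phase, int modulo, bool on)
{
	if (on) {
		rcg_set(inst, false);             /* 1: ungate so writes land */
		dto_program(inst, phase, modulo); /* 2: program the divider  */
		clk_enable(inst, true);           /* 3: enable the output    */
	} else {
		clk_enable(inst, false);
		/* re-gating is left to the disable_plane path, as noted above */
	}
}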
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 077337698e0a..b4f5b4a6331a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -976,11 +976,12 @@ static bool dcn31_program_pix_clk(
struct bp_pixel_clock_parameters bp_pc_params = {0};
enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
- // Apply ssed(spread spectrum) dpref clock for edp only.
- if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0
- && pix_clk_params->signal_type == SIGNAL_TYPE_EDP
- && encoding == DP_8b_10b_ENCODING)
+ // Apply ssed(spread spectrum) dpref clock for edp and dp
+ if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0 &&
+ dc_is_dp_signal(pix_clk_params->signal_type) &&
+ encoding == DP_8b_10b_ENCODING)
dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
+
// For these signal types Driver to program DP_DTO without calling VBIOS Command table
if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
if (e) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index 0721ae895ae9..94128f7a18b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -257,7 +257,7 @@ bool dce110_clk_src_construct(
struct dce110_clk_src *clk_src,
struct dc_context *ctx,
struct dc_bios *bios,
- enum clock_source_id,
+ enum clock_source_id id,
const struct dce110_clk_src_regs *regs,
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index ccc154b0281c..3b9011ef9b68 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -28,6 +28,8 @@
#include "dc.h"
#include "core_types.h"
#include "dmub_cmd.h"
+#include "dc_dmub_srv.h"
+#include "dmub/dmub_srv.h"
#define TO_DMUB_ABM(abm)\
container_of(abm, struct dce_abm, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
index 0d7e7f3b81a1..a641ae04450c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -240,7 +240,8 @@ bool dmub_abm_save_restore(
cmd.abm_save_restore.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
cmd.abm_save_restore.abm_init_config_data.panel_mask = panel_mask;
- cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore);
+ cmd.abm_save_restore.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_abm_save_restore) - sizeof(struct dmub_cmd_header);
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index c31e4f26a305..fcd3d86ad517 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -280,7 +280,9 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm
memset(&cmd, 0, sizeof(cmd));
pCmd->header.type = DMUB_CMD__REPLAY;
pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL;
- pCmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal);
+ pCmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal) -
+ sizeof(struct dmub_cmd_header);
pCmd->replay_set_power_opt_data.power_opt = power_opt;
pCmd->replay_set_power_opt_data.panel_inst = panel_inst;
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
@@ -319,7 +321,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_timing_sync.header.sub_type =
DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED;
cmd.replay_set_timing_sync.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_timing_sync);
+ sizeof(struct dmub_rb_cmd_replay_set_timing_sync) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_timing_sync.replay_set_timing_sync_data.panel_inst =
cmd_element->sync_data.panel_inst;
@@ -331,7 +334,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_frameupdate_timer.header.sub_type =
DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER;
cmd.replay_set_frameupdate_timer.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer);
+ sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_frameupdate_timer.data.panel_inst =
cmd_element->panel_inst;
@@ -345,7 +349,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_pseudo_vtotal.header.sub_type =
DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL;
cmd.replay_set_pseudo_vtotal.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal);
+ sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_pseudo_vtotal.data.panel_inst =
cmd_element->pseudo_vtotal_data.panel_inst;
@@ -357,7 +362,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_disabled_adaptive_sync_sdp.header.sub_type =
DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP;
cmd.replay_disabled_adaptive_sync_sdp.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp);
+ sizeof(struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_disabled_adaptive_sync_sdp.data.panel_inst =
cmd_element->disabled_adaptive_sync_sdp_data.panel_inst;
@@ -369,7 +375,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_general_cmd.header.sub_type =
DMUB_CMD__REPLAY_SET_GENERAL_CMD;
cmd.replay_set_general_cmd.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_general_cmd);
+ sizeof(struct dmub_rb_cmd_replay_set_general_cmd) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_general_cmd.data.panel_inst =
cmd_element->set_general_cmd_data.panel_inst;
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
index eede83ad91fa..824f73eb3326 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -25,8 +25,7 @@
CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = -Wno-override-init
-DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
- dce60_resource.o
+DCE60 = dce60_timing_generator.o
AMD_DAL_DCE60 = $(addprefix $(AMDDALPATH)/dc/dce60/,$(DCE60))
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
index 003a9330c286..88e7a1fc9a30 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
@@ -105,7 +105,7 @@ static void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_100hz)
dm_write_reg(tg->ctx, addr, value);
}
-static void program_timing(struct timing_generator *tg,
+static void dce80_timing_generator_program_timing(struct timing_generator *tg,
const struct dc_crtc_timing *timing,
int vready_offset,
int vstartup_start,
@@ -185,7 +185,7 @@ static void dce80_timing_generator_enable_advanced_request(
static const struct timing_generator_funcs dce80_tg_funcs = {
.validate_timing = dce110_tg_validate_timing,
- .program_timing = program_timing,
+ .program_timing = dce80_timing_generator_program_timing,
.enable_crtc = dce110_timing_generator_enable_crtc,
.disable_crtc = dce110_timing_generator_disable_crtc,
.is_counter_moving = dce110_timing_generator_is_counter_moving,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 5efddd48d5c5..9d160b39e8c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -153,6 +153,14 @@ bool dm_helpers_submit_i2c(
const struct dc_link *link,
struct i2c_command *cmd);
+bool dm_helpers_execute_fused_io(
+ struct dc_context *ctx,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+);
+
bool dm_helpers_dp_write_dsc_enable(
struct dc_context *ctx,
const struct dc_stream_state *stream,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index f1fe49401bc0..8d24763938ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -1002,6 +1002,7 @@ static bool CalculatePrefetchSchedule(
dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime
- (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+	dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // clamp to the U6.2 register limit for DST_Y_PREFETCH
Lsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC);
Tsw_oto = Lsw_oto * LineTime;
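Editor's note: the 63.75 clamp added here (and repeated in the dcn31 and dcn314 hunks below) matches the register format named in the comment. U6.2 is unsigned fixed point with 6 integer bits and 2 fractional bits, so the largest encodable value is 2^6 - 2^-2 = 63.75, in steps of 0.25. A hedged sketch of the encoding; encode_u6_2 is an illustrative name, not a driver function (the dml_floor(4.0 * (x + 0.125), 1) / 4.0 line seen in the later hunks performs the same quarter-unit rounding):

```c
#include <stdint.h>

/* Illustrative encoder for a U6.2 register field such as DST_Y_PREFETCH:
 * the value is stored in quarter-line units in 8 bits (max 255 = 63.75). */
static uint8_t encode_u6_2(double lines)
{
	if (lines < 0.0)
		lines = 0.0;
	if (lines > 63.75)	/* 2^6 - 2^-2, the field maximum */
		lines = 63.75;
	return (uint8_t)(lines * 4.0 + 0.5);	/* round to nearest 0.25 */
}
```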
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index f567a9023682..ed59c77bc6f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -1105,6 +1105,7 @@ static bool CalculatePrefetchSchedule(
Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+	dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // clamp to the U6.2 register limit for DST_Y_PREFETCH
dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
Tpre_rounded = dst_y_prefetch_equ * LineTime;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 5865e8fa2d8e..9f3938a50240 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -1123,6 +1123,7 @@ static bool CalculatePrefetchSchedule(
Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+	dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // clamp to the U6.2 register limit for DST_Y_PREFETCH
dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
Tpre_rounded = dst_y_prefetch_equ * LineTime;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 56dda686e299..b0fc1fd20208 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -627,6 +627,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
*/
if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && !dcn32_is_center_timing(pipe) &&
!pipe->stream->hw_cursor_req &&
+ !dc_state_get_stream_cursor_subvp_limit(pipe->stream, context) &&
!(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
index 21fd466dba26..157ecf008d6c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
@@ -99,7 +99,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_ccflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
@@ -117,11 +116,9 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_floa
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_rcflags)
DML21 := src/dml2_top/dml2_top_interfaces.o
DML21 += src/dml2_top/dml2_top_soc15.o
-DML21 += src/inc/dml2_debug.o
DML21 += src/dml2_core/dml2_core_dcn4.o
DML21 += src/dml2_core/dml2_core_factory.o
DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index 731fbd4bc600..d47cacfdb695 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -526,7 +526,8 @@ static void populate_dml21_output_config_from_stream_state(struct dml2_link_outp
static void populate_dml21_stream_overrides_from_stream_state(
struct dml2_stream_parameters *stream_desc,
- struct dc_stream_state *stream)
+ struct dc_stream_state *stream,
+ struct dc_stream_status *stream_status)
{
switch (stream->debug.force_odm_combine_segments) {
case 0:
@@ -551,7 +552,9 @@ static void populate_dml21_stream_overrides_from_stream_state(
if (!stream->ctx->dc->debug.enable_single_display_2to1_odm_policy ||
stream->debug.force_odm_combine_segments > 0)
stream_desc->overrides.disable_dynamic_odm = true;
- stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp || stream->hw_cursor_req;
+ stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp ||
+ stream->hw_cursor_req ||
+ stream_status->mall_stream_config.cursor_size_limit_subvp;
}
static enum dml2_swizzle_mode gfx_addr3_to_dml2_swizzle_mode(enum swizzle_mode_addr3_values addr3_mode)
@@ -885,6 +888,9 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
case DC_CM2_GPU_MEM_SIZE_171717:
plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube;
break;
+ case DC_CM2_GPU_MEM_SIZE_333333:
+ plane->tdlut.tdlut_width_mode = dml2_tdlut_width_33_cube;
+ break;
case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
//plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined
break;
@@ -946,7 +952,7 @@ static unsigned int map_stream_to_dml21_display_cfg(const struct dml2_context *d
return location;
}
-static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id,
+unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id,
const struct dc_plane_state *plane, const struct dc_state *context)
{
unsigned int plane_id;
@@ -1023,7 +1029,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx);
adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]);
populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]);
- populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index]);
+ populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index], &context->stream_status[stream_index]);
dml_dispcfg->stream_descriptors[disp_cfg_stream_location].overrides.hw.twait_budgeting.fclk_pstate = dml2_twait_budgeting_setting_if_needed;
dml_dispcfg->stream_descriptors[disp_cfg_stream_location].overrides.hw.twait_budgeting.uclk_pstate = dml2_twait_budgeting_setting_if_needed;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
index 069b939c672a..73a013be1e48 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
@@ -11,6 +11,7 @@ struct dc_state;
struct dcn_watermarks;
union dcn_watermark_set;
struct pipe_ctx;
+struct dc_plane_state;
struct dml2_context;
struct dml2_configuration_options;
@@ -25,4 +26,5 @@ void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_se
void dml21_map_hw_resources(struct dml2_context *dml_ctx);
void dml21_get_pipe_mcache_config(struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog, struct dml2_pipe_configuration_descriptor *mcache_pipe_config);
void dml21_set_dc_p_state_type(struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming, bool sub_vp_enabled);
+unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id, const struct dc_plane_state *plane, const struct dc_state *context);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index ed6584535e89..208d3651b6ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -12,6 +12,8 @@
#include "dml21_translation_helper.h"
#include "dml2_dc_resource_mgmt.h"
+#define INVALID -1
+
static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
{
*dml_ctx = vzalloc(sizeof(struct dml2_context));
@@ -208,10 +210,40 @@ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_sta
}
}
+static void dml21_prepare_mcache_params(struct dml2_context *dml_ctx, struct dc_state *context, struct dc_mcache_params *mcache_params)
+{
+ int dc_plane_idx = 0;
+ int dml_prog_idx, stream_idx, plane_idx;
+ struct dml2_per_plane_programming *pln_prog = NULL;
+
+ for (stream_idx = 0; stream_idx < context->stream_count; stream_idx++) {
+ for (plane_idx = 0; plane_idx < context->stream_status[stream_idx].plane_count; plane_idx++) {
+ dml_prog_idx = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_idx]->stream_id, context->stream_status[stream_idx].plane_states[plane_idx], context);
+ if (dml_prog_idx == INVALID) {
+ continue;
+ }
+ pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];
+ mcache_params[dc_plane_idx].valid = pln_prog->mcache_allocation.valid;
+ mcache_params[dc_plane_idx].num_mcaches_plane0 = pln_prog->mcache_allocation.num_mcaches_plane0;
+ mcache_params[dc_plane_idx].num_mcaches_plane1 = pln_prog->mcache_allocation.num_mcaches_plane1;
+ mcache_params[dc_plane_idx].requires_dedicated_mall_mcache = pln_prog->mcache_allocation.requires_dedicated_mall_mcache;
+ mcache_params[dc_plane_idx].last_slice_sharing.plane0_plane1 = pln_prog->mcache_allocation.last_slice_sharing.plane0_plane1;
+ memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane0,
+ pln_prog->mcache_allocation.mcache_x_offsets_plane0,
+ sizeof(int) * (DML2_MAX_MCACHES + 1));
+ memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane1,
+ pln_prog->mcache_allocation.mcache_x_offsets_plane1,
+ sizeof(int) * (DML2_MAX_MCACHES + 1));
+ dc_plane_idx++;
+ }
+ }
+}
+
static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx)
{
bool result = false;
struct dml2_build_mode_programming_in_out *mode_programming = &dml_ctx->v21.mode_programming;
+ struct dc_mcache_params mcache_params[MAX_PLANES] = {0};
memset(&dml_ctx->v21.display_config, 0, sizeof(struct dml2_display_cfg));
memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
@@ -246,6 +278,14 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
dml2_map_dc_pipes(dml_ctx, context, NULL, &dml_ctx->v21.dml_to_dc_pipe_mapping, in_dc->current_state);
/* if subvp phantoms are present, expand them into dc context */
dml21_handle_phantom_streams_planes(in_dc, context, dml_ctx);
+
+ if (in_dc->res_pool->funcs->program_mcache_pipe_config) {
+ //Prepare mcache params for each plane based on mcache output from DML
+ dml21_prepare_mcache_params(dml_ctx, context, mcache_params);
+
+ //populate mcache regs to each pipe
+ dml_ctx->config.callbacks.allocate_mcache(context, mcache_params);
+ }
}
/* Copy DML CLK, WM and REG outputs to bandwidth context */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
index b2075b8c363b..42e715024bc9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
@@ -8,6 +8,7 @@
#include "os_types.h"
#include "dml_top_soc_parameter_types.h"
+#include "dml_top_display_cfg_types.h"
struct dc;
struct dc_state;
@@ -65,4 +66,67 @@ struct socbb_ip_params_external {
struct dml2_ip_capabilities ip_params;
struct dml2_soc_bb soc_bb;
};
+
+/* mcache parameters decided by DML */
+struct dc_mcache_params {
+ bool valid;
+ /*
+	 * For iMALL, dedicated MALL mcaches are required (sharing of the
+	 * last slice is possible); for legacy phantom or phantom without
+	 * return, only the MALL mcaches need to be valid.
+ */
+ bool requires_dedicated_mall_mcache;
+ unsigned int num_mcaches_plane0;
+ unsigned int num_mcaches_plane1;
+ /*
+ * Generally, plane0/1 slices must use a disjoint set of caches
+	 * but in some cases the final segment of the two planes can
+ * use the same cache. If plane0_plane1 is set, then this is
+ * allowed.
+ *
+ * Similarly, the caches allocated to MALL prefetcher are generally
+ * disjoint, but if mall_prefetch is set, then the final segment
+ * between the main and the mall pixel requestor can use the same
+ * cache.
+ *
+ * Note that both bits may be set at the same time.
+ */
+ struct {
+ bool mall_comb_mcache_p0;
+ bool mall_comb_mcache_p1;
+ bool plane0_plane1;
+ } last_slice_sharing;
+ /*
+ * A plane is divided into vertical slices of mcaches,
+ * which wrap on the surface width.
+ *
+ * For example, if the surface width is 7680, and split into
+ * three slices of equal width, the boundary array would contain
+ * [2560, 5120, 7680]
+ *
+ * The assignments are
+ * 0 = [0 .. 2559]
+ * 1 = [2560 .. 5119]
+ * 2 = [5120 .. 7679]
+ * 0 = [7680 .. INF]
+	 * The final element is implicitly the same as the first, and at
+	 * first seems invalid since it is never referenced (it lies
+	 * outside the surface). However, it is useful when shifting
+ * (see below).
+ *
+ * For any given valid mcache assignment, a shifted version, wrapped
+ * on the surface width boundary is also assumed to be valid.
+ *
+ * For example, shifting [2560, 5120, 7680] by -50 results in
+ * [2510, 5170, 7630].
+ *
+ * The assignments are now:
+ * 0 = [0 .. 2509]
+ * 1 = [2510 .. 5169]
+ * 2 = [5170 .. 7629]
+ * 0 = [7630 .. INF]
+ */
+ int mcache_x_offsets_plane0[DML2_MAX_MCACHES + 1];
+ int mcache_x_offsets_plane1[DML2_MAX_MCACHES + 1];
+};
#endif
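Editor's note: to make the slice-boundary comment above concrete, here is a minimal lookup sketch assuming exactly the layout the comment describes; mcache_slice_for_x is an illustrative helper, not part of the driver:

```c
/* Map a horizontal position to its mcache slice. Positions at or past
 * the last boundary wrap back to slice 0, which is what the comment's
 * implicit final element expresses. */
static int mcache_slice_for_x(const int *offsets, int num_mcaches, int x)
{
	for (int i = 0; i < num_mcaches; i++) {
		if (x < offsets[i])
			return i;
	}
	return 0;	/* x >= offsets[num_mcaches - 1]: wrap to slice 0 */
}
```

With the comment's example [2560, 5120, 7680], x = 3000 returns slice 1 and x = 8000 wraps back to slice 0.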
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
index a64ec4dcf11a..c047d56527c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
@@ -43,4 +43,5 @@ bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_o
*/
bool dml2_build_mcache_programming(struct dml2_build_mcache_programming_in_out *in_out);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
index 25b607e7b726..84c90050668c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
@@ -156,6 +156,8 @@ struct dml2_dchub_watermark_regs {
uint32_t urgent;
uint32_t sr_enter;
uint32_t sr_exit;
+ uint32_t sr_enter_z8;
+ uint32_t sr_exit_z8;
uint32_t uclk_pstate;
uint32_t fclk_pstate;
uint32_t temp_read_or_ppt;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
index 5e1ab6d97640..255f05de362c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
@@ -166,7 +166,7 @@ struct dml2_surface_cfg {
enum dml2_swizzle_mode tiling;
struct {
- unsigned long pitch;
+ unsigned long pitch; // In elements, two pixels per element in 422 packed format
unsigned long width;
unsigned long height;
} plane0;
@@ -385,6 +385,7 @@ struct dml2_plane_parameters {
long reserved_vblank_time_ns;
unsigned int max_vactive_det_fill_delay_us; // 0 = no reserved time, +ve = explicit max delay
unsigned int gpuvm_min_page_size_kbytes;
+ unsigned int hostvm_min_page_size_kbytes;
enum dml2_svp_mode_override legacy_svp_config; //TODO remove in favor of svp_config
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
index bb863c8c6b39..6ee37386f672 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -456,10 +456,10 @@ bool core_dcn4_mode_support(struct dml2_core_mode_support_in_out *in_out)
in_out->mode_support_result.global.active.urgent_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram] * 1000), 1.0);
in_out->mode_support_result.global.svp_prefetch.average_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] * 1000), 1.0);
in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->urg_bandwidth_required_flip[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] * 1000), 1.0);
- dml2_printf("DML::%s: in_out->mode_support_result.global.active.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_sdp_kbps);
- dml2_printf("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps);
- dml2_printf("DML::%s: in_out->mode_support_result.global.active.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_dram_kbps);
- dml2_printf("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.active.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_sdp_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.active.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_dram_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps);
for (i = 0; i < l->svp_expanded_display_cfg.num_planes; i++) {
in_out->mode_support_result.per_plane[i].dppclk_khz = (unsigned int)(core->clean_me_up.mode_lib.ms.RequiredDPPCLK[i] * 1000);
@@ -509,7 +509,7 @@ bool core_dcn4_mode_support(struct dml2_core_mode_support_in_out *in_out)
stream_index = l->svp_expanded_display_cfg.plane_descriptors[i].stream_index;
in_out->mode_support_result.per_stream[stream_index].dscclk_khz = (unsigned int)core->clean_me_up.mode_lib.ms.required_dscclk_freq_mhz[i] * 1000;
- dml2_printf("CORE_DCN4::%s: i=%d stream_index=%d, in_out->mode_support_result.per_stream[stream_index].dscclk_khz = %u\n", __func__, i, stream_index, in_out->mode_support_result.per_stream[stream_index].dscclk_khz);
+ DML_LOG_VERBOSE("CORE_DCN4::%s: i=%d stream_index=%d, in_out->mode_support_result.per_stream[stream_index].dscclk_khz = %u\n", __func__, i, stream_index, in_out->mode_support_result.per_stream[stream_index].dscclk_khz);
if (!((stream_bitmask >> stream_index) & 0x1)) {
in_out->mode_support_result.cfg_support_info.stream_support_info[stream_index].odms_used = odm_count;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index 4c504cb0e1c5..c4dad7164d31 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -54,104 +54,104 @@ static double dml2_core_div_rem(double dividend, unsigned int divisor, unsigned
static void dml2_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only)
{
- dml2_printf("DML: ===================================== \n");
- dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n");
+ DML_LOG_VERBOSE("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: DML_MODE_SUPPORT_INFO_ST\n");
if (!fail_only || support->ScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
if (!fail_only || support->SourceFormatPixelAndScanSupport == 0)
- dml2_printf("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
+ DML_LOG_VERBOSE("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
if (!fail_only || support->ViewportSizeSupport == 0)
- dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
+ DML_LOG_VERBOSE("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1)
- dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
+ DML_LOG_VERBOSE("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
if (!fail_only || support->BPPForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
if (!fail_only || support->MultistreamWithHDMIOreDP == 1)
- dml2_printf("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
+ DML_LOG_VERBOSE("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
if (!fail_only || support->ExceededMultistreamSlots == 1)
- dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
+ DML_LOG_VERBOSE("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1)
- dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
+ DML_LOG_VERBOSE("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
if (!fail_only || support->NotEnoughLanesForMSO == 1)
- dml2_printf("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
+ DML_LOG_VERBOSE("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
if (!fail_only || support->P2IWith420 == 1)
- dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420);
+ DML_LOG_VERBOSE("DML: support: P2IWith420 = %d\n", support->P2IWith420);
if (!fail_only || support->DSC422NativeNotSupported == 1)
- dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
+ DML_LOG_VERBOSE("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
if (!fail_only || support->DSCSlicesODMModeSupported == 0)
- dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
+ DML_LOG_VERBOSE("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
if (!fail_only || support->NotEnoughDSCUnits == 1)
- dml2_printf("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
if (!fail_only || support->NotEnoughDSCSlices == 1)
- dml2_printf("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
- dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
- dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
+ DML_LOG_VERBOSE("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
if (!fail_only || support->ROBSupport == 0)
- dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport);
+ DML_LOG_VERBOSE("DML: support: ROBSupport = %d\n", support->ROBSupport);
if (!fail_only || support->OutstandingRequestsSupport == 0)
- dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0)
- dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
if (!fail_only || support->DISPCLK_DPPCLK_Support == 0)
- dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
+ DML_LOG_VERBOSE("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
if (!fail_only || support->TotalAvailablePipesSupport == 0)
- dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
+ DML_LOG_VERBOSE("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
if (!fail_only || support->NumberOfOTGSupport == 0)
- dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
if (!fail_only || support->NumberOfHDMIFRLSupport == 0)
- dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
if (!fail_only || support->NumberOfDP2p0Support == 0)
- dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
+ DML_LOG_VERBOSE("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
if (!fail_only || support->EnoughWritebackUnits == 0)
- dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
+ DML_LOG_VERBOSE("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
if (!fail_only || support->WritebackLatencySupport == 0)
- dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
+ DML_LOG_VERBOSE("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
if (!fail_only || support->CursorSupport == 0)
- dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport);
+ DML_LOG_VERBOSE("DML: support: CursorSupport = %d\n", support->CursorSupport);
if (!fail_only || support->PitchSupport == 0)
- dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport);
+ DML_LOG_VERBOSE("DML: support: PitchSupport = %d\n", support->PitchSupport);
if (!fail_only || support->ViewportExceedsSurface == 1)
- dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
+ DML_LOG_VERBOSE("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
if (!fail_only || support->PrefetchSupported == 0)
- dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0)
- dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
+ DML_LOG_VERBOSE("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
if (!fail_only || support->AvgBandwidthSupport == 0)
- dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
+ DML_LOG_VERBOSE("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
if (!fail_only || support->DynamicMetadataSupported == 0)
- dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
+ DML_LOG_VERBOSE("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
if (!fail_only || support->VRatioInPrefetchSupported == 0)
- dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
if (!fail_only || support->ExceededMALLSize == 1)
- dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
+ DML_LOG_VERBOSE("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
if (!fail_only || support->g6_temp_read_support == 0)
- dml2_printf("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
+ DML_LOG_VERBOSE("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
if (!fail_only || support->ImmediateFlipSupport == 0)
- dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
if (!fail_only || support->LinkCapacitySupport == 0)
- dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
+ DML_LOG_VERBOSE("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
if (!fail_only || support->ModeSupport == 0)
- dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport);
- dml2_printf("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: support: ModeSupport = %d\n", support->ModeSupport);
+ DML_LOG_VERBOSE("DML: ===================================== \n");
}
static void get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg)
@@ -179,11 +179,9 @@ static void get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg
} else {
out_bpp[k] = 0;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
- dml2_printf("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
- dml2_printf("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
+ DML_LOG_VERBOSE("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
+ DML_LOG_VERBOSE("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
}
}
@@ -212,9 +210,7 @@ static unsigned int dml_get_num_active_pipes(int unsigned num_planes, const stru
num_active_pipes = num_active_pipes + (unsigned int)cfg_support_info->plane_support_info[k].dpps_used;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
-#endif
+ DML_LOG_VERBOSE("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
return num_active_pipes;
}
@@ -251,7 +247,7 @@ static bool dml_get_is_phantom_pipe(const struct dml2_display_cfg *display_cfg,
unsigned int plane_idx = mode_lib->mp.pipe_plane[pipe_idx];
bool is_phantom = dml_is_phantom_pipe(&display_cfg->plane_descriptors[plane_idx]);
- dml2_printf("DML::%s: pipe_idx=%d legacy_svp_config=%0d is_phantom=%d\n", __func__, pipe_idx, display_cfg->plane_descriptors[plane_idx].overrides.legacy_svp_config, is_phantom);
+ DML_LOG_VERBOSE("DML::%s: pipe_idx=%d legacy_svp_config=%0d is_phantom=%d\n", __func__, pipe_idx, display_cfg->plane_descriptors[plane_idx].overrides.legacy_svp_config, is_phantom);
return is_phantom;
}
@@ -415,19 +411,17 @@ static void CalculateMaxDETAndMinCompressedBufferSize(
*nomDETInKByte = (unsigned int)(math_floor2((double)*MaxTotalDETInKByte / (double)MaxNumDPP, ConfigReturnBufferSegmentSizeInKByte));
*MinCompressedBufferSizeInKByte = ConfigReturnBufferSizeInKByte - *MaxTotalDETInKByte;
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: is_mrq_present = %u\n", __func__, is_mrq_present);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: ROBBufferSizeInKByte = %u\n", __func__, ROBBufferSizeInKByte);
- dml2_printf("DML::%s: MaxNumDPP = %u\n", __func__, MaxNumDPP);
- dml2_printf("DML::%s: MaxTotalDETInKByte = %u\n", __func__, *MaxTotalDETInKByte);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, *nomDETInKByte);
- dml2_printf("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, *MinCompressedBufferSizeInKByte);
-#endif
+ DML_LOG_VERBOSE("DML::%s: is_mrq_present = %u\n", __func__, is_mrq_present);
+ DML_LOG_VERBOSE("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: ROBBufferSizeInKByte = %u\n", __func__, ROBBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: MaxNumDPP = %u\n", __func__, MaxNumDPP);
+ DML_LOG_VERBOSE("DML::%s: MaxTotalDETInKByte = %u\n", __func__, *MaxTotalDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, *nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, *MinCompressedBufferSizeInKByte);
if (nomDETInKByteOverrideEnable) {
*nomDETInKByte = nomDETInKByteOverrideValue;
- dml2_printf("DML::%s: nomDETInKByte = %u (overrided)\n", __func__, *nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u (overrided)\n", __func__, *nomDETInKByte);
}
}
@@ -502,7 +496,7 @@ static bool dml_is_420(enum dml2_source_format_class source_format)
val = 0;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -535,7 +529,7 @@ static unsigned int dml_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode
else if (sw_mode == dml2_gfx11_sw_256kb_r_x)
return 262144;
else {
- DML2_ASSERT(0);
+ DML_ASSERT(0);
return 256;
}
}
@@ -570,8 +564,8 @@ static int unsigned dml_get_gfx_version(enum dml2_swizzle_mode sw_mode)
sw_mode == dml2_gfx11_sw_256kb_r_x) {
version = 11;
} else {
- dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
+ DML_ASSERT(0);
}
return version;
@@ -645,21 +639,19 @@ static void CalculateBytePerPixelAndBlockSizes(
*BytePerPixelY = 2;
*BytePerPixelC = 4;
} else {
- dml2_printf("ERROR: DML::%s: SourcePixelFormat = %u not supported!\n", __func__, SourcePixelFormat);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: DML::%s: SourcePixelFormat = %u not supported!\n", __func__, SourcePixelFormat);
+ DML_ASSERT(0);
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SourcePixelFormat = %u\n", __func__, SourcePixelFormat);
- dml2_printf("DML::%s: BytePerPixelDETY = %f\n", __func__, *BytePerPixelDETY);
- dml2_printf("DML::%s: BytePerPixelDETC = %f\n", __func__, *BytePerPixelDETC);
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, *BytePerPixelY);
- dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, *BytePerPixelC);
- dml2_printf("DML::%s: pitch_y = %u\n", __func__, pitch_y);
- dml2_printf("DML::%s: pitch_c = %u\n", __func__, pitch_c);
- dml2_printf("DML::%s: surf_linear128_l = %u\n", __func__, *surf_linear128_l);
- dml2_printf("DML::%s: surf_linear128_c = %u\n", __func__, *surf_linear128_c);
-#endif
+ DML_LOG_VERBOSE("DML::%s: SourcePixelFormat = %u\n", __func__, SourcePixelFormat);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelDETY = %f\n", __func__, *BytePerPixelDETY);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelDETC = %f\n", __func__, *BytePerPixelDETC);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelY = %u\n", __func__, *BytePerPixelY);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelC = %u\n", __func__, *BytePerPixelC);
+ DML_LOG_VERBOSE("DML::%s: pitch_y = %u\n", __func__, pitch_y);
+ DML_LOG_VERBOSE("DML::%s: pitch_c = %u\n", __func__, pitch_c);
+ DML_LOG_VERBOSE("DML::%s: surf_linear128_l = %u\n", __func__, *surf_linear128_l);
+ DML_LOG_VERBOSE("DML::%s: surf_linear128_c = %u\n", __func__, *surf_linear128_c);
if (dml_get_gfx_version(SurfaceTiling) == 11) {
*surf_linear128_l = 0;
@@ -703,12 +695,10 @@ static void CalculateBytePerPixelAndBlockSizes(
*BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
*BlockWidth256BytesC = 256U / *BytePerPixelC / *BlockHeight256BytesC;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: BlockWidth256BytesY = %u\n", __func__, *BlockWidth256BytesY);
- dml2_printf("DML::%s: BlockHeight256BytesY = %u\n", __func__, *BlockHeight256BytesY);
- dml2_printf("DML::%s: BlockWidth256BytesC = %u\n", __func__, *BlockWidth256BytesC);
- dml2_printf("DML::%s: BlockHeight256BytesC = %u\n", __func__, *BlockHeight256BytesC);
-#endif
+ DML_LOG_VERBOSE("DML::%s: BlockWidth256BytesY = %u\n", __func__, *BlockWidth256BytesY);
+ DML_LOG_VERBOSE("DML::%s: BlockHeight256BytesY = %u\n", __func__, *BlockHeight256BytesY);
+ DML_LOG_VERBOSE("DML::%s: BlockWidth256BytesC = %u\n", __func__, *BlockWidth256BytesC);
+ DML_LOG_VERBOSE("DML::%s: BlockHeight256BytesC = %u\n", __func__, *BlockHeight256BytesC);
if (dml_get_gfx_version(SurfaceTiling) == 11) {
if (SurfaceTiling == dml2_gfx11_sw_linear) {
@@ -752,8 +742,8 @@ static void CalculateBytePerPixelAndBlockSizes(
} else if (SurfaceTiling == dml2_sw_256kb_2d) {
macro_tile_scale = 32;
} else {
- dml2_printf("ERROR: Invalid SurfaceTiling setting! val=%u\n", SurfaceTiling);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: Invalid SurfaceTiling setting! val=%u\n", SurfaceTiling);
+ DML_ASSERT(0);
}
*MacroTileHeightY = macro_tile_scale * *BlockHeight256BytesY;
@@ -766,12 +756,10 @@ static void CalculateBytePerPixelAndBlockSizes(
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MacroTileWidthY = %u\n", __func__, *MacroTileWidthY);
- dml2_printf("DML::%s: MacroTileHeightY = %u\n", __func__, *MacroTileHeightY);
- dml2_printf("DML::%s: MacroTileWidthC = %u\n", __func__, *MacroTileWidthC);
- dml2_printf("DML::%s: MacroTileHeightC = %u\n", __func__, *MacroTileHeightC);
-#endif
+ DML_LOG_VERBOSE("DML::%s: MacroTileWidthY = %u\n", __func__, *MacroTileWidthY);
+ DML_LOG_VERBOSE("DML::%s: MacroTileHeightY = %u\n", __func__, *MacroTileHeightY);
+ DML_LOG_VERBOSE("DML::%s: MacroTileWidthC = %u\n", __func__, *MacroTileWidthC);
+ DML_LOG_VERBOSE("DML::%s: MacroTileHeightC = %u\n", __func__, *MacroTileHeightC);
}
static void CalculateSinglePipeDPPCLKAndSCLThroughput(
@@ -860,10 +848,8 @@ static void CalculateSwathWidth(
unsigned int surface_width_ub_c;
unsigned int surface_height_ub_c;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
-#endif
+ DML_LOG_VERBOSE("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
if (!dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle)) {
@@ -872,11 +858,9 @@ static void CalculateSwathWidth(
SwathWidthSingleDPPY[k] = (unsigned int)display_cfg->plane_descriptors[k].composition.viewport.plane0.height;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u ViewportWidth=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
- dml2_printf("DML::%s: k=%u ViewportHeight=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
- dml2_printf("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportWidth=%lu\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportHeight=%lu\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
MainSurfaceODMMode = ODMMode[k];
@@ -899,13 +883,11 @@ static void CalculateSwathWidth(
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u HActive=%u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active);
- dml2_printf("DML::%s: k=%u HRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- dml2_printf("DML::%s: k=%u MainSurfaceODMMode=%u\n", __func__, k, MainSurfaceODMMode);
- dml2_printf("DML::%s: k=%u SwathWidthSingleDPPY=%u\n", __func__, k, SwathWidthSingleDPPY[k]);
- dml2_printf("DML::%s: k=%u SwathWidthY=%u\n", __func__, k, SwathWidthY[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u HActive=%lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active);
+ DML_LOG_VERBOSE("DML::%s: k=%u HRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u MainSurfaceODMMode=%u\n", __func__, k, MainSurfaceODMMode);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidthSingleDPPY=%u\n", __func__, k, SwathWidthSingleDPPY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidthY=%u\n", __func__, k, SwathWidthY[k]);
if (dml_is_420(display_cfg->plane_descriptors[k].pixel_format)) {
SwathWidthC[k] = SwathWidthY[k] / 2;
@@ -934,22 +916,20 @@ static void CalculateSwathWidth(
surface_width_ub_c = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane1.width, req_width_horz_c);
surface_height_ub_c = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane1.height, Read256BytesBlockHeightC[k]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u surface_width_ub_l=%u\n", __func__, k, surface_width_ub_l);
- dml2_printf("DML::%s: k=%u surface_height_ub_l=%u\n", __func__, k, surface_height_ub_l);
- dml2_printf("DML::%s: k=%u surface_width_ub_c=%u\n", __func__, k, surface_width_ub_c);
- dml2_printf("DML::%s: k=%u surface_height_ub_c=%u\n", __func__, k, surface_height_ub_c);
- dml2_printf("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
- dml2_printf("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
- dml2_printf("DML::%s: k=%u Read256BytesBlockWidthY=%u\n", __func__, k, Read256BytesBlockWidthY[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockHeightY=%u\n", __func__, k, Read256BytesBlockHeightY[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockWidthC=%u\n", __func__, k, Read256BytesBlockWidthC[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockHeightC=%u\n", __func__, k, Read256BytesBlockHeightC[k]);
- dml2_printf("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
- dml2_printf("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
- dml2_printf("DML::%s: k=%u ViewportStationary=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.stationary);
- dml2_printf("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_width_ub_l=%u\n", __func__, k, surface_width_ub_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_height_ub_l=%u\n", __func__, k, surface_height_ub_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_width_ub_c=%u\n", __func__, k, surface_width_ub_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_height_ub_c=%u\n", __func__, k, surface_height_ub_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockWidthY=%u\n", __func__, k, Read256BytesBlockWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockHeightY=%u\n", __func__, k, Read256BytesBlockHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockWidthC=%u\n", __func__, k, Read256BytesBlockWidthC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockHeightC=%u\n", __func__, k, Read256BytesBlockHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportStationary=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.stationary);
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
req_per_swath_ub_l[k] = 0;
req_per_swath_ub_c[k] = 0;
@@ -995,15 +975,12 @@ static void CalculateSwathWidth(
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u swath_width_luma_ub=%u\n", __func__, k, swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u swath_width_chroma_ub=%u\n", __func__, k, swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightY=%u\n", __func__, k, MaximumSwathHeightY[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightC=%u\n", __func__, k, MaximumSwathHeightC[k]);
- dml2_printf("DML::%s: k=%u req_per_swath_ub_l=%u\n", __func__, k, req_per_swath_ub_l[k]);
- dml2_printf("DML::%s: k=%u req_per_swath_ub_c=%u\n", __func__, k, req_per_swath_ub_c[k]);
-#endif
-
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_luma_ub=%u\n", __func__, k, swath_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_chroma_ub=%u\n", __func__, k, swath_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightY=%u\n", __func__, k, MaximumSwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightC=%u\n", __func__, k, MaximumSwathHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_per_swath_ub_l=%u\n", __func__, k, req_per_swath_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_per_swath_ub_c=%u\n", __func__, k, req_per_swath_ub_c[k]);
}
}
@@ -1018,13 +995,11 @@ static bool UnboundedRequest(bool unb_req_force_en, bool unb_req_force_val, unsi
if (unb_req_force_en) {
unb_req_en = unb_req_force_val && unb_req_ok;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: unb_req_force_en = %u\n", __func__, unb_req_force_en);
- dml2_printf("DML::%s: unb_req_force_val = %u\n", __func__, unb_req_force_val);
- dml2_printf("DML::%s: unb_req_ok = %u\n", __func__, unb_req_ok);
- dml2_printf("DML::%s: unb_req_en = %u\n", __func__, unb_req_en);
-#endif
- return (unb_req_en);
+ DML_LOG_VERBOSE("DML::%s: unb_req_force_en = %u\n", __func__, unb_req_force_en);
+ DML_LOG_VERBOSE("DML::%s: unb_req_force_val = %u\n", __func__, unb_req_force_val);
+ DML_LOG_VERBOSE("DML::%s: unb_req_ok = %u\n", __func__, unb_req_ok);
+ DML_LOG_VERBOSE("DML::%s: unb_req_en = %u\n", __func__, unb_req_en);
+ return unb_req_en;
}
static void CalculateDETBufferSize(
@@ -1054,16 +1029,14 @@ static void CalculateDETBufferSize(
bool NextPotentialSurfaceToAssignDETPieceFound;
bool MinimizeReallocationSuccess = false;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, UnboundedRequestEnabled);
- dml2_printf("DML::%s: MaxTotalDETInKByte = %u\n", __func__, MaxTotalDETInKByte);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, MinCompressedBufferSizeInKByte);
- dml2_printf("DML::%s: CompressedBufferSegmentSizeInkByte = %u\n", __func__, CompressedBufferSegmentSizeInkByte);
-#endif
+ DML_LOG_VERBOSE("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
+ DML_LOG_VERBOSE("DML::%s: UnboundedRequestEnabled = %u\n", __func__, UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: MaxTotalDETInKByte = %u\n", __func__, MaxTotalDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, MinCompressedBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSegmentSizeInkByte = %u\n", __func__, CompressedBufferSegmentSizeInkByte);
// Note: Will use default det size if it fits 2 swaths
if (UnboundedRequestEnabled) {
@@ -1092,19 +1065,15 @@ static void CalculateDETBufferSize(
l->minDET = l->minDET + ConfigReturnBufferSegmentSizeInkByte;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u minDET = %u\n", __func__, k, l->minDET);
- dml2_printf("DML::%s: k=%u max_minDET = %u\n", __func__, k, l->max_minDET);
- dml2_printf("DML::%s: k=%u minDET_pipe = %u\n", __func__, k, l->minDET_pipe);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, full_swath_bytes_c[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u minDET = %u\n", __func__, k, l->minDET);
+ DML_LOG_VERBOSE("DML::%s: k=%u max_minDET = %u\n", __func__, k, l->max_minDET);
+ DML_LOG_VERBOSE("DML::%s: k=%u minDET_pipe = %u\n", __func__, k, l->minDET_pipe);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, full_swath_bytes_c[k]);
if (l->minDET_pipe == 0) {
l->minDET_pipe = (unsigned int)(math_max2(128, math_ceil2(((double)full_swath_bytes_l[k] + (double)full_swath_bytes_c[k]) / 1024.0, ConfigReturnBufferSegmentSizeInkByte)));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u minDET_pipe = %u (assume each plane take half DET)\n", __func__, k, l->minDET_pipe);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u minDET_pipe = %u (assume each plane take half DET)\n", __func__, k, l->minDET_pipe);
}
if (dml_is_phantom_pipe(&display_cfg->plane_descriptors[k])) {
@@ -1117,12 +1086,10 @@ static void CalculateDETBufferSize(
l->DETBufferSizePoolInKByte = l->DETBufferSizePoolInKByte - (ForceSingleDPP ? 1 : DPPPerSurface[k]) * l->minDET_pipe;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, DPPPerSurface[k]);
- dml2_printf("DML::%s: k=%u DETSizeOverride = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.det_size_override_kb);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
- dml2_printf("DML::%s: DETBufferSizePoolInKByte = %u\n", __func__, l->DETBufferSizePoolInKByte);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, DPPPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETSizeOverride = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.det_size_override_kb);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizePoolInKByte = %u\n", __func__, l->DETBufferSizePoolInKByte);
}
if (display_cfg->minimize_det_reallocation) {
@@ -1194,14 +1161,12 @@ static void CalculateDETBufferSize(
l->TotalBandwidth = l->TotalBandwidth + ReadBandwidthLuma[k] + ReadBandwidthChroma[k];
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: --- Before bandwidth adjustment ---\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: --- Before bandwidth adjustment ---\n", __func__);
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
}
- dml2_printf("DML::%s: --- DET allocation with bandwidth ---\n", __func__);
-#endif
- dml2_printf("DML::%s: TotalBandwidth = %f\n", __func__, l->TotalBandwidth);
+ DML_LOG_VERBOSE("DML::%s: --- DET allocation with bandwidth ---\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: TotalBandwidth = %f\n", __func__, l->TotalBandwidth);
l->BandwidthOfSurfacesNotAssignedDETPiece = l->TotalBandwidth;
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
@@ -1213,10 +1178,8 @@ static void CalculateDETBufferSize(
} else {
DETPieceAssignedToThisSurfaceAlready[k] = false;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, k, DETPieceAssignedToThisSurfaceAlready[k]);
- dml2_printf("DML::%s: k=%u BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, k, l->BandwidthOfSurfacesNotAssignedDETPiece);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, k, DETPieceAssignedToThisSurfaceAlready[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, k, l->BandwidthOfSurfacesNotAssignedDETPiece);
}
for (unsigned int j = 0; j < NumberOfActiveSurfaces; ++j) {
@@ -1224,22 +1187,18 @@ static void CalculateDETBufferSize(
l->NextSurfaceToAssignDETPiece = 0;
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthLuma[k] = %f\n", __func__, j, k, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthChroma[k] = %f\n", __func__, j, k, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthLuma[Next] = %f\n", __func__, j, k, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthChroma[Next] = %f\n", __func__, j, k, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u k=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, k, l->NextSurfaceToAssignDETPiece);
-#endif
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthLuma[k] = %f\n", __func__, j, k, ReadBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthChroma[k] = %f\n", __func__, j, k, ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthLuma[Next] = %f\n", __func__, j, k, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthChroma[Next] = %f\n", __func__, j, k, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, k, l->NextSurfaceToAssignDETPiece);
if (!DETPieceAssignedToThisSurfaceAlready[k] && (!NextPotentialSurfaceToAssignDETPieceFound ||
ReadBandwidthLuma[k] + ReadBandwidthChroma[k] < ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece] + ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece])) {
l->NextSurfaceToAssignDETPiece = k;
NextPotentialSurfaceToAssignDETPieceFound = true;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u k=%u, DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, j, k, DETPieceAssignedToThisSurfaceAlready[k]);
- dml2_printf("DML::%s: j=%u k=%u, NextPotentialSurfaceToAssignDETPieceFound = %u\n", __func__, j, k, NextPotentialSurfaceToAssignDETPieceFound);
-#endif
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, j, k, DETPieceAssignedToThisSurfaceAlready[k]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, NextPotentialSurfaceToAssignDETPieceFound = %u\n", __func__, j, k, NextPotentialSurfaceToAssignDETPieceFound);
}
if (NextPotentialSurfaceToAssignDETPieceFound) {
@@ -1249,20 +1208,16 @@ static void CalculateDETBufferSize(
* (ForceSingleDPP ? 1 : DPPPerSurface[l->NextSurfaceToAssignDETPiece]) * ConfigReturnBufferSegmentSizeInkByte,
math_floor2((double)l->DETBufferSizePoolInKByte, (ForceSingleDPP ? 1 : DPPPerSurface[l->NextSurfaceToAssignDETPiece]) * ConfigReturnBufferSegmentSizeInkByte)));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u, DETBufferSizePoolInKByte = %u\n", __func__, j, l->DETBufferSizePoolInKByte);
- dml2_printf("DML::%s: j=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, l->NextSurfaceToAssignDETPiece);
- dml2_printf("DML::%s: j=%u, ReadBandwidthLuma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u, ReadBandwidthChroma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u, BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, j, l->BandwidthOfSurfacesNotAssignedDETPiece);
- dml2_printf("DML::%s: j=%u, NextDETBufferPieceInKByte = %u\n", __func__, j, l->NextDETBufferPieceInKByte);
- dml2_printf("DML::%s: j=%u, DETBufferSizeInKByte[%u] increases from %u ", __func__, j, l->NextSurfaceToAssignDETPiece, DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: j=%u, DETBufferSizePoolInKByte = %u\n", __func__, j, l->DETBufferSizePoolInKByte);
+ DML_LOG_VERBOSE("DML::%s: j=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, l->NextSurfaceToAssignDETPiece);
+ DML_LOG_VERBOSE("DML::%s: j=%u, ReadBandwidthLuma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u, ReadBandwidthChroma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u, BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, j, l->BandwidthOfSurfacesNotAssignedDETPiece);
+ DML_LOG_VERBOSE("DML::%s: j=%u, NextDETBufferPieceInKByte = %u\n", __func__, j, l->NextDETBufferPieceInKByte);
+ DML_LOG_VERBOSE("DML::%s: j=%u, DETBufferSizeInKByte[%u] increases from %u ", __func__, j, l->NextSurfaceToAssignDETPiece, DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece] = DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece] + l->NextDETBufferPieceInKByte / (ForceSingleDPP ? 1 : DPPPerSurface[l->NextSurfaceToAssignDETPiece]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("to %u\n", DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
-#endif
+ DML_LOG_VERBOSE("to %u\n", DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
l->DETBufferSizePoolInKByte = l->DETBufferSizePoolInKByte - l->NextDETBufferPieceInKByte;
DETPieceAssignedToThisSurfaceAlready[l->NextSurfaceToAssignDETPiece] = true;
@@ -1274,13 +1229,11 @@ static void CalculateDETBufferSize(
}
*CompressedBufferSizeInkByte = *CompressedBufferSizeInkByte * CompressedBufferSegmentSizeInkByte / ConfigReturnBufferSegmentSizeInkByte;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: --- After bandwidth adjustment ---\n", __func__);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: --- After bandwidth adjustment ---\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *CompressedBufferSizeInkByte);
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u (TotalReadBandWidth=%f)\n", __func__, k, DETBufferSizeInKByte[k], ReadBandwidthLuma[k] + ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u (TotalReadBandWidth=%f)\n", __func__, k, DETBufferSizeInKByte[k], ReadBandwidthLuma[k] + ReadBandwidthChroma[k]);
}
-#endif
}
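/*
 * A compact, self-contained sketch of the bandwidth-weighted distribution
 * loop above (illustrative only -- the driver version uses the dml math
 * helpers, per-DPP scaling and segment-size rounding): each pass gives the
 * not-yet-assigned surface with the lowest read bandwidth a slice of the
 * remaining pool proportional to its share of the unassigned bandwidth.
 */
static void det_distribute_sketch(const double bw[], unsigned int det_kb[],
				  unsigned int n, unsigned int pool_kb)
{
	bool assigned[8] = { false };	/* sketch assumes n <= 8 surfaces */
	double bw_left = 0.0;
	unsigned int piece, i, j, pick;

	for (i = 0; i < n; i++)
		bw_left += bw[i];

	for (j = 0; j < n; j++) {
		/* lowest-bandwidth surface without a DET piece yet */
		pick = n;
		for (i = 0; i < n; i++)
			if (!assigned[i] && (pick == n || bw[i] < bw[pick]))
				pick = i;
		if (pick == n || bw_left <= 0.0)
			break;
		/* proportional share of what remains in the pool */
		piece = (unsigned int)(pool_kb * bw[pick] / bw_left);
		det_kb[pick] += piece;
		pool_kb -= piece;
		bw_left -= bw[pick];
		assigned[pick] = true;
	}
}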
static double CalculateRequiredDispclk(
@@ -1510,15 +1463,13 @@ static unsigned int dscceComputeDelay(
//pixel delay is group_delay (converted to pixels) + pipeline delay; however, the first group is a special case since it is processed as soon as it arrives (i.e., in 3 cycles regardless of pixel format)
pixels = (group_delay - 1) * cycles_per_group + 3 + pipeline_delay;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: bpc: %u\n", __func__, bpc);
- dml2_printf("DML::%s: BPP: %f\n", __func__, BPP);
- dml2_printf("DML::%s: sliceWidth: %u\n", __func__, sliceWidth);
- dml2_printf("DML::%s: numSlices: %u\n", __func__, numSlices);
- dml2_printf("DML::%s: pixelFormat: %u\n", __func__, pixelFormat);
- dml2_printf("DML::%s: Output: %u\n", __func__, Output);
- dml2_printf("DML::%s: pixels: %u\n", __func__, pixels);
-#endif
+ DML_LOG_VERBOSE("DML::%s: bpc: %u\n", __func__, bpc);
+ DML_LOG_VERBOSE("DML::%s: BPP: %f\n", __func__, BPP);
+ DML_LOG_VERBOSE("DML::%s: sliceWidth: %u\n", __func__, sliceWidth);
+ DML_LOG_VERBOSE("DML::%s: numSlices: %u\n", __func__, numSlices);
+ DML_LOG_VERBOSE("DML::%s: pixelFormat: %u\n", __func__, pixelFormat);
+ DML_LOG_VERBOSE("DML::%s: Output: %u\n", __func__, Output);
+ DML_LOG_VERBOSE("DML::%s: pixels: %u\n", __func__, pixels);
return pixels;
}
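/*
 * Worked example for the pixel-delay formula above (numbers illustrative):
 * with group_delay = 4, cycles_per_group = 3 and pipeline_delay = 2,
 * pixels = (4 - 1) * 3 + 3 + 2 = 14, where the lone "+ 3" is the first
 * group, processed in 3 cycles regardless of pixel format.
 */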
@@ -1593,10 +1544,8 @@ static unsigned int dscComputeDelay(enum dml2_output_format_class pixelFormat, e
// sft
Delay = Delay + 1;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: pixelFormat = %u\n", __func__, pixelFormat);
- dml2_printf("DML::%s: Delay = %u\n", __func__, Delay);
-#endif
+ DML_LOG_VERBOSE("DML::%s: pixelFormat = %u\n", __func__, pixelFormat);
+ DML_LOG_VERBOSE("DML::%s: Delay = %u\n", __func__, Delay);
return Delay;
}
@@ -1667,10 +1616,8 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
}
meta_surface_bytes = (unsigned int)(p->DCCMetaPitch * vp_height_meta_ub * p->BytePerPixel / 256.0);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCMetaPitch = %u\n", __func__, p->DCCMetaPitch);
- dml2_printf("DML::%s: meta_surface_bytes = %u\n", __func__, meta_surface_bytes);
-#endif
+ DML_LOG_VERBOSE("DML::%s: DCCMetaPitch = %u\n", __func__, p->DCCMetaPitch);
+ DML_LOG_VERBOSE("DML::%s: meta_surface_bytes = %u\n", __func__, meta_surface_bytes);
if (p->GPUVMEnable == true) {
double meta_vmpg_bytes = 4.0 * 1024.0;
*p->meta_pte_bytes_per_frame_ub = (unsigned int)((math_ceil2((double) (meta_surface_bytes - meta_vmpg_bytes) / (8 * meta_vmpg_bytes), 1) + 1) * 64);
@@ -1724,25 +1671,23 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
vm_bytes = *p->meta_pte_bytes_per_frame_ub + extra_mpde_bytes + *p->dpde0_bytes_per_frame_ub + extra_dpde_bytes;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->DCCEnable);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
- dml2_printf("DML::%s: SwModeLinear = %u\n", __func__, p->SurfaceTiling == dml2_sw_linear);
- dml2_printf("DML::%s: BytePerPixel = %u\n", __func__, p->BytePerPixel);
- dml2_printf("DML::%s: GPUVMMaxPageTableLevels = %u\n", __func__, p->GPUVMMaxPageTableLevels);
- dml2_printf("DML::%s: BlockHeight256Bytes = %u\n", __func__, p->BlockHeight256Bytes);
- dml2_printf("DML::%s: BlockWidth256Bytes = %u\n", __func__, p->BlockWidth256Bytes);
- dml2_printf("DML::%s: MacroTileHeight = %u\n", __func__, p->MacroTileHeight);
- dml2_printf("DML::%s: MacroTileWidth = %u\n", __func__, p->MacroTileWidth);
- dml2_printf("DML::%s: meta_pte_bytes_per_frame_ub = %u\n", __func__, *p->meta_pte_bytes_per_frame_ub);
- dml2_printf("DML::%s: dpde0_bytes_per_frame_ub = %u\n", __func__, *p->dpde0_bytes_per_frame_ub);
- dml2_printf("DML::%s: extra_mpde_bytes = %u\n", __func__, extra_mpde_bytes);
- dml2_printf("DML::%s: extra_dpde_bytes = %u\n", __func__, extra_dpde_bytes);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
- dml2_printf("DML::%s: ViewportHeight = %u\n", __func__, p->ViewportHeight);
- dml2_printf("DML::%s: SwathWidth = %u\n", __func__, p->SwathWidth);
- dml2_printf("DML::%s: vp_height_dpte_ub = %u\n", __func__, vp_height_dpte_ub);
-#endif
+ DML_LOG_VERBOSE("DML::%s: DCCEnable = %u\n", __func__, p->DCCEnable);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
+ DML_LOG_VERBOSE("DML::%s: SwModeLinear = %u\n", __func__, p->SurfaceTiling == dml2_sw_linear);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixel = %u\n", __func__, p->BytePerPixel);
+ DML_LOG_VERBOSE("DML::%s: GPUVMMaxPageTableLevels = %u\n", __func__, p->GPUVMMaxPageTableLevels);
+ DML_LOG_VERBOSE("DML::%s: BlockHeight256Bytes = %u\n", __func__, p->BlockHeight256Bytes);
+ DML_LOG_VERBOSE("DML::%s: BlockWidth256Bytes = %u\n", __func__, p->BlockWidth256Bytes);
+ DML_LOG_VERBOSE("DML::%s: MacroTileHeight = %u\n", __func__, p->MacroTileHeight);
+ DML_LOG_VERBOSE("DML::%s: MacroTileWidth = %u\n", __func__, p->MacroTileWidth);
+ DML_LOG_VERBOSE("DML::%s: meta_pte_bytes_per_frame_ub = %u\n", __func__, *p->meta_pte_bytes_per_frame_ub);
+ DML_LOG_VERBOSE("DML::%s: dpde0_bytes_per_frame_ub = %u\n", __func__, *p->dpde0_bytes_per_frame_ub);
+ DML_LOG_VERBOSE("DML::%s: extra_mpde_bytes = %u\n", __func__, extra_mpde_bytes);
+ DML_LOG_VERBOSE("DML::%s: extra_dpde_bytes = %u\n", __func__, extra_dpde_bytes);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: ViewportHeight = %u\n", __func__, p->ViewportHeight);
+ DML_LOG_VERBOSE("DML::%s: SwathWidth = %u\n", __func__, p->SwathWidth);
+ DML_LOG_VERBOSE("DML::%s: vp_height_dpte_ub = %u\n", __func__, vp_height_dpte_ub);
if (p->SurfaceTiling == dml2_sw_linear) {
*p->PixelPTEReqHeight = 1;
@@ -1778,22 +1723,20 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->vmpg_width = 1024 * p->GPUVMMinPageSizeKBytes / (p->MacroTileHeight * p->BytePerPixel);
if (p->GPUVMEnable == true) {
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes=%u and sw_mode=%u (tile_size=%d) not supported!\n",
+ DML_LOG_VERBOSE("DML::%s: GPUVMMinPageSizeKBytes=%u and sw_mode=%u (tile_size=%d) not supported!\n",
__func__, p->GPUVMMinPageSizeKBytes, p->SurfaceTiling, dml_get_tile_block_size_bytes(p->SurfaceTiling));
- DML2_ASSERT(0);
+ DML_ASSERT(0);
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
- dml2_printf("DML::%s: PixelPTEReqHeight = %u\n", __func__, *p->PixelPTEReqHeight);
- dml2_printf("DML::%s: PixelPTEReqWidth = %u\n", __func__, *p->PixelPTEReqWidth);
- dml2_printf("DML::%s: PixelPTEReqWidth_linear = %u\n", __func__, PixelPTEReqWidth_linear);
- dml2_printf("DML::%s: PTERequestSize = %u\n", __func__, *p->PTERequestSize);
- dml2_printf("DML::%s: Pitch = %u\n", __func__, p->Pitch);
- dml2_printf("DML::%s: vmpg_width = %u\n", __func__, *p->vmpg_width);
- dml2_printf("DML::%s: vmpg_height = %u\n", __func__, *p->vmpg_height);
-#endif
+ DML_LOG_VERBOSE("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEReqHeight = %u\n", __func__, *p->PixelPTEReqHeight);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEReqWidth = %u\n", __func__, *p->PixelPTEReqWidth);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEReqWidth_linear = %u\n", __func__, PixelPTEReqWidth_linear);
+ DML_LOG_VERBOSE("DML::%s: PTERequestSize = %u\n", __func__, *p->PTERequestSize);
+ DML_LOG_VERBOSE("DML::%s: Pitch = %u\n", __func__, p->Pitch);
+ DML_LOG_VERBOSE("DML::%s: vmpg_width = %u\n", __func__, *p->vmpg_width);
+ DML_LOG_VERBOSE("DML::%s: vmpg_height = %u\n", __func__, *p->vmpg_height);
*p->dpte_row_height_one_row_per_frame = vp_height_dpte_ub;
*p->dpte_row_width_ub_one_row_per_frame = (unsigned int)((math_ceil2(((double)p->Pitch * (double)*p->dpte_row_height_one_row_per_frame / (double)*p->PixelPTEReqHeight - 1) / (double)*p->PixelPTEReqWidth, 1) + 1) * (double)*p->PixelPTEReqWidth);
@@ -1811,7 +1754,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->dpte_row_height_linear = 128;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (linear)\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u (linear)\n", __func__, *p->dpte_row_width_ub);
#endif
} else if (!dml_is_vertical_rotation(p->RotationAngle)) {
@@ -1825,7 +1768,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->dpte_row_width_ub = (unsigned int)((math_ceil2((double)(p->SwathWidth - 1) / (double)*p->PixelPTEReqWidth, 1) + 1.0) * *p->PixelPTEReqWidth);
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (tiled horz)\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u (tiled horz)\n", __func__, *p->dpte_row_width_ub);
#endif
*p->PixelPTEBytesPerRow = *p->dpte_row_width_ub / *p->PixelPTEReqWidth * *p->PTERequestSize;
@@ -1840,7 +1783,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->PixelPTEBytesPerRow = (unsigned int)((double)*p->dpte_row_width_ub / (double)*p->PixelPTEReqHeight * *p->PTERequestSize);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (tiled vert)\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u (tiled vert)\n", __func__, *p->dpte_row_width_ub);
#endif
}
@@ -1852,18 +1795,18 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->PixelPTEBytesPerRowStorage = *p->PixelPTEBytesPerRow;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
- dml2_printf("DML::%s: meta_row_height = %u\n", __func__, *p->meta_row_height);
- dml2_printf("DML::%s: dpte_row_height = %u\n", __func__, *p->dpte_row_height);
- dml2_printf("DML::%s: dpte_row_height_linear = %u\n", __func__, *p->dpte_row_height_linear);
- dml2_printf("DML::%s: dpte_row_width_ub = %u\n", __func__, *p->dpte_row_width_ub);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, *p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: PixelPTEBytesPerRowStorage = %u\n", __func__, *p->PixelPTEBytesPerRowStorage);
- dml2_printf("DML::%s: PTEBufferSizeInRequests = %u\n", __func__, p->PTEBufferSizeInRequests);
- dml2_printf("DML::%s: dpte_row_height_one_row_per_frame = %u\n", __func__, *p->dpte_row_height_one_row_per_frame);
- dml2_printf("DML::%s: dpte_row_width_ub_one_row_per_frame = %u\n", __func__, *p->dpte_row_width_ub_one_row_per_frame);
- dml2_printf("DML::%s: PixelPTEBytesPerRow_one_row_per_frame = %u\n", __func__, *p->PixelPTEBytesPerRow_one_row_per_frame);
+ DML_LOG_VERBOSE("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
+ DML_LOG_VERBOSE("DML::%s: meta_row_height = %u\n", __func__, *p->meta_row_height);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height = %u\n", __func__, *p->dpte_row_height);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height_linear = %u\n", __func__, *p->dpte_row_height_linear);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, *p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRowStorage = %u\n", __func__, *p->PixelPTEBytesPerRowStorage);
+ DML_LOG_VERBOSE("DML::%s: PTEBufferSizeInRequests = %u\n", __func__, p->PTEBufferSizeInRequests);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height_one_row_per_frame = %u\n", __func__, *p->dpte_row_height_one_row_per_frame);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub_one_row_per_frame = %u\n", __func__, *p->dpte_row_width_ub_one_row_per_frame);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow_one_row_per_frame = %u\n", __func__, *p->PixelPTEBytesPerRow_one_row_per_frame);
#endif
return vm_bytes;
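/*
 * Worked example for the PTE-row sizing in this function (illustrative
 * numbers): with dpte_row_width_ub = 2048 pixels, PixelPTEReqWidth = 128
 * and PTERequestSize = 64 bytes, one row of PTEs needs 2048 / 128 = 16
 * requests, so PixelPTEBytesPerRow = 16 * 64 = 1024 bytes.
 */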
@@ -1894,12 +1837,12 @@ static unsigned int CalculatePrefetchSourceLines(
double numLines = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
- dml2_printf("DML::%s: VTaps = %u\n", __func__, VTaps);
- dml2_printf("DML::%s: ViewportXStart = %u\n", __func__, ViewportXStart);
- dml2_printf("DML::%s: ViewportYStart = %u\n", __func__, ViewportYStart);
- dml2_printf("DML::%s: ViewportStationary = %u\n", __func__, ViewportStationary);
- dml2_printf("DML::%s: SwathHeight = %u\n", __func__, SwathHeight);
+ DML_LOG_VERBOSE("DML::%s: VRatio = %f\n", __func__, VRatio);
+ DML_LOG_VERBOSE("DML::%s: VTaps = %u\n", __func__, VTaps);
+ DML_LOG_VERBOSE("DML::%s: ViewportXStart = %u\n", __func__, ViewportXStart);
+ DML_LOG_VERBOSE("DML::%s: ViewportYStart = %u\n", __func__, ViewportYStart);
+ DML_LOG_VERBOSE("DML::%s: ViewportStationary = %u\n", __func__, ViewportStationary);
+ DML_LOG_VERBOSE("DML::%s: SwathHeight = %u\n", __func__, SwathHeight);
#endif
if (ProgressiveToInterlaceUnitInOPP)
*VInitPreFill = (unsigned int)(math_floor2((VRatio + (double)VTaps + 1) / 2.0, 1));
@@ -1934,11 +1877,11 @@ static unsigned int CalculatePrefetchSourceLines(
numLines = *MaxNumSwath * SwathHeight + MaxPartialSwath;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: vp_start_rot = %u\n", __func__, vp_start_rot);
- dml2_printf("DML::%s: VInitPreFill = %u\n", __func__, *VInitPreFill);
- dml2_printf("DML::%s: MaxPartialSwath = %u\n", __func__, MaxPartialSwath);
- dml2_printf("DML::%s: MaxNumSwath = %u\n", __func__, *MaxNumSwath);
- dml2_printf("DML::%s: Prefetch source lines = %3.2f\n", __func__, numLines);
+ DML_LOG_VERBOSE("DML::%s: vp_start_rot = %u\n", __func__, vp_start_rot);
+ DML_LOG_VERBOSE("DML::%s: VInitPreFill = %u\n", __func__, *VInitPreFill);
+ DML_LOG_VERBOSE("DML::%s: MaxPartialSwath = %u\n", __func__, MaxPartialSwath);
+ DML_LOG_VERBOSE("DML::%s: MaxNumSwath = %u\n", __func__, *MaxNumSwath);
+ DML_LOG_VERBOSE("DML::%s: Prefetch source lines = %3.2f\n", __func__, numLines);
#endif
return (unsigned int)(numLines);
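/*
 * Worked example for the prefetch-source-line math above (illustrative
 * numbers): with VRatio = 2.0 and VTaps = 4 in the interlace path,
 * VInitPreFill = floor((2.0 + 4 + 1) / 2) = 3; then with MaxNumSwath = 2,
 * SwathHeight = 8 and MaxPartialSwath = 5, numLines = 2 * 8 + 5 = 21.
 */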
@@ -2007,8 +1950,8 @@ static void CalculateMALLUseForStaticScreen(
if (is_using_mall_for_ss[k])
TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, is_using_mall_for_ss[k]);
- dml2_printf("DML::%s: k=%u, TotalSurfaceSizeInMALL = %u\n", __func__, k, TotalSurfaceSizeInMALL);
+ DML_LOG_VERBOSE("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, is_using_mall_for_ss[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TotalSurfaceSizeInMALL = %u\n", __func__, k, TotalSurfaceSizeInMALL);
#endif
}
@@ -2022,7 +1965,7 @@ static void CalculateMALLUseForStaticScreen(
(!CanAddAnotherSurfaceToMALL || SurfaceSizeInMALL[k] < SurfaceSizeInMALL[SurfaceToAddToMALL])) {
CanAddAnotherSurfaceToMALL = true;
SurfaceToAddToMALL = k;
- dml2_printf("DML::%s: k=%u, UseMALLForStaticScreen = %u (dis, en, optimize)\n", __func__, k, display_cfg->plane_descriptors[k].overrides.refresh_from_mall);
+ DML_LOG_VERBOSE("DML::%s: k=%u, UseMALLForStaticScreen = %u (dis, en, optimize)\n", __func__, k, display_cfg->plane_descriptors[k].overrides.refresh_from_mall);
}
}
if (CanAddAnotherSurfaceToMALL) {
@@ -2030,8 +1973,8 @@ static void CalculateMALLUseForStaticScreen(
TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[SurfaceToAddToMALL];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SurfaceToAddToMALL = %u\n", __func__, SurfaceToAddToMALL);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALL = %u\n", __func__, TotalSurfaceSizeInMALL);
+ DML_LOG_VERBOSE("DML::%s: SurfaceToAddToMALL = %u\n", __func__, SurfaceToAddToMALL);
+ DML_LOG_VERBOSE("DML::%s: TotalSurfaceSizeInMALL = %u\n", __func__, TotalSurfaceSizeInMALL);
#endif
}
}
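/*
 * Selection policy visible in this hunk (a sketch of the intent, inferred
 * from the loop): among surfaces whose refresh_from_mall override permits
 * it, greedily pick the candidate with the smallest SurfaceSizeInMALL,
 * add it to the MALL total, and repeat while another surface still fits.
 */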
@@ -2203,15 +2146,15 @@ static void CalculateDCCConfiguration(
segment_order_vert_contiguous_chroma = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCEnabled = %u\n", __func__, DCCEnabled);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
- dml2_printf("DML::%s: DETBufferSizeForDCC = %u\n", __func__, DETBufferSizeForDCC);
- dml2_printf("DML::%s: req128_horz_wc_l = %u\n", __func__, req128_horz_wc_l);
- dml2_printf("DML::%s: req128_horz_wc_c = %u\n", __func__, req128_horz_wc_c);
- dml2_printf("DML::%s: full_swath_bytes_horz_wc_l = %u\n", __func__, full_swath_bytes_horz_wc_l);
- dml2_printf("DML::%s: full_swath_bytes_vert_wc_c = %u\n", __func__, full_swath_bytes_vert_wc_c);
- dml2_printf("DML::%s: segment_order_horz_contiguous_luma = %u\n", __func__, segment_order_horz_contiguous_luma);
- dml2_printf("DML::%s: segment_order_horz_contiguous_chroma = %u\n", __func__, segment_order_horz_contiguous_chroma);
+ DML_LOG_VERBOSE("DML::%s: DCCEnabled = %u\n", __func__, DCCEnabled);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeForDCC = %u\n", __func__, DETBufferSizeForDCC);
+ DML_LOG_VERBOSE("DML::%s: req128_horz_wc_l = %u\n", __func__, req128_horz_wc_l);
+ DML_LOG_VERBOSE("DML::%s: req128_horz_wc_c = %u\n", __func__, req128_horz_wc_c);
+ DML_LOG_VERBOSE("DML::%s: full_swath_bytes_horz_wc_l = %u\n", __func__, full_swath_bytes_horz_wc_l);
+ DML_LOG_VERBOSE("DML::%s: full_swath_bytes_vert_wc_c = %u\n", __func__, full_swath_bytes_vert_wc_c);
+ DML_LOG_VERBOSE("DML::%s: segment_order_horz_contiguous_luma = %u\n", __func__, segment_order_horz_contiguous_luma);
+ DML_LOG_VERBOSE("DML::%s: segment_order_horz_contiguous_chroma = %u\n", __func__, segment_order_horz_contiguous_chroma);
#endif
if (DCCProgrammingAssumesScanDirectionUnknown == true) {
if (req128_horz_wc_l == 0 && req128_vert_wc_l == 0) {
@@ -2301,12 +2244,12 @@ static void CalculateDCCConfiguration(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MaxUncompressedBlockLuma = %u\n", __func__, *MaxUncompressedBlockLuma);
- dml2_printf("DML::%s: MaxCompressedBlockLuma = %u\n", __func__, *MaxCompressedBlockLuma);
- dml2_printf("DML::%s: IndependentBlockLuma = %u\n", __func__, *IndependentBlockLuma);
- dml2_printf("DML::%s: MaxUncompressedBlockChroma = %u\n", __func__, *MaxUncompressedBlockChroma);
- dml2_printf("DML::%s: MaxCompressedBlockChroma = %u\n", __func__, *MaxCompressedBlockChroma);
- dml2_printf("DML::%s: IndependentBlockChroma = %u\n", __func__, *IndependentBlockChroma);
+ DML_LOG_VERBOSE("DML::%s: MaxUncompressedBlockLuma = %u\n", __func__, *MaxUncompressedBlockLuma);
+ DML_LOG_VERBOSE("DML::%s: MaxCompressedBlockLuma = %u\n", __func__, *MaxCompressedBlockLuma);
+ DML_LOG_VERBOSE("DML::%s: IndependentBlockLuma = %u\n", __func__, *IndependentBlockLuma);
+ DML_LOG_VERBOSE("DML::%s: MaxUncompressedBlockChroma = %u\n", __func__, *MaxUncompressedBlockChroma);
+ DML_LOG_VERBOSE("DML::%s: MaxCompressedBlockChroma = %u\n", __func__, *MaxCompressedBlockChroma);
+ DML_LOG_VERBOSE("DML::%s: IndependentBlockChroma = %u\n", __func__, *IndependentBlockChroma);
#endif
}
@@ -2326,26 +2269,26 @@ static void calculate_mcache_row_bytes(
unsigned int mvmpg_per_mcache;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_chans = %u\n", __func__, p->num_chans);
- dml2_printf("DML::%s: mem_word_bytes = %u\n", __func__, p->mem_word_bytes);
- dml2_printf("DML::%s: mcache_line_size_bytes = %u\n", __func__, p->mcache_line_size_bytes);
- dml2_printf("DML::%s: mcache_size_bytes = %u\n", __func__, p->mcache_size_bytes);
- dml2_printf("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: gpuvm_page_size_kbytes = %u\n", __func__, p->gpuvm_page_size_kbytes);
- dml2_printf("DML::%s: vp_stationary = %u\n", __func__, p->vp_stationary);
- dml2_printf("DML::%s: tiling_mode = %u\n", __func__, p->tiling_mode);
- dml2_printf("DML::%s: vp_start_x = %u\n", __func__, p->vp_start_x);
- dml2_printf("DML::%s: vp_start_y = %u\n", __func__, p->vp_start_y);
- dml2_printf("DML::%s: full_vp_width = %u\n", __func__, p->full_vp_width);
- dml2_printf("DML::%s: full_vp_height = %u\n", __func__, p->full_vp_height);
- dml2_printf("DML::%s: blk_width = %u\n", __func__, p->blk_width);
- dml2_printf("DML::%s: blk_height = %u\n", __func__, p->blk_height);
- dml2_printf("DML::%s: vmpg_width = %u\n", __func__, p->vmpg_width);
- dml2_printf("DML::%s: vmpg_height = %u\n", __func__, p->vmpg_height);
- dml2_printf("DML::%s: full_swath_bytes = %u\n", __func__, p->full_swath_bytes);
-#endif
- DML2_ASSERT(p->mcache_line_size_bytes != 0);
- DML2_ASSERT(p->mcache_size_bytes != 0);
+ DML_LOG_VERBOSE("DML::%s: num_chans = %u\n", __func__, p->num_chans);
+ DML_LOG_VERBOSE("DML::%s: mem_word_bytes = %u\n", __func__, p->mem_word_bytes);
+ DML_LOG_VERBOSE("DML::%s: mcache_line_size_bytes = %u\n", __func__, p->mcache_line_size_bytes);
+ DML_LOG_VERBOSE("DML::%s: mcache_size_bytes = %u\n", __func__, p->mcache_size_bytes);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_page_size_kbytes = %u\n", __func__, p->gpuvm_page_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: vp_stationary = %u\n", __func__, p->vp_stationary);
+ DML_LOG_VERBOSE("DML::%s: tiling_mode = %u\n", __func__, p->tiling_mode);
+ DML_LOG_VERBOSE("DML::%s: vp_start_x = %u\n", __func__, p->vp_start_x);
+ DML_LOG_VERBOSE("DML::%s: vp_start_y = %u\n", __func__, p->vp_start_y);
+ DML_LOG_VERBOSE("DML::%s: full_vp_width = %u\n", __func__, p->full_vp_width);
+ DML_LOG_VERBOSE("DML::%s: full_vp_height = %u\n", __func__, p->full_vp_height);
+ DML_LOG_VERBOSE("DML::%s: blk_width = %u\n", __func__, p->blk_width);
+ DML_LOG_VERBOSE("DML::%s: blk_height = %u\n", __func__, p->blk_height);
+ DML_LOG_VERBOSE("DML::%s: vmpg_width = %u\n", __func__, p->vmpg_width);
+ DML_LOG_VERBOSE("DML::%s: vmpg_height = %u\n", __func__, p->vmpg_height);
+ DML_LOG_VERBOSE("DML::%s: full_swath_bytes = %u\n", __func__, p->full_swath_bytes);
+#endif
+ DML_ASSERT(p->mcache_line_size_bytes != 0);
+ DML_ASSERT(p->mcache_size_bytes != 0);
*p->mvmpg_width = 0;
*p->mvmpg_height = 0;
@@ -2370,8 +2313,8 @@ static void calculate_mcache_row_bytes(
*p->mvmpg_width = p->vmpg_width;
*p->mvmpg_height = p->vmpg_height;
} else if (!((blk_bytes == 65536) && (vmpg_bytes == 4096))) {
- dml2_printf("ERROR: DML::%s: Tiling size and vm page size combination not supported\n", __func__);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: DML::%s: Tiling size and vm page size combination not supported\n", __func__);
+ DML_ASSERT(0);
}
}
@@ -2439,25 +2382,25 @@ static void calculate_mcache_row_bytes(
*p->mvmpg_per_mcache_lb = (unsigned int)math_floor2(mvmpg_per_mcache, 1);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: vmpg_bytes = %u\n", __func__, vmpg_bytes);
- dml2_printf("DML::%s: blk_bytes = %u\n", __func__, blk_bytes);
- dml2_printf("DML::%s: meta_per_mvmpg_per_channel = %f\n", __func__, meta_per_mvmpg_per_channel);
- dml2_printf("DML::%s: mvmpg_per_row_ub = %u\n", __func__, mvmpg_per_row_ub);
- dml2_printf("DML::%s: meta_row_width_ub = %u\n", __func__, *p->meta_row_width_ub);
- dml2_printf("DML::%s: mvmpg_width = %u\n", __func__, *p->mvmpg_width);
- dml2_printf("DML::%s: mvmpg_height = %u\n", __func__, *p->mvmpg_height);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_nom_overhead_factor);
- dml2_printf("DML::%s: dcc_dram_bw_pref_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_pref_overhead_factor);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: vmpg_bytes = %u\n", __func__, vmpg_bytes);
+ DML_LOG_VERBOSE("DML::%s: blk_bytes = %u\n", __func__, blk_bytes);
+ DML_LOG_VERBOSE("DML::%s: meta_per_mvmpg_per_channel = %f\n", __func__, meta_per_mvmpg_per_channel);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_per_row_ub = %u\n", __func__, mvmpg_per_row_ub);
+ DML_LOG_VERBOSE("DML::%s: meta_row_width_ub = %u\n", __func__, *p->meta_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_width = %u\n", __func__, *p->mvmpg_width);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_height = %u\n", __func__, *p->mvmpg_height);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_nom_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_nom_overhead_factor);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_pref_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_pref_overhead_factor);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: mcache_row_bytes = %u\n", __func__, *p->mcache_row_bytes);
- dml2_printf("DML::%s: mcache_row_bytes_per_channel = %u\n", __func__, *p->mcache_row_bytes_per_channel);
- dml2_printf("DML::%s: num_mcaches = %u\n", __func__, *p->num_mcaches);
+ DML_LOG_VERBOSE("DML::%s: mcache_row_bytes = %u\n", __func__, *p->mcache_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: mcache_row_bytes_per_channel = %u\n", __func__, *p->mcache_row_bytes_per_channel);
+ DML_LOG_VERBOSE("DML::%s: num_mcaches = %u\n", __func__, *p->num_mcaches);
#endif
- DML2_ASSERT(*p->num_mcaches > 0);
+ DML_ASSERT(*p->num_mcaches > 0);
}
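/*
 * The DML2_ASSERT -> DML_ASSERT rename in this file assumes an assert
 * helper in the new DML debug header. The sketch below is a hypothetical
 * definition, shown only to illustrate the usual shape of such a macro
 * (the real one may differ, e.g. by calling a backend-specific trap):
 */
#define DML_ASSERT(condition)						\
	do {								\
		if (!(condition))					\
			DML_LOG_VERBOSE("DML ASSERT FAIL %s:%d: %s\n",	\
					__FILE__, __LINE__, #condition); \
	} while (0)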
static void calculate_mcache_setting(
@@ -2523,7 +2466,7 @@ static void calculate_mcache_setting(
l->l_p.mvmpg_per_mcache_lb = &l->mvmpg_per_mcache_lb_l;
calculate_mcache_row_bytes(scratch, &l->l_p);
- DML2_ASSERT(*p->num_mcaches_l > 0);
+ DML_ASSERT(*p->num_mcaches_l > 0);
if (l->is_dual_plane) {
l->c_p.num_chans = p->num_chans;
@@ -2559,7 +2502,7 @@ static void calculate_mcache_setting(
l->c_p.mvmpg_per_mcache_lb = &l->mvmpg_per_mcache_lb_c;
calculate_mcache_row_bytes(scratch, &l->c_p);
- DML2_ASSERT(*p->num_mcaches_c > 0);
+ DML_ASSERT(*p->num_mcaches_c > 0);
}
// Sharing for iMALL access
@@ -2598,28 +2541,28 @@ static void calculate_mcache_setting(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: imall_enable = %u\n", __func__, p->imall_enable);
- dml2_printf("DML::%s: is_dual_plane = %u\n", __func__, l->is_dual_plane);
- dml2_printf("DML::%s: surf_vert = %u\n", __func__, p->surf_vert);
- dml2_printf("DML::%s: mvmpg_width_l = %u\n", __func__, l->mvmpg_width_l);
- dml2_printf("DML::%s: mvmpg_height_l = %u\n", __func__, l->mvmpg_height_l);
- dml2_printf("DML::%s: mcache_remainder_l = %f\n", __func__, l->mcache_remainder_l);
- dml2_printf("DML::%s: num_mcaches_l = %u\n", __func__, *p->num_mcaches_l);
- dml2_printf("DML::%s: avg_mcache_element_size_l = %u\n", __func__, l->avg_mcache_element_size_l);
- dml2_printf("DML::%s: mvmpg_access_width_l = %u\n", __func__, l->mvmpg_access_width_l);
- dml2_printf("DML::%s: mall_comb_mcache_l = %u\n", __func__, *p->mall_comb_mcache_l);
+ DML_LOG_VERBOSE("DML::%s: imall_enable = %u\n", __func__, p->imall_enable);
+ DML_LOG_VERBOSE("DML::%s: is_dual_plane = %u\n", __func__, l->is_dual_plane);
+ DML_LOG_VERBOSE("DML::%s: surf_vert = %u\n", __func__, p->surf_vert);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_width_l = %u\n", __func__, l->mvmpg_width_l);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_height_l = %u\n", __func__, l->mvmpg_height_l);
+ DML_LOG_VERBOSE("DML::%s: mcache_remainder_l = %f\n", __func__, l->mcache_remainder_l);
+ DML_LOG_VERBOSE("DML::%s: num_mcaches_l = %u\n", __func__, *p->num_mcaches_l);
+ DML_LOG_VERBOSE("DML::%s: avg_mcache_element_size_l = %u\n", __func__, l->avg_mcache_element_size_l);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_access_width_l = %u\n", __func__, l->mvmpg_access_width_l);
+ DML_LOG_VERBOSE("DML::%s: mall_comb_mcache_l = %u\n", __func__, *p->mall_comb_mcache_l);
if (l->is_dual_plane) {
- dml2_printf("DML::%s: mvmpg_width_c = %u\n", __func__, l->mvmpg_width_c);
- dml2_printf("DML::%s: mvmpg_height_c = %u\n", __func__, l->mvmpg_height_c);
- dml2_printf("DML::%s: mcache_remainder_c = %f\n", __func__, l->mcache_remainder_c);
- dml2_printf("DML::%s: luma_time_factor = %f\n", __func__, l->luma_time_factor);
- dml2_printf("DML::%s: num_mcaches_c = %u\n", __func__, *p->num_mcaches_c);
- dml2_printf("DML::%s: avg_mcache_element_size_c = %u\n", __func__, l->avg_mcache_element_size_c);
- dml2_printf("DML::%s: mvmpg_access_width_c = %u\n", __func__, l->mvmpg_access_width_c);
- dml2_printf("DML::%s: mall_comb_mcache_c = %u\n", __func__, *p->mall_comb_mcache_c);
- dml2_printf("DML::%s: lc_comb_last_mcache_size = %u\n", __func__, l->lc_comb_last_mcache_size);
- dml2_printf("DML::%s: lc_comb_mcache = %u\n", __func__, *p->lc_comb_mcache);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_width_c = %u\n", __func__, l->mvmpg_width_c);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_height_c = %u\n", __func__, l->mvmpg_height_c);
+ DML_LOG_VERBOSE("DML::%s: mcache_remainder_c = %f\n", __func__, l->mcache_remainder_c);
+ DML_LOG_VERBOSE("DML::%s: luma_time_factor = %f\n", __func__, l->luma_time_factor);
+ DML_LOG_VERBOSE("DML::%s: num_mcaches_c = %u\n", __func__, *p->num_mcaches_c);
+ DML_LOG_VERBOSE("DML::%s: avg_mcache_element_size_c = %u\n", __func__, l->avg_mcache_element_size_c);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_access_width_c = %u\n", __func__, l->mvmpg_access_width_c);
+ DML_LOG_VERBOSE("DML::%s: mall_comb_mcache_c = %u\n", __func__, *p->mall_comb_mcache_c);
+ DML_LOG_VERBOSE("DML::%s: lc_comb_last_mcache_size = %u\n", __func__, l->lc_comb_last_mcache_size);
+ DML_LOG_VERBOSE("DML::%s: lc_comb_mcache = %u\n", __func__, *p->lc_comb_mcache);
}
#endif
// calculate split_coordinate
@@ -2639,11 +2582,11 @@ static void calculate_mcache_setting(
}
#ifdef __DML_VBA_DEBUG__
for (n = 0; n < *p->num_mcaches_l; n++)
- dml2_printf("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
if (l->is_dual_plane) {
for (n = 0; n < *p->num_mcaches_c; n++)
- dml2_printf("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
}
#endif
@@ -2660,10 +2603,10 @@ static void calculate_mcache_setting(
#ifdef __DML_VBA_DEBUG__
for (n = 0; n < *p->num_mcaches_l; n++)
- dml2_printf("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
for (n = 0; n < *p->num_mcaches_c; n++)
- dml2_printf("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
#endif
}
@@ -2694,8 +2637,8 @@ static void calculate_mall_bw_overhead_factor(
mall_prefetch_dram_overhead_factor[k] = 2.0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, mall_prefetch_sdp_overhead_factor = %f\n", __func__, k, mall_prefetch_sdp_overhead_factor[k]);
- dml2_printf("DML::%s: k=%u, mall_prefetch_dram_overhead_factor = %f\n", __func__, k, mall_prefetch_dram_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, mall_prefetch_sdp_overhead_factor = %f\n", __func__, k, mall_prefetch_sdp_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, mall_prefetch_dram_overhead_factor = %f\n", __func__, k, mall_prefetch_dram_overhead_factor[k]);
#endif
}
}
@@ -2772,22 +2715,20 @@ static double dml_get_return_bandwidth_available(
else // dml2_core_internal_bw_dram
return_bw_mbps = derate_dram_bandwidth;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: is_avg_bw = %u\n", __func__, is_avg_bw);
- dml2_printf("DML::%s: is_hvm_en = %u\n", __func__, is_hvm_en);
- dml2_printf("DML::%s: is_hvm_only = %u\n", __func__, is_hvm_only);
- dml2_printf("DML::%s: state_type = %s\n", __func__, dml2_core_internal_soc_state_type_str(state_type));
- dml2_printf("DML::%s: bw_type = %s\n", __func__, dml2_core_internal_bw_type_str(bw_type));
- dml2_printf("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
- dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
- dml2_printf("DML::%s: ideal_sdp_bandwidth = %f\n", __func__, ideal_sdp_bandwidth);
- dml2_printf("DML::%s: ideal_fabric_bandwidth = %f\n", __func__, ideal_fabric_bandwidth);
- dml2_printf("DML::%s: ideal_dram_bandwidth = %f\n", __func__, ideal_dram_bandwidth);
- dml2_printf("DML::%s: derate_sdp_bandwidth = %f (derate %f)\n", __func__, derate_sdp_bandwidth, derate_sdp_factor);
- dml2_printf("DML::%s: derate_fabric_bandwidth = %f (derate %f)\n", __func__, derate_fabric_bandwidth, derate_fabric_factor);
- dml2_printf("DML::%s: derate_dram_bandwidth = %f (derate %f)\n", __func__, derate_dram_bandwidth, derate_dram_factor);
- dml2_printf("DML::%s: return_bw_mbps = %f\n", __func__, return_bw_mbps);
-#endif
+ DML_LOG_VERBOSE("DML::%s: is_avg_bw = %u\n", __func__, is_avg_bw);
+ DML_LOG_VERBOSE("DML::%s: is_hvm_en = %u\n", __func__, is_hvm_en);
+ DML_LOG_VERBOSE("DML::%s: is_hvm_only = %u\n", __func__, is_hvm_only);
+ DML_LOG_VERBOSE("DML::%s: state_type = %s\n", __func__, dml2_core_internal_soc_state_type_str(state_type));
+ DML_LOG_VERBOSE("DML::%s: bw_type = %s\n", __func__, dml2_core_internal_bw_type_str(bw_type));
+ DML_LOG_VERBOSE("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: ideal_sdp_bandwidth = %f\n", __func__, ideal_sdp_bandwidth);
+ DML_LOG_VERBOSE("DML::%s: ideal_fabric_bandwidth = %f\n", __func__, ideal_fabric_bandwidth);
+ DML_LOG_VERBOSE("DML::%s: ideal_dram_bandwidth = %f\n", __func__, ideal_dram_bandwidth);
+ DML_LOG_VERBOSE("DML::%s: derate_sdp_bandwidth = %f (derate %f)\n", __func__, derate_sdp_bandwidth, derate_sdp_factor);
+ DML_LOG_VERBOSE("DML::%s: derate_fabric_bandwidth = %f (derate %f)\n", __func__, derate_fabric_bandwidth, derate_fabric_factor);
+ DML_LOG_VERBOSE("DML::%s: derate_dram_bandwidth = %f (derate %f)\n", __func__, derate_dram_bandwidth, derate_dram_factor);
+ DML_LOG_VERBOSE("DML::%s: return_bw_mbps = %f\n", __func__, return_bw_mbps);
return return_bw_mbps;
}
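/*
 * Reading the derate logs above (illustrative numbers): with an ideal DRAM
 * bandwidth of 100000 MB/s and derate_dram_factor = 0.8, the function logs
 * "derate_dram_bandwidth = 80000.000000 (derate 0.800000)" and, when
 * bw_type is dml2_core_internal_bw_dram, returns return_bw_mbps = 80000.
 */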
@@ -2807,9 +2748,9 @@ static noinline_for_stack void calculate_bandwidth_available(
{
unsigned int n, m;
- dml2_printf("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
- dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, dram_bw_mbps);
+ DML_LOG_VERBOSE("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: dram_bw_mbps = %f\n", __func__, dram_bw_mbps);
// Calculate all the bandwidth available
for (m = 0; m < dml2_core_internal_soc_state_max; m++) {
@@ -2828,8 +2769,8 @@ static noinline_for_stack void calculate_bandwidth_available(
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), avg_bandwidth_available[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_bandwidth_available[m][n]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), avg_bandwidth_available[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_bandwidth_available[m][n]);
#endif
// urg_bandwidth_available_vm_only is indexed by soc_state
@@ -2843,9 +2784,9 @@ static noinline_for_stack void calculate_bandwidth_available(
urg_bandwidth_available_min[m] = math_min2(urg_bandwidth_available[m][dml2_core_internal_bw_dram], urg_bandwidth_available[m][dml2_core_internal_bw_sdp]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), avg_bandwidth_available_min[m]);
- dml2_printf("DML::%s: urg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_min[m]);
- dml2_printf("DML::%s: urg_bandwidth_available_vm_only[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_vm_only[n]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), avg_bandwidth_available_min[m]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_min[m]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_vm_only[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_vm_only[n]);
#endif
}
}
@@ -2879,13 +2820,13 @@ static void calculate_avg_bandwidth_required(
// SysActive and SVP Prefetch AVG bandwidth Check
for (k = 0; k < num_active_planes; ++k) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: plane %0d\n", __func__, k);
- dml2_printf("DML::%s: ReadBandwidthLuma=%f\n", __func__, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: ReadBandwidthChroma=%f\n", __func__, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor_p0=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p0[k]);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor_p1=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p1[k]);
- dml2_printf("DML::%s: mall_prefetch_dram_overhead_factor=%f\n", __func__, mall_prefetch_dram_overhead_factor[k]);
- dml2_printf("DML::%s: mall_prefetch_sdp_overhead_factor=%f\n", __func__, mall_prefetch_sdp_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: plane %0d\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: ReadBandwidthLuma=%f\n", __func__, ReadBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: ReadBandwidthChroma=%f\n", __func__, ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_nom_overhead_factor_p0=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p0[k]);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_nom_overhead_factor_p1=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p1[k]);
+ DML_LOG_VERBOSE("DML::%s: mall_prefetch_dram_overhead_factor=%f\n", __func__, mall_prefetch_dram_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: mall_prefetch_sdp_overhead_factor=%f\n", __func__, mall_prefetch_sdp_overhead_factor[k]);
#endif
sdp_overhead_factor = mall_prefetch_sdp_overhead_factor[k];
@@ -2902,10 +2843,10 @@ static void calculate_avg_bandwidth_required(
avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] += dram_overhead_factor_p0 * ReadBandwidthLuma[k] + dram_overhead_factor_p1 * ReadBandwidthChroma[k] + cursor_bw[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram]);
#endif
}
}
@@ -3080,10 +3021,10 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
&p->MaxNumSwathY[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, vm_bytes_l = %u (before hvm level)\n", __func__, k, s->vm_bytes_l);
- dml2_printf("DML::%s: k=%u, vm_bytes_c = %u (before hvm level)\n", __func__, k, s->vm_bytes_c);
- dml2_printf("DML::%s: k=%u, meta_row_bytes_per_row_ub_l = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_l[k]);
- dml2_printf("DML::%s: k=%u, meta_row_bytes_per_row_ub_c = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_bytes_l = %u (before hvm level)\n", __func__, k, s->vm_bytes_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_bytes_c = %u (before hvm level)\n", __func__, k, s->vm_bytes_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_bytes_per_row_ub_l = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_bytes_per_row_ub_c = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_c[k]);
#endif
p->vm_bytes[k] = (s->vm_bytes_l + s->vm_bytes_c) * (1 + 8 * s->HostVMDynamicLevels);
p->meta_row_bytes[k] = s->meta_row_bytes_per_row_ub_l[k] + s->meta_row_bytes_per_row_ub_c[k];
@@ -3091,8 +3032,8 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
p->meta_row_bytes_per_row_ub_c[k] = s->meta_row_bytes_per_row_ub_c[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, meta_row_bytes = %u\n", __func__, k, p->meta_row_bytes[k]);
- dml2_printf("DML::%s: k=%u, vm_bytes = %u (after hvm level)\n", __func__, k, p->vm_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_bytes = %u\n", __func__, k, p->meta_row_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_bytes = %u (after hvm level)\n", __func__, k, p->vm_bytes[k]);
#endif
if (s->PixelPTEBytesPerRowStorageY[k] <= 64 * s->PTEBufferSizeInRequestsForLuma[k] && s->PixelPTEBytesPerRowStorageC[k] <= 64 * s->PTEBufferSizeInRequestsForChroma[k]) {
p->PTEBufferSizeNotExceeded[k] = true;
@@ -3104,18 +3045,18 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
s->PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * s->PTEBufferSizeInRequestsForChroma[k]);
#ifdef __DML_VBA_DEBUG__
if (p->PTEBufferSizeNotExceeded[k] == 0 || s->one_row_per_frame_fits_in_buffer[k] == 0) {
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowStorageY = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowStorageC = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageC[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeInRequestsForLuma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForLuma[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeInRequestsForChroma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForChroma[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded (not one_row_per_frame) = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowStorageY = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowStorageC = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeInRequestsForLuma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeInRequestsForChroma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeNotExceeded (not one_row_per_frame) = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, HostVMDynamicLevels = %u\n", __func__, k, s->HostVMDynamicLevels);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowY_one_row_per_frame[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowC_one_row_per_frame[k]);
- dml2_printf("DML::%s: k=%u, one_row_per_frame_fits_in_buffer = %u\n", __func__, k, s->one_row_per_frame_fits_in_buffer[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, HostVMDynamicLevels = %u\n", __func__, k, s->HostVMDynamicLevels);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowY_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowY_one_row_per_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowC_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowC_one_row_per_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, one_row_per_frame_fits_in_buffer = %u\n", __func__, k, s->one_row_per_frame_fits_in_buffer[k]);
}
#endif
}
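For context, the call sites converted here remain wrapped in __DML_VBA_DEBUG__, so the per-surface dumps still compile away in non-debug builds. A minimal sketch of what a macro pair like DML_LOG_VERBOSE/DML_ASSERT could reduce to — a hypothetical printk-backed definition, not the actual DML debug headers, which may differ:

    /* Hypothetical sketch only; the real definitions live in the DML
     * debug headers. The no-op branch makes the verbose dumps free
     * when the debug knob is off. */
    #ifdef DML_DEBUG_LOG
    #define DML_LOG_VERBOSE(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
    #else
    #define DML_LOG_VERBOSE(fmt, ...) do { } while (0)
    #endif

    #define DML_ASSERT(expr) WARN_ON(!(expr))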
@@ -3146,8 +3087,8 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
p->DCCMetaBufferSizeNotExceeded[k] = true;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, SurfaceSizeInMALL = %u\n", __func__, k, p->SurfaceSizeInMALL[k]);
- dml2_printf("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, p->is_using_mall_for_ss[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SurfaceSizeInMALL = %u\n", __func__, k, p->SurfaceSizeInMALL[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, p->is_using_mall_for_ss[k]);
#endif
p->use_one_row_for_frame[k] = p->myPipe[k].FORCE_ONE_ROW_FOR_FRAME || p->is_using_mall_for_ss[k] || (p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe) ||
(dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) || (p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes > 64 && dml_is_vertical_rotation(p->myPipe[k].RotationAngle));
@@ -3170,9 +3111,9 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
p->DCCMetaBufferSizeNotExceeded[k] = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, meta_row_bytes = %d\n", __func__, k, p->meta_row_bytes[k]);
- dml2_printf("DML::%s: k=%d, DCCMetaBufferSizeBytes = %d\n", __func__, k, p->DCCMetaBufferSizeBytes);
- dml2_printf("DML::%s: k=%d, DCCMetaBufferSizeNotExceeded = %d\n", __func__, k, p->DCCMetaBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_row_bytes = %d\n", __func__, k, p->meta_row_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DCCMetaBufferSizeBytes = %d\n", __func__, k, p->DCCMetaBufferSizeBytes);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DCCMetaBufferSizeNotExceeded = %d\n", __func__, k, p->DCCMetaBufferSizeNotExceeded[k]);
#endif
}
@@ -3209,20 +3150,20 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
&p->dpte_row_bw[k],
&p->meta_row_bw[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame_flip = %u\n", __func__, k, p->use_one_row_for_frame_flip[k]);
- dml2_printf("DML::%s: k=%u, UseMALLForPStateChange = %u\n", __func__, k, p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config);
- dml2_printf("DML::%s: k=%u, dpte_row_height_luma = %u\n", __func__, k, p->dpte_row_height_luma[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_height_chroma = %u\n", __func__, k, p->dpte_row_height_chroma[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRow = %u\n", __func__, k, p->PixelPTEBytesPerRow[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, gpuvm_enable = %u\n", __func__, k, p->display_cfg->gpuvm_enable);
- dml2_printf("DML::%s: k=%u, PTE_BUFFER_MODE = %u\n", __func__, k, p->PTE_BUFFER_MODE[k]);
- dml2_printf("DML::%s: k=%u, BIGK_FRAGMENT_SIZE = %u\n", __func__, k, p->BIGK_FRAGMENT_SIZE[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, use_one_row_for_frame_flip = %u\n", __func__, k, p->use_one_row_for_frame_flip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, UseMALLForPStateChange = %u\n", __func__, k, p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_height_luma = %u\n", __func__, k, p->dpte_row_height_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_height_chroma = %u\n", __func__, k, p->dpte_row_height_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRow = %u\n", __func__, k, p->PixelPTEBytesPerRow[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, gpuvm_enable = %u\n", __func__, k, p->display_cfg->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTE_BUFFER_MODE = %u\n", __func__, k, p->PTE_BUFFER_MODE[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BIGK_FRAGMENT_SIZE = %u\n", __func__, k, p->BIGK_FRAGMENT_SIZE[k]);
#endif
}
}
@@ -3257,19 +3198,19 @@ static double CalculateUrgentLatency(
}
#ifdef __DML_VBA_DEBUG__
if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
- dml2_printf("DML::%s: umc_urgent_ramp_latency_margin = %f\n", __func__, umc_urgent_ramp_latency_margin);
+ DML_LOG_VERBOSE("DML::%s: qos_type = %d\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: umc_urgent_ramp_latency_margin = %f\n", __func__, umc_urgent_ramp_latency_margin);
} else {
- dml2_printf("DML::%s: UrgentLatencyPixelDataOnly = %f\n", __func__, UrgentLatencyPixelDataOnly);
- dml2_printf("DML::%s: UrgentLatencyPixelMixedWithVMData = %f\n", __func__, UrgentLatencyPixelMixedWithVMData);
- dml2_printf("DML::%s: UrgentLatencyVMDataOnly = %f\n", __func__, UrgentLatencyVMDataOnly);
- dml2_printf("DML::%s: UrgentLatencyAdjustmentFabricClockComponent = %f\n", __func__, UrgentLatencyAdjustmentFabricClockComponent);
- dml2_printf("DML::%s: UrgentLatencyAdjustmentFabricClockReference = %f\n", __func__, UrgentLatencyAdjustmentFabricClockReference);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyPixelDataOnly = %f\n", __func__, UrgentLatencyPixelDataOnly);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyPixelMixedWithVMData = %f\n", __func__, UrgentLatencyPixelMixedWithVMData);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyVMDataOnly = %f\n", __func__, UrgentLatencyVMDataOnly);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyAdjustmentFabricClockComponent = %f\n", __func__, UrgentLatencyAdjustmentFabricClockComponent);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyAdjustmentFabricClockReference = %f\n", __func__, UrgentLatencyAdjustmentFabricClockReference);
}
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, FabricClock);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, urgent_latency);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, FabricClock);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, urgent_latency);
#endif
return urgent_latency;
}
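The dcn4x branch above logs the raw inputs from which the urgent latency is derived: a uclk cycle budget, the uclk frequency, and a percentage ramp margin. As a rough illustration of that derivation — a sketch assuming the margin is applied multiplicatively, not the exact DML expression, which per the log lines also folds in FabricClock terms:

    /* Hypothetical helper mirroring the logged inputs: convert a uclk
     * cycle budget to microseconds and apply the UMC ramp margin. */
    static double urgent_ramp_latency_us(unsigned int urgent_ramp_uclk_cycles,
                                         double uclk_freq_mhz,
                                         double umc_urgent_ramp_latency_margin)
    {
            return urgent_ramp_uclk_cycles / uclk_freq_mhz *
                   (1.0 + umc_urgent_ramp_latency_margin / 100.0);
    }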
@@ -3296,18 +3237,18 @@ static double CalculateTripToMemory(
#ifdef __DML_VBA_DEBUG__
if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles);
- dml2_printf("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles);
- dml2_printf("DML::%s: trip_to_memory_uclk_cycles = %d\n", __func__, trip_to_memory_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, FabricClock);
- dml2_printf("DML::%s: fabric_max_transport_latency_margin = %f\n", __func__, fabric_max_transport_latency_margin);
- dml2_printf("DML::%s: umc_max_latency_margin = %f\n", __func__, umc_max_latency_margin);
+ DML_LOG_VERBOSE("DML::%s: qos_type = %d\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: trip_to_memory_uclk_cycles = %d\n", __func__, trip_to_memory_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, FabricClock);
+ DML_LOG_VERBOSE("DML::%s: fabric_max_transport_latency_margin = %f\n", __func__, fabric_max_transport_latency_margin);
+ DML_LOG_VERBOSE("DML::%s: umc_max_latency_margin = %f\n", __func__, umc_max_latency_margin);
} else {
- dml2_printf("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
+ DML_LOG_VERBOSE("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
}
- dml2_printf("DML::%s: trip_to_memory_us = %f\n", __func__, trip_to_memory_us);
+ DML_LOG_VERBOSE("DML::%s: trip_to_memory_us = %f\n", __func__, trip_to_memory_us);
#endif
@@ -3334,14 +3275,14 @@ static double CalculateMetaTripToMemory(
#ifdef __DML_VBA_DEBUG__
if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles);
- dml2_printf("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: qos_type = %d\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
} else {
- dml2_printf("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
+ DML_LOG_VERBOSE("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
}
- dml2_printf("DML::%s: meta_trip_to_memory_us = %f\n", __func__, meta_trip_to_memory_us);
+ DML_LOG_VERBOSE("DML::%s: meta_trip_to_memory_us = %f\n", __func__, meta_trip_to_memory_us);
#endif
@@ -3358,7 +3299,6 @@ static void calculate_cursor_req_attributes(
unsigned int *cursor_bytes_per_chunk,
unsigned int *cursor_bytes)
{
- unsigned int cursor_pitch = 0;
unsigned int cursor_bytes_per_req = 0;
unsigned int cursor_width_bytes = 0;
unsigned int cursor_height = 0;
@@ -3366,10 +3306,6 @@ static void calculate_cursor_req_attributes(
//SW determines the cursor pitch to support the maximum cursor_width that will be used, but the following restrictions apply.
//- For 2bpp, cursor_pitch = 256 pixels due to min cursor request size of 64B
//- For 32 or 64 bpp, cursor_pitch = 64, 128 or 256 pixels depending on the cursor width
- if (cursor_bpp == 2)
- cursor_pitch = 256;
- else
- cursor_pitch = (unsigned int)1 << (unsigned int)math_ceil2(math_log((float)cursor_width, 2), 1);
//The cursor requestor uses a cursor request size of 64B, 128B, or 256B depending on the cursor_width and cursor_bpp as follows.
@@ -3409,8 +3345,8 @@ static void calculate_cursor_req_attributes(
*cursor_lines_per_chunk = 1;
} else {
if (cursor_width > 0) {
- dml2_printf("DML::%s: Invalid cursor_bpp = %d\n", __func__, cursor_bpp);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("DML::%s: Invalid cursor_bpp = %d\n", __func__, cursor_bpp);
+ DML_ASSERT(0);
}
}
@@ -3421,15 +3357,15 @@ static void calculate_cursor_req_attributes(
cursor_height = cursor_width;
*cursor_bytes = *cursor_bytes_per_line * cursor_height;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: cursor_bpp = %d\n", __func__, cursor_bpp);
- dml2_printf("DML::%s: cursor_width = %d\n", __func__, cursor_width);
- dml2_printf("DML::%s: cursor_width_bytes = %d\n", __func__, cursor_width_bytes);
- dml2_printf("DML::%s: cursor_bytes_per_req = %d\n", __func__, cursor_bytes_per_req);
- dml2_printf("DML::%s: cursor_lines_per_chunk = %d\n", __func__, *cursor_lines_per_chunk);
- dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, *cursor_bytes_per_line);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, *cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_bytes = %d\n", __func__, *cursor_bytes);
- dml2_printf("DML::%s: cursor_pitch = %d\n", __func__, cursor_pitch);
+ DML_LOG_VERBOSE("DML::%s: cursor_bpp = %d\n", __func__, cursor_bpp);
+ DML_LOG_VERBOSE("DML::%s: cursor_width = %d\n", __func__, cursor_width);
+ DML_LOG_VERBOSE("DML::%s: cursor_width_bytes = %d\n", __func__, cursor_width_bytes);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_req = %d\n", __func__, cursor_bytes_per_req);
+ DML_LOG_VERBOSE("DML::%s: cursor_lines_per_chunk = %d\n", __func__, *cursor_lines_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_line = %d\n", __func__, *cursor_bytes_per_line);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, *cursor_bytes_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes = %d\n", __func__, *cursor_bytes);
+ DML_LOG_VERBOSE("DML::%s: cursor_pitch = %d\n", __func__, cursor_bpp == 2 ? 256 : (unsigned int)1 << (unsigned int)math_ceil2(math_log((float)cursor_width, 2), 1));
#endif
}
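With the cursor_pitch local removed, the pitch is now recomputed inline in the final debug print. The rule is the one stated in the comments above: 2 bpp forces a 256-pixel pitch (64B minimum request), otherwise the cursor width is rounded up to the next power of two. A hypothetical helper capturing the same expression:

    /* Sketch of the pitch rule; mirrors the expression now inlined
     * into the DML_LOG_VERBOSE call after the local was dropped. */
    static unsigned int cursor_pitch_pixels(unsigned int cursor_bpp,
                                            unsigned int cursor_width)
    {
            if (cursor_bpp == 2)
                    return 256;
            return (unsigned int)1 << (unsigned int)math_ceil2(
                    math_log((float)cursor_width, 2), 1);
    }

Factoring it out like this would avoid the long inline expression, at the cost of keeping a helper that only the verbose log consumes.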
@@ -3460,13 +3396,13 @@ static void calculate_cursor_urgent_burst_factor(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: LinesInCursorBuffer = %u\n", __func__, LinesInCursorBuffer);
- dml2_printf("DML::%s: CursorBufferSizeInTime = %f\n", __func__, CursorBufferSizeInTime);
- dml2_printf("DML::%s: CursorBufferSize = %u (kbytes)\n", __func__, CursorBufferSize);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %u\n", __func__, cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_lines_per_chunk = %u\n", __func__, cursor_lines_per_chunk);
- dml2_printf("DML::%s: UrgentBurstFactorCursor = %f\n", __func__, *UrgentBurstFactorCursor);
- dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
+ DML_LOG_VERBOSE("DML::%s: LinesInCursorBuffer = %u\n", __func__, LinesInCursorBuffer);
+ DML_LOG_VERBOSE("DML::%s: CursorBufferSizeInTime = %f\n", __func__, CursorBufferSizeInTime);
+ DML_LOG_VERBOSE("DML::%s: CursorBufferSize = %u (kbytes)\n", __func__, CursorBufferSize);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %u\n", __func__, cursor_bytes_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_lines_per_chunk = %u\n", __func__, cursor_lines_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: UrgentBurstFactorCursor = %f\n", __func__, *UrgentBurstFactorCursor);
+ DML_LOG_VERBOSE("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
#endif
}
@@ -3501,15 +3437,15 @@ static void CalculateUrgentBurstFactor(
*UrgentBurstFactorChroma = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
- dml2_printf("DML::%s: VRatioC = %f\n", __func__, VRatioC);
- dml2_printf("DML::%s: DETBufferSizeY = %d\n", __func__, DETBufferSizeY);
- dml2_printf("DML::%s: DETBufferSizeC = %d\n", __func__, DETBufferSizeC);
- dml2_printf("DML::%s: BytePerPixelInDETY = %f\n", __func__, BytePerPixelInDETY);
- dml2_printf("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, LineTime);
+ DML_LOG_VERBOSE("DML::%s: VRatio = %f\n", __func__, VRatio);
+ DML_LOG_VERBOSE("DML::%s: VRatioC = %f\n", __func__, VRatioC);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeY = %d\n", __func__, DETBufferSizeY);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeC = %d\n", __func__, DETBufferSizeC);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelInDETY = %f\n", __func__, BytePerPixelInDETY);
+ DML_LOG_VERBOSE("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, LineTime);
#endif
- DML2_ASSERT(VRatio > 0);
+ DML_ASSERT(VRatio > 0);
LinesInDETLuma = (dml_is_phantom_pipe(plane_cfg) ? 1024 * 1024 : DETBufferSizeY) / BytePerPixelInDETY / swath_width_luma_ub;
@@ -3534,12 +3470,12 @@ static void CalculateUrgentBurstFactor(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: LinesInDETLuma = %f\n", __func__, LinesInDETLuma);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
- dml2_printf("DML::%s: DETBufferSizeInTimeLuma = %f\n", __func__, DETBufferSizeInTimeLuma);
- dml2_printf("DML::%s: UrgentBurstFactorLuma = %f\n", __func__, *UrgentBurstFactorLuma);
- dml2_printf("DML::%s: UrgentBurstFactorChroma = %f\n", __func__, *UrgentBurstFactorChroma);
- dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
+ DML_LOG_VERBOSE("DML::%s: LinesInDETLuma = %f\n", __func__, LinesInDETLuma);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeInTimeLuma = %f\n", __func__, DETBufferSizeInTimeLuma);
+ DML_LOG_VERBOSE("DML::%s: UrgentBurstFactorLuma = %f\n", __func__, *UrgentBurstFactorLuma);
+ DML_LOG_VERBOSE("DML::%s: UrgentBurstFactorChroma = %f\n", __func__, *UrgentBurstFactorChroma);
+ DML_LOG_VERBOSE("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
#endif
}
@@ -3600,10 +3536,10 @@ static void CalculateDCFCLKDeepSleepTdlut(
if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut && tdlut_bytes_to_deliver[k] > 0) {
double tdlut_required_deepsleep_dcfclk = (double) tdlut_bytes_to_deliver[k] / 64.0 / prefetch_swath_time_us[k];
- dml2_printf("DML::%s: k=%d, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
- dml2_printf("DML::%s: k=%d, tdlut_bytes_to_deliver = %d\n", __func__, k, tdlut_bytes_to_deliver[k]);
- dml2_printf("DML::%s: k=%d, prefetch_swath_time_us = %f\n", __func__, k, prefetch_swath_time_us[k]);
- dml2_printf("DML::%s: k=%d, tdlut_required_deepsleep_dcfclk = %f\n", __func__, k, tdlut_required_deepsleep_dcfclk);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, tdlut_bytes_to_deliver = %d\n", __func__, k, tdlut_bytes_to_deliver[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, prefetch_swath_time_us = %f\n", __func__, k, prefetch_swath_time_us[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, tdlut_required_deepsleep_dcfclk = %f\n", __func__, k, tdlut_required_deepsleep_dcfclk);
// increase the deepsleep dcfclk to match the original dispclk throughput rate
if (tdlut_required_deepsleep_dcfclk > DCFClkDeepSleepPerSurface[k]) {
@@ -3613,8 +3549,8 @@ static void CalculateDCFCLKDeepSleepTdlut(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PixelClock = %f\n", __func__, k, pixel_rate_mhz);
- dml2_printf("DML::%s: k=%u, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelClock = %f\n", __func__, k, pixel_rate_mhz);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
#endif
}
@@ -3625,17 +3561,17 @@ static void CalculateDCFCLKDeepSleepTdlut(
*DCFClkDeepSleep = math_max2(8.0, __DML2_CALCS_DCFCLK_FACTOR__ * ReadBandwidth / (double)ReturnBusWidth);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: __DML2_CALCS_DCFCLK_FACTOR__ = %f\n", __func__, __DML2_CALCS_DCFCLK_FACTOR__);
- dml2_printf("DML::%s: ReadBandwidth = %f\n", __func__, ReadBandwidth);
- dml2_printf("DML::%s: ReturnBusWidth = %u\n", __func__, ReturnBusWidth);
- dml2_printf("DML::%s: DCFClkDeepSleep = %f\n", __func__, *DCFClkDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: __DML2_CALCS_DCFCLK_FACTOR__ = %f\n", __func__, __DML2_CALCS_DCFCLK_FACTOR__);
+ DML_LOG_VERBOSE("DML::%s: ReadBandwidth = %f\n", __func__, ReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: ReturnBusWidth = %u\n", __func__, ReturnBusWidth);
+ DML_LOG_VERBOSE("DML::%s: DCFClkDeepSleep = %f\n", __func__, *DCFClkDeepSleep);
#endif
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
*DCFClkDeepSleep = math_max2(*DCFClkDeepSleep, DCFClkDeepSleepPerSurface[k]);
}
- dml2_printf("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep);
}
static noinline_for_stack void CalculateDCFCLKDeepSleep(
@@ -3731,12 +3667,12 @@ static unsigned int CalculateMaxVStartup(
else
max_vstartup_lines = vblank_size - (unsigned int)math_max2(1.0, math_ceil2(write_back_delay_us / line_time_us, 1.0));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VBlankNom = %u\n", __func__, timing->vblank_nom);
- dml2_printf("DML::%s: vblank_nom_default_us = %u\n", __func__, vblank_nom_default_us);
- dml2_printf("DML::%s: line_time_us = %f\n", __func__, line_time_us);
- dml2_printf("DML::%s: vblank_actual = %u\n", __func__, vblank_actual);
- dml2_printf("DML::%s: vblank_avail = %u\n", __func__, vblank_avail);
- dml2_printf("DML::%s: max_vstartup_lines = %u\n", __func__, max_vstartup_lines);
+ DML_LOG_VERBOSE("DML::%s: VBlankNom = %lu\n", __func__, timing->vblank_nom);
+ DML_LOG_VERBOSE("DML::%s: vblank_nom_default_us = %u\n", __func__, vblank_nom_default_us);
+ DML_LOG_VERBOSE("DML::%s: line_time_us = %f\n", __func__, line_time_us);
+ DML_LOG_VERBOSE("DML::%s: vblank_actual = %u\n", __func__, vblank_actual);
+ DML_LOG_VERBOSE("DML::%s: vblank_avail = %u\n", __func__, vblank_avail);
+ DML_LOG_VERBOSE("DML::%s: max_vstartup_lines = %u\n", __func__, max_vstartup_lines);
#endif
max_vstartup_lines = (unsigned int)math_min2(max_vstartup_lines, DML_MAX_VSTARTUP_START);
return max_vstartup_lines;
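To make the clamp above concrete, a worked example with hypothetical numbers (none of these values come from the patch):

    /* 40-line vblank, 100 us writeback delay, 7.4 us line time:
     * ceil(100 / 7.4) = 14 lines are reserved, leaving 40 - 14 = 26,
     * which is then capped at DML_MAX_VSTARTUP_START. */
    unsigned int vblank_size = 40;
    double write_back_delay_us = 100.0, line_time_us = 7.4;
    unsigned int reserved = (unsigned int)math_max2(1.0,
            math_ceil2(write_back_delay_us / line_time_us, 1.0));
    unsigned int max_vstartup_lines = vblank_size - reserved;  /* 26 */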
@@ -3761,9 +3697,9 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
const long MAXIMUMCOMPRESSION = 4;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, p->ForceSingleDPP);
+ DML_LOG_VERBOSE("DML::%s: ForceSingleDPP = %u\n", __func__, p->ForceSingleDPP);
for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: DPPPerSurface[%u] = %u\n", __func__, k, p->DPPPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: DPPPerSurface[%u] = %u\n", __func__, k, p->DPPPerSurface[k]);
}
#endif
CalculateSwathWidth(
@@ -3797,15 +3733,15 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->full_swath_bytes_l[k] = (unsigned int)(p->swath_width_luma_ub[k] * p->BytePerPixDETY[k] * MaximumSwathHeightY[k]);
p->full_swath_bytes_c[k] = (unsigned int)(p->swath_width_chroma_ub[k] * p->BytePerPixDETC[k] * MaximumSwathHeightC[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, p->DPPPerSurface[k]);
- dml2_printf("DML::%s: k=%u swath_width_luma_ub = %u\n", __func__, k, p->swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u BytePerPixDETY = %f\n", __func__, k, p->BytePerPixDETY[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightY = %u\n", __func__, k, MaximumSwathHeightY[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u swath_width_chroma_ub = %u\n", __func__, k, p->swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u BytePerPixDETC = %f\n", __func__, k, p->BytePerPixDETC[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightC = %u\n", __func__, k, MaximumSwathHeightC[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, p->DPPPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_luma_ub = %u\n", __func__, k, p->swath_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u BytePerPixDETY = %f\n", __func__, k, p->BytePerPixDETY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightY = %u\n", __func__, k, MaximumSwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_chroma_ub = %u\n", __func__, k, p->swath_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u BytePerPixDETC = %f\n", __func__, k, p->BytePerPixDETC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightC = %u\n", __func__, k, MaximumSwathHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
#endif
if (p->display_cfg->plane_descriptors[k].pixel_format == dml2_420_10) {
p->full_swath_bytes_l[k] = (unsigned int)(math_ceil2((double)p->full_swath_bytes_l[k], 256));
@@ -3848,11 +3784,11 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->CompressedBufferSizeInkByte);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TotalActiveDPP = %u\n", __func__, TotalActiveDPP);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, p->nomDETInKByte);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, p->ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, *p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *p->CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: TotalActiveDPP = %u\n", __func__, TotalActiveDPP);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, p->nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, p->ConfigReturnBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: UnboundedRequestEnabled = %u\n", __func__, *p->UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *p->CompressedBufferSizeInkByte);
#endif
*p->ViewportSizeSupport = true;
@@ -3860,7 +3796,7 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
DETBufferSizeInKByteForSwathCalculation = (dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k]) ? 1024 : p->DETBufferSizeInKByte[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation = %u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation = %u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
#endif
if (p->display_cfg->plane_descriptors[k].surface.tiling == dml2_sw_linear) {
p->SwathHeightY[k] = MaximumSwathHeightY[k];
@@ -3917,13 +3853,13 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
if ((p->full_swath_bytes_l[k] / 2 + p->full_swath_bytes_c[k] / 2 > DETBufferSizeInKByteForSwathCalculation * 1024 / 2) ||
p->SwathWidth[k] > p->MaximumSwathWidthLuma[k] || (p->SwathHeightC[k] > 0 && p->SwathWidthChroma[k] > p->MaximumSwathWidthChroma[k])) {
*p->ViewportSizeSupport = false;
- dml2_printf("DML::%s: k=%u full_swath_bytes_l=%u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c=%u\n", __func__, k, p->full_swath_bytes_c[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation=%u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
- dml2_printf("DML::%s: k=%u SwathWidth=%u\n", __func__, k, p->SwathWidth[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, p->MaximumSwathWidthLuma[k]);
- dml2_printf("DML::%s: k=%u SwathWidthChroma=%d\n", __func__, k, p->SwathWidthChroma[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, p->MaximumSwathWidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l=%u\n", __func__, k, p->full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c=%u\n", __func__, k, p->full_swath_bytes_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation=%u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidth=%u\n", __func__, k, p->SwathWidth[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, p->MaximumSwathWidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidthChroma=%d\n", __func__, k, p->SwathWidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, p->MaximumSwathWidthChroma[k]);
p->ViewportSizeSupportPerSurface[k] = false;
} else {
p->ViewportSizeSupportPerSurface[k] = true;
@@ -3931,35 +3867,35 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
if (p->SwathHeightC[k] == 0) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, All DET will be used for plane0\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u, All DET will be used for plane0\n", __func__, k);
#endif
p->DETBufferSizeY[k] = p->DETBufferSizeInKByte[k] * 1024;
p->DETBufferSizeC[k] = 0;
} else if (RoundedUpSwathSizeBytesY[k] <= 1.5 * RoundedUpSwathSizeBytesC[k]) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, Half DET will be used for plane0, and half for plane1\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u, Half DET will be used for plane0, and half for plane1\n", __func__, k);
#endif
p->DETBufferSizeY[k] = p->DETBufferSizeInKByte[k] * 1024 / 2;
p->DETBufferSizeC[k] = p->DETBufferSizeInKByte[k] * 1024 / 2;
} else {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, 2/3 DET will be used for plane0, and 1/3 for plane1\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u, 2/3 DET will be used for plane0, and 1/3 for plane1\n", __func__, k);
#endif
p->DETBufferSizeY[k] = (unsigned int)(math_floor2(p->DETBufferSizeInKByte[k] * 1024 * 2 / 3, 1024));
p->DETBufferSizeC[k] = p->DETBufferSizeInKByte[k] * 1024 - p->DETBufferSizeY[k];
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
- dml2_printf("DML::%s: k=%u SwathHeightC = %u\n", __func__, k, p->SwathHeightC[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesY = %u\n", __func__, k, RoundedUpSwathSizeBytesY[k]);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, p->DETBufferSizeInKByte[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeC = %u\n", __func__, k, p->DETBufferSizeC[k]);
- dml2_printf("DML::%s: k=%u ViewportSizeSupportPerSurface = %u\n", __func__, k, p->ViewportSizeSupportPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathHeightC = %u\n", __func__, k, p->SwathHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u RoundedUpSwathSizeBytesY = %u\n", __func__, k, RoundedUpSwathSizeBytesY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, p->DETBufferSizeInKByte[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeC = %u\n", __func__, k, p->DETBufferSizeC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportSizeSupportPerSurface = %u\n", __func__, k, p->ViewportSizeSupportPerSurface[k]);
#endif
}
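The three branches above implement the DET split policy between plane0 and plane1. Restated compactly with stand-in names and hypothetical values (det_kbytes for DETBufferSizeInKByte[k], swath_y_bytes/swath_c_bytes for the rounded-up swath sizes):

    unsigned int det_kbytes = 1536;      /* stand-in: DETBufferSizeInKByte[k] */
    unsigned int swath_y_bytes = 98304;  /* stand-in: RoundedUpSwathSizeBytesY[k] */
    unsigned int swath_c_bytes = 24576;  /* stand-in: RoundedUpSwathSizeBytesC[k] */
    unsigned int det_y, det_c;

    if (swath_c_bytes == 0) {            /* no plane1: all DET to plane0 */
            det_y = det_kbytes * 1024;
            det_c = 0;
    } else if (swath_y_bytes <= 1.5 * swath_c_bytes) {  /* comparable: 50/50 */
            det_y = det_kbytes * 1024 / 2;
            det_c = det_kbytes * 1024 / 2;
    } else {                             /* luma-heavy: 2/3 vs 1/3, 1 KiB floor */
            det_y = (unsigned int)math_floor2(det_kbytes * 1024 * 2 / 3, 1024);
            det_c = det_kbytes * 1024 - det_y;
    }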
@@ -3969,12 +3905,12 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
*p->compbuf_reserved_space_64b = (unsigned int)math_ceil2(math_max2(*p->compbuf_reserved_space_64b,
(double)(p->rob_buffer_size_kbytes * 1024 / 64) - (double)(RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest] * TTUFIFODEPTH / (p->mrq_present ? MAXIMUMCOMPRESSION : 1) / 64)), 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: RoundedUpSwathSizeBytesY[%d] = %u\n", __func__, SurfaceDoingUnboundedRequest, RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest]);
- dml2_printf("DML::%s: rob_buffer_size_kbytes = %u\n", __func__, p->rob_buffer_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: RoundedUpSwathSizeBytesY[%d] = %u\n", __func__, SurfaceDoingUnboundedRequest, RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest]);
+ DML_LOG_VERBOSE("DML::%s: rob_buffer_size_kbytes = %u\n", __func__, p->rob_buffer_size_kbytes);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: compbuf_reserved_space_64b = %u\n", __func__, *p->compbuf_reserved_space_64b);
+ DML_LOG_VERBOSE("DML::%s: compbuf_reserved_space_64b = %u\n", __func__, *p->compbuf_reserved_space_64b);
#endif
*p->hw_debug5 = false;
@@ -3989,12 +3925,12 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
+ *p->CompressedBufferSizeInkByte * MAXIMUMCOMPRESSION * 1024) > TTUFIFODEPTH * (RoundedUpSwathSizeBytesY[k] + RoundedUpSwathSizeBytesC[k])))
*p->hw_debug5 = true;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION);
- dml2_printf("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH);
- dml2_printf("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
- dml2_printf("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5);
+ DML_LOG_VERBOSE("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION);
+ DML_LOG_VERBOSE("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH);
+ DML_LOG_VERBOSE("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5);
#endif
}
#endif
@@ -4192,15 +4128,15 @@ static noinline_for_stack void CalculateODMMode(
SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock);
SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ODMUse = %d\n", __func__, ODMUse);
- dml2_printf("DML::%s: Output = %d\n", __func__, Output);
- dml2_printf("DML::%s: DSCEnable = %d\n", __func__, DSCEnable);
- dml2_printf("DML::%s: MaxDispclk = %f\n", __func__, MaxDispclk);
- dml2_printf("DML::%s: MaximumPixelsPerLinePerDSCUnit = %d\n", __func__, MaximumPixelsPerLinePerDSCUnit);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithoutODMCombine = %f\n", __func__, SurfaceRequiredDISPCLKWithoutODMCombine);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineTwoToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineTwoToOne);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineThreeToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineThreeToOne);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineFourToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineFourToOne);
+ DML_LOG_VERBOSE("DML::%s: ODMUse = %d\n", __func__, ODMUse);
+ DML_LOG_VERBOSE("DML::%s: Output = %d\n", __func__, Output);
+ DML_LOG_VERBOSE("DML::%s: DSCEnable = %d\n", __func__, DSCEnable);
+ DML_LOG_VERBOSE("DML::%s: MaxDispclk = %f\n", __func__, MaxDispclk);
+ DML_LOG_VERBOSE("DML::%s: MaximumPixelsPerLinePerDSCUnit = %d\n", __func__, MaximumPixelsPerLinePerDSCUnit);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithoutODMCombine = %f\n", __func__, SurfaceRequiredDISPCLKWithoutODMCombine);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithODMCombineTwoToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineTwoToOne);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithODMCombineThreeToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineThreeToOne);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithODMCombineFourToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineFourToOne);
#endif
if (ODMUse == dml2_odm_mode_auto)
DecidedODMMode = DecideODMMode(HActive,
@@ -4245,10 +4181,10 @@ static noinline_for_stack void CalculateODMMode(
*NumberOfDPP = NumberOfDPPRequired;
*RequiredDISPCLKPerSurface = success ? DISPCLKRequired : 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ODMMode = %d\n", __func__, *ODMMode);
- dml2_printf("DML::%s: NumberOfDPP = %d\n", __func__, *NumberOfDPP);
- dml2_printf("DML::%s: TotalAvailablePipesSupport = %d\n", __func__, *TotalAvailablePipesSupport);
- dml2_printf("DML::%s: RequiredDISPCLKPerSurface = %f\n", __func__, *RequiredDISPCLKPerSurface);
+ DML_LOG_VERBOSE("DML::%s: ODMMode = %d\n", __func__, *ODMMode);
+ DML_LOG_VERBOSE("DML::%s: NumberOfDPP = %d\n", __func__, *NumberOfDPP);
+ DML_LOG_VERBOSE("DML::%s: TotalAvailablePipesSupport = %d\n", __func__, *TotalAvailablePipesSupport);
+ DML_LOG_VERBOSE("DML::%s: RequiredDISPCLKPerSurface = %f\n", __func__, *RequiredDISPCLKPerSurface);
#endif
}
@@ -4292,17 +4228,17 @@ static noinline_for_stack void CalculateOutputLink(
*OutputRate = dml2_core_internal_output_rate_unknown;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSCEnable = %u (dis, en, en_if_necessary)\n", __func__, DSCEnable);
- dml2_printf("DML::%s: PHYCLK = %f\n", __func__, PHYCLK);
- dml2_printf("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
- dml2_printf("DML::%s: AudioSampleRate = %f\n", __func__, AudioSampleRate);
- dml2_printf("DML::%s: HActive = %u\n", __func__, HActive);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: ODMModeNoDSC = %u\n", __func__, ODMModeNoDSC);
- dml2_printf("DML::%s: ODMModeDSC = %u\n", __func__, ODMModeDSC);
- dml2_printf("DML::%s: ForcedOutputLinkBPP = %f\n", __func__, ForcedOutputLinkBPP);
- dml2_printf("DML::%s: Output (encoder) = %u\n", __func__, Output);
- dml2_printf("DML::%s: OutputLinkDPRate = %u\n", __func__, OutputLinkDPRate);
+ DML_LOG_VERBOSE("DML::%s: DSCEnable = %u (dis, en, en_if_necessary)\n", __func__, DSCEnable);
+ DML_LOG_VERBOSE("DML::%s: PHYCLK = %f\n", __func__, PHYCLK);
+ DML_LOG_VERBOSE("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
+ DML_LOG_VERBOSE("DML::%s: AudioSampleRate = %f\n", __func__, AudioSampleRate);
+ DML_LOG_VERBOSE("DML::%s: HActive = %u\n", __func__, HActive);
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, HTotal);
+ DML_LOG_VERBOSE("DML::%s: ODMModeNoDSC = %u\n", __func__, ODMModeNoDSC);
+ DML_LOG_VERBOSE("DML::%s: ODMModeDSC = %u\n", __func__, ODMModeDSC);
+ DML_LOG_VERBOSE("DML::%s: ForcedOutputLinkBPP = %f\n", __func__, ForcedOutputLinkBPP);
+ DML_LOG_VERBOSE("DML::%s: Output (encoder) = %u\n", __func__, Output);
+ DML_LOG_VERBOSE("DML::%s: OutputLinkDPRate = %u\n", __func__, OutputLinkDPRate);
#endif
{
if (Output == dml2_hdmi) {
@@ -4487,9 +4423,9 @@ static noinline_for_stack void CalculateOutputLink(
}
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: RequiresDSC = %u\n", __func__, *RequiresDSC);
- dml2_printf("DML::%s: RequiresFEC = %u\n", __func__, *RequiresFEC);
- dml2_printf("DML::%s: OutBpp = %f\n", __func__, *OutBpp);
+ DML_LOG_VERBOSE("DML::%s: RequiresDSC = %u\n", __func__, *RequiresDSC);
+ DML_LOG_VERBOSE("DML::%s: RequiresFEC = %u\n", __func__, *RequiresFEC);
+ DML_LOG_VERBOSE("DML::%s: OutBpp = %f\n", __func__, *OutBpp);
#endif
}
@@ -4571,17 +4507,17 @@ static unsigned int DSCDelayRequirement(
DSCDelayRequirement_val = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSCEnabled= %u\n", __func__, DSCEnabled);
- dml2_printf("DML::%s: ODMMode = %u\n", __func__, ODMMode);
- dml2_printf("DML::%s: OutputBpp = %f\n", __func__, OutputBpp);
- dml2_printf("DML::%s: HActive = %u\n", __func__, HActive);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, PixelClock);
- dml2_printf("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
- dml2_printf("DML::%s: OutputFormat = %u\n", __func__, OutputFormat);
- dml2_printf("DML::%s: DSCInputBitPerComponent = %u\n", __func__, DSCInputBitPerComponent);
- dml2_printf("DML::%s: NumberOfDSCSlices = %u\n", __func__, NumberOfDSCSlices);
- dml2_printf("DML::%s: DSCDelayRequirement_val = %u\n", __func__, DSCDelayRequirement_val);
+ DML_LOG_VERBOSE("DML::%s: DSCEnabled= %u\n", __func__, DSCEnabled);
+ DML_LOG_VERBOSE("DML::%s: ODMMode = %u\n", __func__, ODMMode);
+ DML_LOG_VERBOSE("DML::%s: OutputBpp = %f\n", __func__, OutputBpp);
+ DML_LOG_VERBOSE("DML::%s: HActive = %u\n", __func__, HActive);
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, HTotal);
+ DML_LOG_VERBOSE("DML::%s: PixelClock = %f\n", __func__, PixelClock);
+ DML_LOG_VERBOSE("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
+ DML_LOG_VERBOSE("DML::%s: OutputFormat = %u\n", __func__, OutputFormat);
+ DML_LOG_VERBOSE("DML::%s: DSCInputBitPerComponent = %u\n", __func__, DSCInputBitPerComponent);
+ DML_LOG_VERBOSE("DML::%s: NumberOfDSCSlices = %u\n", __func__, NumberOfDSCSlices);
+ DML_LOG_VERBOSE("DML::%s: DSCDelayRequirement_val = %u\n", __func__, DSCDelayRequirement_val);
#endif
return DSCDelayRequirement_val;
@@ -4654,10 +4590,10 @@ static void CalculateSurfaceSizeInMall(
(TotalSurfaceSizeInMALLForSubVP > MALLAllocatedForDCNInBytes);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MALLAllocatedForDCN = %u\n", __func__, MALLAllocatedForDCN * 1024 * 1024);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALLForSubVP = %u\n", __func__, TotalSurfaceSizeInMALLForSubVP);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALLForSS = %u\n", __func__, TotalSurfaceSizeInMALLForSS);
- dml2_printf("DML::%s: ExceededMALLSize = %u\n", __func__, *ExceededMALLSize);
+ DML_LOG_VERBOSE("DML::%s: MALLAllocatedForDCN = %u\n", __func__, MALLAllocatedForDCN * 1024 * 1024);
+ DML_LOG_VERBOSE("DML::%s: TotalSurfaceSizeInMALLForSubVP = %u\n", __func__, TotalSurfaceSizeInMALLForSubVP);
+ DML_LOG_VERBOSE("DML::%s: TotalSurfaceSizeInMALLForSS = %u\n", __func__, TotalSurfaceSizeInMALLForSS);
+ DML_LOG_VERBOSE("DML::%s: ExceededMALLSize = %u\n", __func__, *ExceededMALLSize);
#endif
}
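Note that the log prints MALLAllocatedForDCN scaled by 1024 * 1024, i.e. the parameter is presumably megabyte-granular while the surface totals are in bytes. A stand-in restatement of the visible comparison, with hypothetical values:

    /* Stand-in sketch; assumes MALLAllocatedForDCN is the soc
     * parameter in MB while the surface totals are byte counts. */
    unsigned int MALLAllocatedForDCN = 64;                  /* hypothetical MB */
    unsigned int TotalSurfaceSizeInMALLForSubVP = 50331648; /* hypothetical 48 MiB */
    unsigned int MALLAllocatedForDCNInBytes = MALLAllocatedForDCN * 1024 * 1024;
    bool ExceededMALLSize =
            TotalSurfaceSizeInMALLForSubVP > MALLAllocatedForDCNInBytes;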
@@ -4674,7 +4610,6 @@ static void calculate_tdlut_setting(
unsigned int tdlut_vmpg_per_frame;
unsigned int tdlut_pte_req_per_frame;
unsigned int tdlut_bytes_per_line;
- unsigned int tdlut_delivery_cycles;
double tdlut_drain_rate;
unsigned int tdlut_mpc_width;
unsigned int tdlut_bytes_per_group_simple;
@@ -4737,13 +4672,13 @@ static void calculate_tdlut_setting(
*p->tdlut_bytes_per_frame = tdlut_bytes_per_line * tdlut_mpc_width * tdlut_mpc_width;
*p->tdlut_bytes_per_group = tdlut_bytes_per_line * tdlut_mpc_width;
//the number of delivery cycles is DispClk cycles per line * number of lines * number of slices
- tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width;
+ //tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width;
tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / math_ceil2(tdlut_mpc_width/2.0, 1);
} else {
//tdlut_addressing_mode = tdlut_simple_linear, 3dlut width should be 4*1229=4916 elements
*p->tdlut_bytes_per_frame = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 256);
*p->tdlut_bytes_per_group = tdlut_bytes_per_group_simple;
- tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_width/2.0, 1);
+ //tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_width/2.0, 1);
tdlut_drain_rate = 2 * tdlut_bpe * p->dispclk_mhz;
}
@@ -4756,25 +4691,25 @@ static void calculate_tdlut_setting(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: gpuvm_enable = %d\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: vmpg_bytes = %d\n", __func__, vmpg_bytes);
- dml2_printf("DML::%s: tdlut_vmpg_per_frame = %d\n", __func__, tdlut_vmpg_per_frame);
- dml2_printf("DML::%s: tdlut_pte_req_per_frame = %d\n", __func__, tdlut_pte_req_per_frame);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_enable = %d\n", __func__, p->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: vmpg_bytes = %d\n", __func__, vmpg_bytes);
+ DML_LOG_VERBOSE("DML::%s: tdlut_vmpg_per_frame = %d\n", __func__, tdlut_vmpg_per_frame);
+ DML_LOG_VERBOSE("DML::%s: tdlut_pte_req_per_frame = %d\n", __func__, tdlut_pte_req_per_frame);
- dml2_printf("DML::%s: dispclk_mhz = %f\n", __func__, p->dispclk_mhz);
- dml2_printf("DML::%s: tdlut_width = %u\n", __func__, tdlut_width);
- dml2_printf("DML::%s: tdlut_addressing_mode = %s\n", __func__, (p->tdlut_addressing_mode == dml2_tdlut_sw_linear) ? "sw_linear" : "simple_linear");
- dml2_printf("DML::%s: tdlut_pitch_bytes = %u\n", __func__, tdlut_pitch_bytes);
- dml2_printf("DML::%s: tdlut_footprint_bytes = %u\n", __func__, tdlut_footprint_bytes);
- dml2_printf("DML::%s: tdlut_bytes_per_frame = %u\n", __func__, *p->tdlut_bytes_per_frame);
- dml2_printf("DML::%s: tdlut_bytes_per_line = %u\n", __func__, tdlut_bytes_per_line);
- dml2_printf("DML::%s: tdlut_bytes_per_group = %u\n", __func__, *p->tdlut_bytes_per_group);
- dml2_printf("DML::%s: tdlut_drain_rate = %f\n", __func__, tdlut_drain_rate);
- dml2_printf("DML::%s: tdlut_delivery_cycles = %u\n", __func__, tdlut_delivery_cycles);
- dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, *p->tdlut_opt_time);
- dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, *p->tdlut_drain_time);
- dml2_printf("DML::%s: tdlut_bytes_to_deliver = %d\n", __func__, *p->tdlut_bytes_to_deliver);
- dml2_printf("DML::%s: tdlut_groups_per_2row_ub = %d\n", __func__, *p->tdlut_groups_per_2row_ub);
+ DML_LOG_VERBOSE("DML::%s: dispclk_mhz = %f\n", __func__, p->dispclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: tdlut_width = %u\n", __func__, tdlut_width);
+ DML_LOG_VERBOSE("DML::%s: tdlut_addressing_mode = %s\n", __func__, (p->tdlut_addressing_mode == dml2_tdlut_sw_linear) ? "sw_linear" : "simple_linear");
+ DML_LOG_VERBOSE("DML::%s: tdlut_pitch_bytes = %u\n", __func__, tdlut_pitch_bytes);
+ DML_LOG_VERBOSE("DML::%s: tdlut_footprint_bytes = %u\n", __func__, tdlut_footprint_bytes);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_per_frame = %u\n", __func__, *p->tdlut_bytes_per_frame);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_per_line = %u\n", __func__, tdlut_bytes_per_line);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_per_group = %u\n", __func__, *p->tdlut_bytes_per_group);
+ DML_LOG_VERBOSE("DML::%s: tdlut_drain_rate = %f\n", __func__, tdlut_drain_rate);
+ DML_LOG_VERBOSE("DML::%s: tdlut_delivery_cycles = %u\n", __func__, p->tdlut_addressing_mode == dml2_tdlut_sw_linear ? (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width : (unsigned int)math_ceil2(tdlut_width/2.0, 1));
+ DML_LOG_VERBOSE("DML::%s: tdlut_opt_time = %f\n", __func__, *p->tdlut_opt_time);
+ DML_LOG_VERBOSE("DML::%s: tdlut_drain_time = %f\n", __func__, *p->tdlut_drain_time);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_to_deliver = %d\n", __func__, *p->tdlut_bytes_to_deliver);
+ DML_LOG_VERBOSE("DML::%s: tdlut_groups_per_2row_ub = %d\n", __func__, *p->tdlut_groups_per_2row_ub);
#endif
}
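tdlut_delivery_cycles was only ever consumed by the verbose log, so the change comments out the dead stores and recomputes the value inside the DML_LOG_VERBOSE call. An alternative, not taken here, would keep the intermediate under the same debug guard (a sketch using the in-scope names of calculate_tdlut_setting above):

    /* Alternative sketch: scope the dead store to debug builds instead
     * of recomputing it inside the log call. */
    #ifdef __DML_VBA_DEBUG__
    unsigned int tdlut_delivery_cycles =
            p->tdlut_addressing_mode == dml2_tdlut_sw_linear ?
            (unsigned int)math_ceil2(tdlut_mpc_width / 2.0, 1) *
                    tdlut_mpc_width * tdlut_mpc_width :
            (unsigned int)math_ceil2(tdlut_width / 2.0, 1);
    DML_LOG_VERBOSE("DML::%s: tdlut_delivery_cycles = %u\n", __func__,
                    tdlut_delivery_cycles);
    #endif

Either way the value is dead in non-debug builds; the inline form merely avoids an #ifdef-scoped local.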
@@ -4820,10 +4755,10 @@ static void CalculateTarb(
*Tarb = extra_bytes / ReturnBW;
*Tarb_prefetch = extra_bytes_prefetch / ReturnBW;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PixelChunkSizeInKByte = %d\n", __func__, PixelChunkSizeInKByte);
- dml2_printf("DML::%s: MetaChunkSize = %d\n", __func__, MetaChunkSize);
- dml2_printf("DML::%s: extra_bytes = %f\n", __func__, extra_bytes);
- dml2_printf("DML::%s: extra_bytes_prefetch = %f\n", __func__, extra_bytes_prefetch);
+ DML_LOG_VERBOSE("DML::%s: PixelChunkSizeInKByte = %d\n", __func__, PixelChunkSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: MetaChunkSize = %d\n", __func__, MetaChunkSize);
+ DML_LOG_VERBOSE("DML::%s: extra_bytes = %f\n", __func__, extra_bytes);
+ DML_LOG_VERBOSE("DML::%s: extra_bytes_prefetch = %f\n", __func__, extra_bytes_prefetch);
#endif
}
@@ -4838,10 +4773,10 @@ static double CalculateTWait(
TWait = math_max2(reserved_vblank_time_ns/1000.0, g6_temp_read_blackout_us) + t_urg_trip;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: reserved_vblank_time_ns = %d\n", __func__, reserved_vblank_time_ns);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, Ttrip);
- dml2_printf("DML::%s: TWait = %f\n", __func__, TWait);
+ DML_LOG_VERBOSE("DML::%s: reserved_vblank_time_ns = %ld\n", __func__, reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
+ DML_LOG_VERBOSE("DML::%s: Ttrip = %f\n", __func__, Ttrip);
+ DML_LOG_VERBOSE("DML::%s: TWait = %f\n", __func__, TWait);
#endif
return TWait;
}
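A quick numeric check of the TWait expression above, with hypothetical inputs:

    /* reserved_vblank_time_ns = 400000 (400 us) hides the 350 us g6
     * blackout, so TWait = max(400, 350) + 12 = 412 us. */
    long reserved_vblank_time_ns = 400000;    /* hypothetical */
    double g6_temp_read_blackout_us = 350.0;  /* hypothetical */
    double t_urg_trip = 12.0;                 /* hypothetical urgent+trip term */
    double TWait = math_max2(reserved_vblank_time_ns / 1000.0,
                             g6_temp_read_blackout_us) + t_urg_trip;  /* 412.0 */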
@@ -4887,20 +4822,20 @@ static void CalculateVUpdateAndDynamicMetadataParameters(
*Tdmsks = *Tdmsks / 2;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DynamicMetadataLinesBeforeActiveRequired = %u\n", __func__, DynamicMetadataLinesBeforeActiveRequired);
- dml2_printf("DML::%s: VBlank = %u\n", __func__, VBlank);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, PixelClock);
- dml2_printf("DML::%s: Dppclk = %f\n", __func__, Dppclk);
- dml2_printf("DML::%s: DCFClkDeepSleep = %f\n", __func__, DCFClkDeepSleep);
- dml2_printf("DML::%s: MaxInterDCNTileRepeaters = %u\n", __func__, MaxInterDCNTileRepeaters);
- dml2_printf("DML::%s: TotalRepeaterDelayTime = %f\n", __func__, TotalRepeaterDelayTime);
+ DML_LOG_VERBOSE("DML::%s: DynamicMetadataLinesBeforeActiveRequired = %u\n", __func__, DynamicMetadataLinesBeforeActiveRequired);
+ DML_LOG_VERBOSE("DML::%s: VBlank = %u\n", __func__, VBlank);
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, HTotal);
+ DML_LOG_VERBOSE("DML::%s: PixelClock = %f\n", __func__, PixelClock);
+ DML_LOG_VERBOSE("DML::%s: Dppclk = %f\n", __func__, Dppclk);
+ DML_LOG_VERBOSE("DML::%s: DCFClkDeepSleep = %f\n", __func__, DCFClkDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: MaxInterDCNTileRepeaters = %u\n", __func__, MaxInterDCNTileRepeaters);
+ DML_LOG_VERBOSE("DML::%s: TotalRepeaterDelayTime = %f\n", __func__, TotalRepeaterDelayTime);
- dml2_printf("DML::%s: VUpdateWidthPix = %u\n", __func__, *VUpdateWidthPix);
- dml2_printf("DML::%s: VReadyOffsetPix = %u\n", __func__, *VReadyOffsetPix);
- dml2_printf("DML::%s: VUpdateOffsetPix = %u\n", __func__, *VUpdateOffsetPix);
+ DML_LOG_VERBOSE("DML::%s: VUpdateWidthPix = %u\n", __func__, *VUpdateWidthPix);
+ DML_LOG_VERBOSE("DML::%s: VReadyOffsetPix = %u\n", __func__, *VReadyOffsetPix);
+ DML_LOG_VERBOSE("DML::%s: VUpdateOffsetPix = %u\n", __func__, *VUpdateOffsetPix);
- dml2_printf("DML::%s: Tdmsks = %f\n", __func__, *Tdmsks);
+ DML_LOG_VERBOSE("DML::%s: Tdmsks = %f\n", __func__, *Tdmsks);
#endif
}
@@ -4962,11 +4897,11 @@ static double get_urgent_bandwidth_required(
l->adj_factor_cur_pre = UrgentBurstFactorCursorPre[k];
bool is_phantom = dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]);
- bool exclude_this_plane = 0;
+ bool exclude_this_plane = false;
// Exclude phantom pipes from the bw calculation for the non-svp prefetch state
if (state_type != dml2_core_internal_soc_state_svp_prefetch && is_phantom)
- exclude_this_plane = 1;
+ exclude_this_plane = true;
// The qualified row bandwidth, qual_row_bw, accounts for the regular non-flip row bandwidth when there is no possible immediate flip or HostVM invalidation flip.
// qual_row_bw is zero if a HostVM invalidation flip is possible, and is non-zero (equal to row_bw(i)) only when immediate flip is not allowed for that pipe.
@@ -4995,12 +4930,12 @@ static double get_urgent_bandwidth_required(
surface_peak_required_bw[k] = math_max2(surface_required_bw[k], surface_peak_required_bw[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, max1: vm_row_bw=%f\n", __func__, k, l->vm_row_bw);
- dml2_printf("DML::%s: k=%d, max2: flip_and_active_bw=%f\n", __func__, k, l->flip_and_active_bw);
- dml2_printf("DML::%s: k=%d, max3: flip_and_prefetch_bw=%f\n", __func__, k, l->flip_and_prefetch_bw);
- dml2_printf("DML::%s: k=%d, max4: active_and_excess_bw=%f\n", __func__, k, l->active_and_excess_bw);
- dml2_printf("DML::%s: k=%d, surface_required_bw=%f\n", __func__, k, surface_required_bw[k]);
- dml2_printf("DML::%s: k=%d, surface_peak_required_bw=%f\n", __func__, k, surface_peak_required_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max1: vm_row_bw=%f\n", __func__, k, l->vm_row_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max2: flip_and_active_bw=%f\n", __func__, k, l->flip_and_active_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max3: flip_and_prefetch_bw=%f\n", __func__, k, l->flip_and_prefetch_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max4: active_and_excess_bw=%f\n", __func__, k, l->active_and_excess_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, surface_required_bw=%f\n", __func__, k, surface_required_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, surface_peak_required_bw=%f\n", __func__, k, surface_peak_required_bw[k]);
#endif
} else {
surface_required_bw[k] = 0.0;
@@ -5009,34 +4944,34 @@ static double get_urgent_bandwidth_required(
l->required_bandwidth_mbps += surface_required_bw[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, NumberOfDPP=%d\n", __func__, k, NumberOfDPP[k]);
- dml2_printf("DML::%s: k=%d, use_qual_row_bw=%d\n", __func__, k, use_qual_row_bw);
- dml2_printf("DML::%s: k=%d, immediate_flip=%d\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
- dml2_printf("DML::%s: k=%d, mall_svp_prefetch_factor=%f\n", __func__, k, l->mall_svp_prefetch_factor);
- dml2_printf("DML::%s: k=%d, adj_factor_p0=%f\n", __func__, k, l->adj_factor_p0);
- dml2_printf("DML::%s: k=%d, adj_factor_p1=%f\n", __func__, k, l->adj_factor_p1);
- dml2_printf("DML::%s: k=%d, adj_factor_cur=%f\n", __func__, k, l->adj_factor_cur);
+ DML_LOG_VERBOSE("DML::%s: k=%d, NumberOfDPP=%d\n", __func__, k, NumberOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, use_qual_row_bw=%d\n", __func__, k, use_qual_row_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, immediate_flip=%d\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
+ DML_LOG_VERBOSE("DML::%s: k=%d, mall_svp_prefetch_factor=%f\n", __func__, k, l->mall_svp_prefetch_factor);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p0=%f\n", __func__, k, l->adj_factor_p0);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p1=%f\n", __func__, k, l->adj_factor_p1);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_cur=%f\n", __func__, k, l->adj_factor_cur);
- dml2_printf("DML::%s: k=%d, adj_factor_p0_pre=%f\n", __func__, k, l->adj_factor_p0_pre);
- dml2_printf("DML::%s: k=%d, adj_factor_p1_pre=%f\n", __func__, k, l->adj_factor_p1_pre);
- dml2_printf("DML::%s: k=%d, adj_factor_cur_pre=%f\n", __func__, k, l->adj_factor_cur_pre);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p0_pre=%f\n", __func__, k, l->adj_factor_p0_pre);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p1_pre=%f\n", __func__, k, l->adj_factor_p1_pre);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_cur_pre=%f\n", __func__, k, l->adj_factor_cur_pre);
- dml2_printf("DML::%s: k=%d, per_plane_flip_bw=%f\n", __func__, k, l->per_plane_flip_bw[k]);
- dml2_printf("DML::%s: k=%d, prefetch_vmrow_bw=%f\n", __func__, k, prefetch_vmrow_bw[k]);
- dml2_printf("DML::%s: k=%d, ReadBandwidthLuma=%f\n", __func__, k, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%d, ReadBandwidthChroma=%f\n", __func__, k, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: k=%d, excess_vactive_fill_bw_l=%f\n", __func__, k, excess_vactive_fill_bw_l[k]);
- dml2_printf("DML::%s: k=%d, excess_vactive_fill_bw_c=%f\n", __func__, k, excess_vactive_fill_bw_c[k]);
- dml2_printf("DML::%s: k=%d, cursor_bw=%f\n", __func__, k, cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, per_plane_flip_bw=%f\n", __func__, k, l->per_plane_flip_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, prefetch_vmrow_bw=%f\n", __func__, k, prefetch_vmrow_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, ReadBandwidthLuma=%f\n", __func__, k, ReadBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, ReadBandwidthChroma=%f\n", __func__, k, ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, excess_vactive_fill_bw_l=%f\n", __func__, k, excess_vactive_fill_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, excess_vactive_fill_bw_c=%f\n", __func__, k, excess_vactive_fill_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, cursor_bw=%f\n", __func__, k, cursor_bw[k]);
- dml2_printf("DML::%s: k=%d, meta_row_bw=%f\n", __func__, k, meta_row_bw[k]);
- dml2_printf("DML::%s: k=%d, dpte_row_bw=%f\n", __func__, k, dpte_row_bw[k]);
- dml2_printf("DML::%s: k=%d, PrefetchBandwidthLuma=%f\n", __func__, k, PrefetchBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%d, PrefetchBandwidthChroma=%f\n", __func__, k, PrefetchBandwidthChroma[k]);
- dml2_printf("DML::%s: k=%d, prefetch_cursor_bw=%f\n", __func__, k, prefetch_cursor_bw[k]);
- dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
- dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), soc_state=%s, inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, dml2_core_internal_soc_state_type_str(state_type), inc_flip_bw, is_phantom, exclude_this_plane);
- dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_row_bw=%f\n", __func__, k, meta_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, dpte_row_bw=%f\n", __func__, k, dpte_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, PrefetchBandwidthLuma=%f\n", __func__, k, PrefetchBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, PrefetchBandwidthChroma=%f\n", __func__, k, PrefetchBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, prefetch_cursor_bw=%f\n", __func__, k, prefetch_cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
+ DML_LOG_VERBOSE("DML::%s: k=%d, required_bandwidth_mbps=%f (total), soc_state=%s, inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, dml2_core_internal_soc_state_type_str(state_type), inc_flip_bw, is_phantom, exclude_this_plane);
+ DML_LOG_VERBOSE("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
#endif
}
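
The pattern above repeats throughout the change: every dml2_printf() call under __DML_VBA_DEBUG__ becomes DML_LOG_VERBOSE() with the format string and arguments carried over unchanged. The macro's definition is not part of this diff; as a sketch of the usual shape (the DML_DEBUG_VERBOSE guard and the pr_debug backend below are illustrative placeholders, not the actual DML header contents), the call sites keep a printf-style contract while the body compiles away unless verbose logging is enabled:

    /* Illustrative sketch only -- the real definition lives in the DML
     * debug header, which this diff does not touch.
     */
    #ifdef DML_DEBUG_VERBOSE
    #define DML_LOG_VERBOSE(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
    #else
    #define DML_LOG_VERBOSE(fmt, ...) do { } while (0)
    #endif
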
@@ -5120,19 +5055,19 @@ static void CalculateExtraLatency(
*ExtraLatency_sr = *ExtraLatency_sr + Tarb;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: qos_type=%u\n", __func__, qos_type);
- dml2_printf("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode);
- dml2_printf("DML::%s: Tex_trips=%u\n", __func__, Tex_trips);
- dml2_printf("DML::%s: max_outstanding_when_urgent_expected=%u\n", __func__, max_outstanding_when_urgent_expected);
- dml2_printf("DML::%s: FabricClock=%f\n", __func__, FabricClock);
- dml2_printf("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
- dml2_printf("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
- dml2_printf("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles);
- dml2_printf("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes);
- dml2_printf("DML::%s: Tarb=%f\n", __func__, Tarb);
- dml2_printf("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency);
- dml2_printf("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr);
- dml2_printf("DML::%s: ExtraLatencyPrefetch=%f\n", __func__, *ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: qos_type=%u\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode);
+ DML_LOG_VERBOSE("DML::%s: Tex_trips=%f\n", __func__, Tex_trips);
+ DML_LOG_VERBOSE("DML::%s: max_outstanding_when_urgent_expected=%u\n", __func__, max_outstanding_when_urgent_expected);
+ DML_LOG_VERBOSE("DML::%s: FabricClock=%f\n", __func__, FabricClock);
+ DML_LOG_VERBOSE("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
+ DML_LOG_VERBOSE("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles);
+ DML_LOG_VERBOSE("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes);
+ DML_LOG_VERBOSE("DML::%s: Tarb=%f\n", __func__, Tarb);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatencyPrefetch=%f\n", __func__, *ExtraLatencyPrefetch);
#endif
}
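
One line in the hunk above is more than a rename: the Tex_trips print switches its conversion from %u to %f. Tex_trips is evidently a floating-point value (the replacement specifier says as much), and passing a double where the format string promises an unsigned int is undefined behavior in C, so the conversion is a natural point to correct the specifier. A minimal standalone illustration of the mismatch, outside the driver:

    #include <stdio.h>

    int main(void)
    {
            double tex_trips = 2.0;

            /* printf("Tex_trips=%u\n", tex_trips);  wrong: %u does not match double */
            printf("Tex_trips=%f\n", tex_trips);     /* correct: %f matches double */
            return 0;
    }
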
@@ -5199,20 +5134,20 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->HostVMDynamicLevelsTrips = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dcc_enable = %u\n", __func__, p->dcc_enable);
- dml2_printf("DML::%s: mrq_present = %u\n", __func__, p->mrq_present);
- dml2_printf("DML::%s: dcc_mrq_enable = %u\n", __func__, dcc_mrq_enable);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->display_cfg->gpuvm_enable);
- dml2_printf("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
- dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->myPipe->DCCEnable);
- dml2_printf("DML::%s: VStartup = %u\n", __func__, p->VStartup);
- dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, p->display_cfg->hostvm_enable);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: TWait = %f\n", __func__, p->TWait);
- dml2_printf("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: myPipe->Dppclk = %f\n", __func__, p->myPipe->Dppclk);
- dml2_printf("DML::%s: myPipe->Dispclk = %f\n", __func__, p->myPipe->Dispclk);
+ DML_LOG_VERBOSE("DML::%s: dcc_enable = %u\n", __func__, p->dcc_enable);
+ DML_LOG_VERBOSE("DML::%s: mrq_present = %u\n", __func__, p->mrq_present);
+ DML_LOG_VERBOSE("DML::%s: dcc_mrq_enable = %u\n", __func__, dcc_mrq_enable);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, p->display_cfg->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
+ DML_LOG_VERBOSE("DML::%s: DCCEnable = %u\n", __func__, p->myPipe->DCCEnable);
+ DML_LOG_VERBOSE("DML::%s: VStartup = %u\n", __func__, p->VStartup);
+ DML_LOG_VERBOSE("DML::%s: HostVMEnable = %u\n", __func__, p->display_cfg->hostvm_enable);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: TWait = %f\n", __func__, p->TWait);
+ DML_LOG_VERBOSE("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
+ DML_LOG_VERBOSE("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
+ DML_LOG_VERBOSE("DML::%s: myPipe->Dppclk = %f\n", __func__, p->myPipe->Dppclk);
+ DML_LOG_VERBOSE("DML::%s: myPipe->Dispclk = %f\n", __func__, p->myPipe->Dispclk);
#endif
CalculateVUpdateAndDynamicMetadataParameters(
p->MaxInterDCNTileRepeaters,
@@ -5258,11 +5193,11 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (p->DynamicMetadataEnable == true) {
if (p->VStartup * s->LineTime < *p->TSetup + *p->Tdmdl + s->Tdmbf + s->Tdmec + s->Tdmsks) {
*p->NotEnoughTimeForDynamicMetadata = true;
- dml2_printf("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
- dml2_printf("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
- dml2_printf("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
- dml2_printf("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
- dml2_printf("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
+ DML_LOG_VERBOSE("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
+ DML_LOG_VERBOSE("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
+ DML_LOG_VERBOSE("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
+ DML_LOG_VERBOSE("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
} else {
*p->NotEnoughTimeForDynamicMetadata = false;
}
@@ -5288,21 +5223,21 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
((p->myPipe->ODMMode == dml2_odm_mode_mso_1to4) ? (double)p->myPipe->HActive * 3.0 / 4.0 : 0));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DynamicMetadataVMEnabled = %u\n", __func__, p->DynamicMetadataVMEnabled);
- dml2_printf("DML::%s: DPPCycles = %u\n", __func__, s->DPPCycles);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, p->myPipe->PixelClock);
- dml2_printf("DML::%s: Dppclk = %f\n", __func__, p->myPipe->Dppclk);
- dml2_printf("DML::%s: DISPCLKCycles = %u\n", __func__, s->DISPCLKCycles);
- dml2_printf("DML::%s: DISPCLK = %f\n", __func__, p->myPipe->Dispclk);
- dml2_printf("DML::%s: DSCDelay = %u\n", __func__, p->DSCDelay);
- dml2_printf("DML::%s: ODMMode = %u\n", __func__, p->myPipe->ODMMode);
- dml2_printf("DML::%s: DPP_RECOUT_WIDTH = %u\n", __func__, p->DPP_RECOUT_WIDTH);
- dml2_printf("DML::%s: DSTXAfterScaler = %u\n", __func__, *p->DSTXAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DynamicMetadataVMEnabled = %u\n", __func__, p->DynamicMetadataVMEnabled);
+ DML_LOG_VERBOSE("DML::%s: DPPCycles = %u\n", __func__, s->DPPCycles);
+ DML_LOG_VERBOSE("DML::%s: PixelClock = %f\n", __func__, p->myPipe->PixelClock);
+ DML_LOG_VERBOSE("DML::%s: Dppclk = %f\n", __func__, p->myPipe->Dppclk);
+ DML_LOG_VERBOSE("DML::%s: DISPCLKCycles = %u\n", __func__, s->DISPCLKCycles);
+ DML_LOG_VERBOSE("DML::%s: DISPCLK = %f\n", __func__, p->myPipe->Dispclk);
+ DML_LOG_VERBOSE("DML::%s: DSCDelay = %u\n", __func__, p->DSCDelay);
+ DML_LOG_VERBOSE("DML::%s: ODMMode = %u\n", __func__, p->myPipe->ODMMode);
+ DML_LOG_VERBOSE("DML::%s: DPP_RECOUT_WIDTH = %u\n", __func__, p->DPP_RECOUT_WIDTH);
+ DML_LOG_VERBOSE("DML::%s: DSTXAfterScaler = %u\n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: setup_for_tdlut = %u\n", __func__, p->setup_for_tdlut);
- dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, p->tdlut_opt_time);
- dml2_printf("DML::%s: tdlut_pte_bytes_per_frame = %u\n", __func__, p->tdlut_pte_bytes_per_frame);
- dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, p->tdlut_drain_time);
+ DML_LOG_VERBOSE("DML::%s: setup_for_tdlut = %u\n", __func__, p->setup_for_tdlut);
+ DML_LOG_VERBOSE("DML::%s: tdlut_opt_time = %f\n", __func__, p->tdlut_opt_time);
+ DML_LOG_VERBOSE("DML::%s: tdlut_pte_bytes_per_frame = %u\n", __func__, p->tdlut_pte_bytes_per_frame);
+ DML_LOG_VERBOSE("DML::%s: tdlut_drain_time = %f\n", __func__, p->tdlut_drain_time);
#endif
if (p->OutputFormat == dml2_420 || (p->myPipe->InterlaceEnable && p->myPipe->ProgressiveToInterlaceUnitInOPP))
@@ -5314,17 +5249,17 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->DSTYAfterScaler = (unsigned int)(math_floor2(s->DSTTotalPixelsAfterScaler / p->myPipe->HTotal, 1));
*p->DSTXAfterScaler = (unsigned int)(s->DSTTotalPixelsAfterScaler - ((double)(*p->DSTYAfterScaler * p->myPipe->HTotal)));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSTXAfterScaler = %u (final)\n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: DSTYAfterScaler = %u (final)\n", __func__, *p->DSTYAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DSTXAfterScaler = %u (final)\n", __func__, *p->DSTXAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DSTYAfterScaler = %u (final)\n", __func__, *p->DSTYAfterScaler);
#endif
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
- dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
- dml2_printf("DML::%s: HostVMDynamicLevelsTrips = %u\n", __func__, s->HostVMDynamicLevelsTrips);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
+ DML_LOG_VERBOSE("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
+ DML_LOG_VERBOSE("DML::%s: HostVMDynamicLevelsTrips = %u\n", __func__, s->HostVMDynamicLevelsTrips);
#endif
if (p->display_cfg->gpuvm_enable) {
s->Tvm_trips_rounded = math_ceil2(4.0 * *p->Tvm_trips / s->LineTime, 1.0) / 4.0 * s->LineTime;
@@ -5402,7 +5337,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
}
/* oto prefetch bw should be always be less than total vactive bw */
- //DML2_ASSERT(s->prefetch_bw_oto < s->per_pipe_vactive_sw_bw * p->myPipe->DPPPerSurface);
+ //DML_ASSERT(s->prefetch_bw_oto < s->per_pipe_vactive_sw_bw * p->myPipe->DPPPerSurface);
s->prefetch_bw_oto = math_max2(s->per_pipe_vactive_sw_bw, s->prefetch_bw_oto) * p->mall_prefetch_sdp_overhead_factor;
@@ -5421,9 +5356,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->RequiredPrefetchBWOTO = s->prefetch_bw_oto;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l);
- dml2_printf("DML::%s: vactive_sw_bw_c = %f\n", __func__, p->vactive_sw_bw_c);
- dml2_printf("DML::%s: per_pipe_vactive_sw_bw = %f\n", __func__, s->per_pipe_vactive_sw_bw);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_c = %f\n", __func__, p->vactive_sw_bw_c);
+ DML_LOG_VERBOSE("DML::%s: per_pipe_vactive_sw_bw = %f\n", __func__, s->per_pipe_vactive_sw_bw);
#endif
if (p->display_cfg->gpuvm_enable == true) {
@@ -5433,9 +5368,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->LineTime / 4.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0);
#endif
} else {
s->Tvm_oto = s->Tvm_trips_rounded;
@@ -5447,9 +5382,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto,
s->LineTime / 4.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4);
#endif
} else
s->Tr0_oto = s->LineTime / 4.0;
@@ -5459,11 +5394,11 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto;
#ifdef DML_GLOBAL_PREFETCH_CHECK
- dml2_printf("DML::%s: impacted_Tpre = %f\n", __func__, p->impacted_dst_y_pre);
+ DML_LOG_VERBOSE("DML::%s: impacted_Tpre = %f\n", __func__, p->impacted_dst_y_pre);
if (p->impacted_dst_y_pre > 0) {
- dml2_printf("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
s->dst_y_prefetch_oto = math_max2(s->dst_y_prefetch_oto, p->impacted_dst_y_pre);
- dml2_printf("DML::%s: dst_y_prefetch_oto = %f (impacted)\n", __func__, s->dst_y_prefetch_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_oto = %f (impacted)\n", __func__, s->dst_y_prefetch_oto);
}
#endif
*p->Tpre_oto = s->dst_y_prefetch_oto * s->LineTime;
@@ -5492,72 +5427,71 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->dst_y_prefetch_equ = math_min2(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);
- dml2_printf("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto);
- dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
- dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, *p->Tno_bw_flip);
- dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
- dml2_printf("DML::%s: mall_prefetch_sdp_overhead_factor = %f\n", __func__, p->mall_prefetch_sdp_overhead_factor);
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
- dml2_printf("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
- dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, p->myPipe->BytePerPixelC);
- dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
- dml2_printf("DML::%s: swath_width_chroma_ub = %u\n", __func__, p->swath_width_chroma_ub);
- dml2_printf("DML::%s: prefetch_sw_bytes = %f\n", __func__, *p->prefetch_sw_bytes);
- dml2_printf("DML::%s: max_Tsw = %f\n", __func__, s->max_Tsw);
- dml2_printf("DML::%s: bytes_pp = %f\n", __func__, s->bytes_pp);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tvm_trips_flip = %f\n", __func__, *p->Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_trips_flip = %f\n", __func__, *p->Tr0_trips_flip);
- dml2_printf("DML::%s: prefetch_bw_pr = %f\n", __func__, s->prefetch_bw_pr);
- dml2_printf("DML::%s: prefetch_bw_oto = %f\n", __func__, s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tr0_oto = %f\n", __func__, s->Tr0_oto);
- dml2_printf("DML::%s: Tvm_oto = %f\n", __func__, s->Tvm_oto);
- dml2_printf("DML::%s: Tvm_oto_lines = %f\n", __func__, s->Tvm_oto_lines);
- dml2_printf("DML::%s: Tr0_oto_lines = %f\n", __func__, s->Tr0_oto_lines);
- dml2_printf("DML::%s: Lsw_oto = %f\n", __func__, s->Lsw_oto);
- dml2_printf("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
- dml2_printf("DML::%s: dst_y_prefetch_equ = %f\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: tdlut_row_bytes = %d\n", __func__, tdlut_row_bytes);
- dml2_printf("DML::%s: meta_row_bytes = %d\n", __func__, p->meta_row_bytes);
-#endif
- double Tpre = s->dst_y_prefetch_equ * s->LineTime;
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);
+ DML_LOG_VERBOSE("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto);
+ DML_LOG_VERBOSE("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw_flip = %f\n", __func__, *p->Tno_bw_flip);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
+ DML_LOG_VERBOSE("DML::%s: mall_prefetch_sdp_overhead_factor = %f\n", __func__, p->mall_prefetch_sdp_overhead_factor);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ DML_LOG_VERBOSE("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelC = %u\n", __func__, p->myPipe->BytePerPixelC);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
+ DML_LOG_VERBOSE("DML::%s: swath_width_chroma_ub = %u\n", __func__, p->swath_width_chroma_ub);
+ DML_LOG_VERBOSE("DML::%s: prefetch_sw_bytes = %f\n", __func__, *p->prefetch_sw_bytes);
+ DML_LOG_VERBOSE("DML::%s: max_Tsw = %f\n", __func__, s->max_Tsw);
+ DML_LOG_VERBOSE("DML::%s: bytes_pp = %f\n", __func__, s->bytes_pp);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip = %f\n", __func__, *p->Tvm_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_flip = %f\n", __func__, *p->Tr0_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_pr = %f\n", __func__, s->prefetch_bw_pr);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_oto = %f\n", __func__, s->prefetch_bw_oto);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto = %f\n", __func__, s->Tr0_oto);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto = %f\n", __func__, s->Tvm_oto);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto_lines = %f\n", __func__, s->Tvm_oto_lines);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto_lines = %f\n", __func__, s->Tr0_oto_lines);
+ DML_LOG_VERBOSE("DML::%s: Lsw_oto = %f\n", __func__, s->Lsw_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_equ = %f\n", __func__, s->dst_y_prefetch_equ);
+ DML_LOG_VERBOSE("DML::%s: tdlut_row_bytes = %d\n", __func__, tdlut_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: meta_row_bytes = %d\n", __func__, p->meta_row_bytes);
+#endif
s->dst_y_prefetch_equ = math_floor2(4.0 * (s->dst_y_prefetch_equ + 0.125), 1) / 4.0;
*p->Tpre_rounded = s->dst_y_prefetch_equ * s->LineTime;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: LineTime: %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: VStartup: %u\n", __func__, p->VStartup);
- dml2_printf("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n", __func__, p->VStartup * s->LineTime);
- dml2_printf("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *p->TSetup);
- dml2_printf("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, p->TCalc);
- dml2_printf("DML::%s: TWait: %fus - time for fabric to become ready max(pstate exit,cstate enter/exit, urgent latency) after TCalc\n", __func__, p->TWait);
- dml2_printf("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
- dml2_printf("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
- dml2_printf("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
- dml2_printf("DML::%s: TWait = %f\n", __func__, p->TWait);
- dml2_printf("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: Tex = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd \n", __func__, *p->Tdmdl_vm);
- dml2_printf("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
- dml2_printf("DML::%s: TWait_p: %fus\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip: %fus\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: DSTXAfterScaler: %u pixels - number of pixel clocks pipeline and buffer delay after scaler \n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: DSTYAfterScaler: %u lines - number of lines of pipeline and buffer delay after scaler \n", __func__, *p->DSTYAfterScaler);
- dml2_printf("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes*p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow*p->HostVMInefficiencyFactor+p->meta_row_bytes+tdlut_row_bytes);
- dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, *p->Tpre_rounded, (*p->Tpre_rounded - Tpre));
- dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ);
+ DML_LOG_VERBOSE("DML::%s: LineTime: %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: VStartup: %u\n", __func__, p->VStartup);
+ DML_LOG_VERBOSE("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n", __func__, p->VStartup * s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *p->TSetup);
+ DML_LOG_VERBOSE("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, p->TCalc);
+ DML_LOG_VERBOSE("DML::%s: TWait: %fus - time for fabric to become ready max(pstate exit,cstate enter/exit, urgent latency) after TCalc\n", __func__, p->TWait);
+ DML_LOG_VERBOSE("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
+ DML_LOG_VERBOSE("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
+ DML_LOG_VERBOSE("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
+ DML_LOG_VERBOSE("DML::%s: TWait = %f\n", __func__, p->TWait);
+ DML_LOG_VERBOSE("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
+ DML_LOG_VERBOSE("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
+ DML_LOG_VERBOSE("DML::%s: Tex = %f\n", __func__, p->ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd \n", __func__, *p->Tdmdl_vm);
+ DML_LOG_VERBOSE("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
+ DML_LOG_VERBOSE("DML::%s: TWait_p: %fus\n", __func__, s->TWait_p);
+ DML_LOG_VERBOSE("DML::%s: Ttrip: %fus\n", __func__, p->Ttrip);
+ DML_LOG_VERBOSE("DML::%s: DSTXAfterScaler: %u pixels - number of pixel clocks pipeline and buffer delay after scaler \n", __func__, *p->DSTXAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DSTYAfterScaler: %u lines - number of lines of pipeline and buffer delay after scaler \n", __func__, *p->DSTYAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes*p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow*p->HostVMInefficiencyFactor+p->meta_row_bytes+tdlut_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, (s->dst_y_prefetch_equ * s->LineTime), *p->Tpre_rounded, (*p->Tpre_rounded - (s->dst_y_prefetch_equ * s->LineTime)));
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
#endif
*p->dst_y_per_vm_vblank = 0;
@@ -5596,19 +5530,19 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else
s->prefetch_bw1 = 0;
- dml2_printf("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1);
if ((s->Tsw_est1 < s->min_Lsw_equ * s->LineTime) && (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0)) {
s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) /
(*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: vm and 2 rows bytes = %f\n", __func__, (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)));
- dml2_printf("DML::%s: Tpre_rounded = %f\n", __func__, *p->Tpre_rounded);
- dml2_printf("DML::%s: minus term = %f\n", __func__, s->min_Lsw_equ * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw);
- dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
- dml2_printf("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1);
+ DML_LOG_VERBOSE("DML::%s: vm and 2 rows bytes = %f\n", __func__, (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)));
+ DML_LOG_VERBOSE("DML::%s: Tpre_rounded = %f\n", __func__, *p->Tpre_rounded);
+ DML_LOG_VERBOSE("DML::%s: minus term = %f\n", __func__, s->min_Lsw_equ * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1);
#endif
}
@@ -5620,10 +5554,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else
s->prefetch_bw2 = 0;
- dml2_printf("DML::%s: prefetch_bw2: %f\n", __func__, s->prefetch_bw2);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw2: %f\n", __func__, s->prefetch_bw2);
if ((s->Tsw_est2 < s->min_Lsw_equ * s->LineTime) && ((*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime) > 0)) {
s->prefetch_bw2 = vm_bytes * p->HostVMInefficiencyFactor / (*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime);
- dml2_printf("DML::%s: prefetch_bw2: %f (updated)\n", __func__, s->prefetch_bw2);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw2: %f (updated)\n", __func__, s->prefetch_bw2);
}
// prefetch_bw3: 2*R0 + SW
@@ -5634,10 +5568,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else
s->prefetch_bw3 = 0;
- dml2_printf("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3);
if ((s->Tsw_est3 < s->min_Lsw_equ * s->LineTime) && ((*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded) > 0)) {
s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded);
- dml2_printf("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3);
}
// prefetch_bw4: SW
@@ -5647,17 +5581,17 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->prefetch_bw4 = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, *p->Tpre_rounded, (*p->Tpre_rounded - Tpre));
- dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
- dml2_printf("DML::%s: Tr0_trips=%f Tr0_trips_rounded: %f, delta=%f\n", __func__, *p->Tr0_trips, s->Tr0_trips_rounded, (s->Tr0_trips_rounded - *p->Tr0_trips));
- dml2_printf("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1);
- dml2_printf("DML::%s: Tsw_est2: %f\n", __func__, s->Tsw_est2);
- dml2_printf("DML::%s: Tsw_est3: %f\n", __func__, s->Tsw_est3);
- dml2_printf("DML::%s: prefetch_bw1: %f (final)\n", __func__, s->prefetch_bw1);
- dml2_printf("DML::%s: prefetch_bw2: %f (final)\n", __func__, s->prefetch_bw2);
- dml2_printf("DML::%s: prefetch_bw3: %f (final)\n", __func__, s->prefetch_bw3);
- dml2_printf("DML::%s: prefetch_bw4: %f (final)\n", __func__, s->prefetch_bw4);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, s->dst_y_prefetch_equ * s->LineTime, *p->Tpre_rounded, (*p->Tpre_rounded - (s->dst_y_prefetch_equ * s->LineTime)));
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips=%f Tr0_trips_rounded: %f, delta=%f\n", __func__, *p->Tr0_trips, s->Tr0_trips_rounded, (s->Tr0_trips_rounded - *p->Tr0_trips));
+ DML_LOG_VERBOSE("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1);
+ DML_LOG_VERBOSE("DML::%s: Tsw_est2: %f\n", __func__, s->Tsw_est2);
+ DML_LOG_VERBOSE("DML::%s: Tsw_est3: %f\n", __func__, s->Tsw_est3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw1: %f (final)\n", __func__, s->prefetch_bw1);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw2: %f (final)\n", __func__, s->prefetch_bw2);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw3: %f (final)\n", __func__, s->prefetch_bw3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw4: %f (final)\n", __func__, s->prefetch_bw4);
#endif
{
bool Case1OK = false;
@@ -5676,14 +5610,14 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
double total_row_bytes = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes);
- dml2_printf("DML::%s: Tvm_trips_rounded = %f\n", __func__, s->Tvm_trips_rounded);
- dml2_printf("DML::%s: Tr0_trips_rounded = %f\n", __func__, s->Tr0_trips_rounded);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_rounded = %f\n", __func__, s->Tvm_trips_rounded);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_rounded = %f\n", __func__, s->Tr0_trips_rounded);
if (s->prefetch_bw1 > 0) {
double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw1;
double row_transfer_time = total_row_bytes / s->prefetch_bw1;
- dml2_printf("DML::%s: Case1: vm_transfer_time = %f\n", __func__, vm_transfer_time);
- dml2_printf("DML::%s: Case1: row_transfer_time = %f\n", __func__, row_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case1: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case1: row_transfer_time = %f\n", __func__, row_transfer_time);
if (vm_transfer_time >= s->Tvm_trips_rounded && row_transfer_time >= s->Tr0_trips_rounded) {
Case1OK = true;
}
@@ -5696,8 +5630,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (s->prefetch_bw2 > 0) {
double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw2;
double row_transfer_time = total_row_bytes / s->prefetch_bw2;
- dml2_printf("DML::%s: Case2: vm_transfer_time = %f\n", __func__, vm_transfer_time);
- dml2_printf("DML::%s: Case2: row_transfer_time = %f\n", __func__, row_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case2: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case2: row_transfer_time = %f\n", __func__, row_transfer_time);
if (vm_transfer_time >= s->Tvm_trips_rounded && row_transfer_time < s->Tr0_trips_rounded) {
Case2OK = true;
}
@@ -5709,8 +5643,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (s->prefetch_bw3 > 0) {
double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw3;
double row_transfer_time = total_row_bytes / s->prefetch_bw3;
- dml2_printf("DML::%s: Case3: vm_transfer_time = %f\n", __func__, vm_transfer_time);
- dml2_printf("DML::%s: Case3: row_transfer_time = %f\n", __func__, row_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case3: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case3: row_transfer_time = %f\n", __func__, row_transfer_time);
if (vm_transfer_time < s->Tvm_trips_rounded && row_transfer_time >= s->Tr0_trips_rounded) {
Case3OK = true;
}
@@ -5730,10 +5664,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Case1OK: %u\n", __func__, Case1OK);
- dml2_printf("DML::%s: Case2OK: %u\n", __func__, Case2OK);
- dml2_printf("DML::%s: Case3OK: %u\n", __func__, Case3OK);
- dml2_printf("DML::%s: prefetch_bw_equ: %f\n", __func__, s->prefetch_bw_equ);
+ DML_LOG_VERBOSE("DML::%s: Case1OK: %u\n", __func__, Case1OK);
+ DML_LOG_VERBOSE("DML::%s: Case2OK: %u\n", __func__, Case2OK);
+ DML_LOG_VERBOSE("DML::%s: Case3OK: %u\n", __func__, Case3OK);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_equ: %f\n", __func__, s->prefetch_bw_equ);
#endif
if (s->prefetch_bw_equ > 0) {
@@ -5753,12 +5687,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else {
s->Tvm_equ = 0;
s->Tr0_equ = 0;
- dml2_printf("DML::%s: prefetch_bw_equ equals 0!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_equ equals 0!\n", __func__);
}
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ);
- dml2_printf("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ);
+ DML_LOG_VERBOSE("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ);
+ DML_LOG_VERBOSE("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ);
#endif
// Use the more stressful prefetch schedule
if (s->dst_y_prefetch_oto < s->dst_y_prefetch_equ) {
@@ -5769,7 +5703,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
*p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Using oto scheduling for prefetch\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Using oto scheduling for prefetch\n", __func__);
#endif
} else {
@@ -5785,7 +5719,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
#endif
}
@@ -5797,31 +5731,31 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->prefetch_swath_time_us = (s->LinesToRequestPrefetchPixelData * s->LineTime);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TimeForFetchingVM = %f\n", __func__, s->TimeForFetchingVM);
- dml2_printf("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, s->TimeForFetchingRowInVBlank);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: dst_y_prefetch = %f\n", __func__, *p->dst_y_prefetch);
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, s->LinesToRequestPrefetchPixelData);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
- dml2_printf("DML::%s: prefetch_swath_time_us = %f\n", __func__, *p->prefetch_swath_time_us);
+ DML_LOG_VERBOSE("DML::%s: TimeForFetchingVM = %f\n", __func__, s->TimeForFetchingVM);
+ DML_LOG_VERBOSE("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, s->TimeForFetchingRowInVBlank);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch = %f\n", __func__, *p->dst_y_prefetch);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, s->LinesToRequestPrefetchPixelData);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ DML_LOG_VERBOSE("DML::%s: prefetch_swath_time_us = %f\n", __func__, *p->prefetch_swath_time_us);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, p->cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, p->cursor_bytes_per_line);
- dml2_printf("DML::%s: cursor_prefetch_bytes = %d\n", __func__, s->cursor_prefetch_bytes);
- dml2_printf("DML::%s: prefetch_cursor_bw = %f\n", __func__, *p->prefetch_cursor_bw);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, p->cursor_bytes_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_line = %d\n", __func__, p->cursor_bytes_per_line);
+ DML_LOG_VERBOSE("DML::%s: cursor_prefetch_bytes = %d\n", __func__, s->cursor_prefetch_bytes);
+ DML_LOG_VERBOSE("DML::%s: prefetch_cursor_bw = %f\n", __func__, *p->prefetch_cursor_bw);
#endif
- DML2_ASSERT(*p->dst_y_prefetch < 64);
+ DML_ASSERT(*p->dst_y_prefetch < 64);
unsigned int min_lsw_required = (unsigned int)math_max2(2, p->tdlut_drain_time / s->LineTime);
if (s->LinesToRequestPrefetchPixelData >= min_lsw_required && s->prefetch_bw_equ > 0) {
*p->VRatioPrefetchY = (double)p->PrefetchSourceLinesY / s->LinesToRequestPrefetchPixelData;
*p->VRatioPrefetchY = math_max2(*p->VRatioPrefetchY, 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
- dml2_printf("DML::%s: SwathHeightY = %u\n", __func__, p->SwathHeightY);
- dml2_printf("DML::%s: VInitPreFillY = %u\n", __func__, p->VInitPreFillY);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
+ DML_LOG_VERBOSE("DML::%s: SwathHeightY = %u\n", __func__, p->SwathHeightY);
+ DML_LOG_VERBOSE("DML::%s: VInitPreFillY = %u\n", __func__, p->VInitPreFillY);
#endif
if ((p->SwathHeightY > 4) && (p->VInitPreFillY > 3)) {
if (s->LinesToRequestPrefetchPixelData > (p->VInitPreFillY - 3.0) / 2.0) {
@@ -5829,13 +5763,13 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
(double)p->MaxNumSwathY * p->SwathHeightY / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillY - 3.0) / 2.0));
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY);
*p->VRatioPrefetchY = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
- dml2_printf("DML::%s: MaxNumSwathY = %u\n", __func__, p->MaxNumSwathY);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ DML_LOG_VERBOSE("DML::%s: MaxNumSwathY = %u\n", __func__, p->MaxNumSwathY);
#endif
}
@@ -5843,22 +5777,22 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
- dml2_printf("DML::%s: SwathHeightC = %u\n", __func__, p->SwathHeightC);
- dml2_printf("DML::%s: VInitPreFillC = %u\n", __func__, p->VInitPreFillC);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
+ DML_LOG_VERBOSE("DML::%s: SwathHeightC = %u\n", __func__, p->SwathHeightC);
+ DML_LOG_VERBOSE("DML::%s: VInitPreFillC = %u\n", __func__, p->VInitPreFillC);
#endif
if ((p->SwathHeightC > 4) && (p->VInitPreFillC > 3)) {
if (s->LinesToRequestPrefetchPixelData > (p->VInitPreFillC - 3.0) / 2.0) {
*p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, (double)p->MaxNumSwathC * p->SwathHeightC / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillC - 3.0) / 2.0));
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC);
*p->VRatioPrefetchC = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
- dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
- dml2_printf("DML::%s: MaxNumSwathC = %u\n", __func__, p->MaxNumSwathC);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
+ DML_LOG_VERBOSE("DML::%s: MaxNumSwathC = %u\n", __func__, p->MaxNumSwathC);
#endif
}
@@ -5866,36 +5800,34 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->RequiredPrefetchPixelDataBWChroma = (double)p->PrefetchSourceLinesC / s->LinesToRequestPrefetchPixelData * p->myPipe->BytePerPixelC * p->swath_width_chroma_ub / s->LineTime;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
- dml2_printf("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWLuma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWChroma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
+ DML_LOG_VERBOSE("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWLuma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWChroma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
#endif
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required);
- dml2_printf("DML::%s: No time to prefetch!, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ);
*p->VRatioPrefetchY = 0;
*p->VRatioPrefetchC = 0;
*p->RequiredPrefetchPixelDataBWLuma = 0;
*p->RequiredPrefetchPixelDataBWChroma = 0;
}
- dml2_printf("DML: Tpre: %fus - sum of time to request 2 x data pte, swaths\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime + 2.0 * s->TimeForFetchingRowInVBlank + s->TimeForFetchingVM);
- dml2_printf("DML: Tvm: %fus - time to fetch vm\n", s->TimeForFetchingVM);
- dml2_printf("DML: Tr0: %fus - time to fetch first row of data pagetables\n", s->TimeForFetchingRowInVBlank);
- dml2_printf("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime);
- dml2_printf("DML: To: %fus - time for propagation from scaler to optc\n", (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime);
- dml2_printf("DML: Tvstartup - TSetup - Tcalc - TWait - Tpre - To > 0\n");
- dml2_printf("DML: Tslack(pre): %fus - time left over in schedule\n", p->VStartup * s->LineTime - s->TimeForFetchingVM - 2 * s->TimeForFetchingRowInVBlank - (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime - p->TWait - p->TCalc - *p->TSetup);
- dml2_printf("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %u\n", p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML: Tpre: %fus - sum of time to request 2 x data pte, swaths\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime + 2.0 * s->TimeForFetchingRowInVBlank + s->TimeForFetchingVM);
+ DML_LOG_VERBOSE("DML: Tvm: %fus - time to fetch vm\n", s->TimeForFetchingVM);
+ DML_LOG_VERBOSE("DML: Tr0: %fus - time to fetch first row of data pagetables\n", s->TimeForFetchingRowInVBlank);
+ DML_LOG_VERBOSE("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime);
+ DML_LOG_VERBOSE("DML: To: %fus - time for propagation from scaler to optc\n", (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime);
+ DML_LOG_VERBOSE("DML: Tvstartup - TSetup - Tcalc - TWait - Tpre - To > 0\n");
+ DML_LOG_VERBOSE("DML: Tslack(pre): %fus - time left over in schedule\n", p->VStartup * s->LineTime - s->TimeForFetchingVM - 2 * s->TimeForFetchingRowInVBlank - (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime - p->TWait - p->TCalc - *p->TSetup);
+ DML_LOG_VERBOSE("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %u\n", p->PixelPTEBytesPerRow);
} else {
- dml2_printf("DML::%s: No time to prefetch! dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n",
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch! dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n",
__func__, min_Lsw_equ_ok, *p->Tpre_rounded, s->Tvm_trips_rounded, 2.0*s->Tr0_trips_rounded, s->min_Lsw_equ*s->LineTime);
- dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded+Tvm_trips_rounded+2.0*Tr0_trips_rounded+min_Tsw_equ (%f) should be > \n",
- __func__, tpre_gt_req_latency, (s->min_Lsw_equ*s->LineTime + s->Tvm_trips_rounded + 2.0*s->Tr0_trips_rounded), p->Turg, s->trip_to_mem, p->ExtraLatencyPrefetch);
s->NoTimeToPrefetch = true;
s->TimeForFetchingVM = 0;
s->TimeForFetchingRowInVBlank = 0;
@@ -5916,18 +5848,18 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
prefetch_vm_bw = 0;
} else if (*p->dst_y_per_vm_vblank > 0) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
#endif
prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (*p->dst_y_per_vm_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
+ DML_LOG_VERBOSE("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
} else {
prefetch_vm_bw = 0;
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank);
}
if (p->PixelPTEBytesPerRow == 0 && tdlut_row_bytes == 0) {
@@ -5936,14 +5868,14 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (*p->dst_y_per_row_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: prefetch_row_bw = %f\n", __func__, prefetch_row_bw);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: prefetch_row_bw = %f\n", __func__, prefetch_row_bw);
#endif
} else {
prefetch_row_bw = 0;
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank);
}
*p->prefetch_vmrow_bw = math_max2(prefetch_vm_bw, prefetch_row_bw);
@@ -5963,12 +5895,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->prefetch_vmrow_bw = 0;
}
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f (final)\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f (final)\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: prefetch_vmrow_bw = %f (final)\n", __func__, *p->prefetch_vmrow_bw);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWLuma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWChroma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
- dml2_printf("DML::%s: NoTimeToPrefetch=%d\n", __func__, s->NoTimeToPrefetch);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_vblank = %f (final)\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_vblank = %f (final)\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: prefetch_vmrow_bw = %f (final)\n", __func__, *p->prefetch_vmrow_bw);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWLuma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWChroma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
+ DML_LOG_VERBOSE("DML::%s: NoTimeToPrefetch=%d\n", __func__, s->NoTimeToPrefetch);
return s->NoTimeToPrefetch;
}
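
Besides the rename, CalculatePrefetchSchedule() drops the local "double Tpre = s->dst_y_prefetch_equ * s->LineTime" that was declared outside the #ifdef __DML_VBA_DEBUG__ guards but only ever read inside them; the two Tpre log lines now compute the product inline. Keeping debug-only values inside the guarded region avoids an unused-variable warning in non-debug builds. A generic sketch of the pattern, with hypothetical names:

    /* Hypothetical illustration of the pattern, not driver code. */
    #include <stdio.h>

    static void schedule_step(double lines, double line_time_us)
    {
    #ifdef MY_DEBUG
            /* Compute the debug-only value where it is consumed... */
            printf("tpre=%f us\n", lines * line_time_us);
    #else
            /* ...so no local is left unused when the block compiles away. */
            (void)lines;
            (void)line_time_us;
    #endif
    }
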
@@ -6005,7 +5937,7 @@ static unsigned int find_max_impact_plane(unsigned int this_plane_idx, unsigned
}
}
if (max_idx <= 0) {
- DML2_ASSERT(max_idx >= 0);
+ DML_ASSERT(max_idx >= 0);
max_idx = this_plane_idx;
}
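
The companion rename in this hunk and the earlier ones is DML2_ASSERT -> DML_ASSERT (see also the commented-out prefetch-bandwidth check and the dst_y_prefetch < 64 bound above). Its definition is likewise outside this diff; a rough sketch of a debug-build assert wrapper, with the DML_DEBUG guard and the pr_err report as placeholders rather than the actual header contents:

    #ifdef DML_DEBUG
    #define DML_ASSERT(expr)                                            \
            do {                                                        \
                    if (!(expr))                                        \
                            pr_err("DML assert failed: %s (%s:%d)\n",   \
                                   #expr, __FILE__, __LINE__);          \
            } while (0)
    #else
    #define DML_ASSERT(expr) ((void)0)
    #endif
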
@@ -6037,12 +5969,12 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
// worst case if the rob and cdb is fully hogged
s->max_Trpd_dcfclk_cycles = (unsigned int) math_ceil2((p->rob_buffer_size_kbytes*1024 + p->compressed_buffer_size_kbytes*DML_MAX_COMPRESSION_RATIO*1024)/64.0, 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_planes = %d\n", __func__, p->num_active_planes);
- dml2_printf("DML::%s: rob_buffer_size_kbytes = %d\n", __func__, p->rob_buffer_size_kbytes);
- dml2_printf("DML::%s: compressed_buffer_size_kbytes = %d\n", __func__, p->compressed_buffer_size_kbytes);
- dml2_printf("DML::%s: estimated_urg_bandwidth_required_mbps = %f\n", __func__, p->estimated_urg_bandwidth_required_mbps);
- dml2_printf("DML::%s: estimated_dcfclk_mhz = %f\n", __func__, p->estimated_dcfclk_mhz);
- dml2_printf("DML::%s: max_Trpd_dcfclk_cycles = %u\n", __func__, s->max_Trpd_dcfclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: num_active_planes = %d\n", __func__, p->num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: rob_buffer_size_kbytes = %d\n", __func__, p->rob_buffer_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: compressed_buffer_size_kbytes = %d\n", __func__, p->compressed_buffer_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: estimated_urg_bandwidth_required_mbps = %f\n", __func__, p->estimated_urg_bandwidth_required_mbps);
+ DML_LOG_VERBOSE("DML::%s: estimated_dcfclk_mhz = %f\n", __func__, p->estimated_dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_Trpd_dcfclk_cycles = %u\n", __func__, s->max_Trpd_dcfclk_cycles);
#endif
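A worked instance of the worst-case return-path bound above: cycles = ceil((rob_bytes + cdb_bytes * max_compression) / 64), since data returns 64B per dcfclk cycle. The buffer sizes and the compression ratio below are illustrative only, not hardware defaults:

#include <math.h>
#include <stdio.h>

int main(void)
{
	double rob_kbytes = 128.0, cdb_kbytes = 64.0, max_comp = 4.0;
	double cycles = ceil((rob_kbytes * 1024 + cdb_kbytes * max_comp * 1024) / 64.0);
	printf("max_Trpd_dcfclk_cycles = %.0f\n", cycles); /* 6144 for these inputs */
	return 0;
}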
// calculate the return impact from each plane, request is 256B per dcfclk
@@ -6063,12 +5995,12 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
s->burst_bytes_to_fill_det += (unsigned int) (math_floor2(p->lb_source_lines_l[i] / p->swath_height_l[i], 1) * s->src_swath_bytes_l[i]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u pixel_format = %d\n", __func__, i, p->pixel_format[i]);
- dml2_printf("DML::%s: i=%u chunk_bytes_l = %d\n", __func__, i, p->chunk_bytes_l);
- dml2_printf("DML::%s: i=%u lb_source_lines_l = %d\n", __func__, i, p->lb_source_lines_l[i]);
- dml2_printf("DML::%s: i=%u src_detile_buf_size_bytes_l=%d\n", __func__, i, s->src_detile_buf_size_bytes_l[i]);
- dml2_printf("DML::%s: i=%u src_swath_bytes_l=%d\n", __func__, i, s->src_swath_bytes_l[i]);
- dml2_printf("DML::%s: i=%u burst_bytes_to_fill_det=%d (luma)\n", __func__, i, s->burst_bytes_to_fill_det);
+ DML_LOG_VERBOSE("DML::%s: i=%u pixel_format = %d\n", __func__, i, p->pixel_format[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u chunk_bytes_l = %d\n", __func__, i, p->chunk_bytes_l);
+ DML_LOG_VERBOSE("DML::%s: i=%u lb_source_lines_l = %d\n", __func__, i, p->lb_source_lines_l[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_detile_buf_size_bytes_l=%d\n", __func__, i, s->src_detile_buf_size_bytes_l[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_swath_bytes_l=%d\n", __func__, i, s->src_swath_bytes_l[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u burst_bytes_to_fill_det=%d (luma)\n", __func__, i, s->burst_bytes_to_fill_det);
#endif
if (s->src_swath_bytes_c[i] > 0) { // dual_plane
@@ -6079,10 +6011,10 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u chunk_bytes_c = %d\n", __func__, i, p->chunk_bytes_c);
- dml2_printf("DML::%s: i=%u lb_source_lines_c = %d\n", __func__, i, p->lb_source_lines_c[i]);
- dml2_printf("DML::%s: i=%u src_detile_buf_size_bytes_c=%d\n", __func__, i, s->src_detile_buf_size_bytes_c[i]);
- dml2_printf("DML::%s: i=%u src_swath_bytes_c=%d\n", __func__, i, s->src_swath_bytes_c[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u chunk_bytes_c = %d\n", __func__, i, p->chunk_bytes_c);
+ DML_LOG_VERBOSE("DML::%s: i=%u lb_source_lines_c = %d\n", __func__, i, p->lb_source_lines_c[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_detile_buf_size_bytes_c=%d\n", __func__, i, s->src_detile_buf_size_bytes_c[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_swath_bytes_c=%d\n", __func__, i, s->src_swath_bytes_c[i]);
#endif
}
@@ -6090,9 +6022,9 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
s->accumulated_return_path_dcfclk_cycles[i] = (unsigned int) math_ceil2(((DML_MAX_COMPRESSION_RATIO-1) * 64 * p->estimated_dcfclk_mhz) * s->time_to_fill_det_us / 64.0, 1.0); //for 64B per DCFClk
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u burst_bytes_to_fill_det=%d\n", __func__, i, s->burst_bytes_to_fill_det);
- dml2_printf("DML::%s: i=%u time_to_fill_det_us=%f\n", __func__, i, s->time_to_fill_det_us);
- dml2_printf("DML::%s: i=%u accumulated_return_path_dcfclk_cycles=%u\n", __func__, i, s->accumulated_return_path_dcfclk_cycles[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u burst_bytes_to_fill_det=%d\n", __func__, i, s->burst_bytes_to_fill_det);
+ DML_LOG_VERBOSE("DML::%s: i=%u time_to_fill_det_us=%f\n", __func__, i, s->time_to_fill_det_us);
+ DML_LOG_VERBOSE("DML::%s: i=%u accumulated_return_path_dcfclk_cycles=%u\n", __func__, i, s->accumulated_return_path_dcfclk_cycles[i]);
#endif
// clamping to the worst case delay, i.e. the one that occupies the full rob+cdb
if (s->accumulated_return_path_dcfclk_cycles[i] > s->max_Trpd_dcfclk_cycles)
@@ -6109,7 +6041,7 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
p->impacted_dst_y_pre[i] = math_ceil2(p->impacted_dst_y_pre[i] / p->line_time[i], 0.25);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u impacted_Tpre=%f (k=%u)\n", __func__, i, p->impacted_dst_y_pre[i], k);
+ DML_LOG_VERBOSE("DML::%s: i=%u impacted_Tpre=%f (k=%u)\n", __func__, i, p->impacted_dst_y_pre[i], k);
#endif
}
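The 0.25 granularity above quantizes the impacted prefetch time to quarter line times. Assumed semantics of math_ceil2(v, g), rounding v up to the nearest multiple of g (a sketch, not the driver's implementation):

#include <math.h>

static double math_ceil2_sketch(double v, double g)
{
	return ceil(v / g) * g;	/* e.g. math_ceil2_sketch(3.1, 0.25) == 3.25 */
}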
@@ -6120,8 +6052,8 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
*p->recalc_prefetch_schedule = 1;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u Tpre_rounded=%f\n", __func__, i, p->Tpre_rounded[i]);
- dml2_printf("DML::%s: i=%u Tpre_oto=%f\n", __func__, i, p->Tpre_oto[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u Tpre_rounded=%f\n", __func__, i, p->Tpre_rounded[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u Tpre_oto=%f\n", __func__, i, p->Tpre_oto[i]);
#endif
}
} else {
@@ -6131,8 +6063,8 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: prefetch_global_check_passed=%u\n", __func__, s->prefetch_global_check_passed);
- dml2_printf("DML::%s: recalc_prefetch_schedule=%u\n", __func__, *p->recalc_prefetch_schedule);
+ DML_LOG_VERBOSE("DML::%s: prefetch_global_check_passed=%u\n", __func__, s->prefetch_global_check_passed);
+ DML_LOG_VERBOSE("DML::%s: recalc_prefetch_schedule=%u\n", __func__, *p->recalc_prefetch_schedule);
#endif
return s->prefetch_global_check_passed;
@@ -6150,8 +6082,8 @@ static void calculate_peak_bandwidth_required(
memset(l, 0, sizeof(struct dml2_core_shared_calculate_peak_bandwidth_required_locals));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: inc_flip_bw = %d\n", __func__, p->inc_flip_bw);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %d\n", __func__, p->num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: inc_flip_bw = %d\n", __func__, p->inc_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %d\n", __func__, p->num_active_planes);
#endif
for (unsigned int k = 0; k < p->num_active_planes; ++k) {
@@ -6347,12 +6279,12 @@ static void calculate_peak_bandwidth_required(
p->surface_peak_required_bw[m][n]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: urg_vactive_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_vactive_bandwidth_required[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_required_qual[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
- dml2_printf("DML::%s: non_urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->non_urg_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_vactive_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_vactive_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_qual[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: non_urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->non_urg_bandwidth_required[m][n]);
#endif
- DML2_ASSERT(p->urg_bandwidth_required[m][n] >= p->non_urg_bandwidth_required[m][n]);
+ DML_ASSERT(p->urg_bandwidth_required[m][n] >= p->non_urg_bandwidth_required[m][n]);
}
}
}
@@ -6414,18 +6346,18 @@ static void check_urgent_bandwidth_support(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: frac_urg_bandwidth_nom_sdp = %f\n", __func__, frac_urg_bandwidth_nom_sdp);
- dml2_printf("DML::%s: frac_urg_bandwidth_nom_dram = %f\n", __func__, frac_urg_bandwidth_nom_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_nom = %f\n", __func__, *frac_urg_bandwidth_nom);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_nom_sdp = %f\n", __func__, frac_urg_bandwidth_nom_sdp);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_nom_dram = %f\n", __func__, frac_urg_bandwidth_nom_dram);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_nom = %f\n", __func__, *frac_urg_bandwidth_nom);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall_sdp = %f\n", __func__, frac_urg_bandwidth_mall_sdp);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall_dram = %f\n", __func__, frac_urg_bandwidth_mall_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall = %f\n", __func__, *frac_urg_bandwidth_mall);
- dml2_printf("DML::%s: bandwidth_support_ok = %d\n", __func__, *bandwidth_support_ok);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_mall_sdp = %f\n", __func__, frac_urg_bandwidth_mall_sdp);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_mall_dram = %f\n", __func__, frac_urg_bandwidth_mall_dram);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_mall = %f\n", __func__, *frac_urg_bandwidth_mall);
+ DML_LOG_VERBOSE("DML::%s: bandwidth_support_ok = %d\n", __func__, *bandwidth_support_ok);
for (unsigned int m = 0; m < dml2_core_internal_soc_state_max; m++) {
for (unsigned int n = 0; n < dml2_core_internal_bw_max; n++) {
- dml2_printf("DML::%s: state:%s bw_type:%s urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
+ DML_LOG_VERBOSE("DML::%s: state:%s bw_type:%s urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
__func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n),
urg_bandwidth_available[m][n], (urg_bandwidth_available[m][n] < urg_bandwidth_required[m][n]) ? "<" : ">=", urg_bandwidth_required[m][n]);
}
@@ -6446,14 +6378,14 @@ static double get_bandwidth_available_for_immediate_flip(enum dml2_core_internal
flip_bw_available_mbps = flip_bw_available_sdp_mbps < flip_bw_available_dram_mbps ? flip_bw_available_sdp_mbps : flip_bw_available_dram_mbps;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
- dml2_printf("DML::%s: urg_bandwidth_available_sdp_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: urg_bandwidth_available_dram_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: urg_bandwidth_required_sdp_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: urg_bandwidth_required_dram_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: flip_bw_available_sdp_mbps = %f\n", __func__, flip_bw_available_sdp_mbps);
- dml2_printf("DML::%s: flip_bw_available_dram_mbps = %f\n", __func__, flip_bw_available_dram_mbps);
- dml2_printf("DML::%s: flip_bw_available_mbps = %f\n", __func__, flip_bw_available_mbps);
+ DML_LOG_VERBOSE("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_sdp_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_dram_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_sdp_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_dram_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: flip_bw_available_sdp_mbps = %f\n", __func__, flip_bw_available_sdp_mbps);
+ DML_LOG_VERBOSE("DML::%s: flip_bw_available_dram_mbps = %f\n", __func__, flip_bw_available_dram_mbps);
+ DML_LOG_VERBOSE("DML::%s: flip_bw_available_mbps = %f\n", __func__, flip_bw_available_mbps);
#endif
return flip_bw_available_mbps;
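In other words, the flip headroom per channel type is available minus already-required urgent bandwidth, and the overall figure is the smaller of the two. With illustrative numbers (all mbps values made up):

#include <stdio.h>

int main(void)
{
	double sdp_avail = 1000.0, sdp_req = 700.0;
	double dram_avail = 1600.0, dram_req = 1450.0;
	double flip_sdp = sdp_avail - sdp_req;		/* 300 */
	double flip_dram = dram_avail - dram_req;	/* 150 */
	double flip = flip_sdp < flip_dram ? flip_sdp : flip_dram;
	printf("flip_bw_available_mbps = %.0f\n", flip);	/* 150 */
	return 0;
}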
@@ -6478,28 +6410,28 @@ static void calculate_immediate_flip_bandwidth_support(
*flip_bandwidth_support_ok &= urg_bandwidth_available[eval_state][n] >= urg_bandwidth_required_flip[eval_state][n];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: n = %s\n", __func__, dml2_core_internal_bw_type_str(n));
- dml2_printf("DML::%s: urg_bandwidth_available = %f\n", __func__, urg_bandwidth_available[eval_state][n]);
- dml2_printf("DML::%s: non_urg_bandwidth_required_flip = %f\n", __func__, non_urg_bandwidth_required_flip[eval_state][n]);
- dml2_printf("DML::%s: urg_bandwidth_required_flip = %f\n", __func__, urg_bandwidth_required_flip[eval_state][n]);
- dml2_printf("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
+ DML_LOG_VERBOSE("DML::%s: n = %s\n", __func__, dml2_core_internal_bw_type_str(n));
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available = %f\n", __func__, urg_bandwidth_available[eval_state][n]);
+ DML_LOG_VERBOSE("DML::%s: non_urg_bandwidth_required_flip = %f\n", __func__, non_urg_bandwidth_required_flip[eval_state][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_flip = %f\n", __func__, urg_bandwidth_required_flip[eval_state][n]);
+ DML_LOG_VERBOSE("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
#endif
- DML2_ASSERT(urg_bandwidth_required_flip[eval_state][n] >= non_urg_bandwidth_required_flip[eval_state][n]);
+ DML_ASSERT(urg_bandwidth_required_flip[eval_state][n] >= non_urg_bandwidth_required_flip[eval_state][n]);
}
*frac_urg_bandwidth_flip = (frac_urg_bw_flip_sdp > frac_urg_bw_flip_dram) ? frac_urg_bw_flip_sdp : frac_urg_bw_flip_dram;
*flip_bandwidth_support_ok &= (*frac_urg_bandwidth_flip <= 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
- dml2_printf("DML::%s: frac_urg_bw_flip_sdp = %f\n", __func__, frac_urg_bw_flip_sdp);
- dml2_printf("DML::%s: frac_urg_bw_flip_dram = %f\n", __func__, frac_urg_bw_flip_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_flip = %f\n", __func__, *frac_urg_bandwidth_flip);
- dml2_printf("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
+ DML_LOG_VERBOSE("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bw_flip_sdp = %f\n", __func__, frac_urg_bw_flip_sdp);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bw_flip_dram = %f\n", __func__, frac_urg_bw_flip_dram);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_flip = %f\n", __func__, *frac_urg_bandwidth_flip);
+ DML_LOG_VERBOSE("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
for (unsigned int m = 0; m < dml2_core_internal_soc_state_max; m++) {
for (unsigned int n = 0; n < dml2_core_internal_bw_max; n++) {
- dml2_printf("DML::%s: state:%s bw_type:%s, urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
+ DML_LOG_VERBOSE("DML::%s: state:%s bw_type:%s, urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
__func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n),
urg_bandwidth_available[m][n], (urg_bandwidth_available[m][n] < urg_bandwidth_required_flip[m][n]) ? "<" : ">=", urg_bandwidth_required_flip[m][n]);
}
@@ -6549,27 +6481,27 @@ static void CalculateFlipSchedule(
l->dpte_row_bytes = DPTEBytesPerRow;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, GPUVMEnable);
- dml2_printf("DML::%s: ip.max_flip_time_us = %d\n", __func__, max_flip_time_us);
- dml2_printf("DML::%s: ip.max_flip_time_lines = %d\n", __func__, max_flip_time_lines);
- dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
- dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, TotImmediateFlipBytes);
- dml2_printf("DML::%s: use_lb_flip_bw = %u\n", __func__, use_lb_flip_bw);
- dml2_printf("DML::%s: iflip_enable = %u\n", __func__, iflip_enable);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, LineTime);
- dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, Tno_bw_flip);
- dml2_printf("DML::%s: Tvm_trips_flip = %f\n", __func__, Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_trips_flip = %f\n", __func__, Tr0_trips_flip);
- dml2_printf("DML::%s: Tvm_trips_flip_rounded = %f\n", __func__, Tvm_trips_flip_rounded);
- dml2_printf("DML::%s: Tr0_trips_flip_rounded = %f\n", __func__, Tr0_trips_flip_rounded);
- dml2_printf("DML::%s: vm_bytes = %f\n", __func__, vm_bytes);
- dml2_printf("DML::%s: DPTEBytesPerRow = %f\n", __func__, DPTEBytesPerRow);
- dml2_printf("DML::%s: meta_row_bytes = %d\n", __func__, meta_row_bytes);
- dml2_printf("DML::%s: dpte_row_bytes = %f\n", __func__, l->dpte_row_bytes);
- dml2_printf("DML::%s: dpte_row_height = %d\n", __func__, dpte_row_height);
- dml2_printf("DML::%s: meta_row_height = %d\n", __func__, meta_row_height);
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, GPUVMEnable);
+ DML_LOG_VERBOSE("DML::%s: ip.max_flip_time_us = %d\n", __func__, max_flip_time_us);
+ DML_LOG_VERBOSE("DML::%s: ip.max_flip_time_lines = %d\n", __func__, max_flip_time_lines);
+ DML_LOG_VERBOSE("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
+ DML_LOG_VERBOSE("DML::%s: TotImmediateFlipBytes = %u\n", __func__, TotImmediateFlipBytes);
+ DML_LOG_VERBOSE("DML::%s: use_lb_flip_bw = %u\n", __func__, use_lb_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: iflip_enable = %u\n", __func__, iflip_enable);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, LineTime);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw_flip = %f\n", __func__, Tno_bw_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip = %f\n", __func__, Tvm_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_flip = %f\n", __func__, Tr0_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip_rounded = %f\n", __func__, Tvm_trips_flip_rounded);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_flip_rounded = %f\n", __func__, Tr0_trips_flip_rounded);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %f\n", __func__, vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: DPTEBytesPerRow = %f\n", __func__, DPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: meta_row_bytes = %d\n", __func__, meta_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_bytes = %f\n", __func__, l->dpte_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height = %d\n", __func__, dpte_row_height);
+ DML_LOG_VERBOSE("DML::%s: meta_row_height = %d\n", __func__, meta_row_height);
+ DML_LOG_VERBOSE("DML::%s: VRatio = %f\n", __func__, VRatio);
#endif
if (TotImmediateFlipBytes > 0 && (GPUVMEnable || dcc_mrq_enable)) {
@@ -6596,9 +6528,9 @@ static void CalculateFlipSchedule(
l->min_row_time = l->min_row_height * LineTime / VRatio;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min_row_time = %f\n", __func__, l->min_row_time);
+ DML_LOG_VERBOSE("DML::%s: min_row_time = %f\n", __func__, l->min_row_time);
#endif
- DML2_ASSERT(l->min_row_time > 0);
+ DML_ASSERT(l->min_row_time > 0);
if (use_lb_flip_bw) {
// For mode check, calculate the flip bw requirement with the worst case flip time
@@ -6619,20 +6551,20 @@ static void CalculateFlipSchedule(
l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded),
l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_flip_time = %f\n", __func__, l->max_flip_time);
- dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_bytes);
- dml2_printf("DML::%s: total row bytes (%d row, hvm ineff scaled) = %f\n", __func__, l->num_rows, l->hvm_scaled_row_bytes);
- dml2_printf("DML::%s: total vm+row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_row_bytes);
- dml2_printf("DML::%s: lb_flip_bw for vm and row = %f\n", __func__, l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip));
- dml2_printf("DML::%s: lb_flip_bw for vm = %f\n", __func__, l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded));
- dml2_printf("DML::%s: lb_flip_bw for row = %f\n", __func__, l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
+ DML_LOG_VERBOSE("DML::%s: max_flip_time = %f\n", __func__, l->max_flip_time);
+ DML_LOG_VERBOSE("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: total row bytes (%f row, hvm ineff scaled) = %f\n", __func__, l->num_rows, l->hvm_scaled_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: total vm+row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for vm and row = %f\n", __func__, l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip));
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for vm = %f\n", __func__, l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded));
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for row = %f\n", __func__, l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
if (l->lb_flip_bw > 0) {
- dml2_printf("DML::%s: mode_support est Tvm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw);
- dml2_printf("DML::%s: mode_support est Tr0_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / l->num_rows);
- dml2_printf("DML::%s: mode_support est dst_y_per_vm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw / LineTime);
- dml2_printf("DML::%s: mode_support est dst_y_per_row_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / LineTime / l->num_rows);
- dml2_printf("DML::%s: Tvm_trips_flip_rounded + 2*Tr0_trips_flip_rounded = %f\n", __func__, (Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded));
+ DML_LOG_VERBOSE("DML::%s: mode_support est Tvm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: mode_support est Tr0_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / l->num_rows);
+ DML_LOG_VERBOSE("DML::%s: mode_support est dst_y_per_vm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw / LineTime);
+ DML_LOG_VERBOSE("DML::%s: mode_support est dst_y_per_row_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / LineTime / l->num_rows);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip_rounded + 2*Tr0_trips_flip_rounded = %f\n", __func__, (Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded));
}
#endif
l->lb_flip_bw = math_max3(l->lb_flip_bw,
@@ -6640,8 +6572,8 @@ static void CalculateFlipSchedule(
(l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: lb_flip_bw for vm reg limit = %f\n", __func__, l->hvm_scaled_vm_bytes / (31 * LineTime) - Tno_bw_flip);
- dml2_printf("DML::%s: lb_flip_bw for row reg limit = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for vm reg limit = %f\n", __func__, l->hvm_scaled_vm_bytes / (31 * LineTime) - Tno_bw_flip);
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for row reg limit = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
#endif
}
@@ -6653,13 +6585,12 @@ static void CalculateFlipSchedule(
} else {
if (iflip_enable) {
l->ImmediateFlipBW = (double)per_pipe_flip_bytes * BandwidthAvailableForImmediateFlip / (double)TotImmediateFlipBytes; // flip_bw(i)
- double portion = (double)per_pipe_flip_bytes / (double)TotImmediateFlipBytes;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: per_pipe_flip_bytes = %d\n", __func__, per_pipe_flip_bytes);
- dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
- dml2_printf("DML::%s: ImmediateFlipBW = %f\n", __func__, l->ImmediateFlipBW);
- dml2_printf("DML::%s: portion of flip bw = %f\n", __func__, portion);
+ DML_LOG_VERBOSE("DML::%s: per_pipe_flip_bytes = %d\n", __func__, per_pipe_flip_bytes);
+ DML_LOG_VERBOSE("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipBW = %f\n", __func__, l->ImmediateFlipBW);
+ DML_LOG_VERBOSE("DML::%s: portion of flip bw = %f\n", __func__, (double)per_pipe_flip_bytes / (double)TotImmediateFlipBytes);
#endif
if (l->ImmediateFlipBW == 0) {
l->Tvm_flip = 0;
@@ -6674,11 +6605,11 @@ static void CalculateFlipSchedule(
LineTime / 4.0);
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, vm_bytes * HostVMInefficiencyFactor);
- dml2_printf("DML::%s: total row bytes (hvm ineff scaled, one row) = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes));
+ DML_LOG_VERBOSE("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, vm_bytes * HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: total row bytes (hvm ineff scaled, one row) = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes));
- dml2_printf("DML::%s: Tvm_flip = %f (bw-based), Tvm_trips_flip = %f (latency-based)\n", __func__, Tno_bw_flip + vm_bytes * HostVMInefficiencyFactor / l->ImmediateFlipBW, Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_flip = %f (bw-based), Tr0_trips_flip = %f (latency-based)\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / l->ImmediateFlipBW, Tr0_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_flip = %f (bw-based), Tvm_trips_flip = %f (latency-based)\n", __func__, Tno_bw_flip + vm_bytes * HostVMInefficiencyFactor / l->ImmediateFlipBW, Tvm_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_flip = %f (bw-based), Tr0_trips_flip = %f (latency-based)\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / l->ImmediateFlipBW, Tr0_trips_flip);
#endif
*dst_y_per_vm_flip = math_ceil2(4.0 * (l->Tvm_flip / LineTime), 1.0) / 4.0;
*dst_y_per_row_flip = math_ceil2(4.0 * (l->Tr0_flip / LineTime), 1.0) / 4.0;
@@ -6711,14 +6642,14 @@ static void CalculateFlipSchedule(
#ifdef __DML_VBA_DEBUG__
if (!use_lb_flip_bw) {
- dml2_printf("DML::%s: dst_y_per_vm_flip = %f (should be < 32)\n", __func__, *dst_y_per_vm_flip);
- dml2_printf("DML::%s: dst_y_per_row_flip = %f (should be < 16)\n", __func__, *dst_y_per_row_flip);
- dml2_printf("DML::%s: Tvm_flip = %f (final)\n", __func__, l->Tvm_flip);
- dml2_printf("DML::%s: Tr0_flip = %f (final)\n", __func__, l->Tr0_flip);
- dml2_printf("DML::%s: Tvm_flip + 2*Tr0_flip = %f (should be <= min_row_time=%f)\n", __func__, l->Tvm_flip + 2 * l->Tr0_flip, l->min_row_time);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_flip = %f (should be < 32)\n", __func__, *dst_y_per_vm_flip);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_flip = %f (should be < 16)\n", __func__, *dst_y_per_row_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_flip = %f (final)\n", __func__, l->Tvm_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_flip = %f (final)\n", __func__, l->Tr0_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_flip + 2*Tr0_flip = %f (should be <= min_row_time=%f)\n", __func__, l->Tvm_flip + 2 * l->Tr0_flip, l->min_row_time);
}
- dml2_printf("DML::%s: final_flip_bw = %f\n", __func__, *final_flip_bw);
- dml2_printf("DML::%s: ImmediateFlipSupportedForPipe = %u\n", __func__, *ImmediateFlipSupportedForPipe);
+ DML_LOG_VERBOSE("DML::%s: final_flip_bw = %f\n", __func__, *final_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipSupportedForPipe = %u\n", __func__, *ImmediateFlipSupportedForPipe);
#endif
}
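The dst_y_per_vm_flip/dst_y_per_row_flip conversions above use the same quarter-line quantization as the prefetch path: dst_y = ceil(4 * T / LineTime) / 4. A worked instance with example timings:

#include <math.h>
#include <stdio.h>

int main(void)
{
	double Tvm_flip = 10.0, LineTime = 8.0;	/* us, illustrative */
	double dst_y = ceil(4.0 * (Tvm_flip / LineTime)) / 4.0;
	printf("dst_y_per_vm_flip = %.2f lines\n", dst_y);	/* 1.25 */
	return 0;
}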
@@ -6736,7 +6667,7 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->UrgentWatermark = p->mmSOCParameters.UrgentLatency + p->mmSOCParameters.ExtraLatency;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
+ DML_LOG_VERBOSE("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
#endif
p->Watermark->USRRetrainingWatermark = p->mmSOCParameters.UrgentLatency + p->mmSOCParameters.ExtraLatency + p->mmSOCParameters.USRRetrainingLatency + p->mmSOCParameters.SMNLatency;
@@ -6755,20 +6686,20 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->temp_read_or_ppt_watermark_us = p->mmSOCParameters.g6_temp_read_blackout_us + p->Watermark->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, p->mmSOCParameters.UrgentLatency);
- dml2_printf("DML::%s: ExtraLatency = %f\n", __func__, p->mmSOCParameters.ExtraLatency);
- dml2_printf("DML::%s: DRAMClockChangeLatency = %f\n", __func__, p->mmSOCParameters.DRAMClockChangeLatency);
- dml2_printf("DML::%s: SREnterPlusExitZ8Time = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitZ8Time);
- dml2_printf("DML::%s: SREnterPlusExitTime = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitTime);
- dml2_printf("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
- dml2_printf("DML::%s: USRRetrainingWatermark = %f\n", __func__, p->Watermark->USRRetrainingWatermark);
- dml2_printf("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, p->Watermark->DRAMClockChangeWatermark);
- dml2_printf("DML::%s: FCLKChangeWatermark = %f\n", __func__, p->Watermark->FCLKChangeWatermark);
- dml2_printf("DML::%s: StutterExitWatermark = %f\n", __func__, p->Watermark->StutterExitWatermark);
- dml2_printf("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: Z8StutterExitWatermark = %f\n", __func__, p->Watermark->Z8StutterExitWatermark);
- dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->Z8StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: temp_read_or_ppt_watermark_us = %f\n", __func__, p->Watermark->temp_read_or_ppt_watermark_us);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, p->mmSOCParameters.UrgentLatency);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatency = %f\n", __func__, p->mmSOCParameters.ExtraLatency);
+ DML_LOG_VERBOSE("DML::%s: DRAMClockChangeLatency = %f\n", __func__, p->mmSOCParameters.DRAMClockChangeLatency);
+ DML_LOG_VERBOSE("DML::%s: SREnterPlusExitZ8Time = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitZ8Time);
+ DML_LOG_VERBOSE("DML::%s: SREnterPlusExitTime = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitTime);
+ DML_LOG_VERBOSE("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingWatermark = %f\n", __func__, p->Watermark->USRRetrainingWatermark);
+ DML_LOG_VERBOSE("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, p->Watermark->DRAMClockChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: FCLKChangeWatermark = %f\n", __func__, p->Watermark->FCLKChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: StutterExitWatermark = %f\n", __func__, p->Watermark->StutterExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->StutterEnterPlusExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterExitWatermark = %f\n", __func__, p->Watermark->Z8StutterExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->Z8StutterEnterPlusExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: temp_read_or_ppt_watermark_us = %f\n", __func__, p->Watermark->temp_read_or_ppt_watermark_us);
#endif
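The watermarks above stack latencies on top of the urgent term: UrgentWatermark = UrgentLatency + ExtraLatency, and USRRetrainingWatermark adds the retraining and SMN latencies on top of that. A minimal sketch of just those two relations as shown in this hunk (field names mirror the log strings; no values here are defaults):

struct watermark_sketch {
	double urgent_us;
	double usr_retraining_us;
};

static struct watermark_sketch make_watermarks(double urgent_lat_us, double extra_lat_us,
					       double usr_lat_us, double smn_lat_us)
{
	struct watermark_sketch w;
	w.urgent_us = urgent_lat_us + extra_lat_us;
	w.usr_retraining_us = w.urgent_us + usr_lat_us + smn_lat_us;
	return w;
}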
s->TotalActiveWriteback = 0;
@@ -6801,11 +6732,11 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->WritebackFCLKChangeWatermark = p->Watermark->WritebackFCLKChangeWatermark + p->mmSOCParameters.USRRetrainingLatency;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: WritebackDRAMClockChangeWatermark = %f\n", __func__, p->Watermark->WritebackDRAMClockChangeWatermark);
- dml2_printf("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, p->Watermark->WritebackFCLKChangeWatermark);
- dml2_printf("DML::%s: WritebackUrgentWatermark = %f\n", __func__, p->Watermark->WritebackUrgentWatermark);
- dml2_printf("DML::%s: USRRetrainingRequired = %u\n", __func__, p->USRRetrainingRequired);
- dml2_printf("DML::%s: USRRetrainingLatency = %f\n", __func__, p->mmSOCParameters.USRRetrainingLatency);
+ DML_LOG_VERBOSE("DML::%s: WritebackDRAMClockChangeWatermark = %f\n", __func__, p->Watermark->WritebackDRAMClockChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, p->Watermark->WritebackFCLKChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: WritebackUrgentWatermark = %f\n", __func__, p->Watermark->WritebackUrgentWatermark);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingRequired = %u\n", __func__, p->USRRetrainingRequired);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingLatency = %f\n", __func__, p->mmSOCParameters.USRRetrainingLatency);
#endif
s->TotalPixelBW = 0.0;
@@ -6836,11 +6767,11 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
s->LBLatencyHidingSourceLinesC[k] = (unsigned int)(math_min2((double)p->MaxLineBufferLines, math_floor2((double)p->LineBufferSize / LBBitPerPixel / ((double)p->SwathWidthC[k] / math_max2(h_ratio_c, 1.0)), 1)) - (v_taps_c - 1));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MaxLineBufferLines = %u\n", __func__, k, p->MaxLineBufferLines);
- dml2_printf("DML::%s: k=%u, LineBufferSize = %u\n", __func__, k, p->LineBufferSize);
- dml2_printf("DML::%s: k=%u, LBBitPerPixel = %u\n", __func__, k, LBBitPerPixel);
- dml2_printf("DML::%s: k=%u, HRatio = %f\n", __func__, k, h_ratio);
- dml2_printf("DML::%s: k=%u, VTaps = %f\n", __func__, k, v_taps);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaxLineBufferLines = %u\n", __func__, k, p->MaxLineBufferLines);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LineBufferSize = %u\n", __func__, k, p->LineBufferSize);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LBBitPerPixel = %f\n", __func__, k, LBBitPerPixel);
+ DML_LOG_VERBOSE("DML::%s: k=%u, HRatio = %f\n", __func__, k, h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VTaps = %f\n", __func__, k, v_taps);
#endif
s->EffectiveLBLatencyHidingY = s->LBLatencyHidingSourceLinesY[k] / v_ratio * (h_total / pixel_clock_mhz);
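A worked instance of the line-buffer latency hiding term above, with illustrative timing values (not from any real mode): effective_us = lb_source_lines / v_ratio * (h_total / pixel_clock_mhz).

#include <stdio.h>

int main(void)
{
	double lb_source_lines = 8.0, v_ratio = 2.0;
	double h_total = 2200.0, pixel_clock_mhz = 594.0;
	double line_time_us = h_total / pixel_clock_mhz;		/* ~3.70 us */
	double effective_us = lb_source_lines / v_ratio * line_time_us;
	printf("EffectiveLBLatencyHidingY = %.2f us\n", effective_us);	/* ~14.81 */
	return 0;
}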
@@ -6943,16 +6874,16 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
s->sub_vp_lines_l = s->src_y_pstate_l + s->src_y_ahead_l + p->meta_row_height_l[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
- dml2_printf("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
- dml2_printf("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
- dml2_printf("DML::%s: k=%u, SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
- dml2_printf("DML::%s: k=%u, LBLatencyHidingSourceLinesY = %u\n", __func__, k, s->LBLatencyHidingSourceLinesY[k]);
- dml2_printf("DML::%s: k=%u, dst_y_pstate = %u\n", __func__, k, s->dst_y_pstate);
- dml2_printf("DML::%s: k=%u, src_y_pstate_l = %u\n", __func__, k, s->src_y_pstate_l);
- dml2_printf("DML::%s: k=%u, src_y_ahead_l = %u\n", __func__, k, s->src_y_ahead_l);
- dml2_printf("DML::%s: k=%u, meta_row_height_l = %u\n", __func__, k, p->meta_row_height_l[k]);
- dml2_printf("DML::%s: k=%u, sub_vp_lines_l = %u\n", __func__, k, s->sub_vp_lines_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LBLatencyHidingSourceLinesY = %u\n", __func__, k, s->LBLatencyHidingSourceLinesY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_pstate = %u\n", __func__, k, s->dst_y_pstate);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_pstate_l = %u\n", __func__, k, s->src_y_pstate_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_ahead_l = %u\n", __func__, k, s->src_y_ahead_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_height_l = %u\n", __func__, k, p->meta_row_height_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, sub_vp_lines_l = %u\n", __func__, k, s->sub_vp_lines_l);
#endif
p->SubViewportLinesNeededInMALL[k] = s->sub_vp_lines_l;
@@ -6967,10 +6898,10 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->SubViewportLinesNeededInMALL[k] = (unsigned int)(math_max2(s->sub_vp_lines_l, s->sub_vp_lines_c));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, k, p->meta_row_height_c[k]);
- dml2_printf("DML::%s: k=%u, src_y_pstate_c = %u\n", __func__, k, s->src_y_pstate_c);
- dml2_printf("DML::%s: k=%u, src_y_ahead_c = %u\n", __func__, k, s->src_y_ahead_c);
- dml2_printf("DML::%s: k=%u, sub_vp_lines_c = %u\n", __func__, k, s->sub_vp_lines_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, k, p->meta_row_height_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_pstate_c = %u\n", __func__, k, s->src_y_pstate_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_ahead_c = %u\n", __func__, k, s->src_y_ahead_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, sub_vp_lines_c = %u\n", __func__, k, s->sub_vp_lines_c);
#endif
}
}
@@ -6992,10 +6923,10 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DRAMClockChangeSupport = %u\n", __func__, *p->global_dram_clock_change_supported);
- dml2_printf("DML::%s: FCLKChangeSupport = %u\n", __func__, *p->global_fclk_change_supported);
- dml2_printf("DML::%s: MaxActiveFCLKChangeLatencySupported = %f\n", __func__, *p->MaxActiveFCLKChangeLatencySupported);
- dml2_printf("DML::%s: USRRetrainingSupport = %u\n", __func__, *p->USRRetrainingSupport);
+ DML_LOG_VERBOSE("DML::%s: DRAMClockChangeSupport = %u\n", __func__, *p->global_dram_clock_change_supported);
+ DML_LOG_VERBOSE("DML::%s: FCLKChangeSupport = %u\n", __func__, *p->global_fclk_change_supported);
+ DML_LOG_VERBOSE("DML::%s: MaxActiveFCLKChangeLatencySupported = %f\n", __func__, *p->MaxActiveFCLKChangeLatencySupported);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingSupport = %u\n", __func__, *p->USRRetrainingSupport);
#endif
}
@@ -7141,7 +7072,7 @@ static unsigned int get_qos_param_index(unsigned long uclk_freq_khz, const struc
unsigned int index = 0;
for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- dml2_printf("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %d\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
+ DML_LOG_VERBOSE("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %ld\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
if (i == 0)
index = 0;
@@ -7153,32 +7084,30 @@ static unsigned int get_qos_param_index(unsigned long uclk_freq_khz, const struc
break;
}
}
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %d\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, index);
-#endif
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, index);
return index;
}
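The loop above walks the per-uclk DPM table and settles on the entry matching the requested frequency. A hedged sketch of the presumed selection rule (keep the last entry whose minimum_uclk_khz does not exceed the request; the exact tie-breaking in the elided lines may differ):

#include <stddef.h>

static unsigned int qos_index_sketch(unsigned long uclk_khz,
				     const unsigned long *min_uclk_khz, size_t n)
{
	unsigned int index = 0;
	for (size_t i = 0; i < n; i++) {
		if (min_uclk_khz[i] <= uclk_khz)
			index = (unsigned int)i;	/* table assumed sorted ascending */
		else
			break;
	}
	return index;
}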
static unsigned int get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table)
{
unsigned int i;
- bool clk_entry_found = 0;
+ bool clk_entry_found = false;
for (i = 0; i < clk_table->uclk.num_clk_values; i++) {
- dml2_printf("DML::%s: clk_table.uclk.clk_values_khz[%d] = %d\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
+ DML_LOG_VERBOSE("DML::%s: clk_table.uclk.clk_values_khz[%d] = %ld\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
if (uclk_freq_khz == clk_table->uclk.clk_values_khz[i]) {
- clk_entry_found = 1;
+ clk_entry_found = true;
break;
}
}
if (!clk_entry_found)
- DML2_ASSERT(clk_entry_found);
+ DML_ASSERT(clk_entry_found);
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, i);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %lu\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, i);
#endif
return i;
}
@@ -7218,10 +7147,10 @@ static void calculate_hostvm_inefficiency_factor(
if ((*HostVMInefficiencyFactorPrefetch < 4) && (remote_iommu_outstanding_translations < max_outstanding_reqs))
*HostVMInefficiencyFactorPrefetch = 4;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: urg_bandwidth_avail_active_pixel_and_vm = %f\n", __func__, urg_bandwidth_avail_active_pixel_and_vm);
- dml2_printf("DML::%s: urg_bandwidth_avail_active_vm_only = %f\n", __func__, urg_bandwidth_avail_active_vm_only);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, *HostVMInefficiencyFactor);
- dml2_printf("DML::%s: HostVMInefficiencyFactorPrefetch = %f\n", __func__, *HostVMInefficiencyFactorPrefetch);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_avail_active_pixel_and_vm = %f\n", __func__, urg_bandwidth_avail_active_pixel_and_vm);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_avail_active_vm_only = %f\n", __func__, urg_bandwidth_avail_active_vm_only);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, *HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactorPrefetch = %f\n", __func__, *HostVMInefficiencyFactorPrefetch);
#endif
}
}
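The clamp above floors the prefetch inefficiency factor at 4 while the remote IOMMU still has translation headroom. A sketch of the helper's shape; the ratio feeding the clamp is assumed to be pixel+vm bandwidth over vm-only bandwidth, which this hunk's debug prints suggest but do not show:

static double hostvm_ineff_prefetch_sketch(double pixel_and_vm_bw, double vm_only_bw,
					   unsigned int outstanding_translations,
					   unsigned int max_outstanding_reqs)
{
	double f = pixel_and_vm_bw / vm_only_bw;
	if (f < 4 && outstanding_translations < max_outstanding_reqs)
		f = 4;	/* the floor applied above */
	return f;
}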
@@ -7335,30 +7264,659 @@ static void calculate_pstate_keepout_dst_lines(
}
}
+static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_internal_display_mode_lib *mode_lib,
+ const struct dml2_display_cfg *display_cfg)
+{
+ struct dml2_core_calcs_mode_support_locals *s = &mode_lib->scratch.dml_core_mode_support_locals;
+ struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
+ struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
+ struct dml2_core_calcs_calculate_peak_bandwidth_required_params *calculate_peak_bandwidth_params = &mode_lib->scratch.calculate_peak_bandwidth_params;
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *CheckGlobalPrefetchAdmissibility_params = &mode_lib->scratch.CheckGlobalPrefetchAdmissibility_params;
+#endif
+ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *CalculateWatermarks_params = &mode_lib->scratch.CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params;
+
+ double min_return_bw_for_latency;
+ unsigned int k;
+
+ mode_lib->ms.TimeCalc = 24 / mode_lib->ms.dcfclk_deepsleep;
+
+ calculate_hostvm_inefficiency_factor(
+ &s->HostVMInefficiencyFactor,
+ &s->HostVMInefficiencyFactorPrefetch,
+
+ display_cfg->gpuvm_enable,
+ display_cfg->hostvm_enable,
+ mode_lib->ip.remote_iommu_outstanding_translations,
+ mode_lib->soc.max_outstanding_reqs,
+ mode_lib->ms.support.urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_sys_active],
+ mode_lib->ms.support.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_sys_active]);
+
+ mode_lib->ms.Total3dlutActive = 0;
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut)
+ mode_lib->ms.Total3dlutActive = mode_lib->ms.Total3dlutActive + 1;
+
+ // Calculate tdlut schedule related terms
+ calculate_tdlut_setting_params->dispclk_mhz = mode_lib->ms.RequiredDISPCLK;
+ calculate_tdlut_setting_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
+ calculate_tdlut_setting_params->tdlut_width_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_width_mode;
+ calculate_tdlut_setting_params->tdlut_addressing_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_addressing_mode;
+ calculate_tdlut_setting_params->cursor_buffer_size = mode_lib->ip.cursor_buffer_size;
+ calculate_tdlut_setting_params->gpuvm_enable = display_cfg->gpuvm_enable;
+ calculate_tdlut_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
+ calculate_tdlut_setting_params->tdlut_mpc_width_flag = display_cfg->plane_descriptors[k].tdlut.tdlut_mpc_width_flag;
+ calculate_tdlut_setting_params->is_gfx11 = dml_get_gfx_version(display_cfg->plane_descriptors[k].surface.tiling);
+
+ // output
+ calculate_tdlut_setting_params->tdlut_pte_bytes_per_frame = &s->tdlut_pte_bytes_per_frame[k];
+ calculate_tdlut_setting_params->tdlut_bytes_per_frame = &s->tdlut_bytes_per_frame[k];
+ calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
+ calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
+ calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
+ calculate_tdlut_setting_params->tdlut_bytes_to_deliver = &s->tdlut_bytes_to_deliver[k];
+ calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
+
+ calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
+ }
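+
+ /* This per-plane block shows the calling convention used throughout the
+  * file: inputs and output pointers are staged into a scratch parameter
+  * struct, then the worker takes a single argument. A stripped-down sketch
+  * of the pattern (two fields standing in for the dozen real ones; the 3D
+  * LUT size below is an invented example, not a driver constant):
+  *
+  *	struct tdlut_params_sketch {
+  *		int setup_for_tdlut;			// input
+  *		unsigned int *tdlut_bytes_per_frame;	// output
+  *	};
+  *
+  *	static void calculate_tdlut_setting_sketch(struct tdlut_params_sketch *p)
+  *	{
+  *		// e.g. a 17x17x17 LUT with 4-byte entries; illustrative only
+  *		*p->tdlut_bytes_per_frame = p->setup_for_tdlut ? 17 * 17 * 17 * 4 : 0;
+  *	}
+  */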
+
+ min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active];
+
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
+ s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
+
+ CalculateExtraLatency(
+ display_cfg,
+ mode_lib->ip.rob_buffer_size_kbytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
+ s->ReorderingBytes,
+ mode_lib->ms.DCFCLK,
+ mode_lib->ms.FabricClock,
+ mode_lib->ip.pixel_chunk_size_kbytes,
+ min_return_bw_for_latency,
+ mode_lib->ms.num_active_planes,
+ mode_lib->ms.NoOfDPP,
+ mode_lib->ms.dpte_group_bytes,
+ s->tdlut_bytes_per_group,
+ s->HostVMInefficiencyFactor,
+ s->HostVMInefficiencyFactorPrefetch,
+ mode_lib->soc.hostvm_min_page_size_kbytes,
+ mode_lib->soc.qos_parameters.qos_type,
+ !(display_cfg->overrides.max_outstanding_when_urgent_expected_disable),
+ mode_lib->soc.max_outstanding_reqs,
+ mode_lib->ms.support.request_size_bytes_luma,
+ mode_lib->ms.support.request_size_bytes_chroma,
+ mode_lib->ip.meta_chunk_size_kbytes,
+ mode_lib->ip.dchub_arb_to_ret_delay,
+ mode_lib->ms.TripToMemory,
+ mode_lib->ip.hostvm_mode,
+
+ // output
+ &mode_lib->ms.ExtraLatency,
+ &mode_lib->ms.ExtraLatency_sr,
+ &mode_lib->ms.ExtraLatencyPrefetch);
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++)
+ s->impacted_dst_y_pre[k] = 0;
+
+ s->recalc_prefetch_schedule = 0;
+ s->recalc_prefetch_done = 0;
+ do {
+ mode_lib->ms.support.PrefetchSupported = true;
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+ s->pixel_format[k] = display_cfg->plane_descriptors[k].pixel_format;
+
+ s->lb_source_lines_l[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->ms.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
+ s->lb_source_lines_c[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->ms.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
+ struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
+
+ mode_lib->ms.TWait[k] = CalculateTWait(
+ display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
+ mode_lib->ms.UrgLatency,
+ mode_lib->ms.TripToMemory,
+ !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
+ get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), mode_lib->ms.state_idx) : 0.0);
+
+ myPipe->Dppclk = mode_lib->ms.RequiredDPPCLK[k];
+ myPipe->Dispclk = mode_lib->ms.RequiredDISPCLK;
+ myPipe->PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+ myPipe->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
+ myPipe->DPPPerSurface = mode_lib->ms.NoOfDPP[k];
+ myPipe->ScalerEnabled = display_cfg->plane_descriptors[k].composition.scaler_info.enabled;
+ myPipe->VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
+ myPipe->VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
+ myPipe->VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
+ myPipe->VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
+ myPipe->RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle;
+ myPipe->mirrored = display_cfg->plane_descriptors[k].composition.mirrored;
+ myPipe->BlockWidth256BytesY = mode_lib->ms.Read256BlockWidthY[k];
+ myPipe->BlockHeight256BytesY = mode_lib->ms.Read256BlockHeightY[k];
+ myPipe->BlockWidth256BytesC = mode_lib->ms.Read256BlockWidthC[k];
+ myPipe->BlockHeight256BytesC = mode_lib->ms.Read256BlockHeightC[k];
+ myPipe->InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced;
+ myPipe->NumberOfCursors = display_cfg->plane_descriptors[k].cursor.num_cursors;
+ myPipe->VBlank = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active;
+ myPipe->HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total;
+ myPipe->HActive = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active;
+ myPipe->DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable;
+ myPipe->ODMMode = mode_lib->ms.ODMMode[k];
+ myPipe->SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format;
+ myPipe->BytePerPixelY = mode_lib->ms.BytePerPixelY[k];
+ myPipe->BytePerPixelC = mode_lib->ms.BytePerPixelC[k];
+ myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
+
+#ifdef __DML_VBA_DEBUG__
+ DML_LOG_VERBOSE("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: MaximumVStartup = %u\n", __func__, s->MaximumVStartup[k]);
+#endif
+ CalculatePrefetchSchedule_params->display_cfg = display_cfg;
+ CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
+ CalculatePrefetchSchedule_params->myPipe = myPipe;
+ CalculatePrefetchSchedule_params->DSCDelay = mode_lib->ms.DSCDelay[k];
+ CalculatePrefetchSchedule_params->DPPCLKDelaySubtotalPlusCNVCFormater = mode_lib->ip.dppclk_delay_subtotal + mode_lib->ip.dppclk_delay_cnvc_formatter;
+ CalculatePrefetchSchedule_params->DPPCLKDelaySCL = mode_lib->ip.dppclk_delay_scl;
+ CalculatePrefetchSchedule_params->DPPCLKDelaySCLLBOnly = mode_lib->ip.dppclk_delay_scl_lb_only;
+ CalculatePrefetchSchedule_params->DPPCLKDelayCNVCCursor = mode_lib->ip.dppclk_delay_cnvc_cursor;
+ CalculatePrefetchSchedule_params->DISPCLKDelaySubtotal = mode_lib->ip.dispclk_delay_subtotal;
+ CalculatePrefetchSchedule_params->DPP_RECOUT_WIDTH = (unsigned int)(mode_lib->ms.SwathWidthY[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
+ CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
+ CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
+ CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k];
+ CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
+ CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
+ CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
+ CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required;
+ CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes;
+ CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->ms.UrgLatency;
+ CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->ms.ExtraLatencyPrefetch;
+ CalculatePrefetchSchedule_params->TCalc = mode_lib->ms.TimeCalc;
+ CalculatePrefetchSchedule_params->vm_bytes = mode_lib->ms.vm_bytes[k];
+ CalculatePrefetchSchedule_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRow[k];
+ CalculatePrefetchSchedule_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesY[k];
+ CalculatePrefetchSchedule_params->VInitPreFillY = mode_lib->ms.PrefillY[k];
+ CalculatePrefetchSchedule_params->MaxNumSwathY = mode_lib->ms.MaxNumSwathY[k];
+ CalculatePrefetchSchedule_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesC[k];
+ CalculatePrefetchSchedule_params->VInitPreFillC = mode_lib->ms.PrefillC[k];
+ CalculatePrefetchSchedule_params->MaxNumSwathC = mode_lib->ms.MaxNumSwathC[k];
+ CalculatePrefetchSchedule_params->swath_width_luma_ub = mode_lib->ms.swath_width_luma_ub[k];
+ CalculatePrefetchSchedule_params->swath_width_chroma_ub = mode_lib->ms.swath_width_chroma_ub[k];
+ CalculatePrefetchSchedule_params->SwathHeightY = mode_lib->ms.SwathHeightY[k];
+ CalculatePrefetchSchedule_params->SwathHeightC = mode_lib->ms.SwathHeightC[k];
+ CalculatePrefetchSchedule_params->TWait = mode_lib->ms.TWait[k];
+ CalculatePrefetchSchedule_params->Ttrip = mode_lib->ms.TripToMemory;
+ CalculatePrefetchSchedule_params->Turg = mode_lib->ms.UrgLatency;
+ CalculatePrefetchSchedule_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
+ CalculatePrefetchSchedule_params->tdlut_pte_bytes_per_frame = s->tdlut_pte_bytes_per_frame[k];
+ CalculatePrefetchSchedule_params->tdlut_bytes_per_frame = s->tdlut_bytes_per_frame[k];
+ CalculatePrefetchSchedule_params->tdlut_opt_time = s->tdlut_opt_time[k];
+ CalculatePrefetchSchedule_params->tdlut_drain_time = s->tdlut_drain_time[k];
+ CalculatePrefetchSchedule_params->num_cursors = (display_cfg->plane_descriptors[k].cursor.cursor_width > 0);
+ CalculatePrefetchSchedule_params->cursor_bytes_per_chunk = s->cursor_bytes_per_chunk[k];
+ CalculatePrefetchSchedule_params->cursor_bytes_per_line = s->cursor_bytes_per_line[k];
+ CalculatePrefetchSchedule_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable;
+ CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
+ CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->ms.meta_row_bytes[k];
+ CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor[k];
+ CalculatePrefetchSchedule_params->impacted_dst_y_pre = s->impacted_dst_y_pre[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_l = mode_lib->ms.vactive_sw_bw_l[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_c = mode_lib->ms.vactive_sw_bw_c[k];
+
+ // output
+ CalculatePrefetchSchedule_params->DSTXAfterScaler = &s->DSTXAfterScaler[k];
+ CalculatePrefetchSchedule_params->DSTYAfterScaler = &s->DSTYAfterScaler[k];
+ CalculatePrefetchSchedule_params->dst_y_prefetch = &mode_lib->ms.dst_y_prefetch[k];
+ CalculatePrefetchSchedule_params->dst_y_per_vm_vblank = &mode_lib->ms.LinesForVM[k];
+ CalculatePrefetchSchedule_params->dst_y_per_row_vblank = &mode_lib->ms.LinesForDPTERow[k];
+ CalculatePrefetchSchedule_params->VRatioPrefetchY = &mode_lib->ms.VRatioPreY[k];
+ CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k];
+ CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l
+ CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c
+ CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &mode_lib->ms.RequiredPrefetchBWOTO[k];
+ CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k];
+ CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
+ CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k];
+ CalculatePrefetchSchedule_params->prefetch_vmrow_bw = &mode_lib->ms.prefetch_vmrow_bw[k];
+ CalculatePrefetchSchedule_params->Tdmdl_vm = &s->dummy_single[0];
+ CalculatePrefetchSchedule_params->Tdmdl = &s->dummy_single[1];
+ CalculatePrefetchSchedule_params->TSetup = &s->dummy_single[2];
+ CalculatePrefetchSchedule_params->Tvm_trips = &s->Tvm_trips[k];
+ CalculatePrefetchSchedule_params->Tr0_trips = &s->Tr0_trips[k];
+ CalculatePrefetchSchedule_params->Tvm_trips_flip = &s->Tvm_trips_flip[k];
+ CalculatePrefetchSchedule_params->Tr0_trips_flip = &s->Tr0_trips_flip[k];
+ CalculatePrefetchSchedule_params->Tvm_trips_flip_rounded = &s->Tvm_trips_flip_rounded[k];
+ CalculatePrefetchSchedule_params->Tr0_trips_flip_rounded = &s->Tr0_trips_flip_rounded[k];
+ CalculatePrefetchSchedule_params->VUpdateOffsetPix = &s->dummy_integer[0];
+ CalculatePrefetchSchedule_params->VUpdateWidthPix = &s->dummy_integer[1];
+ CalculatePrefetchSchedule_params->VReadyOffsetPix = &s->dummy_integer[2];
+ CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->ms.prefetch_cursor_bw[k];
+ CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k];
+ CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k];
+ CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k];
+ CalculatePrefetchSchedule_params->prefetch_swath_time_us = &s->prefetch_swath_time_us[k];
+
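+				// CalculatePrefetchSchedule() returns true when the prefetch cannot be scheduled in time; fold the per-plane result into the overall PrefetchSupported flag.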
+ mode_lib->ms.NoTimeForPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
+
+ mode_lib->ms.support.PrefetchSupported &= !mode_lib->ms.NoTimeForPrefetch[k];
+ DML_LOG_VERBOSE("DML::%s: k=%d, dst_y_per_vm_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: k=%d, dst_y_per_row_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_row_vblank);
+ } // for k num_planes
+
+ CalculateDCFCLKDeepSleepTdlut(
+ display_cfg,
+ mode_lib->ms.num_active_planes,
+ mode_lib->ms.BytePerPixelY,
+ mode_lib->ms.BytePerPixelC,
+ mode_lib->ms.SwathWidthY,
+ mode_lib->ms.SwathWidthC,
+ mode_lib->ms.NoOfDPP,
+ mode_lib->ms.PSCL_FACTOR,
+ mode_lib->ms.PSCL_FACTOR_CHROMA,
+ mode_lib->ms.RequiredDPPCLK,
+ mode_lib->ms.vactive_sw_bw_l,
+ mode_lib->ms.vactive_sw_bw_c,
+ mode_lib->soc.return_bus_width_bytes,
+ mode_lib->ms.RequiredDISPCLK,
+ s->tdlut_bytes_to_deliver,
+ s->prefetch_swath_time_us,
+
+ /* Output */
+ &mode_lib->ms.dcfclk_deepsleep);
+
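+			// Sanity-check the per-plane prefetch schedule: dst_y_prefetch must be at least 2 lines, the VM fetch must take fewer than 32 lines, the DPTE row fetch fewer than 16 lines, and the post-scaler Y position at most 8 lines.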
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (mode_lib->ms.dst_y_prefetch[k] < 2.0
+ || mode_lib->ms.LinesForVM[k] >= 32.0
+ || mode_lib->ms.LinesForDPTERow[k] >= 16.0
+ || mode_lib->ms.NoTimeForPrefetch[k] == true
+ || s->DSTYAfterScaler[k] > 8) {
+ mode_lib->ms.support.PrefetchSupported = false;
+ DML_LOG_VERBOSE("DML::%s: k=%d, dst_y_prefetch=%f (should not be < 2)\n", __func__, k, mode_lib->ms.dst_y_prefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, LinesForVM=%f (should not be >= 32)\n", __func__, k, mode_lib->ms.LinesForVM[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, LinesForDPTERow=%f (should not be >= 16)\n", __func__, k, mode_lib->ms.LinesForDPTERow[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DSTYAfterScaler=%d (should be <= 8)\n", __func__, k, s->DSTYAfterScaler[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]);
+ }
+ }
+
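+			// Dynamic metadata is supported only if every active plane had time for it in its prefetch schedule.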
+ mode_lib->ms.support.DynamicMetadataSupported = true;
+ for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
+ if (mode_lib->ms.NoTimeForDynamicMetadata[k] == true) {
+ mode_lib->ms.support.DynamicMetadataSupported = false;
+ }
+ }
+
+ mode_lib->ms.support.VRatioInPrefetchSupported = true;
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
+ mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
+ mode_lib->ms.support.VRatioInPrefetchSupported = false;
+ DML_LOG_VERBOSE("DML::%s: k=%d VRatioPreY = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: k=%d VRatioPreC = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: VRatioInPrefetchSupported = %u\n", __func__, mode_lib->ms.support.VRatioInPrefetchSupported);
+ }
+ }
+
+ mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.VRatioInPrefetchSupported;
+
+ // By default, do not recalc prefetch schedule
+ s->recalc_prefetch_schedule = 0;
+
+			// Only do the urgent vs. prefetch bandwidth check, the flip schedule check, and the power-saving feature support check IF the prefetch schedule check is ok
+ if (mode_lib->ms.support.PrefetchSupported) {
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ // Calculate Urgent burst factor for prefetch
+#ifdef __DML_VBA_DEBUG__
+ DML_LOG_VERBOSE("DML::%s: k=%d, Calling CalculateUrgentBurstFactor (for prefetch)\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPreY=%f\n", __func__, k, mode_lib->ms.VRatioPreY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPreC=%f\n", __func__, k, mode_lib->ms.VRatioPreC[k]);
+#endif
+ CalculateUrgentBurstFactor(
+ &display_cfg->plane_descriptors[k],
+ mode_lib->ms.swath_width_luma_ub[k],
+ mode_lib->ms.swath_width_chroma_ub[k],
+ mode_lib->ms.SwathHeightY[k],
+ mode_lib->ms.SwathHeightC[k],
+ s->line_times[k],
+ mode_lib->ms.UrgLatency,
+ mode_lib->ms.VRatioPreY[k],
+ mode_lib->ms.VRatioPreC[k],
+ mode_lib->ms.BytePerPixelInDETY[k],
+ mode_lib->ms.BytePerPixelInDETC[k],
+ mode_lib->ms.DETBufferSizeY[k],
+ mode_lib->ms.DETBufferSizeC[k],
+ /* Output */
+ &mode_lib->ms.UrgentBurstFactorLumaPre[k],
+ &mode_lib->ms.UrgentBurstFactorChromaPre[k],
+ &mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
+ }
+
+				// Calculate the urgent bandwidth required, both urgent and non-urgent peak bandwidth
+				// (flip bw is assumed to be 0 at this point)
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++)
+ mode_lib->ms.final_flip_bw[k] = 0;
+
+ calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = mode_lib->ms.support.urg_vactive_bandwidth_required;
+ calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required;
+ calculate_peak_bandwidth_params->urg_bandwidth_required_qual = mode_lib->ms.support.urg_bandwidth_required_qual;
+ calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required;
+ calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = mode_lib->ms.surface_avg_vactive_required_bw;
+ calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
+
+ calculate_peak_bandwidth_params->display_cfg = display_cfg;
+ calculate_peak_bandwidth_params->inc_flip_bw = 0;
+ calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
+ calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
+ calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
+ calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
+
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
+ calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
+ calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
+ calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
+ calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
+ calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
+ calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
+ calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
+
+ calculate_peak_bandwidth_required(
+ &mode_lib->scratch,
+ calculate_peak_bandwidth_params);
+
+ // Check urg peak bandwidth against available urg bw
+				// check at SDP and DRAM, for all soc states (SVP prefetch and Sys Active)
+ check_urgent_bandwidth_support(
+ &s->dummy_single[0], // double* frac_urg_bandwidth
+ &s->dummy_single[1], // double* frac_urg_bandwidth_mall
+ &mode_lib->ms.support.UrgVactiveBandwidthSupport,
+ &mode_lib->ms.support.PrefetchBandwidthSupported,
+
+ mode_lib->soc.mall_allocated_for_dcn_mbytes,
+ mode_lib->ms.support.non_urg_bandwidth_required,
+ mode_lib->ms.support.urg_vactive_bandwidth_required,
+ mode_lib->ms.support.urg_bandwidth_required,
+ mode_lib->ms.support.urg_bandwidth_available);
+
+ mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.PrefetchBandwidthSupported;
+ DML_LOG_VERBOSE("DML::%s: PrefetchBandwidthSupported=%0d\n", __func__, mode_lib->ms.support.PrefetchBandwidthSupported);
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]) {
+ mode_lib->ms.support.PrefetchSupported = false;
+ DML_LOG_VERBOSE("DML::%s: k=%d, NotEnoughUrgentLatencyHidingPre=%d\n", __func__, k, mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
+ }
+ }
+
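+				// When DML_GLOBAL_PREFETCH_CHECK is enabled, also verify that the combined multi-plane prefetch demand fits the available buffering; this can request a single prefetch-schedule recalculation with the impacted Tpre values.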
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ if (mode_lib->ms.support.PrefetchSupported && mode_lib->ms.num_active_planes > 1 && s->recalc_prefetch_done == 0) {
+ CheckGlobalPrefetchAdmissibility_params->num_active_planes = mode_lib->ms.num_active_planes;
+ CheckGlobalPrefetchAdmissibility_params->pixel_format = s->pixel_format;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_l = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_c = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_l = s->lb_source_lines_l;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_c = s->lb_source_lines_c;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_l = mode_lib->ms.SwathHeightY;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_c = mode_lib->ms.SwathHeightC;
+ CheckGlobalPrefetchAdmissibility_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
+ CheckGlobalPrefetchAdmissibility_params->compressed_buffer_size_kbytes = mode_lib->ms.CompressedBufferSizeInkByte;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_l = mode_lib->ms.DETBufferSizeY;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_c = mode_lib->ms.DETBufferSizeC;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_l = s->full_swath_bytes_l;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_c = s->full_swath_bytes_c;
+ CheckGlobalPrefetchAdmissibility_params->prefetch_sw_bytes = s->prefetch_sw_bytes;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_rounded = s->Tpre_rounded;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_oto = s->Tpre_oto;
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = mode_lib->ms.support.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
+ CheckGlobalPrefetchAdmissibility_params->line_time = s->line_times;
+ CheckGlobalPrefetchAdmissibility_params->dst_y_prefetch = mode_lib->ms.dst_y_prefetch;
+ if (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps < 10 * 1024)
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = 10 * 1024;
+
+ CheckGlobalPrefetchAdmissibility_params->estimated_dcfclk_mhz = (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps / (double) mode_lib->soc.return_bus_width_bytes) /
+ ((double)mode_lib->soc.qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100.0);
+
+					// if recalc_prefetch_schedule gets set, the prefetch schedule is recalculated with the new impacted_Tpre; prefetch should then be possible
+ CheckGlobalPrefetchAdmissibility_params->recalc_prefetch_schedule = &s->recalc_prefetch_schedule;
+ CheckGlobalPrefetchAdmissibility_params->impacted_dst_y_pre = s->impacted_dst_y_pre;
+ mode_lib->ms.support.PrefetchSupported = CheckGlobalPrefetchAdmissibility(&mode_lib->scratch, CheckGlobalPrefetchAdmissibility_params);
+ s->recalc_prefetch_done = 1;
+ s->recalc_prefetch_schedule = 1;
+ }
+#endif
+			} // prefetch schedule ok; urgent bw and flip schedule checks done
+ } while (s->recalc_prefetch_schedule);
+
+ // Flip Schedule
+ // Both prefetch schedule and BW okay
+ if (mode_lib->ms.support.PrefetchSupported == true) {
+ mode_lib->ms.BandwidthAvailableForImmediateFlip =
+ get_bandwidth_available_for_immediate_flip(
+ dml2_core_internal_soc_state_sys_active,
+ mode_lib->ms.support.urg_bandwidth_required_qual, // no flip
+ mode_lib->ms.support.urg_bandwidth_available);
+
+ mode_lib->ms.TotImmediateFlipBytes = 0;
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (display_cfg->plane_descriptors[k].immediate_flip) {
+ s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
+ s->HostVMInefficiencyFactor,
+ mode_lib->ms.vm_bytes[k],
+ mode_lib->ms.DPTEBytesPerRow[k],
+ mode_lib->ms.meta_row_bytes[k]);
+ } else {
+ s->per_pipe_flip_bytes[k] = 0;
+ }
+				mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k];
+			}
+
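+			// Compute each plane's flip schedule against the flip byte budget and immediate-flip bandwidth derived above.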
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ CalculateFlipSchedule(
+ &mode_lib->scratch,
+ display_cfg->plane_descriptors[k].immediate_flip,
+ 1, // use_lb_flip_bw
+ s->HostVMInefficiencyFactor,
+ s->Tvm_trips_flip[k],
+ s->Tr0_trips_flip[k],
+ s->Tvm_trips_flip_rounded[k],
+ s->Tr0_trips_flip_rounded[k],
+ display_cfg->gpuvm_enable,
+ mode_lib->ms.vm_bytes[k],
+ mode_lib->ms.DPTEBytesPerRow[k],
+ mode_lib->ms.BandwidthAvailableForImmediateFlip,
+ mode_lib->ms.TotImmediateFlipBytes,
+ display_cfg->plane_descriptors[k].pixel_format,
+ (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)),
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
+ mode_lib->ms.Tno_bw_flip[k],
+ mode_lib->ms.dpte_row_height[k],
+ mode_lib->ms.dpte_row_height_chroma[k],
+ mode_lib->ms.use_one_row_for_frame_flip[k],
+ mode_lib->ip.max_flip_time_us,
+ mode_lib->ip.max_flip_time_lines,
+ s->per_pipe_flip_bytes[k],
+ mode_lib->ms.meta_row_bytes[k],
+ s->meta_row_height_luma[k],
+ s->meta_row_height_chroma[k],
+ mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
+
+ /* Output */
+ &mode_lib->ms.dst_y_per_vm_flip[k],
+ &mode_lib->ms.dst_y_per_row_flip[k],
+ &mode_lib->ms.final_flip_bw[k],
+ &mode_lib->ms.ImmediateFlipSupportedForPipe[k]);
+ }
+
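+			// Recompute the peak bandwidth requirement, this time including the immediate-flip bandwidth (inc_flip_bw = 1).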
+ calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = s->dummy_bw;
+ calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required_flip;
+ calculate_peak_bandwidth_params->urg_bandwidth_required_qual = s->dummy_bw;
+ calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required_flip;
+ calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = s->surface_dummy_bw;
+ calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
+
+ calculate_peak_bandwidth_params->display_cfg = display_cfg;
+ calculate_peak_bandwidth_params->inc_flip_bw = 1;
+ calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
+ calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
+ calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
+ calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
+
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
+ calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
+ calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
+ calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
+ calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
+ calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
+ calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
+ calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
+
+ calculate_peak_bandwidth_required(
+ &mode_lib->scratch,
+ calculate_peak_bandwidth_params);
+
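+			// Check whether the flip-inclusive urgent bandwidth fits within the available urgent bandwidth.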
+ calculate_immediate_flip_bandwidth_support(
+ &s->dummy_single[0], // double* frac_urg_bandwidth_flip
+ &mode_lib->ms.support.ImmediateFlipSupport,
+
+ dml2_core_internal_soc_state_sys_active,
+ mode_lib->ms.support.urg_bandwidth_required_flip,
+ mode_lib->ms.support.non_urg_bandwidth_required_flip,
+ mode_lib->ms.support.urg_bandwidth_available);
+
+ for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false)
+ mode_lib->ms.support.ImmediateFlipSupport = false;
+ }
+
+		} else { // if prefetch is not supported, assume iflip is not supported either
+ mode_lib->ms.support.ImmediateFlipSupport = false;
+ }
+
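+		// Gather the SoC latency and QoS parameters consumed by the watermark calculation below.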
+ s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency;
+ s->mSOCParameters.ExtraLatency = mode_lib->ms.ExtraLatency;
+ s->mSOCParameters.ExtraLatency_sr = mode_lib->ms.ExtraLatency_sr;
+ s->mSOCParameters.WritebackLatency = mode_lib->soc.qos_parameters.writeback.base_latency_us;
+ s->mSOCParameters.DRAMClockChangeLatency = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
+ s->mSOCParameters.FCLKChangeLatency = mode_lib->soc.power_management_parameters.fclk_change_blackout_us;
+ s->mSOCParameters.SRExitTime = mode_lib->soc.power_management_parameters.stutter_exit_latency_us;
+ s->mSOCParameters.SREnterPlusExitTime = mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us;
+ s->mSOCParameters.SRExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_exit_latency_us;
+ s->mSOCParameters.SREnterPlusExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_enter_plus_exit_latency_us;
+ s->mSOCParameters.USRRetrainingLatency = 0;
+ s->mSOCParameters.SMNLatency = 0;
+ s->mSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), mode_lib->ms.state_idx);
+ s->mSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, mode_lib->ms.state_idx);
+ s->mSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock;
+ s->mSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type;
+
+ CalculateWatermarks_params->display_cfg = display_cfg;
+ CalculateWatermarks_params->USRRetrainingRequired = false;
+ CalculateWatermarks_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
+ CalculateWatermarks_params->MaxLineBufferLines = mode_lib->ip.max_line_buffer_lines;
+ CalculateWatermarks_params->LineBufferSize = mode_lib->ip.line_buffer_size_bits;
+ CalculateWatermarks_params->WritebackInterfaceBufferSize = mode_lib->ip.writeback_interface_buffer_size_kbytes;
+ CalculateWatermarks_params->DCFCLK = mode_lib->ms.DCFCLK;
+ CalculateWatermarks_params->SynchronizeTimings = display_cfg->overrides.synchronize_timings;
+ CalculateWatermarks_params->SynchronizeDRRDisplaysForUCLKPStateChange = display_cfg->overrides.synchronize_ddr_displays_for_uclk_pstate_change;
+ CalculateWatermarks_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes;
+ CalculateWatermarks_params->mmSOCParameters = s->mSOCParameters;
+ CalculateWatermarks_params->WritebackChunkSize = mode_lib->ip.writeback_chunk_size_kbytes;
+ CalculateWatermarks_params->SOCCLK = mode_lib->ms.SOCCLK;
+ CalculateWatermarks_params->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
+ CalculateWatermarks_params->DETBufferSizeY = mode_lib->ms.DETBufferSizeY;
+ CalculateWatermarks_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC;
+ CalculateWatermarks_params->SwathHeightY = mode_lib->ms.SwathHeightY;
+ CalculateWatermarks_params->SwathHeightC = mode_lib->ms.SwathHeightC;
+ CalculateWatermarks_params->SwathWidthY = mode_lib->ms.SwathWidthY;
+ CalculateWatermarks_params->SwathWidthC = mode_lib->ms.SwathWidthC;
+ CalculateWatermarks_params->DPPPerSurface = mode_lib->ms.NoOfDPP;
+ CalculateWatermarks_params->BytePerPixelDETY = mode_lib->ms.BytePerPixelInDETY;
+ CalculateWatermarks_params->BytePerPixelDETC = mode_lib->ms.BytePerPixelInDETC;
+ CalculateWatermarks_params->DSTXAfterScaler = s->DSTXAfterScaler;
+ CalculateWatermarks_params->DSTYAfterScaler = s->DSTYAfterScaler;
+ CalculateWatermarks_params->UnboundedRequestEnabled = mode_lib->ms.UnboundedRequestEnabled;
+ CalculateWatermarks_params->CompressedBufferSizeInkByte = mode_lib->ms.CompressedBufferSizeInkByte;
+ CalculateWatermarks_params->meta_row_height_l = s->meta_row_height_luma;
+ CalculateWatermarks_params->meta_row_height_c = s->meta_row_height_chroma;
+
+ // Output
+ CalculateWatermarks_params->Watermark = &mode_lib->ms.support.watermarks; // Watermarks *Watermark
+ CalculateWatermarks_params->DRAMClockChangeSupport = mode_lib->ms.support.DRAMClockChangeSupport;
+ CalculateWatermarks_params->global_dram_clock_change_supported = &mode_lib->ms.support.global_dram_clock_change_supported;
+ CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0]; // double *MaxActiveDRAMClockChangeLatencySupported[]
+ CalculateWatermarks_params->SubViewportLinesNeededInMALL = mode_lib->ms.SubViewportLinesNeededInMALL; // unsigned int SubViewportLinesNeededInMALL[]
+ CalculateWatermarks_params->FCLKChangeSupport = mode_lib->ms.support.FCLKChangeSupport;
+ CalculateWatermarks_params->global_fclk_change_supported = &mode_lib->ms.support.global_fclk_change_supported;
+ CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // double *MaxActiveFCLKChangeLatencySupported
+ CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport;
+ CalculateWatermarks_params->g6_temp_read_support = &mode_lib->ms.support.g6_temp_read_support;
+ CalculateWatermarks_params->VActiveLatencyHidingMargin = mode_lib->ms.VActiveLatencyHidingMargin;
+ CalculateWatermarks_params->VActiveLatencyHidingUs = mode_lib->ms.VActiveLatencyHidingUs;
+
+ CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
+
+ calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->ms.support.watermarks, s->dummy_integer_array[0]);
+ DML_LOG_VERBOSE("DML::%s: Done prefetch calculation\n", __func__);
+
+}
+
static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params)
{
struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib;
const struct dml2_display_cfg *display_cfg = in_out_params->in_display_cfg;
const struct dml2_mcg_min_clock_table *min_clk_table = in_out_params->min_clk_table;
-#if defined(__DML_VBA_DEBUG__)
- double old_ReadBandwidthLuma;
- double old_ReadBandwidthChroma;
-#endif
double outstanding_latency_us = 0;
- double min_return_bw_for_latency;
struct dml2_core_calcs_mode_support_locals *s = &mode_lib->scratch.dml_core_mode_support_locals;
- struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *CalculateWatermarks_params = &mode_lib->scratch.CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params;
struct dml2_core_calcs_CalculateVMRowAndSwath_params *CalculateVMRowAndSwath_params = &mode_lib->scratch.CalculateVMRowAndSwath_params;
struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *CalculateSwathAndDETConfiguration_params = &mode_lib->scratch.CalculateSwathAndDETConfiguration_params;
- struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
-#ifdef DML_GLOBAL_PREFETCH_CHECK
- struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *CheckGlobalPrefetchAdmissibility_params = &mode_lib->scratch.CheckGlobalPrefetchAdmissibility_params;
-#endif
- struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
struct dml2_core_calcs_calculate_mcache_setting_params *calculate_mcache_setting_params = &mode_lib->scratch.calculate_mcache_setting_params;
- struct dml2_core_calcs_calculate_peak_bandwidth_required_params *calculate_peak_bandwidth_params = &mode_lib->scratch.calculate_peak_bandwidth_params;
struct dml2_core_calcs_calculate_bytes_to_fetch_required_to_hide_latency_params *calculate_bytes_to_fetch_required_to_hide_latency_params = &mode_lib->scratch.calculate_bytes_to_fetch_required_to_hide_latency_params;
unsigned int k, m, n;
@@ -7374,9 +7932,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.FabricClock = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz / 1000);
mode_lib->ms.MaxDCFCLK = (double)min_clk_table->max_clocks_khz.dcfclk / 1000;
mode_lib->ms.MaxFabricClock = (double)min_clk_table->max_clocks_khz.fclk / 1000;
- mode_lib->ms.max_dispclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dispclk / 1000;
+ mode_lib->ms.max_dispclk_freq_mhz = (double)min_clk_table->max_ss_clocks_khz.dispclk / 1000;
mode_lib->ms.max_dscclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dscclk / 1000;
- mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dppclk / 1000;
+ mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_ss_clocks_khz.dppclk / 1000;
mode_lib->ms.uclk_freq_mhz = dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config);
mode_lib->ms.dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps / 1000);
mode_lib->ms.max_dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[min_clk_table->dram_bw_table.num_entries - 1].pre_derate_dram_bw_kbps / 1000);
@@ -7384,25 +7942,25 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index((unsigned int) (mode_lib->ms.uclk_freq_mhz * 1000.0), &mode_lib->soc.clk_table);
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: --- START --- \n", __func__);
- dml2_printf("DML::%s: num_active_planes = %u\n", __func__, mode_lib->ms.num_active_planes);
- dml2_printf("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: qos_param_index = %0d\n", __func__, mode_lib->ms.qos_param_index);
- dml2_printf("DML::%s: SOCCLK = %f\n", __func__, mode_lib->ms.SOCCLK);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->ms.dram_bw_mbps);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
- dml2_printf("DML::%s: DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
- dml2_printf("DML::%s: MaxDCFCLK = %f\n", __func__, mode_lib->ms.MaxDCFCLK);
- dml2_printf("DML::%s: max_dispclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dispclk_freq_mhz);
- dml2_printf("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz);
- dml2_printf("DML::%s: max_dppclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dppclk_freq_mhz);
- dml2_printf("DML::%s: MaxFabricClock = %f\n", __func__, mode_lib->ms.MaxFabricClock);
- dml2_printf("DML::%s: ip.compressed_buffer_segment_size_in_kbytes = %u\n", __func__, mode_lib->ip.compressed_buffer_segment_size_in_kbytes);
- dml2_printf("DML::%s: ip.dcn_mrq_present = %u\n", __func__, mode_lib->ip.dcn_mrq_present);
+ DML_LOG_VERBOSE("DML::%s: --- START --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: num_active_planes = %u\n", __func__, mode_lib->ms.num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: qos_param_index = %0d\n", __func__, mode_lib->ms.qos_param_index);
+ DML_LOG_VERBOSE("DML::%s: SOCCLK = %f\n", __func__, mode_lib->ms.SOCCLK);
+ DML_LOG_VERBOSE("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->ms.dram_bw_mbps);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
+ DML_LOG_VERBOSE("DML::%s: MaxDCFCLK = %f\n", __func__, mode_lib->ms.MaxDCFCLK);
+ DML_LOG_VERBOSE("DML::%s: max_dispclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dispclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_dppclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dppclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: MaxFabricClock = %f\n", __func__, mode_lib->ms.MaxFabricClock);
+ DML_LOG_VERBOSE("DML::%s: ip.compressed_buffer_segment_size_in_kbytes = %u\n", __func__, mode_lib->ip.compressed_buffer_segment_size_in_kbytes);
+ DML_LOG_VERBOSE("DML::%s: ip.dcn_mrq_present = %u\n", __func__, mode_lib->ip.dcn_mrq_present);
for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: plane_%d: reserved_vblank_time_ns = %lu\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
#endif
CalculateMaxDETAndMinCompressedBufferSize(
@@ -7504,12 +8062,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->plane_descriptors[k].cursor.cursor_bpp / 8.0 / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
#ifdef __DML_VBA_DEBUG__
- old_ReadBandwidthLuma = mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelInDETY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- old_ReadBandwidthChroma = mode_lib->ms.SwathWidthYSingleDPP[k] / 2 * math_ceil2(mode_lib->ms.BytePerPixelInDETC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio / 2.0;
- dml2_printf("DML::%s: k=%u, old_ReadBandwidthLuma = %f\n", __func__, k, old_ReadBandwidthLuma);
- dml2_printf("DML::%s: k=%u, old_ReadBandwidthChroma = %f\n", __func__, k, old_ReadBandwidthChroma);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, old_ReadBandwidthLuma = %f\n", __func__, k, mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelInDETY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u, old_ReadBandwidthChroma = %f\n", __func__, k, mode_lib->ms.SwathWidthYSingleDPP[k] / 2 * math_ceil2(mode_lib->ms.BytePerPixelInDETC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio / 2.0);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_c[k]);
#endif
}
@@ -7629,13 +8185,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.MaximumSwathWidthLuma[k] = math_min2(s->MaximumSwathWidthSupportLuma, mode_lib->ms.MaximumSwathWidthInLineBufferLuma);
mode_lib->ms.MaximumSwathWidthChroma[k] = math_min2(s->MaximumSwathWidthSupportChroma, mode_lib->ms.MaximumSwathWidthInLineBufferChroma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthLuma[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthSupportLuma=%u\n", __func__, k, s->MaximumSwathWidthSupportLuma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthInLineBufferLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferLuma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthSupportLuma=%u\n", __func__, k, s->MaximumSwathWidthSupportLuma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthInLineBufferLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferLuma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthChroma[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthSupportChroma=%u\n", __func__, k, s->MaximumSwathWidthSupportChroma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthInLineBufferChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferChroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthSupportChroma=%u\n", __func__, k, s->MaximumSwathWidthSupportChroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthInLineBufferChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferChroma);
}
/* Cursor Support Check */
@@ -7672,11 +8228,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.AlignedCPitch[k] > display_cfg->plane_descriptors[k].surface.plane1.pitch) {
mode_lib->ms.support.PitchSupport = false;
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%u AlignedYPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedYPitch[k]);
- dml2_printf("DML::%s: k=%u PitchY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.pitch);
- dml2_printf("DML::%s: k=%u AlignedCPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedCPitch[k]);
- dml2_printf("DML::%s: k=%u PitchC = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane1.pitch);
- dml2_printf("DML::%s: k=%u PitchSupport = %d\n", __func__, k, mode_lib->ms.support.PitchSupport);
+ DML_LOG_VERBOSE("DML::%s: k=%u AlignedYPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedYPitch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u PitchY = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.pitch);
+ DML_LOG_VERBOSE("DML::%s: k=%u AlignedCPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedCPitch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u PitchC = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane1.pitch);
+ DML_LOG_VERBOSE("DML::%s: k=%u PitchSupport = %d\n", __func__, k, mode_lib->ms.support.PitchSupport);
#endif
}
@@ -7708,11 +8264,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->plane_descriptors[k].composition.viewport.plane0.height > display_cfg->plane_descriptors[k].surface.plane0.height) {
mode_lib->ms.support.ViewportExceedsSurface = true;
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%u ViewportWidth = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
- dml2_printf("DML::%s: k=%u SurfaceWidthY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.width);
- dml2_printf("DML::%s: k=%u ViewportHeight = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
- dml2_printf("DML::%s: k=%u SurfaceHeightY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.height);
- dml2_printf("DML::%s: k=%u ViewportExceedsSurface = %d\n", __func__, k, mode_lib->ms.support.ViewportExceedsSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportWidth = %ld\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
+ DML_LOG_VERBOSE("DML::%s: k=%u SurfaceWidthY = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.width);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportHeight = %ld\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
+ DML_LOG_VERBOSE("DML::%s: k=%u SurfaceHeightY = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.height);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportExceedsSurface = %d\n", __func__, k, mode_lib->ms.support.ViewportExceedsSurface);
#endif
}
if (dml_is_420(display_cfg->plane_descriptors[k].pixel_format) || display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) {
@@ -7894,8 +8450,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.TotalNumberOfActiveDPP = mode_lib->ms.TotalNumberOfActiveDPP + s->NumberOfDPPDSC;
}
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d RequiresDSC = %d\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
- dml2_printf("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d RequiresDSC = %d\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
#endif
// ensure the number dsc slices is integer multiple based on ODM mode
@@ -7911,9 +8467,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.DSCSlicesODMModeSupported = ((mode_lib->ms.support.NumberOfDSCSlices[k] % 4) == 0);
#if defined(__DML_VBA_DEBUG__)
if (!mode_lib->ms.support.DSCSlicesODMModeSupported) {
- dml2_printf("DML::%s: k=%d Invalid dsc num_slices and ODM mode setting\n", __func__, k);
- dml2_printf("DML::%s: k=%d num_slices = %d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.overrides.num_slices);
- dml2_printf("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d Invalid dsc num_slices and ODM mode setting\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%d num_slices = %d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.overrides.num_slices);
+ DML_LOG_VERBOSE("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
}
#endif
} else {
@@ -7958,7 +8514,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 1;
if (!mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
- dml2_printf("WARNING: DML::%s: MPCC is override to disable but viewport is too large to be supported with single pipe!\n", __func__);
+				DML_LOG_VERBOSE("WARNING: DML::%s: MPCC override is set to disable, but the viewport is too large to be supported with a single pipe!\n", __func__);
}
} else {
if ((mode_lib->ms.MinDPPCLKUsingSingleDPP[k] > mode_lib->ms.max_dppclk_freq_mhz) || !mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
@@ -7968,7 +8524,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
}
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d, NoOfDPP = %d\n", __func__, k, mode_lib->ms.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, NoOfDPP = %d\n", __func__, k, mode_lib->ms.NoOfDPP[k]);
#endif
}
@@ -8138,7 +8694,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_rate,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_layout);
- if (mode_lib->ms.RequiredDTBCLK[k] > ((double)min_clk_table->max_clocks_khz.dtbclk / 1000)) {
+ if (mode_lib->ms.RequiredDTBCLK[k] > ((double)min_clk_table->max_ss_clocks_khz.dtbclk / 1000)) {
mode_lib->ms.support.DTBCLKRequiredMoreThanSupported = true;
}
} else {
@@ -8167,7 +8723,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
s->DSCFormatFactor = 1;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, RequiresDSC = %u\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, RequiresDSC = %u\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
#endif
if (mode_lib->ms.RequiresDSC[k] == true) {
s->PixelClockBackEndFactor = 3.0;
@@ -8185,10 +8741,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PixelClockBackEnd = %f\n", __func__, k, s->PixelClockBackEnd[k]);
- dml2_printf("DML::%s: k=%u, required_dscclk_freq_mhz = %f\n", __func__, k, mode_lib->ms.required_dscclk_freq_mhz[k]);
- dml2_printf("DML::%s: k=%u, DSCFormatFactor = %u\n", __func__, k, s->DSCFormatFactor);
- dml2_printf("DML::%s: k=%u, DSCCLKRequiredMoreThanSupported = %u\n", __func__, k, mode_lib->ms.support.DSCCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelClockBackEnd = %f\n", __func__, k, s->PixelClockBackEnd[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, required_dscclk_freq_mhz = %f\n", __func__, k, mode_lib->ms.required_dscclk_freq_mhz[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DSCFormatFactor = %u\n", __func__, k, s->DSCFormatFactor);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DSCCLKRequiredMoreThanSupported = %u\n", __func__, k, mode_lib->ms.support.DSCCLKRequiredMoreThanSupported);
#endif
}
}
@@ -8423,13 +8979,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.DCCMetaBufferSizeNotExceeded = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, DCCMetaBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.DCCMetaBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.PTEBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DCCMetaBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.DCCMetaBufferSizeNotExceeded[k]);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PTEBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.PTEBufferSizeNotExceeded);
- dml2_printf("DML::%s: DCCMetaBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.DCCMetaBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML::%s: PTEBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.PTEBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML::%s: DCCMetaBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.DCCMetaBufferSizeNotExceeded);
#endif
/* VActive bytes to fetch for UCLK P-State */
@@ -8502,7 +9058,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
double line_time_us = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- bool cursor_not_enough_urgent_latency_hiding = 0;
+ bool cursor_not_enough_urgent_latency_hiding = false;
if (display_cfg->plane_descriptors[k].cursor.num_cursors > 0) {
calculate_cursor_req_attributes(
@@ -8531,9 +9087,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.UrgentBurstFactorCursorPre[k] = mode_lib->ms.UrgentBurstFactorCursor[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor\n", __func__, k);
- dml2_printf("DML::%s: k=%d, VRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%d, VRatioChroma=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%d, Calling CalculateUrgentBurstFactor\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioChroma=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
#endif
CalculateUrgentBurstFactor(
@@ -8605,7 +9161,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MaximumVStartup = %u\n", __func__, k, s->MaximumVStartup[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaximumVStartup = %u\n", __func__, k, s->MaximumVStartup[k]);
#endif
/* Immediate Flip and MALL parameters */
@@ -8654,16 +9210,15 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
(s->SubViewportMALLPStateMethod && s->FullFrameMALLPStateMethod) || s->SubViewportMALLRefreshGreaterThan120Hz;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SubViewportMALLPStateMethod = %u\n", __func__, s->SubViewportMALLPStateMethod);
- dml2_printf("DML::%s: PhantomPipeMALLPStateMethod = %u\n", __func__, s->PhantomPipeMALLPStateMethod);
- dml2_printf("DML::%s: FullFrameMALLPStateMethod = %u\n", __func__, s->FullFrameMALLPStateMethod);
- dml2_printf("DML::%s: SubViewportMALLRefreshGreaterThan120Hz = %u\n", __func__, s->SubViewportMALLRefreshGreaterThan120Hz);
- dml2_printf("DML::%s: InvalidCombinationOfMALLUseForPState = %u\n", __func__, mode_lib->ms.support.InvalidCombinationOfMALLUseForPState);
- dml2_printf("DML::%s: in_out_params->min_clk_index = %u\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: mode_lib->ms.DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
- dml2_printf("DML::%s: mode_lib->ms.FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
- dml2_printf("DML::%s: mode_lib->ms.uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
- dml2_printf("DML::%s: urgent latency tolarance = %f\n", __func__, ((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024 / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes)));
+ DML_LOG_VERBOSE("DML::%s: SubViewportMALLPStateMethod = %u\n", __func__, s->SubViewportMALLPStateMethod);
+ DML_LOG_VERBOSE("DML::%s: PhantomPipeMALLPStateMethod = %u\n", __func__, s->PhantomPipeMALLPStateMethod);
+ DML_LOG_VERBOSE("DML::%s: FullFrameMALLPStateMethod = %u\n", __func__, s->FullFrameMALLPStateMethod);
+ DML_LOG_VERBOSE("DML::%s: SubViewportMALLRefreshGreaterThan120Hz = %u\n", __func__, s->SubViewportMALLRefreshGreaterThan120Hz);
+ DML_LOG_VERBOSE("DML::%s: InvalidCombinationOfMALLUseForPState = %u\n", __func__, mode_lib->ms.support.InvalidCombinationOfMALLUseForPState);
+ DML_LOG_VERBOSE("DML::%s: in_out_params->min_clk_index = %u\n", __func__, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: mode_lib->ms.DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: mode_lib->ms.FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
+ DML_LOG_VERBOSE("DML::%s: mode_lib->ms.uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
#endif
mode_lib->ms.support.OutstandingRequestsSupport = true;
@@ -8703,10 +9258,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_urgent_latency_us);
- dml2_printf("DML::%s: avg_non_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_non_urgent_latency_us);
- dml2_printf("DML::%s: k=%d, request_size_bytes_luma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_luma[k]);
- dml2_printf("DML::%s: k=%d, outstanding_latency_us = %f (luma)\n", __func__, k, outstanding_latency_us);
+ DML_LOG_VERBOSE("DML::%s: avg_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_urgent_latency_us);
+ DML_LOG_VERBOSE("DML::%s: avg_non_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_non_urgent_latency_us);
+ DML_LOG_VERBOSE("DML::%s: k=%d, request_size_bytes_luma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, outstanding_latency_us = %f (luma)\n", __func__, k, outstanding_latency_us);
#endif
}
@@ -8722,8 +9277,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance = false;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, request_size_bytes_chroma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_chroma[k]);
- dml2_printf("DML::%s: k=%d, outstanding_latency_us = %f (chroma)\n", __func__, k, outstanding_latency_us);
+ DML_LOG_VERBOSE("DML::%s: k=%d, request_size_bytes_chroma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, outstanding_latency_us = %f (chroma)\n", __func__, k, outstanding_latency_us);
#endif
}
}
@@ -8869,7 +9424,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
if (mode_lib->ms.NotEnoughUrgentLatencyHiding[k]) {
mode_lib->ms.support.EnoughUrgentLatencyHidingSupport = false;
- dml2_printf("DML::%s: k=%u NotEnoughUrgentLatencyHiding set\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u NotEnoughUrgentLatencyHiding set\n", __func__, k);
}
}
@@ -8878,639 +9433,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if (!mode_lib->ms.support.avg_bandwidth_support_ok[m][n] && (m == dml2_core_internal_soc_state_sys_active || mode_lib->soc.mall_allocated_for_dcn_mbytes > 0)) {
mode_lib->ms.support.AvgBandwidthSupport = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_support_ok[%s][%s] not ok\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n));
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_support_ok[%s][%s] not ok\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n));
#endif
}
}
}
- /* Prefetch Check */
- {
- mode_lib->ms.TimeCalc = 24 / mode_lib->ms.dcfclk_deepsleep;
-
- calculate_hostvm_inefficiency_factor(
- &s->HostVMInefficiencyFactor,
- &s->HostVMInefficiencyFactorPrefetch,
-
- display_cfg->gpuvm_enable,
- display_cfg->hostvm_enable,
- mode_lib->ip.remote_iommu_outstanding_translations,
- mode_lib->soc.max_outstanding_reqs,
- mode_lib->ms.support.urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_sys_active],
- mode_lib->ms.support.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_sys_active]);
-
- mode_lib->ms.Total3dlutActive = 0;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut)
- mode_lib->ms.Total3dlutActive = mode_lib->ms.Total3dlutActive + 1;
-
- // Calculate tdlut schedule related terms
- calculate_tdlut_setting_params->dispclk_mhz = mode_lib->ms.RequiredDISPCLK;
- calculate_tdlut_setting_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
- calculate_tdlut_setting_params->tdlut_width_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_width_mode;
- calculate_tdlut_setting_params->tdlut_addressing_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_addressing_mode;
- calculate_tdlut_setting_params->cursor_buffer_size = mode_lib->ip.cursor_buffer_size;
- calculate_tdlut_setting_params->gpuvm_enable = display_cfg->gpuvm_enable;
- calculate_tdlut_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
- calculate_tdlut_setting_params->tdlut_mpc_width_flag = display_cfg->plane_descriptors[k].tdlut.tdlut_mpc_width_flag;
- calculate_tdlut_setting_params->is_gfx11 = dml_get_gfx_version(display_cfg->plane_descriptors[k].surface.tiling);
-
- // output
- calculate_tdlut_setting_params->tdlut_pte_bytes_per_frame = &s->tdlut_pte_bytes_per_frame[k];
- calculate_tdlut_setting_params->tdlut_bytes_per_frame = &s->tdlut_bytes_per_frame[k];
- calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
- calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
- calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
- calculate_tdlut_setting_params->tdlut_bytes_to_deliver = &s->tdlut_bytes_to_deliver[k];
- calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
-
- calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
- }
-
- min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active];
-
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
- s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
-
- CalculateExtraLatency(
- display_cfg,
- mode_lib->ip.rob_buffer_size_kbytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
- s->ReorderingBytes,
- mode_lib->ms.DCFCLK,
- mode_lib->ms.FabricClock,
- mode_lib->ip.pixel_chunk_size_kbytes,
- min_return_bw_for_latency,
- mode_lib->ms.num_active_planes,
- mode_lib->ms.NoOfDPP,
- mode_lib->ms.dpte_group_bytes,
- s->tdlut_bytes_per_group,
- s->HostVMInefficiencyFactor,
- s->HostVMInefficiencyFactorPrefetch,
- mode_lib->soc.hostvm_min_page_size_kbytes,
- mode_lib->soc.qos_parameters.qos_type,
- !(display_cfg->overrides.max_outstanding_when_urgent_expected_disable),
- mode_lib->soc.max_outstanding_reqs,
- mode_lib->ms.support.request_size_bytes_luma,
- mode_lib->ms.support.request_size_bytes_chroma,
- mode_lib->ip.meta_chunk_size_kbytes,
- mode_lib->ip.dchub_arb_to_ret_delay,
- mode_lib->ms.TripToMemory,
- mode_lib->ip.hostvm_mode,
-
- // output
- &mode_lib->ms.ExtraLatency,
- &mode_lib->ms.ExtraLatency_sr,
- &mode_lib->ms.ExtraLatencyPrefetch);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- s->impacted_dst_y_pre[k] = 0;
-
- s->recalc_prefetch_schedule = 0;
- s->recalc_prefetch_done = 0;
- do {
- mode_lib->ms.support.PrefetchSupported = true;
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- s->pixel_format[k] = display_cfg->plane_descriptors[k].pixel_format;
-
- s->lb_source_lines_l[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
- mode_lib->ms.NoOfDPP[k],
- display_cfg->plane_descriptors[k].composition.viewport.plane0.width,
- display_cfg->plane_descriptors[k].composition.viewport.plane0.height,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
- display_cfg->plane_descriptors[k].composition.rotation_angle);
-
- s->lb_source_lines_c[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
- mode_lib->ms.NoOfDPP[k],
- display_cfg->plane_descriptors[k].composition.viewport.plane1.width,
- display_cfg->plane_descriptors[k].composition.viewport.plane1.height,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
- display_cfg->plane_descriptors[k].composition.rotation_angle);
-
- struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
-
- mode_lib->ms.TWait[k] = CalculateTWait(
- display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
- mode_lib->ms.UrgLatency,
- mode_lib->ms.TripToMemory,
- !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
- get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0);
-
- myPipe->Dppclk = mode_lib->ms.RequiredDPPCLK[k];
- myPipe->Dispclk = mode_lib->ms.RequiredDISPCLK;
- myPipe->PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- myPipe->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
- myPipe->DPPPerSurface = mode_lib->ms.NoOfDPP[k];
- myPipe->ScalerEnabled = display_cfg->plane_descriptors[k].composition.scaler_info.enabled;
- myPipe->VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- myPipe->VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- myPipe->VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
- myPipe->VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
- myPipe->RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle;
- myPipe->mirrored = display_cfg->plane_descriptors[k].composition.mirrored;
- myPipe->BlockWidth256BytesY = mode_lib->ms.Read256BlockWidthY[k];
- myPipe->BlockHeight256BytesY = mode_lib->ms.Read256BlockHeightY[k];
- myPipe->BlockWidth256BytesC = mode_lib->ms.Read256BlockWidthC[k];
- myPipe->BlockHeight256BytesC = mode_lib->ms.Read256BlockHeightC[k];
- myPipe->InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced;
- myPipe->NumberOfCursors = display_cfg->plane_descriptors[k].cursor.num_cursors;
- myPipe->VBlank = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active;
- myPipe->HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- myPipe->HActive = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active;
- myPipe->DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- myPipe->ODMMode = mode_lib->ms.ODMMode[k];
- myPipe->SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format;
- myPipe->BytePerPixelY = mode_lib->ms.BytePerPixelY[k];
- myPipe->BytePerPixelC = mode_lib->ms.BytePerPixelC[k];
- myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
- dml2_printf("DML::%s: MaximumVStartup = %u\n", __func__, s->MaximumVStartup[k]);
-#endif
- CalculatePrefetchSchedule_params->display_cfg = display_cfg;
- CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
- CalculatePrefetchSchedule_params->myPipe = myPipe;
- CalculatePrefetchSchedule_params->DSCDelay = mode_lib->ms.DSCDelay[k];
- CalculatePrefetchSchedule_params->DPPCLKDelaySubtotalPlusCNVCFormater = mode_lib->ip.dppclk_delay_subtotal + mode_lib->ip.dppclk_delay_cnvc_formatter;
- CalculatePrefetchSchedule_params->DPPCLKDelaySCL = mode_lib->ip.dppclk_delay_scl;
- CalculatePrefetchSchedule_params->DPPCLKDelaySCLLBOnly = mode_lib->ip.dppclk_delay_scl_lb_only;
- CalculatePrefetchSchedule_params->DPPCLKDelayCNVCCursor = mode_lib->ip.dppclk_delay_cnvc_cursor;
- CalculatePrefetchSchedule_params->DISPCLKDelaySubtotal = mode_lib->ip.dispclk_delay_subtotal;
- CalculatePrefetchSchedule_params->DPP_RECOUT_WIDTH = (unsigned int)(mode_lib->ms.SwathWidthY[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
- CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
- CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k];
- CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
- CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
- CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
- CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required;
- CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes;
- CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->ms.UrgLatency;
- CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->ms.ExtraLatencyPrefetch;
- CalculatePrefetchSchedule_params->TCalc = mode_lib->ms.TimeCalc;
- CalculatePrefetchSchedule_params->vm_bytes = mode_lib->ms.vm_bytes[k];
- CalculatePrefetchSchedule_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRow[k];
- CalculatePrefetchSchedule_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesY[k];
- CalculatePrefetchSchedule_params->VInitPreFillY = mode_lib->ms.PrefillY[k];
- CalculatePrefetchSchedule_params->MaxNumSwathY = mode_lib->ms.MaxNumSwathY[k];
- CalculatePrefetchSchedule_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesC[k];
- CalculatePrefetchSchedule_params->VInitPreFillC = mode_lib->ms.PrefillC[k];
- CalculatePrefetchSchedule_params->MaxNumSwathC = mode_lib->ms.MaxNumSwathC[k];
- CalculatePrefetchSchedule_params->swath_width_luma_ub = mode_lib->ms.swath_width_luma_ub[k];
- CalculatePrefetchSchedule_params->swath_width_chroma_ub = mode_lib->ms.swath_width_chroma_ub[k];
- CalculatePrefetchSchedule_params->SwathHeightY = mode_lib->ms.SwathHeightY[k];
- CalculatePrefetchSchedule_params->SwathHeightC = mode_lib->ms.SwathHeightC[k];
- CalculatePrefetchSchedule_params->TWait = mode_lib->ms.TWait[k];
- CalculatePrefetchSchedule_params->Ttrip = mode_lib->ms.TripToMemory;
- CalculatePrefetchSchedule_params->Turg = mode_lib->ms.UrgLatency;
- CalculatePrefetchSchedule_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
- CalculatePrefetchSchedule_params->tdlut_pte_bytes_per_frame = s->tdlut_pte_bytes_per_frame[k];
- CalculatePrefetchSchedule_params->tdlut_bytes_per_frame = s->tdlut_bytes_per_frame[k];
- CalculatePrefetchSchedule_params->tdlut_opt_time = s->tdlut_opt_time[k];
- CalculatePrefetchSchedule_params->tdlut_drain_time = s->tdlut_drain_time[k];
- CalculatePrefetchSchedule_params->num_cursors = (display_cfg->plane_descriptors[k].cursor.cursor_width > 0);
- CalculatePrefetchSchedule_params->cursor_bytes_per_chunk = s->cursor_bytes_per_chunk[k];
- CalculatePrefetchSchedule_params->cursor_bytes_per_line = s->cursor_bytes_per_line[k];
- CalculatePrefetchSchedule_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
- CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->ms.meta_row_bytes[k];
- CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor[k];
- CalculatePrefetchSchedule_params->impacted_dst_y_pre = s->impacted_dst_y_pre[k];
- CalculatePrefetchSchedule_params->vactive_sw_bw_l = mode_lib->ms.vactive_sw_bw_l[k];
- CalculatePrefetchSchedule_params->vactive_sw_bw_c = mode_lib->ms.vactive_sw_bw_c[k];
-
- // output
- CalculatePrefetchSchedule_params->DSTXAfterScaler = &s->DSTXAfterScaler[k];
- CalculatePrefetchSchedule_params->DSTYAfterScaler = &s->DSTYAfterScaler[k];
- CalculatePrefetchSchedule_params->dst_y_prefetch = &mode_lib->ms.dst_y_prefetch[k];
- CalculatePrefetchSchedule_params->dst_y_per_vm_vblank = &mode_lib->ms.LinesForVM[k];
- CalculatePrefetchSchedule_params->dst_y_per_row_vblank = &mode_lib->ms.LinesForDPTERow[k];
- CalculatePrefetchSchedule_params->VRatioPrefetchY = &mode_lib->ms.VRatioPreY[k];
- CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k];
- CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l
- CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c
- CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &mode_lib->ms.RequiredPrefetchBWOTO[k];
- CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k];
- CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
- CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k];
- CalculatePrefetchSchedule_params->prefetch_vmrow_bw = &mode_lib->ms.prefetch_vmrow_bw[k];
- CalculatePrefetchSchedule_params->Tdmdl_vm = &s->dummy_single[0];
- CalculatePrefetchSchedule_params->Tdmdl = &s->dummy_single[1];
- CalculatePrefetchSchedule_params->TSetup = &s->dummy_single[2];
- CalculatePrefetchSchedule_params->Tvm_trips = &s->Tvm_trips[k];
- CalculatePrefetchSchedule_params->Tr0_trips = &s->Tr0_trips[k];
- CalculatePrefetchSchedule_params->Tvm_trips_flip = &s->Tvm_trips_flip[k];
- CalculatePrefetchSchedule_params->Tr0_trips_flip = &s->Tr0_trips_flip[k];
- CalculatePrefetchSchedule_params->Tvm_trips_flip_rounded = &s->Tvm_trips_flip_rounded[k];
- CalculatePrefetchSchedule_params->Tr0_trips_flip_rounded = &s->Tr0_trips_flip_rounded[k];
- CalculatePrefetchSchedule_params->VUpdateOffsetPix = &s->dummy_integer[0];
- CalculatePrefetchSchedule_params->VUpdateWidthPix = &s->dummy_integer[1];
- CalculatePrefetchSchedule_params->VReadyOffsetPix = &s->dummy_integer[2];
- CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->ms.prefetch_cursor_bw[k];
- CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k];
- CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k];
- CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k];
- CalculatePrefetchSchedule_params->prefetch_swath_time_us = &s->prefetch_swath_time_us[k];
-
- mode_lib->ms.NoTimeForPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
-
- mode_lib->ms.support.PrefetchSupported &= !mode_lib->ms.NoTimeForPrefetch[k];
- dml2_printf("DML::%s: k=%d, dst_y_per_vm_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: k=%d, dst_y_per_row_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_row_vblank);
- } // for k num_planes
-
- CalculateDCFCLKDeepSleepTdlut(
- display_cfg,
- mode_lib->ms.num_active_planes,
- mode_lib->ms.BytePerPixelY,
- mode_lib->ms.BytePerPixelC,
- mode_lib->ms.SwathWidthY,
- mode_lib->ms.SwathWidthC,
- mode_lib->ms.NoOfDPP,
- mode_lib->ms.PSCL_FACTOR,
- mode_lib->ms.PSCL_FACTOR_CHROMA,
- mode_lib->ms.RequiredDPPCLK,
- mode_lib->ms.vactive_sw_bw_l,
- mode_lib->ms.vactive_sw_bw_c,
- mode_lib->soc.return_bus_width_bytes,
- mode_lib->ms.RequiredDISPCLK,
- s->tdlut_bytes_to_deliver,
- s->prefetch_swath_time_us,
-
- /* Output */
- &mode_lib->ms.dcfclk_deepsleep);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.dst_y_prefetch[k] < 2.0
- || mode_lib->ms.LinesForVM[k] >= 32.0
- || mode_lib->ms.LinesForDPTERow[k] >= 16.0
- || mode_lib->ms.NoTimeForPrefetch[k] == true
- || s->DSTYAfterScaler[k] > 8) {
- mode_lib->ms.support.PrefetchSupported = false;
- dml2_printf("DML::%s: k=%d, dst_y_prefetch=%f (should not be < 2)\n", __func__, k, mode_lib->ms.dst_y_prefetch[k]);
- dml2_printf("DML::%s: k=%d, LinesForVM=%f (should not be >= 32)\n", __func__, k, mode_lib->ms.LinesForVM[k]);
- dml2_printf("DML::%s: k=%d, LinesForDPTERow=%f (should not be >= 16)\n", __func__, k, mode_lib->ms.LinesForDPTERow[k]);
- dml2_printf("DML::%s: k=%d, DSTYAfterScaler=%d (should be <= 8)\n", __func__, k, s->DSTYAfterScaler[k]);
- dml2_printf("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]);
- }
- }
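The rejection logic above encodes fixed scheduling limits: a prefetch must span at least two destination lines, VM fetches must finish in under 32 lines, a DPTE row in under 16, and the post-scaler Y offset must stay within 8 lines. Condensed into one predicate (hypothetical helper name; the thresholds are copied from the checks above):

#include <stdbool.h>

/* Hypothetical condensation of the per-plane validity check above;
 * returns true when the computed prefetch schedule is acceptable. */
static bool prefetch_schedule_ok(double dst_y_prefetch, double lines_for_vm,
				 double lines_for_dpte_row, bool no_time,
				 unsigned int dst_y_after_scaler)
{
	return dst_y_prefetch >= 2.0 &&     /* at least 2 lines of prefetch */
	       lines_for_vm < 32.0 &&       /* VM fetch fits in < 32 lines  */
	       lines_for_dpte_row < 16.0 && /* DPTE row fits in < 16 lines  */
	       !no_time &&                  /* schedule actually converged  */
	       dst_y_after_scaler <= 8;     /* scaler output delay bound    */
}

Note that the removed code prints all five diagnostics whenever any one condition fails, which keeps the log self-explanatory at the cost of some noise.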
-
- mode_lib->ms.support.DynamicMetadataSupported = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.NoTimeForDynamicMetadata[k] == true) {
- mode_lib->ms.support.DynamicMetadataSupported = false;
- }
- }
-
- mode_lib->ms.support.VRatioInPrefetchSupported = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
- mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
- mode_lib->ms.support.VRatioInPrefetchSupported = false;
- dml2_printf("DML::%s: k=%d VRatioPreY = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: k=%d VRatioPreC = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: VRatioInPrefetchSupported = %u\n", __func__, mode_lib->ms.support.VRatioInPrefetchSupported);
- }
- }
-
- mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.VRatioInPrefetchSupported;
-
- // By default, do not recalc prefetch schedule
- s->recalc_prefetch_schedule = 0;
-
- // Only do urg vs prefetch bandwidth check, flip schedule check, power saving feature support check IF the Prefetch Schedule Check is ok
- if (mode_lib->ms.support.PrefetchSupported) {
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- // Calculate Urgent burst factor for prefetch
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor (for prefetch)\n", __func__, k);
- dml2_printf("DML::%s: k=%d, VRatioPreY=%f\n", __func__, k, mode_lib->ms.VRatioPreY[k]);
- dml2_printf("DML::%s: k=%d, VRatioPreC=%f\n", __func__, k, mode_lib->ms.VRatioPreC[k]);
-#endif
- CalculateUrgentBurstFactor(
- &display_cfg->plane_descriptors[k],
- mode_lib->ms.swath_width_luma_ub[k],
- mode_lib->ms.swath_width_chroma_ub[k],
- mode_lib->ms.SwathHeightY[k],
- mode_lib->ms.SwathHeightC[k],
- s->line_times[k],
- mode_lib->ms.UrgLatency,
- mode_lib->ms.VRatioPreY[k],
- mode_lib->ms.VRatioPreC[k],
- mode_lib->ms.BytePerPixelInDETY[k],
- mode_lib->ms.BytePerPixelInDETC[k],
- mode_lib->ms.DETBufferSizeY[k],
- mode_lib->ms.DETBufferSizeC[k],
- /* Output */
- &mode_lib->ms.UrgentBurstFactorLumaPre[k],
- &mode_lib->ms.UrgentBurstFactorChromaPre[k],
- &mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
- }
-
- // Calculate urgent bandwidth required, both urg and non urg peak bandwidth
- // assume flip bw is 0 at this point
- for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- mode_lib->ms.final_flip_bw[k] = 0;
-
- calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = mode_lib->ms.support.urg_vactive_bandwidth_required;
- calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required;
- calculate_peak_bandwidth_params->urg_bandwidth_required_qual = mode_lib->ms.support.urg_bandwidth_required_qual;
- calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required;
- calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = mode_lib->ms.surface_avg_vactive_required_bw;
- calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
-
- calculate_peak_bandwidth_params->display_cfg = display_cfg;
- calculate_peak_bandwidth_params->inc_flip_bw = 0;
- calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
- calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
- calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
- calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
-
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
- calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
- calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
- calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
- calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
- calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
- calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
- calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
- calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
- calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
- calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- calculate_peak_bandwidth_params);
-
- // Check urg peak bandwidth against available urg bw
- // check at SDP and DRAM, for all soc states (SVP prefetch and Sys Active)
- check_urgent_bandwidth_support(
- &s->dummy_single[0], // double* frac_urg_bandwidth
- &s->dummy_single[1], // double* frac_urg_bandwidth_mall
- &mode_lib->ms.support.UrgVactiveBandwidthSupport,
- &mode_lib->ms.support.PrefetchBandwidthSupported,
-
- mode_lib->soc.mall_allocated_for_dcn_mbytes,
- mode_lib->ms.support.non_urg_bandwidth_required,
- mode_lib->ms.support.urg_vactive_bandwidth_required,
- mode_lib->ms.support.urg_bandwidth_required,
- mode_lib->ms.support.urg_bandwidth_available);
-
- mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.PrefetchBandwidthSupported;
- dml2_printf("DML::%s: PrefetchBandwidthSupported=%0d\n", __func__, mode_lib->ms.support.PrefetchBandwidthSupported);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]) {
- mode_lib->ms.support.PrefetchSupported = false;
- dml2_printf("DML::%s: k=%d, NotEnoughUrgentLatencyHidingPre=%d\n", __func__, k, mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
- }
- }
-
-#ifdef DML_GLOBAL_PREFETCH_CHECK
- if (mode_lib->ms.support.PrefetchSupported && mode_lib->ms.num_active_planes > 1 && s->recalc_prefetch_done == 0) {
- CheckGlobalPrefetchAdmissibility_params->num_active_planes = mode_lib->ms.num_active_planes;
- CheckGlobalPrefetchAdmissibility_params->pixel_format = s->pixel_format;
- CheckGlobalPrefetchAdmissibility_params->chunk_bytes_l = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
- CheckGlobalPrefetchAdmissibility_params->chunk_bytes_c = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
- CheckGlobalPrefetchAdmissibility_params->lb_source_lines_l = s->lb_source_lines_l;
- CheckGlobalPrefetchAdmissibility_params->lb_source_lines_c = s->lb_source_lines_c;
- CheckGlobalPrefetchAdmissibility_params->swath_height_l = mode_lib->ms.SwathHeightY;
- CheckGlobalPrefetchAdmissibility_params->swath_height_c = mode_lib->ms.SwathHeightC;
- CheckGlobalPrefetchAdmissibility_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
- CheckGlobalPrefetchAdmissibility_params->compressed_buffer_size_kbytes = mode_lib->ms.CompressedBufferSizeInkByte;
- CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_l = mode_lib->ms.DETBufferSizeY;
- CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_c = mode_lib->ms.DETBufferSizeC;
- CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_l = s->full_swath_bytes_l;
- CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_c = s->full_swath_bytes_c;
- CheckGlobalPrefetchAdmissibility_params->prefetch_sw_bytes = s->prefetch_sw_bytes;
- CheckGlobalPrefetchAdmissibility_params->Tpre_rounded = s->Tpre_rounded;
- CheckGlobalPrefetchAdmissibility_params->Tpre_oto = s->Tpre_oto;
- CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = mode_lib->ms.support.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- CheckGlobalPrefetchAdmissibility_params->line_time = s->line_times;
- CheckGlobalPrefetchAdmissibility_params->dst_y_prefetch = mode_lib->ms.dst_y_prefetch;
- if (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps < 10 * 1024)
- CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = 10 * 1024;
-
- CheckGlobalPrefetchAdmissibility_params->estimated_dcfclk_mhz = (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps / (double) mode_lib->soc.return_bus_width_bytes) /
- ((double)mode_lib->soc.qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100.0);
-
- // if recalc_prefetch_schedule is set, recalculate the prefetch schedule with the new impacted_Tpre, prefetch should be possible
- CheckGlobalPrefetchAdmissibility_params->recalc_prefetch_schedule = &s->recalc_prefetch_schedule;
- CheckGlobalPrefetchAdmissibility_params->impacted_dst_y_pre = s->impacted_dst_y_pre;
- mode_lib->ms.support.PrefetchSupported = CheckGlobalPrefetchAdmissibility(&mode_lib->scratch, CheckGlobalPrefetchAdmissibility_params);
- s->recalc_prefetch_done = 1;
- s->recalc_prefetch_schedule = 1;
- }
-#endif
- } // prefetch schedule ok, do urg bw and flip schedule
- } while (s->recalc_prefetch_schedule);
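The do/while above runs the per-plane prefetch scheduling at most twice. On the first pass impacted_dst_y_pre[] is zero; if the DML_GLOBAL_PREFETCH_CHECK block decides the combined schedules overcommit the return path and sets recalc_prefetch_schedule, the loop repeats once with the impacted Tpre values, and recalc_prefetch_done blocks any third pass. The bandwidth estimate feeding that check is floored at 10 * 1024 MBps; with a hypothetical 64-byte return bus and a 100% derate, that floor corresponds to estimated_dcfclk_mhz = 10240 / 64 / 1.0 = 160 MHz. A control-flow skeleton of the retry pattern (the stub helpers are stand-ins for illustration, not DML functions):

#include <stdbool.h>

/* Stand-ins for the real DML steps, for illustration only. */
static bool schedule_all_planes(const double *impacted_tpre) { (void)impacted_tpre; return true; }
static bool globally_admissible(double *impacted_tpre) { (void)impacted_tpre; return true; }

/* Two-pass structure of the loop above: one optional retry, never more. */
static bool prefetch_check_sketch(void)
{
	double impacted_tpre[8] = { 0 };
	bool supported, recalc, done = false;

	do {
		recalc = false;
		supported = schedule_all_planes(impacted_tpre);
		if (supported && !done && !globally_admissible(impacted_tpre)) {
			done = true;	/* mirrors s->recalc_prefetch_done     */
			recalc = true;	/* mirrors s->recalc_prefetch_schedule */
		}
	} while (recalc);

	return supported;
}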
-
- // Flip Schedule
- // Both prefetch schedule and BW okay
- if (mode_lib->ms.support.PrefetchSupported == true) {
- mode_lib->ms.BandwidthAvailableForImmediateFlip =
- get_bandwidth_available_for_immediate_flip(
- dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required_qual, // no flip
- mode_lib->ms.support.urg_bandwidth_available);
-
- mode_lib->ms.TotImmediateFlipBytes = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip) {
- s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
- s->HostVMInefficiencyFactor,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.meta_row_bytes[k]);
- } else {
- s->per_pipe_flip_bytes[k] = 0;
- }
- mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k];
-
- }
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- CalculateFlipSchedule(
- &mode_lib->scratch,
- display_cfg->plane_descriptors[k].immediate_flip,
- 1, // use_lb_flip_bw
- s->HostVMInefficiencyFactor,
- s->Tvm_trips_flip[k],
- s->Tr0_trips_flip[k],
- s->Tvm_trips_flip_rounded[k],
- s->Tr0_trips_flip_rounded[k],
- display_cfg->gpuvm_enable,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.BandwidthAvailableForImmediateFlip,
- mode_lib->ms.TotImmediateFlipBytes,
- display_cfg->plane_descriptors[k].pixel_format,
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)),
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->ms.Tno_bw_flip[k],
- mode_lib->ms.dpte_row_height[k],
- mode_lib->ms.dpte_row_height_chroma[k],
- mode_lib->ms.use_one_row_for_frame_flip[k],
- mode_lib->ip.max_flip_time_us,
- mode_lib->ip.max_flip_time_lines,
- s->per_pipe_flip_bytes[k],
- mode_lib->ms.meta_row_bytes[k],
- s->meta_row_height_luma[k],
- s->meta_row_height_chroma[k],
- mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
-
- /* Output */
- &mode_lib->ms.dst_y_per_vm_flip[k],
- &mode_lib->ms.dst_y_per_row_flip[k],
- &mode_lib->ms.final_flip_bw[k],
- &mode_lib->ms.ImmediateFlipSupportedForPipe[k]);
- }
-
- calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = s->dummy_bw;
- calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required_flip;
- calculate_peak_bandwidth_params->urg_bandwidth_required_qual = s->dummy_bw;
- calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required_flip;
- calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = s->surface_dummy_bw;
- calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
-
- calculate_peak_bandwidth_params->display_cfg = display_cfg;
- calculate_peak_bandwidth_params->inc_flip_bw = 1;
- calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
- calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
- calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
- calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
-
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
- calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
- calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
- calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
- calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
- calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
- calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
- calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
- calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
- calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
- calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- calculate_peak_bandwidth_params);
-
- calculate_immediate_flip_bandwidth_support(
- &s->dummy_single[0], // double* frac_urg_bandwidth_flip
- &mode_lib->ms.support.ImmediateFlipSupport,
-
- dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required_flip,
- mode_lib->ms.support.non_urg_bandwidth_required_flip,
- mode_lib->ms.support.urg_bandwidth_available);
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false)
- mode_lib->ms.support.ImmediateFlipSupport = false;
- }
-
-			} else { // if prefetch is not supported, assume iflip is not supported either
- mode_lib->ms.support.ImmediateFlipSupport = false;
- }
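Immediate-flip support is therefore gated three ways: prefetch must be supported at all (the else branch above), the flip bandwidth check must pass, and every plane that actually requested an immediate flip must be individually servable. As a condensed predicate (hypothetical helper; the logic mirrors the code above):

#include <stdbool.h>

/* Mirrors the gating above: no prefetch => no iflip; otherwise the
 * bandwidth verdict and every requesting plane must all be ok. */
static bool iflip_supported(bool prefetch_ok, bool flip_bw_ok, int num_planes,
			    const bool *wants_flip, const bool *pipe_ok)
{
	if (!prefetch_ok || !flip_bw_ok)
		return false;
	for (int k = 0; k < num_planes; k++)
		if (wants_flip[k] && !pipe_ok[k])
			return false;	/* a requesting plane can't be served */
	return true;
}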
-
- s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency;
- s->mSOCParameters.ExtraLatency = mode_lib->ms.ExtraLatency;
- s->mSOCParameters.ExtraLatency_sr = mode_lib->ms.ExtraLatency_sr;
- s->mSOCParameters.WritebackLatency = mode_lib->soc.qos_parameters.writeback.base_latency_us;
- s->mSOCParameters.DRAMClockChangeLatency = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
- s->mSOCParameters.FCLKChangeLatency = mode_lib->soc.power_management_parameters.fclk_change_blackout_us;
- s->mSOCParameters.SRExitTime = mode_lib->soc.power_management_parameters.stutter_exit_latency_us;
- s->mSOCParameters.SREnterPlusExitTime = mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us;
- s->mSOCParameters.SRExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_exit_latency_us;
- s->mSOCParameters.SREnterPlusExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_enter_plus_exit_latency_us;
- s->mSOCParameters.USRRetrainingLatency = 0;
- s->mSOCParameters.SMNLatency = 0;
- s->mSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index);
- s->mSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, in_out_params->min_clk_index);
- s->mSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock;
- s->mSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type;
-
- CalculateWatermarks_params->display_cfg = display_cfg;
- CalculateWatermarks_params->USRRetrainingRequired = false;
- CalculateWatermarks_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
- CalculateWatermarks_params->MaxLineBufferLines = mode_lib->ip.max_line_buffer_lines;
- CalculateWatermarks_params->LineBufferSize = mode_lib->ip.line_buffer_size_bits;
- CalculateWatermarks_params->WritebackInterfaceBufferSize = mode_lib->ip.writeback_interface_buffer_size_kbytes;
- CalculateWatermarks_params->DCFCLK = mode_lib->ms.DCFCLK;
- CalculateWatermarks_params->SynchronizeTimings = display_cfg->overrides.synchronize_timings;
- CalculateWatermarks_params->SynchronizeDRRDisplaysForUCLKPStateChange = display_cfg->overrides.synchronize_ddr_displays_for_uclk_pstate_change;
- CalculateWatermarks_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes;
- CalculateWatermarks_params->mmSOCParameters = s->mSOCParameters;
- CalculateWatermarks_params->WritebackChunkSize = mode_lib->ip.writeback_chunk_size_kbytes;
- CalculateWatermarks_params->SOCCLK = mode_lib->ms.SOCCLK;
- CalculateWatermarks_params->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
- CalculateWatermarks_params->DETBufferSizeY = mode_lib->ms.DETBufferSizeY;
- CalculateWatermarks_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC;
- CalculateWatermarks_params->SwathHeightY = mode_lib->ms.SwathHeightY;
- CalculateWatermarks_params->SwathHeightC = mode_lib->ms.SwathHeightC;
- CalculateWatermarks_params->SwathWidthY = mode_lib->ms.SwathWidthY;
- CalculateWatermarks_params->SwathWidthC = mode_lib->ms.SwathWidthC;
- CalculateWatermarks_params->DPPPerSurface = mode_lib->ms.NoOfDPP;
- CalculateWatermarks_params->BytePerPixelDETY = mode_lib->ms.BytePerPixelInDETY;
- CalculateWatermarks_params->BytePerPixelDETC = mode_lib->ms.BytePerPixelInDETC;
- CalculateWatermarks_params->DSTXAfterScaler = s->DSTXAfterScaler;
- CalculateWatermarks_params->DSTYAfterScaler = s->DSTYAfterScaler;
- CalculateWatermarks_params->UnboundedRequestEnabled = mode_lib->ms.UnboundedRequestEnabled;
- CalculateWatermarks_params->CompressedBufferSizeInkByte = mode_lib->ms.CompressedBufferSizeInkByte;
- CalculateWatermarks_params->meta_row_height_l = s->meta_row_height_luma;
- CalculateWatermarks_params->meta_row_height_c = s->meta_row_height_chroma;
-
- // Output
- CalculateWatermarks_params->Watermark = &mode_lib->ms.support.watermarks; // Watermarks *Watermark
- CalculateWatermarks_params->DRAMClockChangeSupport = mode_lib->ms.support.DRAMClockChangeSupport;
- CalculateWatermarks_params->global_dram_clock_change_supported = &mode_lib->ms.support.global_dram_clock_change_supported;
- CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0]; // double *MaxActiveDRAMClockChangeLatencySupported[]
- CalculateWatermarks_params->SubViewportLinesNeededInMALL = mode_lib->ms.SubViewportLinesNeededInMALL; // unsigned int SubViewportLinesNeededInMALL[]
- CalculateWatermarks_params->FCLKChangeSupport = mode_lib->ms.support.FCLKChangeSupport;
- CalculateWatermarks_params->global_fclk_change_supported = &mode_lib->ms.support.global_fclk_change_supported;
- CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // double *MaxActiveFCLKChangeLatencySupported
- CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport;
- CalculateWatermarks_params->g6_temp_read_support = &mode_lib->ms.support.g6_temp_read_support;
- CalculateWatermarks_params->VActiveLatencyHidingMargin = mode_lib->ms.VActiveLatencyHidingMargin;
- CalculateWatermarks_params->VActiveLatencyHidingUs = mode_lib->ms.VActiveLatencyHidingUs;
-
- CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
-
- calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->ms.support.watermarks, s->dummy_integer_array[0]);
- }
- dml2_printf("DML::%s: Done prefetch calculation\n", __func__);
- // End of Prefetch Check
+ dml_core_ms_prefetch_check(mode_lib, display_cfg);
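This single added call is the pivot of the hunk: the entire /* Prefetch Check */ body deleted above (the -8878,639 range) is re-homed into a dml_core_ms_prefetch_check() helper defined elsewhere in the patch. A common motivation for this kind of extraction in DML code is to shrink dml_core_mode_support()'s stack frame; that rationale is an inference here, not stated in this hunk. The helper's shape, inferred purely from the call site (the real declaration may carry extra qualifiers such as noinline_for_stack):

/* Inferred from the call site above; illustrative only. */
struct dml2_core_internal_display_mode_lib;
struct dml2_display_cfg;

void dml_core_ms_prefetch_check(struct dml2_core_internal_display_mode_lib *mode_lib,
				const struct dml2_display_cfg *display_cfg);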
mode_lib->ms.support.max_urgent_latency_us = s->mSOCParameters.max_urgent_latency_us;
@@ -9546,8 +9475,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.dram_change_vactive_det_fill_delay_us);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us);
- dml2_printf("DML::%s: ROBSupport = %u\n", __func__, mode_lib->ms.support.ROBSupport);
+ DML_LOG_VERBOSE("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us);
+ DML_LOG_VERBOSE("DML::%s: ROBSupport = %u\n", __func__, mode_lib->ms.support.ROBSupport);
#endif
/*Mode Support, Voltage State and SOC Configuration*/
@@ -9597,17 +9526,17 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
&& !mode_lib->ms.support.ExceededMALLSize
&& mode_lib->ms.support.g6_temp_read_support
&& ((!display_cfg->hostvm_enable && !s->ImmediateFlipRequired) || mode_lib->ms.support.ImmediateFlipSupport)) {
- dml2_printf("DML::%s: mode is supported\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: mode is supported\n", __func__);
mode_lib->ms.support.ModeSupport = true;
} else {
- dml2_printf("DML::%s: mode is NOT supported\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: mode is NOT supported\n", __func__);
mode_lib->ms.support.ModeSupport = false;
}
}
	// Since mode_support now works on one particular power state, there is only one state idx (index 0).
- dml2_printf("DML::%s: ModeSupport = %u\n", __func__, mode_lib->ms.support.ModeSupport);
- dml2_printf("DML::%s: ImmediateFlipSupport = %u\n", __func__, mode_lib->ms.support.ImmediateFlipSupport);
+ DML_LOG_VERBOSE("DML::%s: ModeSupport = %u\n", __func__, mode_lib->ms.support.ModeSupport);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipSupport = %u\n", __func__, mode_lib->ms.support.ImmediateFlipSupport);
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
mode_lib->ms.support.MPCCombineEnable[k] = mode_lib->ms.MPCCombine[k];
@@ -9623,8 +9552,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.OutputRate[k] = mode_lib->ms.OutputRate[k];
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d, ODMMode = %u\n", __func__, k, mode_lib->ms.support.ODMMode[k]);
- dml2_printf("DML::%s: k=%d, DSCEnabled = %u\n", __func__, k, mode_lib->ms.support.DSCEnabled[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, ODMMode = %u\n", __func__, k, mode_lib->ms.support.ODMMode[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DSCEnabled = %u\n", __func__, k, mode_lib->ms.support.DSCEnabled[k]);
#endif
}
@@ -9632,7 +9561,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if (!mode_lib->ms.support.ModeSupport)
dml2_print_mode_support_info(&mode_lib->ms.support, true);
- dml2_printf("DML::%s: --- DONE --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: --- DONE --- \n", __func__);
#endif
return mode_lib->ms.support.ModeSupport;
@@ -9642,18 +9571,18 @@ unsigned int dml2_core_calcs_mode_support_ex(struct dml2_core_calcs_mode_support
{
unsigned int result;
- dml2_printf("DML::%s: ------------- START ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: ------------- START ----------\n", __func__);
result = dml_core_mode_support(in_out_params);
if (result)
*in_out_params->out_evaluation_info = in_out_params->mode_lib->ms.support;
- dml2_printf("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, result, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, result, in_out_params->min_clk_index);
for (unsigned int k = 0; k < in_out_params->in_display_cfg->num_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: plane_%d: reserved_vblank_time_ns = %lu\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
- dml2_printf("DML::%s: ------------- DONE ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: ------------- DONE ----------\n", __func__);
return result;
}
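One substantive fix rides along with the rename in this function: reserved_vblank_time_ns is now printed with %lu instead of %u, matching an unsigned long field. Passing an unsigned long through %u is a type mismatch (undefined behaviour) and is exactly what -Wformat flags; a minimal standalone illustration (plain C, not the DML macro):

#include <stdio.h>

int main(void)
{
	unsigned long reserved_vblank_time_ns = 400000; /* example value */

	/* %lu is the correct conversion for unsigned long; %u would be
	 * undefined behaviour on LP64 targets and trips -Wformat. */
	printf("reserved_vblank_time_ns = %lu\n", reserved_vblank_time_ns);
	return 0;
}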
@@ -9687,19 +9616,19 @@ static void CalculatePixelDeliveryTimes(
double pixel_clock_mhz = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : HRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- dml2_printf("DML::%s: k=%u : VRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%u : HRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio);
- dml2_printf("DML::%s: k=%u : VRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
- dml2_printf("DML::%s: k=%u : VRatioPrefetchY = %f\n", __func__, k, VRatioPrefetchY[k]);
- dml2_printf("DML::%s: k=%u : VRatioPrefetchC = %f\n", __func__, k, VRatioPrefetchC[k]);
- dml2_printf("DML::%s: k=%u : swath_width_luma_ub = %u\n", __func__, k, swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u : swath_width_chroma_ub = %u\n", __func__, k, swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u : PSCL_THROUGHPUT = %f\n", __func__, k, PSCL_THROUGHPUT[k]);
- dml2_printf("DML::%s: k=%u : PSCL_THROUGHPUT_CHROMA = %f\n", __func__, k, PSCL_THROUGHPUT_CHROMA[k]);
- dml2_printf("DML::%s: k=%u : DPPPerSurface = %u\n", __func__, k, cfg_support_info->plane_support_info[k].dpps_used);
- dml2_printf("DML::%s: k=%u : pixel_clock_mhz = %f\n", __func__, k, pixel_clock_mhz);
- dml2_printf("DML::%s: k=%u : Dppclk = %f\n", __func__, k, Dppclk[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : HRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : HRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatioPrefetchY = %f\n", __func__, k, VRatioPrefetchY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatioPrefetchC = %f\n", __func__, k, VRatioPrefetchC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : swath_width_luma_ub = %u\n", __func__, k, swath_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : swath_width_chroma_ub = %u\n", __func__, k, swath_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : PSCL_THROUGHPUT = %f\n", __func__, k, PSCL_THROUGHPUT[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : PSCL_THROUGHPUT_CHROMA = %f\n", __func__, k, PSCL_THROUGHPUT_CHROMA[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DPPPerSurface = %u\n", __func__, k, cfg_support_info->plane_support_info[k].dpps_used);
+ DML_LOG_VERBOSE("DML::%s: k=%u : pixel_clock_mhz = %f\n", __func__, k, pixel_clock_mhz);
+ DML_LOG_VERBOSE("DML::%s: k=%u : Dppclk = %f\n", __func__, k, Dppclk[k]);
#endif
if (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio <= 1) {
DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] * cfg_support_info->plane_support_info[k].dpps_used / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio / pixel_clock_mhz;
@@ -9733,10 +9662,10 @@ static void CalculatePixelDeliveryTimes(
}
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLuma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLumaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChroma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChromaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLumaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChromaPrefetch[k]);
#endif
}
@@ -9752,12 +9681,12 @@ static void CalculatePixelDeliveryTimes(
DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = DisplayPipeLineDeliveryTimeChromaPrefetch[k] / req_per_swath_ub_c[k];
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLuma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLumaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : req_per_swath_ub_l = %d\n", __func__, k, req_per_swath_ub_l[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChroma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChromaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : req_per_swath_ub_c = %d\n", __func__, k, req_per_swath_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLumaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : req_per_swath_ub_l = %d\n", __func__, k, req_per_swath_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChromaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : req_per_swath_ub_c = %d\n", __func__, k, req_per_swath_ub_c[k]);
#endif
}
}
@@ -9853,14 +9782,14 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_L[k]);
- dml2_printf("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_C[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkNominal = %f\n", __func__, k, p->TimePerMetaChunkNominal[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkVBlank = %f\n", __func__, k, p->TimePerMetaChunkVBlank[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkFlip = %f\n", __func__, k, p->TimePerMetaChunkFlip[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkNominal = %f\n", __func__, k, p->TimePerChromaMetaChunkNominal[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkVBlank = %f\n", __func__, k, p->TimePerChromaMetaChunkVBlank[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkFlip = %f\n", __func__, k, p->TimePerChromaMetaChunkFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_L[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_C[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerMetaChunkNominal = %f\n", __func__, k, p->TimePerMetaChunkNominal[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerMetaChunkVBlank = %f\n", __func__, k, p->TimePerMetaChunkVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerMetaChunkFlip = %f\n", __func__, k, p->TimePerMetaChunkFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerChromaMetaChunkNominal = %f\n", __func__, k, p->TimePerChromaMetaChunkNominal[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerChromaMetaChunkVBlank = %f\n", __func__, k, p->TimePerChromaMetaChunkVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerChromaMetaChunkFlip = %f\n", __func__, k, p->TimePerChromaMetaChunkFlip[k]);
#endif
}
@@ -9881,7 +9810,7 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
else
p->time_per_tdlut_group[k] = 0;
- dml2_printf("DML::%s: k=%u, time_per_tdlut_group = %f\n", __func__, k, p->time_per_tdlut_group[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_tdlut_group = %f\n", __func__, k, p->time_per_tdlut_group[k]);
if (p->display_cfg->gpuvm_enable == true) {
if (!dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) {
@@ -9897,14 +9826,14 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
if (dpte_groups_per_row_luma_ub <= 2) {
dpte_groups_per_row_luma_ub = dpte_groups_per_row_luma_ub + 1;
}
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]);
- dml2_printf("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEReqWidthY = %u\n", __func__, k, p->PixelPTEReqWidthY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEReqHeightY = %u\n", __func__, k, p->PixelPTEReqHeightY[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_width_luma = %u\n", __func__, k, dpte_group_width_luma);
- dml2_printf("DML::%s: k=%u, dpte_groups_per_row_luma_ub = %u\n", __func__, k, dpte_groups_per_row_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEReqWidthY = %u\n", __func__, k, p->PixelPTEReqWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEReqHeightY = %u\n", __func__, k, p->PixelPTEReqHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_group_width_luma = %u\n", __func__, k, dpte_group_width_luma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_groups_per_row_luma_ub = %u\n", __func__, k, dpte_groups_per_row_luma_ub);
p->time_per_pte_group_nom_luma[k] = p->DST_Y_PER_PTE_ROW_NOM_L[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_luma_ub;
p->time_per_pte_group_vblank_luma[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_luma_ub;
@@ -9928,9 +9857,9 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
if (dpte_groups_per_row_chroma_ub <= 2) {
dpte_groups_per_row_chroma_ub = dpte_groups_per_row_chroma_ub + 1;
}
- dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma);
- dml2_printf("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub);
p->time_per_pte_group_nom_chroma[k] = p->DST_Y_PER_PTE_ROW_NOM_C[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_chroma_ub;
p->time_per_pte_group_vblank_chroma[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_chroma_ub;
@@ -9945,17 +9874,17 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
p->time_per_pte_group_flip_chroma[k] = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, dst_y_per_row_vblank = %f\n", __func__, k, p->dst_y_per_row_vblank[k]);
- dml2_printf("DML::%s: k=%u, dst_y_per_row_flip = %f\n", __func__, k, p->dst_y_per_row_flip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_row_vblank = %f\n", __func__, k, p->dst_y_per_row_vblank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_row_flip = %f\n", __func__, k, p->dst_y_per_row_flip[k]);
- dml2_printf("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_L[k]);
- dml2_printf("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_C[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_nom_luma = %f\n", __func__, k, p->time_per_pte_group_nom_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_vblank_luma = %f\n", __func__, k, p->time_per_pte_group_vblank_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_flip_luma = %f\n", __func__, k, p->time_per_pte_group_flip_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_nom_chroma = %f\n", __func__, k, p->time_per_pte_group_nom_chroma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_vblank_chroma = %f\n", __func__, k, p->time_per_pte_group_vblank_chroma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_flip_chroma = %f\n", __func__, k, p->time_per_pte_group_flip_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_L[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_C[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_nom_luma = %f\n", __func__, k, p->time_per_pte_group_nom_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_vblank_luma = %f\n", __func__, k, p->time_per_pte_group_vblank_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_flip_luma = %f\n", __func__, k, p->time_per_pte_group_flip_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_nom_chroma = %f\n", __func__, k, p->time_per_pte_group_nom_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_vblank_chroma = %f\n", __func__, k, p->time_per_pte_group_vblank_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_flip_chroma = %f\n", __func__, k, p->time_per_pte_group_flip_chroma[k]);
#endif
}
} // CalculateMetaAndPTETimes
@@ -9991,18 +9920,18 @@ static void CalculateVMGroupAndRequestTimes(
double line_time;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
#endif
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
double pixel_clock_mhz = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
bool dcc_mrq_enable = display_cfg->plane_descriptors[k].surface.dcc.enable && mrq_present;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, dcc_mrq_enable = %u\n", __func__, k, dcc_mrq_enable);
- dml2_printf("DML::%s: k=%u, vm_group_bytes = %u\n", __func__, k, vm_group_bytes[k]);
- dml2_printf("DML::%s: k=%u, dpde0_bytes_per_frame_ub_l = %u\n", __func__, k, dpde0_bytes_per_frame_ub_l[k]);
- dml2_printf("DML::%s: k=%u, dpde0_bytes_per_frame_ub_c = %u\n", __func__, k, dpde0_bytes_per_frame_ub_c[k]);
- dml2_printf("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_l = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_l[k]);
- dml2_printf("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_c = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dcc_mrq_enable = %u\n", __func__, k, dcc_mrq_enable);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_group_bytes = %u\n", __func__, k, vm_group_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpde0_bytes_per_frame_ub_l = %u\n", __func__, k, dpde0_bytes_per_frame_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpde0_bytes_per_frame_ub_c = %u\n", __func__, k, dpde0_bytes_per_frame_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_l = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_c = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_c[k]);
#endif
if (display_cfg->gpuvm_enable) {
@@ -10071,13 +10000,13 @@ static void CalculateVMGroupAndRequestTimes(
else
TimePerVMRequestFlip[k] = 0.0;
- dml2_printf("DML::%s: k=%u, dst_y_per_vm_vblank = %f\n", __func__, k, dst_y_per_vm_vblank[k]);
- dml2_printf("DML::%s: k=%u, dst_y_per_vm_flip = %f\n", __func__, k, dst_y_per_vm_flip[k]);
- dml2_printf("DML::%s: k=%u, line_time = %f\n", __func__, k, line_time);
- dml2_printf("DML::%s: k=%u, num_group_per_lower_vm_stage_pref = %f\n", __func__, k, num_group_per_lower_vm_stage_pref);
- dml2_printf("DML::%s: k=%u, num_group_per_lower_vm_stage_flip = %f\n", __func__, k, num_group_per_lower_vm_stage_flip);
- dml2_printf("DML::%s: k=%u, num_req_per_lower_vm_stage_pref = %f\n", __func__, k, num_req_per_lower_vm_stage_pref);
- dml2_printf("DML::%s: k=%u, num_req_per_lower_vm_stage_flip = %f\n", __func__, k, num_req_per_lower_vm_stage_flip);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_vm_vblank = %f\n", __func__, k, dst_y_per_vm_vblank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_vm_flip = %f\n", __func__, k, dst_y_per_vm_flip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, line_time = %f\n", __func__, k, line_time);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_group_per_lower_vm_stage_pref = %d\n", __func__, k, num_group_per_lower_vm_stage_pref);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_group_per_lower_vm_stage_flip = %d\n", __func__, k, num_group_per_lower_vm_stage_flip);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_req_per_lower_vm_stage_pref = %d\n", __func__, k, num_req_per_lower_vm_stage_pref);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_req_per_lower_vm_stage_flip = %d\n", __func__, k, num_req_per_lower_vm_stage_flip);
if (display_cfg->gpuvm_max_page_table_levels > 2) {
TimePerVMGroupVBlank[k] = TimePerVMGroupVBlank[k] / 2;
@@ -10094,10 +10023,10 @@ static void CalculateVMGroupAndRequestTimes(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, TimePerVMGroupVBlank = %f\n", __func__, k, TimePerVMGroupVBlank[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMGroupFlip = %f\n", __func__, k, TimePerVMGroupFlip[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMRequestVBlank = %f\n", __func__, k, TimePerVMRequestVBlank[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMRequestFlip = %f\n", __func__, k, TimePerVMRequestFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMGroupVBlank = %f\n", __func__, k, TimePerVMGroupVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMGroupFlip = %f\n", __func__, k, TimePerVMGroupFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMRequestVBlank = %f\n", __func__, k, TimePerVMRequestVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMRequestFlip = %f\n", __func__, k, TimePerVMRequestFlip[k]);
#endif
}
}
@@ -10113,7 +10042,6 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
unsigned int SingleVTotal = 0;
bool SameTiming = true;
bool FoundCriticalSurface = false;
- double LastZ8StutterPeriod = 0;
memset(l, 0, sizeof(struct dml2_core_calcs_CalculateStutterEfficiency_locals));
@@ -10127,9 +10055,9 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
l->TotalCompressedReadBandwidth = l->TotalCompressedReadBandwidth + p->ReadBandwidthSurfaceLuma[k] / math_min2(p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0, l->MaximumEffectiveCompressionLuma);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
- dml2_printf("DML::%s: k=%u, NetDCCRateLuma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0);
- dml2_printf("DML::%s: k=%u, MaximumEffectiveCompressionLuma = %f\n", __func__, k, l->MaximumEffectiveCompressionLuma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NetDCCRateLuma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaximumEffectiveCompressionLuma = %f\n", __func__, k, l->MaximumEffectiveCompressionLuma);
#endif
l->TotalZeroSizeRequestReadBandwidth = l->TotalZeroSizeRequestReadBandwidth + p->ReadBandwidthSurfaceLuma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane0;
l->TotalZeroSizeCompressedReadBandwidth = l->TotalZeroSizeCompressedReadBandwidth + p->ReadBandwidthSurfaceLuma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane0 / l->MaximumEffectiveCompressionLuma;
@@ -10142,9 +10070,9 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
l->TotalCompressedReadBandwidth = l->TotalCompressedReadBandwidth + p->ReadBandwidthSurfaceChroma[k] / math_min2(p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1, l->MaximumEffectiveCompressionChroma);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, p->ReadBandwidthSurfaceChroma[k]);
- dml2_printf("DML::%s: k=%u, NetDCCRateChroma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1);
- dml2_printf("DML::%s: k=%u, MaximumEffectiveCompressionChroma = %f\n", __func__, k, l->MaximumEffectiveCompressionChroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, p->ReadBandwidthSurfaceChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NetDCCRateChroma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaximumEffectiveCompressionChroma = %f\n", __func__, k, l->MaximumEffectiveCompressionChroma);
#endif
l->TotalZeroSizeRequestReadBandwidth = l->TotalZeroSizeRequestReadBandwidth + p->ReadBandwidthSurfaceChroma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane1;
l->TotalZeroSizeCompressedReadBandwidth = l->TotalZeroSizeCompressedReadBandwidth + p->ReadBandwidthSurfaceChroma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane1 / l->MaximumEffectiveCompressionChroma;
@@ -10160,19 +10088,19 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->AverageDCCZeroSizeFraction = l->TotalZeroSizeRequestReadBandwidth / p->TotalDataReadBandwidth;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: TotalCompressedReadBandwidth = %f\n", __func__, l->TotalCompressedReadBandwidth);
- dml2_printf("DML::%s: TotalZeroSizeRequestReadBandwidth = %f\n", __func__, l->TotalZeroSizeRequestReadBandwidth);
- dml2_printf("DML::%s: TotalZeroSizeCompressedReadBandwidth = %f\n", __func__, l->TotalZeroSizeCompressedReadBandwidth);
- dml2_printf("DML::%s: MaximumEffectiveCompressionLuma = %f\n", __func__, l->MaximumEffectiveCompressionLuma);
- dml2_printf("DML::%s: MaximumEffectiveCompressionChroma = %f\n", __func__, l->MaximumEffectiveCompressionChroma);
- dml2_printf("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: AverageDCCZeroSizeFraction = %f\n", __func__, l->AverageDCCZeroSizeFraction);
+ DML_LOG_VERBOSE("DML::%s: UnboundedRequestEnabled = %u\n", __func__, p->UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: TotalCompressedReadBandwidth = %f\n", __func__, l->TotalCompressedReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: TotalZeroSizeRequestReadBandwidth = %f\n", __func__, l->TotalZeroSizeRequestReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: TotalZeroSizeCompressedReadBandwidth = %f\n", __func__, l->TotalZeroSizeCompressedReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: MaximumEffectiveCompressionLuma = %f\n", __func__, l->MaximumEffectiveCompressionLuma);
+ DML_LOG_VERBOSE("DML::%s: MaximumEffectiveCompressionChroma = %f\n", __func__, l->MaximumEffectiveCompressionChroma);
+ DML_LOG_VERBOSE("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: AverageDCCZeroSizeFraction = %f\n", __func__, l->AverageDCCZeroSizeFraction);
- dml2_printf("DML::%s: CompbufReservedSpace64B = %u (%f kbytes)\n", __func__, p->CompbufReservedSpace64B, p->CompbufReservedSpace64B * 64 / 1024.0);
- dml2_printf("DML::%s: CompbufReservedSpaceZs = %u\n", __func__, p->CompbufReservedSpaceZs);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u kbytes\n", __func__, p->CompressedBufferSizeInkByte);
- dml2_printf("DML::%s: ROBBufferSizeInKByte = %u kbytes\n", __func__, p->ROBBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: CompbufReservedSpace64B = %u (%f kbytes)\n", __func__, p->CompbufReservedSpace64B, p->CompbufReservedSpace64B * 64 / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: CompbufReservedSpaceZs = %u\n", __func__, p->CompbufReservedSpaceZs);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSizeInkByte = %u kbytes\n", __func__, p->CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: ROBBufferSizeInKByte = %u kbytes\n", __func__, p->ROBBufferSizeInKByte);
#endif
if (l->AverageDCCZeroSizeFraction == 1) {
l->AverageZeroSizeCompressionRate = l->TotalZeroSizeRequestReadBandwidth / l->TotalZeroSizeCompressedReadBandwidth;
@@ -10189,10 +10117,10 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate + 1 / l->AverageDCCCompressionRate));
- dml2_printf("DML::%s: min 3 = %d\n", __func__, (p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64));
- dml2_printf("DML::%s: min 4 = %f\n", __func__, (p->ZeroSizeBufferEntries - p->CompbufReservedSpaceZs) * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate));
+ DML_LOG_VERBOSE("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate + 1 / l->AverageDCCCompressionRate));
+ DML_LOG_VERBOSE("DML::%s: min 3 = %d\n", __func__, (p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64));
+ DML_LOG_VERBOSE("DML::%s: min 4 = %f\n", __func__, (p->ZeroSizeBufferEntries - p->CompbufReservedSpaceZs) * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate));
#endif
} else {
l->EffectiveCompressedBufferSize = math_min2((double)p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate,
@@ -10200,16 +10128,16 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
((double)p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64) * (p->rob_alloc_compressed ? l->AverageDCCCompressionRate : 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 * l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 * l->AverageDCCCompressionRate);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MetaFIFOSizeInKEntries = %u\n", __func__, p->MetaFIFOSizeInKEntries);
- dml2_printf("DML::%s: ZeroSizeBufferEntries = %u\n", __func__, p->ZeroSizeBufferEntries);
- dml2_printf("DML::%s: AverageZeroSizeCompressionRate = %f\n", __func__, l->AverageZeroSizeCompressionRate);
- dml2_printf("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: MetaFIFOSizeInKEntries = %u\n", __func__, p->MetaFIFOSizeInKEntries);
+ DML_LOG_VERBOSE("DML::%s: ZeroSizeBufferEntries = %u\n", __func__, p->ZeroSizeBufferEntries);
+ DML_LOG_VERBOSE("DML::%s: AverageZeroSizeCompressionRate = %f\n", __func__, l->AverageZeroSizeCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
#endif
*p->StutterPeriod = 0;
@@ -10220,15 +10148,15 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->LinesInDETYRoundedDownToSwath = math_floor2(l->LinesInDETY, p->SwathHeightY[k]);
l->DETBufferingTimeY = l->LinesInDETYRoundedDownToSwath * ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, DETBufferSizeY = %u (%u kbytes)\n", __func__, k, p->DETBufferSizeY[k], p->DETBufferSizeY[k] / 1024);
- dml2_printf("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
- dml2_printf("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
- dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, p->TotalDataReadBandwidth);
- dml2_printf("DML::%s: k=%u, LinesInDETY = %f\n", __func__, k, l->LinesInDETY);
- dml2_printf("DML::%s: k=%u, LinesInDETYRoundedDownToSwath = %f\n", __func__, k, l->LinesInDETYRoundedDownToSwath);
- dml2_printf("DML::%s: k=%u, VRatio = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%u, DETBufferingTimeY = %f\n", __func__, k, l->DETBufferingTimeY);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DETBufferSizeY = %u (%u kbytes)\n", __func__, k, p->DETBufferSizeY[k], p->DETBufferSizeY[k] / 1024);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, p->TotalDataReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LinesInDETY = %f\n", __func__, k, l->LinesInDETY);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LinesInDETYRoundedDownToSwath = %f\n", __func__, k, l->LinesInDETYRoundedDownToSwath);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VRatio = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DETBufferingTimeY = %f\n", __func__, k, l->DETBufferingTimeY);
#endif
if (!FoundCriticalSurface || l->DETBufferingTimeY < *p->StutterPeriod) {
@@ -10248,17 +10176,17 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->SinglePipeCriticalSurface = (p->DPPPerSurface[k] == 1);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, FoundCriticalSurface = %u\n", __func__, k, FoundCriticalSurface);
- dml2_printf("DML::%s: k=%u, StutterPeriod = %f\n", __func__, k, *p->StutterPeriod);
- dml2_printf("DML::%s: k=%u, MinTTUVBlankCriticalSurface = %f\n", __func__, k, l->MinTTUVBlankCriticalSurface);
- dml2_printf("DML::%s: k=%u, FrameTimeCriticalSurface= %f\n", __func__, k, l->FrameTimeCriticalSurface);
- dml2_printf("DML::%s: k=%u, VActiveTimeCriticalSurface = %f\n", __func__, k, l->VActiveTimeCriticalSurface);
- dml2_printf("DML::%s: k=%u, BytePerPixelYCriticalSurface = %u\n", __func__, k, l->BytePerPixelYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SwathWidthYCriticalSurface = %f\n", __func__, k, l->SwathWidthYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SwathHeightYCriticalSurface = %f\n", __func__, k, l->SwathHeightYCriticalSurface);
- dml2_printf("DML::%s: k=%u, BlockWidth256BytesYCriticalSurface = %u\n", __func__, k, l->BlockWidth256BytesYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SinglePlaneCriticalSurface = %u\n", __func__, k, l->SinglePlaneCriticalSurface);
- dml2_printf("DML::%s: k=%u, SinglePipeCriticalSurface = %u\n", __func__, k, l->SinglePipeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, FoundCriticalSurface = %u\n", __func__, k, FoundCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, StutterPeriod = %f\n", __func__, k, *p->StutterPeriod);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MinTTUVBlankCriticalSurface = %f\n", __func__, k, l->MinTTUVBlankCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, FrameTimeCriticalSurface= %f\n", __func__, k, l->FrameTimeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VActiveTimeCriticalSurface = %f\n", __func__, k, l->VActiveTimeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BytePerPixelYCriticalSurface = %u\n", __func__, k, l->BytePerPixelYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathWidthYCriticalSurface = %f\n", __func__, k, l->SwathWidthYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathHeightYCriticalSurface = %f\n", __func__, k, l->SwathHeightYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BlockWidth256BytesYCriticalSurface = %u\n", __func__, k, l->BlockWidth256BytesYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SinglePlaneCriticalSurface = %u\n", __func__, k, l->SinglePlaneCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SinglePipeCriticalSurface = %u\n", __func__, k, l->SinglePipeCriticalSurface);
#endif
}
}
@@ -10276,14 +10204,14 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = math_min2(*p->StutterPeriod * p->TotalDataReadBandwidth, l->EffectiveCompressedBufferSize);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: StutterPeriod*TotalDataReadBandwidth = %f (%f kbytes)\n", __func__, *p->StutterPeriod * p->TotalDataReadBandwidth, (*p->StutterPeriod * p->TotalDataReadBandwidth) / 1024.0);
- dml2_printf("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
- dml2_printf("DML::%s: PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = %f (%f kbytes)\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / 1024);
- dml2_printf("DML::%s: ReturnBW = %f\n", __func__, p->ReturnBW);
- dml2_printf("DML::%s: TotalDataReadBandwidth = %f\n", __func__, p->TotalDataReadBandwidth);
- dml2_printf("DML::%s: TotalRowReadBandwidth = %f\n", __func__, l->TotalRowReadBandwidth);
- dml2_printf("DML::%s: DCFCLK = %f\n", __func__, p->DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: StutterPeriod*TotalDataReadBandwidth = %f (%f kbytes)\n", __func__, *p->StutterPeriod * p->TotalDataReadBandwidth, (*p->StutterPeriod * p->TotalDataReadBandwidth) / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = %f (%f kbytes)\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / 1024);
+ DML_LOG_VERBOSE("DML::%s: ReturnBW = %f\n", __func__, p->ReturnBW);
+ DML_LOG_VERBOSE("DML::%s: TotalDataReadBandwidth = %f\n", __func__, p->TotalDataReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: TotalRowReadBandwidth = %f\n", __func__, l->TotalRowReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: DCFCLK = %f\n", __func__, p->DCFCLK);
#endif
l->StutterBurstTime = l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer
@@ -10292,10 +10220,10 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
/ math_min2(p->DCFCLK * 64, p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) +
*p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Part 1 = %f\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / p->ReturnBW / (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate));
- dml2_printf("DML::%s: Part 2 = %f\n", __func__, (*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (p->DCFCLK * 64));
- dml2_printf("DML::%s: Part 3 = %f\n", __func__, *p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW);
- dml2_printf("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
+ DML_LOG_VERBOSE("DML::%s: Part 1 = %f\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / p->ReturnBW / (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate));
+ DML_LOG_VERBOSE("DML::%s: Part 2 = %f\n", __func__, (*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (p->DCFCLK * 64));
+ DML_LOG_VERBOSE("DML::%s: Part 3 = %f\n", __func__, *p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW);
+ DML_LOG_VERBOSE("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
#endif
l->TotalActiveWriteback = 0;
memset(l->stream_visited, 0, DML2_MAX_PLANES * sizeof(bool));
@@ -10324,9 +10252,9 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
if (l->TotalActiveWriteback == 0) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SRExitTime = %f\n", __func__, p->SRExitTime);
- dml2_printf("DML::%s: SRExitZ8Time = %f\n", __func__, p->SRExitZ8Time);
- dml2_printf("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
+ DML_LOG_VERBOSE("DML::%s: SRExitTime = %f\n", __func__, p->SRExitTime);
+ DML_LOG_VERBOSE("DML::%s: SRExitZ8Time = %f\n", __func__, p->SRExitZ8Time);
+ DML_LOG_VERBOSE("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
#endif
*p->StutterEfficiencyNotIncludingVBlank = math_max2(0., 1 - (p->SRExitTime + l->StutterBurstTime) / *p->StutterPeriod) * 100;
*p->Z8StutterEfficiencyNotIncludingVBlank = math_max2(0., 1 - (p->SRExitZ8Time + l->StutterBurstTime) / *p->StutterPeriod) * 100;
@@ -10339,11 +10267,11 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
*p->Z8NumberOfStutterBurstsPerFrame = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VActiveTimeCriticalSurface = %f\n", __func__, l->VActiveTimeCriticalSurface);
- dml2_printf("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: Z8StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->NumberOfStutterBurstsPerFrame);
- dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
+ DML_LOG_VERBOSE("DML::%s: VActiveTimeCriticalSurface = %f\n", __func__, l->VActiveTimeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank);
+ DML_LOG_VERBOSE("DML::%s: NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->NumberOfStutterBurstsPerFrame);
+ DML_LOG_VERBOSE("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
#endif
if (*p->StutterEfficiencyNotIncludingVBlank > 0) {
@@ -10358,7 +10286,7 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
if (*p->Z8StutterEfficiencyNotIncludingVBlank > 0) {
- LastZ8StutterPeriod = l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod;
+ //LastZ8StutterPeriod = l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod;
if (!((p->SynchronizeTimings || TotalNumberOfActiveOTG == 1) && SameTiming)) {
*p->Z8StutterEfficiency = *p->Z8StutterEfficiencyNotIncludingVBlank;
} else {
@@ -10370,25 +10298,25 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TotalNumberOfActiveOTG = %u\n", __func__, TotalNumberOfActiveOTG);
- dml2_printf("DML::%s: SameTiming = %u\n", __func__, SameTiming);
- dml2_printf("DML::%s: SynchronizeTimings = %u\n", __func__, p->SynchronizeTimings);
- dml2_printf("DML::%s: LastZ8StutterPeriod = %f\n", __func__, LastZ8StutterPeriod);
- dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Z8StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
- dml2_printf("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
- dml2_printf("DML::%s: StutterEfficiency = %f\n", __func__, *p->StutterEfficiency);
- dml2_printf("DML::%s: Z8StutterEfficiency = %f\n", __func__, *p->Z8StutterEfficiency);
- dml2_printf("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
+ DML_LOG_VERBOSE("DML::%s: TotalNumberOfActiveOTG = %u\n", __func__, TotalNumberOfActiveOTG);
+ DML_LOG_VERBOSE("DML::%s: SameTiming = %u\n", __func__, SameTiming);
+ DML_LOG_VERBOSE("DML::%s: SynchronizeTimings = %u\n", __func__, p->SynchronizeTimings);
+ DML_LOG_VERBOSE("DML::%s: LastZ8StutterPeriod = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank > 0 ? l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod : 0);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Z8StutterEnterPlusExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
+ DML_LOG_VERBOSE("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
+ DML_LOG_VERBOSE("DML::%s: StutterEfficiency = %f\n", __func__, *p->StutterEfficiency);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEfficiency = %f\n", __func__, *p->Z8StutterEfficiency);
+ DML_LOG_VERBOSE("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
+ DML_LOG_VERBOSE("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
#endif
*p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = !(!p->UnboundedRequestEnabled && (p->NumberOfActiveSurfaces == 1) && l->SinglePlaneCriticalSurface && l->SinglePipeCriticalSurface);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DETBufferSizeYCriticalSurface = %u\n", __func__, l->DETBufferSizeYCriticalSurface);
- dml2_printf("DML::%s: PixelChunkSizeInKByte = %u\n", __func__, p->PixelChunkSizeInKByte);
- dml2_printf("DML::%s: DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = %u\n", __func__, *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeYCriticalSurface = %u\n", __func__, l->DETBufferSizeYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: PixelChunkSizeInKByte = %u\n", __func__, p->PixelChunkSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = %u\n", __func__, *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE);
#endif
}
@@ -10422,7 +10350,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
double max_uclk_mhz = 0;
double min_return_latency_in_DCFCLK_cycles = 0;
- dml2_printf("DML::%s: --- START --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: --- START --- \n", __func__);
memset(&mode_lib->scratch, 0, sizeof(struct dml2_core_internal_scratch));
memset(&mode_lib->mp, 0, sizeof(struct dml2_core_internal_mode_program));
@@ -10444,13 +10372,13 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
- DML2_ASSERT(cfg_support_info->stream_support_info[stream_index].odms_used <= 4);
- DML2_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 4 ||
+ DML_ASSERT(cfg_support_info->stream_support_info[stream_index].odms_used <= 4);
+ DML_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 4 ||
cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 2 ||
cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
if (cfg_support_info->stream_support_info[stream_index].odms_used > 1)
- DML2_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
+ DML_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
switch (cfg_support_info->stream_support_info[stream_index].odms_used) {
case (4):
@@ -10476,51 +10404,51 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
mode_lib->mp.NoOfDPP[k] = cfg_support_info->plane_support_info[k].dpps_used;
mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4x.dppclk_khz / 1000.0;
- DML2_ASSERT(mode_lib->mp.Dppclk[k] > 0);
+ DML_ASSERT(mode_lib->mp.Dppclk[k] > 0);
}
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4x.dscclk_khz / 1000.0;
- dml2_printf("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]);
}
mode_lib->mp.Dispclk = programming->min_clocks.dcn4x.dispclk_khz / 1000.0;
mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4x.deepsleep_dcfclk_khz / 1000.0;
- DML2_ASSERT(mode_lib->mp.Dcfclk > 0);
- DML2_ASSERT(mode_lib->mp.FabricClock > 0);
- DML2_ASSERT(mode_lib->mp.dram_bw_mbps > 0);
- DML2_ASSERT(mode_lib->mp.uclk_freq_mhz > 0);
- DML2_ASSERT(mode_lib->mp.GlobalDPPCLK > 0);
- DML2_ASSERT(mode_lib->mp.Dispclk > 0);
- DML2_ASSERT(mode_lib->mp.DCFCLKDeepSleep > 0);
- DML2_ASSERT(s->SOCCLK > 0);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_planes = %u\n", __func__, s->num_active_planes);
- dml2_printf("DML::%s: num_active_pipes = %u\n", __func__, mode_lib->mp.num_active_pipes);
- dml2_printf("DML::%s: Dcfclk = %f\n", __func__, mode_lib->mp.Dcfclk);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, mode_lib->mp.FabricClock);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->mp.dram_bw_mbps);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->mp.uclk_freq_mhz);
- dml2_printf("DML::%s: Dispclk = %f\n", __func__, mode_lib->mp.Dispclk);
+ DML_ASSERT(mode_lib->mp.Dcfclk > 0);
+ DML_ASSERT(mode_lib->mp.FabricClock > 0);
+ DML_ASSERT(mode_lib->mp.dram_bw_mbps > 0);
+ DML_ASSERT(mode_lib->mp.uclk_freq_mhz > 0);
+ DML_ASSERT(mode_lib->mp.GlobalDPPCLK > 0);
+ DML_ASSERT(mode_lib->mp.Dispclk > 0);
+ DML_ASSERT(mode_lib->mp.DCFCLKDeepSleep > 0);
+ DML_ASSERT(s->SOCCLK > 0);
+
+#ifdef __DML_VBA_DEBUG__
+ DML_LOG_VERBOSE("DML::%s: num_active_planes = %u\n", __func__, s->num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: num_active_pipes = %u\n", __func__, mode_lib->mp.num_active_pipes);
+ DML_LOG_VERBOSE("DML::%s: Dcfclk = %f\n", __func__, mode_lib->mp.Dcfclk);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, mode_lib->mp.FabricClock);
+ DML_LOG_VERBOSE("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->mp.dram_bw_mbps);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->mp.uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: Dispclk = %f\n", __func__, mode_lib->mp.Dispclk);
for (k = 0; k < s->num_active_planes; ++k) {
- dml2_printf("DML::%s: Dppclk[%0d] = %f\n", __func__, k, mode_lib->mp.Dppclk[k]);
- }
- dml2_printf("DML::%s: GlobalDPPCLK = %f\n", __func__, mode_lib->mp.GlobalDPPCLK);
- dml2_printf("DML::%s: DCFCLKDeepSleep = %f\n", __func__, mode_lib->mp.DCFCLKDeepSleep);
- dml2_printf("DML::%s: SOCCLK = %f\n", __func__, s->SOCCLK);
- dml2_printf("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: min_clk_table min_fclk_khz = %d\n", __func__, min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz);
- dml2_printf("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config));
+ DML_LOG_VERBOSE("DML::%s: Dppclk[%0d] = %f\n", __func__, k, mode_lib->mp.Dppclk[k]);
+ }
+ DML_LOG_VERBOSE("DML::%s: GlobalDPPCLK = %f\n", __func__, mode_lib->mp.GlobalDPPCLK);
+ DML_LOG_VERBOSE("DML::%s: DCFCLKDeepSleep = %f\n", __func__, mode_lib->mp.DCFCLKDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: SOCCLK = %f\n", __func__, s->SOCCLK);
+ DML_LOG_VERBOSE("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: min_clk_table min_fclk_khz = %ld\n", __func__, min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz);
+ DML_LOG_VERBOSE("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config));
for (k = 0; k < mode_lib->mp.num_active_pipes; ++k) {
- dml2_printf("DML::%s: pipe=%d is in plane=%d\n", __func__, k, mode_lib->mp.pipe_plane[k]);
- dml2_printf("DML::%s: Per-plane DPPPerSurface[%0d] = %d\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: pipe=%d is in plane=%d\n", __func__, k, mode_lib->mp.pipe_plane[k]);
+ DML_LOG_VERBOSE("DML::%s: Per-plane DPPPerSurface[%0d] = %d\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
}
for (k = 0; k < s->num_active_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: plane_%d: reserved_vblank_time_ns = %lu\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
#endif
CalculateMaxDETAndMinCompressedBufferSize(
@@ -10617,8 +10545,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
mode_lib->mp.vactive_sw_bw_l[k] = mode_lib->mp.SwathWidthSingleDPPY[k] * mode_lib->mp.BytePerPixelY[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
mode_lib->mp.vactive_sw_bw_c[k] = mode_lib->mp.SwathWidthSingleDPPC[k] * mode_lib->mp.BytePerPixelC[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- dml2_printf("DML::%s: vactive_sw_bw_l[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: vactive_sw_bw_c[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_l[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_c[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
}
CalculateSwathAndDETConfiguration_params->display_cfg = display_cfg;
@@ -11097,7 +11025,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
for (k = 0; k < s->num_active_planes; ++k) {
- bool cursor_not_enough_urgent_latency_hiding = 0;
+ bool cursor_not_enough_urgent_latency_hiding = false;
s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
@@ -11173,8 +11101,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.WritebackDelay[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
- dml2_printf("DML::%s: k=%u WritebackDelay = %f\n", __func__, k, mode_lib->mp.WritebackDelay[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u WritebackDelay = %f\n", __func__, k, mode_lib->mp.WritebackDelay[k]);
#endif
}
@@ -11183,7 +11111,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->immediate_flip_required = s->immediate_flip_required || display_cfg->plane_descriptors[k].immediate_flip;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: immediate_flip_required = %u\n", __func__, s->immediate_flip_required);
+ DML_LOG_VERBOSE("DML::%s: immediate_flip_required = %u\n", __func__, s->immediate_flip_required);
#endif
if (s->num_active_planes > 1) {
@@ -11219,12 +11147,12 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->DestinationLineTimesForPrefetchLessThan2 = false;
s->VRatioPrefetchMoreThanMax = false;
- dml2_printf("DML::%s: Start one iteration of prefetch schedule evaluation\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Start one iteration of prefetch schedule evaluation\n", __func__);
for (k = 0; k < s->num_active_planes; ++k) {
struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
- dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
mode_lib->mp.TWait[k] = CalculateTWait(
display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
mode_lib->mp.UrgentLatency,
@@ -11261,7 +11189,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
#endif
CalculatePrefetchSchedule_params->display_cfg = display_cfg;
CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
@@ -11356,7 +11284,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.impacted_prefetch_margin_us[k] = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%0u NoTimeToPrefetch=%0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u NoTimeToPrefetch=%0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
#endif
mode_lib->mp.VStartupMin[k] = s->MaxVStartupLines[k];
} // for k
@@ -11366,9 +11294,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
if (mode_lib->mp.NoTimeToPrefetch[k] == true ||
mode_lib->mp.NotEnoughTimeForDynamicMetadata[k] ||
mode_lib->mp.DSTYAfterScaler[k] > 8) {
- dml2_printf("DML::%s: k=%u, NoTimeToPrefetch = %0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
- dml2_printf("DML::%s: k=%u, NotEnoughTimeForDynamicMetadata=%u\n", __func__, k, mode_lib->mp.NotEnoughTimeForDynamicMetadata[k]);
- dml2_printf("DML::%s: k=%u, DSTYAfterScaler=%u (should be <= 0)\n", __func__, k, mode_lib->mp.DSTYAfterScaler[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NoTimeToPrefetch = %0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NotEnoughTimeForDynamicMetadata=%u\n", __func__, k, mode_lib->mp.NotEnoughTimeForDynamicMetadata[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DSTYAfterScaler=%u (should be <= 0)\n", __func__, k, mode_lib->mp.DSTYAfterScaler[k]);
mode_lib->mp.PrefetchModeSupported = false;
}
if (mode_lib->mp.dst_y_prefetch[k] < 2)
@@ -11377,24 +11305,24 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
if (mode_lib->mp.VRatioPrefetchY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
mode_lib->mp.VRatioPrefetchC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
s->VRatioPrefetchMoreThanMax = true;
- dml2_printf("DML::%s: k=%d, VRatioPrefetchY=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: k=%d, VRatioPrefetchC=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPrefetchY=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPrefetchC=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
}
if (mode_lib->mp.NotEnoughUrgentLatencyHiding[k]) {
- dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHiding = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHiding[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NotEnoughUrgentLatencyHiding = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHiding[k]);
mode_lib->mp.PrefetchModeSupported = false;
}
}
if (s->VRatioPrefetchMoreThanMax == true || s->DestinationLineTimesForPrefetchLessThan2 == true) {
- dml2_printf("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
- dml2_printf("DML::%s: DestinationLineTimesForPrefetchLessThan2 = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
+ DML_LOG_VERBOSE("DML::%s: DestinationLineTimesForPrefetchLessThan2 = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2);
mode_lib->mp.PrefetchModeSupported = false;
}
- dml2_printf("DML::%s: Prefetch schedule is %sOK at vstartup = %u\n", __func__,
+ DML_LOG_VERBOSE("DML::%s: Prefetch schedule is %sOK at vstartup = %u\n", __func__,
mode_lib->mp.PrefetchModeSupported ? "" : "NOT ", CalculatePrefetchSchedule_params->VStartup);
// Prefetch schedule OK, now check prefetch bw
@@ -11422,24 +11350,24 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
&mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%0u DPPPerSurface=%u\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorLuma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLuma[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorChroma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChroma[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorLumaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLumaPre[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorChromaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChromaPre[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u DPPPerSurface=%u\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorLuma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorChroma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorLumaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLumaPre[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorChromaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChromaPre[k]);
- dml2_printf("DML::%s: k=%0u VRatioPrefetchY=%f\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k]);
- dml2_printf("DML::%s: k=%0u VRatioY=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%0u VRatioPrefetchY=%f\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u VRatioY=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%0u prefetch_vmrow_bw=%f\n", __func__, k, mode_lib->mp.prefetch_vmrow_bw[k]);
- dml2_printf("DML::%s: k=%0u vactive_sw_bw_l=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: k=%0u vactive_sw_bw_c=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
- dml2_printf("DML::%s: k=%0u cursor_bw=%f\n", __func__, k, mode_lib->mp.cursor_bw[k]);
- dml2_printf("DML::%s: k=%0u dpte_row_bw=%f\n", __func__, k, mode_lib->mp.dpte_row_bw[k]);
- dml2_printf("DML::%s: k=%0u meta_row_bw=%f\n", __func__, k, mode_lib->mp.meta_row_bw[k]);
- dml2_printf("DML::%s: k=%0u RequiredPrefetchPixelDataBWLuma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k]);
- dml2_printf("DML::%s: k=%0u RequiredPrefetchPixelDataBWChroma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k]);
- dml2_printf("DML::%s: k=%0u prefetch_cursor_bw=%f\n", __func__, k, mode_lib->mp.prefetch_cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u prefetch_vmrow_bw=%f\n", __func__, k, mode_lib->mp.prefetch_vmrow_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u vactive_sw_bw_l=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u vactive_sw_bw_c=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u cursor_bw=%f\n", __func__, k, mode_lib->mp.cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u dpte_row_bw=%f\n", __func__, k, mode_lib->mp.dpte_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u meta_row_bw=%f\n", __func__, k, mode_lib->mp.meta_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u RequiredPrefetchPixelDataBWLuma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u RequiredPrefetchPixelDataBWChroma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u prefetch_cursor_bw=%f\n", __func__, k, mode_lib->mp.prefetch_cursor_bw[k]);
#endif
}
@@ -11503,11 +11431,11 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.urg_bandwidth_available);
if (!mode_lib->mp.PrefetchModeSupported)
- dml2_printf("DML::%s: Bandwidth not sufficient for prefetch!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Bandwidth not sufficient for prefetch!\n", __func__);
for (k = 0; k < s->num_active_planes; ++k) {
if (mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]) {
- dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHidingPre = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NotEnoughUrgentLatencyHidingPre = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
mode_lib->mp.PrefetchModeSupported = false;
}
}
@@ -11533,12 +11461,12 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
}
mode_lib->mp.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->mp.NoOfDPP[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k = %u\n", __func__, k);
- dml2_printf("DML::%s: DPPPerSurface = %u\n", __func__, mode_lib->mp.NoOfDPP[k]);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, mode_lib->mp.vm_bytes[k]);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, mode_lib->mp.PixelPTEBytesPerRow[k]);
- dml2_printf("DML::%s: meta_row_bytes = %u\n", __func__, mode_lib->mp.meta_row_bytes[k]);
- dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, mode_lib->mp.TotImmediateFlipBytes);
+ DML_LOG_VERBOSE("DML::%s: k = %u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: DPPPerSurface = %u\n", __func__, mode_lib->mp.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %u\n", __func__, mode_lib->mp.vm_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, mode_lib->mp.PixelPTEBytesPerRow[k]);
+ DML_LOG_VERBOSE("DML::%s: meta_row_bytes = %u\n", __func__, mode_lib->mp.meta_row_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: TotImmediateFlipBytes = %u\n", __func__, mode_lib->mp.TotImmediateFlipBytes);
#endif
}
for (k = 0; k < s->num_active_planes; ++k) {
@@ -11631,13 +11559,13 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.urg_bandwidth_available);
if (!mode_lib->mp.ImmediateFlipSupported)
- dml2_printf("DML::%s: Bandwidth not sufficient for flip!", __func__);
+ DML_LOG_VERBOSE("DML::%s: Bandwidth not sufficient for flip!", __func__);
for (k = 0; k < s->num_active_planes; ++k) {
if (display_cfg->plane_descriptors[k].immediate_flip && mode_lib->mp.ImmediateFlipSupportedForPipe[k] == false) {
mode_lib->mp.ImmediateFlipSupported = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Pipe %0d not supporting iflip!\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: Pipe %0d not supporting iflip!\n", __func__, k);
#endif
}
}
@@ -11650,28 +11578,28 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.PrefetchAndImmediateFlipSupported = (mode_lib->mp.PrefetchModeSupported == true && (!must_support_iflip || mode_lib->mp.ImmediateFlipSupported));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PrefetchModeSupported = %u\n", __func__, mode_lib->mp.PrefetchModeSupported);
+ DML_LOG_VERBOSE("DML::%s: PrefetchModeSupported = %u\n", __func__, mode_lib->mp.PrefetchModeSupported);
for (k = 0; k < s->num_active_planes; ++k)
- dml2_printf("DML::%s: immediate_flip_required[%u] = %u\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
- dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, display_cfg->hostvm_enable);
- dml2_printf("DML::%s: ImmediateFlipSupported = %u\n", __func__, mode_lib->mp.ImmediateFlipSupported);
- dml2_printf("DML::%s: PrefetchAndImmediateFlipSupported = %u\n", __func__, mode_lib->mp.PrefetchAndImmediateFlipSupported);
+ DML_LOG_VERBOSE("DML::%s: immediate_flip_required[%u] = %u\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
+ DML_LOG_VERBOSE("DML::%s: HostVMEnable = %u\n", __func__, display_cfg->hostvm_enable);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipSupported = %u\n", __func__, mode_lib->mp.ImmediateFlipSupported);
+ DML_LOG_VERBOSE("DML::%s: PrefetchAndImmediateFlipSupported = %u\n", __func__, mode_lib->mp.PrefetchAndImmediateFlipSupported);
#endif
- dml2_printf("DML::%s: Done one iteration: k=%d, MaxVStartupLines=%u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: Done one iteration: k=%d, MaxVStartupLines=%u\n", __func__, k, s->MaxVStartupLines[k]);
}
for (k = 0; k < s->num_active_planes; ++k)
- dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
if (!mode_lib->mp.PrefetchAndImmediateFlipSupported) {
- dml2_printf("DML::%s: Bad, Prefetch and flip scheduling solution NOT found!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Bad, Prefetch and flip scheduling solution NOT found!\n", __func__);
} else {
- dml2_printf("DML::%s: Good, Prefetch and flip scheduling solution found\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Good, Prefetch and flip scheduling solution found\n", __func__);
// DCC Configuration
for (k = 0; k < s->num_active_planes; ++k) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calculate DCC configuration for surface k=%u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: Calculate DCC configuration for surface k=%u\n", __func__, k);
#endif
CalculateDCCConfiguration(
display_cfg->plane_descriptors[k].surface.dcc.enable,
@@ -11780,8 +11708,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->mp.Watermark, mode_lib->mp.pstate_keepout_dst_lines);
- dml2_printf("DML::%s: DEBUG stream_index = %0d\n", __func__, display_cfg->plane_descriptors[0].stream_index);
- dml2_printf("DML::%s: DEBUG PixelClock = %d kHz\n", __func__, (display_cfg->stream_descriptors[display_cfg->plane_descriptors[0].stream_index].timing.pixel_clock_khz));
+ DML_LOG_VERBOSE("DML::%s: DEBUG stream_index = %0d\n", __func__, display_cfg->plane_descriptors[0].stream_index);
+ DML_LOG_VERBOSE("DML::%s: DEBUG PixelClock = %ld kHz\n", __func__, (display_cfg->stream_descriptors[display_cfg->plane_descriptors[0].stream_index].timing.pixel_clock_khz));
//Display Pipeline Delivery Time in Prefetch, Groups
CalculatePixelDeliveryTimes(
@@ -11893,15 +11821,15 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.MinTTUVBlank[k] = mode_lib->mp.TCalc + mode_lib->mp.MinTTUVBlank[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MinTTUVBlank = %f (before vstartup margin)\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MinTTUVBlank = %f (before vstartup margin)\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
#endif
s->Tvstartup_margin = (s->MaxVStartupLines[k] - mode_lib->mp.VStartupMin[k]) * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
mode_lib->mp.MinTTUVBlank[k] = mode_lib->mp.MinTTUVBlank[k] + s->Tvstartup_margin;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, Tvstartup_margin = %f\n", __func__, k, s->Tvstartup_margin);
- dml2_printf("DML::%s: k=%u, MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
- dml2_printf("DML::%s: k=%u, MinTTUVBlank = %f\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, Tvstartup_margin = %f\n", __func__, k, s->Tvstartup_margin);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MinTTUVBlank = %f\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
#endif
mode_lib->mp.Tdmdl[k] = mode_lib->mp.Tdmdl[k] + s->Tvstartup_margin;
@@ -11920,9 +11848,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->blank_lines_remaining = (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active) - mode_lib->mp.VStartup[k];
if (s->blank_lines_remaining < 0) {
- dml2_printf("ERROR: Vstartup is larger than vblank!?\n");
+ DML_LOG_VERBOSE("ERROR: Vstartup is larger than vblank!?\n");
s->blank_lines_remaining = 0;
- DML2_ASSERT(0);
+ DML_ASSERT(0);
}
mode_lib->mp.MIN_DST_Y_NEXT_START[k] = s->dlg_vblank_start + s->blank_lines_remaining + s->LSetup;
@@ -11936,18 +11864,18 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k] = false;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, VStartup = %u (max)\n", __func__, k, mode_lib->mp.VStartup[k]);
- dml2_printf("DML::%s: k=%u, VStartupMin = %u (max)\n", __func__, k, mode_lib->mp.VStartupMin[k]);
- dml2_printf("DML::%s: k=%u, VUpdateOffsetPix = %u\n", __func__, k, mode_lib->mp.VUpdateOffsetPix[k]);
- dml2_printf("DML::%s: k=%u, VUpdateWidthPix = %u\n", __func__, k, mode_lib->mp.VUpdateWidthPix[k]);
- dml2_printf("DML::%s: k=%u, VReadyOffsetPix = %u\n", __func__, k, mode_lib->mp.VReadyOffsetPix[k]);
- dml2_printf("DML::%s: k=%u, HTotal = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total);
- dml2_printf("DML::%s: k=%u, VTotal = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total);
- dml2_printf("DML::%s: k=%u, VActive = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active);
- dml2_printf("DML::%s: k=%u, VFrontPorch = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch);
- dml2_printf("DML::%s: k=%u, TSetup = %f\n", __func__, k, mode_lib->mp.TSetup[k]);
- dml2_printf("DML::%s: k=%u, MIN_DST_Y_NEXT_START = %f\n", __func__, k, mode_lib->mp.MIN_DST_Y_NEXT_START[k]);
- dml2_printf("DML::%s: k=%u, VREADY_AT_OR_AFTER_VSYNC = %u\n", __func__, k, mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VStartup = %u (max)\n", __func__, k, mode_lib->mp.VStartup[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VStartupMin = %u (max)\n", __func__, k, mode_lib->mp.VStartupMin[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VUpdateOffsetPix = %u\n", __func__, k, mode_lib->mp.VUpdateOffsetPix[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VUpdateWidthPix = %u\n", __func__, k, mode_lib->mp.VUpdateWidthPix[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VReadyOffsetPix = %u\n", __func__, k, mode_lib->mp.VReadyOffsetPix[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, HTotal = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VTotal = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VActive = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VFrontPorch = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TSetup = %f\n", __func__, k, mode_lib->mp.TSetup[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MIN_DST_Y_NEXT_START = %f\n", __func__, k, mode_lib->mp.MIN_DST_Y_NEXT_START[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VREADY_AT_OR_AFTER_VSYNC = %u\n", __func__, k, mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k]);
#endif
}
@@ -11969,9 +11897,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
mode_lib->mp.TotalDataReadBandwidth = mode_lib->mp.TotalDataReadBandwidth + mode_lib->mp.vactive_sw_bw_l[k] + mode_lib->mp.vactive_sw_bw_c[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, mode_lib->mp.TotalDataReadBandwidth);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, mode_lib->mp.TotalDataReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
#endif
}
@@ -12051,28 +11979,28 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
min_return_latency_in_DCFCLK_cycles = (min_return_uclk_cycles / max_uclk_mhz + min_return_fclk_cycles / max_fclk_mhz) * hard_minimum_dcfclk_mhz;
mode_lib->mp.min_return_latency_in_dcfclk = (unsigned int)min_return_latency_in_DCFCLK_cycles;
mode_lib->mp.dcfclk_deep_sleep_hysteresis = (unsigned int)math_max2(32, (double)mode_lib->ip.pixel_chunk_size_kbytes * 1024 * 3 / 4 / 64 - min_return_latency_in_DCFCLK_cycles);
- DML2_ASSERT(mode_lib->mp.dcfclk_deep_sleep_hysteresis < 256);
+ DML_ASSERT(mode_lib->mp.dcfclk_deep_sleep_hysteresis < 256);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_fclk_mhz = %f\n", __func__, max_fclk_mhz);
- dml2_printf("DML::%s: max_uclk_mhz = %f\n", __func__, max_uclk_mhz);
- dml2_printf("DML::%s: hard_minimum_dcfclk_mhz = %f\n", __func__, hard_minimum_dcfclk_mhz);
- dml2_printf("DML::%s: min_return_uclk_cycles = %d\n", __func__, min_return_uclk_cycles);
- dml2_printf("DML::%s: min_return_fclk_cycles = %d\n", __func__, min_return_fclk_cycles);
- dml2_printf("DML::%s: min_return_latency_in_DCFCLK_cycles = %f\n", __func__, min_return_latency_in_DCFCLK_cycles);
- dml2_printf("DML::%s: dcfclk_deep_sleep_hysteresis = %d \n", __func__, mode_lib->mp.dcfclk_deep_sleep_hysteresis);
- dml2_printf("DML::%s: --- END --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: max_fclk_mhz = %f\n", __func__, max_fclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_uclk_mhz = %f\n", __func__, max_uclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: hard_minimum_dcfclk_mhz = %f\n", __func__, hard_minimum_dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: min_return_uclk_cycles = %ld\n", __func__, min_return_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: min_return_fclk_cycles = %ld\n", __func__, min_return_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: min_return_latency_in_DCFCLK_cycles = %f\n", __func__, min_return_latency_in_DCFCLK_cycles);
+ DML_LOG_VERBOSE("DML::%s: dcfclk_deep_sleep_hysteresis = %d \n", __func__, mode_lib->mp.dcfclk_deep_sleep_hysteresis);
+ DML_LOG_VERBOSE("DML::%s: --- END --- \n", __func__);
#endif
return (in_out_params->mode_lib->mp.PrefetchAndImmediateFlipSupported);
}
bool dml2_core_calcs_mode_programming_ex(struct dml2_core_calcs_mode_programming_ex *in_out_params)
{
- dml2_printf("DML::%s: ------------- START ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: ------------- START ----------\n", __func__);
bool result = dml_core_mode_programming(in_out_params);
- dml2_printf("DML::%s: result = %0d\n", __func__, result);
- dml2_printf("DML::%s: ------------- DONE ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: result = %0d\n", __func__, result);
+ DML_LOG_VERBOSE("DML::%s: ------------- DONE ----------\n", __func__);
return result;
}
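The bulk of this patch is a mechanical rename from dml2_printf()/DML2_ASSERT() to DML_LOG_VERBOSE()/DML_ASSERT(). As a rough sketch of what such wrappers typically expand to (the real definitions live in the DML debug header; the printk/WARN_ON mapping below is an assumption, not the kernel's actual definition):

	/* hypothetical macro definitions, for illustration only */
	#define DML_LOG_VERBOSE(fmt, ...) \
		printk(KERN_DEBUG fmt, ##__VA_ARGS__)

	#define DML_ASSERT(expr) \
		WARN_ON(!(expr))	/* flag a violated invariant, keep going */

Note that DML_LOG_VERBOSE is called unconditionally in a few places while most call sites stay wrapped in #ifdef __DML_VBA_DEBUG__, so the macro itself is presumably compiled in regardless of that define.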
@@ -12130,16 +12058,16 @@ void dml2_core_calcs_get_dpte_row_height(
unsigned int MacroTileHeight = is_plane1 ? MacroTileHeightC : MacroTileHeightY;
unsigned int PTEBufferSizeInRequests = is_plane1 ? mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma : mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML: %s: is_plane1 = %u\n", __func__, is_plane1);
- dml2_printf("DML: %s: BytePerPixel = %u\n", __func__, BytePerPixel);
- dml2_printf("DML: %s: BlockHeight256Bytes = %u\n", __func__, BlockHeight256Bytes);
- dml2_printf("DML: %s: BlockWidth256Bytes = %u\n", __func__, BlockWidth256Bytes);
- dml2_printf("DML: %s: MacroTileWidth = %u\n", __func__, MacroTileWidth);
- dml2_printf("DML: %s: MacroTileHeight = %u\n", __func__, MacroTileHeight);
- dml2_printf("DML: %s: PTEBufferSizeInRequests = %u\n", __func__, PTEBufferSizeInRequests);
- dml2_printf("DML: %s: dpte_buffer_size_in_pte_reqs_luma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma);
- dml2_printf("DML: %s: dpte_buffer_size_in_pte_reqs_chroma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma);
- dml2_printf("DML: %s: GPUVMMinPageSizeKBytes = %u\n", __func__, GPUVMMinPageSizeKBytes);
+ DML_LOG_VERBOSE("DML: %s: is_plane1 = %u\n", __func__, is_plane1);
+ DML_LOG_VERBOSE("DML: %s: BytePerPixel = %u\n", __func__, BytePerPixel);
+ DML_LOG_VERBOSE("DML: %s: BlockHeight256Bytes = %u\n", __func__, BlockHeight256Bytes);
+ DML_LOG_VERBOSE("DML: %s: BlockWidth256Bytes = %u\n", __func__, BlockWidth256Bytes);
+ DML_LOG_VERBOSE("DML: %s: MacroTileWidth = %u\n", __func__, MacroTileWidth);
+ DML_LOG_VERBOSE("DML: %s: MacroTileHeight = %u\n", __func__, MacroTileHeight);
+ DML_LOG_VERBOSE("DML: %s: PTEBufferSizeInRequests = %u\n", __func__, PTEBufferSizeInRequests);
+ DML_LOG_VERBOSE("DML: %s: dpte_buffer_size_in_pte_reqs_luma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma);
+ DML_LOG_VERBOSE("DML: %s: dpte_buffer_size_in_pte_reqs_chroma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma);
+ DML_LOG_VERBOSE("DML: %s: GPUVMMinPageSizeKBytes = %u\n", __func__, GPUVMMinPageSizeKBytes);
#endif
unsigned int dummy_integer[21];
@@ -12193,16 +12121,16 @@ void dml2_core_calcs_get_dpte_row_height(
CalculateVMAndRowBytes(&mode_lib->scratch.calculate_vm_and_row_bytes_params);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML: %s: dpte_row_height = %u\n", __func__, *dpte_row_height);
+ DML_LOG_VERBOSE("DML: %s: dpte_row_height = %u\n", __func__, *dpte_row_height);
#endif
}
static bool is_dual_plane(enum dml2_source_format_class source_format)
{
- bool ret_val = 0;
+ bool ret_val = false;
if ((source_format == dml2_420_12) || (source_format == dml2_420_8) || (source_format == dml2_420_10) || (source_format == dml2_rgbe_alpha))
- ret_val = 1;
+ ret_val = true;
return ret_val;
}
@@ -12220,6 +12148,8 @@ static void rq_dlg_get_wm_regs(const struct dml2_display_cfg *display_cfg, const
wm_regs->fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
wm_regs->sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
wm_regs->sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
+ wm_regs->sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ wm_regs->sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
wm_regs->temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
wm_regs->uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
wm_regs->urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
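The watermark stores above, including the newly added Z8 stutter enter/exit pair, all follow one conversion: DML computes watermarks in microseconds, while the DCHUBBUB registers take refclk cycles, so cycles = time_us * refclk_mhz (MHz times us cancels to a pure cycle count). A small sketch with an assumed clock:

	/* illustration only; the 100 MHz refclk is an assumed value */
	static unsigned int wm_us_to_refclk_cycles(double watermark_us,
						   double refclk_freq_in_mhz)
	{
		return (unsigned int)(watermark_us * refclk_freq_in_mhz);
	}

	/* a 9.5 us Z8 stutter-exit watermark at 100 MHz -> 950 refclk cycles */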
@@ -12246,11 +12176,11 @@ void dml2_core_calcs_cursor_dlg_reg(struct dml2_cursor_dlg_regs *cursor_dlg_regs
cursor_dlg_regs->dst_x_offset = (unsigned int) ((dst_x_offset > 0) ? dst_x_offset : 0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG::%s: cursor_x_position=%d\n", __func__, p->cursor_x_position);
- dml2_printf("DML_DLG::%s: dlg_refclk_mhz=%f\n", __func__, p->dlg_refclk_mhz);
- dml2_printf("DML_DLG::%s: pixel_rate_mhz=%f\n", __func__, p->pixel_rate_mhz);
- dml2_printf("DML_DLG::%s: dst_x_offset=%d\n", __func__, dst_x_offset);
- dml2_printf("DML_DLG::%s: dst_x_offset=%d (reg)\n", __func__, cursor_dlg_regs->dst_x_offset);
+ DML_LOG_VERBOSE("DML_DLG::%s: cursor_x_position=%d\n", __func__, p->cursor_x_position);
+ DML_LOG_VERBOSE("DML_DLG::%s: dlg_refclk_mhz=%f\n", __func__, p->dlg_refclk_mhz);
+ DML_LOG_VERBOSE("DML_DLG::%s: pixel_rate_mhz=%f\n", __func__, p->pixel_rate_mhz);
+ DML_LOG_VERBOSE("DML_DLG::%s: dst_x_offset=%d\n", __func__, dst_x_offset);
+ DML_LOG_VERBOSE("DML_DLG::%s: dst_x_offset=%d (reg)\n", __func__, cursor_dlg_regs->dst_x_offset);
#endif
cursor_dlg_regs->chunk_hdl_adjust = 3;
@@ -12286,7 +12216,7 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
double stored_swath_c_bytes;
bool is_phantom_pipe;
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] start\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe[%d] start\n", __func__, pipe_idx);
pixel_chunk_bytes = (unsigned int)(mode_lib->ip.pixel_chunk_size_kbytes * 1024);
min_pixel_chunk_bytes = (unsigned int)(mode_lib->ip.min_pixel_chunk_size_bytes);
@@ -12329,19 +12259,19 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
if (sw_mode == dml2_sw_linear && display_cfg->gpuvm_enable) {
unsigned int p0_pte_row_height_linear = (unsigned int)(dml_get_dpte_row_height_linear_l(mode_lib, pipe_idx));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: p0_pte_row_height_linear = %u\n", __func__, p0_pte_row_height_linear);
+ DML_LOG_VERBOSE("DML_DLG: %s: p0_pte_row_height_linear = %u\n", __func__, p0_pte_row_height_linear);
#endif
- DML2_ASSERT(p0_pte_row_height_linear >= 8);
+ DML_ASSERT(p0_pte_row_height_linear >= 8);
rq_regs->rq_regs_l.pte_row_height_linear = math_log2_approx(p0_pte_row_height_linear) - 3;
if (dual_plane) {
unsigned int p1_pte_row_height_linear = (unsigned int)(dml_get_dpte_row_height_linear_c(mode_lib, pipe_idx));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: p1_pte_row_height_linear = %u\n", __func__, p1_pte_row_height_linear);
+ DML_LOG_VERBOSE("DML_DLG: %s: p1_pte_row_height_linear = %u\n", __func__, p1_pte_row_height_linear);
#endif
if (sw_mode == dml2_sw_linear) {
- DML2_ASSERT(p1_pte_row_height_linear >= 8);
+ DML_ASSERT(p1_pte_row_height_linear >= 8);
}
rq_regs->rq_regs_c.pte_row_height_linear = math_log2_approx(p1_pte_row_height_linear) - 3;
}
@@ -12375,12 +12305,12 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
if (stored_swath_l_bytes / stored_swath_c_bytes <= 1.5) {
detile_buf_plane1_addr = (unsigned int)(detile_buf_size_in_bytes / 2.0 / 1024.0); // half to chroma
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_plane1_addr = %d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr);
#endif
} else {
detile_buf_plane1_addr = (unsigned int)(dml_round_to_multiple((unsigned int)((2.0 * detile_buf_size_in_bytes) / 3.0), 1024, 0) / 1024.0); // 2/3 to luma
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d (1/3 chroma)\n", __func__, detile_buf_plane1_addr);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_plane1_addr = %d (1/3 chroma)\n", __func__, detile_buf_plane1_addr);
#endif
}
}
@@ -12388,15 +12318,15 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
rq_regs->plane1_base_address = detile_buf_plane1_addr;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: is_phantom_pipe = %d\n", __func__, is_phantom_pipe);
- dml2_printf("DML_DLG: %s: stored_swath_l_bytes = %f\n", __func__, stored_swath_l_bytes);
- dml2_printf("DML_DLG: %s: stored_swath_c_bytes = %f\n", __func__, stored_swath_c_bytes);
- dml2_printf("DML_DLG: %s: detile_buf_size_in_bytes = %d\n", __func__, detile_buf_size_in_bytes);
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d\n", __func__, detile_buf_plane1_addr);
- dml2_printf("DML_DLG: %s: plane1_base_address = %d\n", __func__, rq_regs->plane1_base_address);
+ DML_LOG_VERBOSE("DML_DLG: %s: is_phantom_pipe = %d\n", __func__, is_phantom_pipe);
+ DML_LOG_VERBOSE("DML_DLG: %s: stored_swath_l_bytes = %f\n", __func__, stored_swath_l_bytes);
+ DML_LOG_VERBOSE("DML_DLG: %s: stored_swath_c_bytes = %f\n", __func__, stored_swath_c_bytes);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_size_in_bytes = %d\n", __func__, detile_buf_size_in_bytes);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_plane1_addr = %d\n", __func__, detile_buf_plane1_addr);
+ DML_LOG_VERBOSE("DML_DLG: %s: plane1_base_address = %d\n", __func__, rq_regs->plane1_base_address);
#endif
- //dml2_printf_rq_regs_st(rq_regs);
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
+ //DML_LOG_VERBOSE_rq_regs_st(rq_regs);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
}
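Context for the detile-buffer split converted above: when the stored luma swath is at most 1.5x the chroma swath, the buffer is split evenly between the planes; otherwise luma gets two thirds, floored to a 1 KiB multiple, and the register holds the chroma (plane 1) base in 1 KiB units. A standalone sketch with assumed sizes:

	double stored_swath_l_bytes = 65536.0;		/* assumed luma swath */
	double stored_swath_c_bytes = 32768.0;		/* assumed chroma swath */
	unsigned int detile_buf_size_in_bytes = 98304;	/* assumed 96 KiB buffer */
	unsigned int plane1_addr;

	if (stored_swath_l_bytes / stored_swath_c_bytes <= 1.5)
		plane1_addr = detile_buf_size_in_bytes / 2 / 1024; /* half to chroma */
	else	/* 2/3 to luma, so chroma starts at the 2/3 mark (1 KiB floored) */
		plane1_addr = ((2 * detile_buf_size_in_bytes) / 3) / 1024;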
static void rq_dlg_get_dlg_reg(
@@ -12411,10 +12341,10 @@ static void rq_dlg_get_dlg_reg(
memset(l, 0, sizeof(struct dml2_core_shared_rq_dlg_get_dlg_reg_locals));
- dml2_printf("DML_DLG::%s: Calculation for pipe_idx=%d\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe_idx=%d\n", __func__, pipe_idx);
l->plane_idx = dml_get_plane_idx(mode_lib, pipe_idx);
- DML2_ASSERT(l->plane_idx < DML2_MAX_PLANES);
+ DML_ASSERT(l->plane_idx < DML2_MAX_PLANES);
l->source_format = dml2_444_8;
l->odm_mode = dml2_odm_mode_bypass;
@@ -12444,18 +12374,18 @@ static void rq_dlg_get_dlg_reg(
l->pclk_freq_in_mhz = (double)l->timing->pixel_clock_khz / 1000;
l->ref_freq_to_pix_freq = l->refclk_freq_in_mhz / l->pclk_freq_in_mhz;
- dml2_printf("DML_DLG::%s: plane_idx = %d\n", __func__, l->plane_idx);
- dml2_printf("DML_DLG: %s: htotal = %d\n", __func__, l->htotal);
- dml2_printf("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, l->refclk_freq_in_mhz);
- dml2_printf("DML_DLG: %s: dlg_ref_clk_mhz = %3.2f\n", __func__, display_cfg->overrides.hw.dlg_ref_clk_mhz);
- dml2_printf("DML_DLG: %s: soc.refclk_mhz = %3.2f\n", __func__, mode_lib->soc.dchub_refclk_mhz);
- dml2_printf("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, l->pclk_freq_in_mhz);
- dml2_printf("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
- dml2_printf("DML_DLG: %s: interlaced = %d\n", __func__, l->interlaced);
+ DML_LOG_VERBOSE("DML_DLG::%s: plane_idx = %d\n", __func__, l->plane_idx);
+ DML_LOG_VERBOSE("DML_DLG: %s: htotal = %d\n", __func__, l->htotal);
+ DML_LOG_VERBOSE("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, l->refclk_freq_in_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: dlg_ref_clk_mhz = %3.2f\n", __func__, display_cfg->overrides.hw.dlg_ref_clk_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: soc.refclk_mhz = %d\n", __func__, mode_lib->soc.dchub_refclk_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, l->pclk_freq_in_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
+ DML_LOG_VERBOSE("DML_DLG: %s: interlaced = %d\n", __func__, l->interlaced);
- DML2_ASSERT(l->refclk_freq_in_mhz != 0);
- DML2_ASSERT(l->pclk_freq_in_mhz != 0);
- DML2_ASSERT(l->ref_freq_to_pix_freq < 4.0);
+ DML_ASSERT(l->refclk_freq_in_mhz != 0);
+ DML_ASSERT(l->pclk_freq_in_mhz != 0);
+ DML_ASSERT(l->ref_freq_to_pix_freq < 4.0);
// Need to figure out which side of odm combine we're in
// Assume the pipe instance under the same plane is in order
@@ -12484,14 +12414,14 @@ static void rq_dlg_get_dlg_reg(
l->pipe_idx_in_combine = pipe_idx - l->first_pipe_idx_in_plane; // DML assumes the pipes in the same plane will have continuous indexing (i.e. plane 0 use pipe 0, 1, and plane 1 uses pipe 2, 3, etc.)
disp_dlg_regs->refcyc_h_blank_end = (unsigned int)(((double)l->hblank_end + (double)l->pipe_idx_in_combine * (double)l->hactive / (double)l->odm_combine_factor) * l->ref_freq_to_pix_freq);
- dml2_printf("DML_DLG: %s: pipe_idx = %d\n", __func__, pipe_idx);
- dml2_printf("DML_DLG: %s: first_pipe_idx_in_plane = %d\n", __func__, l->first_pipe_idx_in_plane);
- dml2_printf("DML_DLG: %s: pipe_idx_in_combine = %d\n", __func__, l->pipe_idx_in_combine);
- dml2_printf("DML_DLG: %s: odm_combine_factor = %d\n", __func__, l->odm_combine_factor);
+ DML_LOG_VERBOSE("DML_DLG: %s: pipe_idx = %d\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG: %s: first_pipe_idx_in_plane = %d\n", __func__, l->first_pipe_idx_in_plane);
+ DML_LOG_VERBOSE("DML_DLG: %s: pipe_idx_in_combine = %d\n", __func__, l->pipe_idx_in_combine);
+ DML_LOG_VERBOSE("DML_DLG: %s: odm_combine_factor = %d\n", __func__, l->odm_combine_factor);
}
- dml2_printf("DML_DLG: %s: refcyc_h_blank_end = %d\n", __func__, disp_dlg_regs->refcyc_h_blank_end);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_h_blank_end = %d\n", __func__, disp_dlg_regs->refcyc_h_blank_end);
- DML2_ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)math_pow(2, 13));
disp_dlg_regs->ref_freq_to_pix_freq = (unsigned int)(l->ref_freq_to_pix_freq * math_pow(2, 19));
disp_dlg_regs->refcyc_per_htotal = (unsigned int)(l->ref_freq_to_pix_freq * (double)l->htotal * math_pow(2, 8));
@@ -12500,20 +12430,20 @@ static void rq_dlg_get_dlg_reg(
l->min_ttu_vblank = mode_lib->mp.MinTTUVBlank[mode_lib->mp.pipe_plane[pipe_idx]];
l->min_dst_y_next_start = (unsigned int)(mode_lib->mp.MIN_DST_Y_NEXT_START[mode_lib->mp.pipe_plane[pipe_idx]]);
- dml2_printf("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, l->min_ttu_vblank);
- dml2_printf("DML_DLG: %s: min_dst_y_next_start = %d\n", __func__, l->min_dst_y_next_start);
- dml2_printf("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
+ DML_LOG_VERBOSE("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, l->min_ttu_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: min_dst_y_next_start = %d\n", __func__, l->min_dst_y_next_start);
+ DML_LOG_VERBOSE("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
l->vready_after_vcount0 = (unsigned int)(mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[mode_lib->mp.pipe_plane[pipe_idx]]);
disp_dlg_regs->vready_after_vcount0 = l->vready_after_vcount0;
- dml2_printf("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, disp_dlg_regs->vready_after_vcount0);
+ DML_LOG_VERBOSE("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, disp_dlg_regs->vready_after_vcount0);
l->dst_x_after_scaler = (unsigned int)(mode_lib->mp.DSTXAfterScaler[mode_lib->mp.pipe_plane[pipe_idx]]);
l->dst_y_after_scaler = (unsigned int)(mode_lib->mp.DSTYAfterScaler[mode_lib->mp.pipe_plane[pipe_idx]]);
- dml2_printf("DML_DLG: %s: dst_x_after_scaler = %d\n", __func__, l->dst_x_after_scaler);
- dml2_printf("DML_DLG: %s: dst_y_after_scaler = %d\n", __func__, l->dst_y_after_scaler);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_x_after_scaler = %d\n", __func__, l->dst_x_after_scaler);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_after_scaler = %d\n", __func__, l->dst_y_after_scaler);
l->dst_y_prefetch = mode_lib->mp.dst_y_prefetch[mode_lib->mp.pipe_plane[pipe_idx]];
l->dst_y_per_vm_vblank = mode_lib->mp.dst_y_per_vm_vblank[mode_lib->mp.pipe_plane[pipe_idx]];
@@ -12521,28 +12451,28 @@ static void rq_dlg_get_dlg_reg(
l->dst_y_per_vm_flip = mode_lib->mp.dst_y_per_vm_flip[mode_lib->mp.pipe_plane[pipe_idx]];
l->dst_y_per_row_flip = mode_lib->mp.dst_y_per_row_flip[mode_lib->mp.pipe_plane[pipe_idx]];
- dml2_printf("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, l->dst_y_prefetch);
- dml2_printf("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, l->dst_y_per_vm_flip);
- dml2_printf("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, l->dst_y_per_row_flip);
- dml2_printf("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, l->dst_y_per_vm_vblank);
- dml2_printf("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, l->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, l->dst_y_prefetch);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, l->dst_y_per_vm_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, l->dst_y_per_row_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, l->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, l->dst_y_per_row_vblank);
if (l->dst_y_prefetch > 0 && l->dst_y_per_vm_vblank > 0 && l->dst_y_per_row_vblank > 0) {
- DML2_ASSERT(l->dst_y_prefetch > (l->dst_y_per_vm_vblank + l->dst_y_per_row_vblank));
+ DML_ASSERT(l->dst_y_prefetch > (l->dst_y_per_vm_vblank + l->dst_y_per_row_vblank));
}
l->vratio_pre_l = mode_lib->mp.VRatioPrefetchY[mode_lib->mp.pipe_plane[pipe_idx]];
l->vratio_pre_c = mode_lib->mp.VRatioPrefetchC[mode_lib->mp.pipe_plane[pipe_idx]];
- dml2_printf("DML_DLG: %s: vratio_pre_l = %3.2f\n", __func__, l->vratio_pre_l);
- dml2_printf("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, l->vratio_pre_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: vratio_pre_l = %3.2f\n", __func__, l->vratio_pre_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, l->vratio_pre_c);
// Active
l->refcyc_per_line_delivery_pre_l = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_line_delivery_l = mode_lib->mp.DisplayPipeLineDeliveryTimeLuma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_l);
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_l);
l->refcyc_per_line_delivery_pre_c = 0.0;
l->refcyc_per_line_delivery_c = 0.0;
@@ -12551,8 +12481,8 @@ static void rq_dlg_get_dlg_reg(
l->refcyc_per_line_delivery_pre_c = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_line_delivery_c = mode_lib->mp.DisplayPipeLineDeliveryTimeChroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_c);
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_c);
}
disp_dlg_regs->refcyc_per_vm_dmdata = (unsigned int)(mode_lib->mp.Tdmdl_vm[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
@@ -12561,8 +12491,8 @@ static void rq_dlg_get_dlg_reg(
l->refcyc_per_req_delivery_pre_l = mode_lib->mp.DisplayPipeRequestDeliveryTimeLumaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_req_delivery_l = mode_lib->mp.DisplayPipeRequestDeliveryTimeLuma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_l);
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_l);
l->refcyc_per_req_delivery_pre_c = 0.0;
l->refcyc_per_req_delivery_c = 0.0;
@@ -12570,16 +12500,16 @@ static void rq_dlg_get_dlg_reg(
l->refcyc_per_req_delivery_pre_c = mode_lib->mp.DisplayPipeRequestDeliveryTimeChromaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_req_delivery_c = mode_lib->mp.DisplayPipeRequestDeliveryTimeChroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_c);
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_c);
}
// TTU - Cursor
- DML2_ASSERT(display_cfg->plane_descriptors[l->plane_idx].cursor.num_cursors <= 1);
+ DML_ASSERT(display_cfg->plane_descriptors[l->plane_idx].cursor.num_cursors <= 1);
// Assign to register structures
disp_dlg_regs->min_dst_y_next_start = (unsigned int)((double)l->min_dst_y_next_start * math_pow(2, 2));
- DML2_ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)math_pow(2, 18));
+ DML_ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)math_pow(2, 18));
disp_dlg_regs->dst_y_after_scaler = l->dst_y_after_scaler; // in terms of line
disp_dlg_regs->refcyc_x_after_scaler = (unsigned int)((double)l->dst_x_after_scaler * l->ref_freq_to_pix_freq); // in terms of refclk
@@ -12592,10 +12522,10 @@ static void rq_dlg_get_dlg_reg(
disp_dlg_regs->vratio_prefetch = (unsigned int)(l->vratio_pre_l * math_pow(2, 19));
disp_dlg_regs->vratio_prefetch_c = (unsigned int)(l->vratio_pre_c * math_pow(2, 19));
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
disp_dlg_regs->refcyc_per_vm_group_vblank = (unsigned int)(mode_lib->mp.TimePerVMGroupVBlank[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
disp_dlg_regs->refcyc_per_vm_group_flip = (unsigned int)(mode_lib->mp.TimePerVMGroupFlip[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
@@ -12662,11 +12592,11 @@ static void rq_dlg_get_dlg_reg(
disp_ttu_regs->qos_ramp_disable_c = 0;
disp_ttu_regs->min_ttu_vblank = (unsigned int)(l->min_ttu_vblank * l->refclk_freq_in_mhz);
- // CHECK for HW registers' range, DML2_ASSERT or clamp
- DML2_ASSERT(l->refcyc_per_req_delivery_pre_l < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_l < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_pre_c < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_c < math_pow(2, 13));
+ // CHECK for HW registers' range, DML_ASSERT or clamp
+ DML_ASSERT(l->refcyc_per_req_delivery_pre_l < math_pow(2, 13));
+ DML_ASSERT(l->refcyc_per_req_delivery_l < math_pow(2, 13));
+ DML_ASSERT(l->refcyc_per_req_delivery_pre_c < math_pow(2, 13));
+ DML_ASSERT(l->refcyc_per_req_delivery_c < math_pow(2, 13));
if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)math_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_group_vblank = (unsigned int)(math_pow(2, 23) - 1);
@@ -12680,16 +12610,16 @@ static void rq_dlg_get_dlg_reg(
disp_dlg_regs->refcyc_per_vm_req_flip = (unsigned int)(math_pow(2, 23) - 1);
- DML2_ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8);
- DML2_ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8);
+ DML_ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)math_pow(2, 13));
if (disp_dlg_regs->dst_y_per_pte_row_nom_l >= (unsigned int)math_pow(2, 17)) {
- dml2_printf("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_L %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_l, (unsigned int)math_pow(2, 17) - 1);
+ DML_LOG_VERBOSE("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_L %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_l, (unsigned int)math_pow(2, 17) - 1);
l->dst_y_per_pte_row_nom_l = (unsigned int)math_pow(2, 17) - 1;
}
if (l->dual_plane) {
if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int)math_pow(2, 17)) {
- dml2_printf("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_C %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_c, (unsigned int)math_pow(2, 17) - 1);
+ DML_LOG_VERBOSE("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_C %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_c, (unsigned int)math_pow(2, 17) - 1);
l->dst_y_per_pte_row_nom_c = (unsigned int)math_pow(2, 17) - 1;
}
}
@@ -12700,20 +12630,20 @@ static void rq_dlg_get_dlg_reg(
if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int)math_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_c = (unsigned int)(math_pow(2, 23) - 1);
}
- DML2_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)math_pow(2, 13));
if (l->dual_plane) {
- DML2_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int)math_pow(2, 13));
}
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_ttu_regs->qos_level_low_wm < (unsigned int)math_pow(2, 14));
- DML2_ASSERT(disp_ttu_regs->qos_level_high_wm < (unsigned int)math_pow(2, 14));
- DML2_ASSERT(disp_ttu_regs->min_ttu_vblank < (unsigned int)math_pow(2, 24));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_ttu_regs->qos_level_low_wm < (unsigned int)math_pow(2, 14));
+ DML_ASSERT(disp_ttu_regs->qos_level_high_wm < (unsigned int)math_pow(2, 14));
+ DML_ASSERT(disp_ttu_regs->min_ttu_vblank < (unsigned int)math_pow(2, 24));
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
}
}
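The asserts converted in this function are register-width checks on fixed-point values: ref_freq_to_pix_freq is packed with 19 fractional bits (so the DML_ASSERT that the ratio stays below 4.0 keeps it within two integer bits), while most refcyc_* fields must fit 13-bit registers and are clamped where the code allows. A sketch of the packing, with an assumed clock ratio:

	/* assumed 50 MHz refclk against a 148.5 MHz pixel clock */
	double ref_freq_to_pix_freq = 50.0 / 148.5;
	DML_ASSERT(ref_freq_to_pix_freq < 4.0);	/* must fit 2 integer bits */

	/* packed with 19 fractional bits, as ref_freq_to_pix_freq is above */
	unsigned int packed = (unsigned int)(ref_freq_to_pix_freq * (1 << 19));

	/* refcyc_* style fields instead get 13-bit width checks, e.g.: */
	unsigned int refcyc_x_after_scaler = 1920;	/* assumed */
	DML_ASSERT(refcyc_x_after_scaler < (1u << 13));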
@@ -12736,11 +12666,11 @@ static void rq_dlg_get_arb_params(const struct dml2_display_cfg *display_cfg, co
arb_param->pstate_stall_threshold = (unsigned int)(mode_lib->ip_caps.fams2.max_allow_delay_us * refclk_freq_in_mhz);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_req_outstanding = %d\n", __func__, arb_param->max_req_outstanding);
- dml2_printf("DML::%s: sdpif_request_rate_limit = %d\n", __func__, arb_param->sdpif_request_rate_limit);
- dml2_printf("DML::%s: compbuf_reserved_space_kbytes = %d\n", __func__, arb_param->compbuf_reserved_space_kbytes);
- dml2_printf("DML::%s: allow_sdpif_rate_limit_when_cstate_req = %d\n", __func__, arb_param->allow_sdpif_rate_limit_when_cstate_req);
- dml2_printf("DML::%s: dcfclk_deep_sleep_hysteresis = %d\n", __func__, arb_param->dcfclk_deep_sleep_hysteresis);
+ DML_LOG_VERBOSE("DML::%s: max_req_outstanding = %d\n", __func__, arb_param->max_req_outstanding);
+ DML_LOG_VERBOSE("DML::%s: sdpif_request_rate_limit = %d\n", __func__, arb_param->sdpif_request_rate_limit);
+ DML_LOG_VERBOSE("DML::%s: compbuf_reserved_space_kbytes = %d\n", __func__, arb_param->compbuf_reserved_space_kbytes);
+ DML_LOG_VERBOSE("DML::%s: allow_sdpif_rate_limit_when_cstate_req = %d\n", __func__, arb_param->allow_sdpif_rate_limit_when_cstate_req);
+ DML_LOG_VERBOSE("DML::%s: dcfclk_deep_sleep_hysteresis = %d\n", __func__, arb_param->dcfclk_deep_sleep_hysteresis);
#endif
}
@@ -13013,10 +12943,10 @@ void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *disp
out->vblank_reserved_time_us = display_cfg->plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000;
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: subvp_fw_processing_delay_us = %d\n", __func__, mode_lib->ip.subvp_fw_processing_delay_us);
- dml2_printf("DML::%s: subvp_pstate_allow_width_us = %d\n", __func__, mode_lib->ip.subvp_pstate_allow_width_us);
- dml2_printf("DML::%s: subvp_swath_height_margin_lines = %d\n", __func__, mode_lib->ip.subvp_swath_height_margin_lines);
- dml2_printf("DML::%s: vblank_reserved_time_us = %f\n", __func__, out->vblank_reserved_time_us);
+ DML_LOG_VERBOSE("DML::%s: subvp_fw_processing_delay_us = %d\n", __func__, mode_lib->ip.subvp_fw_processing_delay_us);
+ DML_LOG_VERBOSE("DML::%s: subvp_pstate_allow_width_us = %d\n", __func__, mode_lib->ip.subvp_pstate_allow_width_us);
+ DML_LOG_VERBOSE("DML::%s: subvp_swath_height_margin_lines = %d\n", __func__, mode_lib->ip.subvp_swath_height_margin_lines);
+ DML_LOG_VERBOSE("DML::%s: vblank_reserved_time_us = %u\n", __func__, out->vblank_reserved_time_us);
#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
index 4e502f0a6d20..bdee6ad7bc59 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -1078,6 +1078,8 @@ struct dml2_core_calcs_mode_programming_locals {
enum dml2_source_format_class pixel_format[DML2_MAX_PLANES];
unsigned int lb_source_lines_l[DML2_MAX_PLANES];
unsigned int lb_source_lines_c[DML2_MAX_PLANES];
+ unsigned int num_dsc_slices[DML2_MAX_PLANES];
+ bool dsc_enable[DML2_MAX_PLANES];
};
struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
index 2504d9c2ec34..7a220c0141c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
@@ -82,7 +82,7 @@ bool dml2_core_utils_is_420(enum dml2_source_format_class source_format)
val = 0;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -145,7 +145,7 @@ bool dml2_core_utils_is_422_planar(enum dml2_source_format_class source_format)
val = 0;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -208,7 +208,7 @@ bool dml2_core_utils_is_422_packed(enum dml2_source_format_class source_format)
val = 1;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -216,104 +216,104 @@ bool dml2_core_utils_is_422_packed(enum dml2_source_format_class source_format)
void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only)
{
- dml2_printf("DML: ===================================== \n");
- dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n");
+ DML_LOG_VERBOSE("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: DML_MODE_SUPPORT_INFO_ST\n");
if (!fail_only || support->ScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
if (!fail_only || support->SourceFormatPixelAndScanSupport == 0)
- dml2_printf("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
+ DML_LOG_VERBOSE("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
if (!fail_only || support->ViewportSizeSupport == 0)
- dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
+ DML_LOG_VERBOSE("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1)
- dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
+ DML_LOG_VERBOSE("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
if (!fail_only || support->BPPForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
if (!fail_only || support->MultistreamWithHDMIOreDP == 1)
- dml2_printf("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
+ DML_LOG_VERBOSE("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
if (!fail_only || support->ExceededMultistreamSlots == 1)
- dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
+ DML_LOG_VERBOSE("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1)
- dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
+ DML_LOG_VERBOSE("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
if (!fail_only || support->NotEnoughLanesForMSO == 1)
- dml2_printf("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
+ DML_LOG_VERBOSE("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
if (!fail_only || support->P2IWith420 == 1)
- dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420);
+ DML_LOG_VERBOSE("DML: support: P2IWith420 = %d\n", support->P2IWith420);
if (!fail_only || support->DSC422NativeNotSupported == 1)
- dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
+ DML_LOG_VERBOSE("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
if (!fail_only || support->DSCSlicesODMModeSupported == 0)
- dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
+ DML_LOG_VERBOSE("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
if (!fail_only || support->NotEnoughDSCUnits == 1)
- dml2_printf("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
if (!fail_only || support->NotEnoughDSCSlices == 1)
- dml2_printf("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
- dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
- dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
+ DML_LOG_VERBOSE("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
if (!fail_only || support->ROBSupport == 0)
- dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport);
+ DML_LOG_VERBOSE("DML: support: ROBSupport = %d\n", support->ROBSupport);
if (!fail_only || support->OutstandingRequestsSupport == 0)
- dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0)
- dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
if (!fail_only || support->DISPCLK_DPPCLK_Support == 0)
- dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
+ DML_LOG_VERBOSE("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
if (!fail_only || support->TotalAvailablePipesSupport == 0)
- dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
+ DML_LOG_VERBOSE("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
if (!fail_only || support->NumberOfOTGSupport == 0)
- dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
if (!fail_only || support->NumberOfHDMIFRLSupport == 0)
- dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
if (!fail_only || support->NumberOfDP2p0Support == 0)
- dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
+ DML_LOG_VERBOSE("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
if (!fail_only || support->EnoughWritebackUnits == 0)
- dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
+ DML_LOG_VERBOSE("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
if (!fail_only || support->WritebackLatencySupport == 0)
- dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
+ DML_LOG_VERBOSE("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
if (!fail_only || support->CursorSupport == 0)
- dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport);
+ DML_LOG_VERBOSE("DML: support: CursorSupport = %d\n", support->CursorSupport);
if (!fail_only || support->PitchSupport == 0)
- dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport);
+ DML_LOG_VERBOSE("DML: support: PitchSupport = %d\n", support->PitchSupport);
if (!fail_only || support->ViewportExceedsSurface == 1)
- dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
+ DML_LOG_VERBOSE("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
if (!fail_only || support->PrefetchSupported == 0)
- dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0)
- dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
+ DML_LOG_VERBOSE("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
if (!fail_only || support->AvgBandwidthSupport == 0)
- dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
+ DML_LOG_VERBOSE("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
if (!fail_only || support->DynamicMetadataSupported == 0)
- dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
+ DML_LOG_VERBOSE("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
if (!fail_only || support->VRatioInPrefetchSupported == 0)
- dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
if (!fail_only || support->ExceededMALLSize == 1)
- dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
+ DML_LOG_VERBOSE("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
if (!fail_only || support->g6_temp_read_support == 0)
- dml2_printf("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
+ DML_LOG_VERBOSE("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
if (!fail_only || support->ImmediateFlipSupport == 0)
- dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
if (!fail_only || support->LinkCapacitySupport == 0)
- dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
+ DML_LOG_VERBOSE("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
if (!fail_only || support->ModeSupport == 0)
- dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport);
- dml2_printf("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: support: ModeSupport = %d\n", support->ModeSupport);
+ DML_LOG_VERBOSE("DML: ===================================== \n");
}
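One note on the pattern being converted here: each field is printed either unconditionally (full dump) or only when it indicates a failure, which is value 0 for "support" flags and value 1 for "violation" flags, hence the mix of == 0 and == 1 guards. A compact sketch of the same idiom (helper name assumed):

	/* prints in full-dump mode, or whenever `ok` says the check failed */
	static void dml_print_support_flag(bool fail_only, bool ok, const char *name)
	{
		if (!fail_only || !ok)
			DML_LOG_VERBOSE("DML: support: %s = %d\n", name, ok);
	}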
const char *dml2_core_utils_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type)
@@ -358,9 +358,9 @@ void dml2_core_utils_get_stream_output_bpp(double *out_bpp, const struct dml2_di
out_bpp[k] = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
- dml2_printf("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
- dml2_printf("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
+ DML_LOG_VERBOSE("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
+ DML_LOG_VERBOSE("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
#endif
}
}
@@ -391,7 +391,7 @@ unsigned int dml2_core_util_get_num_active_pipes(int unsigned num_planes, const
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
+ DML_LOG_VERBOSE("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
#endif
return num_active_pipes;
}
@@ -452,7 +452,7 @@ unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw
else if (sw_mode == dml2_gfx11_sw_256kb_r_x)
return 262144;
else {
- DML2_ASSERT(0);
+ DML_ASSERT(0);
return 256;
};
}
@@ -498,8 +498,8 @@ int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode)
sw_mode == dml2_gfx11_sw_256kb_r_x)
version = 11;
else {
- dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
+ DML_ASSERT(0);
}
return version;
@@ -511,7 +511,7 @@ unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, co
unsigned int index = 0;
for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- dml2_printf("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %d\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
+ DML_LOG_VERBOSE("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %ld\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
if (i == 0)
index = 0;
@@ -524,8 +524,8 @@ unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, co
}
}
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %d\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, index);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, index);
#endif
return index;
}
@@ -533,32 +533,32 @@ unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, co
unsigned int dml2_core_utils_get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table)
{
unsigned int i;
- bool clk_entry_found = 0;
+ bool clk_entry_found = false;
for (i = 0; i < clk_table->uclk.num_clk_values; i++) {
- dml2_printf("DML::%s: clk_table.uclk.clk_values_khz[%d] = %d\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
+ DML_LOG_VERBOSE("DML::%s: clk_table.uclk.clk_values_khz[%d] = %ld\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
if (uclk_freq_khz == clk_table->uclk.clk_values_khz[i]) {
- clk_entry_found = 1;
+ clk_entry_found = true;
break;
}
}
if (!clk_entry_found)
- DML2_ASSERT(clk_entry_found);
+ DML_ASSERT(clk_entry_found);
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, i);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, i);
#endif
return i;
}
bool dml2_core_utils_is_dual_plane(enum dml2_source_format_class source_format)
{
- bool ret_val = 0;
+ bool ret_val = false;
if (dml2_core_utils_is_420(source_format) || dml2_core_utils_is_422_planar(source_format) || (source_format == dml2_rgbe_alpha))
- ret_val = 1;
+ ret_val = true;
return ret_val;
}
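
Note that dml2_core_utils_get_active_min_uclk_dpm_index() expects the given uclk frequency to match a table entry exactly and asserts otherwise; on a miss the loop falls through and the function still returns num_clk_values, so callers rely on the assert to catch a mismatched clock table. A minimal standalone sketch of that lookup contract (table values are assumptions):

    #include <assert.h>
    #include <stdio.h>

    /* Sketch: exact-match DPM index lookup; a miss trips the assert. */
    static unsigned int find_uclk_index(unsigned long khz,
                                        const unsigned long *table,
                                        unsigned int n)
    {
        unsigned int i;

        for (i = 0; i < n; i++)
            if (khz == table[i])
                return i;

        assert(0 && "uclk not present in clock table");
        return i;  /* == n on a miss, mirroring the kernel helper */
    }

    int main(void)
    {
        unsigned long uclk_khz[] = { 400000, 800000, 1200000 };

        printf("index = %u\n", find_uclk_index(800000, uclk_khz, 3));  /* 1 */
        return 0;
    }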
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index 15507926f3a4..f486b090bbfc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
@@ -754,6 +754,8 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
@@ -768,6 +770,8 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
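
The watermark fields here are programmed as counts of refclk cycles, while DML computes watermarks in microseconds; multiplying by the refclk frequency in MHz converts between the two (us x cycles/us = cycles), with the cast truncating toward zero. A minimal arithmetic sketch of the conversion the new Z8 lines perform, using illustrative values:

    #include <stdio.h>

    /* Sketch: convert a DML watermark in microseconds to refclk cycles,
     * mirroring the (int unsigned)(watermark_us * refclk_freq_in_mhz)
     * truncation above. Values are illustrative only. */
    int main(void)
    {
        double refclk_freq_in_mhz = 100.0; /* 100 MHz refclk => 100 cycles per us */
        double z8_sr_enter_us = 313.5;     /* hypothetical Z8 stutter enter+exit watermark */

        unsigned int reg = (unsigned int)(z8_sr_enter_us * refclk_freq_in_mhz);
        printf("sr_enter_z8 = %u refclk cycles\n", reg);  /* 31350 */
        return 0;
    }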
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
index f4b1a7d02d42..a265f254152c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
@@ -182,6 +182,10 @@ static bool build_min_clock_table(const struct dml2_soc_bb *soc_bb, struct dml2_
min_table->max_clocks_khz.dtbclk = soc_bb->clk_table.dtbclk.clk_values_khz[soc_bb->clk_table.dtbclk.num_clk_values - 1];
min_table->max_clocks_khz.phyclk = soc_bb->clk_table.phyclk.clk_values_khz[soc_bb->clk_table.phyclk.num_clk_values - 1];
+ min_table->max_ss_clocks_khz.dispclk = (unsigned int)((double)min_table->max_clocks_khz.dispclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+ min_table->max_ss_clocks_khz.dppclk = (unsigned int)((double)min_table->max_clocks_khz.dppclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+ min_table->max_ss_clocks_khz.dtbclk = (unsigned int)((double)min_table->max_clocks_khz.dtbclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+
min_table->max_clocks_khz.dcfclk = soc_bb->clk_table.dcfclk.clk_values_khz[soc_bb->clk_table.dcfclk.num_clk_values - 1];
min_table->max_clocks_khz.fclk = soc_bb->clk_table.fclk.clk_values_khz[soc_bb->clk_table.fclk.num_clk_values - 1];
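
The new max_ss_clocks_khz entries derate each maximum by the spread-spectrum modulation: dividing by (1 + dcn_downspread_percent / 100) gives the highest rate the clock can sustain once downspread is applied. A small sketch with assumed numbers:

    #include <stdio.h>

    /* Sketch of the downspread derating above; values are assumptions. */
    int main(void)
    {
        unsigned int max_dispclk_khz = 1200000;  /* hypothetical max dispclk */
        double dcn_downspread_percent = 0.5;     /* 0.5% spread-spectrum modulation */

        unsigned int max_ss_dispclk_khz =
            (unsigned int)((double)max_dispclk_khz /
                           (1.0 + dcn_downspread_percent / 100.0));

        printf("max_ss dispclk = %u kHz\n", max_ss_dispclk_khz);  /* 1194029 */
        return 0;
    }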
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index f50662b83296..d88b3e0082dd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -659,7 +659,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
for (i = 1; i <= PMO_DCN4_MAX_DISPLAYS; i++) {
switch (i) {
case 1:
- DML2_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+ DML_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
pmo_dcn4_fams2_expand_base_pstate_strategies(
@@ -670,7 +670,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
&pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 2:
- DML2_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+ DML_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
pmo_dcn4_fams2_expand_base_pstate_strategies(
@@ -681,7 +681,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
&pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 3:
- DML2_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+ DML_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
pmo_dcn4_fams2_expand_base_pstate_strategies(
@@ -692,7 +692,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
&pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 4:
- DML2_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+ DML_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
pmo_dcn4_fams2_expand_base_pstate_strategies(
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
index dc2ce5e77f57..4a7c4c62111e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
@@ -761,7 +761,7 @@ bool dml2_top_mcache_calc_mcache_count_and_offsets(struct top_mcache_calc_mcache
total_mcaches_required--;
}
}
- dml2_printf("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required);
+ DML_LOG_VERBOSE("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required);
if (total_mcaches_required > dml->soc_bbox.num_dcc_mcaches) {
result = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
deleted file mode 100644
index c506667897c4..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
+++ /dev/null
@@ -1,31 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#include "dml2_debug.h"
-
-int dml2_log_internal(const char *format, ...)
-{
- return 0;
-}
-
-int dml2_printf(const char *format, ...)
-{
-#ifdef _DEBUG
-#ifdef _DEBUG_PRINTS
- int result;
- va_list args;
- va_start(args, format);
-
- result = vprintf(format, args);
-
- va_end(args);
-
- return result;
-#else
- return 0;
-#endif
-#else
- return 0;
-#endif
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
index bfe6f236d2e4..b226225103c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
@@ -5,55 +5,62 @@
#ifndef __DML2_DEBUG_H__
#define __DML2_DEBUG_H__
-#ifndef DML2_ASSERT
-#define DML2_ASSERT(condition) ((void)0)
-#endif
+#include "os_types.h"
+#define DML_ASSERT(condition) ASSERT(condition)
+#define DML_LOG_LEVEL_DEFAULT DML_LOG_LEVEL_WARN
+#define DML_LOG_INTERNAL(fmt, ...) dm_output_to_console(fmt, ## __VA_ARGS__)
-/*
- * DML_LOG_FATAL - fatal errors for unrecoverable DML states until a restart.
- * DML_LOG_ERROR - unexpected but recoverable failures inside DML
- * DML_LOG_WARN - unexpected inputs or events to DML
- * DML_LOG_INFO - high level tracing of DML interfaces
- * DML_LOG_DEBUG - detailed tracing of DML internal components
- * DML_LOG_VERBOSE - detailed tracing of DML calculation procedure
- */
-#if !defined(DML_LOG_LEVEL)
-#if defined(_DEBUG) && defined(_DEBUG_PRINTS)
-/* for backward compatibility with old macros */
-#define DML_LOG_LEVEL 5
-#else
-#define DML_LOG_LEVEL 0
-#endif
-#endif
+/* ASSERT with message output */
+#define DML_ASSERT_MSG(condition, fmt, ...) \
+ do { \
+ if (!(condition)) { \
+ DML_LOG_ERROR("DML ASSERT hit in %s line %d\n", __func__, __LINE__); \
+ DML_LOG_ERROR(fmt, ## __VA_ARGS__); \
+ DML_ASSERT(condition); \
+ } \
+ } while (0)
+
+/* fatal errors for unrecoverable DML states until a full reset */
+#define DML_LOG_LEVEL_FATAL 0
+/* unexpected but recoverable failures inside DML */
+#define DML_LOG_LEVEL_ERROR 1
+/* unexpected inputs or events to DML */
+#define DML_LOG_LEVEL_WARN 2
+/* high level tracing of DML interfaces */
+#define DML_LOG_LEVEL_INFO 3
+/* detailed tracing of DML internal components */
+#define DML_LOG_LEVEL_DEBUG 4
+/* detailed tracing of DML calculation procedure */
+#define DML_LOG_LEVEL_VERBOSE 5
-#define DML_LOG_FATAL(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
-#if DML_LOG_LEVEL >= 1
-#define DML_LOG_ERROR(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#ifndef DML_LOG_LEVEL
+#define DML_LOG_LEVEL DML_LOG_LEVEL_DEFAULT
+#endif /* #ifndef DML_LOG_LEVEL */
+
+#define DML_LOG_FATAL(fmt, ...) DML_LOG_INTERNAL("[DML FATAL] " fmt, ## __VA_ARGS__)
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_ERROR
+#define DML_LOG_ERROR(fmt, ...) DML_LOG_INTERNAL("[DML ERROR] "fmt, ## __VA_ARGS__)
#else
#define DML_LOG_ERROR(fmt, ...) ((void)0)
#endif
-#if DML_LOG_LEVEL >= 2
-#define DML_LOG_WARN(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_WARN
+#define DML_LOG_WARN(fmt, ...) DML_LOG_INTERNAL("[DML WARN] "fmt, ## __VA_ARGS__)
#else
#define DML_LOG_WARN(fmt, ...) ((void)0)
#endif
-#if DML_LOG_LEVEL >= 3
-#define DML_LOG_INFO(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_INFO
+#define DML_LOG_INFO(fmt, ...) DML_LOG_INTERNAL("[DML INFO] "fmt, ## __VA_ARGS__)
#else
#define DML_LOG_INFO(fmt, ...) ((void)0)
#endif
-#if DML_LOG_LEVEL >= 4
-#define DML_LOG_DEBUG(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_DEBUG
+#define DML_LOG_DEBUG(fmt, ...) DML_LOG_INTERNAL("[DML DEBUG] "fmt, ## __VA_ARGS__)
#else
#define DML_LOG_DEBUG(fmt, ...) ((void)0)
#endif
-#if DML_LOG_LEVEL >= 5
-#define DML_LOG_VERBOSE(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE
+#define DML_LOG_VERBOSE(fmt, ...) DML_LOG_INTERNAL("[DML VERBOSE] "fmt, ## __VA_ARGS__)
#else
#define DML_LOG_VERBOSE(fmt, ...) ((void)0)
#endif
-
-int dml2_log_internal(const char *format, ...);
-int dml2_printf(const char *format, ...);
-
-#endif
+#endif /* __DML2_DEBUG_H__ */
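
The rewritten dml2_debug.h follows the classic compile-time log-level pattern: each DML_LOG_* macro either routes to DML_LOG_INTERNAL() with a severity prefix or collapses to ((void)0) depending on DML_LOG_LEVEL, so verbose tracing costs nothing in a default (WARN-level) build. A standalone sketch of the same pattern, with printf standing in for dm_output_to_console():

    #include <stdio.h>

    #define LOG_LEVEL_WARN    2
    #define LOG_LEVEL_VERBOSE 5

    #ifndef LOG_LEVEL
    #define LOG_LEVEL LOG_LEVEL_WARN        /* default build: warnings and below */
    #endif

    #if LOG_LEVEL >= LOG_LEVEL_VERBOSE
    #define LOG_VERBOSE(fmt, ...) printf("[VERBOSE] " fmt, ## __VA_ARGS__)
    #else
    #define LOG_VERBOSE(fmt, ...) ((void)0) /* compiled out entirely */
    #endif

    int main(void)
    {
        LOG_VERBOSE("index = %d\n", 3);     /* emits no code at the default level */
        printf("done\n");
        return 0;
    }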
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
index d8d01dceacdd..00688b9f1df4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
@@ -38,6 +38,12 @@ struct dml2_mcg_min_clock_table {
} max_clocks_khz;
struct {
+ unsigned int dispclk;
+ unsigned int dppclk;
+ unsigned int dtbclk;
+ } max_ss_clocks_khz;
+
+ struct {
unsigned int dprefclk;
unsigned int xtalclk;
unsigned int pcierefclk;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index a966abd40788..5f1b49a50049 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -1082,22 +1082,22 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s
if (stream_disp_cfg_index >= disp_cfg_index_max)
continue;
- if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_bypass) {
- scratch.odm_info.odm_factor = 1;
- } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_2to1) {
- scratch.odm_info.odm_factor = 2;
- } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_4to1) {
- scratch.odm_info.odm_factor = 4;
- } else {
- ASSERT(false);
- scratch.odm_info.odm_factor = 1;
- }
-
+ if (ctx->architecture == dml2_architecture_20) {
+ if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_bypass) {
+ scratch.odm_info.odm_factor = 1;
+ } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_2to1) {
+ scratch.odm_info.odm_factor = 2;
+ } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_4to1) {
+ scratch.odm_info.odm_factor = 4;
+ } else {
+ ASSERT(false);
+ scratch.odm_info.odm_factor = 1;
+ }
+ } else if (ctx->architecture == dml2_architecture_21) {
/* After the DML2.1 update, the ODM interpretation changes and is no longer the same as for DML2.0.
 * This is not an issue with the new resource management logic. This block ensures backward
 * compatibility with legacy pipe management under the updated DML.
 */
- if (ctx->architecture == dml2_architecture_21) {
if (ODMMode[stream_disp_cfg_index] == 1) {
scratch.odm_info.odm_factor = 1;
} else if (ODMMode[stream_disp_cfg_index] == 2) {
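
The split above makes the ODM decoding explicit per architecture: DML2.0 reports an enum-coded mode (bypass / combine_2to1 / combine_4to1), while DML2.1 reports the combine factor directly (1/2/4). A hedged sketch of the same dispatch, with hypothetical names and enum values:

    #include <stdio.h>

    /* Sketch: architecture-dependent ODM factor decoding (enum values hypothetical). */
    enum arch { ARCH_DML20, ARCH_DML21 };

    static int decode_odm_factor(enum arch a, int odm_mode)
    {
        if (a == ARCH_DML20) {
            switch (odm_mode) {      /* DML2.0 reports an enum-coded mode */
            case 0: return 1;        /* bypass */
            case 1: return 2;        /* combine 2:1 */
            case 2: return 4;        /* combine 4:1 */
            default: return 1;       /* unexpected mode: fall back to bypass */
            }
        }
        return odm_mode;             /* DML2.1 reports the factor itself (1/2/4) */
    }

    int main(void)
    {
        printf("dml2.0 mode 1 -> factor %d\n", decode_odm_factor(ARCH_DML20, 1));
        printf("dml2.1 mode 2 -> factor %d\n", decode_odm_factor(ARCH_DML21, 2));
        return 0;
    }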
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index ab6baf269801..5de775fd8fce 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -896,7 +896,7 @@ static void populate_dummy_dml_surface_cfg(struct dml_surface_cfg_st *out, unsig
out->SurfaceWidthC[location] = in->timing.h_addressable;
out->SurfaceHeightC[location] = in->timing.v_addressable;
out->PitchY[location] = ((out->SurfaceWidthY[location] + 127) / 128) * 128;
- out->PitchC[location] = 0;
+ out->PitchC[location] = 1;
out->DCCEnable[location] = false;
out->DCCMetaPitchY[location] = 0;
out->DCCMetaPitchC[location] = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index e89571874185..525b7d04bf84 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -663,7 +663,10 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
dml2_copy_clocks_to_dc_state(&out_clks, context);
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.a, &dml2->v20.dml_core_ctx);
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx);
- memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
+ if (context->streams[0]->sink->link->dc->caps.is_apu)
+ dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.dml_core_ctx);
+ else
+ memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx);
dml2_extract_writeback_wm(context, &dml2->v20.dml_core_ctx);
//copy for deciding zstate use
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index 785226945699..5100f269368e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -40,6 +40,7 @@ struct dc_sink;
struct dc_stream_state;
struct resource_context;
struct display_stream_compressor;
+struct dc_mcache_params;
// Configuration of the MALL on the SoC
struct dml2_soc_mall_info {
@@ -107,6 +108,7 @@ struct dml2_dc_callbacks {
unsigned int (*get_max_flickerless_instant_vtotal_increase)(
struct dc_stream_state *stream,
bool is_gaming);
+ bool (*allocate_mcache)(struct dc_state *context, const struct dc_mcache_params *mcache_params);
};
struct dml2_dc_svp_callbacks {
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index abf439e743f2..2d70586cef40 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -790,8 +790,7 @@ static bool dpp3_program_blnd_lut(struct dpp *dpp_base,
if (params == NULL) {
REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_MODE, 0);
- if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
- dpp3_power_on_blnd_lut(dpp_base, false);
+ dpp3_power_on_blnd_lut(dpp_base, false);
return false;
}
@@ -1204,8 +1203,7 @@ static bool dpp3_program_shaper(struct dpp *dpp_base,
if (params == NULL) {
REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0);
- if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
- dpp3_power_on_shaper(dpp_base, false);
+ dpp3_power_on_shaper(dpp_base, false);
return false;
}
@@ -1399,8 +1397,7 @@ static bool dpp3_program_3dlut(struct dpp *dpp_base,
if (params == NULL) {
dpp3_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false);
- if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
- dpp3_power_on_hdr3dlut(dpp_base, false);
+ dpp3_power_on_hdr3dlut(dpp_base, false);
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
index 62b7012cda43..f7a373a3d70a 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
@@ -138,7 +138,7 @@ bool dpp35_construct(
dpp->base.funcs = &dcn35_dpp_funcs;
// w/a for cursor memory stuck in LS by programming DISPCLK_R_GATE_DISABLE, limit w/a to some ASIC revs
- if (dpp->base.ctx->asic_id.hw_internal_rev <= 0x10)
+ if (dpp->base.ctx->asic_id.hw_internal_rev < 0x40)
dpp->dispclk_r_gate_disable = true;
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
index 75128fd34306..bd1b9aef6d5c 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
@@ -57,13 +57,6 @@ static const struct dsc_funcs dcn20_dsc_funcs = {
#define DC_LOGGER \
dsc->ctx->logger
-enum dsc_bits_per_comp {
- DSC_BPC_8 = 8,
- DSC_BPC_10 = 10,
- DSC_BPC_12 = 12,
- DSC_BPC_UNKNOWN
-};
-
/* API functions (external or via structure->function_pointer) */
void dsc2_construct(struct dcn20_dsc *dsc,
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
index 1fb90b52b814..a9c04fc95bd1 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
@@ -457,6 +457,12 @@
type DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING; \
type DSCRM_DSC_FORWARD_EN_STATUS
+enum dsc_bits_per_comp {
+ DSC_BPC_8 = 8,
+ DSC_BPC_10 = 10,
+ DSC_BPC_12 = 12,
+ DSC_BPC_UNKNOWN
+};
struct dcn20_dsc_registers {
uint32_t DSC_TOP_CONTROL;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
index 4893b793fec0..4222679fd4c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
@@ -45,12 +45,6 @@ static const struct dsc_funcs dcn401_dsc_funcs = {
#define DC_LOGGER \
dsc->ctx->logger
-enum dsc_bits_per_comp {
- DSC_BPC_8 = 8,
- DSC_BPC_10 = 10,
- DSC_BPC_12 = 12,
- DSC_BPC_UNKNOWN
-};
/* API functions (external or via structure->function_pointer) */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index b099989d9364..942d9f0b6df2 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -411,6 +411,20 @@ enum dc_irq_source dal_irq_get_rx_source(
}
}
+enum dc_irq_source dal_irq_get_read_request(
+ const struct gpio *irq)
+{
+ enum gpio_id id = dal_gpio_get_id(irq);
+
+ switch (id) {
+ case GPIO_ID_HPD:
+ return (enum dc_irq_source)(DC_IRQ_SOURCE_DCI2C_RR_DDC1 +
+ dal_gpio_get_enum(irq));
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
enum gpio_result dal_irq_setup_hpd_filter(
struct gpio *irq,
struct gpio_hpd_config *config)
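
dal_irq_get_read_request() relies on the DC_IRQ_SOURCE_DCI2C_RR_DDC* values being laid out contiguously, so the HPD line's enum index can simply be added to the first read-request source. A sketch of that contiguous-enum offset idiom, with hypothetical names:

    #include <stdio.h>

    /* Sketch: map a 0-based line index onto a contiguous enum block (names hypothetical). */
    enum irq_source {
        IRQ_SOURCE_INVALID = 0,
        IRQ_SOURCE_RR_DDC1,
        IRQ_SOURCE_RR_DDC2,
        IRQ_SOURCE_RR_DDC3,
    };

    static enum irq_source rr_source_for_line(unsigned int line)
    {
        if (line > 2)                /* only three read-request sources in this sketch */
            return IRQ_SOURCE_INVALID;
        return (enum irq_source)(IRQ_SOURCE_RR_DDC1 + line);
    }

    int main(void)
    {
        printf("line 1 -> source %d\n", rr_source_for_line(1));  /* IRQ_SOURCE_RR_DDC2 */
        return 0;
    }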
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c
index 2546224b326a..e4496ad203b2 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c
@@ -132,9 +132,9 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
// Init VMID 0 based on PA config
dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
}
-
- dcn21_dchvm_init(hubbub);
-
+ if (!hubbub1->base.ctx->dc->config.skip_riommu_prefetch_wa) {
+ dcn21_dchvm_init(hubbub);
+ }
return hubbub1->num_vmid;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index 5ed195377a6c..baed31611477 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -1032,7 +1032,7 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_program_3dlut_fl_tmz_protected = hubp401_program_3dlut_fl_tmz_protected,
.hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar,
.hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
- .hubp_clear_tiling = hubp2_clear_tiling,
+ .hubp_clear_tiling = hubp401_clear_tiling,
};
bool hubp401_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
index 40ecebea1ba0..bee617ca0838 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
@@ -27,6 +27,24 @@
# DCE
###############################################################################
+ifdef CONFIG_DRM_AMD_DC_SI
+HWSS_DCE60 = dce60_hwseq.o
+
+AMD_DAL_HWSS_DCE60 = $(addprefix $(AMDDALPATH)/dc/hwss/dce60/,$(HWSS_DCE60))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE60)
+endif
+
+###############################################################################
+
+HWSS_DCE80 = dce80_hwseq.o
+
+AMD_DAL_HWSS_DCE80 = $(addprefix $(AMDDALPATH)/dc/hwss/dce80/,$(HWSS_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE80)
+
+###############################################################################
+
HWSS_DCE = dce_hwseq.o
AMD_DAL_HWSS_DCE = $(addprefix $(AMDDALPATH)/dc/hwss/dce/,$(HWSS_DCE))
@@ -65,14 +83,6 @@ AMD_DAL_HWSS_DCE120 = $(addprefix $(AMDDALPATH)/dc/hwss/dce120/,$(HWSS_DCE120))
AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE120)
-###############################################################################
-
-HWSS_DCE80 = dce80_hwseq.o
-
-AMD_DAL_HWSS_DCE80 = $(addprefix $(AMDDALPATH)/dc/hwss/dce80/,$(HWSS_DCE80))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE80)
-
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
# DCN
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 5656d10368ad..23bec5d25ed6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -2763,12 +2763,12 @@ static void dce110_enable_per_frame_crtc_position_reset(
}
-static void init_pipes(struct dc *dc, struct dc_state *context)
+static void dce110_init_pipes(struct dc *dc, struct dc_state *context)
{
// Do nothing
}
-static void init_hw(struct dc *dc)
+static void dce110_init_hw(struct dc *dc)
{
int i;
struct dc_bios *bp;
@@ -3327,7 +3327,7 @@ void dce110_disable_link_output(struct dc_link *link,
static const struct hw_sequencer_funcs dce110_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_output_csc = program_output_csc,
- .init_hw = init_hw,
+ .init_hw = dce110_init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
.post_unlock_program_front_end = dce110_post_unlock_program_front_end,
@@ -3371,7 +3371,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
};
static const struct hwseq_private_funcs dce110_private_funcs = {
- .init_pipes = init_pipes,
+ .init_pipes = dce110_init_pipes,
.set_input_transfer_func = dce110_set_input_transfer_func,
.set_output_transfer_func = dce110_set_output_transfer_func,
.power_down = dce110_power_down,
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c
index 44b56490e152..a08e9f9eec17 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c
@@ -26,7 +26,7 @@
#include "dm_services.h"
#include "dc.h"
#include "core_types.h"
-#include "dce60_hw_sequencer.h"
+#include "dce60_hwseq.h"
#include "dce/dce_hwseq.h"
#include "dce110/dce110_hwseq.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h
index f3b2d8b60d5b..f3b2d8b60d5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 912f96323ed6..f9ee55998b6b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -94,6 +94,128 @@ static void print_microsec(struct dc_context *dc_ctx,
us_x10 % frac);
}
+/*
+ * Delay until we have passed the busy-until point, after which we can
+ * safely do the necessary locking/programming on consecutive full updates
+ */
+void dcn10_wait_for_pipe_update_if_needed(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only)
+{
+ struct crtc_position position;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ unsigned int vpos, frame_count;
+ uint32_t vupdate_start, vupdate_end, vblank_start;
+ unsigned int lines_to_vupdate, us_to_vupdate;
+ unsigned int us_per_line, us_vupdate;
+
+ if (!pipe_ctx->stream ||
+ !pipe_ctx->stream_res.tg ||
+ !pipe_ctx->stream_res.stream_enc)
+ return;
+
+ if (pipe_ctx->prev_odm_pipe &&
+ pipe_ctx->stream)
+ return;
+
+ if (!pipe_ctx->wait_is_required)
+ return;
+
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
+ return;
+
+ dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
+ &vupdate_end);
+
+ dc->hwss.get_position(&pipe_ctx, 1, &position);
+ vpos = position.vertical_count;
+
+ frame_count = tg->funcs->get_frame_count(tg);
+
+ if (frame_count - pipe_ctx->wait_frame_count > 2)
+ return;
+
+ vblank_start = pipe_ctx->pipe_dlg_param.vblank_start;
+
+ if (vpos >= vupdate_start && vupdate_start >= vblank_start)
+ lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
+ else
+ lines_to_vupdate = vupdate_start - vpos;
+
+ us_per_line =
+ stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
+ us_to_vupdate = lines_to_vupdate * us_per_line;
+
+ if (vupdate_end < vupdate_start)
+ vupdate_end += stream->timing.v_total;
+
+ if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
+ us_to_vupdate = 0;
+
+ us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
+
+ if (is_surface_update_only && us_to_vupdate + us_vupdate > 200) {
+ //surface updates come in at high irql
+ pipe_ctx->wait_is_required = true;
+ return;
+ }
+
+ fsleep(us_to_vupdate + us_vupdate);
+
+ //clear
+ pipe_ctx->next_vupdate = 0;
+ pipe_ctx->wait_frame_count = 0;
+ pipe_ctx->wait_is_required = false;
+}
+
+/*
+ * On pipe unlock and programming, mark the pipe as busy until a given
+ * frame and line (vupdate). This is required for consecutive full
+ * updates: we must wait for one update to latch before trying to
+ * program the next one
+ */
+void dcn10_set_wait_for_update_needed_for_pipe(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ uint32_t vupdate_start, vupdate_end;
+ struct crtc_position position;
+ unsigned int vpos, cur_frame;
+
+ if (!pipe_ctx->stream ||
+ !pipe_ctx->stream_res.tg ||
+ !pipe_ctx->stream_res.stream_enc)
+ return;
+
+ dc->hwss.get_position(&pipe_ctx, 1, &position);
+ vpos = position.vertical_count;
+
+ dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
+ &vupdate_end);
+
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ struct optc *optc1 = DCN10TG_FROM_TG(tg);
+
+ ASSERT(optc1->max_frame_count != 0);
+
+ if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
+ return;
+
+ pipe_ctx->next_vupdate = vupdate_start;
+
+ cur_frame = tg->funcs->get_frame_count(tg);
+
+ if (vpos < vupdate_start) {
+ pipe_ctx->wait_frame_count = cur_frame;
+ } else {
+ if (cur_frame + 1 > optc1->max_frame_count)
+ pipe_ctx->wait_frame_count = cur_frame + 1 - optc1->max_frame_count;
+ else
+ pipe_ctx->wait_frame_count = cur_frame + 1;
+ }
+
+ pipe_ctx->wait_is_required = true;
+}
+
void dcn10_lock_all_pipes(struct dc *dc,
struct dc_state *context,
bool lock)
@@ -2664,7 +2786,6 @@ void dcn10_update_visual_confirm_color(struct dc *dc,
struct mpc *mpc = dc->res_pool->mpc;
if (mpc->funcs->set_bg_color) {
- memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
}
}
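
The wait helper above converts a line count into microseconds via us_per_line = h_total * 10000 / pix_clk_100hz: since pix_clk_100hz is the pixel clock in units of 100 Hz, a line of h_total pixels takes h_total / (pix_clk_100hz * 100) seconds, which scales to exactly that expression in microseconds. A worked sketch with assumed 1080p timing:

    #include <stdio.h>

    /* Sketch: line time in us, matching us_per_line above. Timing values assumed. */
    int main(void)
    {
        unsigned int h_total = 2200;           /* 1080p @ 60 Hz CEA timing */
        unsigned int pix_clk_100hz = 1485000;  /* 148.5 MHz in 100 Hz units */

        unsigned int us_per_line = h_total * 10000u / pix_clk_100hz;
        printf("us_per_line = %u\n", us_per_line);  /* ~14 us */

        unsigned int lines_to_vupdate = 90;         /* hypothetical distance */
        printf("us_to_vupdate = %u\n", lines_to_vupdate * us_per_line);
        return 0;
    }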
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
index 42ffd1e1299c..57d30ea225f2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
@@ -50,6 +50,13 @@ void dcn10_optimize_bandwidth(
void dcn10_prepare_bandwidth(
struct dc *dc,
struct dc_state *context);
+void dcn10_wait_for_pipe_update_if_needed(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool is_surface_update_only);
+void dcn10_set_wait_for_update_needed_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
void dcn10_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 846c9c51f2d9..858288c3b1ac 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -2053,7 +2053,7 @@ void dcn20_program_front_end_for_ctx(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+ if (pipe->plane_state) {
ASSERT(!pipe->plane_state->triplebuffer_flips);
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
@@ -2482,7 +2482,7 @@ bool dcn20_update_bandwidth(
struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK)
return false;
/* apply updated bandwidth parameters */
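
validate_bandwidth() now returns an enum dc_status rather than a bool; because DC_OK is 1 and failures are other nonzero values, only an explicit comparison against DC_OK is safe, as truth-testing the result would treat every nonzero error code as success. A minimal sketch of the pitfall:

    #include <stdio.h>

    enum dc_status_sketch {        /* abbreviated stand-in for enum dc_status */
        DC_OK = 1,
        DC_FAIL_BANDWIDTH_VALIDATE = 2,
    };

    int main(void)
    {
        enum dc_status_sketch st = DC_FAIL_BANDWIDTH_VALIDATE;

        if (!st)                   /* wrong: nonzero error codes look "true" */
            printf("bool-style check misses the failure\n");
        if (st != DC_OK)           /* right: compare against DC_OK explicitly */
            printf("status-style check catches it\n");
        return 0;
    }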
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index be26c925fdfa..e68f21fd5f0f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -84,6 +84,20 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
struct dsc_config dsc_cfg;
struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
+ struct dcn_dsc_state dsc_state = {0};
+
+ if (!dsc) {
+ DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ return;
+ }
+
+ if (dsc->funcs->dsc_read_state) {
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+ if (!dsc_state.dsc_fw_en) {
+ DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ return;
+ }
+ }
/* Enable DSC hw block */
dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index cd0adf72b223..a0b05b9ef660 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -1181,6 +1181,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
bool two_pix_per_container = false;
+ struct dce_hwseq *hws = stream->ctx->dc->hwseq;
two_pix_per_container = pipe_ctx->stream_res.tg->funcs->is_two_pixels_per_container(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
@@ -1201,7 +1202,8 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
} else {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_4;
- if ((odm_combine_factor == 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
+ if ((odm_combine_factor == 2) || (hws->funcs.is_dp_dig_pixel_rate_div_policy &&
+ hws->funcs.is_dp_dig_pixel_rate_div_policy(pipe_ctx)))
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 922b8d71cf1a..c814d957305a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -241,11 +241,6 @@ void dcn35_init_hw(struct dc *dc)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
- if (res_pool->dccg->funcs->dccg_root_gate_disable_control) {
- for (i = 0; i < res_pool->pipe_count; i++)
- res_pool->dccg->funcs->dccg_root_gate_disable_control(res_pool->dccg, i, 0);
- }
-
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
@@ -901,12 +896,18 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dccg *dccg = dc->res_pool->dccg;
+
+
/* enable DCFCLK current DCHUB */
pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
/* initialize HUBP on power up */
pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);
-
+ /*make sure DPPCLK is on*/
+ dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, true);
+ dpp->funcs->dpp_dppclk_control(dpp, false, true);
/* make sure OPP_PIPE_CLOCK_EN = 1 */
pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
pipe_ctx->stream_res.opp,
@@ -923,6 +924,7 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
// Program system aperture settings
pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
}
+ //DC_LOG_DEBUG("%s: dpp_inst(%d) =\n", __func__, dpp->inst);
if (!pipe_ctx->top_pipe
&& pipe_ctx->plane_state
@@ -938,6 +940,8 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dccg *dccg = dc->res_pool->dccg;
+
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
@@ -955,7 +959,8 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->funcs->hubp_clk_cntl(hubp, false);
dpp->funcs->dpp_dppclk_control(dpp, false, false);
-/*to do, need to support both case*/
+ dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, false);
+
hubp->power_gated = true;
hubp->funcs->hubp_reset(hubp);
@@ -967,6 +972,8 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->top_pipe = NULL;
pipe_ctx->bottom_pipe = NULL;
pipe_ctx->plane_state = NULL;
+ //DC_LOG_DEBUG("%s: dpp_inst(%d)=\n", __func__, dpp->inst);
+
}
void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
@@ -1543,7 +1550,7 @@ static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx)
struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings;
const struct dc *dc = pipe_ctx->stream->link->dc;
- if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ if (pipe_ctx->link_config.dp_tunnel_settings.should_enable_dp_tunneling == false)
return false;
// Not necessary for MST configurations
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index 6a82a865209c..a3ccf805bd16 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -168,6 +168,8 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
.dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
+ .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
+ .set_wait_for_update_needed_for_pipe = dcn10_set_wait_for_update_needed_for_pipe,
};
void dcn35_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index 902a96940a01..58f2be2a326b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -158,10 +158,12 @@ static const struct hwseq_private_funcs dcn351_private_funcs = {
.set_mcm_luts = dcn32_set_mcm_luts,
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
- .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+ .is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
.dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
+ .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
+ .set_wait_for_update_needed_for_pipe = dcn10_set_wait_for_update_needed_for_pipe,
};
void dcn351_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 3af6a3402b89..c4177a9a662f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -396,6 +396,249 @@ static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ct
}
}
+static void dcn401_set_mcm_location_post_blend(struct dc *dc, struct pipe_ctx *pipe_ctx, bool bPostBlend)
+{
+ struct mpc *mpc = dc->res_pool->mpc;
+ int mpcc_id = pipe_ctx->plane_res.hubp->inst;
+
+ if (!pipe_ctx->plane_state)
+ return;
+
+ mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
+ pipe_ctx->plane_state->mcm_location = (bPostBlend) ?
+ MPCC_MOVABLE_CM_LOCATION_AFTER :
+ MPCC_MOVABLE_CM_LOCATION_BEFORE;
+}
+
+static void dc_get_lut_mode(
+ enum dc_cm2_gpu_mem_layout layout,
+ enum hubp_3dlut_fl_mode *mode,
+ enum hubp_3dlut_fl_addressing_mode *addr_mode)
+{
+ switch (layout) {
+ case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
+ *mode = hubp_3dlut_fl_mode_native_1;
+ *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
+ break;
+ case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
+ *mode = hubp_3dlut_fl_mode_native_2;
+ *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
+ break;
+ case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
+ *mode = hubp_3dlut_fl_mode_transform;
+ *addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
+ break;
+ default:
+ *mode = hubp_3dlut_fl_mode_disable;
+ *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
+ break;
+ }
+}
+
+static void dc_get_lut_format(
+ enum dc_cm2_gpu_mem_format dc_format,
+ enum hubp_3dlut_fl_format *format)
+{
+ switch (dc_format) {
+ case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
+ *format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
+ break;
+ case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
+ *format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
+ break;
+ case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
+ *format = hubp_3dlut_fl_format_float_fp1_5_10;
+ break;
+ }
+}
+
+static void dc_get_lut_xbar(
+ enum dc_cm2_gpu_mem_pixel_component_order order,
+ enum hubp_3dlut_fl_crossbar_bit_slice *cr_r,
+ enum hubp_3dlut_fl_crossbar_bit_slice *y_g,
+ enum hubp_3dlut_fl_crossbar_bit_slice *cb_b)
+{
+ switch (order) {
+ case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
+ *cr_r = hubp_3dlut_fl_crossbar_bit_slice_32_47;
+ *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
+ *cb_b = hubp_3dlut_fl_crossbar_bit_slice_0_15;
+ break;
+ case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA:
+ *cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
+ *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
+ *cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
+ break;
+ }
+}
+
+static void dc_get_lut_width(
+ enum dc_cm2_gpu_mem_size size,
+ enum hubp_3dlut_fl_width *width)
+{
+ switch (size) {
+ case DC_CM2_GPU_MEM_SIZE_333333:
+ *width = hubp_3dlut_fl_width_33;
+ break;
+ case DC_CM2_GPU_MEM_SIZE_171717:
+ *width = hubp_3dlut_fl_width_17;
+ break;
+ case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
+ *width = hubp_3dlut_fl_width_transformed;
+ break;
+ }
+}
+static bool dc_is_rmcm_3dlut_supported(struct hubp *hubp, struct mpc *mpc)
+{
+ if (mpc->funcs->rmcm.update_3dlut_fast_load_select &&
+ mpc->funcs->rmcm.program_lut_read_write_control &&
+ hubp->funcs->hubp_program_3dlut_fl_addr &&
+ mpc->funcs->rmcm.program_bit_depth &&
+ hubp->funcs->hubp_program_3dlut_fl_mode &&
+ hubp->funcs->hubp_program_3dlut_fl_addressing_mode &&
+ hubp->funcs->hubp_program_3dlut_fl_format &&
+ hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
+ mpc->funcs->rmcm.program_bias_scale &&
+ hubp->funcs->hubp_program_3dlut_fl_crossbar &&
+ hubp->funcs->hubp_program_3dlut_fl_width &&
+ mpc->funcs->rmcm.update_3dlut_fast_load_select &&
+ mpc->funcs->rmcm.populate_lut &&
+ mpc->funcs->rmcm.program_lut_mode &&
+ hubp->funcs->hubp_enable_3dlut_fl &&
+ mpc->funcs->rmcm.enable_3dlut_fl)
+ return true;
+
+ return false;
+}
+
+bool dcn401_program_rmcm_luts(
+ struct hubp *hubp,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_cm2_transfer_func_source lut3d_src,
+ struct dc_cm2_func_luts *mcm_luts,
+ struct mpc *mpc,
+ bool lut_bank_a,
+ int mpcc_id)
+{
+ struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ union mcm_lut_params m_lut_params;
+ enum MCM_LUT_XABLE shaper_xable, lut3d_xable = MCM_LUT_DISABLE, lut1d_xable;
+ enum hubp_3dlut_fl_mode mode;
+ enum hubp_3dlut_fl_addressing_mode addr_mode;
+ enum hubp_3dlut_fl_format format = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
+ enum hubp_3dlut_fl_width width = 0;
+ struct dc *dc = hubp->ctx->dc;
+
+ bool bypass_rmcm_3dlut = false;
+ bool bypass_rmcm_shaper = false;
+
+ dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
+
+ /* 3DLUT */
+ switch (lut3d_src) {
+ case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
+ memset(&m_lut_params, 0, sizeof(m_lut_params));
+ // Don't know what to do in this case.
+ //case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
+ break;
+ case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
+ dc_get_lut_width(mcm_luts->lut3d_data.gpu_mem_params.size, &width);
+ if (!dc_is_rmcm_3dlut_supported(hubp, mpc) ||
+ !mpc->funcs->rmcm.is_config_supported(width))
+ return false;
+
+ //0. disable fl on mpc
+ mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, 0xF);
+
+ //1. power down the block
+ mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, false);
+
+ //2. program RMCM
+ //2a. 3dlut reg programming
+ mpc->funcs->rmcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a,
+ (!bypass_rmcm_3dlut) && lut3d_xable != MCM_LUT_DISABLE, mpcc_id);
+
+ hubp->funcs->hubp_program_3dlut_fl_addr(hubp,
+ mcm_luts->lut3d_data.gpu_mem_params.addr);
+
+ mpc->funcs->rmcm.program_bit_depth(mpc,
+ mcm_luts->lut3d_data.gpu_mem_params.bit_depth, mpcc_id);
+
+ // setting native or transformed mode,
+ dc_get_lut_mode(mcm_luts->lut3d_data.gpu_mem_params.layout, &mode, &addr_mode);
+
+ //these program the mcm 3dlut
+ hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode);
+
+ hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode);
+
+ //seems to be only for the MCM
+ dc_get_lut_format(mcm_luts->lut3d_data.gpu_mem_params.format_params.format, &format);
+ hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
+
+ mpc->funcs->rmcm.program_bias_scale(mpc,
+ mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias,
+ mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale,
+ mpcc_id);
+ hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
+ mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias,
+ mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale);
+
+ dc_get_lut_xbar(
+ mcm_luts->lut3d_data.gpu_mem_params.component_order,
+ &crossbar_bit_slice_cr_r,
+ &crossbar_bit_slice_y_g,
+ &crossbar_bit_slice_cb_b);
+
+ hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
+ crossbar_bit_slice_cr_r,
+ crossbar_bit_slice_y_g,
+ crossbar_bit_slice_cb_b);
+
+ mpc->funcs->rmcm.program_3dlut_size(mpc, width, mpcc_id);
+
+ mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
+
+ //2b. shaper reg programming
+ memset(&m_lut_params, 0, sizeof(m_lut_params));
+
+ if (mcm_luts->shaper->type == TF_TYPE_HWPWL) {
+ m_lut_params.pwl = &mcm_luts->shaper->pwl;
+ } else if (mcm_luts->shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ ASSERT(false);
+ cm_helper_translate_curve_to_hw_format(
+ dc->ctx,
+ mcm_luts->shaper,
+ &dpp_base->regamma_params, true);
+ m_lut_params.pwl = &dpp_base->regamma_params;
+ }
+ if (m_lut_params.pwl) {
+ mpc->funcs->rmcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
+ mpc->funcs->rmcm.program_lut_mode(mpc, !bypass_rmcm_shaper, lut_bank_a, mpcc_id);
+ } else {
+ //RMCM 3dlut won't work without its shaper
+ return false;
+ }
+
+ //3. Select the hubp connected to this RMCM
+ hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
+ mpc->funcs->rmcm.enable_3dlut_fl(mpc, true, mpcc_id);
+
+ //4. power on the block
+ if (m_lut_params.pwl)
+ mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, true);
+
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
void dcn401_populate_mcm_luts(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_cm2_func_luts mcm_luts,
@@ -407,21 +650,39 @@ void dcn401_populate_mcm_luts(struct dc *dc,
struct mpc *mpc = dc->res_pool->mpc;
union mcm_lut_params m_lut_params;
enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src;
- enum hubp_3dlut_fl_format format;
+ enum hubp_3dlut_fl_format format = 0;
enum hubp_3dlut_fl_mode mode;
- enum hubp_3dlut_fl_width width;
+ enum hubp_3dlut_fl_width width = 0;
enum hubp_3dlut_fl_addressing_mode addr_mode;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE;
enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
- bool is_17x17x17 = true;
bool rval;
dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
+ //MCM - setting its location (Before/After) blender
+ //set to post blend (true)
+ dcn401_set_mcm_location_post_blend(
+ dc,
+ pipe_ctx,
+ mcm_luts.lut3d_data.mpc_mcm_post_blend);
+
+ //RMCM - 3dLUT+Shaper
+ if (mcm_luts.lut3d_data.rmcm_3dlut_enable) {
+ dcn401_program_rmcm_luts(
+ hubp,
+ pipe_ctx,
+ lut3d_src,
+ &mcm_luts,
+ mpc,
+ lut_bank_a,
+ mpcc_id);
+ }
+
/* 1D LUT */
if (mcm_luts.lut1d_func) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
@@ -442,7 +703,7 @@ void dcn401_populate_mcm_luts(struct dc *dc,
}
/* Shaper */
- if (mcm_luts.shaper) {
+ if (mcm_luts.shaper && mcm_luts.lut3d_data.mpc_3dlut_enable) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
if (mcm_luts.shaper->type == TF_TYPE_HWPWL)
m_lut_params.pwl = &mcm_luts.shaper->pwl;
@@ -454,11 +715,11 @@ void dcn401_populate_mcm_luts(struct dc *dc,
m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
}
if (m_lut_params.pwl) {
- if (mpc->funcs->populate_lut)
- mpc->funcs->populate_lut(mpc, MCM_LUT_SHAPER, m_lut_params, lut_bank_a, mpcc_id);
+ if (mpc->funcs->mcm.populate_lut)
+ mpc->funcs->mcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
+ if (mpc->funcs->program_lut_mode)
+ mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_ENABLE, lut_bank_a, mpcc_id);
}
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, shaper_xable, lut_bank_a, mpcc_id);
}
/* 3DLUT */
@@ -467,6 +728,7 @@ void dcn401_populate_mcm_luts(struct dc *dc,
memset(&m_lut_params, 0, sizeof(m_lut_params));
if (hubp->funcs->hubp_enable_3dlut_fl)
hubp->funcs->hubp_enable_3dlut_fl(hubp, false);
+
if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) {
m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d;
if (mpc->funcs->populate_lut)
@@ -476,16 +738,35 @@ void dcn401_populate_mcm_luts(struct dc *dc,
mpcc_id);
}
break;
- case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
+ case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
+ switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
+ case DC_CM2_GPU_MEM_SIZE_333333:
+ width = hubp_3dlut_fl_width_33;
+ break;
+ case DC_CM2_GPU_MEM_SIZE_171717:
+ width = hubp_3dlut_fl_width_17;
+ break;
+ case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
+ width = hubp_3dlut_fl_width_transformed;
+ break;
+ }
+
+ //check for support
+ if (mpc->funcs->mcm.is_config_supported &&
+ !mpc->funcs->mcm.is_config_supported(width))
+ break;
if (mpc->funcs->program_lut_read_write_control)
mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id);
if (mpc->funcs->program_lut_mode)
mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id);
- if (mpc->funcs->program_3dlut_size)
- mpc->funcs->program_3dlut_size(mpc, is_17x17x17, mpcc_id);
+
if (hubp->funcs->hubp_program_3dlut_fl_addr)
hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr);
+
+ if (mpc->funcs->mcm.program_bit_depth)
+ mpc->funcs->mcm.program_bit_depth(mpc, mcm_luts.lut3d_data.gpu_mem_params.bit_depth, mpcc_id);
+
switch (mcm_luts.lut3d_data.gpu_mem_params.layout) {
case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
mode = hubp_3dlut_fl_mode_native_1;
@@ -512,7 +793,6 @@ void dcn401_populate_mcm_luts(struct dc *dc,
switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) {
case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
- default:
format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
break;
case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
@@ -524,37 +804,37 @@ void dcn401_populate_mcm_luts(struct dc *dc,
}
if (hubp->funcs->hubp_program_3dlut_fl_format)
hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
- if (hubp->funcs->hubp_update_3dlut_fl_bias_scale)
+ if (hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
+ mpc->funcs->mcm.program_bias_scale) {
+ mpc->funcs->mcm.program_bias_scale(mpc,
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale,
+ mpcc_id);
hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
-
- switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) {
- case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
- default:
- crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
- crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
- crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
- break;
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
}
+ //navi 4x has a bug where the red and blue components are swapped and need to be worked around here
+ //TODO: add a per-asic get_xbar method OR do the workaround in program_crossbar for 4x
+ dc_get_lut_xbar(
+ mcm_luts.lut3d_data.gpu_mem_params.component_order,
+ &crossbar_bit_slice_cr_r,
+ &crossbar_bit_slice_y_g,
+ &crossbar_bit_slice_cb_b);
+
if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
+ crossbar_bit_slice_cr_r,
crossbar_bit_slice_y_g,
- crossbar_bit_slice_cb_b,
- crossbar_bit_slice_cr_r);
+ crossbar_bit_slice_cb_b);
+
+ if (mpc->funcs->mcm.program_lut_read_write_control)
+ mpc->funcs->mcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, true, mpcc_id);
+
+ if (mpc->funcs->mcm.program_3dlut_size)
+ mpc->funcs->mcm.program_3dlut_size(mpc, width, mpcc_id);
- switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
- case DC_CM2_GPU_MEM_SIZE_171717:
- default:
- width = hubp_3dlut_fl_width_17;
- break;
- case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
- width = hubp_3dlut_fl_width_transformed;
- break;
- }
- if (hubp->funcs->hubp_program_3dlut_fl_width)
- hubp->funcs->hubp_program_3dlut_fl_width(hubp, width);
if (mpc->funcs->update_3dlut_fast_load_select)
mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
@@ -2081,7 +2361,7 @@ void dcn401_program_front_end_for_ctx(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+ if (pipe->plane_state) {
if (pipe->plane_state->triplebuffer_flips)
BREAK_TO_DEBUGGER();
@@ -2371,7 +2651,7 @@ bool dcn401_update_bandwidth(
struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK)
return false;
/* apply updated bandwidth parameters */
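
The dc_get_lut_xbar() helper introduced above selects which 16-bit slice of the LUT texel feeds each of the Cr/R, Y/G and Cb/B taps; RGBA and BGRA mirror the R and B slices (bits 32..47 vs 0..15) while G always rides bits 16..31. A compact sketch of that selection, with hypothetical slice constants:

    #include <stdio.h>

    /* Sketch: pick 16-bit texel slices per component order (constants hypothetical). */
    enum bit_slice { SLICE_0_15, SLICE_16_31, SLICE_32_47 };
    enum comp_order { ORDER_RGBA, ORDER_BGRA };

    static void get_xbar(enum comp_order o, enum bit_slice *cr_r,
                         enum bit_slice *y_g, enum bit_slice *cb_b)
    {
        *y_g  = SLICE_16_31;                            /* green is fixed */
        *cr_r = (o == ORDER_RGBA) ? SLICE_32_47 : SLICE_0_15;
        *cb_b = (o == ORDER_RGBA) ? SLICE_0_15  : SLICE_32_47;
    }

    int main(void)
    {
        enum bit_slice r, g, b;

        get_xbar(ORDER_BGRA, &r, &g, &b);
        printf("BGRA: cr_r slice %d, y_g slice %d, cb_b slice %d\n", r, g, b);
        return 0;
    }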
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index 781cf0efccc6..ce65b4f6c672 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -109,4 +109,12 @@ void dcn401_detect_pipe_changes(
void dcn401_plane_atomic_power_down(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp);
+bool dcn401_program_rmcm_luts(
+ struct hubp *hubp,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_cm2_transfer_func_source lut3d_src,
+ struct dc_cm2_func_luts *mcm_luts,
+ struct mpc *mpc,
+ bool lut_bank_a,
+ int mpcc_id);
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index c8b5ed834579..3a0795045bc6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -195,6 +195,8 @@ enum block_sequence_func {
DMUB_SUBVP_SAVE_SURF_ADDR,
HUBP_WAIT_FOR_DCC_META_PROP,
DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST,
+ /* This must be the last value in this enum, add new ones above */
+ HWSS_BLOCK_SEQUENCE_FUNC_COUNT
};
struct block_sequence {
@@ -202,6 +204,8 @@ struct block_sequence {
enum block_sequence_func func;
};
+#define MAX_HWSS_BLOCK_SEQUENCE_SIZE (HWSS_BLOCK_SEQUENCE_FUNC_COUNT * MAX_PIPES)
+
struct hw_sequencer_funcs {
void (*hardware_release)(struct dc *dc);
/* Embedded Display Related */
@@ -534,13 +538,13 @@ void set_drr_and_clear_adjust_pending(
struct drr_params *params);
void hwss_execute_sequence(struct dc *dc,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
int num_steps);
void hwss_build_fast_sequence(struct dc *dc,
struct dc_dmub_cmd *dc_dmub_cmd,
unsigned int dmub_cmd_count,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
unsigned int *num_steps,
struct pipe_ctx *pipe_ctx,
struct dc_stream_status *stream_status,
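The MAX_HWSS_BLOCK_SEQUENCE_SIZE macro ties the sequence-array capacity to the enum terminator (one slot per function per pipe) instead of the old hard-coded 100. A sketch of a bounds-checked append under the new sizing (block_sequence_push is a hypothetical helper, and the params member is assumed from the surrounding struct):

    static void block_sequence_push(struct dc_state *state,
    				union block_sequence_params params,
    				enum block_sequence_func func)
    {
    	/* capacity is HWSS_BLOCK_SEQUENCE_FUNC_COUNT * MAX_PIPES entries */
    	if (state->block_sequence_steps >= MAX_HWSS_BLOCK_SEQUENCE_SIZE)
    		return;

    	state->block_sequence[state->block_sequence_steps].params = params;
    	state->block_sequence[state->block_sequence_steps].func = func;
    	state->block_sequence_steps++;
    }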
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 22a5d4a03c98..09bc65c2fa23 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -183,6 +183,8 @@ struct hwseq_private_funcs {
struct dc_cm2_func_luts mcm_luts,
bool lut_bank_a);
void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx);
+ void (*wait_for_pipe_update_if_needed)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only);
+ void (*set_wait_for_update_needed_for_pipe)(struct dc *dc, struct pipe_ctx *pipe_ctx);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index b5afd8c3103d..f3696143590c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -26,6 +26,8 @@
#ifndef _CORE_STATUS_H_
#define _CORE_STATUS_H_
+#include "dc_hw_types.h"
+
enum dc_status {
DC_OK = 1,
@@ -56,6 +58,7 @@ enum dc_status {
DC_NO_LINK_ENC_RESOURCE = 26,
DC_FAIL_DP_PAYLOAD_ALLOCATION = 27,
DC_FAIL_DP_LINK_BANDWIDTH = 28,
+ DC_FAIL_HW_CURSOR_SUPPORT = 29,
DC_ERROR_UNEXPECTED = -1
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index d0021f25f3d8..0cf349cafb3e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -65,6 +65,7 @@ struct resource_pool;
struct dc_state;
struct resource_context;
struct clk_bw_params;
+struct dc_mcache_params;
struct resource_funcs {
enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index);
@@ -78,8 +79,7 @@ struct resource_funcs {
/* Create a minimal link encoder object with no dc_link object
* associated with it. */
struct link_encoder *(*link_enc_create_minimal)(struct dc_context *ctx, enum engine_id eng_id);
-
- bool (*validate_bandwidth)(
+ enum dc_status (*validate_bandwidth)(
struct dc *dc,
struct dc_state *context,
bool fast_validate);
@@ -218,6 +218,11 @@ struct resource_funcs {
int (*get_power_profile)(const struct dc_state *context);
unsigned int (*get_det_buffer_size)(const struct dc_state *context);
unsigned int (*get_vstartup_for_pipe)(struct pipe_ctx *pipe_ctx);
+ unsigned int (*get_max_hw_cursor_size)(const struct dc *dc,
+ struct dc_state *state,
+ const struct dc_stream_state *stream);
+ bool (*program_mcache_pipe_config)(struct dc_state *context,
+ const struct dc_mcache_params *mcache_params);
};
struct audio_support{
@@ -382,7 +387,9 @@ struct link_resource {
struct link_config {
struct dc_link_settings dp_link_settings;
+ struct dc_tunnel_settings dp_tunnel_settings;
};
+
union pipe_update_flags {
struct {
uint32_t enable : 1;
@@ -480,6 +487,10 @@ struct pipe_ctx {
struct pixel_rate_divider pixel_rate_divider;
/* pixels borrowed from hblank to hactive */
uint8_t hblank_borrow;
+ /* next vupdate */
+ uint32_t next_vupdate;
+ uint32_t wait_frame_count;
+ bool wait_is_required;
};
/* Data used for dynamic link encoder assignment.
@@ -507,7 +518,7 @@ struct resource_context {
unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
bool is_mpc_3dlut_acquired[MAX_PIPES];
- /* solely used for build scalar data in dml2 */
+ /* used to build scalar data in dml2 and for edp backlight programming */
struct pipe_ctx temp_pipe;
};
@@ -630,7 +641,7 @@ struct dc_state {
*/
struct bw_context bw_ctx;
- struct block_sequence block_sequence[100];
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE];
unsigned int block_sequence_steps;
struct dc_dmub_cmd dc_dmub_cmd[10];
unsigned int dmub_cmd_count;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 221645c023b5..bac8febad69a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -199,6 +199,7 @@ enum dentist_divider_range {
CLK_SR_DCN35(CLK1_CLK4_ALLOW_DS), \
CLK_SR_DCN35(CLK1_CLK5_ALLOW_DS), \
CLK_SR_DCN35(CLK5_spll_field_8), \
+ CLK_SR_DCN35(CLK6_spll_field_8), \
SR(DENTIST_DISPCLK_CNTL), \
#define CLK_COMMON_MASK_SH_LIST_DCN32(mask_sh) \
@@ -307,7 +308,7 @@ struct clk_mgr_registers {
uint32_t CLK1_CLK4_ALLOW_DS;
uint32_t CLK1_CLK5_ALLOW_DS;
uint32_t CLK5_spll_field_8;
-
+ uint32_t CLK6_spll_field_8;
};
struct clk_mgr_shift {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 3a89cc0cffc1..6e303b81bfb0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -967,23 +967,6 @@ struct mpc_funcs {
*/
void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
- /**
- * @get_3dlut_fast_load_status:
- *
- * Get 3D LUT fast load status and reference them with done, soft_underflow and hard_underflow pointers.
- *
- * Parameters:
- * - [in/out] mpc - MPC context.
- * - [in] mpcc_id
- * - [in/out] done
- * - [in/out] soft_underflow
- * - [in/out] hard_underflow
- *
- * Return:
- *
- * void
- */
- void (*get_3dlut_fast_load_status)(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow);
/**
* @populate_lut:
@@ -1054,6 +1037,35 @@ struct mpc_funcs {
* void
*/
void (*program_3dlut_size)(struct mpc *mpc, bool is_17x17x17, int mpcc_id);
+
+ struct {
+ void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id);
+ void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id);
+ void (*program_bit_depth)(struct mpc *mpc, uint16_t bit_depth, int mpcc_id);
+ bool (*is_config_supported)(uint32_t width);
+ void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
+ bool lut_bank_a, bool enabled, int mpcc_id);
+
+ void (*populate_lut)(struct mpc *mpc, const union mcm_lut_params params,
+ bool lut_bank_a, int mpcc_id);
+ } mcm;
+
+ struct {
+ void (*enable_3dlut_fl)(struct mpc *mpc, bool enable, int mpcc_id);
+ void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
+ void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
+ bool lut_bank_a, bool enabled, int mpcc_id);
+ void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_XABLE xable,
+ bool lut_bank_a, int mpcc_id);
+ void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id);
+ void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id);
+ void (*program_bit_depth)(struct mpc *mpc, uint16_t bit_depth, int mpcc_id);
+ bool (*is_config_supported)(uint32_t width);
+
+ void (*power_on_shaper_3dlut)(struct mpc *mpc, uint32_t mpcc_id, bool power_on);
+ void (*populate_lut)(struct mpc *mpc, const union mcm_lut_params params,
+ bool lut_bank_a, int mpcc_id);
+ } rmcm;
};
#endif
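The new mcm and rmcm tables are optional per-ASIC hooks, so callers NULL-check each pointer before dispatch, as the dcn401 hwseq hunks earlier in this patch do. A minimal sketch of that guarded-dispatch pattern (the wrapper name is illustrative):

    /* Sketch: guarded dispatch into the per-ASIC mcm callback table; ASICs
     * that do not populate the hook simply skip the programming step.
     */
    static void mcm_program_3dlut_size_safe(struct mpc *mpc, uint32_t width,
    					int mpcc_id)
    {
    	if (mpc->funcs->mcm.program_3dlut_size)
    		mpc->funcs->mcm.program_3dlut_size(mpc, width, mpcc_id);
    }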
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
index 7f371cbb35cd..0d5a8358a778 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
@@ -68,6 +68,7 @@ struct optc {
int pstate_keepout;
struct dc_crtc_timing orginal_patched_timing;
enum signal_type signal;
+ uint32_t max_frame_count;
};
void optc1_read_otg_state(struct timing_generator *optc, struct dcn_otg_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index 2948a696ee12..7d16351bba99 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -207,6 +207,9 @@ struct link_service {
bool (*dp_decide_link_settings)(
struct dc_stream_state *stream,
struct dc_link_settings *link_setting);
+ void (*dp_decide_tunnel_settings)(
+ struct dc_stream_state *stream,
+ struct dc_tunnel_settings *dp_tunnel_setting);
enum dp_link_encoding (*mst_decide_link_encoding_format)(
const struct dc_link *link);
bool (*edp_decide_link_settings)(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index a402df225a76..26cb1459b743 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -508,6 +508,10 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
initial_val, \
n, __VA_ARGS__)
+#define IX_REG_SET_SYNC(index, init_value, f1, v1) \
+ IX_REG_SET_N_SYNC(index, 1, init_value, \
+ FN(reg, f1), v1)
+
#define IX_REG_SET_2_SYNC(index, init_value, f1, v1, f2, v2) \
IX_REG_SET_N_SYNC(index, 2, init_value, \
FN(reg, f1), v1,\
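The single-field IX_REG_SET_SYNC completes the existing IX_REG_SET_N_SYNC family; conceptually it expands as sketched below (placeholder names for the index, field, and value):

    /* Sketch of the one-field expansion:
     *
     *   IX_REG_SET_SYNC(idx, init, FIELD, val)
     *     -> IX_REG_SET_N_SYNC(idx, 1, init, FN(reg, FIELD), val)
     *
     * i.e. the same path as IX_REG_SET_2_SYNC, with n = 1.
     */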
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 9458187b834d..a890f581f4e8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -32,6 +32,7 @@
#define MEMORY_TYPE_MULTIPLIER_CZ 4
#define MEMORY_TYPE_HBM 2
+#define MAX_MCACHES 8
#define IS_PIPE_SYNCD_VALID(pipe) ((((pipe)->pipe_idx_syncd) & 0x80)?1:0)
@@ -65,6 +66,13 @@ struct resource_straps {
uint32_t audio_stream_number;
};
+struct dc_mcache_allocations {
+ int global_mcache_ids_plane0[MAX_MCACHES + 1];
+ int global_mcache_ids_plane1[MAX_MCACHES + 1];
+ int global_mcache_ids_mall_plane0[MAX_MCACHES + 1];
+ int global_mcache_ids_mall_plane1[MAX_MCACHES + 1];
+};
+
struct resource_create_funcs {
void (*read_dce_straps)(
struct dc_context *ctx, struct resource_straps *straps);
@@ -628,8 +636,6 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
struct dc_state *context,
struct pipe_ctx *pipe_ctx);
-bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream);
-
/* Get hw programming parameters container from pipe context
* @pipe_ctx: pipe context
* @dscl_prog_data: struct to hold programmable hw reg values
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
index 953f4a4dacad..33ce470e4c88 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -37,36 +37,9 @@
#include "ivsrcid/ivsrcid_vislands30.h"
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c
index 2c72074310c7..d777b85e70da 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c
@@ -46,36 +46,9 @@
#include "dc_types.h"
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- DC_HPD1_INT_STATUS,
- DC_HPD1_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- DC_HPD1_INT_CONTROL,
- DC_HPD1_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd1_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -391,5 +364,3 @@ struct irq_service *dal_irq_service_dce60_create(
dce60_irq_construct(irq_service, init_data);
return irq_service;
}
-
-
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
index 49317934ef4f..3a9163acb49b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
@@ -37,36 +37,9 @@
#include "dc_types.h"
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- DC_HPD1_INT_STATUS,
- DC_HPD1_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- DC_HPD1_INT_CONTROL,
- DC_HPD1_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd1_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -303,5 +276,3 @@ struct irq_service *dal_irq_service_dce80_create(
dce80_irq_construct(irq_service, init_data);
return irq_service;
}
-
-
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index 9ca28565a9d1..4ce9edd16344 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -129,36 +129,9 @@ static enum dc_irq_source to_dal_irq_source_dcn10(struct irq_service *irq_servic
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
index 916f0c974637..5847af0e66cb 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
@@ -130,36 +130,9 @@ static enum dc_irq_source to_dal_irq_source_dcn20(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
index 1d61d475d36f..6417011d2246 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
@@ -80,36 +80,9 @@ static enum dc_irq_source to_dal_irq_source_dcn201(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index 42cdfe6c3538..71d2f065140b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -132,36 +132,9 @@ static enum dc_irq_source to_dal_irq_source_dcn21(struct irq_service *irq_servic
return DC_IRQ_SOURCE_INVALID;
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
index a443a8abb1ea..2a4080bdcf6b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
@@ -139,36 +139,9 @@ static enum dc_irq_source to_dal_irq_source_dcn30(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -447,4 +420,3 @@ struct irq_service *dal_irq_service_dcn30_create(
dcn30_irq_construct(irq_service, init_data);
return irq_service;
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
index 8ffc7e2c681a..624f1ac309f8 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
@@ -126,26 +126,9 @@ static enum dc_irq_source to_dal_irq_source_dcn302(struct irq_service *irq_servi
}
}
-static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(value, current_status ? 0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
index 262bb8b74b15..137caffae916 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
@@ -77,26 +77,9 @@ static enum dc_irq_source to_dal_irq_source_dcn303(struct irq_service *irq_servi
}
}
-static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(value, current_status ? 0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
index 53e78ae7eecf..921cb167d920 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
@@ -128,36 +128,9 @@ static enum dc_irq_source to_dal_irq_source_dcn31(struct irq_service *irq_servic
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
index e0563e880432..0118fd6e5db0 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
@@ -130,36 +130,9 @@ static enum dc_irq_source to_dal_irq_source_dcn314(struct irq_service *irq_servi
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
index 2ef22299101a..adebfc888618 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
@@ -135,36 +135,9 @@ static enum dc_irq_source to_dal_irq_source_dcn315(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c b/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
index f0ac0aeeac51..e9e315c75d76 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
@@ -129,36 +129,9 @@ static enum dc_irq_source to_dal_irq_source_dcn32(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -191,6 +164,16 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.ack = NULL
};
+static struct irq_source_info_funcs vline1_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vline2_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
@@ -259,6 +242,13 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &pflip_irq_info_funcs\
}
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
* of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
@@ -270,14 +260,6 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &vupdate_no_lock_irq_info_funcs\
}
-#define vblank_int_entry(reg_num)\
- [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
- IRQ_REG_ENTRY(OTG, reg_num,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
-}
-
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
@@ -285,6 +267,20 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
+#define vline1_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_CLEAR),\
+ .funcs = &vline1_irq_info_funcs\
+ }
+#define vline2_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE2 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_CLEAR),\
+ .funcs = &vline2_irq_info_funcs\
+ }
#define dmub_outbox_int_entry()\
[DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\
IRQ_REG_ENTRY_DMUB(\
@@ -387,21 +383,29 @@ irq_source_info_dcn32[DAL_IRQ_SOURCES_NUMBER] = {
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
- vupdate_no_lock_int_entry(0),
- vupdate_no_lock_int_entry(1),
- vupdate_no_lock_int_entry(2),
- vupdate_no_lock_int_entry(3),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
+ [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
+ dmub_outbox_int_entry(),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
- [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
- [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
- dmub_outbox_int_entry(),
+ vline1_int_entry(0),
+ vline1_int_entry(1),
+ vline1_int_entry(2),
+ vline1_int_entry(3),
+ vline2_int_entry(0),
+ vline2_int_entry(1),
+ vline2_int_entry(2),
+ vline2_int_entry(3)
};
static const struct irq_service_funcs irq_service_funcs_dcn32 = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c b/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
index ea8c271171bc..79e5e8c137ca 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
@@ -127,36 +127,9 @@ static enum dc_irq_source to_dal_irq_source_dcn35(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
index 7ec8e0de2f01..163b8ee9ebf7 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
@@ -106,36 +106,9 @@ static enum dc_irq_source to_dal_irq_source_dcn351(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
index ea958628f8b8..f716ab0fd30e 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
@@ -105,36 +105,9 @@ static enum dc_irq_source to_dal_irq_source_dcn36(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c b/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
index b43c9524b0de..fd9bb1950c20 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
@@ -109,36 +109,9 @@ static enum dc_irq_source to_dal_irq_source_dcn401(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -171,6 +144,16 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.ack = NULL
};
+static struct irq_source_info_funcs vline1_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vline2_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
@@ -239,6 +222,13 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &pflip_irq_info_funcs\
}
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
* of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
@@ -250,13 +240,6 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &vupdate_no_lock_irq_info_funcs\
}
-#define vblank_int_entry(reg_num)\
- [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
- IRQ_REG_ENTRY(OTG, reg_num,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
- }
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
@@ -264,6 +247,20 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
+#define vline1_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_CLEAR),\
+ .funcs = &vline1_irq_info_funcs\
+ }
+#define vline2_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE2 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_CLEAR),\
+ .funcs = &vline2_irq_info_funcs\
+ }
#define dmub_outbox_int_entry()\
[DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\
IRQ_REG_ENTRY_DMUB(\
@@ -364,21 +361,29 @@ irq_source_info_dcn401[DAL_IRQ_SOURCES_NUMBER] = {
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
- vupdate_no_lock_int_entry(0),
- vupdate_no_lock_int_entry(1),
- vupdate_no_lock_int_entry(2),
- vupdate_no_lock_int_entry(3),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
+ [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
+ dmub_outbox_int_entry(),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
- [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
- [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
- dmub_outbox_int_entry(),
+ vline1_int_entry(0),
+ vline1_int_entry(1),
+ vline1_int_entry(2),
+ vline1_int_entry(3),
+ vline2_int_entry(0),
+ vline2_int_entry(1),
+ vline2_int_entry(2),
+ vline2_int_entry(3),
};
static const struct irq_service_funcs irq_service_funcs_dcn401 = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index eca3d7ee7e4e..b595a11c5eaf 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -41,6 +41,16 @@
#include "reg_helper.h"
#include "irq_service.h"
+//HPD0_DC_HPD_INT_STATUS
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+//HPD1_DC_HPD_INT_STATUS
+#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED_MASK 0x10
+#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED__SHIFT 0x4
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK 0x100
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY__SHIFT 0x8
#define CTX \
@@ -177,3 +187,57 @@ enum dc_irq_source dal_irq_service_to_irq_source(
src_id,
ext_id);
}
+
+bool hpd0_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ HPD0_DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+bool hpd1_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ DC_HPD1_INT_STATUS,
+ DC_HPD1_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ DC_HPD1_INT_CONTROL,
+ DC_HPD1_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
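Every per-ASIC hpd_ack removed earlier in this patch was identical except for the register field names (HPD0_* on the DCN/DCE12 layouts, DC_HPD1_* on DCE6/DCE8), which is why the two shared helpers above suffice. Each IRQ service now just wires the shared symbol into its table, mirroring the .ack assignments in the hunks:

    static struct irq_source_info_funcs hpd_irq_info_funcs = {
    	.set = NULL,
    	.ack = hpd0_ack,	/* hpd1_ack on the DCE6/DCE8 register layout */
    };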
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
index b178f85944cd..bbcef3d2fe33 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
@@ -82,4 +82,12 @@ void dal_irq_service_set_generic(
const struct irq_source_info *info,
bool enable);
+bool hpd0_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+
+bool hpd1_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
index 110f656d43ae..a2f7b933bebf 100644
--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -161,6 +161,20 @@ enum dc_irq_source {
DC_IRQ_SOURCE_DPCX_TX_PHYE,
DC_IRQ_SOURCE_DPCX_TX_PHYF,
+ DC_IRQ_SOURCE_DC1_VLINE2,
+ DC_IRQ_SOURCE_DC2_VLINE2,
+ DC_IRQ_SOURCE_DC3_VLINE2,
+ DC_IRQ_SOURCE_DC4_VLINE2,
+ DC_IRQ_SOURCE_DC5_VLINE2,
+ DC_IRQ_SOURCE_DC6_VLINE2,
+
+ DC_IRQ_SOURCE_DCI2C_RR_DDC1,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC2,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC3,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC4,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC5,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC6,
+
DAL_IRQ_SOURCES_NUMBER
};
@@ -170,6 +184,8 @@ enum irq_type
IRQ_TYPE_VUPDATE = DC_IRQ_SOURCE_VUPDATE1,
IRQ_TYPE_VBLANK = DC_IRQ_SOURCE_VBLANK1,
IRQ_TYPE_VLINE0 = DC_IRQ_SOURCE_DC1_VLINE0,
+ IRQ_TYPE_VLINE1 = DC_IRQ_SOURCE_DC1_VLINE1,
+ IRQ_TYPE_VLINE2 = DC_IRQ_SOURCE_DC1_VLINE2,
IRQ_TYPE_DCUNDERFLOW = DC_IRQ_SOURCE_DC1UNDERFLOW,
};
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index cc9191a5c9e6..9655e6fa53a4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -611,6 +611,7 @@ static bool detect_dp(struct dc_link *link,
link->dpcd_caps.dongle_type = sink_caps->dongle_type;
link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one;
link->dpcd_caps.dpcd_rev.raw = 0;
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = 0;
}
return true;
@@ -1007,21 +1008,11 @@ static bool detect_link_and_local_sink(struct dc_link *link,
link->reported_link_cap.link_rate > LINK_RATE_HIGH3)
link->reported_link_cap.link_rate = LINK_RATE_HIGH3;
- /*
- * If this is DP over USB4 link then we need to:
- * - Enable BW ALLOC support on DPtx if applicable
- */
- if (dc->config.usb4_bw_alloc_support) {
- if (link_dp_dpia_set_dptx_usb4_bw_alloc_support(link)) {
- /* update with non reduced link cap if bw allocation mode is supported */
- if (link->dpia_bw_alloc_config.nrd_max_link_rate &&
- link->dpia_bw_alloc_config.nrd_max_lane_count) {
- link->reported_link_cap.link_rate =
- link->dpia_bw_alloc_config.nrd_max_link_rate;
- link->reported_link_cap.lane_count =
- link->dpia_bw_alloc_config.nrd_max_lane_count;
- }
- }
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
+ && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) {
+ if (link_dpia_enable_usb4_dp_bw_alloc_mode(link) == false)
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc = false;
}
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 53c961f86d43..273a3be6d593 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -2374,7 +2374,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
update_psp_stream_config(pipe_ctx, true);
dc->hwss.blank_stream(pipe_ctx);
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (pipe_ctx->link_config.dp_tunnel_settings.should_use_dp_bw_allocation)
deallocate_usb4_bandwidth(pipe_ctx->stream);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
@@ -2442,7 +2442,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
if (link->connector_signal == SIGNAL_TYPE_EDP && dc->debug.psp_disabled_wa) {
/* reset internal save state to default since eDP is off */
enum dp_panel_mode panel_mode = dp_get_panel_mode(pipe_ctx->stream->link);
- /* since current psp not loaded, we need to reset it to default*/
+ /* since current psp not loaded, we need to reset it to default */
link->panel_mode = panel_mode;
}
}
@@ -2620,7 +2620,7 @@ void link_set_dpms_on(
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_set_hblank_reduction_on_rx(pipe_ctx);
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (pipe_ctx->link_config.dp_tunnel_settings.should_use_dp_bw_allocation)
allocate_usb4_bandwidth(pipe_ctx->stream);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index f6b6b19e7481..1a04f4b74585 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -156,6 +156,7 @@ static void construct_link_service_dp_capability(struct link_service *link_srv)
link_srv->dp_get_encoding_format = link_dp_get_encoding_format;
link_srv->dp_should_enable_fec = dp_should_enable_fec;
link_srv->dp_decide_link_settings = link_decide_link_settings;
+ link_srv->dp_decide_tunnel_settings = link_decide_dp_tunnel_settings;
link_srv->mst_decide_link_encoding_format =
mst_decide_link_encoding_format;
link_srv->edp_decide_link_settings = edp_decide_link_settings;
@@ -464,6 +465,7 @@ static bool construct_phy(struct dc_link *link,
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
+ link->irq_source_read_request = DC_IRQ_SOURCE_INVALID;
link->link_status.dpcd_caps = &link->dpcd_caps;
link->dc = init_params->dc;
@@ -514,6 +516,9 @@ static bool construct_phy(struct dc_link *link,
case CONNECTOR_ID_HDMI_TYPE_A:
link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ if (link->hpd_gpio)
+ link->irq_source_read_request =
+ dal_irq_get_read_request(link->hpd_gpio);
break;
case CONNECTOR_ID_SINGLE_LINK_DVID:
case CONNECTOR_ID_SINGLE_LINK_DVII:
@@ -653,7 +658,7 @@ static bool construct_phy(struct dc_link *link,
}
/* Look for device tag that matches connector signal,
- * CRT for rgb, LCD for other supported signal tyes
+ * CRT for rgb, LCD for other supported signal types
*/
if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios,
link->device_tag.dev_id))
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 21ee0d96c9d4..8f79881ad9f1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -158,6 +158,14 @@ uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count)
return 0; // invalid value
}
+uint32_t dp_get_closest_lttpr_offset(uint8_t lttpr_count)
+{
+	/* Calculate offset for the LTTPR closest to DPTX, which is highest in the chain.
+	 * Offset is 0 for single LTTPR cases, as the base LTTPR DPCD addresses target LTTPR 1.
+ */
+ return DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE * (lttpr_count - 1);
+}
+
uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
{
switch (bw) {
@@ -2013,11 +2021,9 @@ static bool retrieve_link_cap(struct dc_link *link)
sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw));
/* Read DP tunneling information. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- status = dpcd_get_tunneling_device_data(link);
- if (status != DC_OK)
- dm_error("%s: Read DP tunneling device data failed.\n", __func__);
- }
+ status = dpcd_get_tunneling_device_data(link);
+ if (status != DC_OK)
+ dm_error("%s: Read DP tunneling device data failed.\n", __func__);
retrieve_cable_id(link);
dpcd_write_cable_id_to_dprx(link);
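dp_get_closest_lttpr_offset() encodes the DPCD layout in which per-repeater register blocks sit DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE bytes apart and the base addresses already target LTTPR 1. A quick worked check of the formula:

    /* Worked example for dp_get_closest_lttpr_offset():
     *   lttpr_count == 1  ->  offset = SIZE * 0 = 0 (base addresses hit LTTPR 1)
     *   lttpr_count == 2  ->  offset = SIZE * 1     (closest to DPTX is LTTPR 2)
     * where SIZE is DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE.
     */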
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
index 0ce0af3ddbeb..940b147cc5d4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
@@ -48,6 +48,9 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link);
/* Convert PHY repeater count read from DPCD uint8_t. */
uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count);
+/* Calculate embedded LTTPR address offset for vendor-specific behaviour */
+uint32_t dp_get_closest_lttpr_offset(uint8_t lttpr_count);
+
bool dp_is_sink_present(struct dc_link *link);
bool dp_is_lttpr_present(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
index 0d123e647652..22bfdced64ab 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -62,6 +62,36 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
if (status != DC_OK)
goto err;
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
+ dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT];
+
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling == false)
+ goto err;
+
+ link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
+ dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];
+ link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id =
+ dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];
+
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc) {
+ status = core_link_read_dpcd(link, USB4_DRIVER_BW_CAPABILITY,
+ dpcd_dp_tun_data, 1);
+
+ if (status != DC_OK)
+ goto err;
+
+ link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.raw = dpcd_dp_tun_data[0];
+ }
+
+ DC_LOG_DEBUG("%s: Link[%d] DP tunneling support (RouterId=%d AdapterId=%d) "
+ "DPIA_BW_Alloc_support=%d "
+ "CM_BW_Alloc_support=%d ",
+ __func__, link->link_index,
+ link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id,
+ link->dpcd_caps.usb4_dp_tun_info.dpia_info.bits.dpia_num,
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc,
+ link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support);
+
status = core_link_read_dpcd(
link,
DP_USB4_ROUTER_TOPOLOGY_ID,
@@ -71,13 +101,6 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
if (status != DC_OK)
goto err;
- link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
- dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT];
- link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
- dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];
- link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id =
- dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];
-
for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++)
link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i];
@@ -92,6 +115,7 @@ bool dpia_query_hpd_status(struct dc_link *link)
/* prepare QUERY_HPD command */
cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
+ cmd.query_hpd.header.payload_bytes = sizeof(cmd.query_hpd.data);
cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
@@ -119,3 +143,20 @@ bool dpia_query_hpd_status(struct dc_link *link)
return link->hpd_status;
}
+void link_decide_dp_tunnel_settings(struct dc_stream_state *stream,
+ struct dc_tunnel_settings *dp_tunnel_setting)
+{
+ struct dc_link *link = stream->link;
+
+ memset(dp_tunnel_setting, 0, sizeof(*dp_tunnel_setting));
+
+ if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT) || (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) {
+ dp_tunnel_setting->should_enable_dp_tunneling =
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling;
+
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
+ && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support)
+ dp_tunnel_setting->should_use_dp_bw_allocation = true;
+ }
+}
+
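link_decide_dp_tunnel_settings() reduces to two DPCD-derived predicates; as a sketch of the decision (SST/MST signals only -- anything else keeps the memset-zeroed defaults):

    /* Decision sketch for link_decide_dp_tunnel_settings():
     *
     *   should_enable_dp_tunneling  = dp_tun_cap.bits.dp_tunneling
     *   should_use_dp_bw_allocation = dp_tun_cap.bits.dpia_bw_alloc
     *                                 && driver_bw_cap.bits.driver_bw_alloc_support
     */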
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
index 363f45a1a964..a61edfc9ca7a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
@@ -38,4 +38,10 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
* Returns true if HPD high.
*/
bool dpia_query_hpd_status(struct dc_link *link);
+
+/* Decide the DP tunneling settings based on the DPCD capabilities
+ */
+void link_decide_dp_tunnel_settings(struct dc_stream_state *stream,
+ struct dc_tunnel_settings *dp_tunnel_setting);
+
#endif /* __DC_LINK_DPIA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index a254ead2f7e8..3af7564a84f1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -46,9 +46,10 @@
*/
static bool link_dp_is_bw_alloc_available(struct dc_link *link)
{
- return (link && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA
- && link->hpd_status
- && link->dpia_bw_alloc_config.bw_alloc_enabled);
+ return (link && link->hpd_status
+ && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
+ && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support);
}
static void reset_bw_alloc_struct(struct dc_link *link)
@@ -141,7 +142,7 @@ static int get_non_reduced_max_lane_count(struct dc_link *link)
 * granularity, Driver_ID, CM_Group, & populate the BW allocation structs
* for host router and dpia
*/
-static void init_usb4_bw_struct(struct dc_link *link)
+static void retrieve_usb4_dp_bw_allocation_info(struct dc_link *link)
{
reset_bw_alloc_struct(link);
@@ -282,49 +283,26 @@ static void link_dpia_send_bw_alloc_request(struct dc_link *link, int req_bw)
// ------------------------------------------------------------------
// PUBLIC FUNCTIONS
// ------------------------------------------------------------------
-bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
+bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
{
bool ret = false;
- uint8_t response = 0,
- bw_support_dpia = 0,
- bw_support_cm = 0;
+ uint8_t val;
- if (!(link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->hpd_status))
- goto out;
+ if (link->hpd_status) {
+ val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;
- if (core_link_read_dpcd(
- link,
- DP_TUNNELING_CAPABILITIES,
- &response,
- sizeof(uint8_t)) == DC_OK)
- bw_support_dpia = (response >> 7) & 1;
-
- if (core_link_read_dpcd(
- link,
- USB4_DRIVER_BW_CAPABILITY,
- &response,
- sizeof(uint8_t)) == DC_OK)
- bw_support_cm = (response >> 7) & 1;
-
- /* Send request acknowledgment to Turn ON DPTX support */
- if (bw_support_cm && bw_support_dpia) {
-
- response = 0x80;
- if (core_link_write_dpcd(
- link,
- DPTX_BW_ALLOCATION_MODE_CONTROL,
- &response,
- sizeof(uint8_t)) != DC_OK) {
- DC_LOG_DEBUG("%s: FAILURE Enabling DPtx BW Allocation Mode Support for link(%d)\n",
- __func__, link->link_index);
- } else {
- // SUCCESS Enabled DPtx BW Allocation Mode Support
- DC_LOG_DEBUG("%s: SUCCESS Enabling DPtx BW Allocation Mode Support for link(%d)\n",
- __func__, link->link_index);
+ if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) {
+ DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index);
+
+ retrieve_usb4_dp_bw_allocation_info(link);
+
+ if (link->dpia_bw_alloc_config.nrd_max_link_rate && link->dpia_bw_alloc_config.nrd_max_lane_count) {
+ link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
+ link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
+ }
- ret = true;
- init_usb4_bw_struct(link);
link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+ ret = true;
/*
* During DP tunnel creation, CM preallocates BW and reduces estimated BW of other
@@ -332,11 +310,12 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
* to make the CM to release preallocation and update estimated BW correctly for
* all DPIAs per host router
*/
+ // TODO: Zero allocation can be removed once the MSFT CM fix has been released
link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
- }
+ } else
+ DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index);
}
-out:
return ret;
}
@@ -378,7 +357,8 @@ void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status)
*/
void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw)
{
- if (link && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dpia_bw_alloc_config.bw_alloc_enabled) {
+ if (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ && link->dpia_bw_alloc_config.bw_alloc_enabled) {
//1. Hot Plug
if (link->hpd_status && peak_bw > 0) {
// If DP over USB4 then we need to check BW allocation
@@ -401,7 +381,7 @@ void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
if (link_dp_is_bw_alloc_available(link))
link_dpia_send_bw_alloc_request(link, req_bw);
else
- DC_LOG_DEBUG("%s: Not able to send the BW Allocation request", __func__);
+ DC_LOG_DEBUG("%s: BW Allocation mode not available", __func__);
}
bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
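
Note that the reworked availability predicate above no longer keys off the endpoint type; it is derived from HPD plus the three cached tunneling/BW-allocation capability bits. A self-contained sketch of that predicate under the same assumption (struct and field names are stand-ins, not the real dc types):

    #include <stdbool.h>
    #include <stddef.h>

    struct dpcd_tun_caps {
            bool dp_tunneling;
            bool dpia_bw_alloc;
            bool driver_bw_alloc;
    };

    struct sketch_link {
            bool hpd_status;
            struct dpcd_tun_caps caps;
    };

    /* Mirrors link_dp_is_bw_alloc_available(): link up and all three bits set */
    static bool bw_alloc_available(const struct sketch_link *link)
    {
            return link != NULL && link->hpd_status
                    && link->caps.dp_tunneling
                    && link->caps.dpia_bw_alloc
                    && link->caps.driver_bw_alloc;
    }
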
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
index 6df9b946b00f..801965b5f9a4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
@@ -43,13 +43,13 @@ enum bw_type {
};
/*
- * Enable BW Allocation Mode Support from the DP-Tx side
+ * Enable USB4 DP BW allocation mode
*
* @link: pointer to the dc_link struct instance
*
* return: SUCCESS or FAILURE
*/
-bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link);
+bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link);
/*
* Allocates only what the stream needs for bw, so if:
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 5be00e4ce10b..693477413347 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -229,6 +229,10 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
link->replay_settings.config.replay_error_status.raw |= replay_error_status.raw;
+ /* Increment desync error counter if a desync error is detected */
+ if (replay_configuration.bits.DESYNC_ERROR_STATUS)
+ link->replay_settings.replay_desync_error_fail_count++;
+
if (link->replay_settings.config.force_disable_desync_error_check)
return;
@@ -240,9 +244,6 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
&replay_configuration.raw,
sizeof(replay_configuration.raw));
- /* Update desync error counter */
- link->replay_settings.replay_desync_error_fail_count++;
-
/* Acknowledge and clear error bits */
dm_helpers_dp_write_dpcd(
link->ctx,
@@ -351,7 +352,7 @@ enum dc_status dp_read_hpd_rx_irq_data(
irq_data->raw,
DP_SINK_STATUS - DP_SINK_COUNT + 1);
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling) {
retval = core_link_read_dpcd(
link, DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
&irq_data->bytes.link_service_irq_esi0.raw, 1);
@@ -520,7 +521,7 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
dp_trace_link_loss_increment(link);
}
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling) {
if (hpd_irq_dpcd_data.bytes.link_service_irq_esi0.bits.DP_LINK_TUNNELING_IRQ)
dp_handle_tunneling_irq(link);
}
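
The replay hunk above moves the desync-error increment ahead of the force_disable_desync_error_check early return, so the counter keeps accumulating even when the check itself is disabled. A stand-in sketch of the reordered flow:

    #include <stdbool.h>

    struct replay_state {
            int desync_fail_count;
            bool force_disable_desync_check;
    };

    static void handle_desync(struct replay_state *r, bool desync_error)
    {
            /* Count first, as in the reordered hunk... */
            if (desync_error)
                    r->desync_fail_count++;

            /* ...then honor the force-disable flag, which previously
             * caused an early return before the counter was updated. */
            if (r->force_disable_desync_check)
                    return;

            /* ... acknowledge and clear the error bits ... */
    }
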
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index ef358afdfb65..2dc1a660e504 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -785,7 +785,6 @@ void override_training_settings(
lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR;
dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
-
}
enum dc_dp_training_pattern decide_cr_training_pattern(
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
index 5a5d48fadbf2..66d0fb1b9b9d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
@@ -142,6 +142,14 @@ void decide_8b_10b_training_settings(
lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link);
lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting, lt_settings->lttpr_mode);
dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+
+ /* Some embedded LTTPRs rely on receiving TPS2 before LT to interoperate reliably with sensitive VGA dongles.
+ * This allows these LTTPRs to minimize freq/phase and skew variation during lock and deskew sequences.
+ */
+ if ((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) ==
+ AMD_EXT_DISPLAY_PATH_CAPS__DP_EARLY_8B10B_TPS2) {
+ lt_settings->lttpr_early_tps2 = true;
+ }
}
enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
@@ -173,6 +181,42 @@ enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
return LTTPR_MODE_NON_LTTPR;
}
+static void set_link_settings_and_perform_early_tps2_retimer_pre_lt_sequence(struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t lttpr_count)
+{
+ /* Vendor-specific LTTPR early TPS2 sequence:
+ * 1. Output TPS2
+ * 2. Wait 400us
+ * 3. Set link settings as usual
+ * 4. Write TPS1 to DP_TRAINING_PATTERN_SET_PHY_REPEATERx targeting LTTPR closest to host
+ * 5. Wait 1ms
+ * 6. Begin link training as usual
+ */
+
+ uint32_t closest_lttpr_address_offset = dp_get_closest_lttpr_offset(lttpr_count);
+
+ union dpcd_training_pattern dpcd_pattern = {0};
+
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = 1;
+ dpcd_pattern.v1_4.SCRAMBLING_DISABLE = 1;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS2. Wait 400us.\n", __func__);
+
+ dp_set_hw_training_pattern(link, link_res, DP_TRAINING_PATTERN_SEQUENCE_2, DPRX);
+
+ dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
+
+ udelay(400);
+
+ dpcd_set_link_settings(link, lt_settings);
+
+ core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + closest_lttpr_address_offset, &dpcd_pattern.raw, 1);
+
+ udelay(1000);
+ }
+
enum link_training_result perform_8b_10b_clock_recovery_sequence(
struct dc_link *link,
const struct link_resource *link_res,
@@ -383,7 +427,7 @@ enum link_training_result dp_perform_8b_10b_link_training(
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
- uint8_t repeater_cnt;
+ uint8_t repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
uint8_t repeater_id;
uint8_t lane = 0;
@@ -391,14 +435,16 @@ enum link_training_result dp_perform_8b_10b_link_training(
start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
/* 1. set link rate, lane count and spread. */
- dpcd_set_link_settings(link, lt_settings);
+ if (lt_settings->lttpr_early_tps2)
+ set_link_settings_and_perform_early_tps2_retimer_pre_lt_sequence(link, link_res, lt_settings, repeater_cnt);
+ else
+ dpcd_set_link_settings(link, lt_settings);
if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
/* 2. perform link training (set link training done
* to false is done as well)
*/
- repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
repeater_id--) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 1e4adbc764ea..da74c2b5854f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -524,7 +524,7 @@ bool edp_set_backlight_level(const struct dc_link *link,
struct dc *dc = link->ctx->dc;
uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16;
uint32_t frame_ramp = backlight_level_params->frame_ramp;
- DC_LOGGER_INIT(link->ctx->logger);
+
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
backlight_pwm_u16_16, backlight_pwm_u16_16);
@@ -1022,6 +1022,9 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
&alpm_config.raw,
sizeof(alpm_config.raw));
}
+
+ link->replay_settings.config.replay_video_conferencing_optimization_enabled = false;
+
return true;
}
@@ -1130,11 +1133,11 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link)
struct abm *abm = NULL;
for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
- struct dc_stream_state *stream = pipe_ctx.stream;
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct dc_stream_state *stream = pipe_ctx->stream;
if (stream && stream->link == link) {
- abm = pipe_ctx.stream_res.abm;
+ abm = pipe_ctx->stream_res.abm;
break;
}
}
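
The get_abm_from_stream_res() hunk above replaces a by-value copy of pipe_ctx, a large struct copied on every loop iteration, with a pointer into the array, which also ensures the live state is read rather than a snapshot. A minimal sketch of the pattern with stand-in types:

    #include <stddef.h>

    struct big_pipe {
            const void *stream;
            const void *abm;
            char payload[512];      /* stands in for the rest of pipe_ctx */
    };

    static const void *find_abm(const struct big_pipe *pipes, int n,
                                const void *link_stream)
    {
            for (int i = 0; i < n; i++) {
                    const struct big_pipe *p = &pipes[i];   /* pointer, not a struct copy */

                    if (p->stream == link_stream)
                            return p->abm;
            }
            return NULL;
    }
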
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
index a0e9e9f0441a..b4cea2b8cb2a 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
@@ -370,275 +370,279 @@ void mpc32_program_shaper_luta_settings(
MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
curve = params->arr_curve_points;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_0_1[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_2_3[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_4_5[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_6_7[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_8_9[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_10_11[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_12_13[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_14_15[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_16_17[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_18_19[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_20_21[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_22_23[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_24_25[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_26_27[mpcc_id], 0,
+ if (curve) {
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_0_1[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_28_29[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_30_31[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_32_33[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-}
-
-
-void mpc32_program_shaper_lutb_settings(
- struct mpc *mpc,
- const struct pwl_params *params,
- uint32_t mpcc_id)
-{
- const struct gamma_curve *curve;
- struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
-
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_B[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_G[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_R[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
-
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_B[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_G[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_R[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
-
- curve = params->arr_curve_points;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_0_1[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_2_3[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_2_3[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_4_5[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_4_5[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_6_7[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_6_7[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_8_9[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_8_9[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_10_11[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_10_11[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_12_13[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_12_13[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_14_15[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_14_15[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_16_17[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_16_17[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_18_19[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_18_19[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_20_21[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_20_21[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_22_23[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_22_23[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_24_25[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_24_25[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_26_27[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_26_27[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_28_29[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_28_29[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_30_31[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_30_31[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_32_33[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ }
+}
+
+
+void mpc32_program_shaper_lutb_settings(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t mpcc_id)
+{
+ const struct gamma_curve *curve;
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_B[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_G[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_R[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_32_33[mpcc_id], 0,
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_B[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_G[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_R[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
+
+ curve = params->arr_curve_points;
+ if (curve) {
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_0_1[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_2_3[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_4_5[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_6_7[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_8_9[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_10_11[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_12_13[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_14_15[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_16_17[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_18_19[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_20_21[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_22_23[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_24_25[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_26_27[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_28_29[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_30_31[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_32_33[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ }
}
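
Both shaper LUT programmers now skip the region writes when arr_curve_points is NULL. Since all 17 region registers are programmed with the same four fields over consecutive curve pairs, the guarded walk can equivalently be expressed as a table-driven loop; the sketch below is illustrative only — write_region4() and reg_table are hypothetical stand-ins for REG_SET_4 and the RAMA/RAMB region registers, which the driver actually addresses through per-instance register macros rather than a table:

    struct gamma_curve {
            unsigned int offset;
            unsigned int segments_num;
    };

    /* Stand-in for REG_SET_4: records one region-pair write. */
    static void write_region4(unsigned int reg,
                              unsigned int off0, unsigned int seg0,
                              unsigned int off1, unsigned int seg1)
    {
            (void)reg; (void)off0; (void)seg0; (void)off1; (void)seg1;
    }

    static void program_regions(const struct gamma_curve *curve,
                                const unsigned int *reg_table, int nregs)
    {
            if (!curve)             /* mirrors the new NULL guard */
                    return;

            for (int i = 0; i < nregs; i++, curve += 2)
                    write_region4(reg_table[i],
                                  curve[0].offset, curve[0].segments_num,
                                  curve[1].offset, curve[1].segments_num);
    }
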
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
index ad67197557ca..98cf0cbd59ba 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
@@ -47,16 +47,6 @@ void mpc401_update_3dlut_fast_load_select(struct mpc *mpc, int mpcc_id, int hubp
REG_SET(MPCC_MCM_3DLUT_FAST_LOAD_SELECT[mpcc_id], 0, MPCC_MCM_3DLUT_FL_SEL, hubp_idx);
}
-void mpc401_get_3dlut_fast_load_status(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow)
-{
- struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
-
- REG_GET_3(MPCC_MCM_3DLUT_FAST_LOAD_STATUS[mpcc_id],
- MPCC_MCM_3DLUT_FL_DONE, done,
- MPCC_MCM_3DLUT_FL_SOFT_UNDERFLOW, soft_underflow,
- MPCC_MCM_3DLUT_FL_HARD_UNDERFLOW, hard_underflow);
-}
-
void mpc401_set_movable_cm_location(struct mpc *mpc, enum mpcc_movable_cm_location location, int mpcc_id)
{
struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
@@ -618,7 +608,6 @@ static const struct mpc_funcs dcn401_mpc_funcs = {
.set_bg_color = mpc1_set_bg_color,
.set_movable_cm_location = mpc401_set_movable_cm_location,
.update_3dlut_fast_load_select = mpc401_update_3dlut_fast_load_select,
- .get_3dlut_fast_load_status = mpc401_get_3dlut_fast_load_status,
.populate_lut = mpc401_populate_lut,
.program_lut_read_write_control = mpc401_program_lut_read_write_control,
.program_lut_mode = mpc401_program_lut_mode,
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
index ce6fbcf14d7a..8e35ebc603a9 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
@@ -241,23 +241,9 @@ void mpc401_update_3dlut_fast_load_select(
int mpcc_id,
int hubp_idx);
-void mpc401_get_3dlut_fast_load_status(
- struct mpc *mpc,
- int mpcc_id,
- uint32_t *done,
- uint32_t *soft_underflow,
- uint32_t *hard_underflow);
-
void mpc401_update_3dlut_fast_load_select(
struct mpc *mpc,
int mpcc_id,
int hubp_idx);
-void mpc401_get_3dlut_fast_load_status(
- struct mpc *mpc,
- int mpcc_id,
- uint32_t *done,
- uint32_t *soft_underflow,
- uint32_t *hard_underflow);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index b86fe2b094f8..4cfc6c0fa147 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -507,6 +507,7 @@ void dcn35_timing_generator_init(struct optc *optc1)
optc1->min_v_blank_interlace = 5;
optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
+ optc1->max_frame_count = 0xFFFFFF;
dcn35_timing_generator_set_fgcg(
optc1, CTX->dc->debug.enable_fine_grain_clock_gating.bits.optc);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile
index b8cddef6b3d2..5b42da8b79c2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile
@@ -27,6 +27,24 @@
# DCE
###############################################################################
+ifdef CONFIG_DRM_AMD_DC_SI
+RESOURCE_DCE60 = dce60_resource.o
+
+AMD_DAL_RESOURCE_DCE60 = $(addprefix $(AMDDALPATH)/dc/resource/dce60/,$(RESOURCE_DCE60))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE60)
+endif
+
+###############################################################################
+
+RESOURCE_DCE80 = dce80_resource.o
+
+AMD_DAL_RESOURCE_DCE80 = $(addprefix $(AMDDALPATH)/dc/resource/dce80/,$(RESOURCE_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE80)
+
+###############################################################################
+
RESOURCE_DCE100 = dce100_resource.o
AMD_DAL_RESOURCE_DCE100 = $(addprefix $(AMDDALPATH)/dc/resource/dce100/,$(RESOURCE_DCE100))
@@ -57,14 +75,6 @@ AMD_DAL_RESOURCE_DCE120 = $(addprefix $(AMDDALPATH)/dc/resource/dce120/,$(RESOUR
AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE120)
-###############################################################################
-
-RESOURCE_DCE80 = dce80_resource.o
-
-AMD_DAL_RESOURCE_DCE80 = $(addprefix $(AMDDALPATH)/dc/resource/dce80/,$(RESOURCE_DCE80))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE80)
-
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
# DCN
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
index e698543ec937..84f73fdb0f95 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
@@ -836,7 +836,7 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-static bool dce100_validate_bandwidth(
+static enum dc_status dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -858,7 +858,7 @@ static bool dce100_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz = 0;
}
- return true;
+ return DC_OK;
}
static bool dce100_validate_surface_sets(
@@ -1069,7 +1069,7 @@ static bool dce100_resource_construct(
pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 40;
- dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.i2c_speed_in_khz_hdcp = 40;
dc->caps.max_cursor_size = 128;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dual_link_dvi = true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
index 035c6cfdaee5..f3d5baac11bf 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
@@ -960,7 +960,7 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-static bool dce110_validate_bandwidth(
+static enum dc_status dce110_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -1031,7 +1031,7 @@ static bool dce110_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz,
context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
- return result;
+ return result ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index 480a50967385..4225cae68c10 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -883,7 +883,7 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-bool dce112_validate_bandwidth(
+enum dc_status dce112_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -952,7 +952,7 @@ bool dce112_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz,
context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
- return result;
+ return result ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
enum dc_status resource_map_phy_clock_resources(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
index 1f57ebc6f9b4..6221d749246d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
@@ -42,7 +42,7 @@ enum dc_status dce112_validate_with_context(
struct dc_state *context,
struct dc_state *old_context);
-bool dce112_validate_bandwidth(
+enum dc_status dce112_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate);
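
Across the DCE/DCN resource files this series converts the validate_bandwidth callbacks from bool to enum dc_status, mapping the old boolean result onto DC_OK or DC_FAIL_BANDWIDTH_VALIDATE so callers can distinguish failure modes. A sketch of the conversion idiom (the enum values here are placeholders, not the real core_status.h values):

    #include <stdbool.h>

    enum dc_status_sketch {
            SKETCH_DC_OK = 1,
            SKETCH_DC_FAIL_BANDWIDTH_VALIDATE = 2,  /* placeholder values */
    };

    /* Wraps a legacy boolean validation result in a status code. */
    static enum dc_status_sketch wrap_validate(bool voltage_supported)
    {
            return voltage_supported ? SKETCH_DC_OK
                                     : SKETCH_DC_FAIL_BANDWIDTH_VALIDATE;
    }
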
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
index 889f314cac65..d9ffdded5ce1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
@@ -48,7 +48,7 @@
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
-#include "dce60/dce60_hw_sequencer.h"
+#include "dce60/dce60_hwseq.h"
#include "dce100/dce100_resource.h"
#include "dce/dce_panel_cntl.h"
@@ -863,7 +863,7 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool)
}
}
-static bool dce60_validate_bandwidth(
+static enum dc_status dce60_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -885,7 +885,7 @@ static bool dce60_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz = 0;
}
- return true;
+ return DC_OK;
}
static bool dce60_validate_surface_sets(
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h
index 5d653a76b0b0..5d653a76b0b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index 3d5113f010bb..bd5811f97531 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -869,7 +869,7 @@ static void dce80_resource_destruct(struct dce110_resource_pool *pool)
}
}
-static bool dce80_validate_bandwidth(
+static enum dc_status dce80_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -891,7 +891,7 @@ static bool dce80_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz = 0;
}
- return true;
+ return DC_OK;
}
static bool dce80_validate_surface_sets(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index e92f14d50adb..be4ade0853e9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -23,6 +23,7 @@
*
*/
+#include "core_status.h"
#include "dm_services.h"
#include "dc.h"
@@ -1125,7 +1126,7 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool)
*pool = NULL;
}
-static bool dcn10_validate_bandwidth(
+static enum dc_status dcn10_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -1136,7 +1137,7 @@ static bool dcn10_validate_bandwidth(
voltage_supported = dcn_validate_bandwidth(dc, context, fast_validate);
DC_FP_END();
- return voltage_supported;
+ return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
@@ -1245,6 +1246,10 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id ==
link->link_enc->preferred_engine)
return pool->stream_enc[i];
+
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && pool->stream_enc[i]->id ==
+ link->dpia_preferred_eng_id)
+ return pool->stream_enc[i];
}
}
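
The dcn10 stream-encoder hunk extends the preferred-engine match to USB4 DPIA links via dpia_preferred_eng_id. A stand-in sketch of the selection logic:

    #include <stddef.h>

    enum ep_kind { EP_PHY, EP_USB4_DPIA };

    struct sketch_link {
            enum ep_kind ep_type;
            int preferred_engine;
            int dpia_preferred_eng_id;
    };

    struct sketch_enc {
            int id;
    };

    static struct sketch_enc *pick_stream_enc(struct sketch_enc *encs, int n,
                                              const struct sketch_link *link)
    {
            for (int i = 0; i < n; i++) {
                    /* PHY links match on the link encoder's preferred engine */
                    if (link->ep_type == EP_PHY && encs[i].id == link->preferred_engine)
                            return &encs[i];
                    /* DPIA links match on the DPIA-preferred engine id */
                    if (link->ep_type == EP_USB4_DPIA &&
                        encs[i].id == link->dpia_preferred_eng_id)
                            return &encs[i];
            }
            return NULL;
    }
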
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index e4eca3e32c1b..3405be07f5e3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -2124,7 +2124,7 @@ validate_out:
return out;
}
-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
+enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate)
{
bool voltage_supported;
@@ -2132,14 +2132,14 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
if (!pipes)
- return false;
+ return DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes);
DC_FP_END();
kfree(pipes);
- return voltage_supported;
+ return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
struct pipe_ctx *dcn20_acquire_free_pipe_for_layer(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
index 4cee3fa11a7f..c0e062c7407d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
@@ -119,7 +119,7 @@ void dcn20_set_mcif_arb_params(
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
+enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
void dcn20_merge_pipes_for_validate(
struct dc *dc,
struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 4bd5c2278596..9ab01b65b177 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -923,7 +923,7 @@ validate_out:
* with DC_FP_START()/DC_FP_END(). Use the same approach as for
* dcn20_validate_bandwidth in dcn20_resource.c.
*/
-static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
+static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate)
{
bool voltage_supported;
@@ -931,14 +931,14 @@ static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
if (!pipes)
- return false;
+ return DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes);
DC_FP_END();
kfree(pipes);
- return voltage_supported;
+ return voltage_supported ? DC_OK : DC_NOT_SUPPORTED;
}
static void dcn21_destroy_resource_pool(struct resource_pool **pool)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index f01ced015072..f631ae34e320 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -1891,8 +1891,6 @@ static int get_refresh_rate(struct dc_state *context)
/* check if refresh rate is at least 120 Hz */
timing = &context->streams[0]->timing;
- if (timing == NULL)
- return 0;
h_v_total = timing->h_total * timing->v_total;
if (h_v_total == 0)
@@ -2037,7 +2035,7 @@ void dcn30_calculate_wm_and_dlg(
DC_FP_END();
}
-bool dcn30_validate_bandwidth(struct dc *dc,
+enum dc_status dcn30_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
@@ -2094,7 +2092,7 @@ validate_out:
BW_VAL_TRACE_FINISH();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
index 8e6b8b7368fd..689d9bdace81 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
@@ -56,7 +56,7 @@ unsigned int dcn30_calc_max_scaled_time(
enum mmhubbub_wbif_mode mode,
unsigned int urgent_watermark);
-bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
+enum dc_status dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate);
bool dcn30_internal_validate_bw(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index dddddbfef85f..7e0af5297dc4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -1758,7 +1758,7 @@ dcn31_set_mcif_arb_params(struct dc *dc,
DC_FP_END();
}
-bool dcn31_validate_bandwidth(struct dc *dc,
+enum dc_status dcn31_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
@@ -1813,7 +1813,7 @@ validate_out:
BW_VAL_TRACE_FINISH();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static void dcn31_get_panel_config_defaults(struct dc_panel_config *panel_config)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
index 551ad912f7be..dd82815d7efe 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
@@ -37,7 +37,7 @@ struct dcn31_resource_pool {
struct resource_pool base;
};
-bool dcn31_validate_bandwidth(struct dc *dc,
+enum dc_status dcn31_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate);
void dcn31_calculate_wm_and_dlg(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index 26becc4cb804..d96bc6cb73ad 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -1694,7 +1694,7 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi
*panel_config = panel_config_defaults;
}
-bool dcn314_validate_bandwidth(struct dc *dc,
+enum dc_status dcn314_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
@@ -1750,7 +1750,7 @@ validate_out:
BW_VAL_TRACE_FINISH();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static struct resource_funcs dcn314_res_pool_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
index 49ffe71018df..f8ba531d6342 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
@@ -39,7 +39,7 @@ struct dcn314_resource_pool {
struct resource_pool base;
};
-bool dcn314_validate_bandwidth(struct dc *dc,
+enum dc_status dcn314_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 944650cb13de..bb0dae0be5b8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -24,6 +24,7 @@
*
*/
+#include "dc_types.h"
#include "dm_services.h"
#include "dc.h"
@@ -1806,19 +1807,56 @@ validate_out:
return out;
}
-bool dcn32_validate_bandwidth(struct dc *dc,
+enum dc_status dcn32_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
- bool out = false;
+ unsigned int i;
+ enum dc_status status;
+ const struct dc_stream_state *stream;
+
+ /* reset cursor limitations on subvp */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_can_clear_stream_cursor_subvp_limit(stream, context)) {
+ dc_state_set_stream_cursor_subvp_limit(stream, context, false);
+ }
+ }
if (dc->debug.using_dml2)
- out = dml2_validate(dc, context,
+ status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
+ fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
else
- out = dml1_validate(dc, context, fast_validate);
- return out;
+ status = dml1_validate(dc, context, fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+
+ if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) {
+ /* check new stream configuration still supports cursor if subvp used */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_get_stream_subvp_type(context, stream) != SUBVP_PHANTOM &&
+ stream->cursor_position.enable &&
+ !dc_stream_check_cursor_attributes(stream, context, &stream->cursor_attributes)) {
+ /* hw cursor cannot be supported with subvp active, so disable subvp for now */
+ dc_state_set_stream_cursor_subvp_limit(stream, context, true);
+ status = DC_FAIL_HW_CURSOR_SUPPORT;
+ }
+ }
+ }
+
+ if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) {
+ /* attempt to validate again with subvp disabled due to cursor */
+ if (dc->debug.using_dml2)
+ status = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ else
+ status = dml1_validate(dc, context, fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+
+ return status;
}
int dcn32_populate_dml_pipes_from_context(
@@ -2042,6 +2080,18 @@ static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw
DC_FP_END();
}
+unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc,
+ struct dc_state *state,
+ const struct dc_stream_state *stream)
+{
+ bool limit_cur_to_buf;
+
+ limit_cur_to_buf = dc_state_get_stream_subvp_cursor_limit(stream, state) &&
+ !stream->hw_cursor_req;
+
+ return limit_cur_to_buf ? dc->caps.max_buffered_cursor_size : dc->caps.max_cursor_size;
+}
+
static struct resource_funcs dcn32_res_pool_funcs = {
.destroy = dcn32_destroy_resource_pool,
.link_enc_create = dcn32_link_encoder_create,
@@ -2067,7 +2117,8 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -2151,6 +2202,7 @@ static bool dcn32_resource_construct(
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
/* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed*/
dc->caps.max_cursor_size = 64;
+ dc->caps.max_buffered_cursor_size = 64; // sqrt(16 * 1024 / 4)
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
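
The new max_buffered_cursor_size = 64 follows from the arithmetic in its comment: a 16 KiB cursor buffer holding 4-byte ARGB pixels fits sqrt(16 * 1024 / 4) = sqrt(4096) = 64 pixels per side, and dcn32_get_max_hw_cursor_size() falls back to this buffered limit only when the stream is under the subvp cursor limit and has not explicitly requested a HW cursor. A quick standalone check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned int buf_bytes = 16 * 1024;     /* cursor buffer size */
            unsigned int bytes_per_pixel = 4;       /* ARGB8888 */
            unsigned int pixels = buf_bytes / bytes_per_pixel;      /* 4096 */

            /* 64 * 64 == 4096, matching max_buffered_cursor_size = 64 */
            printf("buffered pixels: %u, 64x64 %s\n", pixels,
                   pixels == 64u * 64u ? "fits exactly" : "does not fit");
            return 0;
    }
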
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 1aa4ced29291..d60ed77eda80 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -98,7 +98,7 @@ void dcn32_add_phantom_pipes(struct dc *dc,
unsigned int pipe_cnt,
unsigned int index);
-bool dcn32_validate_bandwidth(struct dc *dc,
+enum dc_status dcn32_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate);
@@ -188,6 +188,10 @@ void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context);
unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes);
+unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc,
+ struct dc_state *state,
+ const struct dc_stream_state *stream);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 38d76434683e..7db1f7a5613f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1624,7 +1624,8 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1709,6 +1710,7 @@ static bool dcn321_resource_construct(
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
/* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/
dc->caps.max_cursor_size = 64;
+ dc->caps.max_buffered_cursor_size = 64; // sqrt(16 * 1024 / 4)
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index ffd2b816cd02..72c6cf047db0 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -1732,7 +1732,7 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
}
-static bool dcn35_validate_bandwidth(struct dc *dc,
+static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
@@ -1743,13 +1743,13 @@ static bool dcn35_validate_bandwidth(struct dc *dc,
fast_validate);
if (fast_validate)
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
dcn35_decide_zstate_support(dc, context);
DC_FP_END();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_state)
@@ -1903,7 +1903,7 @@ static bool dcn35_resource_construct(
dc->caps.max_disp_clock_khz_at_vmin = 650000;
/* Sequential ONO is based on ASIC. */
- if (dc->ctx->asic_id.hw_internal_rev > 0x10)
+ if (dc->ctx->asic_id.hw_internal_rev >= 0x40)
dc->caps.sequential_ono = true;
/* Use pipe context based otg sync logic */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 98f5bc1b929e..989a270f7dea 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -1712,7 +1712,7 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
}
-static bool dcn351_validate_bandwidth(struct dc *dc,
+static enum dc_status dcn351_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
@@ -1723,13 +1723,13 @@ static bool dcn351_validate_bandwidth(struct dc *dc,
fast_validate);
if (fast_validate)
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
dcn35_decide_zstate_support(dc, context);
DC_FP_END();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static struct resource_funcs dcn351_res_pool_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
index b6468573dc33..48e1f234185f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
@@ -1713,7 +1713,7 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
}
-static bool dcn35_validate_bandwidth(struct dc *dc,
+static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
@@ -1724,13 +1724,13 @@ static bool dcn35_validate_bandwidth(struct dc *dc,
fast_validate);
if (fast_validate)
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
dcn35_decide_zstate_support(dc, context);
DC_FP_END();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
@@ -1876,7 +1876,7 @@ static bool dcn36_resource_construct(
dc->caps.max_disp_clock_khz_at_vmin = 650000;
/* Sequential ONO is based on ASIC. */
- if (dc->ctx->asic_id.hw_internal_rev > 0x10)
+ if (dc->ctx->asic_id.hw_internal_rev >= 0x40)
dc->caps.sequential_ono = true;
/* Use pipe context based otg sync logic */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 7436dfbdf927..e0e32975ca34 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -1642,16 +1642,52 @@ enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_sta
return DC_OK;
}
-bool dcn401_validate_bandwidth(struct dc *dc,
+enum dc_status dcn401_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
- bool out = false;
+ unsigned int i;
+ enum dc_status status = DC_OK;
+ const struct dc_stream_state *stream;
+
+ /* reset cursor limitations on subvp */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_can_clear_stream_cursor_subvp_limit(stream, context)) {
+ dc_state_set_stream_cursor_subvp_limit(stream, context, false);
+ }
+ }
+
if (dc->debug.using_dml2)
- out = dml2_validate(dc, context,
+ status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
- return out;
+ fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+
+ if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) {
+ /* check new stream configuration still supports cursor if subvp used */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_get_stream_subvp_type(context, stream) != SUBVP_PHANTOM &&
+ stream->cursor_position.enable &&
+ !dc_stream_check_cursor_attributes(stream, context, &stream->cursor_attributes)) {
+ /* hw cursor cannot be supported with subvp active, so disable subvp for now */
+ dc_state_set_stream_cursor_subvp_limit(stream, context, true);
+ status = DC_FAIL_HW_CURSOR_SUPPORT;
+ }
+ }
+ }
+
+ if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) {
+ /* attempt to validate again with subvp disabled due to cursor */
+ if (dc->debug.using_dml2)
+ status = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+
+ return status;
}
void dcn401_prepare_mcache_programming(struct dc *dc,
@@ -1770,7 +1806,8 @@ static struct resource_funcs dcn401_res_pool_funcs = {
.build_pipe_pix_clk_params = dcn401_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
.get_power_profile = dcn401_get_power_profile,
- .get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe,
+ .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1846,8 +1883,9 @@ static bool dcn401_resource_construct(
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 95;
dc->caps.i2c_speed_in_khz_hdcp = 95; /*1.4 w/a applied by default*/
- /* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/
+ /* used to set cursor pitch, so must be aligned to a power of 2 (HW actually supports 78x78) */
dc->caps.max_cursor_size = 64;
+ dc->caps.max_buffered_cursor_size = 64;
dc->caps.cursor_not_scaled = true;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index 4c259745d519..dc52a30991af 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -22,7 +22,7 @@ struct resource_pool *dcn401_create_resource_pool(
enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_state);
-bool dcn401_validate_bandwidth(struct dc *dc,
+enum dc_status dcn401_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate);
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
index 28348734d900..e0008c5f08ad 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
@@ -776,7 +776,7 @@ static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
* Do not bypass UV at 1:1 for cositing to be applied
*/
if (!enable_isharp) {
- if (data->ratios.horz.value == one && data->ratios.vert.value == one)
+ if (data->ratios.horz.value == one && data->ratios.vert.value == one && !spl_in->basic_out.always_scale)
return SCL_MODE_SCALING_420_LUMA_BYPASS;
}
@@ -884,7 +884,7 @@ static bool spl_get_isharp_en(struct spl_in *spl_in,
/* Calculate number of tap with adaptive scaling off */
static void spl_get_taps_non_adaptive_scaler(
- struct spl_scratch *spl_scratch, const struct spl_taps *in_taps)
+ struct spl_scratch *spl_scratch, const struct spl_taps *in_taps, bool always_scale)
{
bool check_max_downscale = false;
@@ -944,15 +944,15 @@ static void spl_get_taps_non_adaptive_scaler(
spl_fixpt_from_fraction(6, 1));
SPL_ASSERT(check_max_downscale);
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz))
+
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz) && !always_scale)
spl_scratch->scl_data.taps.h_taps = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert) && !always_scale)
spl_scratch->scl_data.taps.v_taps = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c))
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !always_scale)
spl_scratch->scl_data.taps.h_taps_c = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c))
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !always_scale)
spl_scratch->scl_data.taps.v_taps_c = 1;
-
}
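The guard added above means a 1:1 scaling ratio keeps the configured tap counts whenever always_scale is set, instead of collapsing to the 1-tap bypass. A minimal sketch of that decision, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

/* identity ratio gets the 1-tap bypass only when scaling is optional */
static unsigned pick_taps(bool identity_ratio, bool always_scale,
			  unsigned configured_taps)
{
	if (identity_ratio && !always_scale)
		return 1; /* bypass the filter entirely */
	return configured_taps;
}

int main(void)
{
	printf("%u\n", pick_taps(true, false, 4)); /* 1: bypass */
	printf("%u\n", pick_taps(true, true, 4));  /* 4: always_scale keeps filtering */
	return 0;
}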
/* Calculate optimal number of taps */
@@ -965,13 +965,15 @@ static bool spl_get_optimal_number_of_taps(
unsigned int max_taps_y, max_taps_c;
unsigned int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
- bool skip_easf = false;
+ bool skip_easf = false;
+ bool always_scale = spl_in->basic_out.always_scale;
bool is_subsampled = spl_is_subsampled_format(spl_in->basic_in.format);
+
if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active &&
max_downscale_src_width != 0 &&
spl_scratch->scl_data.viewport.width > max_downscale_src_width) {
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale);
*enable_easf_v = false;
*enable_easf_h = false;
*enable_isharp = false;
@@ -980,7 +982,7 @@ static bool spl_get_optimal_number_of_taps(
/* Disable adaptive scaler and sharpener when integer scaling is enabled */
if (spl_in->scaling_quality.integer_scaling) {
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale);
*enable_easf_v = false;
*enable_easf_h = false;
*enable_isharp = false;
@@ -996,7 +998,7 @@ static bool spl_get_optimal_number_of_taps(
* taps = 4 for upscaling
*/
if (skip_easf)
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale);
else {
if (spl_is_video_format(spl_in->basic_in.format)) {
spl_scratch->scl_data.taps.h_taps = 6;
@@ -1297,7 +1299,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s
if (enable_easf_v) {
dscl_prog_data->easf_v_en = true;
dscl_prog_data->easf_v_ring = 0;
- dscl_prog_data->easf_v_sharp_factor = 0;
+ dscl_prog_data->easf_v_sharp_factor = 1;
dscl_prog_data->easf_v_bf1_en = 1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_v_bf2_mode = 0xF; // 4-bit, BF2 calculation mode
/* 2-bit, BF3 chroma mode correction calculation mode */
@@ -1461,7 +1463,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s
if (enable_easf_h) {
dscl_prog_data->easf_h_en = true;
dscl_prog_data->easf_h_ring = 0;
- dscl_prog_data->easf_h_sharp_factor = 0;
+ dscl_prog_data->easf_h_sharp_factor = 1;
dscl_prog_data->easf_h_bf1_en =
1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_h_bf2_mode =
@@ -1898,3 +1900,4 @@ bool SPL_NAMESPACE(spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out
spl_set_taps_data(dscl_prog_data, data);
return res;
}
+
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
index 1c3949b24611..36a284305a70 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
@@ -480,6 +480,10 @@ enum sharpness_setting {
SHARPNESS_ZERO,
SHARPNESS_CUSTOM
};
+enum sharpness_range_source {
+ SHARPNESS_RANGE_DCN = 0,
+ SHARPNESS_RANGE_DCN_OVERRIDE
+};
struct spl_sharpness_range {
int sdr_rgb_min;
int sdr_rgb_max;
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
index 52d97918a3bd..ebf0287417e0 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
@@ -29,8 +29,6 @@ static inline unsigned long long spl_complete_integer_division_u64(
{
unsigned long long result;
- SPL_ASSERT(divisor);
-
result = spl_div64_u64_rem(dividend, divisor, remainder);
return result;
@@ -196,8 +194,6 @@ struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg)
* Good idea to use Newton's method
*/
- SPL_ASSERT(arg.value);
-
return spl_fixpt_from_fraction(
spl_fixpt_one.value,
arg.value);
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 4e0efff92dca..3f3fa1b6a69e 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -51,8 +51,8 @@
* for the cache windows.
*
* The call to dmub_srv_hw_init() programs the DMCUB registers to prepare
- * for command submission. Commands can be queued via dmub_srv_cmd_queue()
- * and executed via dmub_srv_cmd_execute().
+ * for command submission. Commands can be queued via dmub_srv_fb_cmd_queue()
+ * and executed via dmub_srv_fb_cmd_execute().
*
* If the queue is full the dmub_srv_wait_for_idle() call can be used to
* wait until the queue has been cleared.
@@ -142,6 +142,7 @@ enum dmub_notification_type {
DMUB_NOTIFICATION_SET_CONFIG_REPLY,
DMUB_NOTIFICATION_DPIA_NOTIFICATION,
DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
+ DMUB_NOTIFICATION_FUSED_IO,
DMUB_NOTIFICATION_MAX
};
@@ -170,6 +171,13 @@ enum dmub_srv_power_state_type {
DMUB_POWER_STATE_D3 = 8
};
+/* enum dmub_inbox_cmd_interface_type - defines default interface for host->dmub commands */
+enum dmub_inbox_cmd_interface_type {
+ DMUB_CMD_INTERFACE_DEFAULT = 0,
+ DMUB_CMD_INTERFACE_FB = 1,
+ DMUB_CMD_INTERFACE_REG = 2,
+};
+
/**
* struct dmub_region - dmub hw memory region
* @base: base address for region, must be 256 byte aligned
@@ -349,6 +357,21 @@ struct dmub_diagnostic_data {
uint8_t is_cw6_enabled : 1;
};
+struct dmub_srv_inbox {
+ /* generic status */
+ uint64_t num_submitted;
+ uint64_t num_reported;
+ union {
+ /* frame buffer mailbox status */
+ struct dmub_rb rb;
+ /* register mailbox status */
+ struct {
+ bool is_pending;
+ bool is_multi_pending;
+ };
+ };
+};
+
/**
* struct dmub_srv_base_funcs - Driver specific base callbacks
*/
@@ -422,6 +445,8 @@ struct dmub_srv_hw_funcs {
uint32_t (*emul_get_inbox1_rptr)(struct dmub_srv *dmub);
+ uint32_t (*emul_get_inbox1_wptr)(struct dmub_srv *dmub);
+
void (*emul_set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
bool (*is_supported)(struct dmub_srv *dmub);
@@ -462,18 +487,21 @@ struct dmub_srv_hw_funcs {
void (*init_reg_offsets)(struct dmub_srv *dmub, struct dc_context *ctx);
void (*subvp_save_surf_addr)(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index);
+
void (*send_reg_inbox0_cmd_msg)(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
uint32_t (*read_reg_inbox0_rsp_int_status)(struct dmub_srv *dmub);
void (*read_reg_inbox0_cmd_rsp)(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
void (*write_reg_inbox0_rsp_int_ack)(struct dmub_srv *dmub);
+ void (*clear_reg_inbox0_rsp_int_ack)(struct dmub_srv *dmub);
+ void (*enable_reg_inbox0_rsp_int)(struct dmub_srv *dmub, bool enable);
+
uint32_t (*read_reg_outbox0_rdy_int_status)(struct dmub_srv *dmub);
void (*write_reg_outbox0_rdy_int_ack)(struct dmub_srv *dmub);
void (*read_reg_outbox0_msg)(struct dmub_srv *dmub, uint32_t *msg);
void (*write_reg_outbox0_rsp)(struct dmub_srv *dmub, uint32_t *rsp);
uint32_t (*read_reg_outbox0_rsp_int_status)(struct dmub_srv *dmub);
- void (*enable_reg_inbox0_rsp_int)(struct dmub_srv *dmub, bool enable);
void (*enable_reg_outbox0_rdy_int)(struct dmub_srv *dmub, bool enable);
};
@@ -493,6 +521,7 @@ struct dmub_srv_create_params {
enum dmub_asic asic;
uint32_t fw_version;
bool is_virtual;
+ enum dmub_inbox_cmd_interface_type inbox_type;
};
/**
@@ -521,8 +550,9 @@ struct dmub_srv {
const struct dmub_srv_dcn401_regs *regs_dcn401;
struct dmub_srv_base_funcs funcs;
struct dmub_srv_hw_funcs hw_funcs;
- struct dmub_rb inbox1_rb;
+ struct dmub_srv_inbox inbox1;
uint32_t inbox1_last_wptr;
+ struct dmub_srv_inbox reg_inbox0;
/**
* outbox1_rb is accessed without locks (dal & dc)
* and to be used only in dmub_srv_stat_get_notification()
@@ -542,6 +572,7 @@ struct dmub_srv {
struct dmub_fw_meta_info meta_info;
struct dmub_feature_caps feature_caps;
struct dmub_visual_confirm_color visual_confirm_color;
+ enum dmub_inbox_cmd_interface_type inbox_type;
enum dmub_srv_power_state_type power_state;
struct dmub_diagnostic_data debug;
@@ -566,11 +597,8 @@ struct dmub_notification {
struct aux_reply_data aux_reply;
enum dp_hpd_status hpd_status;
enum set_config_status sc_status;
- /**
- * DPIA notification command.
- */
- struct dmub_rb_cmd_dpia_notification dpia_notification;
struct dmub_rb_cmd_hpd_sense_notify_data hpd_sense_notify;
+ struct dmub_cmd_fused_request fused_request;
};
};
@@ -699,19 +727,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
/**
- * dmub_srv_sync_inbox1() - sync sw state with hw state
- * @dmub: the dmub service
- *
- * Sync sw state with hw state when resume from S0i3
- *
- * Return:
- * DMUB_STATUS_OK - success
- * DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub);
-
-/**
- * dmub_srv_cmd_queue() - queues a command to the DMUB
+ * dmub_srv_fb_cmd_queue() - queues a command to the DMUB
* @dmub: the dmub service
* @cmd: the command to queue
*
@@ -723,11 +739,11 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub);
* DMUB_STATUS_QUEUE_FULL - no remaining room in queue
* DMUB_STATUS_INVALID - unspecified error
*/
-enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+enum dmub_status dmub_srv_fb_cmd_queue(struct dmub_srv *dmub,
const union dmub_rb_cmd *cmd);
/**
- * dmub_srv_cmd_execute() - Executes a queued sequence to the dmub
+ * dmub_srv_fb_cmd_execute() - Executes a queued sequence to the dmub
* @dmub: the dmub service
*
* Begins execution of queued commands on the dmub.
@@ -736,7 +752,7 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
* DMUB_STATUS_OK - success
* DMUB_STATUS_INVALID - unspecified error
*/
-enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub);
+enum dmub_status dmub_srv_fb_cmd_execute(struct dmub_srv *dmub);
/**
* dmub_srv_wait_for_hw_pwr_up() - Waits until firmware hardware power up is completed
@@ -795,6 +811,23 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
uint32_t timeout_us);
/**
+ * dmub_srv_wait_for_pending() - Re-entrant wait for messages currently pending
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until the commands queued prior to this call are complete.
+ * If interfaces remain busy due to additional work being submitted
+ * concurrently, this function will not continue to wait.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub,
+ uint32_t timeout_us);
+
+/**
* dmub_srv_wait_for_idle() - Waits for the DMUB to be idle
* @dmub: the dmub service
* @timeout_us: the maximum number of microseconds to wait
@@ -892,9 +925,6 @@ enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub,
enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub,
union dmub_fw_boot_options *option);
-enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd);
-
enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
bool skip);
@@ -959,26 +989,6 @@ enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub);
void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index);
/**
- * dmub_srv_send_reg_inbox0_cmd() - send a dmub command and wait for the command
- * being processed by DMUB.
- * @dmub: The dmub service
- * @cmd: The dmub command being sent. If with_replay is true, the function will
- * update cmd with replied data.
- * @with_reply: true if DMUB reply needs to be copied back to cmd. false if the
- * cmd doesn't need to be replied.
- * @timeout_us: timeout in microseconds.
- *
- * Return:
- * DMUB_STATUS_OK - success
- * DMUB_STATUS_TIMEOUT - DMUB fails to process the command within the timeout
- * interval.
- */
-enum dmub_status dmub_srv_send_reg_inbox0_cmd(
- struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd,
- bool with_reply, uint32_t timeout_us);
-
-/**
* dmub_srv_set_power_state() - Track DC power state in dmub_srv
* @dmub: The dmub service
* @power_state: DC power state setting
@@ -990,4 +1000,71 @@ enum dmub_status dmub_srv_send_reg_inbox0_cmd(
*/
void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state);
+/**
+ * dmub_srv_reg_cmd_execute() - Executes provided command to the dmub
+ * @dmub: the dmub service
+ * @cmd: the command packet to be executed
+ *
+ * Executes a single command for the dmub.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd);
+
+
+/**
+ * dmub_srv_cmd_get_response() - Copies return data for command into buffer
+ * @dmub: the dmub service
+ * @cmd_rsp: response buffer
+ *
+ * Copies return data for command into buffer
+ */
+void dmub_srv_cmd_get_response(struct dmub_srv *dmub,
+ union dmub_rb_cmd *cmd_rsp);
+
+/**
+ * dmub_srv_sync_inboxes() - Sync inbox state
+ * @dmub: the dmub service
+ *
+ * Sync inbox state
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_wait_for_inbox_free() - Waits for space in the DMUB inbox to free up
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ * @num_free_required: number of free entries required
+ *
+ * Waits until the DMUB inbox has at least the specified number of free entries.
+ * The maximum wait time is given in microseconds to prevent spinning
+ * forever.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
+ uint32_t timeout_us,
+ uint32_t num_free_required);
+
+/**
+ * dmub_srv_update_inbox_status() - Updates pending status for inbox & reg inbox0
+ * @dmub: the dmub service
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ * DMUB_STATUS_HW_FAILURE - issue with HW programming
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub);
+
#endif /* _DMUB_SRV_H_ */
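Taken together, the new declarations replace the removed dmub_srv_send_reg_inbox0_cmd()/dmub_srv_cmd_with_reply_data() helpers with an execute/wait/read split. A hedged sketch of how a caller might drive the register path, assuming the declarations above: send_one_reg_cmd is an illustrative wrapper, not a driver function, and the 100000 us timeout mirrors the one the removed dmub_srv_cmd_with_reply_data() used.

#include "dmub_srv.h"

/* Illustrative only: send one command over the register inbox and copy
 * back its response, propagating the documented status codes. */
static enum dmub_status send_one_reg_cmd(struct dmub_srv *dmub,
					 union dmub_rb_cmd *cmd)
{
	enum dmub_status status;

	status = dmub_srv_reg_cmd_execute(dmub, cmd);
	if (status != DMUB_STATUS_OK)
		return status;

	/* wait for the command submitted above to complete */
	status = dmub_srv_wait_for_pending(dmub, 100000 /* us */);
	if (status != DMUB_STATUS_OK)
		return status;

	/* copy the reply back into the caller's buffer */
	dmub_srv_cmd_get_response(dmub, cmd);

	return DMUB_STATUS_OK;
}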
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 1f5f4e3e49d4..57fa05bddb45 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -36,6 +36,9 @@
//<DMUB_TYPES>==================================================================
/* Basic type definitions. */
+#ifdef __forceinline
+#undef __forceinline
+#endif
#define __forceinline inline
/**
@@ -547,6 +550,11 @@ union replay_hw_flags {
* @is_alpm_initialized: Indicates whether ALPM is initialized
*/
uint32_t is_alpm_initialized : 1;
+
+ /**
+ * @alpm_mode: Indicates ALPM mode selected
+ */
+ uint32_t alpm_mode : 2;
} bitfields;
uint32_t u32All;
@@ -739,6 +747,14 @@ enum dmub_ips_disable_type {
DMUB_IPS_DISABLE_IPS2_Z10 = 4,
DMUB_IPS_DISABLE_DYNAMIC = 5,
DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF = 6,
+ DMUB_IPS_DISABLE_Z8_RETENTION = 7,
+};
+
+enum dmub_ips_rcg_disable_type {
+ DMUB_IPS_RCG_ENABLE = 0,
+ DMUB_IPS0_RCG_DISABLE = 1,
+ DMUB_IPS1_RCG_DISABLE = 2,
+ DMUB_IPS_RCG_DISABLE = 3
};
#define DMUB_IPS1_ALLOW_MASK 0x00000001
@@ -817,11 +833,12 @@ enum dmub_shared_state_feature_id {
*/
union dmub_shared_state_ips_fw_signals {
struct {
- uint32_t ips1_commit : 1; /**< 1 if in IPS1 */
+ uint32_t ips1_commit : 1; /**< 1 if in IPS1 or IPS0 RCG */
uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
uint32_t in_idle : 1; /**< 1 if DMCUB is in idle */
uint32_t detection_required : 1; /**< 1 if detection is required */
- uint32_t reserved_bits : 28; /**< Reversed */
+ uint32_t ips1z8_commit : 1; /**< 1 if in IPS1 Z8 Retention */
+ uint32_t reserved_bits : 27; /**< Reserved */
} bits;
uint32_t all;
};
@@ -836,7 +853,10 @@ union dmub_shared_state_ips_driver_signals {
uint32_t allow_ips2 : 1; /**< 1 if IPS2 is allowed */
uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */
uint32_t allow_idle: 1; /**< 1 if driver is allowing idle */
- uint32_t reserved_bits : 27; /**< Reversed bits */
+ uint32_t allow_ips0_rcg : 1; /**< 1 if IPS0 RCG is allowed */
+ uint32_t allow_ips1_rcg : 1; /**< 1 if IPS1 RCG is allowed */
+ uint32_t allow_ips1z8 : 1; /**< 1 if IPS1 Z8 Retention is allowed */
+ uint32_t reserved_bits : 24; /**< Reserved bits */
} bits;
uint32_t all;
};
@@ -865,7 +885,9 @@ struct dmub_shared_state_ips_fw {
uint32_t ips1_exit_count; /**< Exit counter for IPS1 */
uint32_t ips2_entry_count; /**< Entry counter for IPS2 */
uint32_t ips2_exit_count; /**< Exit counter for IPS2 */
- uint32_t reserved[55]; /**< Reversed, to be updated when adding new fields. */
+ uint32_t ips1_z8ret_entry_count; /**< Entry counter for IPS1 Z8 Retention */
+ uint32_t ips1_z8ret_exit_count; /**< Exit counter for IPS1 Z8 Retention */
+ uint32_t reserved[53]; /**< Reserved, to be updated when adding new fields. */
}; /* 248-bytes, fixed */
/**
@@ -1253,6 +1275,10 @@ enum dmub_gpint_command {
* DESC: Setup debug configs.
*/
DMUB_GPINT__SETUP_DEBUG_MODE = 136,
+ /**
+ * DESC: Initiates IPS wake sequence.
+ */
+ DMUB_GPINT__IPS_DEBUG_WAKE = 137,
};
/**
@@ -2113,6 +2139,11 @@ union dmub_cmd_fams2_config {
} stream_v1; //v1
};
+struct dmub_fams2_config_v2 {
+ struct dmub_cmd_fams2_global_config global;
+ struct dmub_fams2_stream_static_state_v1 stream_v1[DMUB_MAX_STREAMS]; //v1
+};
+
/**
* DMUB rb command definition for FAMS2 (merged SubVP, FPO, Legacy)
*/
@@ -2122,6 +2153,22 @@ struct dmub_rb_cmd_fams2 {
};
/**
+ * Indirect buffer descriptor
+ */
+struct dmub_ib_data {
+ union dmub_addr src; // location of indirect buffer in memory
+ uint16_t size; // indirect buffer size in bytes
+};
+
+/**
+ * DMUB rb command definition for commands passed over indirect buffer
+ */
+struct dmub_rb_cmd_ib {
+ struct dmub_cmd_header header;
+ struct dmub_ib_data ib_data;
+};
+
+/**
* enum dmub_cmd_idle_opt_type - Idle optimization command type.
*/
enum dmub_cmd_idle_opt_type {
@@ -2144,6 +2191,11 @@ enum dmub_cmd_idle_opt_type {
* DCN hardware notify power state.
*/
DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE = 3,
+
+ /**
+ * DCN notify to release HW.
+ */
+ DMUB_CMD__IDLE_OPT_RELEASE_HW = 4,
};
/**
@@ -2636,7 +2688,11 @@ enum dp_hpd_type {
/**
* DP HPD short pulse
*/
- DP_IRQ
+ DP_IRQ = 1,
+ /**
+ * Failure to acquire DP HPD state
+ */
+ DP_NONE_HPD = 2
};
/**
@@ -2901,8 +2957,9 @@ enum dmub_cmd_fams_type {
*/
DMUB_CMD__FAMS_SET_MANUAL_TRIGGER = 3,
DMUB_CMD__FAMS2_CONFIG = 4,
- DMUB_CMD__FAMS2_DRR_UPDATE = 5,
- DMUB_CMD__FAMS2_FLIP = 6,
+ DMUB_CMD__FAMS2_IB_CONFIG = 5,
+ DMUB_CMD__FAMS2_DRR_UPDATE = 6,
+ DMUB_CMD__FAMS2_FLIP = 7,
};
/**
@@ -3609,6 +3666,12 @@ struct dmub_rb_cmd_psr_set_power_opt {
struct dmub_cmd_psr_set_power_opt_data psr_set_power_opt_data;
};
+enum dmub_alpm_mode {
+ ALPM_AUXWAKE = 0,
+ ALPM_AUXLESS = 1,
+ ALPM_UNSUPPORTED = 2,
+};
+
/**
* Definition of Replay Residency GPINT command.
* Bit[0] - Residency mode for Revision 0
@@ -3742,6 +3805,15 @@ enum dmub_cmd_replay_general_subtype {
REPLAY_GENERAL_CMD_SET_LOW_RR_ACTIVATE,
};
+struct dmub_alpm_auxless_data {
+ uint16_t lfps_setup_ns;
+ uint16_t lfps_period_ns;
+ uint16_t lfps_silence_ns;
+ uint16_t lfps_t1_t2_override_us;
+ short lfps_t1_t2_offset_us;
+ uint8_t lttpr_count;
+};
+
/**
* Data passed from driver to FW in a DMUB_CMD__REPLAY_COPY_SETTINGS command.
*/
@@ -3812,6 +3884,10 @@ struct dmub_cmd_replay_copy_settings_data {
* Use FSM state for Replay power up/down
*/
uint8_t use_phy_fsm;
+ /**
+ * Use for AUX-less ALPM LFPS wake operation
+ */
+ struct dmub_alpm_auxless_data auxless_alpm_data;
};
/**
@@ -4360,6 +4436,11 @@ enum dmub_cmd_abm_type {
* Get the current ACE curve.
*/
DMUB_CMD__ABM_GET_ACE_CURVE = 10,
+
+ /**
+ * Get current histogram data
+ */
+ DMUB_CMD__ABM_GET_HISTOGRAM_DATA = 11,
};
struct abm_ace_curve {
@@ -4954,6 +5035,20 @@ enum dmub_abm_ace_curve_type {
};
/**
+ * enum dmub_abm_histogram_type - Histogram type.
+ */
+enum dmub_abm_histogram_type {
+ /**
+ * Histogram as defined by the SW layer.
+ */
+ ABM_HISTOGRAM_TYPE__SW = 0,
+ /**
+ * Histogram as defined by the SW to HW translation interface layer.
+ */
+ ABM_HISTOGRAM_TYPE__SW_IF = 1,
+};
+
+/**
* Definition of a DMUB_CMD__ABM_GET_ACE_CURVE command.
*/
struct dmub_rb_cmd_abm_get_ace_curve {
@@ -4989,6 +5084,41 @@ struct dmub_rb_cmd_abm_get_ace_curve {
};
/**
+ * Definition of a DMUB_CMD__ABM_GET_HISTOGRAM_DATA command.
+ */
+struct dmub_rb_cmd_abm_get_histogram {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ /**
+ * Address where Histogram should be copied.
+ */
+ union dmub_addr dest;
+
+ /**
+ * Type of Histogram being queried.
+ */
+ enum dmub_abm_histogram_type histogram_type;
+
+ /**
+ * Indirect buffer length.
+ */
+ uint16_t bytes;
+
+ /**
+ * eDP panel instance.
+ */
+ uint8_t panel_inst;
+
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad;
+};
+
+/**
* Definition of a DMUB_CMD__ABM_SAVE_RESTORE command.
*/
struct dmub_rb_cmd_abm_save_restore {
@@ -5389,7 +5519,8 @@ struct dmub_cmd_fused_request {
struct dmub_cmd_fused_request_location_i2c {
uint8_t is_aux : 1; // False
uint8_t ddc_line : 3;
- uint8_t _reserved0 : 4;
+ uint8_t over_aux : 1;
+ uint8_t _reserved0 : 3;
uint8_t address;
uint8_t offset;
uint8_t length;
@@ -5687,6 +5818,11 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_abm_get_ace_curve abm_get_ace_curve;
/**
+ * Definition of a DMUB_CMD__ABM_GET_HISTOGRAM_DATA command.
+ */
+ struct dmub_rb_cmd_abm_get_histogram abm_get_histogram;
+
+ /**
* Definition of a DMUB_CMD__ABM_SET_EVENT command.
*/
struct dmub_rb_cmd_abm_set_event abm_set_event;
@@ -5817,8 +5953,11 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__PSP_ASSR_ENABLE command.
*/
struct dmub_rb_cmd_assr_enable assr_enable;
+
struct dmub_rb_cmd_fams2 fams2_config;
+ struct dmub_rb_cmd_ib ib_fams2_config;
+
struct dmub_rb_cmd_fams2_drr_update fams2_drr_update;
struct dmub_rb_cmd_fams2_flip fams2_flip;
@@ -5934,6 +6073,9 @@ static inline uint32_t dmub_rb_num_free(struct dmub_rb *rb)
else
data_count = rb->capacity - (rb->rptr - rb->wrpt);
+ /* +1 because 1 entry is always unusable */
+ data_count += DMUB_RB_CMD_SIZE;
+
return (rb->capacity - data_count) / DMUB_RB_CMD_SIZE;
}
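The adjustment above exists because a ring buffer that distinguishes empty (rptr == wrpt) from full must leave one entry permanently unused; counting that entry as occupied keeps dmub_rb_num_free() consistent with dmub_rb_full(). A minimal sketch of the same accounting, with illustrative names:

#include <stdio.h>

#define CMD_SIZE 64u

struct rb { unsigned rptr, wrpt, capacity; };

/* free entries, reserving one slot so that full != empty */
static unsigned rb_num_free(const struct rb *rb)
{
	unsigned used = (rb->wrpt >= rb->rptr)
		? rb->wrpt - rb->rptr
		: rb->capacity - (rb->rptr - rb->wrpt);

	used += CMD_SIZE; /* one entry is always unusable */
	return (rb->capacity - used) / CMD_SIZE;
}

int main(void)
{
	struct rb rb = { .rptr = 0, .wrpt = 0, .capacity = 64 * CMD_SIZE };
	printf("free when empty = %u\n", rb_num_free(&rb)); /* 63, not 64 */
	return 0;
}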
@@ -5953,6 +6095,7 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
else
data_count = rb->capacity - (rb->rptr - rb->wrpt);
+ /* -1 because 1 entry is always unusable */
return (data_count == (rb->capacity - DMUB_RB_CMD_SIZE));
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
index e67f7c4784eb..2575dbc448f7 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
@@ -66,24 +66,20 @@ void dmub_dcn401_reset(struct dmub_srv *dmub)
const uint32_t timeout_us = 1 * 1000 * 1000; //1s
const uint32_t poll_delay_us = 1; //1us
uint32_t i = 0;
- uint32_t in_reset, scratch, pwait_mode;
+ uint32_t enabled, in_reset, scratch, pwait_mode;
- REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
+ REG_GET(DMCUB_CNTL,
+ DMCUB_ENABLE, &enabled);
+ REG_GET(DMCUB_CNTL2,
+ DMCUB_SOFT_RESET, &in_reset);
- if (in_reset == 0) {
+ if (enabled && in_reset == 0) {
cmd.bits.status = 1;
cmd.bits.command_code = DMUB_GPINT__STOP_FW;
cmd.bits.param = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
- for (i = 0; i < timeout_us; i++) {
- if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
- break;
-
- udelay(poll_delay_us);
- }
-
for (; i < timeout_us; i++) {
scratch = dmub->hw_funcs.get_gpint_response(dmub);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
@@ -517,28 +513,69 @@ void dmub_dcn401_send_reg_inbox0_cmd_msg(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd)
{
uint32_t *dwords = (uint32_t *)cmd;
-
+ int32_t payload_size_bytes = cmd->cmd_common.header.payload_bytes;
+ uint32_t msg_index;
static_assert(sizeof(*cmd) == 64, "DMUB command size mismatch");
- REG_WRITE(DMCUB_REG_INBOX0_MSG0, dwords[0]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG1, dwords[1]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG2, dwords[2]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG3, dwords[3]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG4, dwords[4]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG5, dwords[5]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG6, dwords[6]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG7, dwords[7]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG8, dwords[8]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG9, dwords[9]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG10, dwords[10]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG11, dwords[11]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG12, dwords[12]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG13, dwords[13]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG14, dwords[14]);
+ /* write the payload dwords based on the payload size */
+ for (msg_index = 0; msg_index < 15; msg_index++) {
+ if (payload_size_bytes <= msg_index * 4) {
+ break;
+ }
+
+ switch (msg_index) {
+ case 0:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG0, dwords[msg_index + 1]);
+ break;
+ case 1:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG1, dwords[msg_index + 1]);
+ break;
+ case 2:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG2, dwords[msg_index + 1]);
+ break;
+ case 3:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG3, dwords[msg_index + 1]);
+ break;
+ case 4:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG4, dwords[msg_index + 1]);
+ break;
+ case 5:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG5, dwords[msg_index + 1]);
+ break;
+ case 6:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG6, dwords[msg_index + 1]);
+ break;
+ case 7:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG7, dwords[msg_index + 1]);
+ break;
+ case 8:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG8, dwords[msg_index + 1]);
+ break;
+ case 9:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG9, dwords[msg_index + 1]);
+ break;
+ case 10:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG10, dwords[msg_index + 1]);
+ break;
+ case 11:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG11, dwords[msg_index + 1]);
+ break;
+ case 12:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG12, dwords[msg_index + 1]);
+ break;
+ case 13:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG13, dwords[msg_index + 1]);
+ break;
+ case 14:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG14, dwords[msg_index + 1]);
+ break;
+ }
+ }
+
/* writing to INBOX RDY register will trigger DMUB REG INBOX0 RDY
* interrupt.
*/
- REG_WRITE(DMCUB_REG_INBOX0_RDY, dwords[15]);
+ REG_WRITE(DMCUB_REG_INBOX0_RDY, dwords[0]);
}
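The rewrite above also changes the message layout: the command header (dwords[0]) is now written to DMCUB_REG_INBOX0_RDY last, the payload (dwords[1] onward) fills MSG0..MSG14, and only as many MSG registers are touched as the payload size requires. The switch is presumably needed because REG_WRITE() resolves register names at compile time rather than by index; a sketch of the equivalent indexed logic, with reg_write_msg() and reg_write_rdy() as hypothetical indexed accessors:

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for the per-register REG_WRITE() calls */
static void reg_write_msg(unsigned index, uint32_t value)
{
	printf("MSG%u <- 0x%08x\n", index, (unsigned)value);
}

static void reg_write_rdy(uint32_t value)
{
	printf("RDY  <- 0x%08x (triggers the inbox interrupt)\n", (unsigned)value);
}

static void send_cmd(const uint32_t dwords[16], unsigned payload_bytes)
{
	/* dwords[0] is the header; dwords[1..15] carry the payload */
	for (unsigned i = 0; i < 15 && payload_bytes > i * 4; i++)
		reg_write_msg(i, dwords[i + 1]);

	reg_write_rdy(dwords[0]); /* write the header last to kick off execution */
}

int main(void)
{
	uint32_t cmd[16] = { 0xdeadbeef, 1, 2, 3 };
	send_cmd(cmd, 12); /* writes MSG0..MSG2, then RDY */
	return 0;
}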
uint32_t dmub_dcn401_read_reg_inbox0_rsp_int_status(struct dmub_srv *dmub)
@@ -556,30 +593,39 @@ void dmub_dcn401_read_reg_inbox0_cmd_rsp(struct dmub_srv *dmub,
static_assert(sizeof(*cmd) == 64, "DMUB command size mismatch");
- dwords[0] = REG_READ(DMCUB_REG_INBOX0_MSG0);
- dwords[1] = REG_READ(DMCUB_REG_INBOX0_MSG1);
- dwords[2] = REG_READ(DMCUB_REG_INBOX0_MSG2);
- dwords[3] = REG_READ(DMCUB_REG_INBOX0_MSG3);
- dwords[4] = REG_READ(DMCUB_REG_INBOX0_MSG4);
- dwords[5] = REG_READ(DMCUB_REG_INBOX0_MSG5);
- dwords[6] = REG_READ(DMCUB_REG_INBOX0_MSG6);
- dwords[7] = REG_READ(DMCUB_REG_INBOX0_MSG7);
- dwords[8] = REG_READ(DMCUB_REG_INBOX0_MSG8);
- dwords[9] = REG_READ(DMCUB_REG_INBOX0_MSG9);
- dwords[10] = REG_READ(DMCUB_REG_INBOX0_MSG10);
- dwords[11] = REG_READ(DMCUB_REG_INBOX0_MSG11);
- dwords[12] = REG_READ(DMCUB_REG_INBOX0_MSG12);
- dwords[13] = REG_READ(DMCUB_REG_INBOX0_MSG13);
- dwords[14] = REG_READ(DMCUB_REG_INBOX0_MSG14);
- dwords[15] = REG_READ(DMCUB_REG_INBOX0_RSP);
+ dwords[0] = REG_READ(DMCUB_REG_INBOX0_RSP);
+ dwords[1] = REG_READ(DMCUB_REG_INBOX0_MSG0);
+ dwords[2] = REG_READ(DMCUB_REG_INBOX0_MSG1);
+ dwords[3] = REG_READ(DMCUB_REG_INBOX0_MSG2);
+ dwords[4] = REG_READ(DMCUB_REG_INBOX0_MSG3);
+ dwords[5] = REG_READ(DMCUB_REG_INBOX0_MSG4);
+ dwords[6] = REG_READ(DMCUB_REG_INBOX0_MSG5);
+ dwords[7] = REG_READ(DMCUB_REG_INBOX0_MSG6);
+ dwords[8] = REG_READ(DMCUB_REG_INBOX0_MSG7);
+ dwords[9] = REG_READ(DMCUB_REG_INBOX0_MSG8);
+ dwords[10] = REG_READ(DMCUB_REG_INBOX0_MSG9);
+ dwords[11] = REG_READ(DMCUB_REG_INBOX0_MSG10);
+ dwords[12] = REG_READ(DMCUB_REG_INBOX0_MSG11);
+ dwords[13] = REG_READ(DMCUB_REG_INBOX0_MSG12);
+ dwords[14] = REG_READ(DMCUB_REG_INBOX0_MSG13);
+ dwords[15] = REG_READ(DMCUB_REG_INBOX0_MSG14);
}
void dmub_dcn401_write_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub)
{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_ACK, 1);
+}
+
+void dmub_dcn401_clear_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub)
+{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_ACK, 0);
}
+void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable)
+{
+ REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN, enable ? 1 : 0);
+}
+
void dmub_dcn401_write_reg_outbox0_rdy_int_ack(struct dmub_srv *dmub)
{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_ACK, 1);
@@ -604,11 +650,6 @@ uint32_t dmub_dcn401_read_reg_outbox0_rsp_int_status(struct dmub_srv *dmub)
return status;
}
-void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable)
-{
- REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN, enable ? 1:0);
-}
-
void dmub_dcn401_enable_reg_outbox0_rdy_int(struct dmub_srv *dmub, bool enable)
{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN, enable ? 1:0);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
index c35be52676f6..88c3a44d67d9 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
@@ -277,11 +277,13 @@ uint32_t dmub_dcn401_read_reg_inbox0_rsp_int_status(struct dmub_srv *dmub);
void dmub_dcn401_read_reg_inbox0_cmd_rsp(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
void dmub_dcn401_write_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub);
+void dmub_dcn401_clear_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub);
+void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable);
+
void dmub_dcn401_write_reg_outbox0_rdy_int_ack(struct dmub_srv *dmub);
void dmub_dcn401_read_reg_outbox0_msg(struct dmub_srv *dmub, uint32_t *msg);
void dmub_dcn401_write_reg_outbox0_rsp(struct dmub_srv *dmub, uint32_t *msg);
uint32_t dmub_dcn401_read_reg_outbox0_rsp_int_status(struct dmub_srv *dmub);
-void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable);
void dmub_dcn401_enable_reg_outbox0_rdy_int(struct dmub_srv *dmub, bool enable);
uint32_t dmub_dcn401_read_reg_outbox0_rdy_int_status(struct dmub_srv *dmub);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index ae8133816b43..acca7943a8c8 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -157,6 +157,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
{
struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;
+ /* default to specifying no inbox type */
+ enum dmub_inbox_cmd_interface_type default_inbox_type = DMUB_CMD_INTERFACE_DEFAULT;
+
switch (asic) {
case DMUB_ASIC_DCN20:
case DMUB_ASIC_DCN21:
@@ -395,10 +398,15 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_current_time = dmub_dcn401_get_current_time;
funcs->get_diagnostic_data = dmub_dcn401_get_diagnostic_data;
+
funcs->send_reg_inbox0_cmd_msg = dmub_dcn401_send_reg_inbox0_cmd_msg;
funcs->read_reg_inbox0_rsp_int_status = dmub_dcn401_read_reg_inbox0_rsp_int_status;
funcs->read_reg_inbox0_cmd_rsp = dmub_dcn401_read_reg_inbox0_cmd_rsp;
funcs->write_reg_inbox0_rsp_int_ack = dmub_dcn401_write_reg_inbox0_rsp_int_ack;
+ funcs->clear_reg_inbox0_rsp_int_ack = dmub_dcn401_clear_reg_inbox0_rsp_int_ack;
+ funcs->enable_reg_inbox0_rsp_int = dmub_dcn401_enable_reg_inbox0_rsp_int;
+ default_inbox_type = DMUB_CMD_INTERFACE_FB; // still default to FB for now
+
funcs->write_reg_outbox0_rdy_int_ack = dmub_dcn401_write_reg_outbox0_rdy_int_ack;
funcs->read_reg_outbox0_msg = dmub_dcn401_read_reg_outbox0_msg;
funcs->write_reg_outbox0_rsp = dmub_dcn401_write_reg_outbox0_rsp;
@@ -411,6 +419,20 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
return false;
}
+ /* set default inbox type if not overridden */
+ if (dmub->inbox_type == DMUB_CMD_INTERFACE_DEFAULT) {
+ if (default_inbox_type != DMUB_CMD_INTERFACE_DEFAULT) {
+ /* use default inbox type as specified by DCN rev */
+ dmub->inbox_type = default_inbox_type;
+ } else if (funcs->send_reg_inbox0_cmd_msg) {
+ /* prefer reg as default inbox type if present */
+ dmub->inbox_type = DMUB_CMD_INTERFACE_REG;
+ } else {
+ /* use fb as fallback */
+ dmub->inbox_type = DMUB_CMD_INTERFACE_FB;
+ }
+ }
+
return true;
}
@@ -426,6 +448,7 @@ enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
dmub->asic = params->asic;
dmub->fw_version = params->fw_version;
dmub->is_virtual = params->is_virtual;
+ dmub->inbox_type = params->inbox_type;
/* Setup asic dependent hardware funcs. */
if (!dmub_srv_hw_setup(dmub, params->asic)) {
@@ -695,7 +718,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
inbox1.base = cw4.region.base;
inbox1.top = cw4.region.base + DMUB_RB_SIZE;
outbox1.base = inbox1.top;
- outbox1.top = cw4.region.top;
+ outbox1.top = inbox1.top + DMUB_RB_SIZE;
cw5.offset.quad_part = tracebuff_fb->gpu_addr;
cw5.region.base = DMUB_CW5_BASE;
@@ -737,7 +760,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
rb_params.ctx = dmub;
rb_params.base_address = mail_fb->cpu_addr;
rb_params.capacity = DMUB_RB_SIZE;
- dmub_rb_init(&dmub->inbox1_rb, &rb_params);
+ dmub_rb_init(&dmub->inbox1.rb, &rb_params);
// Initialize outbox1 ring buffer
rb_params.ctx = dmub;
@@ -768,27 +791,6 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
-{
- if (!dmub->sw_init)
- return DMUB_STATUS_INVALID;
-
- if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
- uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
- uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
-
- if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) {
- return DMUB_STATUS_HW_FAILURE;
- } else {
- dmub->inbox1_rb.rptr = rptr;
- dmub->inbox1_rb.wrpt = wptr;
- dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
- }
- }
-
- return DMUB_STATUS_OK;
-}
-
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
{
if (!dmub->sw_init)
@@ -799,8 +801,13 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
/* mailboxes have been reset in hw, so reset the sw state as well */
dmub->inbox1_last_wptr = 0;
- dmub->inbox1_rb.wrpt = 0;
- dmub->inbox1_rb.rptr = 0;
+ dmub->inbox1.rb.wrpt = 0;
+ dmub->inbox1.rb.rptr = 0;
+ dmub->inbox1.num_reported = 0;
+ dmub->inbox1.num_submitted = 0;
+ dmub->reg_inbox0.num_reported = 0;
+ dmub->reg_inbox0.num_submitted = 0;
+ dmub->reg_inbox0.is_pending = 0;
dmub->outbox0_rb.wrpt = 0;
dmub->outbox0_rb.rptr = 0;
dmub->outbox1_rb.wrpt = 0;
@@ -811,7 +818,7 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+enum dmub_status dmub_srv_fb_cmd_queue(struct dmub_srv *dmub,
const union dmub_rb_cmd *cmd)
{
if (!dmub->hw_init)
@@ -820,18 +827,20 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
if (dmub->power_state != DMUB_POWER_STATE_D0)
return DMUB_STATUS_POWER_STATE_D3;
- if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
- dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
+ if (dmub->inbox1.rb.rptr > dmub->inbox1.rb.capacity ||
+ dmub->inbox1.rb.wrpt > dmub->inbox1.rb.capacity) {
return DMUB_STATUS_HW_FAILURE;
}
- if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
+ if (dmub_rb_push_front(&dmub->inbox1.rb, cmd)) {
+ dmub->inbox1.num_submitted++;
return DMUB_STATUS_OK;
+ }
return DMUB_STATUS_QUEUE_FULL;
}
-enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
+enum dmub_status dmub_srv_fb_cmd_execute(struct dmub_srv *dmub)
{
struct dmub_rb flush_rb;
@@ -846,13 +855,13 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
* been flushed to framebuffer memory. Otherwise DMCUB might
* read back stale, fully invalid or partially invalid data.
*/
- flush_rb = dmub->inbox1_rb;
+ flush_rb = dmub->inbox1.rb;
flush_rb.rptr = dmub->inbox1_last_wptr;
dmub_rb_flush_pending(&flush_rb);
- dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
+ dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1.rb.wrpt);
- dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
+ dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt;
return DMUB_STATUS_OK;
}
@@ -910,26 +919,84 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
return DMUB_STATUS_TIMEOUT;
}
+static void dmub_srv_update_reg_inbox0_status(struct dmub_srv *dmub)
+{
+ if (dmub->reg_inbox0.is_pending) {
+ dmub->reg_inbox0.is_pending = dmub->hw_funcs.read_reg_inbox0_rsp_int_status &&
+ !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
+
+ if (!dmub->reg_inbox0.is_pending) {
+ /* ack the rsp interrupt */
+ if (dmub->hw_funcs.write_reg_inbox0_rsp_int_ack)
+ dmub->hw_funcs.write_reg_inbox0_rsp_int_ack(dmub);
+
+ /* only update the reported count if commands aren't being batched */
+ if (!dmub->reg_inbox0.is_pending && !dmub->reg_inbox0.is_multi_pending) {
+ dmub->reg_inbox0.num_reported = dmub->reg_inbox0.num_submitted;
+ }
+ }
+ }
+}
+
+enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub,
+ uint32_t timeout_us)
+{
+ uint32_t i;
+ const uint32_t polling_interval_us = 1;
+ struct dmub_srv_inbox scratch_reg_inbox0 = dmub->reg_inbox0;
+ struct dmub_srv_inbox scratch_inbox1 = dmub->inbox1;
+ const volatile struct dmub_srv_inbox *reg_inbox0 = &dmub->reg_inbox0;
+ const volatile struct dmub_srv_inbox *inbox1 = &dmub->inbox1;
+
+ if (!dmub->hw_init ||
+ !dmub->hw_funcs.get_inbox1_wptr)
+ return DMUB_STATUS_INVALID;
+
+ for (i = 0; i <= timeout_us; i += polling_interval_us) {
+ scratch_inbox1.rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
+ scratch_inbox1.rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+
+ scratch_reg_inbox0.is_pending = scratch_reg_inbox0.is_pending &&
+ dmub->hw_funcs.read_reg_inbox0_rsp_int_status &&
+ !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
+
+ if (scratch_inbox1.rb.rptr > dmub->inbox1.rb.capacity)
+ return DMUB_STATUS_HW_FAILURE;
+
+ /* check current HW state first, but use command submission vs reported as a fallback */
+ if ((dmub_rb_empty(&scratch_inbox1.rb) ||
+ inbox1->num_reported >= scratch_inbox1.num_submitted) &&
+ (!scratch_reg_inbox0.is_pending ||
+ reg_inbox0->num_reported >= scratch_reg_inbox0.num_submitted))
+ return DMUB_STATUS_OK;
+
+ udelay(polling_interval_us);
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
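dmub_srv_wait_for_pending() is re-entrant because it snapshots the submission state up front and compares the live reported counters against that snapshot, so work submitted after the call begins cannot extend the wait. The pattern in isolation, with illustrative names:

#include <stdbool.h>
#include <stdint.h>

struct inbox { uint64_t num_submitted, num_reported; };

/* true once everything pending *at call time* has completed, even if
 * new work keeps arriving concurrently */
static bool prior_work_done(const volatile struct inbox *live,
			    uint64_t submitted_snapshot)
{
	return live->num_reported >= submitted_snapshot;
}

static bool wait_for_pending(volatile struct inbox *live, unsigned spins)
{
	uint64_t snapshot = live->num_submitted; /* snapshot first */

	while (spins--) {
		if (prior_work_done(live, snapshot))
			return true;
		/* the driver udelays here and refreshes the HW pointers */
	}
	return false;
}

int main(void)
{
	volatile struct inbox ib = { .num_submitted = 3, .num_reported = 3 };
	return wait_for_pending(&ib, 10) ? 0 : 1;
}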
+
enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
uint32_t timeout_us)
{
- uint32_t i, rptr;
+ enum dmub_status status;
+ uint32_t i;
+ const uint32_t polling_interval_us = 1;
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
- for (i = 0; i <= timeout_us; ++i) {
- rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ for (i = 0; i < timeout_us; i += polling_interval_us) {
+ status = dmub_srv_update_inbox_status(dmub);
- if (rptr > dmub->inbox1_rb.capacity)
- return DMUB_STATUS_HW_FAILURE;
+ if (status != DMUB_STATUS_OK)
+ return status;
- dmub->inbox1_rb.rptr = rptr;
-
- if (dmub_rb_empty(&dmub->inbox1_rb))
+ /* check for idle */
+ if (dmub_rb_empty(&dmub->inbox1.rb) && !dmub->reg_inbox0.is_pending)
return DMUB_STATUS_OK;
- udelay(1);
+ udelay(polling_interval_us);
}
return DMUB_STATUS_TIMEOUT;
@@ -1040,35 +1107,6 @@ enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd)
-{
- enum dmub_status status = DMUB_STATUS_OK;
-
- // Queue command
- status = dmub_srv_cmd_queue(dmub, cmd);
-
- if (status != DMUB_STATUS_OK)
- return status;
-
- // Execute command
- status = dmub_srv_cmd_execute(dmub);
-
- if (status != DMUB_STATUS_OK)
- return status;
-
- // Wait for DMUB to process command
- status = dmub_srv_wait_for_idle(dmub, 100000);
-
- if (status != DMUB_STATUS_OK)
- return status;
-
- // Copy data back from ring buffer into command
- dmub_rb_get_return_data(&dmub->inbox1_rb, cmd);
-
- return status;
-}
-
static inline bool dmub_rb_out_trace_buffer_front(struct dmub_rb *rb,
void *entry)
{
@@ -1160,47 +1198,162 @@ void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_
}
}
+void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state)
+{
+ if (!dmub || !dmub->hw_init)
+ return;
+
+ dmub->power_state = dmub_srv_power_state;
+}
-enum dmub_status dmub_srv_send_reg_inbox0_cmd(
- struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd,
- bool with_reply, uint32_t timeout_us)
+enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd)
{
- uint32_t rsp_ready = 0;
- uint32_t i;
+ uint32_t num_pending = 0;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+ if (dmub->power_state != DMUB_POWER_STATE_D0)
+ return DMUB_STATUS_POWER_STATE_D3;
+
+ if (!dmub->hw_funcs.send_reg_inbox0_cmd_msg ||
+ !dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->reg_inbox0.num_submitted >= dmub->reg_inbox0.num_reported)
+ num_pending = dmub->reg_inbox0.num_submitted - dmub->reg_inbox0.num_reported;
+ else
+ /* num_submitted wrapped */
+ num_pending = DMUB_REG_INBOX0_RB_MAX_ENTRY -
+ (dmub->reg_inbox0.num_reported - dmub->reg_inbox0.num_submitted);
+
+ if (num_pending >= DMUB_REG_INBOX0_RB_MAX_ENTRY)
+ return DMUB_STATUS_QUEUE_FULL;
+
+ /* clear last rsp ack and send message */
+ dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack(dmub);
dmub->hw_funcs.send_reg_inbox0_cmd_msg(dmub, cmd);
- for (i = 0; i < timeout_us; i++) {
- rsp_ready = dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
- if (rsp_ready)
- break;
- udelay(1);
+ dmub->reg_inbox0.num_submitted++;
+ dmub->reg_inbox0.is_pending = true;
+ dmub->reg_inbox0.is_multi_pending = cmd->cmd_common.header.multi_cmd_pending;
+
+ return DMUB_STATUS_OK;
+}
+
+void dmub_srv_cmd_get_response(struct dmub_srv *dmub,
+ union dmub_rb_cmd *cmd_rsp)
+{
+ if (dmub) {
+ if (dmub->inbox_type == DMUB_CMD_INTERFACE_REG &&
+ dmub->hw_funcs.read_reg_inbox0_cmd_rsp) {
+ dmub->hw_funcs.read_reg_inbox0_cmd_rsp(dmub, cmd_rsp);
+ } else {
+ dmub_rb_get_return_data(&dmub->inbox1.rb, cmd_rsp);
+ }
}
- if (rsp_ready == 0)
- return DMUB_STATUS_TIMEOUT;
+}
+
+static enum dmub_status dmub_srv_sync_reg_inbox0(struct dmub_srv *dmub)
+{
+ if (!dmub || !dmub->sw_init)
+ return DMUB_STATUS_INVALID;
- if (with_reply)
- dmub->hw_funcs.read_reg_inbox0_cmd_rsp(dmub, cmd);
+ dmub->reg_inbox0.is_pending = 0;
+ dmub->reg_inbox0.is_multi_pending = 0;
- dmub->hw_funcs.write_reg_inbox0_rsp_int_ack(dmub);
+ return DMUB_STATUS_OK;
+}
- /* wait for rsp int status is cleared to initial state before exit */
- for (; i <= timeout_us; i++) {
- rsp_ready = dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
- if (rsp_ready == 0)
- break;
- udelay(1);
+static enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
+{
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
+ uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
+
+ if (rptr > dmub->inbox1.rb.capacity || wptr > dmub->inbox1.rb.capacity) {
+ return DMUB_STATUS_HW_FAILURE;
+ } else {
+ dmub->inbox1.rb.rptr = rptr;
+ dmub->inbox1.rb.wrpt = wptr;
+ dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt;
+ }
}
- ASSERT(rsp_ready == 0);
return DMUB_STATUS_OK;
}
-void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state)
+enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub)
{
- if (!dmub || !dmub->hw_init)
- return;
+ enum dmub_status status;
- dmub->power_state = dmub_srv_power_state;
+ status = dmub_srv_sync_reg_inbox0(dmub);
+ if (status != DMUB_STATUS_OK)
+ return status;
+
+ status = dmub_srv_sync_inbox1(dmub);
+ if (status != DMUB_STATUS_OK)
+ return status;
+
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
+ uint32_t timeout_us,
+ uint32_t num_free_required)
+{
+ enum dmub_status status;
+ uint32_t i;
+ const uint32_t polling_interval_us = 1;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ for (i = 0; i < timeout_us; i += polling_interval_us) {
+ status = dmub_srv_update_inbox_status(dmub);
+
+ if (status != DMUB_STATUS_OK)
+ return status;
+
+ /* check for space in inbox1 */
+ if (dmub_rb_num_free(&dmub->inbox1.rb) >= num_free_required)
+ return DMUB_STATUS_OK;
+
+ udelay(polling_interval_us);
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub)
+{
+ uint32_t rptr;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->power_state != DMUB_POWER_STATE_D0)
+ return DMUB_STATUS_POWER_STATE_D3;
+
+ /* update inbox1 state */
+ rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+
+ if (rptr > dmub->inbox1.rb.capacity)
+ return DMUB_STATUS_HW_FAILURE;
+
+ if (dmub->inbox1.rb.rptr > rptr) {
+ /* rb wrapped */
+ dmub->inbox1.num_reported += (rptr + dmub->inbox1.rb.capacity - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
+ } else {
+ dmub->inbox1.num_reported += (rptr - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
+ }
+ dmub->inbox1.rb.rptr = rptr;
+
+ /* update reg_inbox0 */
+ dmub_srv_update_reg_inbox0_status(dmub);
+
+ return DMUB_STATUS_OK;
}
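The rptr delta computed above must handle the read pointer wrapping past the end of the ring; either way the reported-command counter advances by the number of whole commands consumed. A self-contained check of the wrap arithmetic, with illustrative names:

#include <stdio.h>

#define CMD_SIZE 64u

/* commands consumed between old_rptr and new_rptr on a ring of
 * `capacity` bytes, handling wrap-around */
static unsigned cmds_consumed(unsigned old_rptr, unsigned new_rptr,
			      unsigned capacity)
{
	if (old_rptr > new_rptr) /* rb wrapped */
		return (new_rptr + capacity - old_rptr) / CMD_SIZE;
	return (new_rptr - old_rptr) / CMD_SIZE;
}

int main(void)
{
	unsigned cap = 8 * CMD_SIZE;
	printf("no wrap: %u\n", cmds_consumed(0, 2 * CMD_SIZE, cap));        /* 2 */
	printf("wrap:    %u\n", cmds_consumed(7 * CMD_SIZE, CMD_SIZE, cap)); /* 2 */
	return 0;
}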
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
index cce887cefc01..567c5b1aeb7a 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
@@ -95,23 +95,6 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
case DMUB_OUT_CMD__DPIA_NOTIFICATION:
notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION;
notify->link_index = cmd.dpia_notification.payload.header.instance;
-
- if (cmd.dpia_notification.payload.header.type == DPIA_NOTIFY__BW_ALLOCATION) {
-
- notify->dpia_notification.payload.data.dpia_bw_alloc.estimated_bw =
- cmd.dpia_notification.payload.data.dpia_bw_alloc.estimated_bw;
- notify->dpia_notification.payload.data.dpia_bw_alloc.allocated_bw =
- cmd.dpia_notification.payload.data.dpia_bw_alloc.allocated_bw;
-
- if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_failed)
- notify->result = DPIA_BW_REQ_FAILED;
- else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_succeeded)
- notify->result = DPIA_BW_REQ_SUCCESS;
- else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.est_bw_changed)
- notify->result = DPIA_EST_BW_CHANGED;
- else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_alloc_cap_changed)
- notify->result = DPIA_BW_ALLOC_CAPS_CHANGED;
- }
break;
case DMUB_OUT_CMD__HPD_SENSE_NOTIFY:
notify->type = DMUB_NOTIFICATION_HPD_SENSE_NOTIFY;
@@ -119,6 +102,10 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
&cmd.hpd_sense_notify.data,
sizeof(cmd.hpd_sense_notify.data));
break;
+ case DMUB_OUT_CMD__FUSED_IO:
+ notify->type = DMUB_NOTIFICATION_FUSED_IO;
+ dmub_memcpy(&notify->fused_request, &cmd.fused_io.request, sizeof(cmd.fused_io.request));
+ break;
default:
notify->type = DMUB_NOTIFICATION_NO_DATA;
break;
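
On the driver side, a hedged sketch of how the new fused-IO notification could be dispatched; struct dmub_notification and DMUB_NOTIFICATION_FUSED_IO match this patch, while dm_complete_fused_io() is a hypothetical completion hook:

/* Sketch: route the firmware's fused-IO reply back to the waiter. */
static void handle_dmub_notify(struct amdgpu_device *adev,
			       struct dmub_notification *notify)
{
	switch (notify->type) {
	case DMUB_NOTIFICATION_FUSED_IO:
		dm_complete_fused_io(adev, &notify->fused_request);
		break;
	default:
		break;
	}
}
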
diff --git a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
index 7e3240e73c1f..63813009a3a6 100644
--- a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
+++ b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
@@ -86,6 +86,9 @@ enum dc_irq_source dal_irq_get_source(
enum dc_irq_source dal_irq_get_rx_source(
const struct gpio *irq);
+enum dc_irq_source dal_irq_get_read_request(
+ const struct gpio *irq);
+
enum gpio_result dal_irq_setup_hpd_filter(
struct gpio *irq,
struct gpio_hpd_config *config);
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 1867aac57cf2..da74ed66c8f9 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -89,6 +89,8 @@ struct link_training_settings {
bool enhanced_framing;
enum lttpr_mode lttpr_mode;
+ bool lttpr_early_tps2;
+
/* disallow different lanes to have different lane settings */
bool disallow_per_lane_settings;
/* dpcd lane settings will always use the same hw lane settings
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 55c7d873175f..a37634942b07 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -386,6 +386,7 @@ enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_poll_read_lc_fw(struct mod_hdcp *hdcp);
/* hdcp version helpers */
static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 1d41dd58f6bc..bb8ae80b37f8 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -452,21 +452,12 @@ out:
return status;
}
-static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
+static enum mod_hdcp_status locality_check_sw(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
- if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
- event_ctx->unexpected_event = 1;
- goto out;
- }
-
- if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init,
- &input->lc_init_prepare, &status,
- hdcp, "lc_init_prepare"))
- goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init,
&input->lc_init_write, &status,
hdcp, "lc_init_write"))
@@ -482,6 +473,48 @@ static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
&input->l_prime_read, &status,
hdcp, "l_prime_read"))
goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status locality_check_fw(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_poll_read_lc_fw,
+ &input->l_prime_read, &status,
+ hdcp, "l_prime_read"))
+ goto out;
+
+out:
+ return status;
+}
+
+static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c
+ && hdcp->config.ddc.funcs.atomic_write_poll_read_aux
+ && !hdcp->connection.link.adjust.hdcp2.force_sw_locality_check;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init,
+ &input->lc_init_prepare, &status,
+ hdcp, "lc_init_prepare"))
+ goto out;
+
+ status = (use_fw ? locality_check_fw : locality_check_sw)(hdcp, event_ctx, input);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_l_prime,
&input->l_prime_validation, &status,
hdcp, "l_prime_validation"))
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
index c5f6c11de7e5..89ffb89e1932 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
@@ -184,17 +184,28 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK);
break;
- case H2_A2_LOCALITY_CHECK:
+ case H2_A2_LOCALITY_CHECK: {
+ const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c
+ && !adjust->hdcp2.force_sw_locality_check;
+
+ /*
+ * 1A-05: consider disconnection after LC init a failure
+ * 1A-13-1: consider invalid l' a failure
+ * 1A-13-2: consider l' timeout a failure
+ */
if (hdcp->state.stay_count > 10 ||
input->lc_init_prepare != PASS ||
- input->lc_init_write != PASS ||
- input->l_prime_available_poll != PASS ||
- input->l_prime_read != PASS) {
- /*
- * 1A-05: consider disconnection after LC init a failure
- * 1A-13-1: consider invalid l' a failure
- * 1A-13-2: consider l' timeout a failure
- */
+ (!use_fw && input->lc_init_write != PASS) ||
+ (!use_fw && input->l_prime_available_poll != PASS)) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->l_prime_read != PASS) {
+ if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) {
+ adjust->hdcp2.force_sw_locality_check = true;
+ callback_in_ms(0, output);
+ break;
+ }
+
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->l_prime_validation != PASS) {
@@ -205,6 +216,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
break;
+ }
case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
if (input->eks_prepare != PASS ||
input->eks_write != PASS) {
@@ -498,14 +510,25 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK);
break;
- case D2_A2_LOCALITY_CHECK:
+ case D2_A2_LOCALITY_CHECK: {
+ const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_aux
+ && !adjust->hdcp2.force_sw_locality_check;
+
if (hdcp->state.stay_count > 10 ||
input->lc_init_prepare != PASS ||
- input->lc_init_write != PASS ||
- input->l_prime_read != PASS) {
+ (!use_fw && input->lc_init_write != PASS)) {
/* 1A-12: consider invalid l' a failure */
fail_and_restart_in_ms(0, &status, output);
break;
+ } else if (input->l_prime_read != PASS) {
+ if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) {
+ adjust->hdcp2.force_sw_locality_check = true;
+ callback_in_ms(0, output);
+ break;
+ }
+
+ fail_and_restart_in_ms(0, &status, output);
+ break;
} else if (input->l_prime_validation != PASS) {
callback_in_ms(0, output);
increment_stay_counter(hdcp);
@@ -514,6 +537,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
break;
+ }
case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
if (input->eks_prepare != PASS ||
input->eks_write != PASS) {
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index 6e064e6ae949..2e6408579194 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -688,3 +688,76 @@ enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp)
return MOD_HDCP_STATUS_INVALID_OPERATION;
}
+
+static bool write_stall_read_lc_fw_aux(struct mod_hdcp *hdcp)
+{
+ struct mod_hdcp_message_hdcp2 *hdcp2 = &hdcp->auth.msg.hdcp2;
+
+ struct mod_hdcp_atomic_op_aux write = {
+ hdcp_dpcd_addrs[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT],
+ hdcp2->lc_init + 1,
+ sizeof(hdcp2->lc_init) - 1,
+ };
+ struct mod_hdcp_atomic_op_aux stall = { 0, NULL, 0, };
+ struct mod_hdcp_atomic_op_aux read = {
+ hdcp_dpcd_addrs[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME],
+ hdcp2->lc_l_prime + 1,
+ sizeof(hdcp2->lc_l_prime) - 1,
+ };
+
+ hdcp2->lc_l_prime[0] = HDCP_2_2_LC_SEND_LPRIME;
+
+ return hdcp->config.ddc.funcs.atomic_write_poll_read_aux(
+ hdcp->config.ddc.handle,
+ &write,
+ &stall,
+ &read,
+ 16 * 1000,
+ 0
+ );
+}
+
+static bool write_poll_read_lc_fw_i2c(struct mod_hdcp *hdcp)
+{
+ struct mod_hdcp_message_hdcp2 *hdcp2 = &hdcp->auth.msg.hdcp2;
+ uint8_t expected_rxstatus[2] = { sizeof(hdcp2->lc_l_prime) };
+
+ hdcp->buf[0] = hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT];
+ memmove(&hdcp->buf[1], hdcp2->lc_init, sizeof(hdcp2->lc_init));
+
+ struct mod_hdcp_atomic_op_i2c write = {
+ HDCP_I2C_ADDR,
+ 0,
+ hdcp->buf,
+ sizeof(hdcp2->lc_init) + 1,
+ };
+ struct mod_hdcp_atomic_op_i2c poll = {
+ HDCP_I2C_ADDR,
+ hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_READ_RXSTATUS],
+ expected_rxstatus,
+ sizeof(expected_rxstatus),
+ };
+ struct mod_hdcp_atomic_op_i2c read = {
+ HDCP_I2C_ADDR,
+ hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME],
+ hdcp2->lc_l_prime,
+ sizeof(hdcp2->lc_l_prime),
+ };
+
+ return hdcp->config.ddc.funcs.atomic_write_poll_read_i2c(
+ hdcp->config.ddc.handle,
+ &write,
+ &poll,
+ &read,
+ 20 * 1000,
+ 6
+ );
+}
+
+enum mod_hdcp_status mod_hdcp_write_poll_read_lc_fw(struct mod_hdcp *hdcp)
+{
+ const bool success = (is_dp_hdcp(hdcp) ? write_stall_read_lc_fw_aux : write_poll_read_lc_fw_i2c)(hdcp);
+
+ return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+}
+
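
For context, a sketch of what a DM-side provider of the new atomic AUX callback might look like; the signature follows the mod_hdcp_ddc funcs added below, but the handle type and dm_send_fused_io_aux() are illustrative assumptions:

/* Hypothetical backend: pack write/stall/read into one FW request. */
static bool dm_atomic_write_poll_read_aux(void *handle,
		const struct mod_hdcp_atomic_op_aux *write,
		const struct mod_hdcp_atomic_op_aux *stall,
		struct mod_hdcp_atomic_op_aux *read,
		uint32_t poll_timeout_us,
		uint8_t poll_mask_msb)
{
	struct dc_link *link = handle; /* assumed handle type */

	return dm_send_fused_io_aux(link, write, stall, read,
				    poll_timeout_us, poll_mask_msb);
}
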
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index a4d344a4db9e..c42468bb70ac 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -133,9 +133,22 @@ enum mod_hdcp_display_disable_option {
MOD_HDCP_DISPLAY_DISABLE_ENCRYPTION,
};
+struct mod_hdcp_atomic_op_i2c {
+ uint8_t address;
+ uint8_t offset;
+ uint8_t *data;
+ uint32_t size;
+};
+
+struct mod_hdcp_atomic_op_aux {
+ uint32_t address;
+ uint8_t *data;
+ uint32_t size;
+};
+
struct mod_hdcp_ddc {
void *handle;
- struct {
+ struct mod_hdcp_ddc_funcs {
bool (*read_i2c)(void *handle,
uint32_t address,
uint8_t offset,
@@ -153,6 +166,22 @@ struct mod_hdcp_ddc {
uint32_t address,
const uint8_t *data,
uint32_t size);
+ bool (*atomic_write_poll_read_i2c)(
+ void *handle,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+ );
+ bool (*atomic_write_poll_read_aux)(
+ void *handle,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+ );
} funcs;
};
@@ -185,7 +214,8 @@ struct mod_hdcp_link_adjustment_hdcp2 {
uint8_t force_type : 2;
uint8_t force_no_stored_km : 1;
uint8_t increase_h_prime_timeout: 1;
- uint8_t reserved : 3;
+ uint8_t force_sw_locality_check : 1;
+ uint8_t reserved : 2;
};
struct mod_hdcp_link_adjustment {
@@ -272,6 +302,10 @@ struct mod_hdcp_display_query {
struct mod_hdcp_config {
struct mod_hdcp_psp psp;
struct mod_hdcp_ddc ddc;
+ struct {
+ uint8_t lc_enable_sw_fallback : 1;
+ uint8_t reserved : 7;
+ } debug;
uint8_t index;
};
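
One plausible way a driver could opt into the firmware locality-check path with SW fallback when filling in this config; the field names are from this header, the handle and the dm_* callbacks (sketched earlier) are placeholders:

/* Sketch: wire up the atomic DDC ops and allow SW fallback on FW
 * locality-check failure (cf. DC_HDCP_LC_ENABLE_SW_FALLBACK below). */
struct mod_hdcp_config config = { 0 };

config.ddc.handle = ddc_handle; /* placeholder */
config.ddc.funcs.atomic_write_poll_read_aux = dm_atomic_write_poll_read_aux;
config.ddc.funcs.atomic_write_poll_read_i2c = dm_atomic_write_poll_read_i2c;
config.debug.lc_enable_sw_fallback = 1;
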
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 4c95b885d1d0..c8eccee9b023 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -366,7 +366,7 @@ enum DC_DEBUG_MASK {
DC_HDCP_LC_FORCE_FW_ENABLE = 0x80000,
/**
- * @DC_HDCP_LC_ENABLE_SW_FALLBACK If set, upon HDCP Locality Check FW
+ * @DC_HDCP_LC_ENABLE_SW_FALLBACK: If set, upon HDCP Locality Check FW
* path failure, retry using legacy SW path.
*/
DC_HDCP_LC_ENABLE_SW_FALLBACK = 0x100000,
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
index bd8085ec54ed..2d6a598a6c25 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
@@ -5242,6 +5242,8 @@
#define DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT 0x0000000c
#define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE_MASK 0x00000003L
#define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT 0x00000000
+#define DEGAMMA_CONTROL__ICON_DEGAMMA_MODE_MASK 0x00000300L
+#define DEGAMMA_CONTROL__ICON_DEGAMMA_MODE__SHIFT 0x00000008
#define DEGAMMA_CONTROL__OVL_DEGAMMA_MODE_MASK 0x00000030L
#define DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT 0x00000004
#define DENORM_CONTROL__DENORM_MODE_MASK 0x00000007L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
index c75aee25619e..6f44345277af 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
@@ -1779,6 +1779,8 @@
#define mmRLC_TTOP_D 0x3105
#define mmRLC_CLEAR_STATE_RESTORE_BASE 0x30C8
#define mmRLC_PG_AO_CU_MASK 0x310B
+#define mmSPI_STATIC_THREAD_MGMT_1 0x2438
+#define mmSPI_STATIC_THREAD_MGMT_2 0x2439
#define mmSPI_STATIC_THREAD_MGMT_3 0x243A
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
index edc8a793a95d..4dd386b98748 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
@@ -234,6 +234,26 @@
#define mmIH_RB_WPTR_ADDR_HI 0x0F84
#define mmIH_RB_WPTR_ADDR_LO 0x0F85
#define mmIH_STATUS 0x0F88
+
+#define mmDMA_GFX_RB_CNTL 0x3400
+#define mmDMA_GFX_RB_BASE 0x3401
+#define mmDMA_GFX_RB_RPTR 0x3402
+#define mmDMA_GFX_RB_WPTR 0x3403
+#define mmDMA_GFX_RB_RPTR_ADDR_HI 0x3407
+#define mmDMA_GFX_RB_RPTR_ADDR_LO 0x3408
+#define mmDMA_GFX_IB_CNTL 0x3409
+#define mmDMA_GFX_IB_RPTR 0x340A
+#define mmDMA_CNTL 0x340B
+#define mmDMA_STATUS_REG 0x340D
+#define mmDMA_TILING_CONFIG 0x342E
+#define mmDMA_SEM_INCOMPLETE_TIMER_CNTL 0x3411
+#define mmDMA_SEM_WAIT_FAIL_TIMER_CNTL 0x3412
+#define mmDMA_POWER_CNTL 0x342F
+#define mmDMA_CLK_CTRL 0x3430
+#define mmDMA_PG 0x3435
+#define mmDMA_PGFSM_CONFIG 0x3436
+#define mmDMA_PGFSM_WRITE 0x3437
+
#define mmSEM_MAILBOX 0x0F9B
#define mmSEM_MAILBOX_CLIENTCONFIG 0x0F9A
#define mmSEM_MAILBOX_CONTROL 0x0F9C
@@ -269,7 +289,4 @@
#define mmVCE_CONFIG 0x0F94
#define mmXDMA_MSTR_MEM_OVERFLOW_CNTL 0x03F8
-/* from the old sid.h */
-#define mmDMA_TILING_CONFIG 0x342E
-
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
index 1c540fe136cb..9f7fc2428b69 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
@@ -823,6 +823,43 @@
#define LX3__RESERVED__SHIFT 0x00000000
#define RINGOSC_MASK__MASK_MASK 0x0000ffffL
#define RINGOSC_MASK__MASK__SHIFT 0x00000000
+
+#define DMA_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define DMA_CNTL__TRAP_ENABLE__SHIFT 0x00000000
+#define DMA_CNTL__SEM_INCOMPLETE_INT_ENABLE_MASK 0x00000002L
+#define DMA_CNTL__SEM_INCOMPLETE_INT_ENABLE__SHIFT 0x00000001
+#define DMA_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define DMA_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x00000002
+#define DMA_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define DMA_CNTL__DATA_SWAP_ENABLE__SHIFT 0x00000003
+#define DMA_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define DMA_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x00000004
+#define DMA_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define DMA_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x0000001C
+#define DMA_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define DMA_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x00000000
+#define DMA_GFX_RB_CNTL__RB_SIZE__SHIFT 0x00000001
+#define DMA_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define DMA_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x00000009
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0x0000000C
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0x0000000D
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x00000010
+#define DMA_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define DMA_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x00000000
+#define DMA_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define DMA_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x00000004
+#define DMA_GFX_IB_CNTL__CMD_VMID_FORCE_MASK 0x80000000L
+#define DMA_GFX_IB_CNTL__CMD_VMID_FORCE__SHIFT 0x0000001F
+
+#define DMA_STATUS_REG__IDLE_MASK 0x00000001L
+#define DMA_STATUS_REG__IDLE__SHIFT 0x00000000
+#define DMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
+#define DMA_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x00000008
+#define DMA_PG__PG_CNTL_ENABLE_MASK 0x00000001L
+#define DMA_PG__PG_CNTL_ENABLE__SHIFT 0x00000000
+
#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0_MASK 0x00000007L
#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0__SHIFT 0x00000000
#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1_MASK 0x00000038L
@@ -1015,6 +1052,10 @@
#define SRBM_STATUS2__VCE_BUSY__SHIFT 0x00000007
#define SRBM_STATUS2__VCE_RQ_PENDING_MASK 0x00000008L
#define SRBM_STATUS2__VCE_RQ_PENDING__SHIFT 0x00000003
+#define SRBM_STATUS2__DMA_BUSY_MASK 0x00000020L
+#define SRBM_STATUS2__DMA_BUSY__SHIFT 0x00000005
+#define SRBM_STATUS2__DMA1_BUSY_MASK 0x00000040L
+#define SRBM_STATUS2__DMA1_BUSY__SHIFT 0x00000006
#define SRBM_STATUS2__XDMA_BUSY_MASK 0x00000100L
#define SRBM_STATUS2__XDMA_BUSY__SHIFT 0x00000008
#define SRBM_STATUS2__XSP_BUSY_MASK 0x00000010L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
index 6b10be61efc3..bdef1f743df7 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
@@ -41,7 +41,49 @@
#define ixLCAC_MC5_CNTL 0x012B
#define ixLCAC_MC5_OVR_SEL 0x012C
#define ixLCAC_MC5_OVR_VAL 0x012D
+
+#define mmCG_SPLL_FUNC_CNTL 0x0180
+#define mmCG_SPLL_FUNC_CNTL_2 0x0181
+#define mmCG_SPLL_FUNC_CNTL_3 0x0182
+#define mmCG_SPLL_FUNC_CNTL_4 0x0183
+#define mmCG_SPLL_STATUS 0x0185
+#define mmSPLL_CNTL_MODE 0x0186
+#define mmCG_SPLL_SPREAD_SPECTRUM 0x0188
+#define mmCG_SPLL_SPREAD_SPECTRUM_2 0x0189
+#define mmCG_SPLL_AUTOSCALE_CNTL 0x018B
+#define mmMPLL_BYPASSCLK_SEL 0x0197
+#define mmCG_CLKPIN_CNTL 0x0198
+#define mmCG_CLKPIN_CNTL_2 0x0199
+#define mmTHM_CLK_CNTL 0x019B
+#define mmMISC_CLK_CNTL 0x019C
+#define mmCG_THERMAL_CTRL 0x01C0
+#define mmCG_THERMAL_STATUS 0x01C1
+#define mmCG_THERMAL_INT 0x01C2
+#define mmCG_MULT_THERMAL_CTRL 0x01C4
+#define mmCG_MULT_THERMAL_STATUS 0x01C5
+#define mmCG_FDO_CTRL0 0x01D5
+#define mmCG_FDO_CTRL1 0x01D6
+#define mmCG_FDO_CTRL2 0x01D7
+#define mmCG_TACH_CTRL 0x01DC
+#define mmCG_TACH_STATUS 0x01DD
+#define mmGENERAL_PWRMGT 0x01E0
+#define mmCG_TPC 0x01E1
+#define mmSCLK_PWRMGT_CNTL 0x01E2
+#define mmTARGET_AND_CURRENT_PROFILE_INDEX 0x01E6
+#define mmCG_FTV 0x01EF
+#define mmCG_FFCT_0 0x01F0
+#define mmCG_BSP 0x01FF
+#define mmCG_AT 0x0200
+#define mmCG_GIT 0x0201
+#define mmCG_SSP 0x0203
+#define mmCG_DISPLAY_GAP_CNTL 0x020A
+#define mmCG_ULV_CONTROL 0x021E
+#define mmCG_ULV_PARAMETER 0x021F
+#define mmSMC_SCRATCH0 0x0221
+#define mmCG_CAC_CTRL 0x022E
+
#define ixSMC_PC_C 0x80000370
+
#define ixTHM_TMON0_DEBUG 0x03F0
#define ixTHM_TMON0_INT_DATA 0x0380
#define ixTHM_TMON0_RDIL0_DATA 0x0300
@@ -110,6 +152,7 @@
#define ixTHM_TMON1_RDIR7_DATA 0x0337
#define ixTHM_TMON1_RDIR8_DATA 0x0338
#define ixTHM_TMON1_RDIR9_DATA 0x0339
+
#define mmGPIOPAD_A 0x05E7
#define mmGPIOPAD_EN 0x05E8
#define mmGPIOPAD_EXTERN_TRIG_CNTL 0x05F1
@@ -127,6 +170,7 @@
#define mmGPIOPAD_STRENGTH 0x05E5
#define mmGPIOPAD_SW_INT_STAT 0x05E4
#define mmGPIOPAD_Y 0x05E9
+
#define mmSMC_IND_ACCESS_CNTL 0x008A
#define mmSMC_IND_DATA_0 0x0081
#define mmSMC_IND_DATA 0x0081
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
index 7d3925b7266e..67d3c7e13a48 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
@@ -23,10 +23,142 @@
#ifndef SMU_6_0_SH_MASK_H
#define SMU_6_0_SH_MASK_H
-#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x03ffffffL
-#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x00000000
-#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x000003f0L
+#define CG_AT__CG_R_MASK 0x0000FFFFL
+#define CG_AT__CG_R__SHIFT 0x00000000
+#define CG_AT__CG_L_MASK 0xFFFF0000L
+#define CG_AT__CG_L__SHIFT 0x00000010
+
+#define CG_BSP__BSP_MASK 0x0000FFFFL
+#define CG_BSP__BSP__SHIFT 0x00000000
+#define CG_BSP__BSU_MASK 0x000F0000L
+#define CG_BSP__BSU__SHIFT 0x00000010
+
+#define CG_CAC_CTRL__CAC_WINDOW_MASK 0x00FFFFFFL
+#define CG_CAC_CTRL__CAC_WINDOW__SHIFT 0x00000000
+
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK 0x00000002L
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE__SHIFT 0x00000001
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK 0x00000004L
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT 0x00000002
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK 0x00000008L
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT 0x00000003
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK 0x00000100L
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK__SHIFT 0x00000008
+
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK 0x00000003L
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT 0x00000000
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK 0x0000000CL
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT 0x00000002
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT_MASK 0x0003FFF0L
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT__SHIFT 0x00000004
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT_MASK 0x00700000L
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT__SHIFT 0x00000014
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG_MASK 0x03000000L
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG__SHIFT 0x00000018
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG_MASK 0x0C000000L
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG__SHIFT 0x0000001A
+
+#define CG_FFCT_0__UTC_0_MASK 0x000003FFL
+#define CG_FFCT_0__UTC_0__SHIFT 0x00000000
+#define CG_FFCT_0__DTC_0_MASK 0x000FFC00L
+#define CG_FFCT_0__DTC_0__SHIFT 0x0000000A
+
+#define CG_GIT__CG_GICST_MASK 0x0000FFFFL
+#define CG_GIT__CG_GICST__SHIFT 0x00000000
+#define CG_GIT__CG_GIPOT_MASK 0xFFFF0000L
+#define CG_GIT__CG_GIPOT__SHIFT 0x00000010
+
+#define CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK 0x00000001L
+#define CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT 0x00000000
+#define CG_SPLL_FUNC_CNTL__SPLL_SLEEP_MASK 0x00000002L
+#define CG_SPLL_FUNC_CNTL__SPLL_SLEEP__SHIFT 0x00000001
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK 0x00000008L
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT 0x00000003
+#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x000003F0L
#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT 0x00000004
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK 0x07F00000L
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT 0x00000014
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK 0x000001FFL
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT 0x00000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK 0x00800000L
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT 0x00000017
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK 0x04000000L
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE__SHIFT 0x0000001A
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x03FFFFFFL
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x00000000
+#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK 0x10000000L
+#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN__SHIFT 0x0000001C
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x00000002L
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x00000001
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK 0x00000001L
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT 0x00000000
+#define CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK 0x0000FFF0L
+#define CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT 0x00000004
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK 0x03FFFFFFL
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT 0x00000000
+#define CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK 0x00000200L
+#define CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR__SHIFT 0x00000009
+
+#define CG_SSP__SST_MASK 0x0000FFFFL
+#define CG_SSP__SST__SHIFT 0x00000000
+#define CG_SSP__SSTU_MASK 0x000F0000L
+#define CG_SSP__SSTU__SHIFT 0x00000010
+
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK 0x00000007L
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT 0x00000000
+#define CG_THERMAL_CTRL__DIG_THERM_DPM_MASK 0x003FC000L
+#define CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT 0x0000000E
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK 0x0001FE00L
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT 0x00000009
+#define CG_THERMAL_INT__DIG_THERM_INTH_MASK 0x0000FF00L
+#define CG_THERMAL_INT__DIG_THERM_INTH__SHIFT 0x00000008
+#define CG_THERMAL_INT__DIG_THERM_INTL_MASK 0x00FF0000L
+#define CG_THERMAL_INT__DIG_THERM_INTL__SHIFT 0x00000010
+#define CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK 0x01000000L
+#define CG_THERMAL_INT__THERM_INT_MASK_HIGH__SHIFT 0x00000018
+#define CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK 0x02000000L
+#define CG_THERMAL_INT__THERM_INT_MASK_LOW__SHIFT 0x00000019
+
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK 0x0FF00000L
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT 0x00000014
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK 0x000001FFL
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT 0x00000000
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK 0x0003FE00L
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT 0x00000009
+
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK 0x000000FFL
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT 0x00000000
+#define CG_FDO_CTRL1__FMAX_DUTY100_MASK 0x000000FFL
+#define CG_FDO_CTRL1__FMAX_DUTY100__SHIFT 0x00000000
+#define CG_FDO_CTRL2__TMIN_MASK 0x000000FFL
+#define CG_FDO_CTRL2__TMIN__SHIFT 0x00000000
+#define CG_FDO_CTRL2__FDO_PWM_MODE_MASK 0x00003800L
+#define CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT 0x0000000B
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK 0xFE000000L
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT 0x00000019
+
+#define CG_TACH_CTRL__EDGE_PER_REV_MASK 0x00000007L
+#define CG_TACH_CTRL__EDGE_PER_REV__SHIFT 0x00000000
+#define CG_TACH_CTRL__TARGET_PERIOD_MASK 0xFFFFFFF8L
+#define CG_TACH_CTRL__TARGET_PERIOD__SHIFT 0x00000003
+#define CG_TACH_STATUS__TACH_PERIOD_MASK 0xFFFFFFFFL
+#define CG_TACH_STATUS__TACH_PERIOD__SHIFT 0x00000000
+
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK 0x00000001L
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN__SHIFT 0x00000000
+#define GENERAL_PWRMGT__STATIC_PM_EN_MASK 0x00000002L
+#define GENERAL_PWRMGT__STATIC_PM_EN__SHIFT 0x00000001
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK 0x00000004L
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS__SHIFT 0x00000002
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE_MASK 0x00000008L
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE__SHIFT 0x00000003
+#define GENERAL_PWRMGT__SW_SMIO_INDEX_MASK 0x00000040L
+#define GENERAL_PWRMGT__SW_SMIO_INDEX__SHIFT 0x00000006
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK 0x00000400L
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN__SHIFT 0x0000000A
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK 0x00800000L
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN__SHIFT 0x00000017
+
#define GPIOPAD_A__GPIO_A_MASK 0x7fffffffL
#define GPIOPAD_A__GPIO_A__SHIFT 0x00000000
#define GPIOPAD_EN__GPIO_EN_MASK 0x7fffffffL
@@ -195,6 +327,7 @@
#define GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x00000000
#define GPIOPAD_Y__GPIO_Y_MASK 0x7fffffffL
#define GPIOPAD_Y__GPIO_Y__SHIFT 0x00000000
+
#define LCAC_MC0_CNTL__MC0_ENABLE_MASK 0x00000001L
#define LCAC_MC0_CNTL__MC0_ENABLE__SHIFT 0x00000000
#define LCAC_MC0_CNTL__MC0_THRESHOLD_MASK 0x0001fffeL
@@ -243,6 +376,37 @@
#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL__SHIFT 0x00000000
#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL_MASK 0xffffffffL
#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL__SHIFT 0x00000000
+
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK 0x0000FF00L
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT 0x00000008
+
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK 0x00000001L
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF__SHIFT 0x00000000
+#define SCLK_PWRMGT_CNTL__SCLK_LOW_D1_MASK 0x00000002L
+#define SCLK_PWRMGT_CNTL__SCLK_LOW_D1__SHIFT 0x00000001
+#define SCLK_PWRMGT_CNTL__FIR_RESET_MASK 0x00000010L
+#define SCLK_PWRMGT_CNTL__FIR_RESET__SHIFT 0x00000004
+#define SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK 0x00000020L
+#define SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL__SHIFT 0x00000005
+#define SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK 0x00000040L
+#define SCLK_PWRMGT_CNTL__FIR_TREND_MODE__SHIFT 0x00000006
+#define SCLK_PWRMGT_CNTL__DYN_GFX_CLK_OFF_EN_MASK 0x00000080L
+#define SCLK_PWRMGT_CNTL__DYN_GFX_CLK_OFF_EN__SHIFT 0x00000007
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_ON_MASK 0x00000100L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_ON__SHIFT 0x00000008
+#define SCLK_PWRMGT_CNTL__GFX_CLK_REQUEST_OFF_MASK 0x00000200L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_REQUEST_OFF__SHIFT 0x00000009
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_OFF_MASK 0x00000400L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_OFF__SHIFT 0x0000000A
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D1_MASK 0x00000800L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D1__SHIFT 0x0000000B
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D2_MASK 0x00001000L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D2__SHIFT 0x0000000C
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D3_MASK 0x00002000L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D3__SHIFT 0x0000000D
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN_MASK 0x00004000L
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN__SHIFT 0x0000000E
+
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK 0x00000001L
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0__SHIFT 0x00000000
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1_MASK 0x00000100L
@@ -285,6 +449,7 @@
#define SMC_RESP_1__SMC_RESP__SHIFT 0x00000000
#define SMC_RESP_2__SMC_RESP_MASK 0xffffffffL
#define SMC_RESP_2__SMC_RESP__SHIFT 0x00000000
+
#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT_MASK 0x000ff000L
#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT__SHIFT 0x0000000c
#define SPLL_CNTL_MODE__SPLL_ENSAT_MASK 0x00000010L
@@ -293,6 +458,8 @@
#define SPLL_CNTL_MODE__SPLL_FASTEN__SHIFT 0x00000003
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x00000002L
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV__SHIFT 0x00000001
+#define SPLL_CNTL_MODE__SPLL_REFCLK_SEL_MASK 0x0C000000L
+#define SPLL_CNTL_MODE__SPLL_REFCLK_SEL__SHIFT 0x0000001A
#define SPLL_CNTL_MODE__SPLL_RESET_EN_MASK 0x10000000L
#define SPLL_CNTL_MODE__SPLL_RESET_EN__SHIFT 0x0000001c
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x00000001L
@@ -303,10 +470,25 @@
#define SPLL_CNTL_MODE__SPLL_TEST__SHIFT 0x00000002
#define SPLL_CNTL_MODE__SPLL_VCO_MODE_MASK 0x60000000L
#define SPLL_CNTL_MODE__SPLL_VCO_MODE__SHIFT 0x0000001d
+
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK 0x0f000000L
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT 0x00000018
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX_MASK 0xf0000000L
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX__SHIFT 0x0000001c
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK 0x000000F0L
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT 0x00000004
+
+#define THM_CLK_CNTL__CMON_CLK_SEL_MASK 0x000000FFL
+#define THM_CLK_CNTL__CMON_CLK_SEL__SHIFT 0x00000000
+#define THM_CLK_CNTL__TMON_CLK_SEL_MASK 0x0000FF00L
+#define THM_CLK_CNTL__TMON_CLK_SEL__SHIFT 0x00000008
+
+#define MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK 0x000000FFL
+#define MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT 0x00000000
+#define MISC_CLK_CNTL__ZCLK_SEL_MASK 0x0000FF00L
+#define MISC_CLK_CNTL__ZCLK_SEL__SHIFT 0x00000008
+
#define THM_TMON0_DEBUG__DEBUG_RDI_MASK 0x0000001fL
#define THM_TMON0_DEBUG__DEBUG_RDI__SHIFT 0x00000000
#define THM_TMON0_DEBUG__DEBUG_Z_MASK 0x0000ffe0L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
index 14574112c469..c4aaa86a95e2 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
@@ -1147,6 +1147,22 @@
#define regUVD_DPG_LMA_CTL2_BASE_IDX 1
+// addressBlock: uvd_mmsch_dec
+// base address: 0x20d2c
+#define regMMSCH_VF_VMID 0x054b
+#define regMMSCH_VF_VMID_BASE_IDX 1
+#define regMMSCH_VF_CTX_ADDR_LO 0x054c
+#define regMMSCH_VF_CTX_ADDR_LO_BASE_IDX 1
+#define regMMSCH_VF_CTX_ADDR_HI 0x054d
+#define regMMSCH_VF_CTX_ADDR_HI_BASE_IDX 1
+#define regMMSCH_VF_CTX_SIZE 0x054e
+#define regMMSCH_VF_CTX_SIZE_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_HOST 0x0552
+#define regMMSCH_VF_MAILBOX_HOST_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_RESP 0x0553
+#define regMMSCH_VF_MAILBOX_RESP_BASE_IDX 1
+
+
// addressBlock: uvd_vcn_umsch_dec
// base address: 0x21500
#define regVCN_UMSCH_MES_CNTL 0x0740
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
index 5c119a6b87fb..bd7242e4e9c6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
@@ -5929,6 +5929,29 @@
#define UVD_DPG_LMA_CTL2__JPEG_WRITE_PTR_MASK 0x0000FE00L
+// addressBlock: uvd_mmsch_dec
+//MMSCH_VF_VMID
+#define MMSCH_VF_VMID__VF_CTX_VMID__SHIFT 0x0
+#define MMSCH_VF_VMID__VF_GPCOM_VMID__SHIFT 0x5
+#define MMSCH_VF_VMID__VF_CTX_VMID_MASK 0x0000001FL
+#define MMSCH_VF_VMID__VF_GPCOM_VMID_MASK 0x000003E0L
+//MMSCH_VF_CTX_ADDR_LO
+#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO__SHIFT 0x6
+#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO_MASK 0xFFFFFFC0L
+//MMSCH_VF_CTX_ADDR_HI
+#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI__SHIFT 0x0
+#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI_MASK 0xFFFFFFFFL
+//MMSCH_VF_CTX_SIZE
+#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE__SHIFT 0x0
+#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_HOST
+#define MMSCH_VF_MAILBOX_HOST__DATA__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_HOST__DATA_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_RESP
+#define MMSCH_VF_MAILBOX_RESP__RESP__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_RESP__RESP_MASK 0xFFFFFFFFL
+
+
// addressBlock: uvd_vcn_umsch_dec
//VCN_UMSCH_MES_CNTL
#define VCN_UMSCH_MES_CNTL__PIPE_ID__SHIFT 0x0
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index b78360a71bc9..52bac19fb404 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -4308,7 +4308,7 @@ typedef struct _ATOM_DPCD_INFO
// note2: From RV770, the memory is more than 32bit addressable, so we will change
// ucTableFormatRevision=1,ucTableContentRevision=4, the strcuture remains
// exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
-// (in offset to start of memory address) is KB aligned instead of byte aligend.
+// (in offset to start of memory address) is KB aligned instead of byte aligned.
// Note3:
/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged
constant across VGA or non VGA adapter,
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 0160d65f3f5e..2d1135bdc4b9 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -183,6 +183,7 @@ enum atom_dgpu_vram_type {
ATOM_DGPU_VRAM_TYPE_HBM2E = 0x61,
ATOM_DGPU_VRAM_TYPE_GDDR6 = 0x70,
ATOM_DGPU_VRAM_TYPE_HBM3 = 0x80,
+ ATOM_DGPU_VRAM_TYPE_HBM3E = 0x81,
};
enum atom_dp_vs_preemph_def{
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 21dc956b5f35..0f7542d7074b 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -128,6 +128,7 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_CPU_CLK,
AMDGPU_PP_SENSOR_VDDNB,
AMDGPU_PP_SENSOR_VDDGFX,
+ AMDGPU_PP_SENSOR_VDDBOARD,
AMDGPU_PP_SENSOR_UVD_VCLK,
AMDGPU_PP_SENSOR_UVD_DCLK,
AMDGPU_PP_SENSOR_VCE_ECCLK,
diff --git a/drivers/gpu/drm/amd/include/v11_structs.h b/drivers/gpu/drm/amd/include/v11_structs.h
index f8008270f813..3728389fc3be 100644
--- a/drivers/gpu/drm/amd/include/v11_structs.h
+++ b/drivers/gpu/drm/amd/include/v11_structs.h
@@ -535,8 +535,8 @@ struct v11_gfx_mqd {
uint32_t reserved_507; // offset: 507 (0x1FB)
uint32_t reserved_508; // offset: 508 (0x1FC)
uint32_t reserved_509; // offset: 509 (0x1FD)
- uint32_t reserved_510; // offset: 510 (0x1FE)
- uint32_t reserved_511; // offset: 511 (0x1FF)
+ uint32_t fence_address_lo; // offset: 510 (0x1FE)
+ uint32_t fence_address_hi; // offset: 511 (0x1FF)
};
struct v11_sdma_mqd {
@@ -1118,8 +1118,8 @@ struct v11_compute_mqd {
uint32_t reserved_443; // offset: 443 (0x1BB)
uint32_t reserved_444; // offset: 444 (0x1BC)
uint32_t reserved_445; // offset: 445 (0x1BD)
- uint32_t reserved_446; // offset: 446 (0x1BE)
- uint32_t reserved_447; // offset: 447 (0x1BF)
+ uint32_t fence_address_lo; // offset: 446 (0x1BE)
+ uint32_t fence_address_hi; // offset: 447 (0x1BF)
uint32_t gws_0_val; // offset: 448 (0x1C0)
uint32_t gws_1_val; // offset: 449 (0x1C1)
uint32_t gws_2_val; // offset: 450 (0x1C2)
diff --git a/drivers/gpu/drm/amd/include/v12_structs.h b/drivers/gpu/drm/amd/include/v12_structs.h
index 5eabab611b02..03a35f8a65b0 100644
--- a/drivers/gpu/drm/amd/include/v12_structs.h
+++ b/drivers/gpu/drm/amd/include/v12_structs.h
@@ -535,8 +535,8 @@ struct v12_gfx_mqd {
uint32_t reserved_507; // offset: 507 (0x1FB)
uint32_t reserved_508; // offset: 508 (0x1FC)
uint32_t reserved_509; // offset: 509 (0x1FD)
- uint32_t reserved_510; // offset: 510 (0x1FE)
- uint32_t reserved_511; // offset: 511 (0x1FF)
+ uint32_t fence_address_lo; // offset: 510 (0x1FE)
+ uint32_t fence_address_hi; // offset: 511 (0x1FF)
};
struct v12_sdma_mqd {
@@ -1118,8 +1118,8 @@ struct v12_compute_mqd {
uint32_t reserved_443; // offset: 443 (0x1BB)
uint32_t reserved_444; // offset: 444 (0x1BC)
uint32_t reserved_445; // offset: 445 (0x1BD)
- uint32_t reserved_446; // offset: 446 (0x1BE)
- uint32_t reserved_447; // offset: 447 (0x1BF)
+ uint32_t fence_address_lo; // offset: 446 (0x1BE)
+ uint32_t fence_address_hi; // offset: 447 (0x1BF)
uint32_t gws_0_val; // offset: 448 (0x1C0)
uint32_t gws_1_val; // offset: 449 (0x1C1)
uint32_t gws_2_val; // offset: 450 (0x1C2)
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 3533d43ed1e7..2148c8db5a59 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -329,6 +329,34 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
return ret;
}
+bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ bool support_link_reset = false;
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ support_link_reset = smu_link_reset_is_support(smu);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return support_link_reset;
+}
+
+int amdgpu_dpm_link_reset(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = -EOPNOTSUPP;
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_link_reset(smu);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return ret;
+}
+
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
enum PP_SMC_POWER_PROFILE type,
bool en)
@@ -780,6 +808,21 @@ int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
return ret;
}
+int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_reset_vcn(smu, inst_mask);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
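
A hedged sketch of a caller for the new VCN reset wrapper; the instance mask is one bit per VCN instance, and the surrounding reset-handler context is assumed:

/* Sketch: ask the SMU to reset VCN instance 0. Returns -EOPNOTSUPP
 * on parts without SW SMU support, mirroring the wrapper above. */
static int reset_vcn0(struct amdgpu_device *adev)
{
	return amdgpu_dpm_reset_vcn(adev, BIT(0));
}
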
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 922def51685b..edd9895b46c0 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1606,7 +1606,6 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
long throttling_logging_interval;
- unsigned long flags;
int ret = 0;
ret = kstrtol(buf, 0, &throttling_logging_interval);
@@ -1617,18 +1616,12 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
return -EINVAL;
if (throttling_logging_interval > 0) {
- raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
/*
* Reset the ratelimit timer internals.
* This can effectively restart the timer.
*/
- adev->throttling_logging_rs.interval =
- (throttling_logging_interval - 1) * HZ;
- adev->throttling_logging_rs.begin = 0;
- adev->throttling_logging_rs.printed = 0;
- adev->throttling_logging_rs.missed = 0;
- raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
-
+ ratelimit_state_reset_interval(&adev->throttling_logging_rs,
+ (throttling_logging_interval - 1) * HZ);
atomic_set(&adev->throttling_logging_enabled, 1);
} else {
atomic_set(&adev->throttling_logging_enabled, 0);
@@ -2944,6 +2937,23 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
return sysfs_emit(buf, "%d\n", vddgfx);
}
+static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ u32 vddboard;
+ int r;
+
+ /* get the voltage */
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
+ (void *)&vddboard);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%d\n", vddboard);
+}
+
static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -2951,6 +2961,12 @@ static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
return sysfs_emit(buf, "vddgfx\n");
}
+static ssize_t amdgpu_hwmon_show_vddboard_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "vddboard\n");
+}
static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -3294,6 +3310,8 @@ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0)
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, amdgpu_hwmon_show_vddboard, NULL, 0);
+static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, amdgpu_hwmon_show_vddboard_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
@@ -3341,6 +3359,8 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_in0_label.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_label.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in2_label.dev_attr.attr,
&sensor_dev_attr_power1_average.dev_attr.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
&sensor_dev_attr_power1_cap_max.dev_attr.attr,
@@ -3492,6 +3512,13 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_in1_label.dev_attr.attr))
return 0;
+ /* only a few boards support vddboard */
+ if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_in2_label.dev_attr.attr) &&
+ amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
+ (void *)&tmp) == -EOPNOTSUPP)
+ return 0;
+
/* no mclk on APUs other than gc 9,4,3*/
if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
(attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
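
From userspace the new board voltage appears as a standard hwmon channel; a minimal reader sketch, assuming the usual /sys/class/hwmon layout and a board that actually exposes in2_* (the visibility callback above hides it otherwise; the hwmon0 index is a placeholder):

/* Sketch: print vddboard in millivolts, per hwmon sysfs conventions. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/hwmon/hwmon0/in2_input", "r");
	int mv;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &mv) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("vddboard: %d mV\n", mv);
	return 0;
}
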
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 4c0f7ad14816..2c3c97587dd5 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -416,11 +416,13 @@ int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
+int amdgpu_dpm_link_reset(struct amdgpu_device *adev);
int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev);
int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev);
+bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev);
int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
@@ -607,5 +609,6 @@ ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
enum pp_pm_policy p_type, char *buf);
int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask);
bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev);
+int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask);
#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 1c25f3023e93..4c0e976004ba 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -30,16 +30,32 @@
#include "amdgpu_atombios.h"
#include "amdgpu_dpm_internal.h"
#include "amd_pcie.h"
-#include "sid.h"
+#include "atom.h"
+#include "gfx_v6_0.h"
#include "r600_dpm.h"
+#include "sid.h"
#include "si_dpm.h"
-#include "atom.h"
#include "../include/pptable.h"
#include <linux/math64.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <legacy_dpm.h>
+#include "bif/bif_3_0_d.h"
+#include "bif/bif_3_0_sh_mask.h"
+
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+
+#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+
+#include"gmc/gmc_6_0_d.h"
+#include"gmc/gmc_6_0_sh_mask.h"
+
+#include "smu/smu_6_0_d.h"
+#include "smu/smu_6_0_sh_mask.h"
+
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
@@ -2193,7 +2209,7 @@ static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
if (xclk == 0)
return 0;
- cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
+ cac_window = RREG32(mmCG_CAC_CTRL) & CG_CAC_CTRL__CAC_WINDOW_MASK;
cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
wintime = (cac_window_size * 100) / xclk;
@@ -2489,19 +2505,19 @@ static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
if (adev->pm.dpm.sq_ramping_threshold == 0)
return -EINVAL;
- if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (SQ_POWER_THROTTLE__MAX_POWER_MASK >> SQ_POWER_THROTTLE__MAX_POWER__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (SQ_POWER_THROTTLE__MIN_POWER_MASK >> SQ_POWER_THROTTLE__MIN_POWER__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK >> SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK >> SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK >> SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT))
enable_sq_ramping = false;
for (i = 0; i < state->performance_level_count; i++) {
@@ -2510,14 +2526,17 @@ static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&
enable_sq_ramping) {
- sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
- sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
- sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
- sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
- sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
+ sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER << SQ_POWER_THROTTLE__MAX_POWER__SHIFT;
+ sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MIN_POWER << SQ_POWER_THROTTLE__MIN_POWER__SHIFT;
+ sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA << SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT;
+ sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_STI_SIZE << SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT;
+ sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_LTI_RATIO << SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT;
} else {
- sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
- sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ sq_power_throttle |= SQ_POWER_THROTTLE__MAX_POWER_MASK |
+ SQ_POWER_THROTTLE__MIN_POWER_MASK;
+ sq_power_throttle2 |= SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK |
+ SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK |
+ SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;
}
smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
@@ -2761,9 +2780,9 @@ static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)
if (!cac_tables)
return -ENOMEM;
- reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
- reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
- WREG32(CG_CAC_CTRL, reg);
+ reg = RREG32(mmCG_CAC_CTRL) & ~CG_CAC_CTRL__CAC_WINDOW_MASK;
+ reg |= (si_pi->powertune_data->cac_window << CG_CAC_CTRL__CAC_WINDOW__SHIFT);
+ WREG32(mmCG_CAC_CTRL, reg);
si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;
si_pi->dyn_powertune_data.dc_pwr_value =
@@ -2962,10 +2981,10 @@ static int si_init_smc_spll_table(struct amdgpu_device *adev)
ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
if (ret)
break;
- p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
- fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
- clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
- clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
+ p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK) >> CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT;
+ fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK) >> CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT;
+ clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK) >> CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT;
+ clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK) >> CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT;
fb_div &= ~0x00001FFF;
fb_div >>= 1;
@@ -3669,10 +3688,10 @@ static bool si_is_special_1gb_platform(struct amdgpu_device *adev)
WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
- tmp = RREG32(MC_ARB_RAMCFG);
- row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
- column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
- bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
+ tmp = RREG32(mmMC_ARB_RAMCFG);
+ row = ((tmp & MC_ARB_RAMCFG__NOOFROWS_MASK) >> MC_ARB_RAMCFG__NOOFROWS__SHIFT) + 10;
+ column = ((tmp & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT) + 8;
+ bank = ((tmp & MC_ARB_RAMCFG__NOOFBANK_MASK) >> MC_ARB_RAMCFG__NOOFBANK__SHIFT) + 2;
density = (1 << (row + column - 20 + bank)) * width;
@@ -3756,11 +3775,11 @@ static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
}
if (want_thermal_protection) {
- WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
+ WREG32_P(mmCG_THERMAL_CTRL, dpm_event_src << CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT, ~CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK);
if (pi->thermal_protection)
- WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
} else {
- WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
}
}
@@ -3785,20 +3804,20 @@ static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
static void si_start_dpm(struct amdgpu_device *adev)
{
- WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK, ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK);
}
static void si_stop_dpm(struct amdgpu_device *adev)
{
- WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK);
}
static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)
{
if (enable)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK);
else
- WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK, ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK);
}
@@ -3838,7 +3857,7 @@ static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power
static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
PPSMC_Msg msg, u32 parameter)
{
- WREG32(SMC_SCRATCH0, parameter);
+ WREG32(mmSMC_SCRATCH0, parameter);
return amdgpu_si_send_msg_to_smc(adev, msg);
}
@@ -4023,12 +4042,12 @@ static void si_read_clock_registers(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
- si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
- si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
- si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
- si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
- si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
- si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
+ si_pi->clock_registers.cg_spll_func_cntl = RREG32(mmCG_SPLL_FUNC_CNTL);
+ si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(mmCG_SPLL_FUNC_CNTL_2);
+ si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(mmCG_SPLL_FUNC_CNTL_3);
+ si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(mmCG_SPLL_FUNC_CNTL_4);
+ si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(mmCG_SPLL_SPREAD_SPECTRUM);
+ si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(mmCG_SPLL_SPREAD_SPECTRUM_2);
si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
@@ -4044,14 +4063,14 @@ static void si_enable_thermal_protection(struct amdgpu_device *adev,
bool enable)
{
if (enable)
- WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
else
- WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
}
static void si_enable_acpi_power_management(struct amdgpu_device *adev)
{
- WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__STATIC_PM_EN_MASK, ~GENERAL_PWRMGT__STATIC_PM_EN_MASK);
}
#if 0
@@ -4132,9 +4151,9 @@ static void si_program_ds_registers(struct amdgpu_device *adev)
tmp = 0x1;
if (eg_pi->sclk_deep_sleep) {
- WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
- WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
- ~AUTOSCALE_ON_SS_CLEAR);
+ WREG32_P(mmMISC_CLK_CNTL, (tmp << MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT), ~MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK);
+ WREG32_P(mmCG_SPLL_AUTOSCALE_CNTL, CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK,
+ ~CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK);
}
}
@@ -4143,18 +4162,18 @@ static void si_program_display_gap(struct amdgpu_device *adev)
u32 tmp, pipe;
int i;
- tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+ tmp = RREG32(mmCG_DISPLAY_GAP_CNTL) & ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK);
if (adev->pm.dpm.new_active_crtc_count > 0)
- tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;
else
- tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+ tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;
if (adev->pm.dpm.new_active_crtc_count > 1)
- tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;
else
- tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+ tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;
- WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+ WREG32(mmCG_DISPLAY_GAP_CNTL, tmp);
tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
@@ -4189,10 +4208,10 @@ static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
if (enable) {
if (pi->sclk_ss)
- WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK, ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK);
} else {
- WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
- WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
+ WREG32_P(mmCG_SPLL_SPREAD_SPECTRUM, 0, ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK);
}
}
@@ -4214,15 +4233,15 @@ static void si_setup_bsp(struct amdgpu_device *adev)
&pi->pbsu);
- pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
- pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
+ pi->dsp = (pi->bsp << CG_BSP__BSP__SHIFT) | (pi->bsu << CG_BSP__BSU__SHIFT);
+ pi->psp = (pi->pbsp << CG_BSP__BSP__SHIFT) | (pi->pbsu << CG_BSP__BSU__SHIFT);
- WREG32(CG_BSP, pi->dsp);
+ WREG32(mmCG_BSP, pi->dsp);
}
static void si_program_git(struct amdgpu_device *adev)
{
- WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
+ WREG32_P(mmCG_GIT, R600_GICST_DFLT << CG_GIT__CG_GICST__SHIFT, ~CG_GIT__CG_GICST_MASK);
}
static void si_program_tp(struct amdgpu_device *adev)
@@ -4231,54 +4250,54 @@ static void si_program_tp(struct amdgpu_device *adev)
enum r600_td td = R600_TD_DFLT;
for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
- WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
+ WREG32(mmCG_FFCT_0 + i, (r600_utc[i] << CG_FFCT_0__UTC_0__SHIFT | r600_dtc[i] << CG_FFCT_0__DTC_0__SHIFT));
if (td == R600_TD_AUTO)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK);
else
- WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK, ~SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK);
if (td == R600_TD_UP)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK);
if (td == R600_TD_DOWN)
- WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK, ~SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK);
}
static void si_program_tpp(struct amdgpu_device *adev)
{
- WREG32(CG_TPC, R600_TPC_DFLT);
+ WREG32(mmCG_TPC, R600_TPC_DFLT);
}
static void si_program_sstp(struct amdgpu_device *adev)
{
- WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
+ WREG32(mmCG_SSP, (R600_SSTU_DFLT << CG_SSP__SSTU__SHIFT | R600_SST_DFLT << CG_SSP__SST__SHIFT));
}
static void si_enable_display_gap(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+ u32 tmp = RREG32(mmCG_DISPLAY_GAP_CNTL);
- tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
- tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
- DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
+ tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK);
+ tmp |= (R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT |
+ R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT);
- tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
- tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
- DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
- WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+ tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG_MASK);
+ tmp |= (R600_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG__SHIFT |
+ R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG__SHIFT);
+ WREG32(mmCG_DISPLAY_GAP_CNTL, tmp);
}
static void si_program_vc(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
- WREG32(CG_FTV, pi->vrc);
+ WREG32(mmCG_FTV, pi->vrc);
}
static void si_clear_vc(struct amdgpu_device *adev)
{
- WREG32(CG_FTV, 0);
+ WREG32(mmCG_FTV, 0);
}
static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
@@ -4735,7 +4754,7 @@ static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
u32 dram_rows;
u32 dram_refresh_rate;
u32 mc_arb_rfsh_rate;
- u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
+ u32 tmp = (RREG32(mmMC_ARB_RAMCFG) & MC_ARB_RAMCFG__NOOFROWS_MASK) >> MC_ARB_RAMCFG__NOOFROWS__SHIFT;
if (tmp >= 4)
dram_rows = 16384;
@@ -4909,7 +4928,7 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
si_populate_initial_mvdd_value(adev, &table->initialState.level.mvdd);
- reg = CG_R(0xffff) | CG_L(0);
+ reg = 0xffff << CG_AT__CG_R__SHIFT | 0 << CG_AT__CG_L__SHIFT;
table->initialState.level.aT = cpu_to_be32(reg);
table->initialState.level.bSP = cpu_to_be32(pi->dsp);
table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen;
@@ -4935,10 +4954,13 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
table->initialState.level.dpm2.BelowSafeInc = 0;
table->initialState.level.dpm2.PwrEfficiencyRatio = 0;
- reg = MIN_POWER_MASK | MAX_POWER_MASK;
+ reg = SQ_POWER_THROTTLE__MIN_POWER_MASK |
+ SQ_POWER_THROTTLE__MAX_POWER_MASK;
table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);
- reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ reg = SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK |
+ SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK |
+ SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;
table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
@@ -5057,8 +5079,8 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
- spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
- spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+ spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
+ spll_func_cntl_2 |= 4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT;
table->ACPIState.level.mclk.vDLL_CNTL =
cpu_to_be32(dll_cntl);
@@ -5102,10 +5124,10 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
table->ACPIState.level.dpm2.BelowSafeInc = 0;
table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0;
- reg = MIN_POWER_MASK | MAX_POWER_MASK;
+ reg = SQ_POWER_THROTTLE__MIN_POWER_MASK | SQ_POWER_THROTTLE__MAX_POWER_MASK;
table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);
- reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ reg = SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK |
+ SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK |
+ SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;
table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
@@ -5250,8 +5272,8 @@ static int si_init_smc_table(struct amdgpu_device *adev)
if (ret)
return ret;
- WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
- WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
+ WREG32(mmCG_ULV_CONTROL, ulv->cg_ulv_control);
+ WREG32(mmCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
lane_width = amdgpu_get_pcie_lanes(adev);
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
@@ -5294,16 +5316,16 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev,
do_div(tmp, reference_clock);
fbdiv = (u32) tmp;
- spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
- spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
- spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
+ spll_func_cntl &= ~(CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK | CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK);
+ spll_func_cntl |= dividers.ref_div << CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT;
+ spll_func_cntl |= dividers.post_div << CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT;
- spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
- spll_func_cntl_2 |= SCLK_MUX_SEL(2);
+ spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
+ spll_func_cntl_2 |= 2 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT;
- spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
- spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
- spll_func_cntl_3 |= SPLL_DITHEN;
+ spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
+ spll_func_cntl_3 |= fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT;
+ spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
if (pi->sclk_ss) {
struct amdgpu_atom_ss ss;
@@ -5314,12 +5336,12 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev,
u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
- cg_spll_spread_spectrum &= ~CLK_S_MASK;
- cg_spll_spread_spectrum |= CLK_S(clk_s);
- cg_spll_spread_spectrum |= SSEN;
+ cg_spll_spread_spectrum &= ~CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK;
+ cg_spll_spread_spectrum |= clk_s << CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT;
+ cg_spll_spread_spectrum |= CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
- cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
- cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+ cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK;
+ cg_spll_spread_spectrum_2 |= clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT;
}
}
@@ -5485,7 +5507,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
if (pi->mclk_stutter_mode_threshold &&
(pl->mclk <= pi->mclk_stutter_mode_threshold) &&
!eg_pi->uvd_enabled &&
- (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
+ (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
(adev->pm.dpm.new_active_crtc_count <= 2)) {
level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
}
@@ -5579,7 +5601,7 @@ static int si_populate_smc_t(struct amdgpu_device *adev,
return -EINVAL;
if (state->performance_level_count < 2) {
- a_t = CG_R(0xffff) | CG_L(0);
+ a_t = 0xffff << CG_AT__CG_R__SHIFT | 0 << CG_AT__CG_L__SHIFT;
smc_state->levels[0].aT = cpu_to_be32(a_t);
return 0;
}
@@ -5600,13 +5622,13 @@ static int si_populate_smc_t(struct amdgpu_device *adev,
t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
}
- a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
- a_t |= CG_R(t_l * pi->bsp / 20000);
+ a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_AT__CG_R_MASK;
+ a_t |= (t_l * pi->bsp / 20000) << CG_AT__CG_R__SHIFT;
smc_state->levels[i].aT = cpu_to_be32(a_t);
high_bsp = (i == state->performance_level_count - 2) ?
pi->pbsp : pi->bsp;
- a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
+ a_t = 0xffff << CG_AT__CG_R__SHIFT | (t_h * high_bsp / 20000) << CG_AT__CG_L__SHIFT;
smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
}
@@ -6180,9 +6202,9 @@ static int si_upload_mc_reg_table(struct amdgpu_device *adev,
static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
{
if (enable)
- WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK, ~GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK);
else
- WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK);
}
static enum si_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
@@ -6204,8 +6226,8 @@ static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
{
u32 speed_cntl;
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
- speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL) & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
+ speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
return (u16)speed_cntl;
}
@@ -6412,21 +6434,21 @@ static void si_dpm_setup_asic(struct amdgpu_device *adev)
static int si_thermal_enable_alert(struct amdgpu_device *adev,
bool enable)
{
- u32 thermal_int = RREG32(CG_THERMAL_INT);
+ u32 thermal_int = RREG32(mmCG_THERMAL_INT);
if (enable) {
PPSMC_Result result;
- thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
- WREG32(CG_THERMAL_INT, thermal_int);
+ thermal_int &= ~(CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK | CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK);
+ WREG32(mmCG_THERMAL_INT, thermal_int);
result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
if (result != PPSMC_Result_OK) {
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
return -EINVAL;
}
} else {
- thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
- WREG32(CG_THERMAL_INT, thermal_int);
+ thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK | CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
+ WREG32(mmCG_THERMAL_INT, thermal_int);
}
return 0;
@@ -6447,9 +6469,9 @@ static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
return -EINVAL;
}
- WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
- WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
- WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
+ WREG32_P(mmCG_THERMAL_INT, (high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT, ~CG_THERMAL_INT__DIG_THERM_INTH_MASK);
+ WREG32_P(mmCG_THERMAL_INT, (low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT, ~CG_THERMAL_INT__DIG_THERM_INTL_MASK);
+ WREG32_P(mmCG_THERMAL_CTRL, (high_temp / 1000) << CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT, ~CG_THERMAL_CTRL__DIG_THERM_DPM_MASK);
adev->pm.dpm.thermal.min_temp = low_temp;
adev->pm.dpm.thermal.max_temp = high_temp;
@@ -6463,20 +6485,20 @@ static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
u32 tmp;
if (si_pi->fan_ctrl_is_in_default_mode) {
- tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+ tmp = (RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
si_pi->fan_ctrl_default_mode = tmp;
- tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+ tmp = (RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK) >> CG_FDO_CTRL2__TMIN__SHIFT;
si_pi->t_min = tmp;
si_pi->fan_ctrl_is_in_default_mode = false;
}
- tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
- tmp |= TMIN(0);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
+ tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
- tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
- tmp |= FDO_PWM_MODE(mode);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+ tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
}
static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
@@ -6495,7 +6517,7 @@ static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
return 0;
}
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+ duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
if (duty100 == 0) {
adev->pm.dpm.fan.ucode_fan_control = false;
@@ -6531,7 +6553,7 @@ static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
reference_clock) / 1600);
fan_table.fdo_max = cpu_to_be16((u16)duty100);
- tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+ tmp = (RREG32(mmCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK) >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
fan_table.temp_src = (uint8_t)tmp;
ret = amdgpu_si_copy_bytes_to_smc(adev,
@@ -6590,8 +6612,8 @@ static int si_dpm_get_fan_speed_pwm(void *handle,
if (adev->pm.no_fan)
return -ENOENT;
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
- duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+ duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
+ duty = (RREG32(mmCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK) >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;
if (duty100 == 0)
return -EINVAL;
@@ -6621,7 +6643,7 @@ static int si_dpm_set_fan_speed_pwm(void *handle,
if (speed > 255)
return -EINVAL;
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+ duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
if (duty100 == 0)
return -EINVAL;
@@ -6630,9 +6652,9 @@ static int si_dpm_set_fan_speed_pwm(void *handle,
do_div(tmp64, 255);
duty = (u32)tmp64;
- tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
- tmp |= FDO_STATIC_DUTY(duty);
- WREG32(CG_FDO_CTRL0, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
+ tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
+ WREG32(mmCG_FDO_CTRL0, tmp);
return 0;
}
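For the duty computation touched above: the requested 0-255 speed is scaled onto the hardware's duty100 range. A minimal stand-alone check of the arithmetic (input values are hypothetical):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t speed = 128, duty100 = 100;		/* hypothetical inputs */
	uint64_t tmp64 = (uint64_t)speed * duty100;	/* as in the hunk */
	uint32_t duty = (uint32_t)(tmp64 / 255);	/* do_div() in the kernel */

	printf("duty = %u\n", duty);			/* prints 50 */
	return 0;
}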
@@ -6672,8 +6694,8 @@ static int si_dpm_get_fan_control_mode(void *handle, u32 *fan_mode)
if (si_pi->fan_is_controlled_by_smc)
return 0;
- tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
- *fan_mode = (tmp >> FDO_PWM_MODE_SHIFT);
+ tmp = RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+ *fan_mode = (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
return 0;
}
@@ -6691,7 +6713,7 @@ static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
if (adev->pm.fan_pulses_per_revolution == 0)
return -ENOENT;
- tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+ tach_period = (RREG32(mmCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK) >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
if (tach_period == 0)
return -ENOENT;
@@ -6720,9 +6742,9 @@ static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
si_fan_ctrl_stop_smc_fan_control(adev);
tach_period = 60 * xclk * 10000 / (8 * speed);
- tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
- tmp |= TARGET_PERIOD(tach_period);
- WREG32(CG_TACH_CTRL, tmp);
+ tmp = RREG32(mmCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
+ tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
+ WREG32(mmCG_TACH_CTRL, tmp);
si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
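As a sanity check on the tachometer math above, a sketch with hypothetical values (the 10 kHz unit for xclk is an assumption about how the reference clock is reported; it is not stated in this hunk):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t xclk = 2700;	/* assumed: 27 MHz reference clock in 10 kHz units */
	uint32_t speed = 3000;	/* target fan speed in RPM */
	uint32_t tach_period = 60 * xclk * 10000 / (8 * speed);

	printf("tach_period = %u\n", tach_period);	/* prints 67500 */
	return 0;
}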
@@ -6736,13 +6758,13 @@ static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
u32 tmp;
if (!si_pi->fan_ctrl_is_in_default_mode) {
- tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
- tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+ tmp |= si_pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
- tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
- tmp |= TMIN(si_pi->t_min);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
+ tmp |= si_pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
si_pi->fan_ctrl_is_in_default_mode = true;
}
}
@@ -6760,14 +6782,14 @@ static void si_thermal_initialize(struct amdgpu_device *adev)
u32 tmp;
if (adev->pm.fan_pulses_per_revolution) {
- tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
- tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution -1);
- WREG32(CG_TACH_CTRL, tmp);
+ tmp = RREG32(mmCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
+ tmp |= (adev->pm.fan_pulses_per_revolution - 1) << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
+ WREG32(mmCG_TACH_CTRL, tmp);
}
- tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
- tmp |= TACH_PWM_RESP_RATE(0x28);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
+ tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
}
static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
@@ -7530,8 +7552,8 @@ static void si_dpm_debugfs_print_current_performance_level(void *handle,
struct si_ps *ps = si_get_ps(rps);
struct rv7xx_pl *pl;
u32 current_index =
- (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
- CURRENT_STATE_INDEX_SHIFT;
+ (RREG32(mmTARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK) >>
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT;
if (current_index >= ps->performance_level_count) {
seq_printf(m, "invalid dpm profile %d\n", current_index);
@@ -7554,14 +7576,14 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int |= THERM_INT_MASK_HIGH;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int &= ~THERM_INT_MASK_HIGH;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
default:
break;
@@ -7571,14 +7593,14 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int |= THERM_INT_MASK_LOW;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int &= ~THERM_INT_MASK_LOW;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
default:
break;
@@ -7883,8 +7905,8 @@ static int si_dpm_get_temp(void *handle)
int actual_temp = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
- CTF_TEMP_SHIFT;
+ temp = (RREG32(mmCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
+ CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
if (temp & 0x200)
actual_temp = 255;
@@ -8014,8 +8036,8 @@ static int si_dpm_read_sensor(void *handle, int idx,
struct si_ps *ps = si_get_ps(rps);
uint32_t sclk, mclk;
u32 pl_index =
- (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
- CURRENT_STATE_INDEX_SHIFT;
+ (RREG32(mmTARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK) >>
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
index 8f994ffa9cd1..4e65ab9e931c 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
@@ -30,6 +30,12 @@
#include "amdgpu_ucode.h"
#include "sislands_smc.h"
+#include "smu/smu_6_0_d.h"
+#include "smu/smu_6_0_sh_mask.h"
+
+#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+
static int si_set_smc_sram_address(struct amdgpu_device *adev,
u32 smc_address, u32 limit)
{
@@ -38,8 +44,8 @@ static int si_set_smc_sram_address(struct amdgpu_device *adev,
if ((smc_address + 3) > limit)
return -EINVAL;
- WREG32(SMC_IND_INDEX_0, smc_address);
- WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+ WREG32(mmSMC_IND_INDEX_0, smc_address);
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
return 0;
}
@@ -68,7 +74,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
if (ret)
goto done;
- WREG32(SMC_IND_DATA_0, data);
+ WREG32(mmSMC_IND_DATA_0, data);
src += 4;
byte_count -= 4;
@@ -83,7 +89,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
if (ret)
goto done;
- original_data = RREG32(SMC_IND_DATA_0);
+ original_data = RREG32(mmSMC_IND_DATA_0);
extra_shift = 8 * (4 - byte_count);
while (byte_count > 0) {
@@ -99,7 +105,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
if (ret)
goto done;
- WREG32(SMC_IND_DATA_0, data);
+ WREG32(mmSMC_IND_DATA_0, data);
}
done:
@@ -121,10 +127,10 @@ void amdgpu_si_reset_smc(struct amdgpu_device *adev)
{
u32 tmp;
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |
RST_REG;
@@ -170,16 +176,16 @@ PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
if (!amdgpu_si_is_smc_running(adev))
return PPSMC_Result_Failed;
- WREG32(SMC_MESSAGE_0, msg);
+ WREG32(mmSMC_MESSAGE_0, msg);
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(SMC_RESP_0);
+ tmp = RREG32(mmSMC_RESP_0);
if (tmp != 0)
break;
udelay(1);
}
- return (PPSMC_Result)RREG32(SMC_RESP_0);
+ return (PPSMC_Result)RREG32(mmSMC_RESP_0);
}
PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
@@ -225,18 +231,18 @@ int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
return -EINVAL;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(SMC_IND_INDEX_0, ucode_start_address);
- WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
+ WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
while (ucode_size >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
- WREG32(SMC_IND_DATA_0, data);
+ WREG32(mmSMC_IND_DATA_0, data);
src += 4;
ucode_size -= 4;
}
- WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return 0;
@@ -251,7 +257,7 @@ int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
spin_lock_irqsave(&adev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(adev, smc_address, limit);
if (ret == 0)
- *value = RREG32(SMC_IND_DATA_0);
+ *value = RREG32(mmSMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return ret;
@@ -266,7 +272,7 @@ int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
spin_lock_irqsave(&adev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(adev, smc_address, limit);
if (ret == 0)
- WREG32(SMC_IND_DATA_0, value);
+ WREG32(mmSMC_IND_DATA_0, value);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return ret;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index 4bd92fd782be..8d40ed0f0e83 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -143,6 +143,10 @@ int atomctrl_initialize_mc_reg_table(
vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *)
smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
+ if (!vram_info) {
+ pr_err("Could not retrieve the VramInfo table!");
+ return -EINVAL;
+ }
if (module_index >= vram_info->ucNumOfVRAMModule) {
pr_err("Invalid VramInfo table.");
@@ -180,6 +184,10 @@ int atomctrl_initialize_mc_reg_table_v2_2(
vram_info = (ATOM_VRAM_INFO_HEADER_V2_2 *)
smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
+ if (!vram_info) {
+ pr_err("Could not retrieve the VramInfo table!");
+ return -EINVAL;
+ }
if (module_index >= vram_info->ucNumOfVRAMModule) {
pr_err("Invalid VramInfo table.");
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
index 5a010cd38303..baf51cd82a35 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
@@ -46,42 +46,6 @@ static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
}
-int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
-{
- uint32_t data;
- uint32_t addr;
- uint8_t *dest_byte;
- uint8_t i, data_byte[4] = {0};
- uint32_t *pdata = (uint32_t *)&data_byte;
-
- PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
- PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
-
- addr = smc_start_address;
-
- while (byte_count >= 4) {
- smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
-
- *dest = PP_SMC_TO_HOST_UL(data);
-
- dest += 1;
- byte_count -= 4;
- addr += 4;
- }
-
- if (byte_count) {
- smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
- *pdata = PP_SMC_TO_HOST_UL(data);
- /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */
- dest_byte = (uint8_t *)dest;
- for (i = 0; i < byte_count; i++)
- dest_byte[i] = data_byte[i];
- }
-
- return 0;
-}
-
-
int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h
index e7303dc8c260..63e428ceaee4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h
@@ -53,8 +53,6 @@ struct smu7_smumgr {
};
-int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
- uint32_t *dest, uint32_t byte_count, uint32_t limit);
int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit);
int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 46cce1d2aaf3..f24a1d8c77db 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -3432,15 +3432,15 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
return ret;
}
-bool smu_mode2_reset_is_support(struct smu_context *smu)
+bool smu_link_reset_is_support(struct smu_context *smu)
{
bool ret = false;
if (!smu->pm_enabled)
return false;
- if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
- ret = smu->ppt_funcs->mode2_reset_is_support(smu);
+ if (smu->ppt_funcs && smu->ppt_funcs->link_reset_is_support)
+ ret = smu->ppt_funcs->link_reset_is_support(smu);
return ret;
}
@@ -3475,6 +3475,19 @@ static int smu_mode2_reset(void *handle)
return ret;
}
+int smu_link_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return -EOPNOTSUPP;
+
+ if (smu->ppt_funcs->link_reset)
+ ret = smu->ppt_funcs->link_reset(smu);
+
+ return ret;
+}
+
static int smu_enable_gfx_features(void *handle)
{
struct smu_context *smu = handle;
@@ -3975,3 +3988,11 @@ int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+
+int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
+{
+ if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn)
+ smu->ppt_funcs->dpm_reset_vcn(smu, inst_mask);
+
+ return 0;
+}
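Neither new export gains a caller in this hunk; purely as a hedged illustration (the call site and its error handling are assumptions, not part of the patch), the wrappers would be used along these lines, with declarations coming from amdgpu_smu.h:

/* Hypothetical caller sketch -- not driver code from this patch. */
static int try_link_and_vcn_reset(struct smu_context *smu, uint32_t vcn_mask)
{
	int r;

	if (smu_link_reset_is_support(smu)) {
		r = smu_link_reset(smu);	/* -EOPNOTSUPP if pm disabled */
		if (r)
			return r;
	}

	return smu_reset_vcn(smu, vcn_mask);	/* no-op without the ppt hook */
}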
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index dd6d0e7aa242..d47e32ae4671 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -438,9 +438,11 @@ struct mclock_latency_table {
};
enum smu_reset_mode {
- SMU_RESET_MODE_0,
- SMU_RESET_MODE_1,
- SMU_RESET_MODE_2,
+ SMU_RESET_MODE_0,
+ SMU_RESET_MODE_1,
+ SMU_RESET_MODE_2,
+ SMU_RESET_MODE_3,
+ SMU_RESET_MODE_4,
};
enum smu_baco_state {
@@ -1229,10 +1231,11 @@ struct pptable_funcs {
* @mode1_reset_is_support: Check if GPU supports mode1 reset.
*/
bool (*mode1_reset_is_support)(struct smu_context *smu);
+
/**
- * @mode2_reset_is_support: Check if GPU supports mode2 reset.
+ * @link_reset_is_support: Check if GPU supports link reset.
*/
- bool (*mode2_reset_is_support)(struct smu_context *smu);
+ bool (*link_reset_is_support)(struct smu_context *smu);
/**
* @mode1_reset: Perform mode1 reset.
@@ -1252,6 +1255,13 @@ struct pptable_funcs {
int (*enable_gfx_features)(struct smu_context *smu);
/**
+ * @link_reset: Perform link reset.
+ *
+ * The reset is performed by the gfx device driver.
+ */
+ int (*link_reset)(struct smu_context *smu);
+
+ /**
* @get_dpm_ultimate_freq: Get the hard frequency range of a clock
* domain in MHz.
*/
@@ -1383,6 +1393,11 @@ struct pptable_funcs {
bool (*reset_sdma_is_supported)(struct smu_context *smu);
/**
+ * @dpm_reset_vcn: message SMU to soft-reset a VCN instance.
+ */
+ int (*dpm_reset_vcn)(struct smu_context *smu, uint32_t inst_mask);
+
+ /**
* @get_ecc_table: message SMU to get ECC INFO table.
*/
ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
@@ -1601,8 +1616,9 @@ int smu_get_power_limit(void *handle,
enum pp_power_type pp_power_type);
bool smu_mode1_reset_is_support(struct smu_context *smu);
-bool smu_mode2_reset_is_support(struct smu_context *smu);
+bool smu_link_reset_is_support(struct smu_context *smu);
int smu_mode1_reset(struct smu_context *smu);
+int smu_link_reset(struct smu_context *smu);
extern const struct amd_ip_funcs smu_ip_funcs;
@@ -1643,6 +1659,7 @@ int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
int smu_send_rma_reason(struct smu_context *smu);
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
bool smu_reset_sdma_is_supported(struct smu_context *smu);
+int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index d26f35119a12..3d9e5e967c94 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -459,4 +459,11 @@ typedef struct __attribute__((packed, aligned(4))) {
uint64_t AccGfxclkBelowHostLimit;
} VfMetricsTable_t;
+#pragma pack(push, 4)
+typedef struct {
+ // Telemetry
+ uint32_t InputTelemetryVoltageInmV;
+} StaticMetricsTable_t;
+#pragma pack(pop)
+
#endif
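The #pragma pack(push, 4)/#pragma pack(pop) pair pins the table layout to 4-byte packing so the struct matches the PMFW's shared-memory view regardless of compiler defaults. A minimal demonstration (the example struct mirrors StaticMetricsTable_t):

#include <stdio.h>
#include <stdint.h>

#pragma pack(push, 4)
struct static_metrics_example {
	uint32_t InputTelemetryVoltageInmV;	/* telemetry field, as in the header */
};
#pragma pack(pop)

int main(void)
{
	/* 4-byte packing caps member alignment at 4, so this struct is
	 * exactly 4 bytes -- the firmware table size, with no padding. */
	printf("%zu\n", sizeof(struct static_metrics_example));
	return 0;
}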
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 288b2576432b..41f268313613 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -94,7 +94,9 @@
#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
#define PPSMC_MSG_SetThrottlingPolicy 0x44
#define PPSMC_MSG_ResetSDMA 0x4D
-#define PPSMC_Message_Count 0x4E
+#define PPSMC_MSG_ResetVCN 0x4E
+#define PPSMC_MSG_GetStaticMetricsTable 0x59
+#define PPSMC_Message_Count 0x5A
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index c9dee09395e3..eefdaa0b5df6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -277,6 +277,7 @@
__SMU_DUMMY_MAP(MALLPowerController), \
__SMU_DUMMY_MAP(MALLPowerState), \
__SMU_DUMMY_MAP(ResetSDMA), \
+ __SMU_DUMMY_MAP(ResetVCN), \
__SMU_DUMMY_MAP(GetStaticMetricsTable),
#undef __SMU_DUMMY_MAP
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index ed8304d82831..56ae555bb52a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
@@ -281,11 +281,6 @@ int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
struct smu_11_0_dpm_table *single_dpm_table);
-int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t *min_value,
- uint32_t *max_value);
-
int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu);
uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index cd03caffe317..4263798d716b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -112,6 +112,7 @@ struct smu_13_0_dpm_context {
uint32_t workload_policy_mask;
uint32_t dcef_min_ds_clk;
uint64_t caps;
+ uint32_t board_volt;
};
enum smu_13_0_power_state {
@@ -162,8 +163,6 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu);
int smu_v13_0_system_features_control(struct smu_context *smu,
bool en);
-int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count);
-
int smu_v13_0_set_allowed_mask(struct smu_context *smu);
int smu_v13_0_notify_display_change(struct smu_context *smu);
@@ -183,13 +182,6 @@ int smu_v13_0_disable_thermal_alert(struct smu_context *smu);
int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value);
-int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk);
-
-int
-smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
- struct pp_display_clock_request
- *clock_req);
-
uint32_t
smu_v13_0_get_fan_control_mode(struct smu_context *smu);
@@ -226,11 +218,6 @@ int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max, bool automatic);
-int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max);
-
int smu_v13_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level);
@@ -310,14 +297,6 @@ int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu,
uint32_t *value);
void smu_v13_0_interrupt_work(struct smu_context *smu);
-bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
-int smu_v13_0_12_get_max_metrics_size(void);
-int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
-int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
- MetricsMember_t member,
- uint32_t *value);
-ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table);
-extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
-extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
+void smu_v13_0_reset_custom_level(struct smu_context *smu);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 453952cdc353..9ad46f545d15 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1347,7 +1347,7 @@ static int arcturus_get_power_limit(struct smu_context *smu,
*default_power_limit = power_limit;
if (max_power_limit)
*max_power_limit = power_limit;
- /**
+ /*
* No lower bound is imposed on the limit. Any unreasonable limit set
* will result in frequent throttling.
*/
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 19a25fdc2f5b..115e3fa456bc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -3089,11 +3089,6 @@ static int sienna_cichlid_stb_get_data_direct(struct smu_context *smu,
return 0;
}
-static bool sienna_cichlid_is_mode2_reset_supported(struct smu_context *smu)
-{
- return true;
-}
-
static int sienna_cichlid_mode2_reset(struct smu_context *smu)
{
int ret = 0, index;
@@ -3229,7 +3224,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_default_config_table_settings = sienna_cichlid_get_default_config_table_settings,
.set_config_table = sienna_cichlid_set_config_table,
.get_unique_id = sienna_cichlid_get_unique_id,
- .mode2_reset_is_support = sienna_cichlid_is_mode2_reset_supported,
.mode2_reset = sienna_cichlid_mode2_reset,
};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 25fabf336a64..78e4186d06cc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -2059,45 +2059,6 @@ int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
return 0;
}
-int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t *min_value,
- uint32_t *max_value)
-{
- uint32_t level_count = 0;
- int ret = 0;
-
- if (!min_value && !max_value)
- return -EINVAL;
-
- if (min_value) {
- /* by default, level 0 clock value as min value */
- ret = smu_v11_0_get_dpm_freq_by_index(smu,
- clk_type,
- 0,
- min_value);
- if (ret)
- return ret;
- }
-
- if (max_value) {
- ret = smu_v11_0_get_dpm_level_count(smu,
- clk_type,
- &level_count);
- if (ret)
- return ret;
-
- ret = smu_v11_0_get_dpm_freq_by_index(smu,
- clk_type,
- level_count - 1,
- max_value);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 83163d7c7f00..6de653d2ed62 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1270,6 +1270,7 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
struct smu_13_0_dpm_table *gfx_table =
&dpm_context->dpm_tables.gfx_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ int r;
/* Disable determinism if switching to another mode */
if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
@@ -1282,7 +1283,11 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
return 0;
-
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ r = smu_v13_0_set_performance_level(smu, level);
+ if (!r)
+ smu_v13_0_reset_custom_level(smu);
+ return r;
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_LOW:
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
@@ -1423,7 +1428,11 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
min_clk = dpm_context->dpm_tables.gfx_table.min;
max_clk = dpm_context->dpm_tables.gfx_table.max;
- return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
+ ret = aldebaran_set_soft_freq_limited_range(
+ smu, SMU_GFXCLK, min_clk, max_clk, false);
+ if (ret)
+ return ret;
+ smu_v13_0_reset_custom_level(smu);
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -1976,11 +1985,6 @@ static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
return true;
}
-static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu)
-{
- return true;
-}
-
static int aldebaran_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state)
{
@@ -2086,7 +2090,6 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = aldebaran_get_gpu_metrics,
.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
- .mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
.smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr,
.mode1_reset = aldebaran_mode1_reset,
.set_mp1_state = aldebaran_set_mp1_state,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index ba5a9012dbd5..a7167668d189 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -709,18 +709,6 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu)
return ret;
}
-int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
-{
- int ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
- if (ret)
- dev_err(smu->adev->dev, "SMU13 attempt to set divider for DCEFCLK Failed!");
-
- return ret;
-}
-
int smu_v13_0_set_driver_table_location(struct smu_context *smu)
{
struct smu_table *driver_table = &smu->smu_table.driver_table;
@@ -761,18 +749,6 @@ int smu_v13_0_set_tool_table_location(struct smu_context *smu)
return ret;
}
-int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count)
-{
- int ret = 0;
-
- if (!smu->pm_enabled)
- return ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
-
- return ret;
-}
-
int smu_v13_0_set_allowed_mask(struct smu_context *smu)
{
struct smu_feature *feature = &smu->smu_feature;
@@ -1073,56 +1049,6 @@ int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
}
-int
-smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
- struct pp_display_clock_request
- *clock_req)
-{
- enum amd_pp_clock_type clk_type = clock_req->clock_type;
- int ret = 0;
- enum smu_clk_type clk_select = 0;
- uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
-
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
- smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
- switch (clk_type) {
- case amd_pp_dcef_clock:
- clk_select = SMU_DCEFCLK;
- break;
- case amd_pp_disp_clock:
- clk_select = SMU_DISPCLK;
- break;
- case amd_pp_pixel_clock:
- clk_select = SMU_PIXCLK;
- break;
- case amd_pp_phy_clock:
- clk_select = SMU_PHYCLK;
- break;
- case amd_pp_mem_clock:
- clk_select = SMU_UCLK;
- break;
- default:
- dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
- ret = -EINVAL;
- break;
- }
-
- if (ret)
- goto failed;
-
- if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
- return 0;
-
- ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);
-
- if (clk_select == SMU_UCLK)
- smu->hard_min_uclk_req_from_dal = clk_freq;
- }
-
-failed:
- return ret;
-}
-
uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu)
{
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
@@ -1647,45 +1573,6 @@ out:
return ret;
}
-int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
-{
- int ret = 0, clk_id = 0;
- uint32_t param;
-
- if (min <= 0 && max <= 0)
- return -EINVAL;
-
- if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
- return 0;
-
- clk_id = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_CLK,
- clk_type);
- if (clk_id < 0)
- return clk_id;
-
- if (max > 0) {
- param = (uint32_t)((clk_id << 16) | (max & 0xffff));
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
- param, NULL);
- if (ret)
- return ret;
- }
-
- if (min > 0) {
- param = (uint32_t)((clk_id << 16) | (min & 0xffff));
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
- param, NULL);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
int smu_v13_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level)
{
@@ -2595,3 +2482,13 @@ int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
return ret;
}
+
+void smu_v13_0_reset_custom_level(struct smu_context *smu)
+{
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+
+ pstate_table->uclk_pstate.custom.min = 0;
+ pstate_table->uclk_pstate.custom.max = 0;
+ pstate_table->gfxclk_pstate.custom.min = 0;
+ pstate_table->gfxclk_pstate.custom.max = 0;
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
index 238bd71baa6d..533d58e57d05 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -187,26 +187,6 @@ int smu_v13_0_12_get_max_metrics_size(void)
return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t));
}
-static int smu_v13_0_12_get_static_metrics_table(struct smu_context *smu)
-{
- struct smu_table_context *smu_table = &smu->smu_table;
- uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
- struct smu_table *table = &smu_table->driver_table;
- int ret;
-
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL);
- if (ret) {
- dev_info(smu->adev->dev,
- "Failed to export static metrics table!\n");
- return ret;
- }
-
- amdgpu_asic_invalidate_hdp(smu->adev, NULL);
- memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
-
- return 0;
-}
-
int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -217,7 +197,7 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
int ret, i;
if (!pptable->Init) {
- ret = smu_v13_0_12_get_static_metrics_table(smu);
+ ret = smu_v13_0_6_get_static_metrics_table(smu);
if (ret)
return ret;
@@ -345,8 +325,8 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_7 *gpu_metrics =
- (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_8 *gpu_metrics =
+ (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
u8 num_jpeg_rings_gpu_metrics;
@@ -357,7 +337,7 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
memcpy(metrics, smu_table->metrics_table, sizeof(MetricsTable_t));
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
gpu_metrics->temperature_hotspot =
SMUQ10_ROUND(metrics->MaxSocketTemperature);
@@ -474,6 +454,16 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
SMUQ10_ROUND(metrics->GfxBusy[inst]);
gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
idx++;
}
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index c478b3be37af..7d4ff09be7e8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -101,24 +101,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin");
#define MCA_BANK_IPID(_ip, _hwid, _type) \
[AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }
-#define SMU_CAP(x) SMU_13_0_6_CAPS_##x
-
-enum smu_v13_0_6_caps {
- SMU_CAP(DPM),
- SMU_CAP(DPM_POLICY),
- SMU_CAP(OTHER_END_METRICS),
- SMU_CAP(SET_UCLK_MAX),
- SMU_CAP(PCIE_METRICS),
- SMU_CAP(MCA_DEBUG_MODE),
- SMU_CAP(PER_INST_METRICS),
- SMU_CAP(CTF_LIMIT),
- SMU_CAP(RMA_MSG),
- SMU_CAP(ACA_SYND),
- SMU_CAP(SDMA_RESET),
- SMU_CAP(STATIC_METRICS),
- SMU_CAP(ALL),
-};
-
struct mca_bank_ipid {
enum amdgpu_mca_ip ip;
uint16_t hwid;
@@ -194,6 +176,8 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
+ MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
+ MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 0),
};
// clang-format on
@@ -299,8 +283,8 @@ static inline void smu_v13_0_6_cap_clear(struct smu_context *smu,
dpm_context->caps &= ~BIT_ULL(cap);
}
-static inline bool smu_v13_0_6_cap_supported(struct smu_context *smu,
- enum smu_v13_0_6_caps cap)
+bool smu_v13_0_6_cap_supported(struct smu_context *smu,
+ enum smu_v13_0_6_caps cap)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
@@ -353,6 +337,9 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
if (fw_ver >= 0x00561E00)
smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
+
+ if (fw_ver >= 0x00562500)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
}
static void smu_v13_0_6_init_caps(struct smu_context *smu)
@@ -402,6 +389,13 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
if (fw_ver < 0x00555600)
smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
+ if ((pgm == 7 && fw_ver >= 0x7550E00) ||
+ (pgm == 0 && fw_ver >= 0x00557E00))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+ if (fw_ver >= 0x00557F01) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+ }
}
if (((pgm == 7) && (fw_ver >= 0x7550700)) ||
((pgm == 0) && (fw_ver >= 0x00557900)) ||
@@ -525,7 +519,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
return -ENOMEM;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_7);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8);
smu_table->gpu_metrics_table =
kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table) {
@@ -747,9 +741,43 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
return pm_metrics->common_header.structure_size;
}
+static void smu_v13_0_6_fill_static_metrics_table(struct smu_context *smu,
+ StaticMetricsTable_t *static_metrics)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+ if (!static_metrics->InputTelemetryVoltageInmV) {
+ dev_warn(smu->adev->dev, "Invalid board voltage %d\n",
+ static_metrics->InputTelemetryVoltageInmV);
+ }
+
+ dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV;
+}
+
+int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+ struct smu_table *table = &smu_table->driver_table;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL);
+ if (ret) {
+ dev_info(smu->adev->dev,
+ "Failed to export static metrics table!\n");
+ return ret;
+ }
+
+ amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
+
+ return 0;
+}
+
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table;
MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table;
MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table;
MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table;
@@ -759,7 +787,8 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
int ret, i, retry = 100;
uint32_t table_version;
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
return smu_v13_0_12_setup_driver_pptable(smu);
/* Store one-time values in driver PPTable */
@@ -813,6 +842,12 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0];
pptable->Init = true;
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ ret = smu_v13_0_6_get_static_metrics_table(smu);
+ if (ret)
+ return ret;
+ smu_v13_0_6_fill_static_metrics_table(smu, static_metrics);
+ }
}
return 0;
@@ -1142,7 +1177,8 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
if (ret)
return ret;
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
return smu_v13_0_12_get_smu_metrics_data(smu, member, value);
/* For clocks with multiple instances, only report the first one */
@@ -1616,6 +1652,7 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor, void *data,
uint32_t *size)
{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
int ret = 0;
if (amdgpu_ras_intr_triggered())
@@ -1660,6 +1697,15 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
*size = 4;
break;
+	case AMDGPU_PP_SENSOR_VDDBOARD:
+		if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) {
+			*(uint32_t *)data = dpm_context->board_volt;
+			*size = 4;
+		} else {
+			ret = -EOPNOTSUPP;
+		}
+		break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
default:
ret = -EOPNOTSUPP;
@@ -1927,7 +1973,7 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
return ret;
pstate_table->uclk_pstate.curr.max = uclk_table->max;
}
- pstate_table->uclk_pstate.custom.max = 0;
+ smu_v13_0_reset_custom_level(smu);
return 0;
case AMD_DPM_FORCED_LEVEL_MANUAL:
@@ -2140,7 +2186,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
smu, SMU_UCLK, min_clk, max_clk, false);
if (ret)
return ret;
- pstate_table->uclk_pstate.custom.max = 0;
+ smu_v13_0_reset_custom_level(smu);
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -2486,8 +2532,8 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_7 *gpu_metrics =
- (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_8 *gpu_metrics =
+ (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
int version = smu_v13_0_6_get_metrics_version(smu);
int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
@@ -2507,13 +2553,14 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
return ret;
}
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
return smu_v13_0_12_get_gpu_metrics(smu, table);
metrics_v1 = (MetricsTableV1_t *)metrics_v0;
metrics_v2 = (MetricsTableV2_t *)metrics_v0;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
gpu_metrics->temperature_hotspot =
SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version));
@@ -2666,6 +2713,20 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc,
version)[inst]);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] =
+ SMUQ10_ROUND
+ (metrics_v0->GfxclkBelowHostLimitPptAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] =
+ SMUQ10_ROUND
+ (metrics_v0->GfxclkBelowHostLimitThmAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] =
+ SMUQ10_ROUND
+ (metrics_v0->GfxclkLowUtilizationAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] =
+ SMUQ10_ROUND
+ (metrics_v0->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
idx++;
}
}
@@ -2844,14 +2905,29 @@ static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
return ret;
}
+static int smu_v13_0_6_link_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_4, NULL);
+ return ret;
+}
+
static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
{
return true;
}
-static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu)
+static inline bool smu_v13_0_6_is_link_reset_supported(struct smu_context *smu)
{
- return true;
+	struct amdgpu_device *adev = smu->adev;
+
+	return (adev->pdev->device & 0xF) == 0x1;
}
static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
@@ -2924,6 +3000,19 @@ static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
+{
+ int ret = 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ResetVCN, inst_mask, NULL);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "failed to send ResetVCN event with mask 0x%x\n",
+ inst_mask);
+ return ret;
+}
+
static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -3586,9 +3675,10 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_pm_metrics = smu_v13_0_6_get_pm_metrics,
.get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
- .mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
+ .link_reset_is_support = smu_v13_0_6_is_link_reset_supported,
.mode1_reset = smu_v13_0_6_mode1_reset,
.mode2_reset = smu_v13_0_6_mode2_reset,
+ .link_reset = smu_v13_0_6_link_reset,
.wait_for_event = smu_v13_0_wait_for_event,
.i2c_init = smu_v13_0_6_i2c_control_init,
.i2c_fini = smu_v13_0_6_i2c_control_fini,
@@ -3596,6 +3686,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.send_rma_reason = smu_v13_0_6_send_rma_reason,
.reset_sdma = smu_v13_0_6_reset_sdma,
.reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported,
+ .dpm_reset_vcn = smu_v13_0_6_reset_vcn,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
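
Aside: a hypothetical usage sketch, not part of the patch, of how the new AMDGPU_PP_SENSOR_VDDBOARD path added above would be consumed; the function is static, so real callers reach it through the amdgpu_dpm sensor interface:

static void example_read_board_voltage(struct smu_context *smu)
{
	uint32_t mv, size = sizeof(mv);

	/* returns -EOPNOTSUPP unless SMU_CAP(BOARD_VOLTAGE) is set */
	if (!smu_v13_0_6_read_sensor(smu, AMDGPU_PP_SENSOR_VDDBOARD,
				     &mv, &size))
		dev_info(smu->adev->dev, "board input voltage: %u mV\n", mv);
}
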
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index 83745909e564..d151bcd0cca7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -26,6 +26,7 @@
#define SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL 0x2
#define SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL 0x4
#define SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL 0x2
+#define SMU_CAP(x) SMU_13_0_6_CAPS_##x
typedef enum {
/*0*/ METRICS_VERSION_V0 = 0,
@@ -51,6 +52,34 @@ struct PPTable_t {
bool Init;
};
+enum smu_v13_0_6_caps {
+ SMU_CAP(DPM),
+ SMU_CAP(DPM_POLICY),
+ SMU_CAP(OTHER_END_METRICS),
+ SMU_CAP(SET_UCLK_MAX),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(PER_INST_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND),
+ SMU_CAP(SDMA_RESET),
+ SMU_CAP(STATIC_METRICS),
+ SMU_CAP(HST_LIMIT_METRICS),
+ SMU_CAP(BOARD_VOLTAGE),
+ SMU_CAP(ALL),
+};
+
extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
+bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap);
+int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu);
+bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
+int smu_v13_0_12_get_max_metrics_size(void);
+int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
+int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member, uint32_t *value);
+ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table);
+extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
+extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
#endif
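
The caps declared above are plain bit positions in a u64. The clear and test helpers appear in the ppt.c hunks earlier in this series; the setter is not shown in this digest but presumably mirrors them. A minimal sketch of the scheme:

static inline void smu_v13_0_6_cap_set(struct smu_context *smu,
				       enum smu_v13_0_6_caps cap)
{
	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;

	/* one u64 bit per capability, indexed by the enum above */
	dpm_context->caps |= BIT_ULL(cap);
}
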
diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c
index 139ab00dee8f..2d3ad7610c2e 100644
--- a/drivers/gpu/drm/ast/ast_cursor.c
+++ b/drivers/gpu/drm/ast/ast_cursor.c
@@ -37,6 +37,7 @@
*/
/* define for signature structure */
+#define AST_HWC_SIGNATURE_SIZE SZ_32
#define AST_HWC_SIGNATURE_CHECKSUM 0x00
#define AST_HWC_SIGNATURE_SizeX 0x04
#define AST_HWC_SIGNATURE_SizeY 0x08
@@ -45,6 +46,21 @@
#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+static unsigned long ast_cursor_vram_size(void)
+{
+ return AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE;
+}
+
+long ast_cursor_vram_offset(struct ast_device *ast)
+{
+ unsigned long size = ast_cursor_vram_size();
+
+ if (size > ast->vram_size)
+ return -EINVAL;
+
+ return ALIGN_DOWN(ast->vram_size - size, SZ_8);
+}
+
static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, unsigned int height)
{
u32 csum = 0;
@@ -75,7 +91,7 @@ static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, un
static void ast_set_cursor_image(struct ast_device *ast, const u8 *src,
unsigned int width, unsigned int height)
{
- u8 __iomem *dst = ast->cursor_plane.base.vaddr;
+ u8 __iomem *dst = ast_plane_vaddr(&ast->cursor_plane.base);
u32 csum;
csum = ast_cursor_calculate_checksum(src, width, height);
@@ -177,7 +193,7 @@ static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
struct ast_device *ast = to_ast_device(plane->dev);
struct drm_rect damage;
u64 dst_off = ast_plane->offset;
- u8 __iomem *dst = ast_plane->vaddr; /* TODO: Use mapping abstraction properly */
+ u8 __iomem *dst = ast_plane_vaddr(ast_plane); /* TODO: Use mapping abstraction properly */
u8 __iomem *sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
unsigned int offset_x, offset_y;
u16 x, y;
@@ -274,25 +290,16 @@ int ast_cursor_plane_init(struct ast_device *ast)
struct ast_cursor_plane *ast_cursor_plane = &ast->cursor_plane;
struct ast_plane *ast_plane = &ast_cursor_plane->base;
struct drm_plane *cursor_plane = &ast_plane->base;
- size_t size;
- void __iomem *vaddr;
- u64 offset;
+ unsigned long size;
+ long offset;
int ret;
- /*
- * Allocate backing storage for cursors. The BOs are permanently
- * pinned to the top end of the VRAM.
- */
-
- size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
+ size = ast_cursor_vram_size();
+ offset = ast_cursor_vram_offset(ast);
+ if (offset < 0)
+ return offset;
- if (ast->vram_fb_available < size)
- return -ENOMEM;
-
- vaddr = ast->vram + ast->vram_fb_available - size;
- offset = ast->vram_fb_available - size;
-
- ret = ast_plane_init(dev, ast_plane, vaddr, offset, size,
+ ret = ast_plane_init(dev, ast_plane, offset, size,
0x01, &ast_cursor_plane_funcs,
ast_cursor_plane_formats, ARRAY_SIZE(ast_cursor_plane_formats),
NULL, DRM_PLANE_TYPE_CURSOR);
@@ -303,7 +310,5 @@ int ast_cursor_plane_init(struct ast_device *ast)
drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs);
drm_plane_enable_fb_damage_clips(cursor_plane);
- ast->vram_fb_available -= size;
-
return 0;
}
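
A worked example of the new layout math, assuming 16 MiB of VRAM: AST_HWC_SIZE is 64 lines of 128 bytes (8192 bytes), plus the 32-byte signature area:

static void example_cursor_layout(void)
{
	unsigned long vram_size = SZ_16M;	/* assumed: 16777216 bytes */
	unsigned long size = AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE; /* 8224 */
	unsigned long offset = ALIGN_DOWN(vram_size - size, SZ_8);

	/* offset == 16768992; [0, offset) stays available for the fb */
}
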
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index d2c2605d2728..2ee402096cd9 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -112,12 +112,9 @@ enum ast_config_mode {
#define AST_MAX_HWC_WIDTH 64
#define AST_MAX_HWC_HEIGHT 64
-
#define AST_HWC_PITCH (AST_MAX_HWC_WIDTH * SZ_2)
#define AST_HWC_SIZE (AST_MAX_HWC_HEIGHT * AST_HWC_PITCH)
-#define AST_HWC_SIGNATURE_SIZE 32
-
/*
* Planes
*/
@@ -125,7 +122,6 @@ enum ast_config_mode {
struct ast_plane {
struct drm_plane base;
- void __iomem *vaddr;
u64 offset;
unsigned long size;
};
@@ -183,7 +179,6 @@ struct ast_device {
void __iomem *vram;
unsigned long vram_base;
unsigned long vram_size;
- unsigned long vram_fb_available;
struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */
@@ -340,14 +335,6 @@ static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 i
__ast_write8_i_masked(ast->ioregs, base, index, preserve_mask, val);
}
-#define AST_VIDMEM_SIZE_8M 0x00800000
-#define AST_VIDMEM_SIZE_16M 0x01000000
-#define AST_VIDMEM_SIZE_32M 0x02000000
-#define AST_VIDMEM_SIZE_64M 0x04000000
-#define AST_VIDMEM_SIZE_128M 0x08000000
-
-#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
-
struct ast_vbios_stdtable {
u8 misc;
u8 seq[4];
@@ -440,6 +427,7 @@ int ast_vga_output_init(struct ast_device *ast);
int ast_sil164_output_init(struct ast_device *ast);
/* ast_cursor.c */
+long ast_cursor_vram_offset(struct ast_device *ast);
int ast_cursor_plane_init(struct ast_device *ast);
/* ast dp501 */
@@ -454,11 +442,12 @@ int ast_astdp_output_init(struct ast_device *ast);
/* ast_mode.c */
int ast_mode_config_init(struct ast_device *ast);
int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
- void __iomem *vaddr, u64 offset, unsigned long size,
+ u64 offset, unsigned long size,
uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
const uint64_t *format_modifiers,
enum drm_plane_type type);
+void __iomem *ast_plane_vaddr(struct ast_plane *ast);
#endif
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index 6dfe6d9777d4..0bc140319464 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -35,36 +35,35 @@
static u32 ast_get_vram_size(struct ast_device *ast)
{
- u8 jreg;
u32 vram_size;
+ u8 vgacr99, vgacraa;
- vram_size = AST_VIDMEM_DEFAULT_SIZE;
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xaa, 0xff);
- switch (jreg & 3) {
+ vgacraa = ast_get_index_reg(ast, AST_IO_VGACRI, 0xaa);
+ switch (vgacraa & AST_IO_VGACRAA_VGAMEM_SIZE_MASK) {
case 0:
- vram_size = AST_VIDMEM_SIZE_8M;
+ vram_size = SZ_8M;
break;
case 1:
- vram_size = AST_VIDMEM_SIZE_16M;
+ vram_size = SZ_16M;
break;
case 2:
- vram_size = AST_VIDMEM_SIZE_32M;
+ vram_size = SZ_32M;
break;
case 3:
- vram_size = AST_VIDMEM_SIZE_64M;
+ vram_size = SZ_64M;
break;
}
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0x99, 0xff);
- switch (jreg & 0x03) {
+ vgacr99 = ast_get_index_reg(ast, AST_IO_VGACRI, 0x99);
+ switch (vgacr99 & AST_IO_VGACR99_VGAMEM_RSRV_MASK) {
case 1:
- vram_size -= 0x100000;
+ vram_size -= SZ_1M;
break;
case 2:
- vram_size -= 0x200000;
+ vram_size -= SZ_2M;
break;
case 3:
- vram_size -= 0x400000;
+ vram_size -= SZ_4M;
break;
}
@@ -93,7 +92,6 @@ int ast_mm_init(struct ast_device *ast)
ast->vram_base = base;
ast->vram_size = vram_size;
- ast->vram_fb_available = vram_size;
return 0;
}
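
The two switch statements above reduce to a pair of table lookups; a condensed sketch of the same decode (the helper name is illustrative only):

static u32 ast_vram_size_sketch(u8 vgacraa, u8 vgacr99)
{
	static const u32 size[] = { SZ_8M, SZ_16M, SZ_32M, SZ_64M };
	static const u32 rsvd[] = { 0, SZ_1M, SZ_2M, SZ_4M };

	return size[vgacraa & AST_IO_VGACRAA_VGAMEM_SIZE_MASK] -
	       rsvd[vgacr99 & AST_IO_VGACR99_VGAMEM_RSRV_MASK];
}
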
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c3b950675485..1de832964e92 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -51,6 +51,26 @@
#define AST_LUT_SIZE 256
+#define AST_PRIMARY_PLANE_MAX_OFFSET (BIT(16) - 1)
+
+static unsigned long ast_fb_vram_offset(void)
+{
+ return 0; // with shmem, the primary plane is always at offset 0
+}
+
+static unsigned long ast_fb_vram_size(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ unsigned long offset = ast_fb_vram_offset(); // starts at offset
+ long cursor_offset = ast_cursor_vram_offset(ast); // ends at cursor offset
+
+ if (cursor_offset < 0)
+ cursor_offset = ast->vram_size; // no cursor; it's all ours
+ if (drm_WARN_ON_ONCE(dev, offset > cursor_offset))
+ return 0; // cannot legally happen; signal error
+ return cursor_offset - offset;
+}
+
static inline void ast_load_palette_index(struct ast_device *ast,
u8 index, u8 red, u8 green,
u8 blue)
@@ -439,7 +459,7 @@ static void ast_wait_for_vretrace(struct ast_device *ast)
*/
int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
- void __iomem *vaddr, u64 offset, unsigned long size,
+ u64 offset, unsigned long size,
uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
@@ -448,7 +468,6 @@ int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
{
struct drm_plane *plane = &ast_plane->base;
- ast_plane->vaddr = vaddr;
ast_plane->offset = offset;
ast_plane->size = size;
@@ -457,6 +476,13 @@ int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
type, NULL);
}
+void __iomem *ast_plane_vaddr(struct ast_plane *ast_plane)
+{
+ struct ast_device *ast = to_ast_device(ast_plane->base.dev);
+
+ return ast->vram + ast_plane->offset;
+}
+
/*
* Primary plane
*/
@@ -503,7 +529,7 @@ static void ast_handle_damage(struct ast_plane *ast_plane, struct iosys_map *src
struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
- struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane->vaddr);
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane_vaddr(ast_plane));
iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
drm_fb_memcpy(&dst, fb->pitches, src, fb, clip);
@@ -576,12 +602,12 @@ static int ast_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
{
struct ast_plane *ast_plane = to_ast_plane(plane);
- if (plane->state && plane->state->fb && ast_plane->vaddr) {
+ if (plane->state && plane->state->fb) {
sb->format = plane->state->fb->format;
sb->width = plane->state->fb->width;
sb->height = plane->state->fb->height;
sb->pitch[0] = plane->state->fb->pitches[0];
- iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane->vaddr);
+ iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane_vaddr(ast_plane));
return 0;
}
return -ENODEV;
@@ -608,13 +634,11 @@ static int ast_primary_plane_init(struct ast_device *ast)
struct drm_device *dev = &ast->base;
struct ast_plane *ast_primary_plane = &ast->primary_plane;
struct drm_plane *primary_plane = &ast_primary_plane->base;
- void __iomem *vaddr = ast->vram;
- u64 offset = 0; /* with shmem, the primary plane is always at offset 0 */
- unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
- unsigned long size = ast->vram_fb_available - cursor_size;
+ u64 offset = ast_fb_vram_offset();
+ unsigned long size = ast_fb_vram_size(ast);
int ret;
- ret = ast_plane_init(dev, ast_primary_plane, vaddr, offset, size,
+ ret = ast_plane_init(dev, ast_primary_plane, offset, size,
0x01, &ast_primary_plane_funcs,
ast_primary_plane_formats, ARRAY_SIZE(ast_primary_plane_formats),
NULL, DRM_PLANE_TYPE_PRIMARY);
@@ -922,9 +946,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s
/*
* Concurrent operations could possibly trigger a call to
- * drm_connector_helper_funcs.get_modes by trying to read the
- * display modes. Protect access to I/O registers by acquiring
- * the I/O-register lock. Released in atomic_flush().
+ * drm_connector_helper_funcs.get_modes by reading the display
+ * modes. Protect access to registers by acquiring the modeset
+ * lock.
*/
mutex_lock(&ast->modeset_lock);
drm_atomic_helper_commit_tail(state);
@@ -938,16 +962,20 @@ static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs =
static enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
- static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB8888);
struct ast_device *ast = to_ast_device(dev);
- unsigned long fbsize, fbpages, max_fbpages;
-
- max_fbpages = (ast->vram_fb_available) >> PAGE_SHIFT;
-
- fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
- fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
-
- if (fbpages > max_fbpages)
+ unsigned long max_fb_size = ast_fb_vram_size(ast);
+ u64 pitch;
+	input_fmts[0] = drm_mipi_dsi_get_input_bus_fmt(output->dev->format);
+	if (!input_fmts[0]) {
+		kfree(input_fmts);	/* avoid leaking the allocation */
+		return NULL;
+	}
+
+ pitch = drm_format_info_min_pitch(info, 0, mode->hdisplay);
+ if (!pitch)
+ return MODE_BAD_WIDTH;
+ if (pitch > AST_PRIMARY_PLANE_MAX_OFFSET)
+ return MODE_BAD_WIDTH; /* maximum programmable pitch */
+ if (pitch > max_fb_size / mode->vdisplay)
return MODE_MEM;
return MODE_OK;
@@ -1018,10 +1046,7 @@ int ast_mode_config_init(struct ast_device *ast)
return ret;
drm_mode_config_reset(dev);
-
- ret = drmm_kms_helper_poll_init(dev);
- if (ret)
- return ret;
+ drmm_kms_helper_poll_init(dev);
return 0;
}
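
The rewritten mode_valid check is pitch-based rather than page-based. With numbers, for DRM_FORMAT_XRGB8888 (4 bytes per pixel): 1920x1080 needs a pitch of 7680 bytes, below the 65535-byte register limit, and 7680 * 1080 = 8294400 bytes of framebuffer VRAM. A sketch of the same test under those assumptions:

static bool ast_mode_fits_sketch(const struct drm_display_mode *mode,
				 unsigned long fb_size)
{
	u64 pitch = 4ULL * mode->hdisplay;	/* XRGB8888 */

	if (pitch > AST_PRIMARY_PLANE_MAX_OFFSET)	/* BIT(16) - 1 */
		return false;
	return pitch * mode->vdisplay <= fb_size;
}
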
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 91e85e457bdf..37568cf3822c 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -1075,16 +1075,16 @@ static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *par
switch (param->vram_size) {
default:
- case AST_VIDMEM_SIZE_8M:
+ case SZ_8M:
param->dram_config |= 0x00;
break;
- case AST_VIDMEM_SIZE_16M:
+ case SZ_16M:
param->dram_config |= 0x04;
break;
- case AST_VIDMEM_SIZE_32M:
+ case SZ_32M:
param->dram_config |= 0x08;
break;
- case AST_VIDMEM_SIZE_64M:
+ case SZ_64M:
param->dram_config |= 0x0c;
break;
}
@@ -1446,16 +1446,16 @@ static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *par
switch (param->vram_size) {
default:
- case AST_VIDMEM_SIZE_8M:
+ case SZ_8M:
param->dram_config |= 0x00;
break;
- case AST_VIDMEM_SIZE_16M:
+ case SZ_16M:
param->dram_config |= 0x04;
break;
- case AST_VIDMEM_SIZE_32M:
+ case SZ_32M:
param->dram_config |= 0x08;
break;
- case AST_VIDMEM_SIZE_64M:
+ case SZ_64M:
param->dram_config |= 0x0c;
break;
}
@@ -1635,19 +1635,19 @@ static void ast_post_chip_2300(struct ast_device *ast)
switch (temp & 0x0c) {
default:
case 0x00:
- param.vram_size = AST_VIDMEM_SIZE_8M;
+ param.vram_size = SZ_8M;
break;
case 0x04:
- param.vram_size = AST_VIDMEM_SIZE_16M;
+ param.vram_size = SZ_16M;
break;
case 0x08:
- param.vram_size = AST_VIDMEM_SIZE_32M;
+ param.vram_size = SZ_32M;
break;
case 0x0c:
- param.vram_size = AST_VIDMEM_SIZE_64M;
+ param.vram_size = SZ_64M;
break;
}
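
The same size-to-config mapping now appears three times in this file (get_ddr3_info, get_ddr2_info and, inverted, ast_post_chip_2300). A shared helper along these lines, hypothetical and not in the patch, would collapse the first two:

static u8 ast_dram_config_bits(u32 vram_size)
{
	switch (vram_size) {
	default:
	case SZ_8M:	return 0x00;
	case SZ_16M:	return 0x04;
	case SZ_32M:	return 0x08;
	case SZ_64M:	return 0x0c;
	}
}
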
diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h
index bb2cc1d8b84e..e15adaf3a80e 100644
--- a/drivers/gpu/drm/ast/ast_reg.h
+++ b/drivers/gpu/drm/ast/ast_reg.h
@@ -30,9 +30,11 @@
#define AST_IO_VGACRI (0x54)
#define AST_IO_VGACR80_PASSWORD (0xa8)
+#define AST_IO_VGACR99_VGAMEM_RSRV_MASK GENMASK(1, 0)
#define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1)
#define AST_IO_VGACRA1_MMIO_ENABLED BIT(2)
#define AST_IO_VGACRA3_DVO_ENABLED BIT(7)
+#define AST_IO_VGACRAA_VGAMEM_SIZE_MASK GENMASK(1, 0)
#define AST_IO_VGACRB6_HSYNC_OFF BIT(0)
#define AST_IO_VGACRB6_VSYNC_OFF BIT(1)
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 09a1be234f71..b9e0ca85226a 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -16,6 +16,7 @@ config DRM_AUX_BRIDGE
tristate
depends on DRM_BRIDGE && OF
select AUXILIARY_BUS
+ select DRM_KMS_HELPER
select DRM_PANEL_BRIDGE
help
Simple transparent bridge that is used by several non-DRM drivers to
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 050dae338ffe..1257009e850c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -948,13 +948,14 @@ static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
}
static int adv7511_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
int ret = 0;
if (adv->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, adv->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, adv->next_bridge, bridge,
flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
return ret;
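
This is the first of many identical conversions in this series: drm_bridge_funcs.attach now receives the encoder as an explicit parameter, and implementations should use it instead of dereferencing bridge->encoder. The general shape, with my_ names as placeholders:

static int my_bridge_attach(struct drm_bridge *bridge,
			    struct drm_encoder *encoder,
			    enum drm_bridge_attach_flags flags)
{
	struct my_bridge *ctx = container_of(bridge, struct my_bridge, bridge);

	/* chain to the next bridge using the passed-in encoder */
	return drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
}
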
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index 83d711ee3a2e..f3fe47b12edc 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -143,35 +143,7 @@ static int anx6345_dp_link_training(struct anx6345 *anx6345)
if (err)
return err;
- /*
- * Power up the sink (DP_SET_POWER register is only available on DPCD
- * v1.1 and later).
- */
- if (anx6345->dpcd[DP_DPCD_REV] >= 0x11) {
- err = drm_dp_dpcd_readb(&anx6345->aux, DP_SET_POWER, &dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
- err);
- return err;
- }
-
- dpcd[0] &= ~DP_SET_POWER_MASK;
- dpcd[0] |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(&anx6345->aux, DP_SET_POWER, dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to power up DisplayPort link: %d\n",
- err);
- return err;
- }
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
+ drm_dp_link_power_up(&anx6345->aux, anx6345->dpcd[DP_DPCD_REV]);
/* Possibly enable downspread on the sink */
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
@@ -517,6 +489,7 @@ static const struct drm_connector_funcs anx6345_connector_funcs = {
};
static int anx6345_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
@@ -553,7 +526,7 @@ static int anx6345_bridge_attach(struct drm_bridge *bridge,
anx6345->connector.polled = DRM_CONNECTOR_POLL_HPD;
err = drm_connector_attach_encoder(&anx6345->connector,
- bridge->encoder);
+ encoder);
if (err) {
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
goto connector_cleanup;
@@ -691,9 +664,10 @@ static int anx6345_i2c_probe(struct i2c_client *client)
struct device *dev;
int i, err;
- anx6345 = devm_kzalloc(&client->dev, sizeof(*anx6345), GFP_KERNEL);
- if (!anx6345)
- return -ENOMEM;
+ anx6345 = devm_drm_bridge_alloc(&client->dev, struct anx6345, bridge,
+ &anx6345_bridge_funcs);
+ if (IS_ERR(anx6345))
+ return PTR_ERR(anx6345);
mutex_init(&anx6345->lock);
@@ -765,7 +739,6 @@ static int anx6345_i2c_probe(struct i2c_client *client)
/* Look for supported chip ID */
anx6345_poweron(anx6345);
if (anx6345_get_chip_id(anx6345)) {
- anx6345->bridge.funcs = &anx6345_bridge_funcs;
drm_bridge_add(&anx6345->bridge);
return 0;
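
drm_dp_link_power_up() centralizes the sequence the removed block open-coded. Roughly, as a simplified sketch of that sequence (see the DRM DP helpers for the canonical version):

static int dp_link_power_up_sketch(struct drm_dp_aux *aux, u8 dpcd_rev)
{
	u8 val;
	int err;

	/* DP_SET_POWER only exists on DPCD v1.1 and later */
	if (dpcd_rev < 0x11)
		return 0;

	err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &val);
	if (err < 0)
		return err;

	val = (val & ~DP_SET_POWER_MASK) | DP_SET_POWER_D0;
	err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, val);
	if (err < 0)
		return err;

	usleep_range(1000, 2000);	/* DP 1.1: sink must wake within 1 ms */
	return 0;
}
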
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index f74694bb9c50..a83020d6576f 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -656,35 +656,7 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- /*
- * Power up the sink (DP_SET_POWER register is only available on DPCD
- * v1.1 and later).
- */
- if (anx78xx->dpcd[DP_DPCD_REV] >= 0x11) {
- err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SET_POWER, &dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
- err);
- return err;
- }
-
- dpcd[0] &= ~DP_SET_POWER_MASK;
- dpcd[0] |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_SET_POWER, dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to power up DisplayPort link: %d\n",
- err);
- return err;
- }
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
+ drm_dp_link_power_up(&anx78xx->aux, anx78xx->dpcd[DP_DPCD_REV]);
/* Possibly enable downspread on the sink */
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
@@ -888,6 +860,7 @@ static const struct drm_connector_funcs anx78xx_connector_funcs = {
};
static int anx78xx_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
@@ -924,7 +897,7 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge,
anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD;
err = drm_connector_attach_encoder(&anx78xx->connector,
- bridge->encoder);
+ encoder);
if (err) {
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
goto connector_cleanup;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 071168aa0c3b..a761941bc3c2 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -838,10 +838,7 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
int ret;
/* Keep the panel disabled while we configure video */
- if (dp->plat_data->panel) {
- if (drm_panel_disable(dp->plat_data->panel))
- DRM_ERROR("failed to disable the panel\n");
- }
+ drm_panel_disable(dp->plat_data->panel);
ret = analogix_dp_train_link(dp);
if (ret) {
@@ -863,13 +860,7 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
}
/* Safe to enable the panel now */
- if (dp->plat_data->panel) {
- ret = drm_panel_enable(dp->plat_data->panel);
- if (ret) {
- DRM_ERROR("failed to enable the panel\n");
- return ret;
- }
- }
+ drm_panel_enable(dp->plat_data->panel);
/* Check whether panel supports fast training */
ret = analogix_dp_fast_link_train_detection(dp);
@@ -955,67 +946,15 @@ static int analogix_dp_disable_psr(struct analogix_dp_device *dp)
return analogix_dp_send_psr_spd(dp, &psr_vsc, true);
}
-/*
- * This function is a bit of a catch-all for panel preparation, hopefully
- * simplifying the logic of functions that need to prepare/unprepare the panel
- * below.
- *
- * If @prepare is true, this function will prepare the panel. Conversely, if it
- * is false, the panel will be unprepared.
- *
- * If @is_modeset_prepare is true, the function will disregard the current state
- * of the panel and either prepare/unprepare the panel based on @prepare. Once
- * it finishes, it will update dp->panel_is_modeset to reflect the current state
- * of the panel.
- */
-static int analogix_dp_prepare_panel(struct analogix_dp_device *dp,
- bool prepare, bool is_modeset_prepare)
-{
- int ret = 0;
-
- if (!dp->plat_data->panel)
- return 0;
-
- mutex_lock(&dp->panel_lock);
-
- /*
- * Exit early if this is a temporary prepare/unprepare and we're already
- * modeset (since we neither want to prepare twice or unprepare early).
- */
- if (dp->panel_is_modeset && !is_modeset_prepare)
- goto out;
-
- if (prepare)
- ret = drm_panel_prepare(dp->plat_data->panel);
- else
- ret = drm_panel_unprepare(dp->plat_data->panel);
-
- if (ret)
- goto out;
-
- if (is_modeset_prepare)
- dp->panel_is_modeset = prepare;
-
-out:
- mutex_unlock(&dp->panel_lock);
- return ret;
-}
-
static int analogix_dp_get_modes(struct drm_connector *connector)
{
struct analogix_dp_device *dp = to_dp(connector);
const struct drm_edid *drm_edid;
- int ret, num_modes = 0;
+ int num_modes = 0;
if (dp->plat_data->panel) {
num_modes += drm_panel_get_modes(dp->plat_data->panel, connector);
} else {
- ret = analogix_dp_prepare_panel(dp, true, false);
- if (ret) {
- DRM_ERROR("Failed to prepare panel (%d)\n", ret);
- return 0;
- }
-
drm_edid = drm_edid_read_ddc(connector, &dp->aux.ddc);
drm_edid_connector_update(&dp->connector, drm_edid);
@@ -1024,10 +963,6 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
num_modes += drm_edid_connector_add_modes(&dp->connector);
drm_edid_free(drm_edid);
}
-
- ret = analogix_dp_prepare_panel(dp, false, false);
- if (ret)
- DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
}
if (dp->plat_data->get_modes)
@@ -1082,24 +1017,13 @@ analogix_dp_detect(struct drm_connector *connector, bool force)
{
struct analogix_dp_device *dp = to_dp(connector);
enum drm_connector_status status = connector_status_disconnected;
- int ret;
if (dp->plat_data->panel)
return connector_status_connected;
- ret = analogix_dp_prepare_panel(dp, true, false);
- if (ret) {
- DRM_ERROR("Failed to prepare panel (%d)\n", ret);
- return connector_status_disconnected;
- }
-
if (!analogix_dp_detect_hpd(dp))
status = connector_status_connected;
- ret = analogix_dp_prepare_panel(dp, false, false);
- if (ret)
- DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
-
return status;
}
@@ -1113,10 +1037,10 @@ static const struct drm_connector_funcs analogix_dp_connector_funcs = {
};
static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct analogix_dp_device *dp = bridge->driver_private;
- struct drm_encoder *encoder = dp->encoder;
struct drm_connector *connector = NULL;
int ret = 0;
@@ -1203,7 +1127,6 @@ static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
- int ret;
crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
@@ -1214,9 +1137,7 @@ static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
if (old_crtc_state && old_crtc_state->self_refresh_active)
return;
- ret = analogix_dp_prepare_panel(dp, true, true);
- if (ret)
- DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+ drm_panel_prepare(dp->plat_data->panel);
}
static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
@@ -1296,17 +1217,11 @@ static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
{
struct analogix_dp_device *dp = bridge->driver_private;
- int ret;
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
- if (dp->plat_data->panel) {
- if (drm_panel_disable(dp->plat_data->panel)) {
- DRM_ERROR("failed to disable the panel\n");
- return;
- }
- }
+ drm_panel_disable(dp->plat_data->panel);
disable_irq(dp->irq);
@@ -1314,9 +1229,7 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
pm_runtime_put_sync(dp->dev);
- ret = analogix_dp_prepare_panel(dp, false, true);
- if (ret)
- DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+ drm_panel_unprepare(dp->plat_data->panel);
dp->fast_train_enable = false;
dp->psr_supported = false;
@@ -1505,6 +1418,10 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
video_info->max_link_rate = 0x0A;
video_info->max_lane_count = 0x04;
break;
+ case RK3588_EDP:
+ video_info->max_link_rate = 0x14;
+ video_info->max_lane_count = 0x04;
+ break;
case EXYNOS_DP:
/*
 		 * NOTE: this property parsing code is used for
@@ -1540,6 +1457,26 @@ out:
return ret;
}
+static int analogix_dpaux_wait_hpd_asserted(struct drm_dp_aux *aux, unsigned long wait_us)
+{
+ struct analogix_dp_device *dp = to_dp(aux);
+ int val;
+ int ret;
+
+ if (dp->force_hpd)
+ return 0;
+
+ pm_runtime_get_sync(dp->dev);
+
+ ret = readx_poll_timeout(analogix_dp_get_plug_in_status, dp, val, !val,
+ wait_us / 100, wait_us);
+
+ pm_runtime_mark_last_busy(dp->dev);
+ pm_runtime_put_autosuspend(dp->dev);
+
+ return ret;
+}
+
struct analogix_dp_device *
analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
{
@@ -1560,9 +1497,6 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
dp->dev = &pdev->dev;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
- mutex_init(&dp->panel_lock);
- dp->panel_is_modeset = false;
-
/*
 	 * platform dp drivers need container_of() on the plat_data to get
 	 * the driver private data, so we need to store the pointer of
@@ -1625,10 +1559,10 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
* that we can get the current state of the GPIO.
*/
dp->irq = gpiod_to_irq(dp->hpd_gpiod);
- irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+ irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN;
} else {
dp->irq = platform_get_irq(pdev, 0);
- irq_flags = 0;
+ irq_flags = IRQF_NO_AUTOEN;
}
if (dp->irq == -ENXIO) {
@@ -1645,7 +1579,18 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
dev_err(&pdev->dev, "failed to request irq\n");
goto err_disable_clk;
}
- disable_irq(dp->irq);
+
+ dp->aux.name = "DP-AUX";
+ dp->aux.transfer = analogix_dpaux_transfer;
+ dp->aux.wait_hpd_asserted = analogix_dpaux_wait_hpd_asserted;
+ dp->aux.dev = dp->dev;
+ drm_dp_aux_init(&dp->aux);
+
+ pm_runtime_use_autosuspend(dp->dev);
+ pm_runtime_set_autosuspend_delay(dp->dev, 100);
+ ret = devm_pm_runtime_enable(dp->dev);
+ if (ret)
+ goto err_disable_clk;
return dp;
@@ -1681,6 +1626,7 @@ int analogix_dp_resume(struct analogix_dp_device *dp)
if (dp->plat_data->power_on)
dp->plat_data->power_on(dp->plat_data);
+ phy_set_mode(dp->phy, PHY_MODE_DP);
phy_power_on(dp->phy);
analogix_dp_init_dp(dp);
@@ -1696,25 +1642,12 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
dp->drm_dev = drm_dev;
dp->encoder = dp->plat_data->encoder;
- if (IS_ENABLED(CONFIG_PM)) {
- pm_runtime_use_autosuspend(dp->dev);
- pm_runtime_set_autosuspend_delay(dp->dev, 100);
- pm_runtime_enable(dp->dev);
- } else {
- ret = analogix_dp_resume(dp);
- if (ret)
- return ret;
- }
-
- dp->aux.name = "DP-AUX";
- dp->aux.transfer = analogix_dpaux_transfer;
- dp->aux.dev = dp->dev;
dp->aux.drm_dev = drm_dev;
ret = drm_dp_aux_register(&dp->aux);
if (ret) {
DRM_ERROR("failed to register AUX (%d)\n", ret);
- goto err_disable_pm_runtime;
+ return ret;
}
ret = analogix_dp_create_bridge(drm_dev, dp);
@@ -1727,13 +1660,6 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
err_unregister_aux:
drm_dp_aux_unregister(&dp->aux);
-err_disable_pm_runtime:
- if (IS_ENABLED(CONFIG_PM)) {
- pm_runtime_dont_use_autosuspend(dp->dev);
- pm_runtime_disable(dp->dev);
- } else {
- analogix_dp_suspend(dp);
- }
return ret;
}
@@ -1744,19 +1670,9 @@ void analogix_dp_unbind(struct analogix_dp_device *dp)
analogix_dp_bridge_disable(dp->bridge);
dp->connector.funcs->destroy(&dp->connector);
- if (dp->plat_data->panel) {
- if (drm_panel_unprepare(dp->plat_data->panel))
- DRM_ERROR("failed to turnoff the panel\n");
- }
+ drm_panel_unprepare(dp->plat_data->panel);
drm_dp_aux_unregister(&dp->aux);
-
- if (IS_ENABLED(CONFIG_PM)) {
- pm_runtime_dont_use_autosuspend(dp->dev);
- pm_runtime_disable(dp->dev);
- } else {
- analogix_dp_suspend(dp);
- }
}
EXPORT_SYMBOL_GPL(analogix_dp_unbind);
@@ -1782,6 +1698,20 @@ int analogix_dp_stop_crc(struct drm_connector *connector)
}
EXPORT_SYMBOL_GPL(analogix_dp_stop_crc);
+struct analogix_dp_plat_data *analogix_dp_aux_to_plat_data(struct drm_dp_aux *aux)
+{
+ struct analogix_dp_device *dp = to_dp(aux);
+
+ return dp->plat_data;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_aux_to_plat_data);
+
+struct drm_dp_aux *analogix_dp_get_aux(struct analogix_dp_device *dp)
+{
+ return &dp->aux;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_get_aux);
+
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
MODULE_DESCRIPTION("Analogix DP Core Driver");
MODULE_LICENSE("GPL v2");
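
The new wait_hpd_asserted hook leans on readx_poll_timeout(); below is a simplified expansion of that call (the real macro lives in <linux/iopoll.h> and re-checks the condition once after timing out). The condition is !val because analogix_dp_get_plug_in_status() returns 0 once hot-plug is asserted:

static int wait_hpd_sketch(struct analogix_dp_device *dp, unsigned long wait_us)
{
	ktime_t timeout = ktime_add_us(ktime_get(), wait_us);
	int val;

	for (;;) {
		val = analogix_dp_get_plug_in_status(dp);
		if (!val)
			return 0;
		if (ktime_after(ktime_get(), timeout))
			return -ETIMEDOUT;
		usleep_range(wait_us / 200, wait_us / 100); /* sleep_us = wait_us / 100 */
	}
}
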
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index 774d11574b09..2b54120ba4a3 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -169,9 +169,6 @@ struct analogix_dp_device {
bool fast_train_enable;
bool psr_supported;
- struct mutex panel_lock;
- bool panel_is_modeset;
-
struct analogix_dp_plat_data *plat_data;
};
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 3afc73c858c4..38fd8d5014d2 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -11,6 +11,7 @@
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
#include <drm/bridge/analogix_dp.h>
@@ -513,10 +514,24 @@ void analogix_dp_enable_sw_function(struct analogix_dp_device *dp)
void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype)
{
u32 reg;
+ int ret;
reg = bwtype;
if ((bwtype == DP_LINK_BW_2_7) || (bwtype == DP_LINK_BW_1_62))
writel(reg, dp->reg_base + ANALOGIX_DP_LINK_BW_SET);
+
+ if (dp->phy) {
+ union phy_configure_opts phy_cfg = {0};
+
+ phy_cfg.dp.link_rate =
+ drm_dp_bw_code_to_link_rate(dp->link_train.link_rate) / 100;
+ phy_cfg.dp.set_rate = true;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret);
+ return;
+ }
+ }
}
void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype)
@@ -530,9 +545,22 @@ void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype)
void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count)
{
u32 reg;
+ int ret;
reg = count;
writel(reg, dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET);
+
+ if (dp->phy) {
+ union phy_configure_opts phy_cfg = {0};
+
+ phy_cfg.dp.lanes = dp->link_train.lane_count;
+ phy_cfg.dp.set_lanes = true;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret);
+ return;
+ }
+ }
}
void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count)
@@ -546,10 +574,34 @@ void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count)
void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp)
{
u8 lane;
+ int ret;
for (lane = 0; lane < dp->link_train.lane_count; lane++)
writel(dp->link_train.training_lane[lane],
dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL + 4 * lane);
+
+ if (dp->phy) {
+ union phy_configure_opts phy_cfg = {0};
+
+ for (lane = 0; lane < dp->link_train.lane_count; lane++) {
+ u8 training_lane = dp->link_train.training_lane[lane];
+ u8 vs, pe;
+
+ vs = (training_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ pe = (training_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ phy_cfg.dp.voltage[lane] = vs;
+ phy_cfg.dp.pre[lane] = pe;
+ }
+
+ phy_cfg.dp.set_voltages = true;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret);
+ return;
+ }
+ }
}
u32 analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, u8 lane)
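
The three hunks above share one pattern: mirror each link-training register write into the generic PHY layer via phy_configure(), setting only the relevant set_* flag and tolerating -EOPNOTSUPP from PHYs without DP support. A condensed sketch with hard-coded example values:

static void dp_phy_config_sketch(struct phy *dp_phy, struct device *dev)
{
	union phy_configure_opts opts = { };
	int ret;

	opts.dp.link_rate = 2700;	/* HBR, in Mb/s */
	opts.dp.lanes = 4;
	opts.dp.set_rate = true;
	opts.dp.set_lanes = true;

	ret = phy_configure(dp_phy, &opts);
	if (ret && ret != -EOPNOTSUPP)
		dev_err(dev, "phy_configure() failed: %d\n", ret);
}
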
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 0b97b66de577..8a9079c2ed5c 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1257,10 +1257,10 @@ static void anx7625_power_on(struct anx7625_data *ctx)
usleep_range(11000, 12000);
/* Power on pin enable */
- gpiod_set_value(ctx->pdata.gpio_p_on, 1);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_p_on, 1);
usleep_range(10000, 11000);
/* Power reset pin enable */
- gpiod_set_value(ctx->pdata.gpio_reset, 1);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_reset, 1);
usleep_range(10000, 11000);
DRM_DEV_DEBUG_DRIVER(dev, "power on !\n");
@@ -1280,9 +1280,9 @@ static void anx7625_power_standby(struct anx7625_data *ctx)
return;
}
- gpiod_set_value(ctx->pdata.gpio_reset, 0);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_reset, 0);
usleep_range(1000, 1100);
- gpiod_set_value(ctx->pdata.gpio_p_on, 0);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_p_on, 0);
usleep_range(1000, 1100);
ret = regulator_bulk_disable(ARRAY_SIZE(ctx->pdata.supplies),
@@ -1814,9 +1814,6 @@ static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx)
DRM_DEV_DEBUG_DRIVER(dev, "sink detect\n");
- if (ctx->pdata.panel_bridge)
- return connector_status_connected;
-
return ctx->hpd_status ? connector_status_connected :
connector_status_disconnected;
}
@@ -2141,6 +2138,7 @@ static void hdcp_check_work_func(struct work_struct *work)
}
static int anx7625_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
@@ -2159,7 +2157,7 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge,
}
if (ctx->pdata.panel_bridge) {
- err = drm_bridge_attach(bridge->encoder,
+ err = drm_bridge_attach(encoder,
ctx->pdata.panel_bridge,
&ctx->bridge, flags);
if (err)
@@ -2474,6 +2472,22 @@ static const struct drm_edid *anx7625_bridge_edid_read(struct drm_bridge *bridge
return anx7625_edid_read(ctx);
}
+static void anx7625_bridge_hpd_enable(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = ctx->dev;
+
+ pm_runtime_get_sync(dev);
+}
+
+static void anx7625_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = ctx->dev;
+
+ pm_runtime_put_sync(dev);
+}
+
static const struct drm_bridge_funcs anx7625_bridge_funcs = {
.attach = anx7625_bridge_attach,
.detach = anx7625_bridge_detach,
@@ -2487,6 +2501,8 @@ static const struct drm_bridge_funcs anx7625_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
.detect = anx7625_bridge_detect,
.edid_read = anx7625_bridge_edid_read,
+ .hpd_enable = anx7625_bridge_hpd_enable,
+ .hpd_disable = anx7625_bridge_hpd_disable,
};
static int anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx,
@@ -2568,12 +2584,6 @@ static const struct dev_pm_ops anx7625_pm_ops = {
anx7625_runtime_pm_resume, NULL)
};
-static void anx7625_runtime_disable(void *data)
-{
- pm_runtime_dont_use_autosuspend(data);
- pm_runtime_disable(data);
-}
-
static int anx7625_link_bridge(struct drm_dp_aux *aux)
{
struct anx7625_data *platform = container_of(aux, struct anx7625_data, aux);
@@ -2590,9 +2600,8 @@ static int anx7625_link_bridge(struct drm_dp_aux *aux)
platform->bridge.of_node = dev->of_node;
if (!anx7625_of_panel_on_aux_bus(dev))
platform->bridge.ops |= DRM_BRIDGE_OP_EDID;
- if (!platform->pdata.panel_bridge)
- platform->bridge.ops |= DRM_BRIDGE_OP_HPD |
- DRM_BRIDGE_OP_DETECT;
+ if (!platform->pdata.panel_bridge || !anx7625_of_panel_on_aux_bus(dev))
+ platform->bridge.ops |= DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_DETECT;
platform->bridge.type = platform->pdata.panel_bridge ?
DRM_MODE_CONNECTOR_eDP :
DRM_MODE_CONNECTOR_DisplayPort;
@@ -2707,11 +2716,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
goto free_wq;
}
- pm_runtime_enable(dev);
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
pm_suspend_ignore_children(dev, true);
- ret = devm_add_action_or_reset(dev, anx7625_runtime_disable, dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
goto free_wq;
@@ -2771,7 +2779,6 @@ static void anx7625_i2c_remove(struct i2c_client *client)
if (platform->hdcp_workqueue) {
cancel_delayed_work(&platform->hdcp_work);
- flush_workqueue(platform->hdcp_workqueue);
destroy_workqueue(platform->hdcp_workqueue);
}
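
On the runtime-PM cleanup above: devm_pm_runtime_enable() registers pm_runtime_disable(), plus pm_runtime_dont_use_autosuspend() when autosuspend was enabled, as managed cleanup actions, which is exactly what the removed anx7625_runtime_disable() callback did by hand. The resulting probe-time pattern, sketched:

	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_use_autosuspend(dev);
	pm_suspend_ignore_children(dev, true);
	ret = devm_pm_runtime_enable(dev);
	if (ret)
		return ret;	/* the driver itself jumps to free_wq instead */
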
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
index 015983c015e5..c179b86d208f 100644
--- a/drivers/gpu/drm/bridge/aux-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -86,6 +86,7 @@ struct drm_aux_bridge_data {
};
static int drm_aux_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct drm_aux_bridge_data *data;
@@ -95,7 +96,7 @@ static int drm_aux_bridge_attach(struct drm_bridge *bridge,
data = container_of(bridge, struct drm_aux_bridge_data, bridge);
- return drm_bridge_attach(bridge->encoder, data->next_bridge, bridge,
+ return drm_bridge_attach(encoder, data->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
index 48f297c78ee6..b3f588b71a7d 100644
--- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -156,6 +156,7 @@ void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status sta
EXPORT_SYMBOL_GPL(drm_aux_hpd_bridge_notify);
static int drm_aux_hpd_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index c7a0247e06ad..b022dd6e6b6e 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -425,6 +425,17 @@
#define DSI_NULL_FRAME_OVERHEAD 6
#define DSI_EOT_PKT_SIZE 4
+struct cdns_dsi_bridge_state {
+ struct drm_bridge_state base;
+ struct cdns_dsi_cfg dsi_cfg;
+};
+
+static inline struct cdns_dsi_bridge_state *
+to_cdns_dsi_bridge_state(struct drm_bridge_state *bridge_state)
+{
+ return container_of(bridge_state, struct cdns_dsi_bridge_state, base);
+}
+
static inline struct cdns_dsi *input_to_dsi(struct cdns_dsi_input *input)
{
return container_of(input, struct cdns_dsi, input);
@@ -568,15 +579,18 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
unsigned long dsi_hss_hsa_hse_hbp;
unsigned int nlanes = output->dev->lanes;
+ int mode_clock = (mode_valid_check ? mode->clock : mode->crtc_clock);
int ret;
ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check);
if (ret)
return ret;
- phy_mipi_dphy_get_default_config(mode->crtc_clock * 1000,
- mipi_dsi_pixel_format_to_bpp(output->dev->format),
- nlanes, phy_cfg);
+ ret = phy_mipi_dphy_get_default_config(mode_clock * 1000,
+ mipi_dsi_pixel_format_to_bpp(output->dev->format),
+ nlanes, phy_cfg);
+ if (ret)
+ return ret;
ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check);
if (ret)
@@ -605,6 +619,7 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
}
static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
@@ -617,7 +632,7 @@ static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
return -ENOTSUPP;
}
- return drm_bridge_attach(bridge->encoder, output->bridge, bridge,
+ return drm_bridge_attach(encoder, output->bridge, bridge,
flags);
}
@@ -655,7 +670,8 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
@@ -675,11 +691,17 @@ static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
pm_runtime_put(dsi->base.dev);
}
-static void cdns_dsi_bridge_post_disable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
+ dsi->phy_initialized = false;
+ dsi->link_initialized = false;
+ phy_power_off(dsi->dphy);
+ phy_exit(dsi->dphy);
+
pm_runtime_put(dsi->base.dev);
}
@@ -752,32 +774,59 @@ static void cdns_dsi_init_link(struct cdns_dsi *dsi)
dsi->link_initialized = true;
}
-static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
struct cdns_dsi_output *output = &dsi->output;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct cdns_dsi_bridge_state *dsi_state;
+ struct drm_bridge_state *new_bridge_state;
struct drm_display_mode *mode;
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
+ struct drm_connector *connector;
unsigned long tx_byte_period;
struct cdns_dsi_cfg dsi_cfg;
- u32 tmp, reg_wakeup, div;
+ u32 tmp, reg_wakeup, div, status;
int nlanes;
if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
return;
+ new_bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
+ if (WARN_ON(!new_bridge_state))
+ return;
+
+ dsi_state = to_cdns_dsi_bridge_state(new_bridge_state);
+ dsi_cfg = dsi_state->dsi_cfg;
+
if (dsi->platform_ops && dsi->platform_ops->enable)
dsi->platform_ops->enable(dsi);
- mode = &bridge->encoder->crtc->state->adjusted_mode;
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ mode = &crtc_state->adjusted_mode;
nlanes = output->dev->lanes;
- WARN_ON_ONCE(cdns_dsi_check_conf(dsi, mode, &dsi_cfg, false));
-
cdns_dsi_hs_init(dsi);
cdns_dsi_init_link(dsi);
+ /*
+ * Now that the DSI Link and DSI Phy are initialized,
+ * wait for the CLK and Data Lanes to be ready.
+ */
+ tmp = CLK_LANE_RDY;
+ for (int i = 0; i < nlanes; i++)
+ tmp |= DATA_LANE_RDY(i);
+
+ if (readl_poll_timeout(dsi->regs + MCTL_MAIN_STS, status,
+ (tmp == (status & tmp)), 100, 500000))
+ dev_err(dsi->base.dev,
+ "Timed Out: DSI-DPhy Clock and Data Lanes not ready.\n");
+
writel(HBP_LEN(dsi_cfg.hbp) | HSA_LEN(dsi_cfg.hsa),
dsi->regs + VID_HSIZE1);
writel(HFP_LEN(dsi_cfg.hfp) | HACT_LEN(dsi_cfg.hact),
@@ -892,7 +941,8 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
writel(tmp, dsi->regs + MCTL_MAIN_EN);
}
-static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
@@ -904,13 +954,109 @@ static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge)
cdns_dsi_hs_init(dsi);
}
+static u32 *cdns_dsi_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+ struct cdns_dsi *dsi = input_to_dsi(input);
+ struct cdns_dsi_output *output = &dsi->output;
+ u32 *input_fmts;
+
+ *num_input_fmts = 0;
+
+ input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = drm_mipi_dsi_get_input_bus_fmt(output->dev->format);
+ if (!input_fmts[0])
+ return NULL;
+
+ *num_input_fmts = 1;
+
+ return input_fmts;
+}
+
+static int cdns_dsi_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+ struct cdns_dsi *dsi = input_to_dsi(input);
+ struct cdns_dsi_bridge_state *dsi_state = to_cdns_dsi_bridge_state(bridge_state);
+ const struct drm_display_mode *mode = &crtc_state->mode;
+ struct cdns_dsi_cfg *dsi_cfg = &dsi_state->dsi_cfg;
+
+ return cdns_dsi_check_conf(dsi, mode, dsi_cfg, false);
+}
+
+static struct drm_bridge_state *
+cdns_dsi_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
+{
+ struct cdns_dsi_bridge_state *dsi_state, *old_dsi_state;
+ struct drm_bridge_state *bridge_state;
+
+ if (WARN_ON(!bridge->base.state))
+ return NULL;
+
+ bridge_state = drm_priv_to_bridge_state(bridge->base.state);
+ old_dsi_state = to_cdns_dsi_bridge_state(bridge_state);
+
+ dsi_state = kzalloc(sizeof(*dsi_state), GFP_KERNEL);
+ if (!dsi_state)
+ return NULL;
+
+ __drm_atomic_helper_bridge_duplicate_state(bridge, &dsi_state->base);
+
+ memcpy(&dsi_state->dsi_cfg, &old_dsi_state->dsi_cfg,
+ sizeof(dsi_state->dsi_cfg));
+
+ return &dsi_state->base;
+}
+
+static void
+cdns_dsi_bridge_atomic_destroy_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ struct cdns_dsi_bridge_state *dsi_state;
+
+ dsi_state = to_cdns_dsi_bridge_state(state);
+
+ kfree(dsi_state);
+}
+
+static struct drm_bridge_state *
+cdns_dsi_bridge_atomic_reset(struct drm_bridge *bridge)
+{
+ struct cdns_dsi_bridge_state *dsi_state;
+
+ dsi_state = kzalloc(sizeof(*dsi_state), GFP_KERNEL);
+ if (!dsi_state)
+ return NULL;
+
+	dsi_state->base.bridge = bridge;	/* kzalloc() already zeroed it */
+
+ return &dsi_state->base;
+}
+
static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = {
.attach = cdns_dsi_bridge_attach,
.mode_valid = cdns_dsi_bridge_mode_valid,
- .disable = cdns_dsi_bridge_disable,
- .pre_enable = cdns_dsi_bridge_pre_enable,
- .enable = cdns_dsi_bridge_enable,
- .post_disable = cdns_dsi_bridge_post_disable,
+ .atomic_disable = cdns_dsi_bridge_atomic_disable,
+ .atomic_pre_enable = cdns_dsi_bridge_atomic_pre_enable,
+ .atomic_enable = cdns_dsi_bridge_atomic_enable,
+ .atomic_post_disable = cdns_dsi_bridge_atomic_post_disable,
+ .atomic_check = cdns_dsi_bridge_atomic_check,
+ .atomic_reset = cdns_dsi_bridge_atomic_reset,
+ .atomic_duplicate_state = cdns_dsi_bridge_atomic_duplicate_state,
+ .atomic_destroy_state = cdns_dsi_bridge_atomic_destroy_state,
+ .atomic_get_input_bus_fmts = cdns_dsi_bridge_get_input_bus_fmts,
};
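The cdns_dsi_bridge_state container and the to_cdns_dsi_bridge_state() cast used by the callbacks above are defined outside these hunks; a minimal sketch of the subclassing pattern they are assumed to follow (not the driver's actual definitions):

/* Sketch only; the real definitions live elsewhere in this driver. */
struct cdns_dsi_bridge_state {
	struct drm_bridge_state base;	/* embedded core bridge state */
	struct cdns_dsi_cfg dsi_cfg;	/* timings computed in atomic_check */
};

static inline struct cdns_dsi_bridge_state *
to_cdns_dsi_bridge_state(struct drm_bridge_state *bridge_state)
{
	return container_of(bridge_state, struct cdns_dsi_bridge_state, base);
}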
static int cdns_dsi_attach(struct mipi_dsi_host *host,
@@ -920,8 +1066,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
struct cdns_dsi_output *output = &dsi->output;
struct cdns_dsi_input *input = &dsi->input;
struct drm_bridge *bridge;
- struct drm_panel *panel;
- struct device_node *np;
int ret;
/*
@@ -939,26 +1083,10 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
/*
* The host <-> device link might be described using an OF-graph
* representation, in this case we extract the device of_node from
- * this representation, otherwise we use dsidev->dev.of_node which
- * should have been filled by the core.
+ * this representation.
*/
- np = of_graph_get_remote_node(dsi->base.dev->of_node, DSI_OUTPUT_PORT,
- dev->channel);
- if (!np)
- np = of_node_get(dev->dev.of_node);
-
- panel = of_drm_find_panel(np);
- if (!IS_ERR(panel)) {
- bridge = drm_panel_bridge_add_typed(panel,
- DRM_MODE_CONNECTOR_DSI);
- } else {
- bridge = of_drm_find_bridge(dev->dev.of_node);
- if (!bridge)
- bridge = ERR_PTR(-EINVAL);
- }
-
- of_node_put(np);
-
+ bridge = devm_drm_of_get_bridge(dsi->base.dev, dsi->base.dev->of_node,
+ DSI_OUTPUT_PORT, dev->channel);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
dev_err(host->dev, "failed to add DSI device %s (err = %d)",
@@ -968,7 +1096,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
output->dev = dev;
output->bridge = bridge;
- output->panel = panel;
/*
* The DSI output has been properly configured, we can now safely
@@ -984,12 +1111,9 @@ static int cdns_dsi_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *dev)
{
struct cdns_dsi *dsi = to_cdns_dsi(host);
- struct cdns_dsi_output *output = &dsi->output;
struct cdns_dsi_input *input = &dsi->input;
drm_bridge_remove(&input->bridge);
- if (output->panel)
- drm_panel_bridge_remove(output->bridge);
return 0;
}
@@ -1152,7 +1276,6 @@ static int __maybe_unused cdns_dsi_suspend(struct device *dev)
clk_disable_unprepare(dsi->dsi_sys_clk);
clk_disable_unprepare(dsi->dsi_p_clk);
reset_control_assert(dsi->dsi_p_rst);
- dsi->link_initialized = false;
return 0;
}
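With devm_drm_of_get_bridge() in the attach path above, the panel/bridge distinction disappears from the driver: panels come back pre-wrapped in a devm-managed panel bridge, which is why output->panel and the drm_panel_bridge_remove() call in detach can go. A rough approximation of the lookup the helper performs (for illustration, not the helper's actual body):

/* Approximation of devm_drm_of_get_bridge(), for illustration only. */
static struct drm_bridge *sketch_of_get_bridge(struct device *dev,
					       struct device_node *np,
					       u32 port, u32 endpoint)
{
	struct drm_panel *panel;
	struct drm_bridge *bridge;
	int ret;

	ret = drm_of_find_panel_or_bridge(np, port, endpoint, &panel, &bridge);
	if (ret)
		return ERR_PTR(ret);

	/* Wrap panels so that callers only ever deal with bridges. */
	if (panel)
		bridge = devm_drm_panel_bridge_add(dev, panel);

	return bridge;
}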
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
index ca7ea2da635c..5db5dbbbcaad 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
@@ -10,7 +10,6 @@
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
#include <linux/bits.h>
#include <linux/completion.h>
@@ -21,7 +20,6 @@ struct reset_control;
struct cdns_dsi_output {
struct mipi_dsi_device *dev;
- struct drm_panel *panel;
struct drm_bridge *bridge;
union phy_configure_opts phy_opts;
};
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 81fad14c2cd5..b431e7efd1f0 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -546,76 +546,6 @@ out:
}
/**
- * cdns_mhdp_link_power_up() - power up a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-static
-int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must exit the
- * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
- * Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
-
- return 0;
-}
-
-/**
- * cdns_mhdp_link_power_down() - power down a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-static
-int cdns_mhdp_link_power_down(struct drm_dp_aux *aux,
- struct cdns_mhdp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-/**
* cdns_mhdp_link_configure() - configure a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
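Both helpers removed above are folded into the shared DP helper library used in the hunks below; like the local copies, the shared helpers are no-ops for DPCD revisions earlier than 1.1. A hedged usage sketch, with signatures as used by the callers:

/* Sketch: the replacement calls, not part of the patch itself. */
static int sketch_set_sink_power(struct drm_dp_aux *aux, u8 dpcd_rev, bool up)
{
	if (up)	/* wakes the sink and allows for the D3->D0 exit time */
		return drm_dp_link_power_up(aux, dpcd_rev);

	return drm_dp_link_power_down(aux, dpcd_rev);
}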
@@ -1453,7 +1383,7 @@ static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
- cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link);
+ drm_dp_link_power_up(&mhdp->aux, mhdp->link.revision);
cdns_mhdp_fill_sink_caps(mhdp, dpcd);
@@ -1500,7 +1430,7 @@ static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
if (mhdp->plugged)
- cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);
+ drm_dp_link_power_down(&mhdp->aux, mhdp->link.revision);
mhdp->link_up = false;
}
@@ -1726,6 +1656,7 @@ static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
}
static int cdns_mhdp_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
@@ -2305,7 +2236,7 @@ static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
* If everything looks fine, just return, as we don't handle
* DP IRQs.
*/
- if (ret > 0 &&
+ if (!ret &&
drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
goto out;
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index 81f7c701961f..634c5b030667 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -580,11 +580,13 @@ static int chipone_dsi_host_attach(struct chipone *icn)
return ret;
}
-static int chipone_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
+static int chipone_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
struct chipone *icn = bridge_to_chipone(bridge);
- return drm_bridge_attach(bridge->encoder, icn->panel_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, icn->panel_bridge, bridge, flags);
}
#define MAX_INPUT_SEL_FORMATS 1
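The same mechanical change repeats across every bridge driver below: drm_bridge_funcs.attach grows a struct drm_encoder * parameter, and implementations use it instead of dereferencing bridge->encoder, which is not guaranteed to be populated at attach time. A sketch of the converted shape for a hypothetical pass-through bridge (names are illustrative, not from the patch):

/* Hypothetical pass-through bridge, assuming <drm/drm_bridge.h>. */
struct sketch_bridge {
	struct drm_bridge bridge;
	struct drm_bridge *next_bridge;
};

static int sketch_attach(struct drm_bridge *bridge,
			 struct drm_encoder *encoder,
			 enum drm_bridge_attach_flags flags)
{
	struct sketch_bridge *sb =
		container_of(bridge, struct sketch_bridge, bridge);

	/* Use the encoder handed in, not bridge->encoder. */
	return drm_bridge_attach(encoder, sb->next_bridge, bridge, flags);
}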
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index da17f0978a79..210c45c1efd4 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -268,13 +268,14 @@ static void ch7033_hpd_event(void *arg, enum drm_connector_status status)
}
static int ch7033_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
struct drm_connector *connector = &priv->connector;
int ret;
- ret = drm_bridge_attach(bridge->encoder, priv->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, priv->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
return ret;
@@ -305,7 +306,7 @@ static int ch7033_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- return drm_connector_attach_encoder(&priv->connector, bridge->encoder);
+ return drm_connector_attach_encoder(&priv->connector, encoder);
}
static void ch7033_bridge_detach(struct drm_bridge *bridge)
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 72bc508d4e6e..badd2c7f91a1 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -34,6 +34,7 @@ to_display_connector(struct drm_bridge *bridge)
}
static int display_connector_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
@@ -209,9 +210,10 @@ static int display_connector_probe(struct platform_device *pdev)
const char *label = NULL;
int ret;
- conn = devm_kzalloc(&pdev->dev, sizeof(*conn), GFP_KERNEL);
- if (!conn)
- return -ENOMEM;
+ conn = devm_drm_bridge_alloc(&pdev->dev, struct display_connector, bridge,
+ &display_connector_bridge_funcs);
+ if (IS_ERR(conn))
+ return PTR_ERR(conn);
platform_set_drvdata(pdev, conn);
@@ -361,7 +363,6 @@ static int display_connector_probe(struct platform_device *pdev)
}
}
- conn->bridge.funcs = &display_connector_bridge_funcs;
conn->bridge.of_node = pdev->dev.of_node;
if (conn->bridge.ddc)
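display-connector is the first of several drivers in this series converted to devm_drm_bridge_alloc(), which allocates the embedding structure and sets bridge.funcs in one step, hence the dropped manual funcs assignment above. A minimal sketch of the contract, using a hypothetical struct foo and foo_bridge_funcs:

/* Hypothetical driver, illustrating the allocation contract only. */
struct foo {
	struct drm_bridge bridge;
	/* ... driver private state ... */
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo *foo;

	/* Returns the container; foo->bridge.funcs is already set. */
	foo = devm_drm_bridge_alloc(&pdev->dev, struct foo, bridge,
				    &foo_bridge_funcs);
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	platform_set_drvdata(pdev, foo);
	return 0;
}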
diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c
index 26ae1ab5237f..2cb6dfc7a6d3 100644
--- a/drivers/gpu/drm/bridge/fsl-ldb.c
+++ b/drivers/gpu/drm/bridge/fsl-ldb.c
@@ -113,11 +113,12 @@ static unsigned long fsl_ldb_link_frequency(struct fsl_ldb *fsl_ldb, int clock)
}
static int fsl_ldb_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
- return drm_bridge_attach(bridge->encoder, fsl_ldb->panel_bridge,
+ return drm_bridge_attach(encoder, fsl_ldb->panel_bridge,
bridge, flags);
}
@@ -180,9 +181,9 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
configured_link_freq = clk_get_rate(fsl_ldb->clk);
if (configured_link_freq != requested_link_freq)
- dev_warn(fsl_ldb->dev, "Configured LDB clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n",
- configured_link_freq,
- requested_link_freq);
+ dev_warn(fsl_ldb->dev,
+ "Configured %pC clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n",
+ fsl_ldb->clk, configured_link_freq, requested_link_freq);
clk_prepare_enable(fsl_ldb->clk);
diff --git a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
index 9b5bebbe357d..6149ba141a38 100644
--- a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
+++ b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
@@ -104,7 +104,7 @@ void ldb_bridge_disable_helper(struct drm_bridge *bridge)
}
EXPORT_SYMBOL_GPL(ldb_bridge_disable_helper);
-int ldb_bridge_attach_helper(struct drm_bridge *bridge,
+int ldb_bridge_attach_helper(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
@@ -116,9 +116,8 @@ int ldb_bridge_attach_helper(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
- ldb_ch->next_bridge, bridge,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ return drm_bridge_attach(encoder, ldb_ch->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
EXPORT_SYMBOL_GPL(ldb_bridge_attach_helper);
@@ -191,8 +190,7 @@ int ldb_find_next_bridge_helper(struct ldb *ldb)
}
EXPORT_SYMBOL_GPL(ldb_find_next_bridge_helper);
-void ldb_add_bridge_helper(struct ldb *ldb,
- const struct drm_bridge_funcs *bridge_funcs)
+void ldb_add_bridge_helper(struct ldb *ldb)
{
struct ldb_channel *ldb_ch;
int i;
@@ -204,7 +202,6 @@ void ldb_add_bridge_helper(struct ldb *ldb,
continue;
ldb_ch->bridge.driver_private = ldb_ch;
- ldb_ch->bridge.funcs = bridge_funcs;
ldb_ch->bridge.of_node = ldb_ch->np;
drm_bridge_add(&ldb_ch->bridge);
diff --git a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
index a0a5cde27fbc..de187e326999 100644
--- a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
+++ b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
@@ -81,15 +81,14 @@ void ldb_bridge_enable_helper(struct drm_bridge *bridge);
void ldb_bridge_disable_helper(struct drm_bridge *bridge);
-int ldb_bridge_attach_helper(struct drm_bridge *bridge,
+int ldb_bridge_attach_helper(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags);
int ldb_init_helper(struct ldb *ldb);
int ldb_find_next_bridge_helper(struct ldb *ldb);
-void ldb_add_bridge_helper(struct ldb *ldb,
- const struct drm_bridge_funcs *bridge_funcs);
+void ldb_add_bridge_helper(struct ldb *ldb);
void ldb_remove_bridge_helper(struct ldb *ldb);
diff --git a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
index 3ebf0b9866de..f072c6ed39ef 100644
--- a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
+++ b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
@@ -23,7 +23,8 @@ struct imx_legacy_bridge {
#define to_imx_legacy_bridge(bridge) container_of(bridge, struct imx_legacy_bridge, base)
static int imx_legacy_bridge_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
@@ -76,9 +77,9 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
imx_bridge->base.ops = DRM_BRIDGE_OP_MODES;
imx_bridge->base.type = type;
- ret = devm_drm_bridge_add(dev, &imx_bridge->base);
- if (ret)
- return ERR_PTR(ret);
+ ret = devm_drm_bridge_add(dev, &imx_bridge->base);
+ if (ret)
+ return ERR_PTR(ret);
return &imx_bridge->base;
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
index a17433a7c755..8a4fd7d77a8d 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
@@ -40,11 +40,12 @@ to_imx8mp_hdmi_pvi(struct drm_bridge *bridge)
}
static int imx8mp_hdmi_pvi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
- return drm_bridge_attach(bridge->encoder, pvi->next_bridge,
+ return drm_bridge_attach(encoder, pvi->next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
index 524aac751359..47aa65938e6a 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
@@ -47,7 +47,7 @@ struct imx8qm_ldb_channel {
struct imx8qm_ldb {
struct ldb base;
struct device *dev;
- struct imx8qm_ldb_channel channel[MAX_LDB_CHAN_NUM];
+ struct imx8qm_ldb_channel *channel[MAX_LDB_CHAN_NUM];
struct clk *clk_pixel;
struct clk *clk_bypass;
int active_chno;
@@ -107,7 +107,7 @@ static int imx8qm_ldb_bridge_atomic_check(struct drm_bridge *bridge,
if (is_split) {
imx8qm_ldb_ch =
- &imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
+ imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, true,
phy_cfg);
ret = phy_validate(imx8qm_ldb_ch->phy, PHY_MODE_LVDS, 0, &opts);
@@ -158,7 +158,7 @@ imx8qm_ldb_bridge_mode_set(struct drm_bridge *bridge,
if (is_split) {
imx8qm_ldb_ch =
- &imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
+ imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, true,
phy_cfg);
ret = phy_configure(imx8qm_ldb_ch->phy, &opts);
@@ -226,13 +226,13 @@ static void imx8qm_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
}
if (is_split) {
- ret = phy_power_on(imx8qm_ldb->channel[0].phy);
+ ret = phy_power_on(imx8qm_ldb->channel[0]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power on channel0 PHY: %d\n",
ret);
- ret = phy_power_on(imx8qm_ldb->channel[1].phy);
+ ret = phy_power_on(imx8qm_ldb->channel[1]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power on channel1 PHY: %d\n",
@@ -261,12 +261,12 @@ static void imx8qm_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
ldb_bridge_disable_helper(bridge);
if (is_split) {
- ret = phy_power_off(imx8qm_ldb->channel[0].phy);
+ ret = phy_power_off(imx8qm_ldb->channel[0]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power off channel0 PHY: %d\n",
ret);
- ret = phy_power_off(imx8qm_ldb->channel[1].phy);
+ ret = phy_power_off(imx8qm_ldb->channel[1]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power off channel1 PHY: %d\n",
@@ -412,7 +412,7 @@ static int imx8qm_ldb_get_phy(struct imx8qm_ldb *imx8qm_ldb)
int i, ret;
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
- imx8qm_ldb_ch = &imx8qm_ldb->channel[i];
+ imx8qm_ldb_ch = imx8qm_ldb->channel[i];
ldb_ch = &imx8qm_ldb_ch->base;
if (!ldb_ch->is_available)
@@ -448,6 +448,14 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
if (!imx8qm_ldb)
return -ENOMEM;
+ for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
+ imx8qm_ldb->channel[i] =
+ devm_drm_bridge_alloc(dev, struct imx8qm_ldb_channel, base.bridge,
+ &imx8qm_ldb_bridge_funcs);
+ if (IS_ERR(imx8qm_ldb->channel[i]))
+ return PTR_ERR(imx8qm_ldb->channel[i]);
+ }
+
imx8qm_ldb->clk_pixel = devm_clk_get(dev, "pixel");
if (IS_ERR(imx8qm_ldb->clk_pixel)) {
ret = PTR_ERR(imx8qm_ldb->clk_pixel);
@@ -473,7 +481,7 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
ldb->ctrl_reg = 0xe0;
for (i = 0; i < MAX_LDB_CHAN_NUM; i++)
- ldb->channel[i] = &imx8qm_ldb->channel[i].base;
+ ldb->channel[i] = &imx8qm_ldb->channel[i]->base;
ret = ldb_init_helper(ldb);
if (ret)
@@ -499,12 +507,12 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
}
imx8qm_ldb->active_chno = 0;
- imx8qm_ldb_ch = &imx8qm_ldb->channel[0];
+ imx8qm_ldb_ch = imx8qm_ldb->channel[0];
ldb_ch = &imx8qm_ldb_ch->base;
ldb_ch->link_type = pixel_order;
} else {
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
- imx8qm_ldb_ch = &imx8qm_ldb->channel[i];
+ imx8qm_ldb_ch = imx8qm_ldb->channel[i];
ldb_ch = &imx8qm_ldb_ch->base;
if (ldb_ch->is_available) {
@@ -525,7 +533,7 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imx8qm_ldb);
pm_runtime_enable(dev);
- ldb_add_bridge_helper(ldb, &imx8qm_ldb_bridge_funcs);
+ ldb_add_bridge_helper(ldb);
return ret;
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
index 3cb484773ddf..5d272916e200 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
@@ -44,7 +44,7 @@ struct imx8qxp_ldb_channel {
struct imx8qxp_ldb {
struct ldb base;
struct device *dev;
- struct imx8qxp_ldb_channel channel[MAX_LDB_CHAN_NUM];
+ struct imx8qxp_ldb_channel *channel[MAX_LDB_CHAN_NUM];
struct clk *clk_pixel;
struct clk *clk_bypass;
struct drm_bridge *companion;
@@ -410,7 +410,7 @@ static const struct drm_bridge_funcs imx8qxp_ldb_bridge_funcs = {
static int imx8qxp_ldb_set_di_id(struct imx8qxp_ldb *imx8qxp_ldb)
{
struct imx8qxp_ldb_channel *imx8qxp_ldb_ch =
- &imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
+ imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
struct ldb_channel *ldb_ch = &imx8qxp_ldb_ch->base;
struct device_node *ep, *remote;
struct device *dev = imx8qxp_ldb->dev;
@@ -456,7 +456,7 @@ imx8qxp_ldb_check_chno_and_dual_link(struct ldb_channel *ldb_ch, int link)
static int imx8qxp_ldb_parse_dt_companion(struct imx8qxp_ldb *imx8qxp_ldb)
{
struct imx8qxp_ldb_channel *imx8qxp_ldb_ch =
- &imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
+ imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
struct ldb_channel *ldb_ch = &imx8qxp_ldb_ch->base;
struct ldb_channel *companion_ldb_ch;
struct device_node *companion;
@@ -586,6 +586,14 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
if (!imx8qxp_ldb)
return -ENOMEM;
+ for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
+ imx8qxp_ldb->channel[i] =
+ devm_drm_bridge_alloc(dev, struct imx8qxp_ldb_channel, base.bridge,
+ &imx8qxp_ldb_bridge_funcs);
+ if (IS_ERR(imx8qxp_ldb->channel[i]))
+ return PTR_ERR(imx8qxp_ldb->channel[i]);
+ }
+
imx8qxp_ldb->clk_pixel = devm_clk_get(dev, "pixel");
if (IS_ERR(imx8qxp_ldb->clk_pixel)) {
ret = PTR_ERR(imx8qxp_ldb->clk_pixel);
@@ -611,7 +619,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
ldb->ctrl_reg = 0xe0;
for (i = 0; i < MAX_LDB_CHAN_NUM; i++)
- ldb->channel[i] = &imx8qxp_ldb->channel[i].base;
+ ldb->channel[i] = &imx8qxp_ldb->channel[i]->base;
ret = ldb_init_helper(ldb);
if (ret)
@@ -627,7 +635,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
}
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
- imx8qxp_ldb_ch = &imx8qxp_ldb->channel[i];
+ imx8qxp_ldb_ch = imx8qxp_ldb->channel[i];
ldb_ch = &imx8qxp_ldb_ch->base;
if (ldb_ch->is_available) {
@@ -660,9 +668,9 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imx8qxp_ldb);
pm_runtime_enable(dev);
- ldb_add_bridge_helper(ldb, &imx8qxp_ldb_bridge_funcs);
+ ldb_add_bridge_helper(ldb);
- return ret;
+ return 0;
}
static void imx8qxp_ldb_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
index 1d9529dc7f2a..1f6fd488e703 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
@@ -108,6 +108,7 @@ imx8qxp_pc_bridge_mode_valid(struct drm_bridge *bridge,
}
static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8qxp_pc_channel *ch = bridge->driver_private;
@@ -119,7 +120,7 @@ static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
+ return drm_bridge_attach(encoder,
ch->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
index cd6818db0fd3..e092c9ea99b0 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
@@ -128,6 +128,7 @@ static void imx8qxp_pixel_link_set_mst_addr(struct imx8qxp_pixel_link *pl)
}
static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8qxp_pixel_link *pl = bridge->driver_private;
@@ -138,7 +139,7 @@ static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
+ return drm_bridge_attach(encoder,
pl->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
index 49dd4f96d52c..da138ab51b3b 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
@@ -48,6 +48,7 @@ struct imx8qxp_pxl2dpi {
#define bridge_to_p2d(b) container_of(b, struct imx8qxp_pxl2dpi, bridge)
static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8qxp_pxl2dpi *p2d = bridge->driver_private;
@@ -58,7 +59,7 @@ static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
+ return drm_bridge_attach(encoder,
p2d->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index 21152a1c28f7..a3a63a977b0a 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -665,13 +665,14 @@ it6263_bridge_mode_valid(struct drm_bridge *bridge,
}
static int it6263_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct it6263 *it = bridge_to_it6263(bridge);
struct drm_connector *connector;
int ret;
- ret = drm_bridge_attach(bridge->encoder, it->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, it->next_bridge, bridge,
flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -679,7 +680,7 @@ static int it6263_bridge_attach(struct drm_bridge *bridge,
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
return 0;
- connector = drm_bridge_connector_init(bridge->dev, bridge->encoder);
+ connector = drm_bridge_connector_init(bridge->dev, encoder);
if (IS_ERR(connector)) {
ret = PTR_ERR(connector);
dev_err(it->dev, "failed to initialize bridge connector: %d\n",
@@ -687,7 +688,7 @@ static int it6263_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- drm_connector_attach_encoder(connector, bridge->encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 8a607558ac89..1383d1e21afe 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -771,40 +771,6 @@ static void it6505_calc_video_info(struct it6505 *it6505)
DRM_MODE_ARG(&it6505->video_info));
}
-static int it6505_drm_dp_link_set_power(struct drm_dp_aux *aux,
- struct it6505_drm_dp_link *link,
- u8 mode)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < DPCD_V_1_1)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= mode;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- if (mode == DP_SET_POWER_D0) {
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
-
- return 0;
-}
-
static void it6505_clear_int(struct it6505 *it6505)
{
it6505_write(it6505, INT_STATUS_01, 0xFF);
@@ -2578,8 +2544,7 @@ static void it6505_irq_hpd(struct it6505 *it6505)
}
it6505->auto_train_retry = AUTO_TRAIN_RETRY;
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D0);
+ drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);
@@ -2910,8 +2875,7 @@ static enum drm_connector_status it6505_detect(struct it6505 *it6505)
}
if (it6505->hpd_state) {
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D0);
+ drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);
DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d branch:%d",
@@ -3124,6 +3088,7 @@ static inline struct it6505 *bridge_to_it6505(struct drm_bridge *bridge)
}
static int it6505_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
@@ -3233,8 +3198,7 @@ static void it6505_bridge_atomic_enable(struct drm_bridge *bridge,
it6505_int_mask_enable(it6505);
it6505_video_reset(it6505);
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D0);
+ drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
}
static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
@@ -3246,8 +3210,7 @@ static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
DRM_DEV_DEBUG_DRIVER(dev, "start");
if (it6505->powered) {
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D3);
+ drm_dp_link_power_down(&it6505->aux, it6505->link.revision);
it6505_video_disable(it6505);
}
}
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index b9f90f32145d..7b110ae53291 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -586,6 +586,7 @@ static bool it66121_is_hpd_detect(struct it66121_ctx *ctx)
}
static int it66121_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
@@ -594,7 +595,7 @@ static int it66121_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- ret = drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags);
+ ret = drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 52da204f5740..3e49d855b364 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -543,12 +543,13 @@ exit:
}
static int lt8912_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt8912 *lt = bridge_to_lt8912(bridge);
int ret;
- ret = drm_bridge_attach(bridge->encoder, lt->hdmi_port, bridge,
+ ret = drm_bridge_attach(encoder, lt->hdmi_port, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0) {
dev_err(lt->dev, "Failed to attach next bridge (%d)\n", ret);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 0fc5ea18fe6a..9b2dac9bd63c 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -99,11 +99,12 @@ static struct lt9211 *bridge_to_lt9211(struct drm_bridge *bridge)
}
static int lt9211_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt9211 *ctx = bridge_to_lt9211(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ return drm_bridge_attach(encoder, ctx->panel_bridge,
&ctx->bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 026803034231..a35a8b8ca89c 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -740,11 +740,12 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
}
static int lt9611_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
- return drm_bridge_attach(bridge->encoder, lt9611->next_bridge,
+ return drm_bridge_attach(encoder, lt9611->next_bridge,
bridge, flags);
}
@@ -1130,7 +1131,7 @@ static int lt9611_probe(struct i2c_client *client)
lt9611->bridge.of_node = client->dev.of_node;
lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES |
- DRM_BRIDGE_OP_HDMI;
+ DRM_BRIDGE_OP_HDMI | DRM_BRIDGE_OP_HDMI_AUDIO;
lt9611->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
lt9611->bridge.vendor = "Lontium";
lt9611->bridge.product = "LT9611";
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index f4c3ff1fdc69..766da2cb45a7 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -280,11 +280,12 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
}
static int lt9611uxc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
- return drm_bridge_attach(bridge->encoder, lt9611uxc->next_bridge,
+ return drm_bridge_attach(encoder, lt9611uxc->next_bridge,
bridge, flags);
}
@@ -774,9 +775,9 @@ static int lt9611uxc_probe(struct i2c_client *client)
return -ENODEV;
}
- lt9611uxc = devm_kzalloc(dev, sizeof(*lt9611uxc), GFP_KERNEL);
- if (!lt9611uxc)
- return -ENOMEM;
+ lt9611uxc = devm_drm_bridge_alloc(dev, struct lt9611uxc, bridge, &lt9611uxc_bridge_funcs);
+ if (IS_ERR(lt9611uxc))
+ return PTR_ERR(lt9611uxc);
lt9611uxc->dev = dev;
lt9611uxc->client = client;
@@ -855,7 +856,6 @@ retry:
i2c_set_clientdata(client, lt9611uxc);
- lt9611uxc->bridge.funcs = &lt9611uxc_bridge_funcs;
lt9611uxc->bridge.of_node = client->dev.of_node;
lt9611uxc->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
if (lt9611uxc->hpd_supported)
@@ -880,7 +880,11 @@ retry:
}
}
- return lt9611uxc_audio_init(dev, lt9611uxc);
+ ret = lt9611uxc_audio_init(dev, lt9611uxc);
+ if (ret)
+ goto err_remove_bridge;
+
+ return 0;
err_remove_bridge:
free_irq(client->irq, lt9611uxc);
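Note: the probe tail no longer returns lt9611uxc_audio_init() directly; a failure there now unwinds through err_remove_bridge so the requested IRQ (and, per the label, the registered bridge) is torn down instead of being leaked.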
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 389af0233fcd..1646e454e0b0 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -34,11 +34,12 @@ static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge)
}
static int lvds_codec_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
- return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge,
+ return drm_bridge_attach(encoder, lvds_codec->panel_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index a47aabf134fd..15a5a1f644fc 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -190,6 +190,7 @@ static irqreturn_t ge_b850v3_lvds_irq_handler(int irq, void *dev_id)
}
static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct i2c_client *stdp4028_i2c
diff --git a/drivers/gpu/drm/bridge/microchip-lvds.c b/drivers/gpu/drm/bridge/microchip-lvds.c
index 53dd140a1b8d..1d4ae0097df8 100644
--- a/drivers/gpu/drm/bridge/microchip-lvds.c
+++ b/drivers/gpu/drm/bridge/microchip-lvds.c
@@ -104,11 +104,12 @@ static void lvds_serialiser_on(struct mchp_lvds *lvds)
}
static int mchp_lvds_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mchp_lvds *lvds = bridge_to_lvds(bridge);
- return drm_bridge_attach(bridge->encoder, lvds->panel_bridge,
+ return drm_bridge_attach(encoder, lvds->panel_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index d04c62a0cb9f..55912ae11f46 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -910,6 +910,7 @@ static void nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
}
static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
@@ -919,7 +920,7 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
if (IS_ERR(panel_bridge))
return PTR_ERR(panel_bridge);
- return drm_bridge_attach(bridge->encoder, panel_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, panel_bridge, bridge, flags);
}
static u32 *nwl_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 27261b2ac9c8..25d7c415478b 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -214,13 +214,14 @@ static const struct drm_connector_funcs ptn3460_connector_funcs = {
};
static int ptn3460_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
int ret;
/* Let this driver create connector if requested */
- ret = drm_bridge_attach(bridge->encoder, ptn_bridge->panel_bridge,
+ ret = drm_bridge_attach(encoder, ptn_bridge->panel_bridge,
bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -239,7 +240,7 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge,
&ptn3460_connector_helper_funcs);
drm_connector_register(&ptn_bridge->connector);
drm_connector_attach_encoder(&ptn_bridge->connector,
- bridge->encoder);
+ encoder);
drm_helper_hpd_irq_event(ptn_bridge->connector.dev);
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 258c85c83a28..79b009ab9396 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -58,6 +58,7 @@ static const struct drm_connector_funcs panel_bridge_connector_funcs = {
};
static int panel_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
@@ -81,7 +82,7 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
drm_panel_bridge_set_orientation(connector, bridge);
drm_connector_attach_encoder(&panel_bridge->connector,
- bridge->encoder);
+ encoder);
if (bridge->dev->registered) {
if (connector->funcs->reset)
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 13ada42a5514..8726fefc5c65 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -418,6 +418,7 @@ static void ps8622_post_disable(struct drm_bridge *bridge)
}
static int ps8622_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index a42138b33258..2422ff68c104 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -494,6 +494,7 @@ static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
}
static int ps8640_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
@@ -518,7 +519,7 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge,
}
/* Attach the panel-bridge to the dsi bridge */
- ret = drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
+ ret = drm_bridge_attach(encoder, ps_bridge->panel_bridge,
&ps_bridge->bridge, flags);
if (ret)
goto err_bridge_attach;
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index 54de6ed2fae8..0014c497e3fe 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -1640,11 +1640,12 @@ static void samsung_dsim_mode_set(struct drm_bridge *bridge,
}
static int samsung_dsim_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, dsi->out_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->out_bridge, bridge,
flags);
}
@@ -1935,9 +1936,9 @@ int samsung_dsim_probe(struct platform_device *pdev)
struct samsung_dsim *dsi;
int ret, i;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct samsung_dsim, bridge, &samsung_dsim_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
init_completion(&dsi->completed);
spin_lock_init(&dsi->transfer_lock);
@@ -2007,7 +2008,6 @@ int samsung_dsim_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- dsi->bridge.funcs = &samsung_dsim_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 914a2609a685..6de61d9fe064 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -416,6 +416,7 @@ out:
}
static int sii902x_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
@@ -424,7 +425,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
int ret;
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
- return drm_bridge_attach(bridge->encoder, sii902x->next_bridge,
+ return drm_bridge_attach(encoder, sii902x->next_bridge,
bridge, flags);
drm_connector_helper_add(&sii902x->connector,
@@ -452,7 +453,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
if (ret)
return ret;
- drm_connector_attach_encoder(&sii902x->connector, bridge->encoder);
+ drm_connector_attach_encoder(&sii902x->connector, encoder);
return 0;
}
@@ -1138,6 +1139,7 @@ static int sii902x_init(struct sii902x *sii902x)
sii902x->bridge.of_node = dev->of_node;
sii902x->bridge.timings = &default_sii902x_timings;
sii902x->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+ sii902x->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
if (sii902x->i2c->irq > 0)
sii902x->bridge.ops |= DRM_BRIDGE_OP_HPD;
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 28a2e1ee04b2..3af650dc92a1 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2203,6 +2203,7 @@ static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
}
static int sii8620_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
index ab0b0e36e97a..70db5b99e5bb 100644
--- a/drivers/gpu/drm/bridge/simple-bridge.c
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -103,12 +103,13 @@ static const struct drm_connector_funcs simple_bridge_con_funcs = {
};
static int simple_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
int ret;
- ret = drm_bridge_attach(bridge->encoder, sbridge->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, sbridge->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -127,7 +128,7 @@ static int simple_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- drm_connector_attach_encoder(&sbridge->connector, bridge->encoder);
+ drm_connector_attach_encoder(&sbridge->connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index 6166f197e37b..5e5f8c2f95be 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -1077,6 +1077,7 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HDMI |
+ DRM_BRIDGE_OP_HDMI_AUDIO |
DRM_BRIDGE_OP_HPD;
hdmi->bridge.of_node = pdev->dev.of_node;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 0890add5f707..8791408dd1ff 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -22,8 +22,8 @@
#include <media/cec-notifier.h>
-#include <uapi/linux/media-bus-format.h>
-#include <uapi/linux/videodev2.h>
+#include <linux/media-bus-format.h>
+#include <linux/videodev2.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/display/drm_hdmi_helper.h>
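Note: in-kernel code is expected to include the <linux/...> wrapper headers, which pull in the corresponding uapi definitions, rather than reaching into <uapi/linux/...> directly; the include change above follows that convention.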
@@ -2889,12 +2889,13 @@ static int dw_hdmi_bridge_atomic_check(struct drm_bridge *bridge,
}
static int dw_hdmi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dw_hdmi *hdmi = bridge->driver_private;
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
- return drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
+ return drm_bridge_attach(encoder, hdmi->next_bridge,
bridge, flags);
return dw_hdmi_connector_create(hdmi);
@@ -3332,9 +3333,9 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
u8 config0;
u8 config3;
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return ERR_PTR(-ENOMEM);
+ hdmi = devm_drm_bridge_alloc(dev, struct dw_hdmi, bridge, &dw_hdmi_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return hdmi;
hdmi->plat_data = plat_data;
hdmi->dev = dev;
@@ -3494,7 +3495,6 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
}
hdmi->bridge.driver_private = hdmi;
- hdmi->bridge.funcs = &dw_hdmi_bridge_funcs;
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
hdmi->bridge.interlace_allowed = true;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 2b6e70a49f43..b08ada920a50 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -1072,15 +1072,16 @@ dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
}
static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
/* Set the encoder type as caller does not know it */
- bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
+ encoder->encoder_type = DRM_MODE_ENCODER_DSI;
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->panel_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
index 5fd7a459efdd..c76f5f2e74d1 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
@@ -870,15 +870,16 @@ dw_mipi_dsi2_bridge_mode_valid(struct drm_bridge *bridge,
}
static int dw_mipi_dsi2_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
/* Set the encoder type as caller does not know it */
- bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
+ encoder->encoder_type = DRM_MODE_ENCODER_DSI;
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi2->panel_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi2->panel_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
index 49c76027f831..edf01476f2ef 100644
--- a/drivers/gpu/drm/bridge/tc358762.c
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -202,11 +202,12 @@ static void tc358762_enable(struct drm_bridge *bridge,
}
static int tc358762_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc358762 *ctx = bridge_to_tc358762(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ return drm_bridge_attach(encoder, ctx->panel_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 3d3d135b4348..3f76c890fad9 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -295,11 +295,12 @@ static void tc358764_pre_enable(struct drm_bridge *bridge)
}
static int tc358764_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs tc358764_bridge_funcs = {
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 39e2d3a7a27d..7e5449fb86a3 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1795,6 +1795,7 @@ static const struct drm_connector_funcs tc_connector_funcs = {
};
static int tc_dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc_data *tc = bridge_to_tc(bridge);
@@ -1807,6 +1808,7 @@ static int tc_dpi_bridge_attach(struct drm_bridge *bridge,
}
static int tc_edp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index ec79b0dd0e2c..063f217a17b6 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -554,6 +554,7 @@ static const struct mipi_dsi_host_ops tc358768_dsi_host_ops = {
};
static int tc358768_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
@@ -563,7 +564,7 @@ static int tc358768_bridge_attach(struct drm_bridge *bridge,
return -ENOTSUPP;
}
- return drm_bridge_attach(bridge->encoder, priv->output.bridge, bridge,
+ return drm_bridge_attach(encoder, priv->output.bridge, bridge,
flags);
}
@@ -580,7 +581,8 @@ tc358768_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static void tc358768_bridge_disable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
int ret;
@@ -602,7 +604,8 @@ static void tc358768_bridge_disable(struct drm_bridge *bridge)
dev_warn(priv->dev, "Software disable failed: %d\n", ret);
}
-static void tc358768_bridge_post_disable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
@@ -682,13 +685,17 @@ static u32 tc358768_dsi_bytes_to_ns(struct tc358768_priv *priv, u32 val)
return (u32)div_u64(m, n);
}
-static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
struct mipi_dsi_device *dsi_dev = priv->output.dev;
unsigned long mode_flags = dsi_dev->mode_flags;
u32 val, val2, lptxcnt, hact, data_type;
s32 raw_val;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
const struct drm_display_mode *mode;
u32 hsbyteclk_ps, dsiclk_ps, ui_ps;
u32 dsiclk, hsbyteclk;
@@ -719,7 +726,10 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
return;
}
- mode = &bridge->encoder->crtc->state->adjusted_mode;
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ mode = &crtc_state->adjusted_mode;
ret = tc358768_setup_pll(priv, mode);
if (ret) {
dev_err(dev, "PLL setup failed: %d\n", ret);
@@ -1076,14 +1086,12 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
tc358768_write(priv, TC358768_DSI_CONFW, val);
ret = tc358768_clear_error(priv);
- if (ret) {
+ if (ret)
dev_err(dev, "Bridge pre_enable failed: %d\n", ret);
- tc358768_bridge_disable(bridge);
- tc358768_bridge_post_disable(bridge);
- }
}
-static void tc358768_bridge_enable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
int ret;
@@ -1100,11 +1108,8 @@ static void tc358768_bridge_enable(struct drm_bridge *bridge)
tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), BIT(6));
ret = tc358768_clear_error(priv);
- if (ret) {
+ if (ret)
dev_err(priv->dev, "Bridge enable failed: %d\n", ret);
- tc358768_bridge_disable(bridge);
- tc358768_bridge_post_disable(bridge);
- }
}
#define MAX_INPUT_SEL_FORMATS 1
@@ -1166,10 +1171,10 @@ static const struct drm_bridge_funcs tc358768_bridge_funcs = {
.attach = tc358768_bridge_attach,
.mode_valid = tc358768_bridge_mode_valid,
.mode_fixup = tc358768_mode_fixup,
- .pre_enable = tc358768_bridge_pre_enable,
- .enable = tc358768_bridge_enable,
- .disable = tc358768_bridge_disable,
- .post_disable = tc358768_bridge_post_disable,
+ .atomic_pre_enable = tc358768_bridge_atomic_pre_enable,
+ .atomic_enable = tc358768_bridge_atomic_enable,
+ .atomic_disable = tc358768_bridge_atomic_disable,
+ .atomic_post_disable = tc358768_bridge_atomic_post_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
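Both converted enable paths in this driver resolve the mode from the in-flight commit instead of chasing bridge->encoder->crtc->state. The lookup chain, condensed into a sketch (valid only while the encoder is part of the same atomic commit):

/* Sketch: resolve the adjusted mode from the atomic state. */
static const struct drm_display_mode *
sketch_get_new_adjusted_mode(struct drm_bridge *bridge,
			     struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;

	connector = drm_atomic_get_new_connector_for_encoder(state,
							     bridge->encoder);
	conn_state = drm_atomic_get_new_connector_state(state, connector);
	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);

	return &crtc_state->adjusted_mode;
}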
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index c89757bec4e6..1b10e6ee1724 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -286,7 +286,8 @@ static inline struct tc_data *bridge_to_tc(struct drm_bridge *b)
return container_of(b, struct tc_data, bridge);
}
-static void tc_bridge_pre_enable(struct drm_bridge *bridge)
+static void tc_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
struct device *dev = &tc->dsi->dev;
@@ -309,7 +310,8 @@ static void tc_bridge_pre_enable(struct drm_bridge *bridge)
usleep_range(10, 20);
}
-static void tc_bridge_post_disable(struct drm_bridge *bridge)
+static void tc_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
struct device *dev = &tc->dsi->dev;
@@ -368,30 +370,21 @@ static void d2l_write(struct i2c_client *i2c, u16 addr, u32 val)
ret, addr);
}
-/* helper function to access bus_formats */
-static struct drm_connector *get_connector(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_connector *connector;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- if (connector->encoder == encoder)
- return connector;
-
- return NULL;
-}
-
-static void tc_bridge_enable(struct drm_bridge *bridge)
+static void tc_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
u32 hback_porch, hsync_len, hfront_porch, hactive, htime1, htime2;
u32 vback_porch, vsync_len, vfront_porch, vactive, vtime1, vtime2;
u32 val = 0;
u16 dsiclk, clkdiv, byteclk, t1, t2, t3, vsdelay;
- struct drm_display_mode *mode;
- struct drm_connector *connector = get_connector(bridge->encoder);
-
- mode = &bridge->encoder->crtc->state->adjusted_mode;
+ struct drm_connector *connector =
+ drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
hback_porch = mode->htotal - mode->hsync_end;
hsync_len = mode->hsync_end - mode->hsync_start;
@@ -589,21 +582,25 @@ static int tc358775_parse_dt(struct device_node *np, struct tc_data *tc)
}
static int tc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc_data *tc = bridge_to_tc(bridge);
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, tc->panel_bridge,
+ return drm_bridge_attach(encoder, tc->panel_bridge,
&tc->bridge, flags);
}
static const struct drm_bridge_funcs tc_bridge_funcs = {
.attach = tc_bridge_attach,
- .pre_enable = tc_bridge_pre_enable,
- .enable = tc_bridge_enable,
+ .atomic_pre_enable = tc_bridge_atomic_pre_enable,
+ .atomic_enable = tc_bridge_atomic_enable,
.mode_valid = tc_mode_valid,
- .post_disable = tc_bridge_post_disable,
+ .atomic_post_disable = tc_bridge_atomic_post_disable,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
};
static int tc_attach_host(struct tc_data *tc)
diff --git a/drivers/gpu/drm/bridge/tda998x_drv.c b/drivers/gpu/drm/bridge/tda998x_drv.c
index 20658258fb51..850909f78a7b 100644
--- a/drivers/gpu/drm/bridge/tda998x_drv.c
+++ b/drivers/gpu/drm/bridge/tda998x_drv.c
@@ -1365,6 +1365,7 @@ static int tda998x_connector_init(struct tda998x_priv *priv,
/* DRM bridge functions */
static int tda998x_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
@@ -1780,9 +1781,9 @@ static int tda998x_create(struct device *dev)
u32 video;
int rev_lo, rev_hi, ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_bridge_alloc(dev, struct tda998x_priv, bridge, &tda998x_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
dev_set_drvdata(dev, priv);
@@ -1947,7 +1948,6 @@ static int tda998x_create(struct device *dev)
tda998x_audio_codec_init(priv, &client->dev);
}
- priv->bridge.funcs = &tda998x_bridge_funcs;
#ifdef CONFIG_OF
priv->bridge.of_node = dev->of_node;
#endif
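For reference, a hedged sketch of the devm_drm_bridge_alloc() conversion applied to tda998x above and repeated in the bridges below; the struct and function names are illustrative only:

	struct example_priv {
		struct drm_bridge bridge;	/* must be embedded by value */
		/* driver-private fields ... */
	};

	static const struct drm_bridge_funcs example_funcs = {
		/* .attach, .atomic_enable, ... */
	};

	static int example_probe(struct device *dev)
	{
		struct example_priv *priv;

		/*
		 * One call allocates the container, initialises the embedded
		 * bridge (including bridge.funcs) and ties its lifetime to
		 * dev, so the old devm_kzalloc() plus manual
		 * "priv->bridge.funcs = ..." pair goes away. Failures come
		 * back as ERR_PTR values rather than NULL.
		 */
		priv = devm_drm_bridge_alloc(dev, struct example_priv, bridge,
					     &example_funcs);
		if (IS_ERR(priv))
			return PTR_ERR(priv);

		return 0;
	}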
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index bba10cf9b4f9..e2fc78adebcf 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -43,11 +43,12 @@ static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge)
}
static int thc63_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct thc63_dev *thc63 = to_thc63(bridge);
- return drm_bridge_attach(bridge->encoder, thc63->next, bridge, flags);
+ return drm_bridge_attach(encoder, thc63->next, bridge, flags);
}
static enum drm_mode_status thc63_mode_valid(struct drm_bridge *bridge,
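The attach changes in thc63 and the bridges that follow all adopt the same new prototype, where the encoder is passed in explicitly rather than read back from bridge->encoder; a stand-alone sketch with hypothetical example_ names:

	static int example_attach(struct drm_bridge *bridge,
				  struct drm_encoder *encoder,
				  enum drm_bridge_attach_flags flags)
	{
		struct example_priv *priv = bridge_to_example(bridge);

		/* Chain the next bridge onto the encoder we were handed;
		 * bridge->encoder is no longer consulted here. */
		return drm_bridge_attach(encoder, priv->next_bridge,
					 bridge, flags);
	}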
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index 85f2a0e74a1c..47638d1c96ec 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -242,12 +242,12 @@ static void dlpc_mode_set(struct drm_bridge *bridge,
drm_mode_copy(&dlpc->mode, adjusted_mode);
}
-static int dlpc_attach(struct drm_bridge *bridge,
+static int dlpc_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dlpc *dlpc = bridge_to_dlpc(bridge);
- return drm_bridge_attach(bridge->encoder, dlpc->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, dlpc->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs dlpc_bridge_funcs = {
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 95563aa1b450..033c44326552 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -40,7 +40,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_drv.h> /* DRM_MODESET_LOCK_ALL_BEGIN() needs drm_drv_uses_atomic_modeset() */
+#include <drm/drm_bridge_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
@@ -290,11 +290,12 @@ static struct sn65dsi83 *bridge_to_sn65dsi83(struct drm_bridge *bridge)
}
static int sn65dsi83_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ return drm_bridge_attach(encoder, ctx->panel_bridge,
&ctx->bridge, flags);
}
@@ -370,7 +371,6 @@ static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83)
{
- struct drm_device *dev = sn65dsi83->bridge.dev;
struct drm_modeset_acquire_ctx ctx;
int err;
@@ -385,26 +385,21 @@ static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83)
* Keep the lock during the whole operation to be atomic.
*/
- DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
-
- if (!sn65dsi83->bridge.encoder->crtc) {
- /*
- * No CRTC attached -> No CRTC active outputs to reset
- * This can happen when the SN65DSI83 is reset. Simply do
- * nothing without returning any errors.
- */
- err = 0;
- goto end;
- }
+ drm_modeset_acquire_init(&ctx, 0);
dev_warn(sn65dsi83->dev, "reset the pipe\n");
- err = drm_atomic_helper_reset_crtc(sn65dsi83->bridge.encoder->crtc, &ctx);
+retry:
+ err = drm_bridge_helper_reset_crtc(&sn65dsi83->bridge, &ctx);
+ if (err == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
-end:
- DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
- return err;
+ return 0;
}
static void sn65dsi83_reset_work(struct work_struct *ws)
@@ -946,9 +941,9 @@ static int sn65dsi83_probe(struct i2c_client *client)
struct sn65dsi83 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct sn65dsi83, bridge, &sn65dsi83_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
INIT_WORK(&ctx->reset_work, sn65dsi83_reset_work);
@@ -988,7 +983,6 @@ static int sn65dsi83_probe(struct i2c_client *client)
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &sn65dsi83_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
ctx->bridge.type = DRM_MODE_CONNECTOR_LVDS;
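The reworked sn65dsi83_reset_pipe() above trades DRM_MODESET_LOCK_ALL_BEGIN/END for an explicit acquire context around the new drm_bridge_helper_reset_crtc(), which finds the CRTC from the bridge itself. A hedged sketch of that canonical -EDEADLK backoff loop, assuming a valid struct drm_bridge pointer:

	static int example_reset_pipe(struct drm_bridge *bridge)
	{
		struct drm_modeset_acquire_ctx ctx;
		int err;

		drm_modeset_acquire_init(&ctx, 0);
	retry:
		err = drm_bridge_helper_reset_crtc(bridge, &ctx);
		if (err == -EDEADLK) {
			/* Lock contention: drop everything we hold, wait
			 * for the contended lock, then retry the sequence. */
			drm_modeset_backoff(&ctx);
			goto retry;
		}

		drm_modeset_drop_locks(&ctx);
		drm_modeset_acquire_fini(&ctx);

		return err;
	}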
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 01d456b955ab..60224f476e1d 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -35,6 +35,7 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#define SN_DEVICE_ID_REGS 0x00 /* up to 0x07 */
#define SN_DEVICE_REV_REG 0x08
#define SN_DPPLL_SRC_REG 0x0A
#define DPPLL_CLK_SRC_DSICLK BIT(0)
@@ -243,11 +244,26 @@ static void ti_sn65dsi86_write_u16(struct ti_sn65dsi86 *pdata,
regmap_bulk_write(pdata->regmap, reg, buf, ARRAY_SIZE(buf));
}
-static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata)
+static struct drm_display_mode *
+get_new_adjusted_display_mode(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector *connector =
+ drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+
+ return &crtc_state->adjusted_mode;
+}
+
+static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
u32 bit_rate_khz, clk_freq_khz;
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
bit_rate_khz = mode->clock *
mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
@@ -274,7 +290,8 @@ static const u32 ti_sn_bridge_dsiclk_lut[] = {
460800000,
};
-static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
+static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
int i;
u32 refclk_rate;
@@ -287,7 +304,7 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_refclk_lut);
clk_prepare_enable(pdata->refclk);
} else {
- refclk_rate = ti_sn_bridge_get_dsi_freq(pdata) * 1000;
+ refclk_rate = ti_sn_bridge_get_dsi_freq(pdata, state) * 1000;
refclk_lut = ti_sn_bridge_dsiclk_lut;
refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_dsiclk_lut);
}
@@ -311,12 +328,13 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
pdata->pwm_refclk_freq = ti_sn_bridge_refclk_lut[i];
}
-static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata)
+static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
mutex_lock(&pdata->comms_mutex);
/* configure bridge ref_clk */
- ti_sn_bridge_set_refclk_freq(pdata);
+ ti_sn_bridge_set_refclk_freq(pdata, state);
/*
* HPD on this bridge chip is a bit useless. This is an eDP bridge
@@ -376,7 +394,7 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
* clock so reading early doesn't work.
*/
if (pdata->refclk)
- ti_sn65dsi86_enable_comms(pdata);
+ ti_sn65dsi86_enable_comms(pdata, NULL);
return ret;
}
@@ -423,36 +441,8 @@ static int status_show(struct seq_file *s, void *data)
return 0;
}
-
DEFINE_SHOW_ATTRIBUTE(status);
-static void ti_sn65dsi86_debugfs_remove(void *data)
-{
- debugfs_remove_recursive(data);
-}
-
-static void ti_sn65dsi86_debugfs_init(struct ti_sn65dsi86 *pdata)
-{
- struct device *dev = pdata->dev;
- struct dentry *debugfs;
- int ret;
-
- debugfs = debugfs_create_dir(dev_name(dev), NULL);
-
- /*
- * We might get an error back if debugfs wasn't enabled in the kernel
- * so let's just silently return upon failure.
- */
- if (IS_ERR_OR_NULL(debugfs))
- return;
-
- ret = devm_add_action_or_reset(dev, ti_sn65dsi86_debugfs_remove, debugfs);
- if (ret)
- return;
-
- debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
-}
-
/* -----------------------------------------------------------------------------
* Auxiliary Devices (*not* AUX)
*/
@@ -732,6 +722,7 @@ static int ti_sn_attach_host(struct auxiliary_device *adev, struct ti_sn65dsi86
}
static int ti_sn_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -748,7 +739,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
* Attach the next bridge.
* We never want the next bridge to *also* create a connector.
*/
- ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge,
+ ret = drm_bridge_attach(encoder, pdata->next_bridge,
&pdata->bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
goto err_initted_aux;
@@ -821,12 +812,13 @@ static void ti_sn_bridge_atomic_disable(struct drm_bridge *bridge,
regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE, 0);
}
-static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata)
+static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
unsigned int bit_rate_mhz, clk_freq_mhz;
unsigned int val;
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
/* set DSIA clk frequency */
bit_rate_mhz = (mode->clock / 1000) *
@@ -856,12 +848,14 @@ static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
};
-static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata, unsigned int bpp)
+static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state,
+ unsigned int bpp)
{
unsigned int bit_rate_khz, dp_rate_mhz;
unsigned int i;
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
/* Calculate minimum bit rate based on our pixel clock. */
bit_rate_khz = mode->clock * bpp;
@@ -960,10 +954,11 @@ static unsigned int ti_sn_bridge_read_valid_rates(struct ti_sn65dsi86 *pdata)
return valid_rates;
}
-static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata)
+static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
u8 hsync_polarity = 0, vsync_polarity = 0;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -1105,7 +1100,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
pdata->ln_polrs << LN_POLRS_OFFSET);
/* set dsi clk frequency value */
- ti_sn_bridge_set_dsi_rate(pdata);
+ ti_sn_bridge_set_dsi_rate(pdata, state);
/*
* The SN65DSI86 only supports ASSR Display Authentication method and
@@ -1140,7 +1135,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
valid_rates = ti_sn_bridge_read_valid_rates(pdata);
/* Train until we run out of rates */
- for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, bpp);
+ for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, state, bpp);
dp_rate_idx < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
dp_rate_idx++) {
if (!(valid_rates & BIT(dp_rate_idx)))
@@ -1156,7 +1151,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
}
/* config video parameters */
- ti_sn_bridge_set_video_timings(pdata);
+ ti_sn_bridge_set_video_timings(pdata, state);
/* enable video stream */
regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE,
@@ -1171,7 +1166,7 @@ static void ti_sn_bridge_atomic_pre_enable(struct drm_bridge *bridge,
pm_runtime_get_sync(pdata->dev);
if (!pdata->refclk)
- ti_sn65dsi86_enable_comms(pdata);
+ ti_sn65dsi86_enable_comms(pdata, state);
/* td7: min 100 us after enable before DSI data */
usleep_range(100, 110);
@@ -1216,6 +1211,15 @@ static const struct drm_edid *ti_sn_bridge_edid_read(struct drm_bridge *bridge,
return drm_edid_read_ddc(connector, &pdata->aux.ddc);
}
+static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+{
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ struct dentry *debugfs;
+
+ debugfs = debugfs_create_dir(dev_name(pdata->dev), root);
+ debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
+}
+
static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.attach = ti_sn_bridge_attach,
.detach = ti_sn_bridge_detach,
@@ -1229,6 +1233,7 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .debugfs_init = ti_sn65dsi86_debugfs_init,
};
static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata,
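With the drm_bridge_funcs .debugfs_init hook registered above, the DRM core hands the bridge a root dentry below the DRM device's debugfs directory and tears it down with the device, which is why the driver-side devm cleanup action could be deleted; a sketch with hypothetical example_ names:

	static void example_debugfs_init(struct drm_bridge *bridge,
					 struct dentry *root)
	{
		struct example_priv *priv = bridge_to_example(bridge);

		/* Files hang off the core-managed root; no explicit
		 * debugfs_remove_recursive() is needed on teardown. */
		debugfs_create_file("status", 0600, root, priv,
				    &example_status_fops);
	}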
@@ -1312,7 +1317,6 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
if (ret)
return ret;
- pdata->bridge.funcs = &ti_sn_bridge_funcs;
pdata->bridge.of_node = np;
pdata->bridge.type = pdata->next_bridge->type == DRM_MODE_CONNECTOR_DisplayPort
? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP;
@@ -1894,6 +1898,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ti_sn65dsi86 *pdata;
+ u8 id_buf[8];
int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
@@ -1901,9 +1906,9 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
return -ENODEV;
}
- pdata = devm_kzalloc(dev, sizeof(struct ti_sn65dsi86), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ pdata = devm_drm_bridge_alloc(dev, struct ti_sn65dsi86, bridge, &ti_sn_bridge_funcs);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
dev_set_drvdata(dev, pdata);
pdata->dev = dev;
@@ -1937,7 +1942,15 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
if (ret)
return ret;
- ti_sn65dsi86_debugfs_init(pdata);
+ pm_runtime_get_sync(dev);
+ ret = regmap_bulk_read(pdata->regmap, SN_DEVICE_ID_REGS, id_buf, ARRAY_SIZE(id_buf));
+ pm_runtime_put_autosuspend(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to read device id\n");
+
+ /* The ID string is stored backwards: "68ISD " is " DSI86" reversed */
+ if (strncmp(id_buf, "68ISD ", ARRAY_SIZE(id_buf)))
+ return dev_err_probe(dev, -EOPNOTSUPP, "unsupported device id\n");
/*
* Break ourselves up into a collection of aux devices. The only real
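The probe now verifies the silicon before carving the part up into auxiliary devices: the first eight registers hold an ASCII device ID stored in reverse order. A hedged stand-alone sketch of the same check, assuming a struct regmap for the part:

	static int example_check_id(struct regmap *regmap)
	{
		u8 id_buf[8];
		int ret;

		/* Registers 0x00..0x07 hold the ASCII device ID. */
		ret = regmap_bulk_read(regmap, 0x00, id_buf,
				       ARRAY_SIZE(id_buf));
		if (ret)
			return ret;

		/* The string is stored in reverse order. */
		if (strncmp(id_buf, "68ISD ", ARRAY_SIZE(id_buf)))
			return -EOPNOTSUPP;

		return 0;
	}

As the hunk above shows, the patch additionally brackets the read with pm_runtime_get_sync()/pm_runtime_put_autosuspend() so the registers are powered and readable at probe time.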
diff --git a/drivers/gpu/drm/bridge/ti-tdp158.c b/drivers/gpu/drm/bridge/ti-tdp158.c
index 22316382451f..cca75443f012 100644
--- a/drivers/gpu/drm/bridge/ti-tdp158.c
+++ b/drivers/gpu/drm/bridge/ti-tdp158.c
@@ -45,11 +45,13 @@ static void tdp158_disable(struct drm_bridge *bridge,
regulator_disable(tdp158->vcc);
}
-static int tdp158_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
+static int tdp158_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
struct tdp158 *tdp158 = bridge->driver_private;
- return drm_bridge_attach(bridge->encoder, tdp158->next, bridge, flags);
+ return drm_bridge_attach(encoder, tdp158->next, bridge, flags);
}
static const struct drm_bridge_funcs tdp158_bridge_funcs = {
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 79ab5da827e1..e15d232ddbac 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -120,12 +120,13 @@ static void tfp410_hpd_callback(void *arg, enum drm_connector_status status)
}
static int tfp410_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
int ret;
- ret = drm_bridge_attach(bridge->encoder, dvi->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, dvi->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -159,7 +160,7 @@ static int tfp410_attach(struct drm_bridge *bridge,
drm_display_info_set_bus_formats(&dvi->connector.display_info,
&dvi->bus_format, 1);
- drm_connector_attach_encoder(&dvi->connector, bridge->encoder);
+ drm_connector_attach_encoder(&dvi->connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
index 47b74cb25b14..1c289051a598 100644
--- a/drivers/gpu/drm/bridge/ti-tpd12s015.c
+++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
@@ -38,6 +38,7 @@ static inline struct tpd12s015_device *to_tpd12s015(struct drm_bridge *bridge)
}
static int tpd12s015_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tpd12s015_device *tpd = to_tpd12s015(bridge);
@@ -46,7 +47,7 @@ static int tpd12s015_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- ret = drm_bridge_attach(bridge->encoder, tpd->next_bridge,
+ ret = drm_bridge_attach(encoder, tpd->next_bridge,
bridge, flags);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config
index a8fca079921b..fddfbd4d2493 100644
--- a/drivers/gpu/drm/ci/arm64.config
+++ b/drivers/gpu/drm/ci/arm64.config
@@ -193,6 +193,8 @@ CONFIG_PWM_MTK_DISP=y
CONFIG_MTK_CMDQ=y
CONFIG_REGULATOR_DA9211=y
CONFIG_DRM_ANALOGIX_ANX7625=y
+CONFIG_PHY_MTK_HDMI=y
+CONFIG_PHY_MTK_MIPI_DSI=y
# For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware.
CONFIG_ARCH_TEGRA=y
diff --git a/drivers/gpu/drm/ci/build-igt.sh b/drivers/gpu/drm/ci/build-igt.sh
index eddb5f782a5e..caa2f4804ed5 100644
--- a/drivers/gpu/drm/ci/build-igt.sh
+++ b/drivers/gpu/drm/ci/build-igt.sh
@@ -71,4 +71,4 @@ tar -cf artifacts/igt.tar /igt
# Pass needed files to the test stage
S3_ARTIFACT_NAME="igt.tar.gz"
gzip -c artifacts/igt.tar > ${S3_ARTIFACT_NAME}
-ci-fairy s3cp --token-file "${S3_JWT_FILE}" ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${KERNEL_ARCH}/${S3_ARTIFACT_NAME}
+s3_upload ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${KERNEL_ARCH}/
diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh
index 19fe01257ab9..6fb74c51abe2 100644
--- a/drivers/gpu/drm/ci/build.sh
+++ b/drivers/gpu/drm/ci/build.sh
@@ -98,14 +98,14 @@ done
make ${KERNEL_IMAGE_NAME}
-mkdir -p /lava-files/
+mkdir -p /kernel/
for image in ${KERNEL_IMAGE_NAME}; do
- cp arch/${KERNEL_ARCH}/boot/${image} /lava-files/.
+ cp arch/${KERNEL_ARCH}/boot/${image} /kernel/.
done
if [[ -n ${DEVICE_TREES} ]]; then
make dtbs
- cp ${DEVICE_TREES} /lava-files/.
+ cp ${DEVICE_TREES} /kernel/.
fi
make modules
@@ -121,11 +121,11 @@ if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
-d arch/arm64/boot/Image.lzma \
-C lzma\
-b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
- /lava-files/cheza-kernel
+ /kernel/cheza-kernel
KERNEL_IMAGE_NAME+=" cheza-kernel"
# Make a gzipped copy of the Image for db410c.
- gzip -k /lava-files/Image
+ gzip -k /kernel/Image
KERNEL_IMAGE_NAME+=" Image.gz"
fi
@@ -139,7 +139,7 @@ cp -rfv drivers/gpu/drm/ci/* install/.
. .gitlab-ci/container/container_post_build.sh
if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then
- xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > /lava-files/vmlinux.xz
+ xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > /kernel/vmlinux.xz
FILES_TO_UPLOAD="$KERNEL_IMAGE_NAME vmlinux.xz"
if [[ -n $DEVICE_TREES ]]; then
@@ -148,13 +148,13 @@ if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then
ls -l "${S3_JWT_FILE}"
for f in $FILES_TO_UPLOAD; do
- ci-fairy s3cp --token-file "${S3_JWT_FILE}" /lava-files/$f \
- https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/$f
+ s3_upload /kernel/$f \
+ https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/
done
S3_ARTIFACT_NAME="kernel-files.tar.zst"
tar --zstd -cf $S3_ARTIFACT_NAME install
- ci-fairy s3cp --token-file "${S3_JWT_FILE}" ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/${S3_ARTIFACT_NAME}
+ s3_upload ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/
echo "Download vmlinux.xz from https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/vmlinux.xz"
fi
@@ -165,7 +165,7 @@ ln -s common artifacts/install/ci-common
cp .config artifacts/${CI_JOB_NAME}_config
for image in ${KERNEL_IMAGE_NAME}; do
- cp /lava-files/$image artifacts/install/.
+ cp /kernel/$image artifacts/install/.
done
tar -C artifacts -cf artifacts/install.tar install
diff --git a/drivers/gpu/drm/ci/build.yml b/drivers/gpu/drm/ci/build.yml
index 274f118533a7..8eb56ebcf4aa 100644
--- a/drivers/gpu/drm/ci/build.yml
+++ b/drivers/gpu/drm/ci/build.yml
@@ -67,7 +67,7 @@ testing:arm32:
#
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
# becoming too big for their bootloaders.
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
UPLOAD_TO_MINIO: 1
MERGE_FRAGMENT: arm.config
@@ -79,7 +79,7 @@ testing:arm64:
#
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
# becoming too big for their bootloaders.
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
UPLOAD_TO_MINIO: 1
MERGE_FRAGMENT: arm64.config
@@ -91,7 +91,7 @@ testing:x86_64:
#
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
# becoming too big for their bootloaders.
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
UPLOAD_TO_MINIO: 1
MERGE_FRAGMENT: x86_64.config
@@ -143,6 +143,10 @@ debian-arm64-release:
rules:
- when: never
+debian-arm64-ubsan:
+ rules:
+ - when: never
+
debian-build-testing:
rules:
- when: never
@@ -183,6 +187,10 @@ debian-testing-msan:
rules:
- when: never
+debian-testing-ubsan:
+ rules:
+ - when: never
+
debian-vulkan:
rules:
- when: never
diff --git a/drivers/gpu/drm/ci/container.yml b/drivers/gpu/drm/ci/container.yml
index 07dc13ff865d..56c95c2f91ae 100644
--- a/drivers/gpu/drm/ci/container.yml
+++ b/drivers/gpu/drm/ci/container.yml
@@ -24,6 +24,18 @@ alpine/x86_64_build:
rules:
- when: never
+debian/arm32_test-base:
+ rules:
+ - when: never
+
+debian/arm32_test-gl:
+ rules:
+ - when: never
+
+debian/arm32_test-vk:
+ rules:
+ - when: never
+
debian/arm64_test-gl:
rules:
- when: never
@@ -32,6 +44,10 @@ debian/arm64_test-vk:
rules:
- when: never
+debian/baremetal_arm32_test:
+ rules:
+ - when: never
+
debian/ppc64el_build:
rules:
- when: never
@@ -40,6 +56,14 @@ debian/s390x_build:
rules:
- when: never
+debian/x86_32_build:
+ rules:
+ - when: never
+
+debian/x86_64_test-android:
+ rules:
+ - when: never
+
debian/x86_64_test-vk:
rules:
- when: never
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index f04aabe8327c..ba75b3a7eca4 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -1,11 +1,11 @@
variables:
DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
- DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 7d3062470f3ccc6cb40540e772e902c7e2248024
+ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha f73132f1215a37ce8ffc711a0136c90649aaf128
UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git
TARGET_BRANCH: drm-next
- IGT_VERSION: 33adea9ebafd059ac88a5ccfec60536394f36c7c
+ IGT_VERSION: 04bedb9238586b81d4d4ca62b02e584f6cfc77af
DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/mesa/deqp-runner.git
DEQP_RUNNER_GIT_TAG: v0.20.0
@@ -20,8 +20,10 @@ variables:
rm download-git-cache.sh
set +o xtrace
S3_JWT_FILE: /s3_jwt
+ S3_JWT_HEADER_FILE: /s3_jwt_header
S3_JWT_FILE_SCRIPT: |-
echo -n '${S3_JWT}' > '${S3_JWT_FILE}' &&
+ echo -n "Authorization: Bearer ${S3_JWT}" > '${S3_JWT_HEADER_FILE}' &&
unset CI_JOB_JWT S3_JWT # Unsetting vulnerable env variables
S3_HOST: s3.freedesktop.org
# This bucket is used to fetch the kernel image
@@ -143,11 +145,11 @@ stages:
# Pre-merge pipeline
- if: &is-pre-merge $CI_PIPELINE_SOURCE == "merge_request_event"
# Push to a branch on a fork
- - if: &is-fork-push $CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"
+ - if: &is-fork-push $CI_PIPELINE_SOURCE == "push"
# nightly pipeline
- if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule"
# pipeline for direct pushes that bypassed the CI
- - if: &is-direct-push $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"
+ - if: &is-direct-push $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"
# Rules applied to every job in the pipeline
@@ -170,29 +172,48 @@ stages:
- !reference [.disable-farm-mr-rules, rules]
# Never run immediately after merging, as we just ran everything
- !reference [.never-post-merge-rules, rules]
- # Build everything in merge pipelines, if any files affecting the pipeline
- # were changed
+ # Build everything in merge pipelines
- if: *is-merge-attempt
- changes: &all_paths
- - drivers/gpu/drm/ci/**/*
when: on_success
# Same as above, but for pre-merge pipelines
- if: *is-pre-merge
- changes:
- *all_paths
when: manual
- # Skip everything for pre-merge and merge pipelines which don't change
- # anything in the build
+ # Build everything after someone bypassed the CI
+ - if: *is-direct-push
+ when: manual
+ # Build everything in scheduled pipelines
+ - if: *is-scheduled-pipeline
+ when: on_success
+ # Allow building everything in fork pipelines, but build nothing unless
+ # manually triggered
+ - when: manual
+
+
+# Repeat of the above but with `when: on_success` replaced with
+# `when: delayed` + `start_in:`, for build-only jobs.
+# Note: make sure the branches in this list are the same as in
+# `.container+build-rules` above.
+.build-only-delayed-rules:
+ rules:
+ - !reference [.common-rules, rules]
+ # Run when re-enabling a disabled farm, but not when disabling it
+ - !reference [.disable-farm-mr-rules, rules]
+ # Never run immediately after merging, as we just ran everything
+ - !reference [.never-post-merge-rules, rules]
+ # Build everything in merge pipelines
- if: *is-merge-attempt
- when: never
+ when: delayed
+ start_in: &build-delay 5 minutes
+ # Same as above, but for pre-merge pipelines
- if: *is-pre-merge
- when: never
+ when: manual
# Build everything after someone bypassed the CI
- if: *is-direct-push
- when: on_success
+ when: manual
# Build everything in scheduled pipelines
- if: *is-scheduled-pipeline
- when: on_success
+ when: delayed
+ start_in: *build-delay
# Allow building everything in fork pipelines, but build nothing unless
# manually triggered
- when: manual
@@ -232,7 +253,7 @@ make git archive:
- tar -cvzf ../$CI_PROJECT_NAME.tar.gz .
# Use id_tokens for JWT auth
- - ci-fairy s3cp --token-file "${S3_JWT_FILE}" ../$CI_PROJECT_NAME.tar.gz https://$S3_HOST/${S3_GITCACHE_BUCKET}/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/$CI_PROJECT_NAME.tar.gz
+ - s3_upload ../$CI_PROJECT_NAME.tar.gz https://$S3_HOST/${S3_GITCACHE_BUCKET}/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/
# Sanity checks of MR settings and commit logs
diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh
index 68b042e43b7f..2a0599f12c58 100755
--- a/drivers/gpu/drm/ci/igt_runner.sh
+++ b/drivers/gpu/drm/ci/igt_runner.sh
@@ -85,5 +85,16 @@ deqp-runner junit \
--limit 50 \
--template "See $ARTIFACTS_BASE_URL/results/{{testcase}}.xml"
+# Check if /proc/lockdep_stats exists
+if [ -f /proc/lockdep_stats ]; then
+ # A debug_locks value of 0 means lockdep detected an issue and turned itself off.
+ debug_locks=$(grep 'debug_locks:' /proc/lockdep_stats | awk '{print $2}')
+ if [ "$debug_locks" -eq 0 ] && [ "$ret" -eq 0 ]; then
+ echo "Warning: LOCKDEP issue detected. Please check dmesg logs for more information."
+ cat /proc/lockdep_stats
+ ret=101
+ fi
+fi
+
cd $oldpath
exit $ret
diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml
index 20049f3626b2..53fe34b86578 100644
--- a/drivers/gpu/drm/ci/image-tags.yml
+++ b/drivers/gpu/drm/ci/image-tags.yml
@@ -1,5 +1,5 @@
variables:
- CONTAINER_TAG: "20250204-mesa-uprev"
+ CONTAINER_TAG: "20250328-mesa-uprev"
DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
DEBIAN_BASE_TAG: "${CONTAINER_TAG}"
@@ -20,3 +20,5 @@ variables:
DEBIAN_PYUTILS_TAG: "${CONTAINER_TAG}"
ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}"
+
+ CONDITIONAL_BUILD_ANGLE_TAG: fec96cc945650c5fe9f7188cabe80d8a
diff --git a/drivers/gpu/drm/ci/lava-submit.sh b/drivers/gpu/drm/ci/lava-submit.sh
index 6e5ac51e8c0a..a1e8b34fb2d4 100755
--- a/drivers/gpu/drm/ci/lava-submit.sh
+++ b/drivers/gpu/drm/ci/lava-submit.sh
@@ -48,12 +48,13 @@ ROOTFS_URL="$(get_path_to_artifact lava-rootfs.tar.zst)"
rm -rf results
mkdir -p results/job-rootfs-overlay/
-artifacts/ci-common/generate-env.sh > results/job-rootfs-overlay/set-job-env-vars.sh
+artifacts/ci-common/export-gitlab-job-env-for-dut.sh \
+ > results/job-rootfs-overlay/set-job-env-vars.sh
cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
-ci-fairy s3cp --token-file "${S3_JWT_FILE}" job-rootfs-overlay.tar.gz "https://${JOB_ROOTFS_OVERLAY_PATH}"
+s3_upload job-rootfs-overlay.tar.gz "https://${JOB_ARTIFACTS_BASE}"
# Prepare env vars for upload.
section_switch variables "Environment variables passed through to device:"
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index 6a1e059858e5..84a25f0e783b 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -1,6 +1,14 @@
+.allow_failure_lockdep:
+ variables:
+ FF_USE_NEW_BASH_EVAL_STRATEGY: 'true'
+ allow_failure:
+ exit_codes:
+ - 101
+
.lava-test:
extends:
- .container+build-rules
+ - .allow_failure_lockdep
timeout: "1h30m"
rules:
- !reference [.scheduled_pipeline-rules, rules]
@@ -69,6 +77,7 @@
extends:
- .baremetal-test-arm64
- .use-debian/baremetal_arm64_test
+ - .allow_failure_lockdep
timeout: "1h30m"
rules:
- !reference [.scheduled_pipeline-rules, rules]
@@ -89,6 +98,28 @@
tags:
- $RUNNER_TAG
+.software-driver:
+ stage: software-driver
+ extends:
+ - .allow_failure_lockdep
+ timeout: "1h30m"
+ rules:
+ - !reference [.scheduled_pipeline-rules, rules]
+ - when: on_success
+ extends:
+ - .test-gl
+ tags:
+ - kvm
+ script:
+ - ln -sf $CI_PROJECT_DIR/install /install
+ - mv install/bzImage /kernel/bzImage
+ - mkdir -p /lib/modules
+ - install/crosvm-runner.sh install/igt_runner.sh
+ needs:
+ - debian/x86_64_test-gl
+ - testing:x86_64
+ - igt:x86_64
+
.msm-sc7180:
extends:
- .lava-igt:arm64
@@ -133,7 +164,7 @@ msm:apq8016:
BM_KERNEL_EXTRA_ARGS: clk_ignore_unused
RUNNER_TAG: google-freedreno-db410c
script:
- - ./install/bare-metal/fastboot.sh
+ - ./install/bare-metal/fastboot.sh || exit $?
msm:apq8096:
extends:
@@ -147,7 +178,7 @@ msm:apq8096:
GPU_VERSION: apq8096
RUNNER_TAG: google-freedreno-db820c
script:
- - ./install/bare-metal/fastboot.sh
+ - ./install/bare-metal/fastboot.sh || exit $?
msm:sdm845:
extends:
@@ -161,7 +192,7 @@ msm:sdm845:
GPU_VERSION: sdm845
RUNNER_TAG: google-freedreno-cheza
script:
- - ./install/bare-metal/cros-servo.sh
+ - ./install/bare-metal/cros-servo.sh || exit $?
msm:sm8350-hdk:
extends:
@@ -440,47 +471,16 @@ panfrost:g12b:
- .panfrost-gpu
virtio_gpu:none:
- stage: software-driver
- timeout: "1h30m"
- rules:
- - !reference [.scheduled_pipeline-rules, rules]
- - when: on_success
+ extends:
+ - .software-driver
variables:
CROSVM_GALLIUM_DRIVER: llvmpipe
DRIVER_NAME: virtio_gpu
GPU_VERSION: none
- extends:
- - .test-gl
- tags:
- - kvm
- script:
- - ln -sf $CI_PROJECT_DIR/install /install
- - mv install/bzImage /lava-files/bzImage
- - install/crosvm-runner.sh install/igt_runner.sh
- needs:
- - debian/x86_64_test-gl
- - testing:x86_64
- - igt:x86_64
vkms:none:
- stage: software-driver
- timeout: "1h30m"
- rules:
- - !reference [.scheduled_pipeline-rules, rules]
- - when: on_success
+ extends:
+ - .software-driver
variables:
DRIVER_NAME: vkms
GPU_VERSION: none
- extends:
- - .test-gl
- tags:
- - kvm
- script:
- - ln -sf $CI_PROJECT_DIR/install /install
- - mv install/bzImage /lava-files/bzImage
- - mkdir -p /lib/modules
- - ./install/crosvm-runner.sh ./install/igt_runner.sh
- needs:
- - debian/x86_64_test-gl
- - testing:x86_64
- - igt:x86_64
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
index 75374085f40f..f44dbce3151a 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
@@ -14,16 +14,10 @@ amdgpu/amd_plane@mpo-scale-nv12,Fail
amdgpu/amd_plane@mpo-scale-p010,Fail
amdgpu/amd_plane@mpo-scale-rgb,Crash
amdgpu/amd_plane@mpo-swizzle-toggle,Fail
-amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Crash
+amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Fail
kms_addfb_basic@bad-pitch-65536,Fail
kms_addfb_basic@bo-too-small,Fail
kms_addfb_basic@too-high,Fail
-kms_async_flips@alternate-sync-async-flip,Fail
-kms_async_flips@alternate-sync-async-flip-atomic,Fail
-kms_async_flips@test-cursor,Fail
-kms_async_flips@test-cursor-atomic,Fail
-kms_async_flips@test-time-stamp,Fail
-kms_async_flips@test-time-stamp-atomic,Fail
kms_atomic_transition@plane-all-modeset-transition-internal-panels,Fail
kms_atomic_transition@plane-all-transition,Fail
kms_atomic_transition@plane-all-transition-nonblocking,Fail
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
index 3879c4812a22..902d54027506 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
@@ -14,6 +14,7 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
index a29cea4f234c..8e2b5504004e 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
@@ -1,22 +1,18 @@
-core_setmaster@master-drop-set-shared-fd,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
-kms_async_flips@test-time-stamp,Timeout
-kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
-kms_flip@dpms-off-confusion-interruptible,Timeout
-kms_flip@wf_vblank-ts-check-interruptible,Fail
+kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout
+kms_fb_coherency@memset-crc,Crash
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
@@ -31,12 +27,18 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Timeout
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
@@ -44,8 +46,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
index 2ef1dc35a7fa..922327632eff 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
index ee11999e3da1..7353ab11e940 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
@@ -1,4 +1,3 @@
-core_setmaster@master-drop-set-user,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -16,6 +15,7 @@ kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
@@ -30,7 +30,6 @@ kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
kms_pm_backlight@basic-brightness,Fail
-kms_pm_backlight@brightness-with-dpms,Crash
kms_pm_backlight@fade,Fail
kms_pm_backlight@fade-with-dpms,Fail
kms_pm_rpm@modeset-stress-extra-wait,Timeout
@@ -43,8 +42,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
index 4f50e0240ff4..80bf2741866c 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
index 47b3f1d42bb6..6fef7c1e56ea 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
@@ -1,4 +1,4 @@
-core_setmaster@master-drop-set-shared-fd,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -8,8 +8,9 @@ i915_pipe_stress@stress-xrgb8888-ytiled,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-kms_async_flips@test-time-stamp,Timeout
-kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
+kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout
+kms_cursor_crc@cursor-suspend,Timeout
+kms_fb_coherency@memset-crc,Crash
kms_flip@busy-flip,Timeout
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
@@ -34,17 +35,22 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Timeout
+kms_pipe_stress@stress-xrgb8888-untiled,Fail
+kms_pipe_stress@stress-xrgb8888-ytiled,Fail
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
kms_psr2_sf@cursor-plane-update-sf,Fail
kms_psr2_sf@overlay-plane-update-continuous-sf,Fail
kms_psr2_sf@overlay-plane-update-sf-dmg-area,Fail
kms_psr2_sf@overlay-primary-update-sf-dmg-area,Fail
kms_psr2_sf@plane-move-sf-dmg-area,Fail
-kms_psr2_sf@pr-cursor-plane-update-sf,Timeout
kms_psr2_sf@primary-plane-update-sf-dmg-area,Fail
kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb,Fail
kms_psr2_sf@psr2-cursor-plane-update-sf,Fail
@@ -57,6 +63,7 @@ kms_psr2_sf@psr2-primary-plane-update-sf-dmg-area-big-fb,Fail
kms_psr2_su@page_flip-NV12,Fail
kms_psr2_su@page_flip-P010,Fail
kms_setmode@basic,Fail
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
@@ -65,6 +72,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
index c87ff8b40e99..c393a138b8a6 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
@@ -9,6 +9,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
@@ -16,7 +17,6 @@ gem_.*
# Hangs the machine and timeout occurs
i915_pm_rc6_residency.*
i915_suspend.*
-xe_module_load.*
api_intel_allocator.*
kms_cursor_legacy.*
i915_pm_rpm.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
index 843c363b42f5..8adf5f0a6e80 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
@@ -1,39 +1,47 @@
-core_setmaster@master-drop-set-shared-fd,Fail
-core_setmaster@master-drop-set-user,Fail
+core_setmaster_vs_auth,Fail
gen9_exec_parse@unaligned-access,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-kms_dirtyfb@default-dirtyfb-ioctl,Fail
kms_dirtyfb@drrs-dirtyfb-ioctl,Fail
-kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
+kms_flip@blocking-wf_vblank,Fail
+kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
-kms_frontbuffer_tracking@fbcdrrs-tiling-linear,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout
+kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
+kms_rotation_crc@multiplane-rotation,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@non-zero-reason,Timeout
sysfs_heartbeat_interval@long,Timeout
+sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
index 219ae839323a..2e4ef9f35654 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
@@ -12,6 +12,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
index 0e08fff741aa..57453e340040 100644
--- a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
@@ -3,12 +3,13 @@ i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
+i915_pm_rpm@gem-execbuf-stress,Timeout
kms_flip@dpms-off-confusion,Fail
+kms_flip@nonexisting-fb,Fail
kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset,Fail
-kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,UnexpectedImprovement(Skip)
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
@@ -28,7 +29,6 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
-kms_frontbuffer_tracking@fbc-rgb565-draw-blt,Timeout
kms_lease@lease-uevent,Fail
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_rotation_crc@bad-pixel-format,Fail
@@ -37,13 +37,10 @@ kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
+perf_pmu@most-busy-idle-check-all,Fail
perf_pmu@rc6,Crash
+prime_busy@before-wait,Fail
sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
index 1a3d87c0ca6e..8dec57da1bb3 100644
--- a/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
@@ -9,6 +9,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
index d4fba4f55ec1..117098bc95d9 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
@@ -21,8 +21,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
index dc722d6a774e..e287462a491a 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
@@ -12,6 +12,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
index 93d42b146df9..462c050a8b2d 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
@@ -1,13 +1,14 @@
api_intel_allocator@reopen,Timeout
api_intel_bb@destroy-bb,Timeout
core_hotunplug@hotrebind-lateclose,Timeout
-drm_read@short-buffer-block,Timeout
dumb_buffer@map-valid,Timeout
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
+i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rps@engine-order,Timeout
+i915_pm_rps@waitboost,Fail
kms_lease@lease-uevent,Fail
kms_rotation_crc@multiplane-rotation,Fail
perf@i915-ref-count,Fail
@@ -16,6 +17,7 @@ perf_pmu@enable-race,Timeout
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
perf_pmu@semaphore-wait-idle,Timeout
+prime_busy@before,Fail
prime_mmap@test_refcounting,Timeout
sriov_basic@enable-vfs-bind-unbind-each-numvfs-all,Timeout
syncobj_basic@illegal-fd-to-handle,Timeout
@@ -26,8 +28,3 @@ syncobj_wait@multi-wait-all-submitted,Timeout
syncobj_wait@multi-wait-for-submit-submitted-signaled,Timeout
syncobj_wait@wait-any-complex,Timeout
syncobj_wait@wait-delayed-signal,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
index 938377896841..429dc3c731df 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
@@ -18,6 +18,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
index 1cb6978c86dc..0f167cfd503c 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
@@ -1,4 +1,4 @@
-core_setmaster@master-drop-set-shared-fd,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -6,10 +6,9 @@ i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-kms_async_flips@test-time-stamp,Timeout
-kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
-kms_dirtyfb@default-dirtyfb-ioctl,Fail
-kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
+kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout
+kms_cursor_crc@cursor-suspend,Timeout
+kms_fb_coherency@memset-crc,Crash
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
@@ -30,13 +29,19 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout
kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Timeout
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
@@ -45,8 +50,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
index 29bff8922ae1..7e7374ebf3d1 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
@@ -9,6 +9,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
index 4f176c04ec4e..592d7d69e6fc 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
@@ -17,8 +17,28 @@ kms_bw@linear-tiling-2-displays-3840x2160p,Fail
kms_color@invalid-gamma-lut-sizes,Fail
kms_cursor_legacy@cursor-vs-flip-atomic,Fail
kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-toggle,Fail
+kms_cursor_legacy@flip-vs-cursor-varying-size,Fail
+kms_flip@basic-plain-flip,Fail
+kms_flip@dpms-off-confusion,Fail
+kms_flip@dpms-off-confusion-interruptible,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-expired-vblank,Fail
+kms_flip@flip-vs-expired-vblank-interruptible,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning,Fail
+kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_flip@flip-vs-suspend,Fail
kms_flip@flip-vs-suspend-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
index 2956567c3048..443596d9e662 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
@@ -46,3 +46,10 @@ kms_prop_blob@invalid-set-prop
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@connected-linear-tiling-1-displays-2160x1440p
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/d25442b9-0b6b-433c-8e23-997840fad305@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@flip-vs-wf_vblank-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
index d0db51874aef..b5ee7323a160 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
index 5a063361d7f2..184d0cccc318 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
@@ -1,12 +1,38 @@
-core_setmaster@master-drop-set-user,Fail
dumb_buffer@create-clear,Crash
kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail
kms_bw@linear-tiling-1-displays-1920x1080p,Fail
kms_bw@linear-tiling-1-displays-2160x1440p,Fail
kms_bw@linear-tiling-1-displays-3840x2160p,Fail
+kms_color@invalid-gamma-lut-sizes,Fail
kms_cursor_legacy@cursor-vs-flip-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-toggle,Fail
+kms_cursor_legacy@flip-vs-cursor-varying-size,Fail
+kms_flip@basic-flip-vs-wf_vblank,Fail
+kms_flip@basic-plain-flip,Fail
+kms_flip@dpms-off-confusion,Fail
+kms_flip@dpms-off-confusion-interruptible,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-expired-vblank,Fail
+kms_flip@flip-vs-expired-vblank-interruptible,Fail
+kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning,Fail
+kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_flip@flip-vs-suspend,Fail
+kms_flip@flip-vs-suspend-interruptible,Fail
+kms_flip@flip-vs-wf_vblank-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
index df7e5ce7a036..0c67fec92450 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
@@ -18,3 +18,24 @@ kms_cursor_legacy@cursor-vs-flip-atomic-transitions
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
fbdev@write
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/a520d1d6-95b3-4573-b8f2-689f05bc2230@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@basic-flip-vs-modeset
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/ca960a82-00fc-4183-b983-998f7ac2fbb5@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_atomic_transition@plane-all-modeset-transition-internal-panels
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/da578eed-224f-4374-853a-1ff0aa20d03b@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_atomic_transition@plane-toggle-modeset-transition
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
index d0db51874aef..b5ee7323a160 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
index 8198e06344a3..9fd44a4b962a 100644
--- a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
index 7752adff05c1..72c469021b66 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
@@ -1,8 +1,4 @@
kms_3d,Fail
-kms_cursor_legacy@forked-bo,Fail
-kms_cursor_legacy@forked-move,Fail
-kms_cursor_legacy@single-bo,Fail
-kms_cursor_legacy@torture-bo,Fail
kms_force_connector_basic@force-edid,Fail
kms_hdmi_inject@inject-4k,Fail
kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
index 1674c8e214d6..87724413174c 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
index 5550be5486ed..a4d2f2a7963a 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
index 8910afb6acf2..d270af1cca52 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
@@ -32,3 +32,10 @@ kms_lease@page-flip-implicit-plane
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_plane@plane-position-hole-dpms
+
+# Board Name: sc7180-trogdor-kingoftown
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/73
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_plane@plane-position-covered
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
index 478d7c161616..d4b8ba3a54a9 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
@@ -28,3 +29,6 @@ kms_cursor_crc@cursor-random-max-size
# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
kms_display_modes@extended-mode-basic
kms_display_modes@mst-extended-mode-negative
+
+# It causes other tests to fail, so skip it.
+kms_invalid_mode@overflow-vrefresh
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
index cd3d3b0befe4..cafc802cecea 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
@@ -11,3 +11,10 @@ msm/msm_mapping@shadow
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
kms_lease@page-flip-implicit-plane
+
+# Board Name: sc7180-trogdor-lazor-limozeen-nots-r5
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/74
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_cursor_crc@cursor-random-128x128
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
index ef9318afcd89..022db559cc7d 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
index 38ec0305c1f4..e32d73c6c98e 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
@@ -130,3 +130,10 @@ kms_lease@page-flip-implicit-plane
# IGT Version: 1.28-ga73311079
# Linux Version: 6.11.0-rc5
kms_flip@flip-vs-expired-vblank
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/75
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@plain-flip-ts-check-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
index 2ce7f7e23a01..6c86d1953e11 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
@@ -18,6 +18,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
@@ -35,3 +36,315 @@ kms_content_protection@uevent
# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
kms_display_modes@extended-mode-basic
kms_display_modes@mst-extended-mode-negative
+
+# Kernel panic
+msm/msm_recovery@hangcheck
+# DEBUG - Begin test msm/msm_recovery@hangcheck
+# Console: switching to colour dummy device 80x25
+# [ 489.526286] [IGT] msm_recovery: executing
+# [ 489.531926] [IGT] msm_recovery: starting subtest hangcheck
+# [ 492.808574] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 492.820358] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 492.831154] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 493.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 493.844177] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 493.854971] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 494.824633] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 494.836237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 494.847034] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 495.816570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 495.828170] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 495.838966] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 496.804643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 496.816246] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 496.827041] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 497.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 497.844170] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 497.854963] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 498.820636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 498.832232] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 498.843024] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 499.816568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 499.828163] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 499.838958] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 500.808570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 500.820165] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 500.830960] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 501.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 501.844175] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 501.854965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 502.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 502.836171] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 502.846965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 503.816570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 503.828176] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 503.838969] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 504.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 504.816237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 504.827033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 505.828643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 505.840247] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 505.851043] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 506.820637] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 506.832233] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 506.843026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 507.816567] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 507.828171] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 507.838965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 508.808568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 508.820173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 508.830969] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 509.832568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 509.844173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 509.854967] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 510.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 510.836162] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 510.846954] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 511.816569] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 511.828173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 511.838968] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 512.804641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 512.816246] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 512.827040] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 513.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 513.840239] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 513.851035] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 514.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 514.836164] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 514.846959] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 515.812640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 515.824235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 515.835030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 515.912427] rcu: INFO: rcu_preempt self-detected stall on CPU
+# [ 515.918398] rcu: 0-....: (6452 ticks this GP) idle=6afc/1/0x4000000000000000 softirq=12492/12697 fqs=3179
+# [ 515.929296] rcu: (t=6505 jiffies g=36205 q=58 ncpus=8)
+# [ 515.934709] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W 6.14.0-rc4-gdddf15cff632 #1
+# [ 515.934727] Tainted: [W]=WARN
+# [ 515.934732] Hardware name: Google Cheza (rev3+) (DT)
+# [ 515.934739] pstate: 00400009 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+# [ 515.934751] pc : rcu_core+0x59c/0xe68
+# [ 515.934769] lr : rcu_core+0x74/0xe68
+# [ 515.934781] sp : ffff800080003e50
+# [ 515.934785] x29: ffff800080003e50 x28: ffff225d038e9bc0 x27: 0000000000000002
+# [ 515.934805] x26: ffffc171a8ee6108 x25: ffffc171a85bc2c0 x24: ffff60ecd691e000
+# [ 515.934820] x23: ffffc171a85d15c0 x22: ffffc171a8f8d780 x21: ffff225e7eeef5c0
+# [ 515.934835] x20: ffffc171a8ef0e80 x19: ffffc171a85d15d1 x18: ffffc171a9461e70
+# [ 515.934850] x17: ffff60ecd691e000 x16: ffff800080000000 x15: 0000000000000000
+# [ 515.934866] x14: ffffc171a85d0780 x13: 0000000000000400 x12: 0000000000000000
+# [ 515.934880] x11: ffffc171a85ce900 x10: ffffc171a8ef5000 x9 : ffffc171a8ef0000
+# [ 515.934894] x8 : ffff800080003d88 x7 : ffffc171a8ee6100 x6 : ffff800080003de0
+# [ 515.934909] x5 : ffff800080003dc8 x4 : 0000000000000003 x3 : 0000000000000000
+# [ 515.934923] x2 : 0000000000000101 x1 : 0000000000000000 x0 : ffff225d038e9bc0
+# [ 515.934939] Call trace:
+# [ 515.934945] rcu_core+0x59c/0xe68 (P)
+# [ 515.934962] rcu_core_si+0x10/0x1c
+# [ 515.934976] handle_softirqs+0x118/0x4b8
+# [ 515.934994] __do_softirq+0x14/0x20
+# [ 515.935007] ____do_softirq+0x10/0x1c
+# [ 515.935021] call_on_irq_stack+0x24/0x4c
+# [ 515.935034] do_softirq_own_stack+0x1c/0x28
+# [ 515.935048] __irq_exit_rcu+0x174/0x1b4
+# [ 515.935063] irq_exit_rcu+0x10/0x38
+# [ 515.935077] el1_interrupt+0x38/0x64
+# [ 515.935092] el1h_64_irq_handler+0x18/0x24
+# [ 515.935104] el1h_64_irq+0x6c/0x70
+# [ 515.935115] lock_acquire+0x1e0/0x338 (P)
+# [ 515.935129] __mutex_lock+0xa8/0x4b8
+# [ 515.935144] mutex_lock_nested+0x24/0x30
+# [ 515.935159] _find_opp_table_unlocked+0x40/0xfc
+# [ 515.935174] _find_key+0x64/0x16c
+# [ 515.935184] dev_pm_opp_find_freq_exact+0x4c/0x74
+# [ 515.935197] qcom_cpufreq_hw_target_index+0xe8/0x128
+# [ 515.935211] __cpufreq_driver_target+0x144/0x29c
+# [ 515.935227] sugov_work+0x58/0x74
+# [ 515.935239] kthread_worker_fn+0xf4/0x324
+# [ 515.935254] kthread+0x12c/0x208
+# [ 515.935266] ret_from_fork+0x10/0x20
+# [ 516.808569] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 516.820174] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 516.830968] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 517.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 517.840236] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 517.851032] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 518.820642] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 518.832237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 518.843030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 519.812636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 519.824231] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 519.835026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 520.808570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 520.820165] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 520.830959] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 521.828643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 521.840238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 521.851033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 522.820636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 522.832232] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 522.843027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 523.812639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 523.824239] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 523.835034] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 524.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 524.816235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 524.827026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 525.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 525.840236] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 525.851031] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 526.820641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 526.832244] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 526.843041] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 527.812642] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 527.824242] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 527.835038] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 528.804639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 528.816234] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 528.827027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 529.832634] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 529.844231] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 529.855017] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 530.820646] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 530.832270] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 530.843065] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 531.812640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 531.824238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 531.835030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 532.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 532.816237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 532.827031] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 533.828640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 533.840243] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 533.851037] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 534.820640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 534.832245] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 534.843038] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 535.812641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 535.824238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 535.835033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 536.804639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 536.816235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 536.827030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 537.828640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 537.840234] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 537.851020] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 538.820640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 538.832235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 538.843027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 539.812644] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 539.824247] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 539.835040] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 540.124426] watchdog: BUG: soft lockup - CPU#0 stuck for 49s! [sugov:0:126]
+# [ 540.124439] Modules linked in:
+# [ 540.124448] irq event stamp: 9912389
+# [ 540.124453] hardirqs last enabled at (9912388): [<ffffc171a767a24c>] exit_to_kernel_mode+0x38/0x130
+# [ 540.124473] hardirqs last disabled at (9912389): [<ffffc171a767a368>] el1_interrupt+0x24/0x64
+# [ 540.124486] softirqs last enabled at (9898068): [<ffffc171a62bc290>] handle_softirqs+0x4a0/0x4b8
+# [ 540.124505] softirqs last disabled at (9898071): [<ffffc171a62105b0>] __do_softirq+0x14/0x20
+# [ 540.124525] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W 6.14.0-rc4-gdddf15cff632 #1
+# [ 540.124540] Tainted: [W]=WARN
+# [ 540.124544] Hardware name: Google Cheza (rev3+) (DT)
+# [ 540.124549] pstate: 60400009 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+# [ 540.124560] pc : xhci_urb_enqueue+0xbc/0x32c
+# [ 540.124573] lr : xhci_urb_enqueue+0xb4/0x32c
+# [ 540.124581] sp : ffff800080003c20
+# [ 540.124586] x29: ffff800080003c20 x28: 0000000000000000 x27: ffff225d00b1e6a0
+# [ 540.124602] x26: ffff225d01c3d800 x25: 0000000000000001 x24: 0000000000000006
+# [ 540.124617] x23: ffff225d044dc000 x22: ffff225d044dc000 x21: 0000000000000001
+# [ 540.124632] x20: ffff225d002d7280 x19: ffff225d0573a780 x18: ffff225e7eff0f50
+# [ 540.124647] x17: 000000000000cab0 x16: 0000000000000000 x15: ffff225d0353a000
+# [ 540.124661] x14: 0000000000000000 x13: 0000000000000820 x12: 0000000000000000
+# [ 540.124674] x11: ffff800080003a30 x10: 0000000000000001 x9 : 0000000000000000
+# [ 540.124689] x8 : ffff225d002d7300 x7 : 0000000000000000 x6 : 000000000000003f
+# [ 540.124702] x5 : 00000000ffffffff x4 : 0000000000000920 x3 : 0000000000000080
+# [ 540.124716] x2 : 0000000000000000 x1 : 0000000000000000 x0 : ffff225d002d7280
+# [ 540.124731] Call trace:
+# [ 540.124736] xhci_urb_enqueue+0xbc/0x32c (P)
+# [ 540.124751] usb_hcd_submit_urb+0x98/0x7fc
+# [ 540.124766] usb_submit_urb+0x294/0x560
+# [ 540.124780] intr_callback+0x78/0x1fc
+# [ 540.124798] __usb_hcd_giveback_urb+0x68/0x128
+# [ 540.124812] usb_giveback_urb_bh+0xa8/0x140
+# [ 540.124825] process_one_work+0x208/0x5e8
+# [ 540.124840] bh_worker+0x1a8/0x20c
+# [ 540.124853] workqueue_softirq_action+0x78/0x88
+# [ 540.124868] tasklet_hi_action+0x14/0x3c
+# [ 540.124883] handle_softirqs+0x118/0x4b8
+# [ 540.124897] __do_softirq+0x14/0x20
+# [ 540.124908] ____do_softirq+0x10/0x1c
+# [ 540.124922] call_on_irq_stack+0x24/0x4c
+# [ 540.124934] do_softirq_own_stack+0x1c/0x28
+# [ 540.124947] __irq_exit_rcu+0x174/0x1b4
+# [ 540.124961] irq_exit_rcu+0x10/0x38
+# [ 540.124976] el1_interrupt+0x38/0x64
+# [ 540.124987] el1h_64_irq_handler+0x18/0x24
+# [ 540.124998] el1h_64_irq+0x6c/0x70
+# [ 540.125009] lock_acquire+0x1e0/0x338 (P)
+# [ 540.125023] __mutex_lock+0xa8/0x4b8
+# [ 540.125038] mutex_lock_nested+0x24/0x30
+# [ 540.125052] _find_opp_table_unlocked+0x40/0xfc
+# [ 540.125067] _find_key+0x64/0x16c
+# [ 540.125078] dev_pm_opp_find_freq_exact+0x4c/0x74
+# [ 540.125090] qcom_cpufreq_hw_target_index+0xe8/0x128
+# [ 540.125105] __cpufreq_driver_target+0x144/0x29c
+# [ 540.125121] sugov_work+0x58/0x74
+# [ 540.125133] kthread_worker_fn+0xf4/0x324
+# [ 540.125148] kthread+0x12c/0x208
+# [ 540.125160] ret_from_fork+0x10/0x20
+# [ 540.125176] Kernel panic - not syncing: softlockup: hung tasks
+# [ 540.423567] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W L 6.14.0-rc4-gdddf15cff632 #1
+# [ 540.433411] Tainted: [W]=WARN, [L]=SOFTLOCKUP
+# [ 540.437901] Hardware name: Google Cheza (rev3+) (DT)
+# [ 540.443022] Call trace:
+# [ 540.445559] show_stack+0x18/0x24 (C)
+# [ 540.449357] dump_stack_lvl+0x38/0xd0
+# [ 540.453157] dump_stack+0x18/0x24
+# [ 540.456599] panic+0x3bc/0x41c
+# [ 540.459767] watchdog_timer_fn+0x254/0x2e4
+# [ 540.464005] __hrtimer_run_queues+0x3c4/0x440
+# [ 540.468508] hrtimer_interrupt+0xe4/0x244
+# [ 540.472662] arch_timer_handler_phys+0x2c/0x44
+# [ 540.477256] handle_percpu_devid_irq+0x90/0x1f0
+# [ 540.481943] handle_irq_desc+0x40/0x58
+# [ 540.485829] generic_handle_domain_irq+0x1c/0x28
+# [ 540.490604] gic_handle_irq+0x4c/0x11c
+# [ 540.494483] do_interrupt_handler+0x50/0x84
+# [ 540.498811] el1_interrupt+0x34/0x64
+# [ 540.502518] el1h_64_irq_handler+0x18/0x24
+# [ 540.506758] el1h_64_irq+0x6c/0x70
+# [ 540.510279] xhci_urb_enqueue+0xbc/0x32c (P)
+# [ 540.514693] usb_hcd_submit_urb+0x98/0x7fc
+# [ 540.518932] usb_submit_urb+0x294/0x560
+# [ 540.522901] intr_callback+0x78/0x1fc
+# [ 540.526700] __usb_hcd_giveback_urb+0x68/0x128
+# [ 540.531288] usb_giveback_urb_bh+0xa8/0x140
+# [ 540.535614] process_one_work+0x208/0x5e8
+# [ 540.539769] bh_worker+0x1a8/0x20c
+# [ 540.543293] workqueue_softirq_action+0x78/0x88
+# [ 540.547980] tasklet_hi_action+0x14/0x3c
+# [ 540.552038] handle_softirqs+0x118/0x4b8
+# [ 540.556096] __do_softirq+0x14/0x20
+# [ 540.559705] ____do_softirq+0x10/0x1c
+# [ 540.563500] call_on_irq_stack+0x24/0x4c
+# [ 540.567554] do_softirq_own_stack+0x1c/0x28
+# [ 540.571878] __irq_exit_rcu+0x174/0x1b4
+# [ 540.575849] irq_exit_rcu+0x10/0x38
+# [ 540.579462] el1_interrupt+0x38/0x64
+# [ 540.583158] el1h_64_irq_handler+0x18/0x24
+# [ 540.587397] el1h_64_irq+0x6c/0x70
+# [ 540.590918] lock_acquire+0x1e0/0x338 (P)
+# [ 540.595060] __mutex_lock+0xa8/0x4b8
+# [ 540.598760] mutex_lock_nested+0x24/0x30
+# [ 540.602818] _find_opp_table_unlocked+0x40/0xfc
+# [ 540.607503] _find_key+0x64/0x16c
+# [ 540.610940] dev_pm_opp_find_freq_exact+0x4c/0x74
+# [ 540.615798] qcom_cpufreq_hw_target_index+0xe8/0x128
+# [ 540.620924] __cpufreq_driver_target+0x144/0x29c
+# [ 540.625698] sugov_work+0x58/0x74
+# [ 540.629134] kthread_worker_fn+0xf4/0x324
+# [ 540.633278] kthread+0x12c/0x208
+# [ 540.636619] ret_from_fork+0x10/0x20
+# [ 540.640321] SMP: stopping secondary CPUs
+# [ 540.644518] Kernel Offset: 0x417126200000 from 0xffff800080000000
+# [ 540.650848] PHYS_OFFSET: 0xfff0dda400000000
+# [ 540.655170] CPU features: 0x000,00000100,00901250,8200721b
+# [ 540.660829] Memory Limit: none
+# [ 540.663999] ---[ end Kernel panic - not syncing: softlockup: hung tasks ]---
diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
index 329770c520d9..9450f2a002fd 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
index 3c7e494857b5..198deea3faa9 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
index 3c7e494857b5..198deea3faa9 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
index feeed89b6c3f..af99ac54c3a5 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
index feeed89b6c3f..af99ac54c3a5 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
index ba9160d4d8eb..61122ea7f008 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
@@ -5,6 +5,5 @@ core_setmaster_vs_auth,Crash
dumb_buffer@create-clear,Crash
fbdev@pan,Crash
kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
-kms_flip@flip-vs-modeset-vs-hang,Crash
kms_prop_blob@invalid-set-prop,Crash
kms_prop_blob@invalid-set-prop-any,Crash
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
index eb16b29dee48..71418ea35a17 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
@@ -14,6 +14,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
index 2803d0d80192..45dd8d493f6e 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
@@ -2,7 +2,6 @@ dumb_buffer@create-clear,Crash
kms_atomic_transition@modeset-transition,Fail
kms_atomic_transition@modeset-transition-fencing,Fail
kms_atomic_transition@plane-toggle-modeset-transition,Fail
-kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
kms_color@gamma,Fail
kms_color@legacy-gamma,Fail
kms_cursor_crc@cursor-alpha-opaque,Fail
@@ -55,6 +54,7 @@ kms_flip@plain-flip-ts-check,Fail
kms_flip@plain-flip-ts-check-interruptible,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_invalid_mode@int-max-clock,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@lease-uevent,Fail
kms_lease@page-flip-implicit-plane,Fail
kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
index 348b4ce7eb4b..b467991d4094 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
@@ -75,58 +75,72 @@ kms_bw@linear-tiling-2-displays-2160x1440p
# Linux Version: 6.11.0-rc5
kms_flip@flip-vs-expired-vblank
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/f944dd08-c88c-49ae-aff0-274374550a93@collabora.com/T/#u
# Failure Rate: 40
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@linear-tiling-1-displays-2160x1440p
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/afa2d3bf-29f2-488d-8cc9-f30d461444b0@collabora.com/T/#u
# Failure Rate: 80
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_plane_multiple@tiling-none
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/6fdaa97f-c1a5-4216-831f-dbb7c5f90498@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@linear-tiling-1-displays-1920x1080p
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/616aa015-9574-4527-9d07-d8d698bbcc3c@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_plane@plane-position-hole-dpms
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/7a1b888f-d7db-4ed7-96cd-3975ace837fb@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_flip@flip-vs-absolute-wf_vblank
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/f17fffb6-abc4-464e-8465-395311b01f6a@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_flip@flip-vs-blocking-wf-vblank
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/9b590b26-1bf9-4951-b6a3-ef6c67e6a1c6@collabora.com/T/#u
# Failure Rate: 60
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@linear-tiling-2-displays-1920x1080p
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/059545fa-65b1-4f5c-a13e-4d2898679f51@collabora.com/T/#u
# Failure Rate: 20
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_flip@modeset-vs-vblank-race-interruptible
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/eece9a80-42f3-41f4-86cc-69d8a51b976a@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_bw@connected-linear-tiling-1-displays-2160x1440p
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/63dfd5b7-8a54-44a3-9530-f8dcd77a21d1@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_bw@linear-tiling-1-displays-3840x2160p
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
index e8e994d92557..b83ec75161b2 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
@@ -14,6 +14,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
index c72fee70e739..9749ddb75121 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
@@ -157,6 +157,7 @@ kms_flip@plain-flip-ts-check-interruptible,Fail
kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_invalid_mode@int-max-clock,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@cursor-implicit-plane,Fail
kms_lease@lease-uevent,Fail
kms_lease@page-flip-implicit-plane,Fail
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
index adbcdd0f28d2..28e37185bac0 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
@@ -19,6 +19,7 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
index 62428f3c8f31..e3ca6da8cde7 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
@@ -88,3 +88,31 @@ kms_flip@flip-vs-expired-vblank
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
kms_pipe_crc_basic@nonblocking-crc-frame-sequence
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/2364a6bf-e6bc-4741-8c78-cea8bdb06e03@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@modeset-vs-vblank-race
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/f7d72ed9-a783-46d7-b75d-54072bda32a3@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_pipe_crc_basic@suspend-read-crc
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/98d3ba54-bcb9-41ab-adb1-a18ba61ee2e4@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_plane@plane-panning-bottom-right-suspend
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/b58d15eb-094d-4ac2-aad3-83e518c2f55d@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_vblank@ts-continuation-dpms-suspend
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
index 319789806271..716d2d4e452d 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
@@ -1,5 +1,6 @@
# keeps printing vkms_vblank_simulate: vblank timer overrun and never ends
kms_invalid_mode@int-max-clock
+kms_invalid_mode@overflow-vrefresh
# kernel panic seen with kms_cursor_crc tests
kms_cursor_crc.*
@@ -802,6 +803,7 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+kms_dp_link_training.*
# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 30c736fc0067..7d2e499ea5de 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -98,6 +98,21 @@ struct drm_bridge_connector {
* HDMI connector infrastructure, if any (see &DRM_BRIDGE_OP_HDMI).
*/
struct drm_bridge *bridge_hdmi;
+ /**
+ * @bridge_hdmi_audio:
+ *
+ * The bridge in the chain that implements necessary support for the
+ * HDMI Audio infrastructure, if any (see &DRM_BRIDGE_OP_HDMI_AUDIO).
+ */
+ struct drm_bridge *bridge_hdmi_audio;
+ /**
+ * @bridge_dp_audio:
+ *
+ * The bridge in the chain that implements necessary support for the
+ * DisplayPort Audio infrastructure, if any (see
+ * &DRM_BRIDGE_OP_DP_AUDIO).
+ */
+ struct drm_bridge *bridge_dp_audio;
};
#define to_drm_bridge_connector(x) \
@@ -433,14 +448,25 @@ static int drm_bridge_connector_audio_startup(struct drm_connector *connector)
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return -EINVAL;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+
+ if (!bridge->funcs->hdmi_audio_startup)
+ return 0;
+
+ return bridge->funcs->hdmi_audio_startup(connector, bridge);
+ }
+
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
- if (!bridge->funcs->hdmi_audio_startup)
- return 0;
+ if (!bridge->funcs->dp_audio_startup)
+ return 0;
- return bridge->funcs->hdmi_audio_startup(connector, bridge);
+ return bridge->funcs->dp_audio_startup(connector, bridge);
+ }
+
+ return -EINVAL;
}
static int drm_bridge_connector_audio_prepare(struct drm_connector *connector,
@@ -451,11 +477,19 @@ static int drm_bridge_connector_audio_prepare(struct drm_connector *connector,
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return -EINVAL;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+
+ return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms);
+ }
- return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms);
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
+
+ return bridge->funcs->dp_audio_prepare(connector, bridge, fmt, hparms);
+ }
+
+ return -EINVAL;
}
static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector)
@@ -464,11 +498,15 @@ static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector)
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+ bridge->funcs->hdmi_audio_shutdown(connector, bridge);
+ }
- bridge->funcs->hdmi_audio_shutdown(connector, bridge);
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
+ bridge->funcs->dp_audio_shutdown(connector, bridge);
+ }
}
static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connector,
@@ -478,15 +516,27 @@ static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connector,
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return -EINVAL;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+
+ if (!bridge->funcs->hdmi_audio_mute_stream)
+ return -ENOTSUPP;
- if (bridge->funcs->hdmi_audio_mute_stream)
return bridge->funcs->hdmi_audio_mute_stream(connector, bridge,
enable, direction);
- else
- return -ENOTSUPP;
+ }
+
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
+
+ if (!bridge->funcs->dp_audio_mute_stream)
+ return -ENOTSUPP;
+
+ return bridge->funcs->dp_audio_mute_stream(connector, bridge,
+ enable, direction);
+ }
+
+ return -EINVAL;
}
static const struct drm_connector_hdmi_audio_funcs drm_bridge_connector_hdmi_audio_funcs = {
@@ -576,6 +626,42 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
max_bpc = bridge->max_bpc;
}
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI_AUDIO) {
+ if (bridge_connector->bridge_hdmi_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (bridge_connector->bridge_dp_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (!bridge->hdmi_audio_max_i2s_playback_channels &&
+ !bridge->hdmi_audio_spdif_playback)
+ return ERR_PTR(-EINVAL);
+
+ if (!bridge->funcs->hdmi_audio_prepare ||
+ !bridge->funcs->hdmi_audio_shutdown)
+ return ERR_PTR(-EINVAL);
+
+ bridge_connector->bridge_hdmi_audio = bridge;
+ }
+
+ if (bridge->ops & DRM_BRIDGE_OP_DP_AUDIO) {
+ if (bridge_connector->bridge_dp_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (bridge_connector->bridge_hdmi_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (!bridge->hdmi_audio_max_i2s_playback_channels &&
+ !bridge->hdmi_audio_spdif_playback)
+ return ERR_PTR(-EINVAL);
+
+ if (!bridge->funcs->dp_audio_prepare ||
+ !bridge->funcs->dp_audio_shutdown)
+ return ERR_PTR(-EINVAL);
+
+ bridge_connector->bridge_dp_audio = bridge;
+ }
+
if (!drm_bridge_get_next_bridge(bridge))
connector_type = bridge->type;
@@ -611,22 +697,6 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
max_bpc);
if (ret)
return ERR_PTR(ret);
-
- if (bridge->hdmi_audio_max_i2s_playback_channels ||
- bridge->hdmi_audio_spdif_playback) {
- if (!bridge->funcs->hdmi_audio_prepare ||
- !bridge->funcs->hdmi_audio_shutdown)
- return ERR_PTR(-EINVAL);
-
- ret = drm_connector_hdmi_audio_init(connector,
- bridge->hdmi_audio_dev,
- &drm_bridge_connector_hdmi_audio_funcs,
- bridge->hdmi_audio_max_i2s_playback_channels,
- bridge->hdmi_audio_spdif_playback,
- bridge->hdmi_audio_dai_port);
- if (ret)
- return ERR_PTR(ret);
- }
} else {
ret = drmm_connector_init(drm, connector,
&drm_bridge_connector_funcs,
@@ -635,6 +705,24 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
return ERR_PTR(ret);
}
+ if (bridge_connector->bridge_hdmi_audio ||
+ bridge_connector->bridge_dp_audio) {
+ struct device *dev;
+
+ if (bridge_connector->bridge_hdmi_audio)
+ dev = bridge_connector->bridge_hdmi_audio->hdmi_audio_dev;
+ else
+ dev = bridge_connector->bridge_dp_audio->hdmi_audio_dev;
+
+ ret = drm_connector_hdmi_audio_init(connector, dev,
+ &drm_bridge_connector_hdmi_audio_funcs,
+ bridge->hdmi_audio_max_i2s_playback_channels,
+ bridge->hdmi_audio_spdif_playback,
+ bridge->hdmi_audio_dai_port);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
if (bridge_connector->bridge_hpd)
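The hunks above split the single bridge_hdmi pointer into dedicated bridge_hdmi_audio and bridge_dp_audio lookups, so each audio callback dispatches to whichever bridge registered the matching DRM_BRIDGE_OP_* flag, and drm_connector_hdmi_audio_init() is now driven by either bridge. As a rough illustration of what a bridge driver advertises on the DP side, here is a minimal sketch; DRM_BRIDGE_OP_DP_AUDIO, the dp_audio_* callbacks, and the hdmi_audio_* fields are the ones referenced in the diff, while everything named "my_*" and the callback bodies are hypothetical:

#include <drm/drm_bridge.h>
#include <sound/hdmi-codec.h>

/* Hedged sketch: a hypothetical bridge advertising DP audio support. */
static int my_dp_audio_prepare(struct drm_connector *connector,
			       struct drm_bridge *bridge,
			       struct hdmi_codec_daifmt *fmt,
			       struct hdmi_codec_params *hparms)
{
	/* program the audio stream parameters here (illustrative only) */
	return 0;
}

static void my_dp_audio_shutdown(struct drm_connector *connector,
				 struct drm_bridge *bridge)
{
	/* stop the audio stream (illustrative only) */
}

static const struct drm_bridge_funcs my_bridge_funcs = {
	.dp_audio_prepare  = my_dp_audio_prepare,
	.dp_audio_shutdown = my_dp_audio_shutdown,
};

static void my_bridge_setup(struct drm_bridge *bridge, struct device *dev)
{
	bridge->funcs = &my_bridge_funcs;
	/* drm_bridge_connector_init() returns -EINVAL for an audio bridge
	 * that sets neither playback capability, or that lacks the
	 * prepare/shutdown pair; it returns -EBUSY if another bridge in
	 * the chain already claimed HDMI or DP audio. */
	bridge->ops |= DRM_BRIDGE_OP_DP_AUDIO;
	bridge->hdmi_audio_dev = dev;
	bridge->hdmi_audio_max_i2s_playback_channels = 8;
	bridge->hdmi_audio_dai_port = 0;
}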
diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
index 56a4965e518c..ed31471bd0e2 100644
--- a/drivers/gpu/drm/display/drm_dp_cec.c
+++ b/drivers/gpu/drm/display/drm_dp_cec.c
@@ -96,7 +96,7 @@ static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable)
u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0;
ssize_t err = 0;
- err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ err = drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_CONTROL, val);
return (enable && err < 0) ? err : 0;
}
@@ -112,7 +112,7 @@ static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
la_mask |= adap->log_addrs.log_addr_mask | (1 << addr);
mask[0] = la_mask & 0xff;
mask[1] = la_mask >> 8;
- err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
+ err = drm_dp_dpcd_write_data(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0;
}
@@ -123,15 +123,14 @@ static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
unsigned int retries = min(5, attempts - 1);
ssize_t err;
- err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER,
- msg->msg, msg->len);
+ err = drm_dp_dpcd_write_data(aux, DP_CEC_TX_MESSAGE_BUFFER,
+ msg->msg, msg->len);
if (err < 0)
return err;
- err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO,
- (msg->len - 1) | (retries << 4) |
- DP_CEC_TX_MESSAGE_SEND);
- return err < 0 ? err : 0;
+ return drm_dp_dpcd_write_byte(aux, DP_CEC_TX_MESSAGE_INFO,
+ (msg->len - 1) | (retries << 4) |
+ DP_CEC_TX_MESSAGE_SEND);
}
static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
@@ -144,13 +143,13 @@ static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
if (!(adap->capabilities & CEC_CAP_MONITOR_ALL))
return 0;
- err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val);
- if (err >= 0) {
+ err = drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_CONTROL, &val);
+ if (!err) {
if (enable)
val |= DP_CEC_SNOOPING_ENABLE;
else
val &= ~DP_CEC_SNOOPING_ENABLE;
- err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ err = drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_CONTROL, val);
}
return (enable && err < 0) ? err : 0;
}
@@ -194,7 +193,7 @@ static int drm_dp_cec_received(struct drm_dp_aux *aux)
u8 rx_msg_info;
ssize_t err;
- err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
+ err = drm_dp_dpcd_read_byte(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
if (err < 0)
return err;
@@ -202,7 +201,7 @@ static int drm_dp_cec_received(struct drm_dp_aux *aux)
return 0;
msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1;
- err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
+ err = drm_dp_dpcd_read_data(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
if (err < 0)
return err;
@@ -215,7 +214,7 @@ static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
struct cec_adapter *adap = aux->cec.adap;
u8 flags;
- if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
return;
if (flags & DP_CEC_RX_MESSAGE_INFO_VALID)
@@ -230,7 +229,7 @@ static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
(DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR))
cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK |
CEC_TX_STATUS_MAX_RETRIES);
- drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
+ drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
}
/**
@@ -253,13 +252,13 @@ void drm_dp_cec_irq(struct drm_dp_aux *aux)
if (!aux->cec.adap)
goto unlock;
- ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
- &cec_irq);
+ ret = drm_dp_dpcd_read_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
+ &cec_irq);
if (ret < 0 || !(cec_irq & DP_CEC_IRQ))
goto unlock;
drm_dp_cec_handle_irq(aux);
- drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
+ drm_dp_dpcd_write_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
unlock:
mutex_unlock(&aux->cec.lock);
}
@@ -269,7 +268,7 @@ static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap)
{
u8 cap = 0;
- if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 ||
+ if (drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) < 0 ||
!(cap & DP_CEC_TUNNELING_CAPABLE))
return false;
if (cec_cap)
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index dbce1c3f4969..f2a6559a2710 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -327,7 +327,7 @@ static int __read_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SI
if (offset < DP_RECEIVER_CAP_SIZE) {
rd_interval = dpcd[offset];
} else {
- if (drm_dp_dpcd_readb(aux, offset, &rd_interval) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, offset, &rd_interval) < 0) {
drm_dbg_kms(aux->drm_dev, "%s: failed rd interval read\n",
aux->name);
/* arbitrary default delay */
@@ -358,7 +358,7 @@ int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux)
int unit;
u8 val;
- if (drm_dp_dpcd_readb(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) < 0) {
drm_err(aux->drm_dev, "%s: failed rd interval read\n",
aux->name);
/* default to max */
@@ -704,6 +704,8 @@ EXPORT_SYMBOL(drm_dp_dpcd_set_powered);
* function returns -EPROTO. Errors from the underlying AUX channel transfer
* function, with the exception of -EBUSY (which causes the transaction to
* be retried), are propagated to the caller.
+ *
+ * In most cases you want to use drm_dp_dpcd_read_data() instead.
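+ *
+ * A minimal sketch of the difference, with a hypothetical caller: the raw
+ * helper returns a byte count that has to be checked against the requested
+ * size, while the _data variant returns 0 or a negative error code:
+ *
+ * .. code-block:: c
+ *
+ *     u8 buf[DP_RECEIVER_CAP_SIZE];
+ *
+ *     if (drm_dp_dpcd_read(aux, DP_DPCD_REV, buf, sizeof(buf)) != sizeof(buf))
+ *             return -EIO;
+ *
+ *     if (drm_dp_dpcd_read_data(aux, DP_DPCD_REV, buf, sizeof(buf)) < 0)
+ *             return -EIO;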
*/
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
@@ -752,6 +754,8 @@ EXPORT_SYMBOL(drm_dp_dpcd_read);
* function returns -EPROTO. Errors from the underlying AUX channel transfer
* function, with the exception of -EBUSY (which causes the transaction to
* be retried), are propagated to the caller.
+ *
+ * In most cases you want to use drm_dp_dpcd_write_data() instead.
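+ *
+ * A minimal sketch with a hypothetical caller; for single registers,
+ * drm_dp_dpcd_write_byte() avoids the buffer entirely:
+ *
+ * .. code-block:: c
+ *
+ *     if (drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_CONTROL,
+ *                                DP_CEC_TUNNELING_ENABLE) < 0)
+ *             return -EIO;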
*/
ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
@@ -774,14 +778,13 @@ EXPORT_SYMBOL(drm_dp_dpcd_write);
* @aux: DisplayPort AUX channel
* @status: buffer to store the link status in (must be at least 6 bytes)
*
- * Returns the number of bytes transferred on success or a negative error
- * code on failure.
+ * Returns a negative error code on failure or 0 on success.
*/
int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
u8 status[DP_LINK_STATUS_SIZE])
{
- return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status,
- DP_LINK_STATUS_SIZE);
+ return drm_dp_dpcd_read_data(aux, DP_LANE0_1_STATUS, status,
+ DP_LINK_STATUS_SIZE);
}
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
@@ -804,30 +807,20 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
{
int ret;
- if (dp_phy == DP_PHY_DPRX) {
- ret = drm_dp_dpcd_read(aux,
- DP_LANE0_1_STATUS,
- link_status,
- DP_LINK_STATUS_SIZE);
-
- if (ret < 0)
- return ret;
-
- WARN_ON(ret != DP_LINK_STATUS_SIZE);
-
- return 0;
- }
+ if (dp_phy == DP_PHY_DPRX)
+ return drm_dp_dpcd_read_data(aux,
+ DP_LANE0_1_STATUS,
+ link_status,
+ DP_LINK_STATUS_SIZE);
- ret = drm_dp_dpcd_read(aux,
- DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy),
- link_status,
- DP_LINK_STATUS_SIZE - 1);
+ ret = drm_dp_dpcd_read_data(aux,
+ DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy),
+ link_status,
+ DP_LINK_STATUS_SIZE - 1);
if (ret < 0)
return ret;
- WARN_ON(ret != DP_LINK_STATUS_SIZE - 1);
-
/* Convert the LTTPR to the sink PHY link status layout */
memmove(&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS + 1],
&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS],
@@ -838,12 +831,81 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
}
EXPORT_SYMBOL(drm_dp_dpcd_read_phy_link_status);
+/**
+ * drm_dp_link_power_up() - power up a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @revision: DPCD revision supported on the link
+ *
+ * Returns 0 on success or a negative error code on failure.
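+ *
+ * A minimal sketch of a hypothetical call site, passing the cached DPCD
+ * revision byte:
+ *
+ * .. code-block:: c
+ *
+ *     err = drm_dp_link_power_up(aux, dpcd[DP_DPCD_REV]);
+ *     if (err < 0)
+ *             return err;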
+ */
+int drm_dp_link_power_up(struct drm_dp_aux *aux, unsigned char revision)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (revision < DP_DPCD_REV_11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must exit the
+ * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
+ * Control Field" (register 0x600).
+ */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_power_up);
+
+/**
+ * drm_dp_link_power_down() - power down a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @revision: DPCD revision supported on the link
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_down(struct drm_dp_aux *aux, unsigned char revision)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (revision < DP_DPCD_REV_11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_power_down);
+
static int read_payload_update_status(struct drm_dp_aux *aux)
{
int ret;
u8 status;
- ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
if (ret < 0)
return ret;
@@ -870,21 +932,21 @@ int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux,
int ret;
int retries = 0;
- drm_dp_dpcd_writeb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
- DP_PAYLOAD_TABLE_UPDATED);
+ drm_dp_dpcd_write_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ DP_PAYLOAD_TABLE_UPDATED);
payload_alloc[0] = vcpid;
payload_alloc[1] = start_time_slot;
payload_alloc[2] = time_slot_count;
- ret = drm_dp_dpcd_write(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
- if (ret != 3) {
+ ret = drm_dp_dpcd_write_data(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "failed to write payload allocation %d\n", ret);
goto fail;
}
retry:
- ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "failed to read payload table status %d\n", ret);
goto fail;
@@ -1040,15 +1102,15 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
{
u8 link_edid_read = 0, auto_test_req = 0, test_resp = 0;
- if (drm_dp_dpcd_read(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
- &auto_test_req, 1) < 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &auto_test_req) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n",
aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR);
return false;
}
auto_test_req &= DP_AUTOMATED_TEST_REQUEST;
- if (drm_dp_dpcd_read(aux, DP_TEST_REQUEST, &link_edid_read, 1) < 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_TEST_REQUEST, &link_edid_read) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n",
aux->name, DP_TEST_REQUEST);
return false;
@@ -1061,23 +1123,23 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
return false;
}
- if (drm_dp_dpcd_write(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
- &auto_test_req, 1) < 1) {
+ if (drm_dp_dpcd_write_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ auto_test_req) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR);
return false;
}
/* send back checksum for the last edid extension block data */
- if (drm_dp_dpcd_write(aux, DP_TEST_EDID_CHECKSUM,
- &real_edid_checksum, 1) < 1) {
+ if (drm_dp_dpcd_write_byte(aux, DP_TEST_EDID_CHECKSUM,
+ real_edid_checksum) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_TEST_EDID_CHECKSUM);
return false;
}
test_resp |= DP_TEST_EDID_CHECKSUM_WRITE;
- if (drm_dp_dpcd_write(aux, DP_TEST_RESPONSE, &test_resp, 1) < 1) {
+ if (drm_dp_dpcd_write_byte(aux, DP_TEST_RESPONSE, test_resp) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_TEST_RESPONSE);
return false;
@@ -1114,12 +1176,10 @@ static int drm_dp_read_extended_dpcd_caps(struct drm_dp_aux *aux,
DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
return 0;
- ret = drm_dp_dpcd_read(aux, DP_DP13_DPCD_REV, &dpcd_ext,
- sizeof(dpcd_ext));
+ ret = drm_dp_dpcd_read_data(aux, DP_DP13_DPCD_REV, &dpcd_ext,
+ sizeof(dpcd_ext));
if (ret < 0)
return ret;
- if (ret != sizeof(dpcd_ext))
- return -EIO;
if (dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
drm_dbg_kms(aux->drm_dev,
@@ -1156,10 +1216,10 @@ int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
{
int ret;
- ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
+ ret = drm_dp_dpcd_read_data(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
if (ret < 0)
return ret;
- if (ret != DP_RECEIVER_CAP_SIZE || dpcd[DP_DPCD_REV] == 0)
+ if (dpcd[DP_DPCD_REV] == 0)
return -EIO;
ret = drm_dp_read_extended_dpcd_caps(aux, dpcd);
@@ -1209,11 +1269,9 @@ int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE)
len *= 4;
- ret = drm_dp_dpcd_read(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len);
+ ret = drm_dp_dpcd_read_data(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len);
if (ret < 0)
return ret;
- if (ret != len)
- return -EIO;
drm_dbg_kms(aux->drm_dev, "%s: DPCD DFP: %*ph\n", aux->name, len, downstream_ports);
@@ -1570,7 +1628,7 @@ EXPORT_SYMBOL(drm_dp_downstream_mode);
*/
int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6])
{
- return drm_dp_dpcd_read(aux, DP_BRANCH_ID, id, 6);
+ return drm_dp_dpcd_read_data(aux, DP_BRANCH_ID, id, 6);
}
EXPORT_SYMBOL(drm_dp_downstream_id);
@@ -1635,13 +1693,13 @@ void drm_dp_downstream_debug(struct seq_file *m,
drm_dp_downstream_id(aux, id);
seq_printf(m, "\t\tID: %s\n", id);
- len = drm_dp_dpcd_read(aux, DP_BRANCH_HW_REV, &rev[0], 1);
- if (len > 0)
+ len = drm_dp_dpcd_read_data(aux, DP_BRANCH_HW_REV, &rev[0], 1);
+ if (!len)
seq_printf(m, "\t\tHW: %d.%d\n",
(rev[0] & 0xf0) >> 4, rev[0] & 0xf);
- len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2);
- if (len > 0)
+ len = drm_dp_dpcd_read_data(aux, DP_BRANCH_SW_REV, rev, 2);
+ if (!len)
seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
if (detailed_cap_info) {
@@ -1779,11 +1837,9 @@ int drm_dp_read_sink_count(struct drm_dp_aux *aux)
u8 count;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_SINK_COUNT, &count);
+ ret = drm_dp_dpcd_read_byte(aux, DP_SINK_COUNT, &count);
if (ret < 0)
return ret;
- if (ret != 1)
- return -EIO;
return DP_GET_SINK_COUNT(count);
}
@@ -2081,14 +2137,17 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
for (i = 0; i < num; i++) {
msg.address = msgs[i].addr;
- drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
- /* Send a bare address packet to start the transaction.
- * Zero sized messages specify an address only (bare
- * address) transaction.
- */
- msg.buffer = NULL;
- msg.size = 0;
- err = drm_dp_i2c_do_msg(aux, &msg);
+
+ if (!aux->no_zero_sized) {
+ drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
+ /* Send a bare address packet to start the transaction.
+ * Zero sized messages specify an address only (bare
+ * address) transaction.
+ */
+ msg.buffer = NULL;
+ msg.size = 0;
+ err = drm_dp_i2c_do_msg(aux, &msg);
+ }
/*
* Reset msg.request in case it got
@@ -2107,6 +2166,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
msg.buffer = msgs[i].buf + j;
msg.size = min(transfer_size, msgs[i].len - j);
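+ /*
+  * Without bare address packets, the transaction is ended by
+  * clearing MOT on the last data chunk of the message instead.
+  */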
+ if (j + msg.size == msgs[i].len && aux->no_zero_sized)
+ msg.request &= ~DP_AUX_I2C_MOT;
err = drm_dp_i2c_drain_msg(aux, &msg);
/*
@@ -2124,15 +2185,17 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
}
if (err >= 0)
err = num;
- /* Send a bare address packet to close out the transaction.
- * Zero sized messages specify an address only (bare
- * address) transaction.
- */
- msg.request &= ~DP_AUX_I2C_MOT;
- msg.buffer = NULL;
- msg.size = 0;
- (void)drm_dp_i2c_do_msg(aux, &msg);
+ if (!aux->no_zero_sized) {
+ /* Send a bare address packet to close out the transaction.
+ * Zero sized messages specify an address only (bare
+ * address) transaction.
+ */
+ msg.request &= ~DP_AUX_I2C_MOT;
+ msg.buffer = NULL;
+ msg.size = 0;
+ (void)drm_dp_i2c_do_msg(aux, &msg);
+ }
return err;
}
@@ -2172,13 +2235,13 @@ static int drm_dp_aux_get_crc(struct drm_dp_aux *aux, u8 *crc)
u8 buf, count;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
WARN_ON(!(buf & DP_TEST_SINK_START));
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK_MISC, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK_MISC, &buf);
if (ret < 0)
return ret;
@@ -2192,11 +2255,7 @@ static int drm_dp_aux_get_crc(struct drm_dp_aux *aux, u8 *crc)
* At DP_TEST_CRC_R_CR, there's 6 bytes containing CRC data, 2 bytes
* per component (RGB or CrYCb).
*/
- ret = drm_dp_dpcd_read(aux, DP_TEST_CRC_R_CR, crc, 6);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_read_data(aux, DP_TEST_CRC_R_CR, crc, 6);
}
static void drm_dp_aux_crc_work(struct work_struct *work)
@@ -2395,11 +2454,11 @@ int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf | DP_TEST_SINK_START);
+ ret = drm_dp_dpcd_write_byte(aux, DP_TEST_SINK, buf | DP_TEST_SINK_START);
if (ret < 0)
return ret;
@@ -2422,11 +2481,11 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf & ~DP_TEST_SINK_START);
+ ret = drm_dp_dpcd_write_byte(aux, DP_TEST_SINK, buf & ~DP_TEST_SINK_START);
if (ret < 0)
return ret;
@@ -2512,11 +2571,7 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
static int drm_dp_read_ident(struct drm_dp_aux *aux, unsigned int offset,
struct drm_dp_dpcd_ident *ident)
{
- int ret;
-
- ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
-
- return ret < 0 ? ret : 0;
+ return drm_dp_dpcd_read_data(aux, offset, ident, sizeof(*ident));
}
static void drm_dp_dump_desc(struct drm_dp_aux *aux,
@@ -2774,13 +2829,11 @@ static int drm_dp_read_lttpr_regs(struct drm_dp_aux *aux,
int ret;
for (offset = 0; offset < buf_size; offset += block_size) {
- ret = drm_dp_dpcd_read(aux,
- address + offset,
- &buf[offset], block_size);
+ ret = drm_dp_dpcd_read_data(aux,
+ address + offset,
+ &buf[offset], block_size);
if (ret < 0)
return ret;
-
- WARN_ON(ret != block_size);
}
return 0;
@@ -2995,12 +3048,12 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
int err;
u8 rate, lanes;
- err = drm_dp_dpcd_readb(aux, DP_TEST_LINK_RATE, &rate);
+ err = drm_dp_dpcd_read_byte(aux, DP_TEST_LINK_RATE, &rate);
if (err < 0)
return err;
data->link_rate = drm_dp_bw_code_to_link_rate(rate);
- err = drm_dp_dpcd_readb(aux, DP_TEST_LANE_COUNT, &lanes);
+ err = drm_dp_dpcd_read_byte(aux, DP_TEST_LANE_COUNT, &lanes);
if (err < 0)
return err;
data->num_lanes = lanes & DP_MAX_LANE_COUNT_MASK;
@@ -3008,22 +3061,22 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
if (lanes & DP_ENHANCED_FRAME_CAP)
data->enhanced_frame_cap = true;
- err = drm_dp_dpcd_readb(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern);
+ err = drm_dp_dpcd_read_byte(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern);
if (err < 0)
return err;
switch (data->phy_pattern) {
case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
- err = drm_dp_dpcd_read(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
- &data->custom80, sizeof(data->custom80));
+ err = drm_dp_dpcd_read_data(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+ &data->custom80, sizeof(data->custom80));
if (err < 0)
return err;
break;
case DP_PHY_TEST_PATTERN_CP2520:
- err = drm_dp_dpcd_read(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
- &data->hbr2_reset,
- sizeof(data->hbr2_reset));
+ err = drm_dp_dpcd_read_data(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
+ &data->hbr2_reset,
+ sizeof(data->hbr2_reset));
if (err < 0)
return err;
}
@@ -3050,15 +3103,15 @@ int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
if (dp_rev < 0x12) {
test_pattern = (test_pattern << 2) &
DP_LINK_QUAL_PATTERN_11_MASK;
- err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET,
- test_pattern);
+ err = drm_dp_dpcd_write_byte(aux, DP_TRAINING_PATTERN_SET,
+ test_pattern);
if (err < 0)
return err;
} else {
for (i = 0; i < data->num_lanes; i++) {
- err = drm_dp_dpcd_writeb(aux,
- DP_LINK_QUAL_LANE0_SET + i,
- test_pattern);
+ err = drm_dp_dpcd_write_byte(aux,
+ DP_LINK_QUAL_LANE0_SET + i,
+ test_pattern);
if (err < 0)
return err;
}
@@ -3265,8 +3318,8 @@ bool drm_dp_as_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_C
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
return false;
- if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1,
- &rx_feature) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1,
+ &rx_feature) < 0) {
drm_dbg_dp(aux->drm_dev,
"Failed to read DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1\n");
return false;
@@ -3290,7 +3343,7 @@ bool drm_dp_vsc_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
return false;
- if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) < 0) {
drm_dbg_dp(aux->drm_dev, "failed to read DP_DPRX_FEATURE_ENUMERATION_LIST\n");
return false;
}
@@ -3421,16 +3474,13 @@ EXPORT_SYMBOL(drm_dp_get_pcon_max_frl_bw);
*/
int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd)
{
- int ret;
u8 buf = DP_PCON_ENABLE_SOURCE_CTL_MODE |
DP_PCON_ENABLE_LINK_FRL_MODE;
if (enable_frl_ready_hpd)
buf |= DP_PCON_ENABLE_HPD_READY;
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
-
- return ret;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_frl_prepare);
@@ -3445,7 +3495,7 @@ bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux)
int ret;
u8 buf;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
if (ret < 0)
return false;
@@ -3474,7 +3524,7 @@ int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
int ret;
u8 buf;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
if (ret < 0)
return ret;
@@ -3509,11 +3559,7 @@ int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
return -EINVAL;
}
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_frl_configure_1);
@@ -3539,7 +3585,7 @@ int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
else
buf &= ~DP_PCON_FRL_LINK_TRAIN_EXTENDED;
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf);
@@ -3555,13 +3601,7 @@ EXPORT_SYMBOL(drm_dp_pcon_frl_configure_2);
*/
int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux)
{
- int ret;
-
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0);
}
EXPORT_SYMBOL(drm_dp_pcon_reset_frl_config);
@@ -3576,7 +3616,7 @@ int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
int ret;
u8 buf = 0;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
if (ret < 0)
return ret;
if (!(buf & DP_PCON_ENABLE_SOURCE_CTL_MODE)) {
@@ -3585,11 +3625,7 @@ int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
return -EINVAL;
}
buf |= DP_PCON_ENABLE_HDMI_LINK;
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_frl_enable);
@@ -3604,7 +3640,7 @@ bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
if (ret < 0)
return false;
@@ -3629,7 +3665,7 @@ int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask)
int mode;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf);
if (ret < 0)
return ret;
@@ -3658,7 +3694,7 @@ void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
for (i = 0; i < hdmi->max_lanes; i++) {
- if (drm_dp_dpcd_readb(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0)
return;
error_count = buf & DP_PCON_HDMI_ERROR_COUNT_MASK;
@@ -3793,7 +3829,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
if (ret < 0)
return ret;
@@ -3804,11 +3840,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
buf |= pps_buf_config << 2;
}
- ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
}
/**
@@ -3820,13 +3852,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
*/
int drm_dp_pcon_pps_default(struct drm_dp_aux *aux)
{
- int ret;
-
- ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED);
}
EXPORT_SYMBOL(drm_dp_pcon_pps_default);
@@ -3842,15 +3868,11 @@ int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128])
{
int ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, &pps_buf, 128);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, &pps_buf, 128);
if (ret < 0)
return ret;
- ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
}
EXPORT_SYMBOL(drm_dp_pcon_pps_override_buf);
@@ -3867,21 +3889,17 @@ int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6])
{
int ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2);
if (ret < 0)
return ret;
- ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
}
EXPORT_SYMBOL(drm_dp_pcon_pps_override_param);
@@ -3897,7 +3915,7 @@ int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc)
int ret;
u8 buf;
- ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
if (ret < 0)
return ret;
@@ -3906,11 +3924,7 @@ int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc)
else
buf &= ~DP_CONVERSION_RGB_YCBCR_MASK;
- ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr);
@@ -3942,12 +3956,12 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac
buf[0] = level;
}
- ret = drm_dp_dpcd_write(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf));
- if (ret != sizeof(buf)) {
+ ret = drm_dp_dpcd_write_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf));
+ if (ret < 0) {
drm_err(aux->drm_dev,
"%s: Failed to write aux backlight level: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
return 0;
@@ -3965,22 +3979,22 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
if (!bl->aux_enable)
return 0;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, &buf);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, &buf);
+ if (ret < 0) {
drm_err(aux->drm_dev, "%s: Failed to read eDP display control register: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
if (enable)
buf |= DP_EDP_BACKLIGHT_ENABLE;
else
buf &= ~DP_EDP_BACKLIGHT_ENABLE;
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, buf);
- if (ret != 1) {
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, buf);
+ if (ret < 0) {
drm_err(aux->drm_dev, "%s: Failed to write eDP display control register: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
return 0;
@@ -4016,15 +4030,16 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_PWM;
if (bl->pwmgen_bit_count) {
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count);
- if (ret != 1)
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count);
+ if (ret < 0)
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
aux->name, ret);
}
if (bl->pwm_freq_pre_divider) {
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_FREQ_SET, bl->pwm_freq_pre_divider);
- if (ret != 1)
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_BACKLIGHT_FREQ_SET,
+ bl->pwm_freq_pre_divider);
+ if (ret < 0)
drm_dbg_kms(aux->drm_dev,
"%s: Failed to write aux backlight frequency: %d\n",
aux->name, ret);
@@ -4032,8 +4047,8 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE;
}
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf);
- if (ret != 1) {
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux backlight mode: %d\n",
aux->name, ret);
return ret < 0 ? ret : -EIO;
@@ -4088,8 +4103,8 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
if (!bl->aux_set)
return 0;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n",
aux->name, ret);
return -ENODEV;
@@ -4122,14 +4137,14 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
* - FxP is within 25% of desired value.
* Note: 25% is arbitrary value and may need some tweak.
*/
- ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n",
aux->name, ret);
return 0;
}
- ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n",
aux->name, ret);
return 0;
@@ -4154,8 +4169,8 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
break;
}
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, pn);
- if (ret != 1) {
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, pn);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
aux->name, ret);
return 0;
@@ -4180,8 +4195,8 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
u8 buf[2];
u8 mode_reg;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight mode: %d\n",
aux->name, ret);
return ret < 0 ? ret : -EIO;
@@ -4194,11 +4209,11 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
if (*current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
int size = 1 + bl->lsb_reg_used;
- ret = drm_dp_dpcd_read(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size);
- if (ret != size) {
+ ret = drm_dp_dpcd_read_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight level: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
if (bl->lsb_reg_used)
@@ -4343,8 +4358,8 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux)
if (!panel || !panel->dev || !aux)
return -EINVAL;
- ret = drm_dp_dpcd_read(aux, DP_EDP_DPCD_REV, edp_dpcd,
- EDP_DISPLAY_CTL_CAP_SIZE);
+ ret = drm_dp_dpcd_read_data(aux, DP_EDP_DPCD_REV, edp_dpcd,
+ EDP_DISPLAY_CTL_CAP_SIZE);
if (ret < 0)
return ret;
@@ -4385,8 +4400,9 @@ EXPORT_SYMBOL(drm_panel_dp_aux_backlight);
#endif
/* See DP Standard v2.1 2.6.4.4.1.1, 2.8.4.4, 2.8.7 */
-static int drm_dp_link_symbol_cycles(int lane_count, int pixels, int bpp_x16,
- int symbol_size, bool is_mst)
+static int drm_dp_link_data_symbol_cycles(int lane_count, int pixels,
+ int bpp_x16, int symbol_size,
+ bool is_mst)
{
int cycles = DIV_ROUND_UP(pixels * bpp_x16, 16 * symbol_size * lane_count);
int align = is_mst ? 4 / lane_count : 1;
@@ -4394,22 +4410,42 @@ static int drm_dp_link_symbol_cycles(int lane_count, int pixels, int bpp_x16,
return ALIGN(cycles, align);
}
-static int drm_dp_link_dsc_symbol_cycles(int lane_count, int pixels, int slice_count,
- int bpp_x16, int symbol_size, bool is_mst)
+/**
+ * drm_dp_link_symbol_cycles - calculate the link symbol count with/without DSC
+ * @lane_count: DP link lane count
+ * @pixels: number of pixels in a scanline
+ * @dsc_slice_count: number of slices for DSC or '0' for non-DSC
+ * @bpp_x16: bits per pixel in .4 binary fixed format
+ * @symbol_size: DP symbol size
+ * @is_mst: %true for MST and %false for SST
+ *
+ * Calculate the link symbol cycles for both the DSC (@dsc_slice_count != 0)
+ * and the non-DSC (@dsc_slice_count == 0) case and return the count.
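+ *
+ * As a worked example, in the non-DSC SST case 1920 pixels at 24 bpp
+ * (@bpp_x16 == 384) on a 4-lane 8b/10b link (@symbol_size == 8) take
+ * DIV_ROUND_UP(1920 * 384, 16 * 8 * 4) == 1440 symbol cycles.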
+ */
+int drm_dp_link_symbol_cycles(int lane_count, int pixels, int dsc_slice_count,
+ int bpp_x16, int symbol_size, bool is_mst)
{
+ int slice_count = dsc_slice_count ? : 1;
int slice_pixels = DIV_ROUND_UP(pixels, slice_count);
- int slice_data_cycles = drm_dp_link_symbol_cycles(lane_count, slice_pixels,
- bpp_x16, symbol_size, is_mst);
- int slice_eoc_cycles = is_mst ? 4 / lane_count : 1;
+ int slice_data_cycles = drm_dp_link_data_symbol_cycles(lane_count,
+ slice_pixels,
+ bpp_x16,
+ symbol_size,
+ is_mst);
+ int slice_eoc_cycles = 0;
+
+ if (dsc_slice_count)
+ slice_eoc_cycles = is_mst ? 4 / lane_count : 1;
return slice_count * (slice_data_cycles + slice_eoc_cycles);
}
+EXPORT_SYMBOL(drm_dp_link_symbol_cycles);
/**
* drm_dp_bw_overhead - Calculate the BW overhead of a DP link stream
* @lane_count: DP link lane count
* @hactive: pixel count of the active period in one scanline of the stream
- * @dsc_slice_count: DSC slice count if @flags/DRM_DP_LINK_BW_OVERHEAD_DSC is set
+ * @dsc_slice_count: number of slices for DSC or '0' for non-DSC
* @bpp_x16: bits per pixel in .4 binary fixed point
* @flags: DRM_DP_OVERHEAD_x flags
*
@@ -4423,7 +4459,7 @@ static int drm_dp_link_dsc_symbol_cycles(int lane_count, int pixels, int slice_c
* as well as the stream's
* - @hactive timing
* - @bpp_x16 color depth
- * - compression mode (@flags / %DRM_DP_OVERHEAD_DSC).
+ * - compression mode (@dsc_slice_count != 0)
* Note that this overhead doesn't account for the 8b/10b, 128b/132b
* channel coding efficiency, for that see
* @drm_dp_link_bw_channel_coding_efficiency().
@@ -4478,15 +4514,10 @@ int drm_dp_bw_overhead(int lane_count, int hactive,
WARN_ON((flags & DRM_DP_BW_OVERHEAD_UHBR) &&
(flags & DRM_DP_BW_OVERHEAD_FEC));
- if (flags & DRM_DP_BW_OVERHEAD_DSC)
- symbol_cycles = drm_dp_link_dsc_symbol_cycles(lane_count, hactive,
- dsc_slice_count,
- bpp_x16, symbol_size,
- is_mst);
- else
- symbol_cycles = drm_dp_link_symbol_cycles(lane_count, hactive,
- bpp_x16, symbol_size,
- is_mst);
+ symbol_cycles = drm_dp_link_symbol_cycles(lane_count, hactive,
+ dsc_slice_count,
+ bpp_x16, symbol_size,
+ is_mst);
return DIV_ROUND_UP_ULL(mul_u32_u32(symbol_cycles * symbol_size * lane_count,
overhead * 16),
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 3a1f1ffc7b55..a89f38fd3218 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -2192,24 +2192,20 @@ static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, guid_t *guid)
guid_copy(&mstb->guid, guid);
if (!drm_dp_validate_guid(mstb->mgr, &mstb->guid)) {
+ struct drm_dp_aux *aux;
u8 buf[UUID_SIZE];
export_guid(buf, &mstb->guid);
- if (mstb->port_parent) {
- ret = drm_dp_send_dpcd_write(mstb->mgr,
- mstb->port_parent,
- DP_GUID, sizeof(buf), buf);
- } else {
- ret = drm_dp_dpcd_write(mstb->mgr->aux,
- DP_GUID, buf, sizeof(buf));
- }
- }
+ if (mstb->port_parent)
+ aux = &mstb->port_parent->aux;
+ else
+ aux = mstb->mgr->aux;
- if (ret < 16 && ret > 0)
- return -EPROTO;
+ ret = drm_dp_dpcd_write_data(aux, DP_GUID, buf, sizeof(buf));
+ }
- return ret == 16 ? 0 : ret;
+ return ret;
}
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
@@ -2744,14 +2740,13 @@ retry:
do {
tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
- ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
- &msg[offset],
- tosend);
- if (ret != tosend) {
- if (ret == -EIO && retries < 5) {
- retries++;
- goto retry;
- }
+ ret = drm_dp_dpcd_write_data(mgr->aux, regbase + offset,
+ &msg[offset],
+ tosend);
+ if (ret == -EIO && retries < 5) {
+ retries++;
+ goto retry;
+ } else if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);
return -EIO;
@@ -3624,7 +3619,7 @@ enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux,
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
return DRM_DP_SST;
- if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
+ if (drm_dp_dpcd_read_byte(aux, DP_MSTM_CAP, &mstm_cap) < 0)
return DRM_DP_SST;
if (mstm_cap & DP_MST_CAP)
@@ -3679,10 +3674,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_primary = mstb;
drm_dp_mst_topology_get_mstb(mgr->mst_primary);
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN |
- DP_UP_REQ_EN |
- DP_UPSTREAM_IS_SRC);
+ ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
if (ret < 0)
goto out_unlock;
@@ -3697,7 +3692,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mstb = mgr->mst_primary;
mgr->mst_primary = NULL;
/* this can fail if the device is gone */
- drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
mgr->payload_id_table_cleared = false;
@@ -3763,8 +3758,8 @@ EXPORT_SYMBOL(drm_dp_mst_topology_queue_probe);
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
mutex_lock(&mgr->lock);
- drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UPSTREAM_IS_SRC);
+ drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
flush_work(&mgr->up_req_work);
flush_work(&mgr->work);
@@ -3813,18 +3808,18 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
goto out_fail;
}
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN |
- DP_UP_REQ_EN |
- DP_UPSTREAM_IS_SRC);
+ ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
if (ret < 0) {
drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
goto out_fail;
}
/* Some hubs forget their guids after they resume */
- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf));
- if (ret != sizeof(buf)) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_GUID, buf, sizeof(buf));
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
@@ -3883,8 +3878,8 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
*mstb = NULL;
len = min(mgr->max_dpcd_transaction_bytes, 16);
- ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
- if (ret != len) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, basereg, replyblock, len);
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret);
return false;
}
@@ -3922,9 +3917,9 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
curreply = len;
while (replylen > 0) {
len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
- ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
- replyblock, len);
- if (ret != len) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, basereg + curreply,
+ replyblock, len);
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
len, ret);
return false;
@@ -4881,9 +4876,9 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
int i;
for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
- if (drm_dp_dpcd_read(mgr->aux,
- DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
- &buf[i], 16) != 16)
+ if (drm_dp_dpcd_read_data(mgr->aux,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
+ &buf[i], 16) < 0)
return false;
}
return true;
@@ -4972,23 +4967,24 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
}
seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
- ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
- if (ret != 2) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_FAUX_CAP, buf, 2);
+ if (ret < 0) {
seq_printf(m, "faux/mst read failed\n");
goto out;
}
seq_printf(m, "faux/mst: %*ph\n", 2, buf);
- ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_MSTM_CTRL, buf, 1);
+ if (ret < 0) {
seq_printf(m, "mst ctrl read failed\n");
goto out;
}
seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
/* dump the standard OUI branch header */
- ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
- if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_BRANCH_OUI, buf,
+ DP_BRANCH_OUI_HEADER_SIZE);
+ if (ret < 0) {
seq_printf(m, "branch oui read failed\n");
goto out;
}
@@ -6112,14 +6108,14 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
/* DP-to-DP peer device */
if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
- if (drm_dp_dpcd_read(&port->aux,
- DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&port->aux,
- DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
- DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&immediate_upstream_port->aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
return NULL;
/* Endpoint decompression with DP-to-DP peer device */
@@ -6157,8 +6153,8 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
- if (drm_dp_dpcd_read(immediate_upstream_aux,
- DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(immediate_upstream_aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
return NULL;
if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
@@ -6180,11 +6176,11 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
* therefore the endpoint needs to be
* both DSC and FEC capable.
*/
- if (drm_dp_dpcd_read(&port->aux,
- DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&port->aux,
- DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
return NULL;
if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
(endpoint_fec & DP_FEC_CAPABLE))
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
index 90fe07a89260..076edf161048 100644
--- a/drivers/gpu/drm/display/drm_dp_tunnel.c
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -222,7 +222,7 @@ static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *r
while ((len = next_reg_area(&offset))) {
int address = DP_TUNNELING_BASE + offset;
- if (drm_dp_dpcd_read(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
+ if (drm_dp_dpcd_read_data(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
return -EIO;
offset += len;
@@ -913,7 +913,7 @@ static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
u8 mask = DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE | DP_UNMASK_BW_ALLOCATION_IRQ;
u8 val;
- if (drm_dp_dpcd_readb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
+ if (drm_dp_dpcd_read_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
goto out_err;
if (enable)
@@ -921,7 +921,7 @@ static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
else
val &= ~mask;
- if (drm_dp_dpcd_writeb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
+ if (drm_dp_dpcd_write_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
goto out_err;
tunnel->bw_alloc_enabled = enable;
@@ -1039,7 +1039,7 @@ static int clear_bw_req_state(struct drm_dp_aux *aux)
{
u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED;
- if (drm_dp_dpcd_writeb(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0)
+ if (drm_dp_dpcd_write_byte(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0)
return -EIO;
return 0;
@@ -1052,7 +1052,7 @@ static int bw_req_complete(struct drm_dp_aux *aux, bool *status_changed)
u8 val;
int err;
- if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0)
return -EIO;
*status_changed = val & status_change_mask;
@@ -1095,7 +1095,7 @@ static int allocate_tunnel_bw(struct drm_dp_tunnel *tunnel, int bw)
if (err)
goto out;
- if (drm_dp_dpcd_writeb(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) {
+ if (drm_dp_dpcd_write_byte(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) {
err = -EIO;
goto out;
}
@@ -1196,13 +1196,13 @@ static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
u8 mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
u8 val;
- if (drm_dp_dpcd_readb(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
+ if (drm_dp_dpcd_read_byte(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
goto out_err;
val &= mask;
if (val) {
- if (drm_dp_dpcd_writeb(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
+ if (drm_dp_dpcd_write_byte(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
goto out_err;
return 1;
@@ -1215,7 +1215,7 @@ static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
* Check for estimated BW changes explicitly to account for lost
* BW change notifications.
*/
- if (drm_dp_dpcd_readb(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
+ if (drm_dp_dpcd_read_byte(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
goto out_err;
if (val * tunnel->bw_granularity != tunnel->estimated_bw)
@@ -1300,7 +1300,7 @@ int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *a
{
u8 val;
- if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0)
return -EIO;
if (val & (DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED))
diff --git a/drivers/gpu/drm/display/drm_hdmi_helper.c b/drivers/gpu/drm/display/drm_hdmi_helper.c
index 74dd4d01dd9b..855cb02b827d 100644
--- a/drivers/gpu/drm/display/drm_hdmi_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_helper.c
@@ -256,3 +256,171 @@ drm_hdmi_compute_mode_clock(const struct drm_display_mode *mode,
return DIV_ROUND_CLOSEST_ULL(clock * bpc, 8);
}
EXPORT_SYMBOL(drm_hdmi_compute_mode_clock);
+
+struct drm_hdmi_acr_n_cts_entry {
+ unsigned int n;
+ unsigned int cts;
+};
+
+struct drm_hdmi_acr_data {
+ unsigned long tmds_clock_khz;
+ struct drm_hdmi_acr_n_cts_entry n_cts_32k,
+ n_cts_44k1,
+ n_cts_48k;
+};
+
+static const struct drm_hdmi_acr_data hdmi_acr_n_cts[] = {
+ {
+ /* "Other" entry */
+ .n_cts_32k = { .n = 4096, },
+ .n_cts_44k1 = { .n = 6272, },
+ .n_cts_48k = { .n = 6144, },
+ }, {
+ .tmds_clock_khz = 25175,
+ .n_cts_32k = { .n = 4576, .cts = 28125, },
+ .n_cts_44k1 = { .n = 7007, .cts = 31250, },
+ .n_cts_48k = { .n = 6864, .cts = 28125, },
+ }, {
+ .tmds_clock_khz = 25200,
+ .n_cts_32k = { .n = 4096, .cts = 25200, },
+ .n_cts_44k1 = { .n = 6272, .cts = 28000, },
+ .n_cts_48k = { .n = 6144, .cts = 25200, },
+ }, {
+ .tmds_clock_khz = 27000,
+ .n_cts_32k = { .n = 4096, .cts = 27000, },
+ .n_cts_44k1 = { .n = 6272, .cts = 30000, },
+ .n_cts_48k = { .n = 6144, .cts = 27000, },
+ }, {
+ .tmds_clock_khz = 27027,
+ .n_cts_32k = { .n = 4096, .cts = 27027, },
+ .n_cts_44k1 = { .n = 6272, .cts = 30030, },
+ .n_cts_48k = { .n = 6144, .cts = 27027, },
+ }, {
+ .tmds_clock_khz = 54000,
+ .n_cts_32k = { .n = 4096, .cts = 54000, },
+ .n_cts_44k1 = { .n = 6272, .cts = 60000, },
+ .n_cts_48k = { .n = 6144, .cts = 54000, },
+ }, {
+ .tmds_clock_khz = 54054,
+ .n_cts_32k = { .n = 4096, .cts = 54054, },
+ .n_cts_44k1 = { .n = 6272, .cts = 60060, },
+ .n_cts_48k = { .n = 6144, .cts = 54054, },
+ }, {
+ .tmds_clock_khz = 74176,
+ .n_cts_32k = { .n = 11648, .cts = 210937, }, /* and 210938 */
+ .n_cts_44k1 = { .n = 17836, .cts = 234375, },
+ .n_cts_48k = { .n = 11648, .cts = 140625, },
+ }, {
+ .tmds_clock_khz = 74250,
+ .n_cts_32k = { .n = 4096, .cts = 74250, },
+ .n_cts_44k1 = { .n = 6272, .cts = 82500, },
+ .n_cts_48k = { .n = 6144, .cts = 74250, },
+ }, {
+ .tmds_clock_khz = 148352,
+ .n_cts_32k = { .n = 11648, .cts = 421875, },
+ .n_cts_44k1 = { .n = 8918, .cts = 234375, },
+ .n_cts_48k = { .n = 5824, .cts = 140625, },
+ }, {
+ .tmds_clock_khz = 148500,
+ .n_cts_32k = { .n = 4096, .cts = 148500, },
+ .n_cts_44k1 = { .n = 6272, .cts = 165000, },
+ .n_cts_48k = { .n = 6144, .cts = 148500, },
+ }, {
+ .tmds_clock_khz = 296703,
+ .n_cts_32k = { .n = 5824, .cts = 421875, },
+ .n_cts_44k1 = { .n = 4459, .cts = 234375, },
+ .n_cts_48k = { .n = 5824, .cts = 281250, },
+ }, {
+ .tmds_clock_khz = 297000,
+ .n_cts_32k = { .n = 3072, .cts = 222750, },
+ .n_cts_44k1 = { .n = 4704, .cts = 247500, },
+ .n_cts_48k = { .n = 5120, .cts = 247500, },
+ }, {
+ .tmds_clock_khz = 593407,
+ .n_cts_32k = { .n = 5824, .cts = 843750, },
+ .n_cts_44k1 = { .n = 8918, .cts = 937500, },
+ .n_cts_48k = { .n = 5824, .cts = 562500, },
+ }, {
+ .tmds_clock_khz = 594000,
+ .n_cts_32k = { .n = 3072, .cts = 445500, },
+ .n_cts_44k1 = { .n = 9408, .cts = 990000, },
+ .n_cts_48k = { .n = 6144, .cts = 594000, },
+ },
+};
+
+static int drm_hdmi_acr_find_tmds_entry(unsigned long tmds_clock_khz)
+{
+ int i;
+
+ /* skip the "other" entry */
+ for (i = 1; i < ARRAY_SIZE(hdmi_acr_n_cts); i++) {
+ if (hdmi_acr_n_cts[i].tmds_clock_khz == tmds_clock_khz)
+ return i;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_hdmi_acr_get_n_cts() - get N and CTS values for Audio Clock Regeneration
+ *
+ * @tmds_char_rate: TMDS clock (char rate) as used by the HDMI connector
+ * @sample_rate: audio sample rate
+ * @out_n: a pointer to write the N value
+ * @out_cts: a pointer to write the CTS value
+ *
+ * Get the N and CTS values (either by calculating them or by returning data
+ * from the tables). This follows the HDMI 1.4b Section 7.2 "Audio Sample Clock
+ * Capture and Regeneration".
+ *
+ * Note, @sample_rate corresponds to the Fs value, see sections 7.2.4 - 7.2.6
+ * on how to select Fs for non-L-PCM formats.
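+ *
+ * As a worked example, 48 kHz audio on a 148.5 MHz TMDS link uses the
+ * tabulated N = 6144, giving CTS = 148500000 * 6144 / (128 * 48000) =
+ * 148500; 192 kHz, being a 48 kHz multiple, scales N to 4 * 6144 = 24576
+ * while CTS stays the same.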
+ */
+void
+drm_hdmi_acr_get_n_cts(unsigned long long tmds_char_rate,
+ unsigned int sample_rate,
+ unsigned int *out_n,
+ unsigned int *out_cts)
+{
+ /* be a bit more tolerant, especially for the 1.001 entries */
+ unsigned long tmds_clock_khz = DIV_ROUND_CLOSEST_ULL(tmds_char_rate, 1000);
+ const struct drm_hdmi_acr_n_cts_entry *entry;
+ unsigned int n, cts, mult;
+ int tmds_idx;
+
+ tmds_idx = drm_hdmi_acr_find_tmds_entry(tmds_clock_khz);
+
+ /*
+ * Don't change the order: 192 kHz is divisible by both 48k and 32k, but
+ * it should use the 48k entry.
+ */
+ if (sample_rate % 48000 == 0) {
+ entry = &hdmi_acr_n_cts[tmds_idx].n_cts_48k;
+ mult = sample_rate / 48000;
+ } else if (sample_rate % 44100 == 0) {
+ entry = &hdmi_acr_n_cts[tmds_idx].n_cts_44k1;
+ mult = sample_rate / 44100;
+ } else if (sample_rate % 32000 == 0) {
+ entry = &hdmi_acr_n_cts[tmds_idx].n_cts_32k;
+ mult = sample_rate / 32000;
+ } else {
+ entry = NULL;
+ }
+
+ if (entry) {
+ n = entry->n * mult;
+ cts = entry->cts;
+ } else {
+ /* Recommended optimal value, HDMI 1.4b, Section 7.2.1 */
+ n = 128 * sample_rate / 1000;
+ cts = 0;
+ }
+
+ if (!cts)
+ cts = DIV_ROUND_CLOSEST_ULL(tmds_char_rate * n,
+ 128 * sample_rate);
+
+ *out_n = n;
+ *out_cts = cts;
+}
+EXPORT_SYMBOL(drm_hdmi_acr_get_n_cts);
diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
index c205f37da1e1..d9d9948b29e9 100644
--- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
@@ -10,6 +10,298 @@
#include <drm/display/drm_hdmi_state_helper.h>
/**
+ * DOC: hdmi helpers
+ *
+ * These functions contain an implementation of the HDMI specification
+ * in the form of KMS helpers.
+ *
+ * It contains TMDS character rate computation, automatic selection of
+ * output formats, infoframes generation, etc.
+ *
+ * Infoframes Compliance
+ * ~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Drivers using the helpers will expose the various infoframes
+ * generated according to the HDMI specification in debugfs.
+ *
+ * Compliance can then be tested using ``edid-decode`` from the ``v4l-utils`` project
+ * (https://git.linuxtv.org/v4l-utils.git/). A sample run would look like:
+ *
+ * .. code-block:: bash
+ *
+ * # edid-decode \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/audio \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/avi \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdmi \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdr_drm \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/spd \
+ * /sys/class/drm/card1-HDMI-A-1/edid \
+ * -c
+ *
+ * edid-decode (hex):
+ *
+ * 00 ff ff ff ff ff ff 00 1e 6d f4 5b 1e ef 06 00
+ * 07 20 01 03 80 2f 34 78 ea 24 05 af 4f 42 ab 25
+ * 0f 50 54 21 08 00 d1 c0 61 40 45 40 01 01 01 01
+ * 01 01 01 01 01 01 98 d0 00 40 a1 40 d4 b0 30 20
+ * 3a 00 d1 0b 12 00 00 1a 00 00 00 fd 00 3b 3d 1e
+ * b2 31 00 0a 20 20 20 20 20 20 00 00 00 fc 00 4c
+ * 47 20 53 44 51 48 44 0a 20 20 20 20 00 00 00 ff
+ * 00 32 30 37 4e 54 52 4c 44 43 34 33 30 0a 01 46
+ *
+ * 02 03 42 72 23 09 07 07 4d 01 03 04 90 12 13 1f
+ * 22 5d 5e 5f 60 61 83 01 00 00 6d 03 0c 00 10 00
+ * b8 3c 20 00 60 01 02 03 67 d8 5d c4 01 78 80 03
+ * e3 0f 00 18 e2 00 6a e3 05 c0 00 e6 06 05 01 52
+ * 52 51 11 5d 00 a0 a0 40 29 b0 30 20 3a 00 d1 0b
+ * 12 00 00 1a 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 c3
+ *
+ * ----------------
+ *
+ * Block 0, Base EDID:
+ * EDID Structure Version & Revision: 1.3
+ * Vendor & Product Identification:
+ * Manufacturer: GSM
+ * Model: 23540
+ * Serial Number: 454430 (0x0006ef1e)
+ * Made in: week 7 of 2022
+ * Basic Display Parameters & Features:
+ * Digital display
+ * Maximum image size: 47 cm x 52 cm
+ * Gamma: 2.20
+ * DPMS levels: Standby Suspend Off
+ * RGB color display
+ * First detailed timing is the preferred timing
+ * Color Characteristics:
+ * Red : 0.6835, 0.3105
+ * Green: 0.2587, 0.6679
+ * Blue : 0.1445, 0.0585
+ * White: 0.3134, 0.3291
+ * Established Timings I & II:
+ * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * DMT 0x09: 800x600 60.316541 Hz 4:3 37.879 kHz 40.000000 MHz
+ * DMT 0x10: 1024x768 60.003840 Hz 4:3 48.363 kHz 65.000000 MHz
+ * Standard Timings:
+ * DMT 0x52: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz
+ * DMT 0x10: 1024x768 60.003840 Hz 4:3 48.363 kHz 65.000000 MHz
+ * DMT 0x09: 800x600 60.316541 Hz 4:3 37.879 kHz 40.000000 MHz
+ * Detailed Timing Descriptors:
+ * DTD 1: 2560x2880 59.966580 Hz 8:9 185.417 kHz 534.000000 MHz (465 mm x 523 mm)
+ * Hfront 48 Hsync 32 Hback 240 Hpol P
+ * Vfront 3 Vsync 10 Vback 199 Vpol N
+ * Display Range Limits:
+ * Monitor ranges (GTF): 59-61 Hz V, 30-178 kHz H, max dotclock 490 MHz
+ * Display Product Name: 'LG SDQHD'
+ * Display Product Serial Number: '207NTRLDC430'
+ * Extension blocks: 1
+ * Checksum: 0x46
+ *
+ * ----------------
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Revision: 3
+ * Basic audio support
+ * Supports YCbCr 4:4:4
+ * Supports YCbCr 4:2:2
+ * Native detailed modes: 2
+ * Audio Data Block:
+ * Linear PCM:
+ * Max channels: 2
+ * Supported sample rates (kHz): 48 44.1 32
+ * Supported sample sizes (bits): 24 20 16
+ * Video Data Block:
+ * VIC 1: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * VIC 3: 720x480 59.940060 Hz 16:9 31.469 kHz 27.000000 MHz
+ * VIC 4: 1280x720 60.000000 Hz 16:9 45.000 kHz 74.250000 MHz
+ * VIC 16: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (native)
+ * VIC 18: 720x576 50.000000 Hz 16:9 31.250 kHz 27.000000 MHz
+ * VIC 19: 1280x720 50.000000 Hz 16:9 37.500 kHz 74.250000 MHz
+ * VIC 31: 1920x1080 50.000000 Hz 16:9 56.250 kHz 148.500000 MHz
+ * VIC 34: 1920x1080 30.000000 Hz 16:9 33.750 kHz 74.250000 MHz
+ * VIC 93: 3840x2160 24.000000 Hz 16:9 54.000 kHz 297.000000 MHz
+ * VIC 94: 3840x2160 25.000000 Hz 16:9 56.250 kHz 297.000000 MHz
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * VIC 96: 3840x2160 50.000000 Hz 16:9 112.500 kHz 594.000000 MHz
+ * VIC 97: 3840x2160 60.000000 Hz 16:9 135.000 kHz 594.000000 MHz
+ * Speaker Allocation Data Block:
+ * FL/FR - Front Left/Right
+ * Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
+ * Source physical address: 1.0.0.0
+ * Supports_AI
+ * DC_36bit
+ * DC_30bit
+ * DC_Y444
+ * Maximum TMDS clock: 300 MHz
+ * Extended HDMI video details:
+ * HDMI VICs:
+ * HDMI VIC 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * HDMI VIC 2: 3840x2160 25.000000 Hz 16:9 56.250 kHz 297.000000 MHz
+ * HDMI VIC 3: 3840x2160 24.000000 Hz 16:9 54.000 kHz 297.000000 MHz
+ * Vendor-Specific Data Block (HDMI Forum), OUI C4-5D-D8:
+ * Version: 1
+ * Maximum TMDS Character Rate: 600 MHz
+ * SCDC Present
+ * Supports 12-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 10-bits/component Deep Color 4:2:0 Pixel Encoding
+ * YCbCr 4:2:0 Capability Map Data Block:
+ * VIC 96: 3840x2160 50.000000 Hz 16:9 112.500 kHz 594.000000 MHz
+ * VIC 97: 3840x2160 60.000000 Hz 16:9 135.000 kHz 594.000000 MHz
+ * Video Capability Data Block:
+ * YCbCr quantization: No Data
+ * RGB quantization: Selectable (via AVI Q)
+ * PT scan behavior: Always Underscanned
+ * IT scan behavior: Always Underscanned
+ * CE scan behavior: Always Underscanned
+ * Colorimetry Data Block:
+ * BT2020YCC
+ * BT2020RGB
+ * HDR Static Metadata Data Block:
+ * Electro optical transfer functions:
+ * Traditional gamma - SDR luminance range
+ * SMPTE ST2084
+ * Supported static metadata descriptors:
+ * Static metadata type 1
+ * Desired content max luminance: 82 (295.365 cd/m^2)
+ * Desired content max frame-average luminance: 82 (295.365 cd/m^2)
+ * Desired content min luminance: 81 (0.298 cd/m^2)
+ * Detailed Timing Descriptors:
+ * DTD 2: 2560x2880 29.986961 Hz 8:9 87.592 kHz 238.250000 MHz (465 mm x 523 mm)
+ * Hfront 48 Hsync 32 Hback 80 Hpol P
+ * Vfront 3 Vsync 10 Vback 28 Vpol N
+ * Checksum: 0xc3 Unused space in Extension Block: 43 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.29.0-5346
+ * edid-decode SHA: c363e9aa6d70 2025-03-11 11:41:18
+ *
+ * Warnings:
+ *
+ * Block 1, CTA-861 Extension Block:
+ * IT Video Formats are overscanned by default, but normally this should be underscanned.
+ * Video Data Block: VIC 1 and the first DTD are not identical. Is this intended?
+ * Video Data Block: All VICs are in ascending order, and the first (preferred) VIC <= 4, is that intended?
+ * Video Capability Data Block: Set Selectable YCbCr Quantization to avoid interop issues.
+ * Video Capability Data Block: S_PT is equal to S_IT and S_CE, so should be set to 0 instead.
+ * Colorimetry Data Block: Set the sRGB colorimetry bit to avoid interop issues.
+ * Display Product Serial Number is set, so the Serial Number in the Base EDID should be 0.
+ * EDID:
+ * Base EDID: Some timings are out of range of the Monitor Ranges:
+ * Vertical Freq: 24.000 - 60.317 Hz (Monitor: 59.000 - 61.000 Hz)
+ * Horizontal Freq: 31.250 - 185.416 kHz (Monitor: 30.000 - 178.000 kHz)
+ * Maximum Clock: 594.000 MHz (Monitor: 490.000 MHz)
+ *
+ * Failures:
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Video Capability Data Block: IT video formats are always underscanned, but bit 7 of Byte 3 of the CTA-861 Extension header is set to overscanned.
+ * EDID:
+ * CTA-861: Native progressive timings are a mix of several resolutions.
+ *
+ * EDID conformity: FAIL
+ *
+ * ================
+ *
+ * InfoFrame of '/sys/kernel/debug/dri/1/HDMI-A-1/infoframes/audio' was empty.
+ *
+ * ================
+ *
+ * edid-decode InfoFrame (hex):
+ *
+ * 82 02 0d 31 12 28 04 00 00 00 00 00 00 00 00 00
+ * 00
+ *
+ * ----------------
+ *
+ * HDMI InfoFrame Checksum: 0x31
+ *
+ * AVI InfoFrame
+ * Version: 2
+ * Length: 13
+ * Y: Color Component Sample Format: RGB
+ * A: Active Format Information Present: Yes
+ * B: Bar Data Present: Bar Data not present
+ * S: Scan Information: Composed for an underscanned display
+ * C: Colorimetry: No Data
+ * M: Picture Aspect Ratio: 16:9
+ * R: Active Portion Aspect Ratio: 8
+ * ITC: IT Content: No Data
+ * EC: Extended Colorimetry: xvYCC601
+ * Q: RGB Quantization Range: Limited Range
+ * SC: Non-Uniform Picture Scaling: No Known non-uniform scaling
+ * YQ: YCC Quantization Range: Limited Range
+ * CN: IT Content Type: Graphics
+ * PR: Pixel Data Repetition Count: 0
+ * Line Number of End of Top Bar: 0
+ * Line Number of Start of Bottom Bar: 0
+ * Pixel Number of End of Left Bar: 0
+ * Pixel Number of Start of Right Bar: 0
+ *
+ * ----------------
+ *
+ * AVI InfoFrame conformity: PASS
+ *
+ * ================
+ *
+ * edid-decode InfoFrame (hex):
+ *
+ * 81 01 05 49 03 0c 00 20 01
+ *
+ * ----------------
+ *
+ * HDMI InfoFrame Checksum: 0x49
+ *
+ * Vendor-Specific InfoFrame (HDMI), OUI 00-0C-03
+ * Version: 1
+ * Length: 5
+ * HDMI Video Format: HDMI_VIC is present
+ * HDMI VIC 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ *
+ * ----------------
+ *
+ * Vendor-Specific InfoFrame (HDMI), OUI 00-0C-03 conformity: PASS
+ *
+ * ================
+ *
+ * InfoFrame of '/sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdr_drm' was empty.
+ *
+ * ================
+ *
+ * edid-decode InfoFrame (hex):
+ *
+ * 83 01 19 93 42 72 6f 61 64 63 6f 6d 56 69 64 65
+ * 6f 63 6f 72 65 00 00 00 00 00 00 00 09
+ *
+ * ----------------
+ *
+ * HDMI InfoFrame Checksum: 0x93
+ *
+ * Source Product Description InfoFrame
+ * Version: 1
+ * Length: 25
+ * Vendor Name: 'Broadcom'
+ * Product Description: 'Videocore'
+ * Source Information: PC general
+ *
+ * ----------------
+ *
+ * Source Product Description InfoFrame conformity: PASS
+ *
+ * Testing
+ * ~~~~~~~
+ *
+ * The helpers have unit testing and can be tested using kunit with:
+ *
+ * .. code-block:: bash
+ *
+ * $ ./tools/testing/kunit/kunit.py run \
+ * --kunitconfig=drivers/gpu/drm/tests \
+ * drm_atomic_helper_connector_hdmi_*
+ */
+
+/**
* __drm_atomic_helper_connector_hdmi_reset() - Initializes all HDMI @drm_connector_state resources
* @connector: DRM connector
* @new_conn_state: connector state to reset
@@ -816,7 +1108,7 @@ drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
* @status: Connection status
*
* This function should be called as a part of the .detect() / .detect_ctx()
- * callbacks, updating the HDMI-specific connector's data.
+ * callbacks for all status changes.
*/
void drm_atomic_helper_connector_hdmi_hotplug(struct drm_connector *connector,
enum drm_connector_status status)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 9ea2611770f4..0138cf0b8b63 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -933,6 +933,9 @@ EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state);
* state). This is especially true in enable hooks because the pipeline has
* changed.
*
+ * If you don't have access to the atomic state, see
+ * drm_atomic_get_connector_for_encoder().
+ *
* Returns: The old connector connected to @encoder, or NULL if the encoder is
* not connected.
*/
@@ -967,6 +970,9 @@ EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder);
* attached to @encoder vs ones that do (and to inspect their state). This is
* especially true in disable hooks because the pipeline will change.
*
+ * If you don't have access to the atomic state, see
+ * drm_atomic_get_connector_for_encoder().
+ *
* Returns: The new connector connected to @encoder, or NULL if the encoder is
* not connected.
*/
@@ -988,6 +994,59 @@ drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_new_connector_for_encoder);
/**
+ * drm_atomic_get_connector_for_encoder - Get connector currently assigned to an encoder
+ * @encoder: The encoder to find the connector of
+ * @ctx: Modeset locking context
+ *
+ * This function finds and returns the connector currently assigned to
+ * @encoder.
+ *
+ * It is similar to the drm_atomic_get_old_connector_for_encoder() and
+ * drm_atomic_get_new_connector_for_encoder() helpers, but doesn't
+ * require access to the atomic state. If you do have access to it,
+ * prefer those helpers. This one is typically useful in situations
+ * where the atomic state isn't available, like detect, link repair,
+ * threaded interrupt handlers, or hooks from other frameworks (ALSA,
+ * CEC, etc.).
+ *
+ * Returns:
+ * The connector connected to @encoder, or an error pointer otherwise.
+ * When the error is -EDEADLK, a deadlock has been detected and the
+ * whole sequence must be restarted.
+ */
+struct drm_connector *
+drm_atomic_get_connector_for_encoder(const struct drm_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *out_connector = ERR_PTR(-EINVAL);
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ int ret;
+
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (!connector->state)
+ continue;
+
+ if (encoder == connector->state->best_encoder) {
+ out_connector = connector;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return out_connector;
+}
+EXPORT_SYMBOL(drm_atomic_get_connector_for_encoder);
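A minimal caller sketch (hypothetical driver code, not part of this patch): since the helper can return ERR_PTR(-EDEADLK), it is meant to sit inside a drm_modeset_acquire_ctx retry loop, for instance via the DRM_MODESET_LOCK_ALL_BEGIN/END macros used elsewhere in this series:

.. code-block:: c

   /* Hypothetical driver code: resolve the connector feeding @encoder. */
   static int foo_log_connector(struct drm_encoder *encoder)
   {
           struct drm_device *dev = encoder->dev;
           struct drm_modeset_acquire_ctx ctx;
           struct drm_connector *connector;
           int ret;

           DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

           connector = drm_atomic_get_connector_for_encoder(encoder, &ctx);
           if (IS_ERR(connector))
                   ret = PTR_ERR(connector); /* -EDEADLK restarts via END */
           else
                   drm_dbg_kms(dev, "feeding [CONNECTOR:%d:%s]\n",
                               connector->base.id, connector->name);

           DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);

           return ret;
   }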
+
+/**
* drm_atomic_get_old_crtc_for_encoder - Get old crtc for an encoder
* @state: Atomic state
* @encoder: The encoder to fetch the crtc state for
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 5302ab324898..ee64ca1b1bec 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3409,6 +3409,9 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_all);
* This implies a reset of all active components available between the CRTC and
* connectors.
*
+ * A variant of this function dedicated to bridges exists as
+ * drm_bridge_helper_reset_crtc().
+ *
* NOTE: This relies on resetting &drm_crtc_state.connectors_changed.
* For drivers which optimize out unnecessary modesets this will result in
* a no-op commit, achieving nothing.
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 6e74de833466..6852d73c931c 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -75,6 +75,12 @@
* the currently visible vertical area of the &drm_crtc.
* FB_ID:
* Mode object ID of the &drm_framebuffer this plane should scan out.
+ *
+ *	When a KMS client is performing front-buffer rendering, it should set
+ *	FB_ID to the same front-buffer FB on each atomic commit. This signals
+ *	to the driver that the buffer's content has changed and needs to be
+ *	re-read. Otherwise, drivers which do not employ continuously repeated
+ *	scanout cycles might not update the screen (see the userspace sketch
+ *	after this hunk).
* CRTC_ID:
* Mode object ID of the &drm_crtc this plane should be connected to.
*
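As referenced in the FB_ID documentation above, a hedged userspace sketch of the front-buffer rule (libdrm; the fd, plane id, FB_ID property id and framebuffer id are assumed to have been discovered through the usual KMS queries):

.. code-block:: c

   #include <errno.h>
   #include <xf86drmMode.h>

   /* Hypothetical front-buffer client: re-commit the same FB_ID so the
    * driver knows it must re-read the already scanned-out buffer.
    */
   static int repaint(int fd, uint32_t plane_id, uint32_t fb_prop_id,
                      uint32_t fb_id)
   {
           drmModeAtomicReq *req = drmModeAtomicAlloc();
           int ret;

           if (!req)
                   return -ENOMEM;

           /* Point FB_ID at the same front buffer on every commit. */
           drmModeAtomicAddProperty(req, plane_id, fb_prop_id, fb_id);

           ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
           drmModeAtomicFree(req);

           return ret;
   }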
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index fa2794217a90..b4c89ec01998 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
@@ -198,10 +199,96 @@
static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);
+static void __drm_bridge_free(struct kref *kref)
+{
+ struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);
+
+ kfree(bridge->container);
+}
+
+/**
+ * drm_bridge_get - Acquire a bridge reference
+ * @bridge: DRM bridge
+ *
+ * This function increments the bridge's refcount.
+ *
+ * Returns:
+ * Pointer to @bridge.
+ */
+struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge)
+{
+ if (bridge)
+ kref_get(&bridge->refcount);
+
+ return bridge;
+}
+EXPORT_SYMBOL(drm_bridge_get);
+
+/**
+ * drm_bridge_put - Release a bridge reference
+ * @bridge: DRM bridge
+ *
+ * This function decrements the bridge's reference count and frees the
+ * object if the reference count drops to zero.
+ */
+void drm_bridge_put(struct drm_bridge *bridge)
+{
+ if (bridge)
+ kref_put(&bridge->refcount, __drm_bridge_free);
+}
+EXPORT_SYMBOL(drm_bridge_put);
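A short sketch of the resulting lifetime rule (hypothetical consumer code): any bridge pointer stored past the scope where it was obtained should hold its own reference.

.. code-block:: c

   /* Hypothetical: stash a bridge pointer across a deferred operation. */
   struct foo {
           struct drm_bridge *bridge;
   };

   static void foo_bind(struct foo *foo, struct drm_bridge *bridge)
   {
           foo->bridge = drm_bridge_get(bridge); /* returns @bridge */
   }

   static void foo_unbind(struct foo *foo)
   {
           drm_bridge_put(foo->bridge); /* may free the whole container */
           foo->bridge = NULL;
   }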
+
+/**
+ * drm_bridge_put_void - wrapper around drm_bridge_put() taking a void pointer
+ * @data: pointer to &struct drm_bridge, cast to a void pointer
+ *
+ * Wrapper around drm_bridge_put() to be used when a function taking a void
+ * pointer is needed, for example as a devm action.
+ */
+static void drm_bridge_put_void(void *data)
+{
+ struct drm_bridge *bridge = (struct drm_bridge *)data;
+
+ drm_bridge_put(bridge);
+}
+
+void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_bridge_funcs *funcs)
+{
+ void *container;
+ struct drm_bridge *bridge;
+ int err;
+
+ if (!funcs) {
+ dev_warn(dev, "Missing funcs pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ bridge = container + offset;
+ bridge->container = container;
+ bridge->funcs = funcs;
+ kref_init(&bridge->refcount);
+
+ err = devm_add_action_or_reset(dev, drm_bridge_put_void, bridge);
+ if (err)
+ return ERR_PTR(err);
+
+ return container;
+}
+EXPORT_SYMBOL(__devm_drm_bridge_alloc);
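Drivers are not expected to call this directly; the series pairs it with a devm_drm_bridge_alloc() macro (in the header, not shown here) that derives size and offset from the container type. A hedged probe sketch, with foo_bridge_funcs assumed to exist:

.. code-block:: c

   /* Hypothetical driver embedding its bridge in a private struct. */
   struct foo_bridge {
           struct drm_bridge bridge;
           void __iomem *regs;
   };

   static int foo_probe(struct platform_device *pdev)
   {
           struct foo_bridge *foo;

           /* Allocates the container, inits the refcount and arms a
            * devm action that drops the probe-time reference.
            */
           foo = devm_drm_bridge_alloc(&pdev->dev, struct foo_bridge,
                                       bridge, &foo_bridge_funcs);
           if (IS_ERR(foo))
                   return PTR_ERR(foo);

           drm_bridge_add(&foo->bridge);
           return 0;
   }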
+
/**
* drm_bridge_add - add the given bridge to the global bridge list
*
* @bridge: bridge control structure
+ *
+ * The bridge to be added must have been allocated by
+ * devm_drm_bridge_alloc().
*/
void drm_bridge_add(struct drm_bridge *bridge)
{
@@ -280,6 +367,11 @@ static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
};
+static bool drm_bridge_is_atomic(struct drm_bridge *bridge)
+{
+ return bridge->funcs->atomic_reset != NULL;
+}
+
/**
* drm_bridge_attach - attach the bridge to an encoder's chain
*
@@ -327,12 +419,12 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
list_add(&bridge->chain_node, &encoder->bridge_chain);
if (bridge->funcs->attach) {
- ret = bridge->funcs->attach(bridge, flags);
+ ret = bridge->funcs->attach(bridge, encoder, flags);
if (ret < 0)
goto err_reset_bridge;
}
- if (bridge->funcs->atomic_reset) {
+ if (drm_bridge_is_atomic(bridge)) {
struct drm_bridge_state *state;
state = bridge->funcs->atomic_reset(bridge);
@@ -377,7 +469,7 @@ void drm_bridge_detach(struct drm_bridge *bridge)
if (WARN_ON(!bridge->dev))
return;
- if (bridge->funcs->atomic_reset)
+ if (drm_bridge_is_atomic(bridge))
drm_atomic_private_obj_fini(&bridge->base);
if (bridge->funcs->detach)
@@ -1300,6 +1392,75 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np)
EXPORT_SYMBOL(of_drm_find_bridge);
#endif
+static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
+ struct drm_bridge *bridge,
+ unsigned int idx)
+{
+ drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);
+ drm_printf(p, "\ttype: [%d] %s\n",
+ bridge->type,
+ drm_get_connector_type_name(bridge->type));
+
+ if (bridge->of_node)
+ drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node);
+
+ drm_printf(p, "\tops: [0x%x]", bridge->ops);
+ if (bridge->ops & DRM_BRIDGE_OP_DETECT)
+ drm_puts(p, " detect");
+ if (bridge->ops & DRM_BRIDGE_OP_EDID)
+ drm_puts(p, " edid");
+ if (bridge->ops & DRM_BRIDGE_OP_HPD)
+ drm_puts(p, " hpd");
+ if (bridge->ops & DRM_BRIDGE_OP_MODES)
+ drm_puts(p, " modes");
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI)
+ drm_puts(p, " hdmi");
+ drm_puts(p, "\n");
+}
+
+static int allbridges_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_bridge *bridge;
+ unsigned int idx = 0;
+
+ mutex_lock(&bridge_lock);
+
+ list_for_each_entry(bridge, &bridge_list, list)
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
+
+ mutex_unlock(&bridge_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(allbridges);
+
+static int encoder_bridges_show(struct seq_file *m, void *data)
+{
+ struct drm_encoder *encoder = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_bridge *bridge;
+ unsigned int idx = 0;
+
+ drm_for_each_bridge_in_chain(encoder, bridge)
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(encoder_bridges);
+
+void drm_bridge_debugfs_params(struct dentry *root)
+{
+ debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops);
+}
+
+void drm_bridge_debugfs_encoder_params(struct dentry *root,
+ struct drm_encoder *encoder)
+{
+ /* bridges list */
+ debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops);
+}
+
MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
MODULE_DESCRIPTION("DRM bridge infrastructure");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/drm_bridge_helper.c b/drivers/gpu/drm/drm_bridge_helper.c
new file mode 100644
index 000000000000..af80d2496194
--- /dev/null
+++ b/drivers/gpu/drm/drm_bridge_helper.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_helper.h>
+#include <drm/drm_modeset_lock.h>
+
+/**
+ * drm_bridge_helper_reset_crtc - Reset the pipeline feeding a bridge
+ * @bridge: DRM bridge to reset
+ * @ctx: lock acquisition context
+ *
+ * Reset the @bridge pipeline. It will power-cycle all active components
+ * between the CRTC and the connector that the bridge is connected to.
+ *
+ * As it relies on drm_atomic_helper_reset_crtc(), the same limitations
+ * apply.
+ *
+ * Returns:
+ *
+ * 0 on success or a negative error code on failure. If the error
+ * returned is -EDEADLK, the whole atomic sequence must be restarted.
+ */
+int drm_bridge_helper_reset_crtc(struct drm_bridge *bridge,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_connector *connector;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc;
+ int ret;
+
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
+ if (ret)
+ return ret;
+
+ connector = drm_atomic_get_connector_for_encoder(encoder, ctx);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ goto out;
+ }
+
+ if (!connector->state) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+	crtc = connector->state->crtc;
+	if (!crtc) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = drm_atomic_helper_reset_crtc(crtc, ctx);
+
+out:
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(drm_bridge_helper_reset_crtc);
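A hedged caller sketch (hypothetical threaded HPD/link-error handler), restarting on -EDEADLK as the kernel-doc requires:

.. code-block:: c

   /* Hypothetical: recover the link from a threaded interrupt handler. */
   static irqreturn_t foo_link_err_thread(int irq, void *arg)
   {
           struct drm_bridge *bridge = arg;
           struct drm_device *dev = bridge->dev;
           struct drm_modeset_acquire_ctx ctx;
           int ret;

           DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
           ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
           DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);

           return IRQ_HANDLED;
   }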
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 549b28a5918c..f1de7faf9fb4 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -174,7 +174,7 @@ EXPORT_SYMBOL(drm_client_release);
static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
if (buffer->gem) {
- drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
+ drm_gem_vunmap(buffer->gem, &buffer->map);
drm_gem_object_put(buffer->gem);
}
@@ -252,7 +252,7 @@ int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
drm_gem_lock(gem);
- ret = drm_gem_vmap(gem, map);
+ ret = drm_gem_vmap_locked(gem, map);
if (ret)
goto err_drm_gem_vmap_unlocked;
*map_copy = *map;
@@ -278,7 +278,7 @@ void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer)
struct drm_gem_object *gem = buffer->gem;
struct iosys_map *map = &buffer->map;
- drm_gem_vunmap(gem, map);
+ drm_gem_vunmap_locked(gem, map);
drm_gem_unlock(gem);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap_local);
@@ -316,7 +316,7 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer,
ret = drm_gem_pin_locked(gem);
if (ret)
goto err_drm_gem_pin_locked;
- ret = drm_gem_vmap(gem, map);
+ ret = drm_gem_vmap_locked(gem, map);
if (ret)
goto err_drm_gem_vmap;
@@ -348,7 +348,7 @@ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
struct iosys_map *map = &buffer->map;
drm_gem_lock(gem);
- drm_gem_vunmap(gem, map);
+ drm_gem_vunmap_locked(gem, map);
drm_gem_unpin_locked(gem);
drm_gem_unlock(gem);
}
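The renames make the locking contract explicit: the *_locked variants require the caller to hold the GEM object's reservation lock via drm_gem_lock(), while the plain names (per the rename of drm_gem_vunmap_unlocked() above) lock internally. A sketch of the two patterns, error handling elided and assuming the matching drm_gem_vmap() wrapper from the same series:

.. code-block:: c

   struct iosys_map map;
   int ret;

   /* Pattern 1: caller already manages the reservation lock. */
   drm_gem_lock(gem);
   ret = drm_gem_vmap_locked(gem, &map);
   /* ... use the mapping, then ... */
   drm_gem_vunmap_locked(gem, &map);
   drm_gem_unlock(gem);

   /* Pattern 2: let the plain wrappers take the lock themselves. */
   ret = drm_gem_vmap(gem, &map);
   /* ... use the mapping, then ... */
   drm_gem_vunmap(gem, &map);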
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index aca442c25209..0f9d5ba36c81 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -39,7 +39,7 @@ int drm_client_modeset_create(struct drm_client_dev *client)
unsigned int max_connector_count = 1;
struct drm_mode_set *modeset;
struct drm_crtc *crtc;
- unsigned int i = 0;
+ int i = 0;
/* Add terminating zero entry to enable index less iteration */
client->modesets = kcalloc(num_crtc + 1, sizeof(*client->modesets), GFP_KERNEL);
@@ -73,9 +73,10 @@ err_free:
static void drm_client_modeset_release(struct drm_client_dev *client)
{
struct drm_mode_set *modeset;
- unsigned int i;
drm_client_for_each_modeset(modeset, client) {
+ int i;
+
drm_mode_destroy(client->dev, modeset->mode);
modeset->mode = NULL;
modeset->fb = NULL;
@@ -117,10 +118,10 @@ drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc)
return NULL;
}
-static struct drm_display_mode *
+static const struct drm_display_mode *
drm_connector_get_tiled_mode(struct drm_connector *connector)
{
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay == connector->tile_h_size &&
@@ -130,10 +131,10 @@ drm_connector_get_tiled_mode(struct drm_connector *connector)
return NULL;
}
-static struct drm_display_mode *
+static const struct drm_display_mode *
drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
{
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay == connector->tile_h_size &&
@@ -144,10 +145,10 @@ drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
return NULL;
}
-static struct drm_display_mode *
+static const struct drm_display_mode *
drm_connector_preferred_mode(struct drm_connector *connector, int width, int height)
{
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay > width ||
@@ -159,16 +160,18 @@ drm_connector_preferred_mode(struct drm_connector *connector, int width, int hei
return NULL;
}
-static struct drm_display_mode *drm_connector_first_mode(struct drm_connector *connector)
+static const struct drm_display_mode *
+drm_connector_first_mode(struct drm_connector *connector)
{
return list_first_entry_or_null(&connector->modes,
struct drm_display_mode, head);
}
-static struct drm_display_mode *drm_connector_pick_cmdline_mode(struct drm_connector *connector)
+static const struct drm_display_mode *
+drm_connector_pick_cmdline_mode(struct drm_connector *connector)
{
- struct drm_cmdline_mode *cmdline_mode;
- struct drm_display_mode *mode;
+ const struct drm_cmdline_mode *cmdline_mode;
+ const struct drm_display_mode *mode;
bool prefer_non_interlace;
/*
@@ -237,9 +240,9 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
return enable;
}
-static void drm_client_connectors_enabled(struct drm_connector **connectors,
+static void drm_client_connectors_enabled(struct drm_connector *connectors[],
unsigned int connector_count,
- bool *enabled)
+ bool enabled[])
{
bool any_enabled = false;
struct drm_connector *connector;
@@ -263,16 +266,35 @@ static void drm_client_connectors_enabled(struct drm_connector **connectors,
enabled[i] = drm_connector_enabled(connectors[i], false);
}
+static void mode_replace(struct drm_device *dev,
+ const struct drm_display_mode **dst,
+ const struct drm_display_mode *src)
+{
+ drm_mode_destroy(dev, (struct drm_display_mode *)*dst);
+
+ *dst = src ? drm_mode_duplicate(dev, src) : NULL;
+}
+
+static void modes_destroy(struct drm_device *dev,
+ const struct drm_display_mode *modes[],
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ mode_replace(dev, &modes[i], NULL);
+}
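These two helpers give the modes[] array strict ownership: each slot holds its own duplicate, and replacing a slot frees the previous one. A tiny sketch (the preferred/fallback pointers are hypothetical):

.. code-block:: c

   const struct drm_display_mode *modes[2] = {};

   mode_replace(dev, &modes[0], preferred); /* slot owns a duplicate  */
   mode_replace(dev, &modes[0], fallback);  /* old dup freed, new dup */
   modes_destroy(dev, modes, 2);            /* frees whatever is left */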
+
static bool drm_client_target_cloned(struct drm_device *dev,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
- bool *enabled, int width, int height)
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
+ bool enabled[], int width, int height)
{
- int count, i, j;
+ int count, i;
bool can_clone = false;
- struct drm_display_mode *dmt_mode, *mode;
+ struct drm_display_mode *dmt_mode;
/* only contemplate cloning in the single crtc case */
if (dev->mode_config.num_crtc > 1)
@@ -291,9 +313,13 @@ static bool drm_client_target_cloned(struct drm_device *dev,
/* check the command line or if nothing common pick 1024x768 */
can_clone = true;
for (i = 0; i < connector_count; i++) {
+ int j;
+
if (!enabled[i])
continue;
- modes[i] = drm_connector_pick_cmdline_mode(connectors[i]);
+
+ mode_replace(dev, &modes[i],
+ drm_connector_pick_cmdline_mode(connectors[i]));
if (!modes[i]) {
can_clone = false;
break;
@@ -323,6 +349,8 @@ static bool drm_client_target_cloned(struct drm_device *dev,
goto fail;
for (i = 0; i < connector_count; i++) {
+ const struct drm_display_mode *mode;
+
if (!enabled[i])
continue;
@@ -332,7 +360,7 @@ static bool drm_client_target_cloned(struct drm_device *dev,
DRM_MODE_MATCH_CLOCK |
DRM_MODE_MATCH_FLAGS |
DRM_MODE_MATCH_3D_FLAGS))
- modes[i] = mode;
+ mode_replace(dev, &modes[i], mode);
}
if (!modes[i])
can_clone = false;
@@ -349,19 +377,19 @@ fail:
}
static int drm_client_get_tile_offsets(struct drm_device *dev,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
int idx,
int h_idx, int v_idx)
{
- struct drm_connector *connector;
int i;
int hoffset = 0, voffset = 0;
for (i = 0; i < connector_count; i++) {
- connector = connectors[i];
+ struct drm_connector *connector = connectors[i];
+
if (!connector->has_tile)
continue;
@@ -384,14 +412,13 @@ static int drm_client_get_tile_offsets(struct drm_device *dev,
}
static bool drm_client_target_preferred(struct drm_device *dev,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
- bool *enabled, int width, int height)
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
+ bool enabled[], int width, int height)
{
const u64 mask = BIT_ULL(connector_count) - 1;
- struct drm_connector *connector;
u64 conn_configured = 0;
int tile_pass = 0;
int num_tiled_conns = 0;
@@ -405,7 +432,9 @@ static bool drm_client_target_preferred(struct drm_device *dev,
retry:
for (i = 0; i < connector_count; i++) {
- connector = connectors[i];
+ struct drm_connector *connector = connectors[i];
+ const char *mode_type;
+
if (conn_configured & BIT_ULL(i))
continue;
@@ -438,20 +467,23 @@ retry:
modes, offsets, i,
connector->tile_h_loc, connector->tile_v_loc);
}
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n",
- connector->base.id, connector->name);
- /* got for command line mode first */
- modes[i] = drm_connector_pick_cmdline_mode(connector);
+ mode_type = "cmdline";
+ mode_replace(dev, &modes[i],
+ drm_connector_pick_cmdline_mode(connector));
+
if (!modes[i]) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for preferred mode, tile %d\n",
- connector->base.id, connector->name,
- connector->tile_group ? connector->tile_group->id : 0);
- modes[i] = drm_connector_preferred_mode(connector, width, height);
+ mode_type = "preferred";
+ mode_replace(dev, &modes[i],
+ drm_connector_preferred_mode(connector, width, height));
}
- /* No preferred modes, pick one off the list */
- if (!modes[i])
- modes[i] = drm_connector_first_mode(connector);
+
+ if (!modes[i]) {
+ mode_type = "first";
+ mode_replace(dev, &modes[i],
+ drm_connector_first_mode(connector));
+ }
+
/*
* In case of tiled mode if all tiles not present fallback to
* first available non tiled mode.
@@ -466,18 +498,24 @@ retry:
(connector->tile_h_loc == 0 &&
connector->tile_v_loc == 0 &&
!drm_connector_get_tiled_mode(connector))) {
- drm_dbg_kms(dev,
- "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n",
- connector->base.id, connector->name);
- modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+ mode_type = "non tiled";
+ mode_replace(dev, &modes[i],
+ drm_connector_fallback_non_tiled_mode(connector));
} else {
- modes[i] = drm_connector_get_tiled_mode(connector);
+ mode_type = "tiled";
+ mode_replace(dev, &modes[i],
+ drm_connector_get_tiled_mode(connector));
}
}
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Found mode %s\n",
- connector->base.id, connector->name,
- modes[i] ? modes[i]->name : "none");
+ if (modes[i])
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] found %s mode: %s\n",
+ connector->base.id, connector->name,
+ mode_type, modes[i]->name);
+ else
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] no mode found\n",
+ connector->base.id, connector->name);
+
conn_configured |= BIT_ULL(i);
}
@@ -502,18 +540,17 @@ static bool connector_has_possible_crtc(struct drm_connector *connector,
}
static int drm_client_pick_crtcs(struct drm_client_dev *client,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_crtc **best_crtcs,
- struct drm_display_mode **modes,
+ struct drm_crtc *best_crtcs[],
+ const struct drm_display_mode *modes[],
int n, int width, int height)
{
struct drm_device *dev = client->dev;
struct drm_connector *connector;
int my_score, best_score, score;
- struct drm_crtc **crtcs, *crtc;
+ struct drm_crtc **crtcs;
struct drm_mode_set *modeset;
- int o;
if (n == connector_count)
return 0;
@@ -543,7 +580,8 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client,
* remaining connectors
*/
drm_client_for_each_modeset(modeset, client) {
- crtc = modeset->crtc;
+ struct drm_crtc *crtc = modeset->crtc;
+ int o;
if (!connector_has_possible_crtc(connector, crtc))
continue;
@@ -577,17 +615,17 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client,
/* Try to read the BIOS display configuration and use it for the initial config */
static bool drm_client_firmware_config(struct drm_client_dev *client,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_crtc **crtcs,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
- bool *enabled, int width, int height)
+ struct drm_crtc *crtcs[],
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
+ bool enabled[], int width, int height)
{
const int count = min_t(unsigned int, connector_count, BITS_PER_LONG);
unsigned long conn_configured, conn_seq, mask;
struct drm_device *dev = client->dev;
- int i, j;
+ int i;
bool *save_enabled;
bool fallback = true, ret = true;
int num_connectors_enabled = 0;
@@ -621,11 +659,11 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
retry:
conn_seq = conn_configured;
for (i = 0; i < count; i++) {
- struct drm_connector *connector;
+ struct drm_connector *connector = connectors[i];
struct drm_encoder *encoder;
- struct drm_crtc *new_crtc;
-
- connector = connectors[i];
+ struct drm_crtc *crtc;
+ const char *mode_type;
+ int j;
if (conn_configured & BIT(i))
continue;
@@ -664,7 +702,7 @@ retry:
num_connectors_enabled++;
- new_crtc = connector->state->crtc;
+ crtc = connector->state->crtc;
/*
* Make sure we're not trying to drive multiple connectors
@@ -672,69 +710,52 @@ retry:
* match the BIOS.
*/
for (j = 0; j < count; j++) {
- if (crtcs[j] == new_crtc) {
- drm_dbg_kms(dev, "fallback: cloned configuration\n");
+ if (crtcs[j] == crtc) {
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] fallback: cloned configuration\n",
+ connector->base.id, connector->name);
goto bail;
}
}
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n",
- connector->base.id, connector->name);
-
- /* go for command line mode first */
- modes[i] = drm_connector_pick_cmdline_mode(connector);
+ mode_type = "cmdline";
+ mode_replace(dev, &modes[i],
+ drm_connector_pick_cmdline_mode(connector));
- /* try for preferred next */
if (!modes[i]) {
- drm_dbg_kms(dev,
- "[CONNECTOR:%d:%s] looking for preferred mode, has tile: %s\n",
- connector->base.id, connector->name,
- str_yes_no(connector->has_tile));
- modes[i] = drm_connector_preferred_mode(connector, width, height);
+ mode_type = "preferred";
+ mode_replace(dev, &modes[i],
+ drm_connector_preferred_mode(connector, width, height));
}
- /* No preferred mode marked by the EDID? Are there any modes? */
- if (!modes[i] && !list_empty(&connector->modes)) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] using first listed mode\n",
- connector->base.id, connector->name);
- modes[i] = drm_connector_first_mode(connector);
+ if (!modes[i]) {
+ mode_type = "first";
+ mode_replace(dev, &modes[i],
+ drm_connector_first_mode(connector));
}
/* last resort: use current mode */
if (!modes[i]) {
- /*
- * IMPORTANT: We want to use the adjusted mode (i.e.
- * after the panel fitter upscaling) as the initial
- * config, not the input mode, which is what crtc->mode
- * usually contains. But since our current
- * code puts a mode derived from the post-pfit timings
- * into crtc->mode this works out correctly.
- *
- * This is crtc->mode and not crtc->state->mode for the
- * fastboot check to work correctly.
- */
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for current mode\n",
- connector->base.id, connector->name);
- modes[i] = &connector->state->crtc->mode;
+ mode_type = "current";
+ mode_replace(dev, &modes[i],
+ &crtc->state->mode);
}
+
/*
* In case of tiled modes, if all tiles are not present
* then fallback to a non tiled mode.
*/
if (connector->has_tile &&
num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n",
- connector->base.id, connector->name);
- modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+ mode_type = "non tiled";
+ mode_replace(dev, &modes[i],
+ drm_connector_fallback_non_tiled_mode(connector));
}
- crtcs[i] = new_crtc;
+ crtcs[i] = crtc;
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] on [CRTC:%d:%s]: %dx%d%s\n",
+	drm_dbg_kms(dev, "[CONNECTOR:%d:%s] on [CRTC:%d:%s] using %s mode: %s\n",
connector->base.id, connector->name,
- connector->state->crtc->base.id,
- connector->state->crtc->name,
- modes[i]->hdisplay, modes[i]->vdisplay,
- modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
+ crtc->base.id, crtc->name,
+ mode_type, modes[i]->name);
fallback = false;
conn_configured |= BIT(i);
@@ -799,8 +820,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
unsigned int total_modes_count = 0;
struct drm_client_offset *offsets;
unsigned int connector_count = 0;
- /* points to modes protected by mode_config.mutex */
- struct drm_display_mode **modes;
+ const struct drm_display_mode **modes;
struct drm_crtc **crtcs;
int i, ret = 0;
bool *enabled;
@@ -851,7 +871,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
if (!drm_client_firmware_config(client, connectors, connector_count, crtcs,
modes, offsets, enabled, width, height)) {
- memset(modes, 0, connector_count * sizeof(*modes));
+ modes_destroy(dev, modes, connector_count);
memset(crtcs, 0, connector_count * sizeof(*crtcs));
memset(offsets, 0, connector_count * sizeof(*offsets));
@@ -868,10 +888,12 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
crtcs, modes, 0, width, height);
}
+ mutex_unlock(&dev->mode_config.mutex);
+
drm_client_modeset_release(client);
for (i = 0; i < connector_count; i++) {
- struct drm_display_mode *mode = modes[i];
+ const struct drm_display_mode *mode = modes[i];
struct drm_crtc *crtc = crtcs[i];
struct drm_client_offset *offset = &offsets[i];
@@ -902,11 +924,11 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
modeset->y = offset->y;
}
}
- mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&client->modeset_mutex);
out:
kfree(crtcs);
+ modes_destroy(dev, modes, connector_count);
kfree(modes);
kfree(offsets);
kfree(enabled);
@@ -938,7 +960,7 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation)
struct drm_plane *plane = modeset->crtc->primary;
struct drm_cmdline_mode *cmdline;
u64 valid_mask = 0;
- unsigned int i;
+ int i;
if (!modeset->num_connectors)
return false;
@@ -1219,11 +1241,12 @@ static void drm_client_modeset_dpms_legacy(struct drm_client_dev *client, int dp
struct drm_connector *connector;
struct drm_mode_set *modeset;
struct drm_modeset_acquire_ctx ctx;
- int j;
int ret;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
drm_client_for_each_modeset(modeset, client) {
+ int j;
+
if (!modeset->crtc->enabled)
continue;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 0955f1c385dd..39497493f74c 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -334,7 +334,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!encoder_funcs)
continue;
- encoder_funcs = encoder->helper_private;
if (encoder_funcs->mode_fixup) {
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 6b2178864c7e..3dfd8b34dceb 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -740,40 +740,6 @@ void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
crtc->debugfs_entry = NULL;
}
-static int bridges_show(struct seq_file *m, void *data)
-{
- struct drm_encoder *encoder = m->private;
- struct drm_printer p = drm_seq_file_printer(m);
- struct drm_bridge *bridge;
- unsigned int idx = 0;
-
- drm_for_each_bridge_in_chain(encoder, bridge) {
- drm_printf(&p, "bridge[%u]: %ps\n", idx++, bridge->funcs);
- drm_printf(&p, "\ttype: [%d] %s\n",
- bridge->type,
- drm_get_connector_type_name(bridge->type));
-
- if (bridge->of_node)
- drm_printf(&p, "\tOF: %pOFfc\n", bridge->of_node);
-
- drm_printf(&p, "\tops: [0x%x]", bridge->ops);
- if (bridge->ops & DRM_BRIDGE_OP_DETECT)
- drm_puts(&p, " detect");
- if (bridge->ops & DRM_BRIDGE_OP_EDID)
- drm_puts(&p, " edid");
- if (bridge->ops & DRM_BRIDGE_OP_HPD)
- drm_puts(&p, " hpd");
- if (bridge->ops & DRM_BRIDGE_OP_MODES)
- drm_puts(&p, " modes");
- if (bridge->ops & DRM_BRIDGE_OP_HDMI)
- drm_puts(&p, " hdmi");
- drm_puts(&p, "\n");
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(bridges);
-
void drm_debugfs_encoder_add(struct drm_encoder *encoder)
{
struct drm_minor *minor = encoder->dev->primary;
@@ -789,9 +755,7 @@ void drm_debugfs_encoder_add(struct drm_encoder *encoder)
encoder->debugfs_entry = root;
- /* bridges list */
- debugfs_create_file("bridges", 0444, root, encoder,
- &bridges_fops);
+ drm_bridge_debugfs_encoder_params(root, encoder);
if (encoder->funcs && encoder->funcs->debugfs_init)
encoder->funcs->debugfs_init(encoder, root);
diff --git a/drivers/gpu/drm/drm_displayid_internal.h b/drivers/gpu/drm/drm_displayid_internal.h
index aee1b86a73c1..957dd0619f5c 100644
--- a/drivers/gpu/drm/drm_displayid_internal.h
+++ b/drivers/gpu/drm/drm_displayid_internal.h
@@ -66,6 +66,7 @@ struct drm_edid;
#define DATA_BLOCK_2_STEREO_DISPLAY_INTERFACE 0x27
#define DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY 0x28
#define DATA_BLOCK_2_CONTAINER_ID 0x29
+#define DATA_BLOCK_2_TYPE_10_FORMULA_TIMING 0x2a
#define DATA_BLOCK_2_VENDOR_SPECIFIC 0x7e
#define DATA_BLOCK_2_CTA_DISPLAY_ID 0x81
@@ -114,20 +115,32 @@ struct displayid_tiled_block {
struct displayid_detailed_timings_1 {
u8 pixel_clock[3];
u8 flags;
- u8 hactive[2];
- u8 hblank[2];
- u8 hsync[2];
- u8 hsw[2];
- u8 vactive[2];
- u8 vblank[2];
- u8 vsync[2];
- u8 vsw[2];
+ __le16 hactive;
+ __le16 hblank;
+ __le16 hsync;
+ __le16 hsw;
+ __le16 vactive;
+ __le16 vblank;
+ __le16 vsync;
+ __le16 vsw;
} __packed;
struct displayid_detailed_timing_block {
struct displayid_block base;
struct displayid_detailed_timings_1 timings[];
-};
+} __packed;
+
+struct displayid_formula_timings_9 {
+ u8 flags;
+ __le16 hactive;
+ __le16 vactive;
+ u8 vrefresh;
+} __packed;
+
+struct displayid_formula_timing_block {
+ struct displayid_block base;
+ struct displayid_formula_timings_9 timings[];
+} __packed;
#define DISPLAYID_VESA_MSO_OVERLAP GENMASK(3, 0)
#define DISPLAYID_VESA_MSO_MODE GENMASK(6, 5)
diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c
index 385eb5e10047..9dc0408fbbea 100644
--- a/drivers/gpu/drm/drm_draw.c
+++ b/drivers/gpu/drm/drm_draw.c
@@ -13,85 +13,7 @@
#include <drm/drm_fourcc.h>
#include "drm_draw_internal.h"
-
-/*
- * Conversions from xrgb8888
- */
-
-static u16 convert_xrgb8888_to_rgb565(u32 pix)
-{
- return ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
-}
-
-static u16 convert_xrgb8888_to_rgba5551(u32 pix)
-{
- return ((pix & 0x00f80000) >> 8) |
- ((pix & 0x0000f800) >> 5) |
- ((pix & 0x000000f8) >> 2) |
- BIT(0); /* set alpha bit */
-}
-
-static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
-{
- return ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
-}
-
-static u16 convert_xrgb8888_to_argb1555(u32 pix)
-{
- return BIT(15) | /* set alpha bit */
- ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
-}
-
-static u32 convert_xrgb8888_to_argb8888(u32 pix)
-{
- return pix | GENMASK(31, 24); /* fill alpha bits */
-}
-
-static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
-{
- return ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- ((pix & 0xff000000) >> 24) << 24;
-}
-
-static u32 convert_xrgb8888_to_abgr8888(u32 pix)
-{
- return ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- GENMASK(31, 24); /* fill alpha bits */
-}
-
-static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
-{
- pix = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- return pix | ((pix >> 8) & 0x00300C03);
-}
-
-static u32 convert_xrgb8888_to_argb2101010(u32 pix)
-{
- pix = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
-}
-
-static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
-{
- pix = ((pix & 0x00FF0000) >> 14) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x000000FF) << 22);
- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
-}
+#include "drm_format_internal.h"
/**
* drm_draw_color_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
@@ -106,28 +28,28 @@ u32 drm_draw_color_from_xrgb8888(u32 color, u32 format)
{
switch (format) {
case DRM_FORMAT_RGB565:
- return convert_xrgb8888_to_rgb565(color);
+ return drm_pixel_xrgb8888_to_rgb565(color);
case DRM_FORMAT_RGBA5551:
- return convert_xrgb8888_to_rgba5551(color);
+ return drm_pixel_xrgb8888_to_rgba5551(color);
case DRM_FORMAT_XRGB1555:
- return convert_xrgb8888_to_xrgb1555(color);
+ return drm_pixel_xrgb8888_to_xrgb1555(color);
case DRM_FORMAT_ARGB1555:
- return convert_xrgb8888_to_argb1555(color);
+ return drm_pixel_xrgb8888_to_argb1555(color);
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
return color;
case DRM_FORMAT_ARGB8888:
- return convert_xrgb8888_to_argb8888(color);
+ return drm_pixel_xrgb8888_to_argb8888(color);
case DRM_FORMAT_XBGR8888:
- return convert_xrgb8888_to_xbgr8888(color);
+ return drm_pixel_xrgb8888_to_xbgr8888(color);
case DRM_FORMAT_ABGR8888:
- return convert_xrgb8888_to_abgr8888(color);
+ return drm_pixel_xrgb8888_to_abgr8888(color);
case DRM_FORMAT_XRGB2101010:
- return convert_xrgb8888_to_xrgb2101010(color);
+ return drm_pixel_xrgb8888_to_xrgb2101010(color);
case DRM_FORMAT_ARGB2101010:
- return convert_xrgb8888_to_argb2101010(color);
+ return drm_pixel_xrgb8888_to_argb2101010(color);
case DRM_FORMAT_ABGR2101010:
- return convert_xrgb8888_to_abgr2101010(color);
+ return drm_pixel_xrgb8888_to_abgr2101010(color);
default:
WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
return 0;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 60e5ac179c15..56dd61f8e05a 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -40,6 +40,7 @@
#include <linux/xarray.h>
#include <drm/drm_accel.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_cache.h>
#include <drm/drm_client_event.h>
#include <drm/drm_color_mgmt.h>
@@ -500,6 +501,25 @@ void drm_dev_unplug(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_unplug);
+/**
+ * drm_dev_set_dma_dev - set the DMA device for a DRM device
+ * @dev: DRM device
+ * @dma_dev: DMA device or NULL
+ *
+ * Sets the DMA device of the given DRM device. Only required if
+ * the DMA device is different from the DRM device's parent. After
+ * calling this function, the DRM device holds a reference on
+ * @dma_dev. Pass NULL to clear the DMA device.
+ */
+void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev)
+{
+ dma_dev = get_device(dma_dev);
+
+ put_device(dev->dma_dev);
+ dev->dma_dev = dma_dev;
+}
+EXPORT_SYMBOL(drm_dev_set_dma_dev);
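A hedged example of the intended use (hypothetical SoC driver where scanout DMA is performed by a device other than the DRM device's parent; foo_driver is assumed):

.. code-block:: c

   static int foo_bind(struct device *dev, struct device *dma_master)
   {
           struct drm_device *drm;

           drm = drm_dev_alloc(&foo_driver, dev);
           if (IS_ERR(drm))
                   return PTR_ERR(drm);

           /* DMA mappings should target @dma_master, not the parent. */
           drm_dev_set_dma_dev(drm, dma_master);

           return 0;
   }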
+
/*
* Available recovery methods for wedged device. To be sent along with device
* wedged uevent.
@@ -654,6 +674,8 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
{
drm_fs_inode_free(dev->anon_inode);
+ put_device(dev->dma_dev);
+ dev->dma_dev = NULL;
put_device(dev->dev);
/* Prevent use-after-free in drm_managed_release when debugging is
* enabled. Slightly awkward, but can't really be helped. */
@@ -808,36 +830,62 @@ void *__devm_drm_dev_alloc(struct device *parent,
EXPORT_SYMBOL(__devm_drm_dev_alloc);
/**
- * drm_dev_alloc - Allocate new DRM device
- * @driver: DRM driver to allocate device for
+ * __drm_dev_alloc - Allocation of a &drm_device instance
* @parent: Parent device object
+ * @driver: DRM driver
+ * @size: the size of the struct which contains struct drm_device
+ * @offset: the offset of the &drm_device within the container.
*
- * This is the deprecated version of devm_drm_dev_alloc(), which does not support
- * subclassing through embedding the struct &drm_device in a driver private
- * structure, and which does not support automatic cleanup through devres.
+ * This should *NOT* be used by any drivers, but is a dedicated interface
+ * for the corresponding Rust abstraction.
*
- * RETURNS:
- * Pointer to new DRM device, or ERR_PTR on failure.
+ * This is the same as devm_drm_dev_alloc(), but without the corresponding
+ * resource management through the parent device. It is not the same as
+ * drm_dev_alloc(), since the latter is the deprecated version that does
+ * not support subclassing.
+ *
+ * Returns: A pointer to the new DRM device, or an ERR_PTR on failure.
*/
-struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
- struct device *parent)
+void *__drm_dev_alloc(struct device *parent,
+ const struct drm_driver *driver,
+ size_t size, size_t offset)
{
- struct drm_device *dev;
+ void *container;
+ struct drm_device *drm;
int ret;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
return ERR_PTR(-ENOMEM);
- ret = drm_dev_init(dev, driver, parent);
+ drm = container + offset;
+ ret = drm_dev_init(drm, driver, parent);
if (ret) {
- kfree(dev);
+ kfree(container);
return ERR_PTR(ret);
}
+ drmm_add_final_kfree(drm, container);
- drmm_add_final_kfree(dev, dev);
+ return container;
+}
+EXPORT_SYMBOL(__drm_dev_alloc);
- return dev;
+/**
+ * drm_dev_alloc - Allocate new DRM device
+ * @driver: DRM driver to allocate device for
+ * @parent: Parent device object
+ *
+ * This is the deprecated version of devm_drm_dev_alloc(), which does not support
+ * subclassing through embedding the struct &drm_device in a driver private
+ * structure, and which does not support automatic cleanup through devres.
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or ERR_PTR on failure.
+ */
+struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
+ struct device *parent)
+{
+ return __drm_dev_alloc(parent, driver, sizeof(struct drm_device), 0);
}
EXPORT_SYMBOL(drm_dev_alloc);
@@ -1188,6 +1236,7 @@ static int __init drm_core_init(void)
}
drm_debugfs_root = debugfs_create_dir("dri", NULL);
+ drm_bridge_debugfs_params(drm_debugfs_root);
ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
if (ret < 0)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 9edb3247c767..74e77742b2bd 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -6761,23 +6761,23 @@ out:
}
static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *dev,
- struct displayid_detailed_timings_1 *timings,
+ const struct displayid_detailed_timings_1 *timings,
bool type_7)
{
struct drm_display_mode *mode;
- unsigned pixel_clock = (timings->pixel_clock[0] |
- (timings->pixel_clock[1] << 8) |
- (timings->pixel_clock[2] << 16)) + 1;
- unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
- unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
- unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
- unsigned hsync_width = (timings->hsw[0] | timings->hsw[1] << 8) + 1;
- unsigned vactive = (timings->vactive[0] | timings->vactive[1] << 8) + 1;
- unsigned vblank = (timings->vblank[0] | timings->vblank[1] << 8) + 1;
- unsigned vsync = (timings->vsync[0] | (timings->vsync[1] & 0x7f) << 8) + 1;
- unsigned vsync_width = (timings->vsw[0] | timings->vsw[1] << 8) + 1;
- bool hsync_positive = (timings->hsync[1] >> 7) & 0x1;
- bool vsync_positive = (timings->vsync[1] >> 7) & 0x1;
+ unsigned int pixel_clock = (timings->pixel_clock[0] |
+ (timings->pixel_clock[1] << 8) |
+ (timings->pixel_clock[2] << 16)) + 1;
+ unsigned int hactive = le16_to_cpu(timings->hactive) + 1;
+ unsigned int hblank = le16_to_cpu(timings->hblank) + 1;
+ unsigned int hsync = (le16_to_cpu(timings->hsync) & 0x7fff) + 1;
+ unsigned int hsync_width = le16_to_cpu(timings->hsw) + 1;
+ unsigned int vactive = le16_to_cpu(timings->vactive) + 1;
+ unsigned int vblank = le16_to_cpu(timings->vblank) + 1;
+ unsigned int vsync = (le16_to_cpu(timings->vsync) & 0x7fff) + 1;
+ unsigned int vsync_width = le16_to_cpu(timings->vsw) + 1;
+ bool hsync_positive = le16_to_cpu(timings->hsync) & (1 << 15);
+ bool vsync_positive = le16_to_cpu(timings->vsync) & (1 << 15);
mode = drm_mode_create(dev);
if (!mode)
@@ -6834,6 +6834,66 @@ static int add_displayid_detailed_1_modes(struct drm_connector *connector,
return num_modes;
}
+static struct drm_display_mode *drm_mode_displayid_formula(struct drm_device *dev,
+ const struct displayid_formula_timings_9 *timings,
+ bool type_10)
+{
+ struct drm_display_mode *mode;
+ u16 hactive = le16_to_cpu(timings->hactive) + 1;
+ u16 vactive = le16_to_cpu(timings->vactive) + 1;
+ u8 timing_formula = timings->flags & 0x7;
+
+ /* TODO: support RB-v2 & RB-v3 */
+ if (timing_formula > 1)
+ return NULL;
+
+ /* TODO: support video-optimized refresh rate */
+ if (timings->flags & (1 << 4))
+		drm_dbg_kms(dev, "Fractional vrefresh is not implemented, proceeding with non-video-optimized refresh rate\n");
+
+ mode = drm_cvt_mode(dev, hactive, vactive, timings->vrefresh + 1, timing_formula == 1, false, false);
+ if (!mode)
+ return NULL;
+
+ /* TODO: interpret S3D flags */
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ drm_mode_set_name(mode);
+
+ return mode;
+}
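For reference, drm_cvt_mode() synthesizes the full timing from just the active size and refresh rate; a hedged standalone equivalent of the call above for a 1920x1080 Type 9 entry with formula 1 (CVT reduced blanking):

.. code-block:: c

   struct drm_display_mode *mode;

   mode = drm_cvt_mode(dev, 1920, 1080, 60,
                       true,   /* reduced blanking, timing_formula == 1 */
                       false,  /* not interlaced */
                       false); /* no margins */
   if (mode) {
           mode->type = DRM_MODE_TYPE_DRIVER;
           drm_mode_set_name(mode); /* e.g. "1920x1080" */
   }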
+
+static int add_displayid_formula_modes(struct drm_connector *connector,
+ const struct displayid_block *block)
+{
+	const struct displayid_formula_timing_block *formula_block = (const struct displayid_formula_timing_block *)block;
+ int num_timings;
+ struct drm_display_mode *newmode;
+ int num_modes = 0;
+ bool type_10 = block->tag == DATA_BLOCK_2_TYPE_10_FORMULA_TIMING;
+ int timing_size = 6 + ((formula_block->base.rev & 0x70) >> 4);
+
+ /* extended blocks are not supported yet */
+ if (timing_size != 6)
+ return 0;
+
+ if (block->num_bytes % timing_size)
+ return 0;
+
+ num_timings = block->num_bytes / timing_size;
+ for (int i = 0; i < num_timings; i++) {
+ const struct displayid_formula_timings_9 *timings = &formula_block->timings[i];
+
+ newmode = drm_mode_displayid_formula(connector->dev, timings, type_10);
+ if (!newmode)
+ continue;
+
+ drm_mode_probed_add(connector, newmode);
+ num_modes++;
+ }
+ return num_modes;
+}
+
static int add_displayid_detailed_modes(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
@@ -6846,6 +6906,9 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
if (block->tag == DATA_BLOCK_TYPE_1_DETAILED_TIMING ||
block->tag == DATA_BLOCK_2_TYPE_7_DETAILED_TIMING)
num_modes += add_displayid_detailed_1_modes(connector, block);
+ else if (block->tag == DATA_BLOCK_2_TYPE_9_FORMULA_TIMING ||
+ block->tag == DATA_BLOCK_2_TYPE_10_FORMULA_TIMING)
+ num_modes += add_displayid_formula_modes(connector, block);
}
displayid_iter_end(&iter);
@@ -7100,18 +7163,12 @@ EXPORT_SYMBOL(drm_add_edid_modes);
* Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_modes_noedid(struct drm_connector *connector,
- int hdisplay, int vdisplay)
+ unsigned int hdisplay, unsigned int vdisplay)
{
- int i, count, num_modes = 0;
+ int i, count = ARRAY_SIZE(drm_dmt_modes), num_modes = 0;
struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
- count = ARRAY_SIZE(drm_dmt_modes);
- if (hdisplay < 0)
- hdisplay = 0;
- if (vdisplay < 0)
- vdisplay = 0;
-
for (i = 0; i < count; i++) {
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index cf2463090d3a..246cf845e2c9 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -993,6 +993,40 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
EXPORT_SYMBOL(drm_show_fdinfo);
/**
+ * drm_file_err - log process name, pid and client_name associated with a drm_file
+ * @file_priv: context of interest for process name and pid
+ * @fmt: printf() like format string
+ *
+ * Helper function for drivers that need to log process details, such as
+ * the process name and pid, along with their own message.
+ */
+void drm_file_err(struct drm_file *file_priv, const char *fmt, ...)
+{
+ va_list args;
+ struct va_format vaf;
+ struct pid *pid;
+ struct task_struct *task;
+ struct drm_device *dev = file_priv->minor->dev;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ mutex_lock(&file_priv->client_name_lock);
+ rcu_read_lock();
+ pid = rcu_dereference(file_priv->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
+
+ drm_err(dev, "comm: %s pid: %d client: %s ... %pV", task ? task->comm : "Unset",
+ task ? task->pid : 0, file_priv->client_name ?: "Unset", &vaf);
+
+ va_end(args);
+ rcu_read_unlock();
+ mutex_unlock(&file_priv->client_name_lock);
+}
+EXPORT_SYMBOL(drm_file_err);
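Usage mirrors drm_err(), with the drm_file supplying the offending process context (hypothetical caller):

.. code-block:: c

   /* Hypothetical: attribute a GPU-job failure to its client. */
   static void foo_job_timedout(struct drm_file *file_priv, u64 seqno)
   {
           drm_file_err(file_priv, "job %llu timed out\n", seqno);
   }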
+
+/**
* mock_drm_getfile - Create a new struct file for the drm device
* @minor: drm minor to wrap (e.g. #drm_device.primary)
* @flags: file creation mode (O_RDWR etc)
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 01d3ab307ac3..d36e6cacc575 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -20,6 +20,8 @@
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
+#include "drm_format_internal.h"
+
/**
* drm_format_conv_state_init - Initialize format-conversion state
* @state: The state to initialize
@@ -244,6 +246,152 @@ static int drm_fb_xfrm(struct iosys_map *dst,
xfrm_line);
}
+#define ALIGN_DOWN_PIXELS(end, n, a) \
+ ((end) - ((n) & ((a) - 1)))
+
+static __always_inline void drm_fb_xfrm_line_32to8(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le32 *dbuf32 = dbuf;
+ u8 *dbuf8;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+ /* write 4 pixels at once */
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+ u32 pix[4] = {
+ le32_to_cpup(sbuf32),
+ le32_to_cpup(sbuf32 + 1),
+ le32_to_cpup(sbuf32 + 2),
+ le32_to_cpup(sbuf32 + 3),
+ };
+ /* write output bytes in reverse order for little endianness */
+ u32 val32 = xfrm_pixel(pix[0]) |
+ (xfrm_pixel(pix[1]) << 8) |
+ (xfrm_pixel(pix[2]) << 16) |
+ (xfrm_pixel(pix[3]) << 24);
+ *dbuf32++ = cpu_to_le32(val32);
+ sbuf32 += ARRAY_SIZE(pix);
+ }
+
+ /* write trailing pixels */
+ dbuf8 = (u8 __force *)dbuf32;
+ while (sbuf32 < send32)
+ *dbuf8++ = xfrm_pixel(le32_to_cpup(sbuf32++));
+}
+
+static __always_inline void drm_fb_xfrm_line_32to16(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le64 *dbuf64 = dbuf;
+ __le32 *dbuf32;
+ __le16 *dbuf16;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+#if defined(CONFIG_64BIT)
+ /* write 4 pixels at once */
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+ u32 pix[4] = {
+ le32_to_cpup(sbuf32),
+ le32_to_cpup(sbuf32 + 1),
+ le32_to_cpup(sbuf32 + 2),
+ le32_to_cpup(sbuf32 + 3),
+ };
+ /* write output bytes in reverse order for little endianness */
+ u64 val64 = ((u64)xfrm_pixel(pix[0])) |
+ ((u64)xfrm_pixel(pix[1]) << 16) |
+ ((u64)xfrm_pixel(pix[2]) << 32) |
+ ((u64)xfrm_pixel(pix[3]) << 48);
+ *dbuf64++ = cpu_to_le64(val64);
+ sbuf32 += ARRAY_SIZE(pix);
+ }
+#endif
+
+ /* write 2 pixels at once */
+ dbuf32 = (__le32 __force *)dbuf64;
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 2)) {
+ u32 pix[2] = {
+ le32_to_cpup(sbuf32),
+ le32_to_cpup(sbuf32 + 1),
+ };
+ /* write output bytes in reverse order for little endianness */
+ u32 val32 = xfrm_pixel(pix[0]) |
+ (xfrm_pixel(pix[1]) << 16);
+ *dbuf32++ = cpu_to_le32(val32);
+ sbuf32 += ARRAY_SIZE(pix);
+ }
+
+ /* write trailing pixel */
+ dbuf16 = (__le16 __force *)dbuf32;
+ while (sbuf32 < send32)
+ *dbuf16++ = cpu_to_le16(xfrm_pixel(le32_to_cpup(sbuf32++)));
+}
+
+static __always_inline void drm_fb_xfrm_line_32to24(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le32 *dbuf32 = dbuf;
+ u8 *dbuf8;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+ /* write pixels in chunks of 4 */
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+ u32 val24[4] = {
+ xfrm_pixel(le32_to_cpup(sbuf32)),
+ xfrm_pixel(le32_to_cpup(sbuf32 + 1)),
+ xfrm_pixel(le32_to_cpup(sbuf32 + 2)),
+ xfrm_pixel(le32_to_cpup(sbuf32 + 3)),
+ };
+ u32 out32[3] = {
+ /* write output bytes in reverse order for little endianness */
+ ((val24[0] & 0x000000ff)) |
+ ((val24[0] & 0x0000ff00)) |
+ ((val24[0] & 0x00ff0000)) |
+ ((val24[1] & 0x000000ff) << 24),
+ ((val24[1] & 0x0000ff00) >> 8) |
+ ((val24[1] & 0x00ff0000) >> 8) |
+ ((val24[2] & 0x000000ff) << 16) |
+ ((val24[2] & 0x0000ff00) << 16),
+ ((val24[2] & 0x00ff0000) >> 16) |
+ ((val24[3] & 0x000000ff) << 8) |
+ ((val24[3] & 0x0000ff00) << 8) |
+ ((val24[3] & 0x00ff0000) << 8),
+ };
+
+ *dbuf32++ = cpu_to_le32(out32[0]);
+ *dbuf32++ = cpu_to_le32(out32[1]);
+ *dbuf32++ = cpu_to_le32(out32[2]);
+ sbuf32 += ARRAY_SIZE(val24);
+ }
+
+ /* write trailing pixel */
+ dbuf8 = (u8 __force *)dbuf32;
+ while (sbuf32 < send32) {
+ u32 val24 = xfrm_pixel(le32_to_cpup(sbuf32++));
+ /* write output in reverse order for little endianness */
+ *dbuf8++ = (val24 & 0x000000ff);
+ *dbuf8++ = (val24 & 0x0000ff00) >> 8;
+ *dbuf8++ = (val24 & 0x00ff0000) >> 16;
+ }
+}
+
+static __always_inline void drm_fb_xfrm_line_32to32(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le32 *dbuf32 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+ while (sbuf32 < send32)
+ *dbuf32++ = cpu_to_le32(xfrm_pixel(le32_to_cpup(sbuf32++)));
+}
+
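
[Editor's note: the four line helpers above share one pattern: an unrolled loop writes word-sized groups of output pixels, and ALIGN_DOWN_PIXELS() computes the source pointer just past the last full group so a scalar tail loop can finish the line. The arithmetic in isolation, as a standalone C sketch with a 7-pixel line and groups of 4:]

	#include <stdio.h>

	#define ALIGN_DOWN_PIXELS(end, n, a) \
		((end) - ((n) & ((a) - 1)))

	int main(void)
	{
		unsigned int buf[7];
		unsigned int *end = buf + 7;
		/* 7 & 3 == 3 trailing pixels, so the unrolled loop stops at buf + 4 */
		unsigned int *aligned_end = ALIGN_DOWN_PIXELS(end, 7, 4);

		printf("unrolled: %td pixels, tail: %td pixels\n",
		       aligned_end - buf, end - aligned_end);	/* prints 4 and 3 */
		return 0;
	}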
/**
* drm_fb_memcpy - Copy clip buffer
* @dst: Array of destination buffers
@@ -368,17 +516,7 @@ EXPORT_SYMBOL(drm_fb_swab);
static void drm_fb_xrgb8888_to_rgb332_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- dbuf8[x] = ((pix & 0x00e00000) >> 16) |
- ((pix & 0x0000e000) >> 11) |
- ((pix & 0x000000c0) >> 6);
- }
+ drm_fb_xfrm_line_32to8(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb332);
}
/**
@@ -417,38 +555,19 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332);
static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb565);
+}
+
+static __always_inline u32 drm_xrgb8888_to_rgb565_swab(u32 pix)
+{
+ return swab16(drm_pixel_xrgb8888_to_rgb565(pix));
}
/* TODO: implement this helper as conversion to RGB565|BIG_ENDIAN */
static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
- dbuf16[x] = cpu_to_le16(swab16(val16));
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_xrgb8888_to_rgb565_swab);
}
/**
@@ -495,19 +614,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xrgb1555);
}
/**
@@ -547,20 +654,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb1555);
static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = BIT(15) | /* set alpha bit */
- ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb1555);
}
/**
@@ -600,20 +694,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb1555);
static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00f80000) >> 8) |
- ((pix & 0x0000f800) >> 5) |
- ((pix & 0x000000f8) >> 2) |
- BIT(0); /* set alpha bit */
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgba5551);
}
/**
@@ -653,18 +734,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgba5551);
static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- /* write blue-green-red to output in little endianness */
- *dbuf8++ = (pix & 0x000000FF) >> 0;
- *dbuf8++ = (pix & 0x0000FF00) >> 8;
- *dbuf8++ = (pix & 0x00FF0000) >> 16;
- }
+ drm_fb_xfrm_line_32to24(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb888);
}
/**
@@ -704,18 +774,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
static void drm_fb_xrgb8888_to_bgr888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- /* write red-green-blue to output in little endianness */
- *dbuf8++ = (pix & 0x00ff0000) >> 16;
- *dbuf8++ = (pix & 0x0000ff00) >> 8;
- *dbuf8++ = (pix & 0x000000ff) >> 0;
- }
+ drm_fb_xfrm_line_32to24(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_bgr888);
}
/**
@@ -755,16 +814,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgr888);
static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- pix |= GENMASK(31, 24); /* fill alpha bits */
- dbuf32[x] = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb8888);
}
/**
@@ -804,19 +854,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb8888);
static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- pix = ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- GENMASK(31, 24); /* fill alpha bits */
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_abgr8888);
}
static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
@@ -835,19 +873,7 @@ static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned in
static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- pix = ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- ((pix & 0xff000000) >> 24) << 24;
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xbgr8888);
}
static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
@@ -866,20 +892,7 @@ static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned in
static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 val32;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val32 = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- pix = val32 | ((val32 >> 8) & 0x00300C03);
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xrgb2101010);
}
/**
@@ -920,21 +933,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010);
static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 val32;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val32 = ((pix & 0x000000ff) << 2) |
- ((pix & 0x0000ff00) << 4) |
- ((pix & 0x00ff0000) << 6);
- pix = GENMASK(31, 30) | /* set alpha bits */
- val32 | ((val32 >> 8) & 0x00300c03);
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb2101010);
}
/**
@@ -975,19 +974,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb2101010);
static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
-
- for (x = 0; x < pixels; x++) {
- u32 pix = le32_to_cpu(sbuf32[x]);
- u8 r = (pix & 0x00ff0000) >> 16;
- u8 g = (pix & 0x0000ff00) >> 8;
- u8 b = pix & 0x000000ff;
-
- /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
- *dbuf8++ = (3 * r + 6 * g + b) / 10;
- }
+ drm_fb_xfrm_line_32to8(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_r8_bt601);
}
/**
@@ -1031,36 +1018,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
static void drm_fb_argb8888_to_argb4444_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- unsigned int pixels2 = pixels & ~GENMASK_ULL(0, 0);
- __le32 *dbuf32 = dbuf;
- __le16 *dbuf16 = dbuf + pixels2 * sizeof(*dbuf16);
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 val32;
- u16 val16;
- u32 pix[2];
-
- for (x = 0; x < pixels2; x += 2, ++dbuf32) {
- pix[0] = le32_to_cpu(sbuf32[x]);
- pix[1] = le32_to_cpu(sbuf32[x + 1]);
- val32 = ((pix[0] & 0xf0000000) >> 16) |
- ((pix[0] & 0x00f00000) >> 12) |
- ((pix[0] & 0x0000f000) >> 8) |
- ((pix[0] & 0x000000f0) >> 4) |
- ((pix[1] & 0xf0000000) >> 0) |
- ((pix[1] & 0x00f00000) << 4) |
- ((pix[1] & 0x0000f000) << 8) |
- ((pix[1] & 0x000000f0) << 12);
- *dbuf32 = cpu_to_le32(val32);
- }
- for (; x < pixels; x++) {
- pix[0] = le32_to_cpu(sbuf32[x]);
- val16 = ((pix[0] & 0xf0000000) >> 16) |
- ((pix[0] & 0x00f00000) >> 12) |
- ((pix[0] & 0x0000f000) >> 8) |
- ((pix[0] & 0x000000f0) >> 4);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_argb8888_to_argb4444);
}
/**
diff --git a/drivers/gpu/drm/drm_format_internal.h b/drivers/gpu/drm/drm_format_internal.h
new file mode 100644
index 000000000000..9f857bfa368d
--- /dev/null
+++ b/drivers/gpu/drm/drm_format_internal.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef DRM_FORMAT_INTERNAL_H
+#define DRM_FORMAT_INTERNAL_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/*
+ * Each pixel-format conversion helper takes a raw pixel in a
+ * specific input format and returns a raw pixel in a specific
+ * output format. All pixels are in little-endian byte order.
+ *
+ * Function names are
+ *
+ * drm_pixel_<input>_to_<output>_<algorithm>()
+ *
+ * where <input> and <output> refer to pixel formats. The
+ * <algorithm> is optional and hints to the method used for the
+ * conversion. Helpers with no algorithm given apply pixel-bit
+ * shifting.
+ *
+ * The argument type is u32. We expect this to be wide enough to
+ * hold all conversion input from 32-bit RGB to any output format.
+ * The Linux kernel should avoid format conversion for anything
+ * but XRGB8888 input data. Converting from other formats can still
+ * be acceptable in some cases.
+ *
+ * The return type is u32. It is wide enough to hold all conversion
+ * output from XRGB8888. For output formats wider than 32 bit, a
+ * return type of u64 would be acceptable.
+ */
+
+/*
+ * Conversions from XRGB8888
+ */
+
+static inline u32 drm_pixel_xrgb8888_to_r8_bt601(u32 pix)
+{
+ u32 r = (pix & 0x00ff0000) >> 16;
+ u32 g = (pix & 0x0000ff00) >> 8;
+ u32 b = pix & 0x000000ff;
+
+ /* ITU-R BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
+ return (3 * r + 6 * g + b) / 10;
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgb332(u32 pix)
+{
+ return ((pix & 0x00e00000) >> 16) |
+ ((pix & 0x0000e000) >> 11) |
+ ((pix & 0x000000c0) >> 6);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgb565(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000fc00) >> 5) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgbx5551(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000f800) >> 5) |
+ ((pix & 0x000000f8) >> 2);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgba5551(u32 pix)
+{
+ return drm_pixel_xrgb8888_to_rgbx5551(pix) |
+ BIT(0); /* set alpha bit */
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xrgb1555(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_argb1555(u32 pix)
+{
+ return BIT(15) | /* set alpha bit */
+ drm_pixel_xrgb8888_to_xrgb1555(pix);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgb888(u32 pix)
+{
+ return pix & GENMASK(23, 0);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_bgr888(u32 pix)
+{
+ return ((pix & 0x00ff0000) >> 16) |
+ ((pix & 0x0000ff00)) |
+ ((pix & 0x000000ff) << 16);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_argb8888(u32 pix)
+{
+ return GENMASK(31, 24) | /* fill alpha bits */
+ pix;
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xbgr8888(u32 pix)
+{
+ return ((pix & 0xff000000)) | /* also copy filler bits */
+ ((pix & 0x00ff0000) >> 16) |
+ ((pix & 0x0000ff00)) |
+ ((pix & 0x000000ff) << 16);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_abgr8888(u32 pix)
+{
+ return GENMASK(31, 24) | /* fill alpha bits */
+ drm_pixel_xrgb8888_to_xbgr8888(pix);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xrgb2101010(u32 pix)
+{
+ pix = ((pix & 0x000000ff) << 2) |
+ ((pix & 0x0000ff00) << 4) |
+ ((pix & 0x00ff0000) << 6);
+ return pix | ((pix >> 8) & 0x00300c03);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_argb2101010(u32 pix)
+{
+ return GENMASK(31, 30) | /* set alpha bits */
+ drm_pixel_xrgb8888_to_xrgb2101010(pix);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xbgr2101010(u32 pix)
+{
+ pix = ((pix & 0x00ff0000) >> 14) |
+ ((pix & 0x0000ff00) << 4) |
+ ((pix & 0x000000ff) << 22);
+ return pix | ((pix >> 8) & 0x00300c03);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_abgr2101010(u32 pix)
+{
+ return GENMASK(31, 30) | /* set alpha bits */
+ drm_pixel_xrgb8888_to_xbgr2101010(pix);
+}
+
+/*
+ * Conversion from ARGB8888
+ */
+
+static inline u32 drm_pixel_argb8888_to_argb4444(u32 pix)
+{
+ return ((pix & 0xf0000000) >> 16) |
+ ((pix & 0x00f00000) >> 12) |
+ ((pix & 0x0000f000) >> 8) |
+ ((pix & 0x000000f0) >> 4);
+}
+
+#endif
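
[Editor's note: to make the bit arithmetic concrete, here is a standalone C check of two of the helpers above; the expected constants follow directly from the shifts, and the 2101010 case shows the top-two-bits replication that fills the widened channels.]

	#include <assert.h>
	#include <stdint.h>

	/* Same math as drm_pixel_xrgb8888_to_rgb565() and
	 * drm_pixel_xrgb8888_to_xrgb2101010() above.
	 */
	static uint32_t to_rgb565(uint32_t pix)
	{
		return ((pix & 0x00f80000) >> 8) |
		       ((pix & 0x0000fc00) >> 5) |
		       ((pix & 0x000000f8) >> 3);
	}

	static uint32_t to_xrgb2101010(uint32_t pix)
	{
		pix = ((pix & 0x000000ff) << 2) |
		      ((pix & 0x0000ff00) << 4) |
		      ((pix & 0x00ff0000) << 6);
		/* replicate each channel's top 2 bits into its new low bits */
		return pix | ((pix >> 8) & 0x00300c03);
	}

	int main(void)
	{
		/* X = 0, R = 0xff, G = 0x80, B = 0x40 */
		assert(to_rgb565(0x00ff8040) == 0xfc08);
		assert(to_xrgb2101010(0x00ff8040) == 0x3ff80901);
		return 0;
	}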
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index c6240bab3fa5..1e659d2660f7 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1216,7 +1216,7 @@ void drm_gem_unpin(struct drm_gem_object *obj)
dma_resv_unlock(obj->resv);
}
-int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
@@ -1233,9 +1233,9 @@ int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
return 0;
}
-EXPORT_SYMBOL(drm_gem_vmap);
+EXPORT_SYMBOL(drm_gem_vmap_locked);
-void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
dma_resv_assert_held(obj->resv);
@@ -1248,7 +1248,7 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
/* Always set the mapping to NULL. Callers may rely on this. */
iosys_map_clear(map);
}
-EXPORT_SYMBOL(drm_gem_vunmap);
+EXPORT_SYMBOL(drm_gem_vunmap_locked);
void drm_gem_lock(struct drm_gem_object *obj)
{
@@ -1262,25 +1262,25 @@ void drm_gem_unlock(struct drm_gem_object *obj)
}
EXPORT_SYMBOL(drm_gem_unlock);
-int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
+int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
dma_resv_lock(obj->resv, NULL);
- ret = drm_gem_vmap(obj, map);
+ ret = drm_gem_vmap_locked(obj, map);
dma_resv_unlock(obj->resv);
return ret;
}
-EXPORT_SYMBOL(drm_gem_vmap_unlocked);
+EXPORT_SYMBOL(drm_gem_vmap);
-void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
+void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
dma_resv_lock(obj->resv, NULL);
- drm_gem_vunmap(obj, map);
+ drm_gem_vunmap_locked(obj, map);
dma_resv_unlock(obj->resv);
}
-EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
+EXPORT_SYMBOL(drm_gem_vunmap);
/**
* drm_gem_lock_reservations - Sets up the ww context and acquires
@@ -1543,10 +1543,10 @@ tail:
EXPORT_SYMBOL(drm_gem_lru_scan);
/**
- * drm_gem_evict - helper to evict backing pages for a GEM object
+ * drm_gem_evict_locked - helper to evict backing pages for a GEM object
* @obj: obj in question
*/
-int drm_gem_evict(struct drm_gem_object *obj)
+int drm_gem_evict_locked(struct drm_gem_object *obj)
{
dma_resv_assert_held(obj->resv);
@@ -1558,4 +1558,4 @@ int drm_gem_evict(struct drm_gem_object *obj)
return 0;
}
-EXPORT_SYMBOL(drm_gem_evict);
+EXPORT_SYMBOL(drm_gem_evict_locked);
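
[Editor's note: with this rename the unsuffixed names follow the DRM convention of acquiring the reservation lock themselves, while the _locked variants require the caller to hold it. A sketch of both calling conventions, assuming a valid, unlocked GEM object:]

	static int vmap_both_ways(struct drm_gem_object *obj)
	{
		struct iosys_map map;
		int ret;

		/* unsuffixed: takes and releases obj->resv internally */
		ret = drm_gem_vmap(obj, &map);
		if (ret)
			return ret;
		drm_gem_vunmap(obj, &map);

		/* _locked: the caller holds the reservation lock */
		dma_resv_lock(obj->resv, NULL);
		ret = drm_gem_vmap_locked(obj, &map);
		if (!ret)
			drm_gem_vunmap_locked(obj, &map);
		dma_resv_unlock(obj->resv);

		return ret;
	}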
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 0fbeb686e561..6f72e7a0f427 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -362,7 +362,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
ret = -EINVAL;
goto err_drm_gem_vunmap;
}
- ret = drm_gem_vmap_unlocked(obj, &map[i]);
+ ret = drm_gem_vmap(obj, &map[i]);
if (ret)
goto err_drm_gem_vunmap;
}
@@ -384,7 +384,7 @@ err_drm_gem_vunmap:
obj = drm_gem_fb_get_obj(fb, i);
if (!obj)
continue;
- drm_gem_vunmap_unlocked(obj, &map[i]);
+ drm_gem_vunmap(obj, &map[i]);
}
return ret;
}
@@ -411,7 +411,7 @@ void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map)
continue;
if (iosys_map_is_null(&map[i]))
continue;
- drm_gem_vunmap_unlocked(obj, &map[i]);
+ drm_gem_vunmap(obj, &map[i]);
}
}
EXPORT_SYMBOL(drm_gem_fb_vunmap);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index d99dee67353a..aa43265f4f4f 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -165,7 +165,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
} else {
dma_resv_lock(shmem->base.resv, NULL);
- drm_WARN_ON(obj->dev, shmem->vmap_use_count);
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
if (shmem->sgt) {
dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
@@ -174,9 +174,10 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
kfree(shmem->sgt);
}
if (shmem->pages)
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
- drm_WARN_ON(obj->dev, shmem->pages_use_count);
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));
dma_resv_unlock(shmem->base.resv);
}
@@ -186,21 +187,20 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
-static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
struct page **pages;
dma_resv_assert_held(shmem->base.resv);
- if (shmem->pages_use_count++ > 0)
+ if (refcount_inc_not_zero(&shmem->pages_use_count))
return 0;
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
PTR_ERR(pages));
- shmem->pages_use_count = 0;
return PTR_ERR(pages);
}
@@ -216,38 +216,36 @@ static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
shmem->pages = pages;
+ refcount_set(&shmem->pages_use_count, 1);
+
return 0;
}
/*
- * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
+ * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
* @shmem: shmem GEM object
*
* This function decreases the use count and puts the backing pages when use drops to zero.
*/
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
dma_resv_assert_held(shmem->base.resv);
- if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
- return;
-
- if (--shmem->pages_use_count > 0)
- return;
-
+ if (refcount_dec_and_test(&shmem->pages_use_count)) {
#ifdef CONFIG_X86
- if (shmem->map_wc)
- set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
+ if (shmem->map_wc)
+ set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif
- drm_gem_put_pages(obj, shmem->pages,
- shmem->pages_mark_dirty_on_put,
- shmem->pages_mark_accessed_on_put);
- shmem->pages = NULL;
+ drm_gem_put_pages(obj, shmem->pages,
+ shmem->pages_mark_dirty_on_put,
+ shmem->pages_mark_accessed_on_put);
+ shmem->pages = NULL;
+ }
}
-EXPORT_SYMBOL(drm_gem_shmem_put_pages);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);
int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
@@ -257,7 +255,12 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));
- ret = drm_gem_shmem_get_pages(shmem);
+ if (refcount_inc_not_zero(&shmem->pages_pin_count))
+ return 0;
+
+ ret = drm_gem_shmem_get_pages_locked(shmem);
+ if (!ret)
+ refcount_set(&shmem->pages_pin_count, 1);
return ret;
}
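
[Editor's note: the refcount conversion throughout this file follows one pattern: refcount_inc_not_zero() is the fast path, the 0 -> 1 transition happens via refcount_set() only after the resource is really acquired, and both transitions are serialized by the reservation lock. Distilled into a sketch; struct my_res and the acquire/release helpers are hypothetical.]

	/* Both helpers are called with the serializing lock held. */
	static int resource_get(struct my_res *res)
	{
		int err;

		if (refcount_inc_not_zero(&res->use_count))
			return 0;			/* already alive, ref taken */

		err = acquire_backing_storage(res);	/* assumed helper */
		if (err)
			return err;			/* count stays at zero */

		refcount_set(&res->use_count, 1);	/* first reference */
		return 0;
	}

	static void resource_put(struct my_res *res)
	{
		if (refcount_dec_and_test(&res->use_count))
			release_backing_storage(res);	/* assumed helper */
	}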
@@ -267,7 +270,8 @@ void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
dma_resv_assert_held(shmem->base.resv);
- drm_gem_shmem_put_pages(shmem);
+ if (refcount_dec_and_test(&shmem->pages_pin_count))
+ drm_gem_shmem_put_pages_locked(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);
@@ -288,6 +292,9 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
+ if (refcount_inc_not_zero(&shmem->pages_pin_count))
+ return 0;
+
ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
if (ret)
return ret;
@@ -296,7 +303,7 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
return ret;
}
-EXPORT_SYMBOL(drm_gem_shmem_pin);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_pin);
/**
* drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
@@ -311,14 +318,17 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
+ if (refcount_dec_not_one(&shmem->pages_pin_count))
+ return;
+
dma_resv_lock(shmem->base.resv, NULL);
drm_gem_shmem_unpin_locked(shmem);
dma_resv_unlock(shmem->base.resv);
}
-EXPORT_SYMBOL(drm_gem_shmem_unpin);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin);
/*
- * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
* @map: Returns the kernel virtual address of the SHMEM GEM object's backing
* store.
@@ -327,47 +337,43 @@ EXPORT_SYMBOL(drm_gem_shmem_unpin);
* exists for the buffer backing the shmem GEM object. It hides the differences
* between dma-buf imported and natively allocated objects.
*
- * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
+ * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked().
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
+int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map)
{
struct drm_gem_object *obj = &shmem->base;
int ret = 0;
if (drm_gem_is_imported(obj)) {
ret = dma_buf_vmap(obj->dma_buf, map);
- if (!ret) {
- if (drm_WARN_ON(obj->dev, map->is_iomem)) {
- dma_buf_vunmap(obj->dma_buf, map);
- return -EIO;
- }
- }
} else {
pgprot_t prot = PAGE_KERNEL;
dma_resv_assert_held(shmem->base.resv);
- if (shmem->vmap_use_count++ > 0) {
+ if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
iosys_map_set_vaddr(map, shmem->vaddr);
return 0;
}
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_pin_locked(shmem);
if (ret)
- goto err_zero_use;
+ return ret;
if (shmem->map_wc)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, prot);
- if (!shmem->vaddr)
+ if (!shmem->vaddr) {
ret = -ENOMEM;
- else
+ } else {
iosys_map_set_vaddr(map, shmem->vaddr);
+ refcount_set(&shmem->vmap_use_count, 1);
+ }
}
if (ret) {
@@ -379,28 +385,26 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
err_put_pages:
if (!drm_gem_is_imported(obj))
- drm_gem_shmem_put_pages(shmem);
-err_zero_use:
- shmem->vmap_use_count = 0;
+ drm_gem_shmem_unpin_locked(shmem);
return ret;
}
-EXPORT_SYMBOL(drm_gem_shmem_vmap);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);
/*
- * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
* @map: Kernel virtual address where the SHMEM GEM object was mapped
*
* This function cleans up a kernel virtual address mapping acquired by
- * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
- * zero.
+ * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count
+ * drops to zero.
*
* This function hides the differences between dma-buf imported and natively
* allocated objects.
*/
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
+void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map)
{
struct drm_gem_object *obj = &shmem->base;
@@ -409,19 +413,15 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
} else {
dma_resv_assert_held(shmem->base.resv);
- if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
- return;
-
- if (--shmem->vmap_use_count > 0)
- return;
+ if (refcount_dec_and_test(&shmem->vmap_use_count)) {
+ vunmap(shmem->vaddr);
+ shmem->vaddr = NULL;
- vunmap(shmem->vaddr);
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_unpin_locked(shmem);
+ }
}
-
- shmem->vaddr = NULL;
}
-EXPORT_SYMBOL(drm_gem_shmem_vunmap);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked);
static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
@@ -449,7 +449,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
/* Update madvise status, returns true if not purged, else
* false or -errno.
*/
-int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
+int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv)
{
dma_resv_assert_held(shmem->base.resv);
@@ -460,9 +460,9 @@ int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
return (madv >= 0);
}
-EXPORT_SYMBOL(drm_gem_shmem_madvise);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked);
-void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
struct drm_device *dev = obj->dev;
@@ -476,7 +476,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
kfree(shmem->sgt);
shmem->sgt = NULL;
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
shmem->madv = -1;
@@ -492,7 +492,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
-EXPORT_SYMBOL(drm_gem_shmem_purge);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);
/**
* drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
@@ -575,8 +575,8 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
* mmap'd, vm_open() just grabs an additional reference for the new
* mm the vma is getting copied into (ie. on fork()).
*/
- if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
- shmem->pages_use_count++;
+ drm_WARN_ON_ONCE(obj->dev,
+ !refcount_inc_not_zero(&shmem->pages_use_count));
dma_resv_unlock(shmem->base.resv);
@@ -589,7 +589,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
dma_resv_lock(shmem->base.resv, NULL);
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
dma_resv_unlock(shmem->base.resv);
drm_gem_vm_close(vma);
@@ -639,7 +639,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
return -EINVAL;
dma_resv_lock(shmem->base.resv, NULL);
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_get_pages_locked(shmem);
dma_resv_unlock(shmem->base.resv);
if (ret)
@@ -666,11 +666,12 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
if (drm_gem_is_imported(&shmem->base))
return;
- drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
- drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+ drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
+ drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
+ drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
-EXPORT_SYMBOL(drm_gem_shmem_print_info);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);
/**
* drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
@@ -707,7 +708,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_get_pages_locked(shmem);
if (ret)
return ERR_PTR(ret);
@@ -729,7 +730,7 @@ err_free_sgt:
sg_free_table(sgt);
kfree(sgt);
err_put_pages:
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 4b2f32889f00..735bfdf4322f 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -1338,7 +1338,6 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
unsigned long num_dma_mapped;
unsigned int order = 0;
unsigned long *pfns;
- struct page **pages;
int err = 0;
struct dev_pagemap *pagemap;
struct drm_pagemap *dpagemap;
@@ -1378,7 +1377,6 @@ retry:
if (err)
goto err_free;
- pages = (struct page **)pfns;
map_pages:
/*
* Perform all dma mappings under the notifier lock to not
@@ -1454,8 +1452,6 @@ map_pages:
err = -EFAULT;
goto err_unmap;
}
-
- pages[i] = page;
} else {
dma_addr_t addr;
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index b2b6a8e49dda..e44f28fd81d3 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -179,8 +179,8 @@ int drm_gem_pin_locked(struct drm_gem_object *obj);
void drm_gem_unpin_locked(struct drm_gem_object *obj);
int drm_gem_pin(struct drm_gem_object *obj);
void drm_gem_unpin(struct drm_gem_object *obj);
-int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
-void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
+int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
+void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
/* drm_debugfs.c drm_debugfs_crc.c */
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index dfa595556320..e5184a0c2465 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -36,6 +36,8 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_print.h>
+#include <linux/media-bus-format.h>
+
#include <video/mipi_display.h>
/**
@@ -871,6 +873,41 @@ ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
EXPORT_SYMBOL(mipi_dsi_generic_read);
/**
+ * drm_mipi_dsi_get_input_bus_fmt() - Get the required MEDIA_BUS_FMT_* based
+ * input pixel format for a given DSI output
+ * pixel format
+ * @dsi_format: pixel format that a DSI host needs to output
+ *
+ * Various DSI hosts can use this function during their
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts operation to ascertain
+ * the MEDIA_BUS_FMT_* pixel format required as input.
+ *
+ * RETURNS:
+ * a 32-bit MEDIA_BUS_FMT_* value on success or 0 in case of failure.
+ */
+u32 drm_mipi_dsi_get_input_bus_fmt(enum mipi_dsi_pixel_format dsi_format)
+{
+ switch (dsi_format) {
+ case MIPI_DSI_FMT_RGB888:
+ return MEDIA_BUS_FMT_RGB888_1X24;
+
+ case MIPI_DSI_FMT_RGB666:
+ return MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
+
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ return MEDIA_BUS_FMT_RGB666_1X18;
+
+ case MIPI_DSI_FMT_RGB565:
+ return MEDIA_BUS_FMT_RGB565_1X16;
+
+ default:
+ /* Unsupported DSI Format */
+ return 0;
+ }
+}
+EXPORT_SYMBOL(drm_mipi_dsi_get_input_bus_fmt);
+
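
[Editor's note: a DSI-host bridge would typically call the new helper from its &drm_bridge_funcs.atomic_get_input_bus_fmts hook; a hypothetical sketch follows, where struct my_dsi, its format field and bridge_to_my_dsi() are assumptions.]

	static u32 *my_dsi_get_input_bus_fmts(struct drm_bridge *bridge,
					      struct drm_bridge_state *bridge_state,
					      struct drm_crtc_state *crtc_state,
					      struct drm_connector_state *conn_state,
					      u32 output_fmt,
					      unsigned int *num_input_fmts)
	{
		struct my_dsi *dsi = bridge_to_my_dsi(bridge);
		u32 *input_fmts;

		*num_input_fmts = 0;

		input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
		if (!input_fmts)
			return NULL;

		input_fmts[0] = drm_mipi_dsi_get_input_bus_fmt(dsi->format);
		if (!input_fmts[0]) {	/* unsupported DSI pixel format */
			kfree(input_fmts);
			return NULL;
		}

		*num_input_fmts = 1;
		return input_fmts;
	}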
+/**
* mipi_dsi_dcs_write_buffer() - transmit a DCS command with payload
* @dsi: DSI peripheral device
* @data: buffer containing data to be transmitted
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 8642a2fb25a9..b4239fd04e9d 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -383,6 +383,13 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
prop = drm_property_create(dev,
DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB,
+ "IN_FORMATS_ASYNC", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.async_modifiers_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB,
"SIZE_HINTS", 0);
if (!prop)
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index c627e42a7ce7..650de4da0853 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -74,8 +74,9 @@ EXPORT_SYMBOL(drm_panel_init);
* drm_panel_add - add a panel to the global registry
* @panel: panel to add
*
- * Add a panel to the global registry so that it can be looked up by display
- * drivers.
+ * Add a panel to the global registry so that it can be looked
+ * up by display drivers. The panel to be added must have been
+ * allocated by devm_drm_panel_alloc().
*/
void drm_panel_add(struct drm_panel *panel)
{
@@ -105,21 +106,21 @@ EXPORT_SYMBOL(drm_panel_remove);
*
* Calling this function will enable power and deassert any reset signals to
* the panel. After this has completed it is possible to communicate with any
- * integrated circuitry via a command bus.
- *
- * Return: 0 on success or a negative error code on failure.
+ * integrated circuitry via a command bus. This function cannot fail (as it is
+ * called from the pre_enable call chain). There will always be a call to
+ * drm_panel_unprepare() afterwards.
*/
-int drm_panel_prepare(struct drm_panel *panel)
+void drm_panel_prepare(struct drm_panel *panel)
{
struct drm_panel_follower *follower;
int ret;
if (!panel)
- return -EINVAL;
+ return;
if (panel->prepared) {
dev_warn(panel->dev, "Skipping prepare of already prepared panel\n");
- return 0;
+ return;
}
mutex_lock(&panel->follower_lock);
@@ -138,11 +139,8 @@ int drm_panel_prepare(struct drm_panel *panel)
follower->funcs->panel_prepared, ret);
}
- ret = 0;
exit:
mutex_unlock(&panel->follower_lock);
-
- return ret;
}
EXPORT_SYMBOL(drm_panel_prepare);
@@ -154,16 +152,14 @@ EXPORT_SYMBOL(drm_panel_prepare);
* reset, turn off power supplies, ...). After this function has completed, it
* is usually no longer possible to communicate with the panel until another
* call to drm_panel_prepare().
- *
- * Return: 0 on success or a negative error code on failure.
*/
-int drm_panel_unprepare(struct drm_panel *panel)
+void drm_panel_unprepare(struct drm_panel *panel)
{
struct drm_panel_follower *follower;
int ret;
if (!panel)
- return -EINVAL;
+ return;
/*
* If you are seeing the warning below it likely means one of two things:
@@ -176,7 +172,7 @@ int drm_panel_unprepare(struct drm_panel *panel)
*/
if (!panel->prepared) {
dev_warn(panel->dev, "Skipping unprepare of already unprepared panel\n");
- return 0;
+ return;
}
mutex_lock(&panel->follower_lock);
@@ -195,11 +191,8 @@ int drm_panel_unprepare(struct drm_panel *panel)
}
panel->prepared = false;
- ret = 0;
exit:
mutex_unlock(&panel->follower_lock);
-
- return ret;
}
EXPORT_SYMBOL(drm_panel_unprepare);
@@ -209,26 +202,26 @@ EXPORT_SYMBOL(drm_panel_unprepare);
*
* Calling this function will cause the panel display drivers to be turned on
* and the backlight to be enabled. Content will be visible on screen after
- * this call completes.
- *
- * Return: 0 on success or a negative error code on failure.
+ * this call completes. This function cannot fail (as it is called from the
+ * enable call chain). There will always be a call to drm_panel_disable()
+ * afterwards.
*/
-int drm_panel_enable(struct drm_panel *panel)
+void drm_panel_enable(struct drm_panel *panel)
{
int ret;
if (!panel)
- return -EINVAL;
+ return;
if (panel->enabled) {
dev_warn(panel->dev, "Skipping enable of already enabled panel\n");
- return 0;
+ return;
}
if (panel->funcs && panel->funcs->enable) {
ret = panel->funcs->enable(panel);
if (ret < 0)
- return ret;
+ return;
}
panel->enabled = true;
@@ -236,8 +229,6 @@ int drm_panel_enable(struct drm_panel *panel)
if (ret < 0)
DRM_DEV_INFO(panel->dev, "failed to enable backlight: %d\n",
ret);
-
- return 0;
}
EXPORT_SYMBOL(drm_panel_enable);
@@ -248,15 +239,13 @@ EXPORT_SYMBOL(drm_panel_enable);
* This will typically turn off the panel's backlight or disable the display
* drivers. For smart panels it should still be possible to communicate with
* the integrated circuitry via any command bus after this call.
- *
- * Return: 0 on success or a negative error code on failure.
*/
-int drm_panel_disable(struct drm_panel *panel)
+void drm_panel_disable(struct drm_panel *panel)
{
int ret;
if (!panel)
- return -EINVAL;
+ return;
/*
* If you are seeing the warning below it likely means one of two things:
@@ -269,7 +258,7 @@ int drm_panel_disable(struct drm_panel *panel)
*/
if (!panel->enabled) {
dev_warn(panel->dev, "Skipping disable of already disabled panel\n");
- return 0;
+ return;
}
ret = backlight_disable(panel->backlight);
@@ -280,11 +269,9 @@ int drm_panel_disable(struct drm_panel *panel)
if (panel->funcs && panel->funcs->disable) {
ret = panel->funcs->disable(panel);
if (ret < 0)
- return ret;
+ return;
}
panel->enabled = false;
-
- return 0;
}
EXPORT_SYMBOL(drm_panel_disable);
@@ -317,6 +304,93 @@ int drm_panel_get_modes(struct drm_panel *panel,
}
EXPORT_SYMBOL(drm_panel_get_modes);
+static void __drm_panel_free(struct kref *kref)
+{
+ struct drm_panel *panel = container_of(kref, struct drm_panel, refcount);
+
+ kfree(panel->container);
+}
+
+/**
+ * drm_panel_get - Acquire a panel reference
+ * @panel: DRM panel
+ *
+ * This function increments the panel's refcount.
+ * Returns:
+ * Pointer to @panel
+ */
+struct drm_panel *drm_panel_get(struct drm_panel *panel)
+{
+ if (!panel)
+ return panel;
+
+ kref_get(&panel->refcount);
+
+ return panel;
+}
+EXPORT_SYMBOL(drm_panel_get);
+
+/**
+ * drm_panel_put - Release a panel reference
+ * @panel: DRM panel
+ *
+ * This function decrements the panel's reference count and frees the
+ * object if the reference count drops to zero.
+ */
+void drm_panel_put(struct drm_panel *panel)
+{
+ if (panel)
+ kref_put(&panel->refcount, __drm_panel_free);
+}
+EXPORT_SYMBOL(drm_panel_put);
+
+/**
+ * drm_panel_put_void - wrapper to drm_panel_put() taking a void pointer
+ *
+ * @data: pointer to @struct drm_panel, cast to a void pointer
+ *
+ * Wrapper of drm_panel_put() to be used when a function taking a void
+ * pointer is needed, for example as a devm action.
+ */
+static void drm_panel_put_void(void *data)
+{
+ struct drm_panel *panel = (struct drm_panel *)data;
+
+ drm_panel_put(panel);
+}
+
+void *__devm_drm_panel_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_panel_funcs *funcs,
+ int connector_type)
+{
+ void *container;
+ struct drm_panel *panel;
+ int err;
+
+ if (!funcs) {
+ dev_warn(dev, "Missing funcs pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ panel = container + offset;
+ panel->container = container;
+ panel->funcs = funcs;
+ kref_init(&panel->refcount);
+
+ err = devm_add_action_or_reset(dev, drm_panel_put_void, panel);
+ if (err)
+ return ERR_PTR(err);
+
+ drm_panel_init(panel, dev, funcs, connector_type);
+
+ return container;
+}
+EXPORT_SYMBOL(__devm_drm_panel_alloc);
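
[Editor's note: panel drivers are expected to go through the type-safe wrapper macro rather than call this function directly. A hypothetical probe sketch, assuming the devm_drm_panel_alloc() convenience macro that supplies sizeof()/offsetof() for the container, plus a driver-defined my_panel_funcs:]

	struct my_panel {
		struct drm_panel panel;	/* embedded, not a pointer */
		struct gpio_desc *reset;
	};

	static int my_panel_probe(struct mipi_dsi_device *dsi)
	{
		struct my_panel *ctx;

		ctx = devm_drm_panel_alloc(&dsi->dev, struct my_panel, panel,
					   &my_panel_funcs,
					   DRM_MODE_CONNECTOR_DSI);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);

		/* ... hardware setup ... */

		drm_panel_add(&ctx->panel);
		return 0;
	}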
+
#ifdef CONFIG_OF
/**
* of_drm_find_panel - look up a panel using a device tree node
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index c554ad8f246b..7ac0fd5391fe 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -517,6 +517,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* ZOTAC Gaming Zone */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ZOTAC"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "G0A1W"),
+ },
+ .driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* One Mix 2S (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index b47ea25fdfaa..b4de79583805 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -7,6 +7,7 @@
*/
#include <linux/font.h>
+#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/iosys-map.h>
#include <linux/kdebug.h>
@@ -154,6 +155,90 @@ static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect
sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, fg_color);
}
+static void drm_panic_write_pixel16(void *vaddr, unsigned int offset, u16 color)
+{
+ u16 *p = vaddr + offset;
+
+ *p = color;
+}
+
+static void drm_panic_write_pixel24(void *vaddr, unsigned int offset, u32 color)
+{
+ u8 *p = vaddr + offset;
+
+ *p++ = color & 0xff;
+ color >>= 8;
+ *p++ = color & 0xff;
+ color >>= 8;
+ *p = color & 0xff;
+}
+
+static void drm_panic_write_pixel32(void *vaddr, unsigned int offset, u32 color)
+{
+ u32 *p = vaddr + offset;
+
+ *p = color;
+}
+
+static void drm_panic_write_pixel(void *vaddr, unsigned int offset, u32 color, unsigned int cpp)
+{
+ switch (cpp) {
+ case 2:
+ drm_panic_write_pixel16(vaddr, offset, color);
+ break;
+ case 3:
+ drm_panic_write_pixel24(vaddr, offset, color);
+ break;
+ case 4:
+ drm_panic_write_pixel32(vaddr, offset, color);
+ break;
+ default:
+ pr_debug_once("Can't blit with pixel width %d\n", cpp);
+ }
+}
+
+/*
+ * The scanout buffer pages are not mapped, so for each pixel,
+ * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
+ * Try to keep the map from the previous pixel, to avoid too much map/unmap.
+ */
+static void drm_panic_blit_page(struct page **pages, unsigned int dpitch,
+ unsigned int cpp, const u8 *sbuf8,
+ unsigned int spitch, struct drm_rect *clip,
+ unsigned int scale, u32 fg32)
+{
+ unsigned int y, x;
+ unsigned int page = ~0;
+ unsigned int height = drm_rect_height(clip);
+ unsigned int width = drm_rect_width(clip);
+ void *vaddr = NULL;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
+ unsigned int new_page;
+ unsigned int offset;
+
+ offset = (y + clip->y1) * dpitch + (x + clip->x1) * cpp;
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != page) {
+ if (!pages[new_page])
+ continue;
+ if (vaddr)
+ kunmap_local(vaddr);
+ page = new_page;
+ vaddr = kmap_local_page_try_from_panic(pages[page]);
+ }
+ if (vaddr)
+ drm_panic_write_pixel(vaddr, offset, fg32, cpp);
+ }
+ }
+ }
+ if (vaddr)
+ kunmap_local(vaddr);
+}
+
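
[Editor's note: the page/offset split used above is plain integer arithmetic on the linear framebuffer offset; in isolation, as standalone C assuming 4 KiB pages and the pitch/cpp values in the comments:]

	#include <stdio.h>

	#define PAGE_SHIFT	12	/* assumed 4 KiB pages */
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	int main(void)
	{
		/* pixel (x = 100, y = 50), 4 bytes per pixel, 4096-byte pitch */
		unsigned int offset = 50 * 4096 + 100 * 4;
		unsigned int page = offset >> PAGE_SHIFT;	/* 50 */
		unsigned int in_page = offset % PAGE_SIZE;	/* 400 */

		printf("page %u, in-page offset %u\n", page, in_page);
		return 0;
	}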
/*
* drm_panic_blit - convert a monochrome image to a linear framebuffer
* @sb: destination scanout buffer
@@ -177,6 +262,10 @@ static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip,
if (sb->set_pixel)
return drm_panic_blit_pixel(sb, clip, sbuf8, spitch, scale, fg_color);
+ if (sb->pages)
+ return drm_panic_blit_page(sb->pages, sb->pitch[0], sb->format->cpp[0],
+ sbuf8, spitch, clip, scale, fg_color);
+
map = sb->map[0];
iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]);
@@ -209,6 +298,35 @@ static void drm_panic_fill_pixel(struct drm_scanout_buffer *sb,
sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, color);
}
+static void drm_panic_fill_page(struct page **pages, unsigned int dpitch,
+ unsigned int cpp, struct drm_rect *clip,
+ u32 color)
+{
+ unsigned int y, x;
+ unsigned int page = ~0;
+ void *vaddr = NULL;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ for (x = clip->x1; x < clip->x2; x++) {
+ unsigned int new_page;
+ unsigned int offset;
+
+ offset = y * dpitch + x * cpp;
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != page) {
+ if (vaddr)
+ kunmap_local(vaddr);
+ page = new_page;
+ vaddr = kmap_local_page_try_from_panic(pages[page]);
+ }
+			if (vaddr)
+				drm_panic_write_pixel(vaddr, offset, color, cpp);
+ }
+ }
+ if (vaddr)
+ kunmap_local(vaddr);
+}
+
/*
* drm_panic_fill - Fill a rectangle with a color
* @sb: destination scanout buffer
@@ -225,6 +343,10 @@ static void drm_panic_fill(struct drm_scanout_buffer *sb, struct drm_rect *clip,
if (sb->set_pixel)
return drm_panic_fill_pixel(sb, clip, color);
+ if (sb->pages)
+ return drm_panic_fill_page(sb->pages, sb->pitch[0], sb->format->cpp[0],
+ clip, color);
+
map = sb->map[0];
iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]);
@@ -709,16 +831,24 @@ static void draw_panic_plane(struct drm_plane *plane, const char *description)
if (!drm_panic_trylock(plane->dev, flags))
return;
+ ret = plane->helper_private->get_scanout_buffer(plane, &sb);
+
+ if (ret || !drm_panic_is_format_supported(sb.format))
+ goto unlock;
+
+ /* One of these should be set, or it can't draw pixels */
+ if (!sb.set_pixel && !sb.pages && iosys_map_is_null(&sb.map[0]))
+ goto unlock;
+
drm_panic_set_description(description);
- ret = plane->helper_private->get_scanout_buffer(plane, &sb);
+ draw_panic_dispatch(&sb);
+ if (plane->helper_private->panic_flush)
+ plane->helper_private->panic_flush(plane);
- if (!ret && drm_panic_is_format_supported(sb.format)) {
- draw_panic_dispatch(&sb);
- if (plane->helper_private->panic_flush)
- plane->helper_private->panic_flush(plane);
- }
drm_panic_clear_description();
+
+unlock:
drm_panic_unlock(plane->dev, flags);
}
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index f2a99681b998..dd55b1cb764d 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -5,7 +5,7 @@
//! It is called from a panic handler, so it shouldn't allocate memory and
//! does all the work on the stack or on the provided buffers. For
//! simplification, it only supports low error correction, and applies the
-//! first mask (checkerboard). It will draw the smallest QRcode that can
+//! first mask (checkerboard). It will draw the smallest QR code that can
//! contain the string passed as parameter. To get the most compact
//! QR code, the start of the URL is encoded as binary, and the
//! compressed kmsg is encoded as numeric.
@@ -315,7 +315,7 @@ impl Segment<'_> {
}
}
- // Returns the size of the length field in bits, depending on QR Version.
+ /// Returns the size of the length field in bits, depending on QR Version.
fn length_bits_count(&self, version: Version) -> usize {
let Version(v) = version;
match self {
@@ -331,7 +331,7 @@ impl Segment<'_> {
}
}
- // Number of characters in the segment.
+ /// Number of characters in the segment.
fn character_count(&self) -> usize {
match self {
Segment::Binary(data) => data.len(),
@@ -366,8 +366,48 @@ impl Segment<'_> {
SegmentIterator {
segment: self,
offset: 0,
- carry: 0,
- carry_len: 0,
+ decfifo: Default::default(),
+ }
+ }
+}
+
+/// Max fifo size is 17 (max push) + 2 (max remaining)
+const MAX_FIFO_SIZE: usize = 19;
+
+/// A simple Decimal digit FIFO
+#[derive(Default)]
+struct DecFifo {
+ decimals: [u8; MAX_FIFO_SIZE],
+ len: usize,
+}
+
+impl DecFifo {
+ fn push(&mut self, data: u64, len: usize) {
+ let mut chunk = data;
+ for i in (0..self.len).rev() {
+ self.decimals[i + len] = self.decimals[i];
+ }
+ for i in 0..len {
+ self.decimals[i] = (chunk % 10) as u8;
+ chunk /= 10;
+ }
+ self.len += len;
+ }
+
+ /// Pop 3 decimal digits from the FIFO
+ fn pop3(&mut self) -> Option<(u16, usize)> {
+ if self.len == 0 {
+ None
+ } else {
+ let poplen = 3.min(self.len);
+ self.len -= poplen;
+ let mut out = 0;
+ let mut exp = 1;
+ for i in 0..poplen {
+ out += self.decimals[self.len + i] as u16 * exp;
+ exp *= 10;
+ }
+ Some((out, NUM_CHARS_BITS[poplen]))
}
}
}
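
[Editor's note: the FIFO exists because QR numeric mode packs decimal digits in groups: 3 digits fit in 10 bits (2^10 >= 1000), 2 in 7 bits, 1 in 4 bits, and up to 17 digits arrive per 7-byte chunk (2^56 - 1 has 17 digits) while at most 2 may be left over. The grouping in isolation, as a standalone C sketch; the Rust above is the authoritative version:]

	#include <stdio.h>

	int main(void)
	{
		/* bits per group size: 2^4 >= 10, 2^7 >= 100, 2^10 >= 1000 */
		static const unsigned int bits[4] = { 0, 4, 7, 10 };
		const char *digits = "8675309";
		size_t len = 7, i = 0;

		while (i < len) {
			unsigned int group = 0, n = 0;

			while (n < 3 && i < len) {
				group = group * 10 + (digits[i++] - '0');
				n++;
			}
			/* prints: 867 -> 10 bits, 530 -> 10 bits, 9 -> 4 bits */
			printf("%u -> %u bits\n", group, bits[n]);
		}
		return 0;
	}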
@@ -375,8 +415,7 @@ impl Segment<'_> {
struct SegmentIterator<'a> {
segment: &'a Segment<'a>,
offset: usize,
- carry: u64,
- carry_len: usize,
+ decfifo: DecFifo,
}
impl Iterator for SegmentIterator<'_> {
@@ -394,31 +433,17 @@ impl Iterator for SegmentIterator<'_> {
}
}
Segment::Numeric(data) => {
- if self.carry_len < 3 && self.offset < data.len() {
- // If there are less than 3 decimal digits in the carry,
- // take the next 7 bytes of input, and add them to the carry.
+ if self.decfifo.len < 3 && self.offset < data.len() {
+ // If there are less than 3 decimal digits in the fifo,
+ // take the next 7 bytes of input, and push them to the fifo.
let mut buf = [0u8; 8];
let len = 7.min(data.len() - self.offset);
buf[..len].copy_from_slice(&data[self.offset..self.offset + len]);
let chunk = u64::from_le_bytes(buf);
- let pow = u64::pow(10, BYTES_TO_DIGITS[len] as u32);
- self.carry = chunk + self.carry * pow;
+ self.decfifo.push(chunk, BYTES_TO_DIGITS[len]);
self.offset += len;
- self.carry_len += BYTES_TO_DIGITS[len];
- }
- match self.carry_len {
- 0 => None,
- len => {
- // take the next 3 decimal digits of the carry
- // and return 10bits of numeric data.
- let out_len = 3.min(len);
- self.carry_len -= out_len;
- let pow = u64::pow(10, self.carry_len as u32);
- let out = (self.carry / pow) as u16;
- self.carry = self.carry % pow;
- Some((out, NUM_CHARS_BITS[out_len]))
- }
}
+ self.decfifo.pop3()
}
}
}
@@ -569,8 +594,8 @@ struct EncodedMsgIterator<'a> {
impl Iterator for EncodedMsgIterator<'_> {
type Item = u8;
- // Send the bytes in interleaved mode, first byte of first block of group1,
- // then first byte of second block of group1, ...
+ /// Send the bytes in interleaved mode, first byte of first block of group1,
+ /// then first byte of second block of group1, ...
fn next(&mut self) -> Option<Self::Item> {
let em = self.em;
let blocks = em.g1_blocks + em.g2_blocks;
@@ -638,7 +663,7 @@ impl QrImage<'_> {
self.data.fill(0);
}
- // Set pixel to light color.
+ /// Set pixel to light color.
fn set(&mut self, x: u8, y: u8) {
let off = y as usize * self.stride as usize + x as usize / 8;
let mut v = self.data[off];
@@ -646,13 +671,13 @@ impl QrImage<'_> {
self.data[off] = v;
}
- // Invert a module color.
+ /// Invert a module color.
fn xor(&mut self, x: u8, y: u8) {
let off = y as usize * self.stride as usize + x as usize / 8;
self.data[off] ^= 0x80 >> (x % 8);
}
- // Draw a light square at (x, y) top left corner.
+ /// Draw a light square at (x, y) top left corner.
fn draw_square(&mut self, x: u8, y: u8, size: u8) {
for k in 0..size {
self.set(x + k, y);
@@ -784,7 +809,7 @@ impl QrImage<'_> {
vinfo != 0 && ((x >= pos && x < pos + 3 && y < 6) || (y >= pos && y < pos + 3 && x < 6))
}
- // Returns true if the module is reserved (Not usable for data and EC).
+ /// Returns true if the module is reserved (Not usable for data and EC).
fn is_reserved(&self, x: u8, y: u8) -> bool {
self.is_alignment(x, y)
|| self.is_finder(x, y)
@@ -793,13 +818,14 @@ impl QrImage<'_> {
|| self.is_version_info(x, y)
}
- // Last module to draw, at bottom left corner.
+ /// Last module to draw, at bottom left corner.
fn is_last(&self, x: u8, y: u8) -> bool {
x == 0 && y == self.width - 1
}
- // Move to the next module according to QR code order.
- // From bottom right corner, to bottom left corner.
+ /// Move to the next module according to QR code order.
+ ///
+ /// From bottom right corner, to bottom left corner.
fn next(&self, x: u8, y: u8) -> (u8, u8) {
let x_adj = if x <= 6 { x + 1 } else { x };
let column_type = (self.width - x_adj) % 4;
@@ -812,7 +838,7 @@ impl QrImage<'_> {
}
}
- // Find next module that can hold data.
+ /// Find next module that can hold data.
fn next_available(&self, x: u8, y: u8) -> (u8, u8) {
let (mut x, mut y) = self.next(x, y);
while self.is_reserved(x, y) && !self.is_last(x, y) {
@@ -841,7 +867,7 @@ impl QrImage<'_> {
}
}
- // Apply checkerboard mask to all non-reserved modules.
+ /// Apply checkerboard mask to all non-reserved modules.
fn apply_mask(&mut self) {
for x in 0..self.width {
for y in 0..self.width {
@@ -852,7 +878,7 @@ impl QrImage<'_> {
}
}
- // Draw the QR code with the provided data iterator.
+ /// Draw the QR code with the provided data iterator.
fn draw_all(&mut self, data: impl Iterator<Item = u8>) {
// First clear the table, as it may have already some data.
self.clear();
@@ -876,7 +902,7 @@ impl QrImage<'_> {
/// will be encoded as binary segment, otherwise it will be encoded
/// efficiently as a numeric segment, and appended to the URL.
/// * `data_len`: Length of the data, that needs to be encoded, must be less
-/// than data_size.
+/// than `data_size`.
/// * `data_size`: Size of data buffer, it should be at least 4071 bytes to hold
/// a V40 QR code. It will then be overwritten with the QR code image.
/// * `tmp`: A temporary buffer that the QR code encoder will use, to write the
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index a28b22fdd7a4..04992dfd4c79 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -141,6 +141,14 @@
* various bugs in this area with inconsistencies between the capability
* flag and per-plane properties.
*
+ * IN_FORMATS_ASYNC:
+ * Blob property which contains the set of buffer format and modifier
+ * pairs supported by this plane for asynchronous flips. The blob is a struct
+ * drm_format_modifier_blob. Userspace cannot change this property. This is an
+ *     optional property; if it is not present, userspace should expect the
+ *     atomic ioctl to fail when the format/modifier pair is not supported by
+ *     that plane for asynchronous flips.
+ *
* SIZE_HINTS:
* Blob property which contains the set of recommended plane size
* which can used for simple "cursor like" use cases (eg. no scaling).
@@ -185,9 +193,13 @@ modifiers_ptr(struct drm_format_modifier_blob *blob)
return (struct drm_format_modifier *)(((char *)blob) + blob->modifiers_offset);
}
-static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane)
+static struct drm_property_blob *create_in_format_blob(struct drm_device *dev,
+ struct drm_plane *plane,
+ bool (*format_mod_supported)
+ (struct drm_plane *plane,
+ u32 format,
+ u64 modifier))
{
- const struct drm_mode_config *config = &dev->mode_config;
struct drm_property_blob *blob;
struct drm_format_modifier *mod;
size_t blob_size, formats_size, modifiers_size;
@@ -213,7 +225,7 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
blob = drm_property_create_blob(dev, blob_size, NULL);
if (IS_ERR(blob))
- return -1;
+ return NULL;
blob_data = blob->data;
blob_data->version = FORMAT_BLOB_CURRENT;
@@ -229,10 +241,10 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
mod = modifiers_ptr(blob_data);
for (i = 0; i < plane->modifier_count; i++) {
for (j = 0; j < plane->format_count; j++) {
- if (!plane->funcs->format_mod_supported ||
- plane->funcs->format_mod_supported(plane,
- plane->format_types[j],
- plane->modifiers[i])) {
+ if (!format_mod_supported ||
+ format_mod_supported(plane,
+ plane->format_types[j],
+ plane->modifiers[i])) {
mod->formats |= 1ULL << j;
}
}
@@ -243,10 +255,7 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
mod++;
}
- drm_object_attach_property(&plane->base, config->modifiers_property,
- blob->base.id);
-
- return 0;
+ return blob;
}
/**
@@ -358,6 +367,7 @@ static int __drm_universal_plane_init(struct drm_device *dev,
const char *name, va_list ap)
{
struct drm_mode_config *config = &dev->mode_config;
+ struct drm_property_blob *blob;
static const uint64_t default_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
};
@@ -469,8 +479,24 @@ static int __drm_universal_plane_init(struct drm_device *dev,
drm_plane_create_hotspot_properties(plane);
}
- if (format_modifier_count)
- create_in_format_blob(dev, plane);
+ if (format_modifier_count) {
+ blob = create_in_format_blob(dev, plane,
+ plane->funcs->format_mod_supported);
+ if (!IS_ERR(blob))
+ drm_object_attach_property(&plane->base,
+ config->modifiers_property,
+ blob->base.id);
+ }
+
+ if (plane->funcs->format_mod_supported_async) {
+ blob = create_in_format_blob(dev, plane,
+ plane->funcs->format_mod_supported_async);
+ if (!IS_ERR(blob))
+ drm_object_attach_property(&plane->base,
+ config->async_modifiers_property,
+ blob->base.id);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index bdb51c8f262e..d828502268b8 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -707,7 +707,7 @@ int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
- return drm_gem_vmap(obj, map);
+ return drm_gem_vmap_locked(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
@@ -723,7 +723,7 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
- drm_gem_vunmap(obj, map);
+ drm_gem_vunmap_locked(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
@@ -804,7 +804,6 @@ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
- .cache_sgt_mapping = true,
.attach = drm_gem_map_attach,
.detach = drm_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
@@ -998,7 +997,7 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev);
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
- return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
+ return drm_gem_prime_import_dev(dev, dma_buf, drm_dev_dma_dev(dev));
}
EXPORT_SYMBOL(drm_gem_prime_import);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 7ba16323e7c2..6b3541159c0f 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -958,15 +958,16 @@ static void drm_kms_helper_poll_init_release(struct drm_device *dev, void *res)
* cleaned up when the DRM device goes away.
*
* See drm_kms_helper_poll_init() for more information.
- *
- * Returns:
- * 0 on success, or a negative errno code otherwise.
*/
-int drmm_kms_helper_poll_init(struct drm_device *dev)
+void drmm_kms_helper_poll_init(struct drm_device *dev)
{
+ int ret;
+
drm_kms_helper_poll_init(dev);
- return drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev);
+ ret = drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev);
+ if (ret)
+ drm_warn(dev, "Connector status will not be updated, error %d\n", ret);
}
EXPORT_SYMBOL(drmm_kms_helper_poll_init);
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 4f2ab8a7b50f..636cd83ca29e 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -741,7 +741,7 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
}
static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
- int fd, int handle)
+ int fd, int handle, u64 point)
{
struct dma_fence *fence = sync_file_get_fence(fd);
struct drm_syncobj *syncobj;
@@ -755,14 +755,24 @@ static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
return -ENOENT;
}
- drm_syncobj_replace_fence(syncobj, fence);
+ if (point) {
+ struct dma_fence_chain *chain = dma_fence_chain_alloc();
+
+ if (!chain)
+ return -ENOMEM;
+
+ drm_syncobj_add_point(syncobj, chain, fence, point);
+ } else {
+ drm_syncobj_replace_fence(syncobj, fence);
+ }
+
dma_fence_put(fence);
drm_syncobj_put(syncobj);
return 0;
}
static int drm_syncobj_export_sync_file(struct drm_file *file_private,
- int handle, int *p_fd)
+ int handle, u64 point, int *p_fd)
{
int ret;
struct dma_fence *fence;
@@ -772,7 +782,7 @@ static int drm_syncobj_export_sync_file(struct drm_file *file_private,
if (fd < 0)
return fd;
- ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
+ ret = drm_syncobj_find_fence(file_private, handle, point, 0, &fence);
if (ret)
goto err_put_fd;
@@ -869,6 +879,9 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_handle *args = data;
+ unsigned int valid_flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE |
+ DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE;
+ u64 point = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
@@ -876,13 +889,18 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
if (args->pad)
return -EINVAL;
- if (args->flags != 0 &&
- args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
+ if (args->flags & ~valid_flags)
return -EINVAL;
+ if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE)
+ point = args->point;
+
if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
return drm_syncobj_export_sync_file(file_private, args->handle,
- &args->fd);
+ point, &args->fd);
+
+ if (args->point)
+ return -EINVAL;
return drm_syncobj_handle_to_fd(file_private, args->handle,
&args->fd);
@@ -893,6 +911,9 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_handle *args = data;
+ unsigned int valid_flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE |
+ DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE;
+ u64 point = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
@@ -900,14 +921,20 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
if (args->pad)
return -EINVAL;
- if (args->flags != 0 &&
- args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
+ if (args->flags & ~valid_flags)
return -EINVAL;
+ if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE)
+ point = args->point;
+
if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
return drm_syncobj_import_sync_file_fence(file_private,
args->fd,
- args->handle);
+ args->handle,
+ point);
+
+ if (args->point)
+ return -EINVAL;
return drm_syncobj_fd_to_handle(file_private, args->fd,
&args->handle);
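On the userspace side, the new TIMELINE flags make sync_file interop point-aware. A minimal sketch, assuming the uAPI additions from this series (the point member in struct drm_syncobj_handle and the two *_FLAGS_TIMELINE flags):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>

	/* Materialize a sync_file's fence at point 5 of a timeline syncobj. */
	static int import_at_point(int drm_fd, uint32_t syncobj, int sync_file_fd)
	{
		struct drm_syncobj_handle args = {
			.handle = syncobj,
			.flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE |
				 DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE,
			.fd = sync_file_fd,
			.point = 5,
		};

		return ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
	}

	/* Export the fence at point 5 of a timeline syncobj as a sync_file fd. */
	static int export_at_point(int drm_fd, uint32_t syncobj)
	{
		struct drm_syncobj_handle args = {
			.handle = syncobj,
			.flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE |
				 DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE,
			.point = 5,
		};

		if (ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args))
			return -1;
		return args.fd;
	}

Note that a zero point keeps the old binary-syncobj semantics, which is why a non-zero point without the TIMELINE flag is rejected with -EINVAL in the hunks above.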
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 42e57d142554..917ad527c961 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -39,7 +39,7 @@ int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
{
- if (!obj->import_attach) {
+ if (!drm_gem_is_imported(obj)) {
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
mutex_lock(&etnaviv_obj->lock);
@@ -51,7 +51,7 @@ int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
{
- if (!obj->import_attach) {
+ if (!drm_gem_is_imported(obj)) {
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
mutex_lock(&etnaviv_obj->lock);
@@ -65,7 +65,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr);
if (etnaviv_obj->vaddr)
- dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map);
+ dma_buf_vunmap_unlocked(etnaviv_obj->base.dma_buf, &map);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
@@ -82,7 +82,7 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
lockdep_assert_held(&etnaviv_obj->lock);
- ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map);
+ ret = dma_buf_vmap(etnaviv_obj->base.dma_buf, &map);
if (ret)
return NULL;
return map.vaddr;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index b34ec6728337..29a8366513fa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -379,11 +379,11 @@ static int exynos_mic_probe(struct platform_device *pdev)
struct resource res;
int ret, i;
- mic = devm_kzalloc(dev, sizeof(*mic), GFP_KERNEL);
- if (!mic) {
+ mic = devm_drm_bridge_alloc(dev, struct exynos_mic, bridge, &mic_bridge_funcs);
+ if (IS_ERR(mic)) {
DRM_DEV_ERROR(dev,
"mic: Failed to allocate memory for MIC object\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(mic);
goto err;
}
@@ -421,7 +421,6 @@ static int exynos_mic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mic);
- mic->bridge.funcs = &mic_bridge_funcs;
mic->bridge.of_node = dev->of_node;
drm_bridge_add(&mic->bridge);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 03b076db9381..3bbfc1b56a65 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -260,7 +260,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
struct fsl_dcu_drm_device *fsl_dev;
struct drm_device *drm;
struct device *dev = &pdev->dev;
- struct resource *res;
void __iomem *base;
struct clk *pix_clk_in;
char pix_clk_name[32];
@@ -278,8 +277,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
return -ENODEV;
fsl_dev->soc = id->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
return ret;
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index 4d78b33eaa82..e6753282e70e 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -730,44 +730,3 @@ out:
return ret;
}
-
-int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
- unsigned long *pfn)
-{
- int ret;
- struct psb_mmu_pt *pt;
- uint32_t tmp;
- spinlock_t *lock = &pd->driver->lock;
-
- down_read(&pd->driver->sem);
- pt = psb_mmu_pt_map_lock(pd, virtual);
- if (!pt) {
- uint32_t *v;
-
- spin_lock(lock);
- v = kmap_atomic(pd->p);
- tmp = v[psb_mmu_pd_index(virtual)];
- kunmap_atomic(v);
- spin_unlock(lock);
-
- if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
- !(pd->invalid_pte & PSB_PTE_VALID)) {
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- *pfn = pd->invalid_pte >> PAGE_SHIFT;
- goto out;
- }
- tmp = pt->v[psb_mmu_pt_index(virtual)];
- if (!(tmp & PSB_PTE_VALID)) {
- ret = -EINVAL;
- } else {
- ret = 0;
- *pfn = tmp >> PAGE_SHIFT;
- }
- psb_mmu_pt_unmap_unlock(pt);
-out:
- up_read(&pd->driver->sem);
- return ret;
-}
diff --git a/drivers/gpu/drm/gma500/mmu.h b/drivers/gpu/drm/gma500/mmu.h
index d4b5720ef08e..e6d39703718c 100644
--- a/drivers/gpu/drm/gma500/mmu.h
+++ b/drivers/gpu/drm/gma500/mmu.h
@@ -71,8 +71,6 @@ extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
uint32_t start_pfn,
unsigned long address,
uint32_t num_pages, int type);
-extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
- unsigned long *pfn);
extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
unsigned long address, uint32_t num_pages,
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index de8ccfe9890f..ea9b41af0867 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -658,10 +658,3 @@ const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
.prepare = gma_crtc_prepare,
.commit = gma_crtc_commit,
};
-
-/* Not used yet */
-const struct gma_clock_funcs mrst_clock_funcs = {
- .clock = mrst_lvds_clock,
- .limit = mrst_limit,
- .pll_is_valid = gma_pll_is_valid,
-};
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 9dc9dcd1b09f..979ea8ecf0d5 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -182,7 +182,6 @@ struct gma_i2c_chan *gma_i2c_create(struct drm_device *dev, const u32 reg,
void gma_i2c_destroy(struct gma_i2c_chan *chan);
int psb_intel_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter);
-extern bool psb_intel_ddc_probe(struct i2c_adapter *adapter);
extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct psb_intel_mode_device *mode_dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
index 8be0ec340de5..45b10f30a2a9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_modes.c
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -12,37 +12,6 @@
#include "psb_intel_drv.h"
/**
- * psb_intel_ddc_probe
- * @adapter: Associated I2C adaptor
- */
-bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
-{
- u8 out_buf[] = { 0x0, 0x0 };
- u8 buf[2];
- int ret;
- struct i2c_msg msgs[] = {
- {
- .addr = 0x50,
- .flags = 0,
- .len = 1,
- .buf = out_buf,
- },
- {
- .addr = 0x50,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = buf,
- }
- };
-
- ret = i2c_transfer(adapter, msgs, 2);
- if (ret == 2)
- return true;
-
- return false;
-}
-
-/**
* psb_intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
* @adapter: Associated I2C adaptor
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index cb405771d6e2..5385a2126e45 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -309,21 +309,6 @@ out:
return ret;
}
-/*
- * FIXME: Dma-buf sharing requires DMA support by the importing device.
- * This function is a workaround to make USB devices work as well.
- * See todo.rst for how to fix the issue in the dma-buf framework.
- */
-static struct drm_gem_object *gud_gem_prime_import(struct drm_device *drm, struct dma_buf *dma_buf)
-{
- struct gud_device *gdrm = to_gud_device(drm);
-
- if (!gdrm->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(drm, dma_buf, gdrm->dmadev);
-}
-
static int gud_stats_debugfs(struct seq_file *m, void *data)
{
struct drm_debugfs_entry *entry = m->private;
@@ -376,7 +361,6 @@ static const struct drm_driver gud_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.fops = &gud_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = gud_gem_prime_import,
DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = "gud",
@@ -434,6 +418,7 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
size_t max_buffer_size = 0;
struct gud_device *gdrm;
struct drm_device *drm;
+ struct device *dma_dev;
u8 *formats_dev;
u32 *formats;
int ret, i;
@@ -609,17 +594,19 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
usb_set_intfdata(intf, gdrm);
- gdrm->dmadev = usb_intf_get_dma_device(intf);
- if (!gdrm->dmadev)
- dev_warn(dev, "buffer sharing not supported");
+ dma_dev = usb_intf_get_dma_device(intf);
+ if (dma_dev) {
+ drm_dev_set_dma_dev(drm, dma_dev);
+ put_device(dma_dev);
+ } else {
+ dev_warn(dev, "buffer sharing not supported"); /* not an error */
+ }
drm_debugfs_add_file(drm, "stats", gud_stats_debugfs, NULL);
ret = drm_dev_register(drm, 0);
- if (ret) {
- put_device(gdrm->dmadev);
+ if (ret)
return ret;
- }
drm_kms_helper_poll_init(drm);
@@ -638,8 +625,6 @@ static void gud_disconnect(struct usb_interface *interface)
drm_kms_helper_poll_fini(drm);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
- put_device(gdrm->dmadev);
- gdrm->dmadev = NULL;
}
static int gud_suspend(struct usb_interface *intf, pm_message_t message)
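The deleted gud_gem_prime_import() workaround is superseded by a core-level association between the DRM device and its DMA-capable device. A minimal sketch of the resulting pattern, assuming drm_dev_set_dma_dev() takes its own reference (which is why the USB helper's reference can be dropped right away) and that drm_dev_dma_dev() falls back to drm->dev when nothing was set:

	/* probe: hand the DMA-capable device to the DRM core once */
	struct device *dma_dev = usb_intf_get_dma_device(intf);

	if (dma_dev) {
		drm_dev_set_dma_dev(drm, dma_dev);	/* core keeps its own reference */
		put_device(dma_dev);			/* ours can be dropped immediately */
	}

	/* import: the stock drm_gem_prime_import() now maps the dma-buf against
	 * drm_dev_dma_dev(drm), so no per-driver .gem_prime_import hook is needed */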
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index 0d148a6f27aa..d6fb25388722 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -16,7 +16,6 @@
struct gud_device {
struct drm_device drm;
struct drm_simple_display_pipe pipe;
- struct device *dmadev;
struct work_struct work;
u32 flags;
const struct drm_format_info *xrgb8888_emulation_format;
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 77cfcf37ddd2..feff73cc0005 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -261,7 +261,7 @@ static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
else if (ctx.sgr.bytes != len)
ret = -EIO;
- destroy_timer_on_stack(&ctx.timer);
+ timer_destroy_on_stack(&ctx.timer);
return ret;
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index 95a4ed599d98..1f65c683282f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_i2c.o \
- dp/dp_aux.o dp/dp_link.o dp/dp_hw.o hibmc_drm_dp.o
+ dp/dp_aux.o dp/dp_link.o dp/dp_hw.o dp/dp_serdes.o hibmc_drm_dp.o \
+ hibmc_drm_debugfs.o
obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
index 0a903cce1fa9..8732cd1d8cb6 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
@@ -8,6 +8,7 @@
#include <drm/drm_print.h>
#include "dp_comm.h"
#include "dp_reg.h"
+#include "dp_hw.h"
#define HIBMC_AUX_CMD_REQ_LEN GENMASK(7, 4)
#define HIBMC_AUX_CMD_ADDR GENMASK(27, 8)
@@ -124,7 +125,8 @@ static int hibmc_dp_aux_parse_xfer(struct hibmc_dp_dev *dp, struct drm_dp_aux_ms
/* ret >= 0: ret is the size; ret < 0: ret is an error code */
static ssize_t hibmc_dp_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
- struct hibmc_dp_dev *dp = container_of(aux, struct hibmc_dp_dev, aux);
+ struct hibmc_dp *dp_priv = container_of(aux, struct hibmc_dp, aux);
+ struct hibmc_dp_dev *dp = dp_priv->dp_dev;
u32 aux_cmd;
int ret;
u32 val; /* val will be assigned at the beginning of readl_poll_timeout function */
@@ -151,14 +153,16 @@ static ssize_t hibmc_dp_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *
return hibmc_dp_aux_parse_xfer(dp, msg);
}
-void hibmc_dp_aux_init(struct hibmc_dp_dev *dp)
+void hibmc_dp_aux_init(struct hibmc_dp *dp)
{
- hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
- hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
- hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM,
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM,
HIBMC_DP_MIN_PULSE_NUM);
dp->aux.transfer = hibmc_dp_aux_xfer;
- dp->aux.is_remote = 0;
+ dp->aux.name = "HIBMC DRM dp aux";
+ dp->aux.drm_dev = dp->drm_dev;
drm_dp_aux_init(&dp->aux);
+ dp->dp_dev->aux = &dp->aux;
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
index 2c52a4476c4d..4add05c7f161 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
@@ -13,6 +13,8 @@
#include <linux/io.h>
#include <drm/display/drm_dp_helper.h>
+#include "dp_hw.h"
+
#define HIBMC_DP_LANE_NUM_MAX 2
struct hibmc_link_status {
@@ -32,12 +34,13 @@ struct hibmc_dp_link {
};
struct hibmc_dp_dev {
- struct drm_dp_aux aux;
+ struct drm_dp_aux *aux;
struct drm_device *dev;
void __iomem *base;
struct mutex lock; /* protects concurrent RW in hibmc_dp_reg_write_field() */
struct hibmc_dp_link link;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ void __iomem *serdes_base;
};
#define dp_field_modify(reg_value, mask, val) \
@@ -57,7 +60,10 @@ struct hibmc_dp_dev {
mutex_unlock(&_dp->lock); \
} while (0)
-void hibmc_dp_aux_init(struct hibmc_dp_dev *dp);
+void hibmc_dp_aux_init(struct hibmc_dp *dp);
int hibmc_dp_link_training(struct hibmc_dp_dev *dp);
+int hibmc_dp_serdes_init(struct hibmc_dp_dev *dp);
+int hibmc_dp_serdes_rate_switch(u8 rate, struct hibmc_dp_dev *dp);
+int hibmc_dp_serdes_set_tx_cfg(struct hibmc_dp_dev *dp, u8 train_set[HIBMC_DP_LANE_NUM_MAX]);
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
index 74dd9956144e..08f9e1caf7fc 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
@@ -15,5 +15,7 @@
#define HIBMC_DP_CLK_EN 0x7
#define HIBMC_DP_SYNC_EN_MASK 0x3
#define HIBMC_DP_LINK_RATE_CAL 27
+#define HIBMC_DP_SYNC_DELAY(lanes) ((lanes) == 0x2 ? 86 : 46)
+#define HIBMC_DP_INT_ENABLE 0xc
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
index a8d543881c09..8f0daec7d174 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
@@ -72,6 +72,9 @@ static void hibmc_dp_set_sst(struct hibmc_dp_dev *dp, struct drm_display_mode *m
HIBMC_DP_CFG_STREAM_HTOTAL_SIZE, htotal_size);
hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_HORIZONTAL_SIZE,
HIBMC_DP_CFG_STREAM_HBLANK_SIZE, hblank_size);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_PACKET,
+ HIBMC_DP_CFG_STREAM_SYNC_CALIBRATION,
+ HIBMC_DP_SYNC_DELAY(dp->link.cap.lanes));
}
static void hibmc_dp_link_cfg(struct hibmc_dp_dev *dp, struct drm_display_mode *mode)
@@ -151,6 +154,7 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp)
{
struct drm_device *drm_dev = dp->drm_dev;
struct hibmc_dp_dev *dp_dev;
+ int ret;
dp_dev = devm_kzalloc(drm_dev->dev, sizeof(struct hibmc_dp_dev), GFP_KERNEL);
if (!dp_dev)
@@ -163,10 +167,14 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp)
dp_dev->dev = drm_dev;
dp_dev->base = dp->mmio + HIBMC_DP_OFFSET;
- hibmc_dp_aux_init(dp_dev);
+ hibmc_dp_aux_init(dp);
+
+ ret = hibmc_dp_serdes_init(dp_dev);
+ if (ret)
+ return ret;
dp_dev->link.cap.lanes = 0x2;
- dp_dev->link.cap.link_rate = DP_LINK_BW_2_7;
+ dp_dev->link.cap.link_rate = DP_LINK_BW_8_1;
/* hdcp data */
writel(HIBMC_DP_HDCP, dp_dev->base + HIBMC_DP_HDCP_CFG);
@@ -181,6 +189,36 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp)
return 0;
}
+void hibmc_dp_enable_int(struct hibmc_dp *dp)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+
+ writel(HIBMC_DP_INT_ENABLE, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+}
+
+void hibmc_dp_disable_int(struct hibmc_dp *dp)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+
+ writel(0, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+ writel(HIBMC_DP_INT_RST, dp_dev->base + HIBMC_DP_INTR_ORIGINAL_STATUS);
+}
+
+void hibmc_dp_hpd_cfg(struct hibmc_dp *dp)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM, 0x9);
+ writel(HIBMC_DP_HDCP, dp_dev->base + HIBMC_DP_HDCP_CFG);
+ writel(0, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+ writel(HIBMC_DP_INT_RST, dp_dev->base + HIBMC_DP_INTR_ORIGINAL_STATUS);
+ writel(HIBMC_DP_INT_ENABLE, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+ writel(HIBMC_DP_DPTX_RST, dp_dev->base + HIBMC_DP_DPTX_RST_CTRL);
+ writel(HIBMC_DP_CLK_EN, dp_dev->base + HIBMC_DP_DPTX_CLK_CTRL);
+}
+
void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable)
{
struct hibmc_dp_dev *dp_dev = dp->dp_dev;
@@ -218,3 +256,52 @@ int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode)
return 0;
}
+
+void hibmc_dp_reset_link(struct hibmc_dp *dp)
+{
+ dp->dp_dev->link.status.clock_recovered = false;
+ dp->dp_dev->link.status.channel_equalized = false;
+}
+
+static const struct hibmc_dp_color_raw g_rgb_raw[] = {
+ {CBAR_COLOR_BAR, 0x000, 0x000, 0x000},
+ {CBAR_WHITE, 0xfff, 0xfff, 0xfff},
+ {CBAR_RED, 0xfff, 0x000, 0x000},
+ {CBAR_ORANGE, 0xfff, 0x800, 0x000},
+ {CBAR_YELLOW, 0xfff, 0xfff, 0x000},
+ {CBAR_GREEN, 0x000, 0xfff, 0x000},
+ {CBAR_CYAN, 0x000, 0x800, 0x800},
+ {CBAR_BLUE, 0x000, 0x000, 0xfff},
+ {CBAR_PURPLE, 0x800, 0x000, 0x800},
+ {CBAR_BLACK, 0x000, 0x000, 0x000},
+};
+
+void hibmc_dp_set_cbar(struct hibmc_dp *dp, const struct hibmc_dp_cbar_cfg *cfg)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+ struct hibmc_dp_color_raw raw_data;
+
+ if (cfg->enable) {
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(9),
+ cfg->self_timing);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, GENMASK(8, 1),
+ cfg->dynamic_rate);
+ if (cfg->pattern == CBAR_COLOR_BAR) {
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(10), 0);
+ } else {
+ raw_data = g_rgb_raw[cfg->pattern];
+ drm_dbg_dp(dp->drm_dev, "r:%x g:%x b:%x\n", raw_data.r_value,
+ raw_data.g_value, raw_data.b_value);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(10), 1);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, GENMASK(23, 12),
+ raw_data.r_value);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL1, GENMASK(23, 12),
+ raw_data.g_value);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL1, GENMASK(11, 0),
+ raw_data.b_value);
+ }
+ }
+
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(0), cfg->enable);
+ writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL);
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
index 4dc13b3d9875..665f5b166dfb 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
@@ -10,19 +10,55 @@
#include <drm/drm_encoder.h>
#include <drm/drm_connector.h>
#include <drm/drm_print.h>
+#include <drm/display/drm_dp_helper.h>
struct hibmc_dp_dev;
+enum hibmc_dp_cbar_pattern {
+ CBAR_COLOR_BAR,
+ CBAR_WHITE,
+ CBAR_RED,
+ CBAR_ORANGE,
+ CBAR_YELLOW,
+ CBAR_GREEN,
+ CBAR_CYAN,
+ CBAR_BLUE,
+ CBAR_PURPLE,
+ CBAR_BLACK,
+};
+
+struct hibmc_dp_color_raw {
+ enum hibmc_dp_cbar_pattern pattern;
+ u32 r_value;
+ u32 g_value;
+ u32 b_value;
+};
+
+struct hibmc_dp_cbar_cfg {
+ u8 enable;
+ u8 self_timing;
+ u8 dynamic_rate; /* 0:static, 1-255(frame):dynamic */
+ enum hibmc_dp_cbar_pattern pattern;
+};
+
struct hibmc_dp {
struct hibmc_dp_dev *dp_dev;
struct drm_device *drm_dev;
struct drm_encoder encoder;
struct drm_connector connector;
void __iomem *mmio;
+ struct drm_dp_aux aux;
+ struct hibmc_dp_cbar_cfg cfg;
+ u32 irq_status;
};
int hibmc_dp_hw_init(struct hibmc_dp *dp);
int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode);
void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable);
+void hibmc_dp_set_cbar(struct hibmc_dp *dp, const struct hibmc_dp_cbar_cfg *cfg);
+void hibmc_dp_reset_link(struct hibmc_dp *dp);
+void hibmc_dp_hpd_cfg(struct hibmc_dp *dp);
+void hibmc_dp_enable_int(struct hibmc_dp *dp);
+void hibmc_dp_disable_int(struct hibmc_dp *dp);
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
index f6355c16cc0a..74f7832ea53e 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
@@ -9,6 +9,22 @@
#define HIBMC_EQ_MAX_RETRY 5
+static inline int hibmc_dp_get_serdes_rate_cfg(struct hibmc_dp_dev *dp)
+{
+ switch (dp->link.cap.link_rate) {
+ case DP_LINK_BW_1_62:
+ return DP_SERDES_BW_1_62;
+ case DP_LINK_BW_2_7:
+ return DP_SERDES_BW_2_7;
+ case DP_LINK_BW_5_4:
+ return DP_SERDES_BW_5_4;
+ case DP_LINK_BW_8_1:
+ return DP_SERDES_BW_8_1;
+ default:
+ return -EINVAL;
+ }
+}
+
static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
{
u8 buf[2];
@@ -26,7 +42,7 @@ static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
/* set rate and lane count */
buf[0] = dp->link.cap.link_rate;
buf[1] = DP_LANE_COUNT_ENHANCED_FRAME_EN | dp->link.cap.lanes;
- ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
+ ret = drm_dp_dpcd_write(dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
if (ret != sizeof(buf)) {
drm_dbg_dp(dp->dev, "dp aux write link rate and lanes failed, ret: %d\n", ret);
return ret >= 0 ? -EIO : ret;
@@ -35,17 +51,13 @@ static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
/* set 8b/10b and downspread */
buf[0] = DP_SPREAD_AMP_0_5;
buf[1] = DP_SET_ANSI_8B10B;
- ret = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
+ ret = drm_dp_dpcd_write(dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
if (ret != sizeof(buf)) {
drm_dbg_dp(dp->dev, "dp aux write 8b/10b and downspread failed, ret: %d\n", ret);
return ret >= 0 ? -EIO : ret;
}
- ret = drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd);
- if (ret)
- drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
-
- return ret;
+ return 0;
}
static int hibmc_dp_link_set_pattern(struct hibmc_dp_dev *dp, int pattern)
@@ -84,7 +96,7 @@ static int hibmc_dp_link_set_pattern(struct hibmc_dp_dev *dp, int pattern)
hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_PAT_SEL, val);
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_PATTERN_SET, &buf, sizeof(buf));
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_PATTERN_SET, &buf, sizeof(buf));
if (ret != sizeof(buf)) {
drm_dbg_dp(dp->dev, "dp aux write training pattern set failed\n");
return ret >= 0 ? -EIO : ret;
@@ -108,9 +120,13 @@ static int hibmc_dp_link_training_cr_pre(struct hibmc_dp_dev *dp)
return ret;
for (i = 0; i < dp->link.cap.lanes; i++)
- train_set[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ train_set[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+
+ ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set);
+ if (ret)
+ return ret;
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->link.cap.lanes);
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->link.cap.lanes);
if (ret != dp->link.cap.lanes) {
drm_dbg_dp(dp->dev, "dp aux write training lane set failed\n");
return ret >= 0 ? -EIO : ret;
@@ -137,21 +153,29 @@ static bool hibmc_dp_link_get_adjust_train(struct hibmc_dp_dev *dp,
return false;
}
-static inline int hibmc_dp_link_reduce_rate(struct hibmc_dp_dev *dp)
+static int hibmc_dp_link_reduce_rate(struct hibmc_dp_dev *dp)
{
+ int ret;
+
switch (dp->link.cap.link_rate) {
case DP_LINK_BW_2_7:
dp->link.cap.link_rate = DP_LINK_BW_1_62;
- return 0;
+ break;
case DP_LINK_BW_5_4:
dp->link.cap.link_rate = DP_LINK_BW_2_7;
- return 0;
+ break;
case DP_LINK_BW_8_1:
dp->link.cap.link_rate = DP_LINK_BW_5_4;
- return 0;
+ break;
default:
return -EINVAL;
}
+
+ ret = hibmc_dp_get_serdes_rate_cfg(dp);
+ if (ret < 0)
+ return ret;
+
+ return hibmc_dp_serdes_rate_switch(ret, dp);
}
static inline int hibmc_dp_link_reduce_lane(struct hibmc_dp_dev *dp)
@@ -159,6 +183,7 @@ static inline int hibmc_dp_link_reduce_lane(struct hibmc_dp_dev *dp)
switch (dp->link.cap.lanes) {
case 0x2:
dp->link.cap.lanes--;
+ drm_dbg_dp(dp->dev, "dp link training reduce to 1 lane\n");
break;
case 0x1:
drm_err(dp->dev, "dp link training reduce lane failed, already reach minimum\n");
@@ -185,10 +210,10 @@ static int hibmc_dp_link_training_cr(struct hibmc_dp_dev *dp)
voltage_tries = 1;
for (cr_tries = 0; cr_tries < 80; cr_tries++) {
- drm_dp_link_train_clock_recovery_delay(&dp->aux, dp->dpcd);
+ drm_dp_link_train_clock_recovery_delay(dp->aux, dp->dpcd);
- ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
- if (ret != DP_LINK_STATUS_SIZE) {
+ ret = drm_dp_dpcd_read_link_status(dp->aux, lane_status);
+ if (ret) {
drm_err(dp->dev, "Get lane status failed\n");
return ret;
}
@@ -206,7 +231,12 @@ static int hibmc_dp_link_training_cr(struct hibmc_dp_dev *dp)
}
level_changed = hibmc_dp_link_get_adjust_train(dp, lane_status);
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->link.train_set,
+
+ ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET, dp->link.train_set,
dp->link.cap.lanes);
if (ret != dp->link.cap.lanes) {
drm_dbg_dp(dp->dev, "Update link training failed\n");
@@ -233,10 +263,10 @@ static int hibmc_dp_link_training_channel_eq(struct hibmc_dp_dev *dp)
return ret;
for (eq_tries = 0; eq_tries < HIBMC_EQ_MAX_RETRY; eq_tries++) {
- drm_dp_link_train_channel_eq_delay(&dp->aux, dp->dpcd);
+ drm_dp_link_train_channel_eq_delay(dp->aux, dp->dpcd);
- ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
- if (ret != DP_LINK_STATUS_SIZE) {
+ ret = drm_dp_dpcd_read_link_status(dp->aux, lane_status);
+ if (ret) {
drm_err(dp->dev, "get lane status failed\n");
break;
}
@@ -255,7 +285,12 @@ static int hibmc_dp_link_training_channel_eq(struct hibmc_dp_dev *dp)
}
hibmc_dp_link_get_adjust_train(dp, lane_status);
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
+
+ ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET,
dp->link.train_set, dp->link.cap.lanes);
if (ret != dp->link.cap.lanes) {
drm_dbg_dp(dp->dev, "Update link training failed\n");
@@ -295,6 +330,21 @@ int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
struct hibmc_dp_link *link = &dp->link;
int ret;
+ ret = drm_dp_read_dpcd_caps(dp->aux, dp->dpcd);
+ if (ret)
+ drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
+
+ dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE];
+ dp->link.cap.lanes = 0x2;
+
+ ret = hibmc_dp_get_serdes_rate_cfg(dp);
+ if (ret < 0)
+ return ret;
+
+ ret = hibmc_dp_serdes_rate_switch(ret, dp);
+ if (ret)
+ return ret;
+
while (true) {
ret = hibmc_dp_link_training_cr_pre(dp);
if (ret)
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
index 4a515c726d52..394b1e933c3a 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
@@ -5,72 +5,128 @@
#define DP_REG_H
#define HIBMC_DP_AUX_CMD_ADDR 0x50
+
#define HIBMC_DP_AUX_WR_DATA0 0x54
#define HIBMC_DP_AUX_WR_DATA1 0x58
#define HIBMC_DP_AUX_WR_DATA2 0x5c
#define HIBMC_DP_AUX_WR_DATA3 0x60
#define HIBMC_DP_AUX_RD_DATA0 0x64
+
#define HIBMC_DP_AUX_REQ 0x74
+#define HIBMC_DP_CFG_AUX_REQ BIT(0)
+#define HIBMC_DP_CFG_AUX_SYNC_LEN_SEL BIT(1)
+#define HIBMC_DP_CFG_AUX_TIMER_TIMEOUT BIT(2)
+#define HIBMC_DP_CFG_AUX_MIN_PULSE_NUM GENMASK(13, 9)
+
#define HIBMC_DP_AUX_STATUS 0x78
+#define HIBMC_DP_CFG_AUX_TIMEOUT BIT(0)
+#define HIBMC_DP_CFG_AUX_STATUS GENMASK(11, 4)
+#define HIBMC_DP_CFG_AUX_READY_DATA_BYTE GENMASK(16, 12)
+#define HIBMC_DP_CFG_AUX GENMASK(24, 17)
+
#define HIBMC_DP_PHYIF_CTRL0 0xa0
+#define HIBMC_DP_CFG_SCRAMBLE_EN BIT(0)
+#define HIBMC_DP_CFG_PAT_SEL GENMASK(7, 4)
+#define HIBMC_DP_CFG_LANE_DATA_EN GENMASK(11, 8)
+
#define HIBMC_DP_VIDEO_CTRL 0x100
+#define HIBMC_DP_CFG_STREAM_RGB_ENABLE BIT(1)
+#define HIBMC_DP_CFG_STREAM_VIDEO_MAPPING GENMASK(5, 2)
+#define HIBMC_DP_CFG_STREAM_FRAME_MODE BIT(6)
+#define HIBMC_DP_CFG_STREAM_HSYNC_POLARITY BIT(7)
+#define HIBMC_DP_CFG_STREAM_VSYNC_POLARITY BIT(8)
+
#define HIBMC_DP_VIDEO_CONFIG0 0x104
+#define HIBMC_DP_CFG_STREAM_HACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HBLANK GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_CONFIG1 0x108
+#define HIBMC_DP_CFG_STREAM_VACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_VBLANK GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_CONFIG2 0x10c
+#define HIBMC_DP_CFG_STREAM_HSYNC_WIDTH GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_CONFIG3 0x110
+#define HIBMC_DP_CFG_STREAM_VSYNC_WIDTH GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_VFRONT_PORCH GENMASK(31, 16)
+
#define HIBMC_DP_VIDEO_PACKET 0x114
+#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE GENMASK(5, 0)
+#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE GENMASK(9, 6)
+#define HIBMC_DP_CFG_STREAM_SYNC_CALIBRATION GENMASK(31, 20)
+
#define HIBMC_DP_VIDEO_MSA0 0x118
+#define HIBMC_DP_CFG_STREAM_VSTART GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HSTART GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_MSA1 0x11c
#define HIBMC_DP_VIDEO_MSA2 0x120
+
#define HIBMC_DP_VIDEO_HORIZONTAL_SIZE 0X124
+#define HIBMC_DP_CFG_STREAM_HTOTAL_SIZE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HBLANK_SIZE GENMASK(15, 0)
+
+#define HIBMC_DP_COLOR_BAR_CTRL 0x260
+#define HIBMC_DP_COLOR_BAR_CTRL1 0x264
+
#define HIBMC_DP_TIMING_GEN_CONFIG0 0x26c
+#define HIBMC_DP_CFG_TIMING_GEN0_HACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_TIMING_GEN0_HBLANK GENMASK(15, 0)
+
#define HIBMC_DP_TIMING_GEN_CONFIG2 0x274
+#define HIBMC_DP_CFG_TIMING_GEN0_VACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_TIMING_GEN0_VBLANK GENMASK(15, 0)
+
#define HIBMC_DP_TIMING_GEN_CONFIG3 0x278
+#define HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH GENMASK(31, 16)
+
#define HIBMC_DP_HDCP_CFG 0x600
+
#define HIBMC_DP_DPTX_RST_CTRL 0x700
+#define HIBMC_DP_CFG_AUX_RST_N BIT(4)
+
#define HIBMC_DP_DPTX_CLK_CTRL 0x704
+
#define HIBMC_DP_DPTX_GCTL0 0x708
+#define HIBMC_DP_CFG_PHY_LANE_NUM GENMASK(2, 1)
+
#define HIBMC_DP_INTR_ENABLE 0x720
#define HIBMC_DP_INTR_ORIGINAL_STATUS 0x728
+
#define HIBMC_DP_TIMING_MODEL_CTRL 0x884
+#define HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1 GENMASK(31, 16)
+
#define HIBMC_DP_TIMING_SYNC_CTRL 0xFF0
-#define HIBMC_DP_CFG_AUX_SYNC_LEN_SEL BIT(1)
-#define HIBMC_DP_CFG_AUX_TIMER_TIMEOUT BIT(2)
-#define HIBMC_DP_CFG_STREAM_FRAME_MODE BIT(6)
-#define HIBMC_DP_CFG_AUX_MIN_PULSE_NUM GENMASK(13, 9)
-#define HIBMC_DP_CFG_LANE_DATA_EN GENMASK(11, 8)
-#define HIBMC_DP_CFG_PHY_LANE_NUM GENMASK(2, 1)
-#define HIBMC_DP_CFG_AUX_REQ BIT(0)
-#define HIBMC_DP_CFG_AUX_RST_N BIT(4)
-#define HIBMC_DP_CFG_AUX_TIMEOUT BIT(0)
-#define HIBMC_DP_CFG_AUX_READY_DATA_BYTE GENMASK(16, 12)
-#define HIBMC_DP_CFG_AUX GENMASK(24, 17)
-#define HIBMC_DP_CFG_AUX_STATUS GENMASK(11, 4)
-#define HIBMC_DP_CFG_SCRAMBLE_EN BIT(0)
-#define HIBMC_DP_CFG_PAT_SEL GENMASK(7, 4)
-#define HIBMC_DP_CFG_TIMING_GEN0_HACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_TIMING_GEN0_HBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_TIMING_GEN0_VACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_TIMING_GEN0_VBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_HSYNC_WIDTH GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_VBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VFRONT_PORCH GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_VSYNC_WIDTH GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VSTART GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HSTART GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VSYNC_POLARITY BIT(8)
-#define HIBMC_DP_CFG_STREAM_HSYNC_POLARITY BIT(7)
-#define HIBMC_DP_CFG_STREAM_RGB_ENABLE BIT(1)
-#define HIBMC_DP_CFG_STREAM_VIDEO_MAPPING GENMASK(5, 2)
-#define HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1 GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE GENMASK(5, 0)
-#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE GENMASK(9, 6)
-#define HIBMC_DP_CFG_STREAM_HTOTAL_SIZE GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HBLANK_SIZE GENMASK(15, 0)
+#define HIBMC_DP_INTSTAT 0x1e0724
+#define HIBMC_DP_INTCLR 0x1e0728
+
+/* dp serdes reg */
+#define HIBMC_DP_HOST_OFFSET 0x10000
+#define HIBMC_DP_LANE0_RATE_OFFSET 0x4
+#define HIBMC_DP_LANE1_RATE_OFFSET 0xc
+#define HIBMC_DP_LANE_STATUS_OFFSET 0x10
+#define HIBMC_DP_PMA_LANE0_OFFSET 0x18
+#define HIBMC_DP_PMA_LANE1_OFFSET 0x1c
+#define HIBMC_DP_HOST_SERDES_CTRL 0x1f001c
+#define HIBMC_DP_PMA_TXDEEMPH GENMASK(18, 1)
+#define DP_SERDES_DONE 0x3
+
+/* dp serdes TX-Deempth Configuration */
+#define DP_SERDES_VOL0_PRE0 0x280
+#define DP_SERDES_VOL0_PRE1 0x2300
+#define DP_SERDES_VOL0_PRE2 0x53c0
+#define DP_SERDES_VOL0_PRE3 0x8400
+#define DP_SERDES_VOL1_PRE0 0x380
+#define DP_SERDES_VOL1_PRE1 0x3440
+#define DP_SERDES_VOL1_PRE2 0x6480
+#define DP_SERDES_VOL2_PRE0 0x4c1
+#define DP_SERDES_VOL2_PRE1 0x4500
+#define DP_SERDES_VOL3_PRE0 0x600
+#define DP_SERDES_BW_8_1 0x3
+#define DP_SERDES_BW_5_4 0x2
+#define DP_SERDES_BW_2_7 0x1
+#define DP_SERDES_BW_1_62 0x0
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c
new file mode 100644
index 000000000000..676059d4c1e6
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2025 Hisilicon Limited.
+
+#include <linux/delay.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include "dp_comm.h"
+#include "dp_config.h"
+#include "dp_reg.h"
+
+int hibmc_dp_serdes_set_tx_cfg(struct hibmc_dp_dev *dp, u8 train_set[HIBMC_DP_LANE_NUM_MAX])
+{
+ static const u32 serdes_tx_cfg[4][4] = { {DP_SERDES_VOL0_PRE0, DP_SERDES_VOL0_PRE1,
+ DP_SERDES_VOL0_PRE2, DP_SERDES_VOL0_PRE3},
+ {DP_SERDES_VOL1_PRE0, DP_SERDES_VOL1_PRE1,
+ DP_SERDES_VOL1_PRE2}, {DP_SERDES_VOL2_PRE0,
+ DP_SERDES_VOL2_PRE1}, {DP_SERDES_VOL3_PRE0}};
+ int cfg[2];
+ int i;
+
+ for (i = 0; i < HIBMC_DP_LANE_NUM_MAX; i++) {
+ cfg[i] = serdes_tx_cfg[FIELD_GET(DP_TRAIN_VOLTAGE_SWING_MASK, train_set[i])]
+ [FIELD_GET(DP_TRAIN_PRE_EMPHASIS_MASK, train_set[i])];
+ if (!cfg[i])
+ return -EINVAL;
+
+ /* lane1 offset is 4 */
+ writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, cfg[i]),
+ dp->serdes_base + HIBMC_DP_PMA_LANE0_OFFSET + i * 4);
+ }
+
+ usleep_range(300, 500);
+
+ if (readl(dp->serdes_base + HIBMC_DP_LANE_STATUS_OFFSET) != DP_SERDES_DONE) {
+ drm_dbg_dp(dp->dev, "dp serdes cfg failed\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+int hibmc_dp_serdes_rate_switch(u8 rate, struct hibmc_dp_dev *dp)
+{
+ writel(rate, dp->serdes_base + HIBMC_DP_LANE0_RATE_OFFSET);
+ writel(rate, dp->serdes_base + HIBMC_DP_LANE1_RATE_OFFSET);
+
+ usleep_range(300, 500);
+
+ if (readl(dp->serdes_base + HIBMC_DP_LANE_STATUS_OFFSET) != DP_SERDES_DONE) {
+ drm_dbg_dp(dp->dev, "dp serdes rate switching failed\n");
+ return -EAGAIN;
+ }
+
+ if (rate < DP_SERDES_BW_8_1)
+ drm_dbg_dp(dp->dev, "reducing serdes rate to :%d\n",
+ rate ? rate * HIBMC_DP_LINK_RATE_CAL * 10 : 162);
+
+ return 0;
+}
+
+int hibmc_dp_serdes_init(struct hibmc_dp_dev *dp)
+{
+ dp->serdes_base = dp->base + HIBMC_DP_HOST_OFFSET;
+
+ writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, DP_SERDES_VOL0_PRE0),
+ dp->serdes_base + HIBMC_DP_PMA_LANE0_OFFSET);
+ writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, DP_SERDES_VOL0_PRE0),
+ dp->serdes_base + HIBMC_DP_PMA_LANE1_OFFSET);
+
+ return hibmc_dp_serdes_rate_switch(DP_SERDES_BW_8_1, dp);
+}
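The triangular serdes_tx_cfg table above is indexed by the DPCD voltage-swing and pre-emphasis levels; entries left zero (swing plus pre-emphasis above level 3) are rejected with -EINVAL. A worked decode, assuming the standard training-field layout from <drm/display/drm_dp.h> and the table hoisted to file scope:

	#include <linux/bitfield.h>
	#include <drm/display/drm_dp.h>

	static u32 pick_txdeemph(u8 train_set)
	{
		/* e.g. train_set = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2 */
		unsigned int swing = FIELD_GET(DP_TRAIN_VOLTAGE_SWING_MASK, train_set);
		unsigned int pre = FIELD_GET(DP_TRAIN_PRE_EMPHASIS_MASK, train_set);

		/* swing 1 + pre-emphasis 2 selects DP_SERDES_VOL1_PRE2 (0x6480);
		 * a zero entry means the combination is invalid */
		return serdes_tx_cfg[swing][pre];
	}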
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c
new file mode 100644
index 000000000000..f585387c3a49
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_edid.h>
+
+#include "hibmc_drm_drv.h"
+
+#define MAX_BUF_SIZE 12
+
+static ssize_t hibmc_control_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hibmc_drm_private *priv = file_inode(file)->i_private;
+ struct hibmc_dp_cbar_cfg *cfg = &priv->dp.cfg;
+ int ret, idx;
+ u8 buf[MAX_BUF_SIZE];
+
+ if (count >= MAX_BUF_SIZE)
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = '\0';
+
+ /* Only 4 parameters are allowed; the ranges are as follows:
+ * [0] enable/disable the colorbar feature
+ * 0: disable colorbar, 1: enable colorbar
+ * [1] the timing source of the colorbar display
+ * 0: timing follows XDP, 1: internal self timing
+ * [2] the movement of the colorbar display
+ * 0: static colorbar image,
+ * 1~255: right-shift one color every (1~255) frames
+ * [3] the color type of the colorbar display
+ * 0~9: color bar, white, red, orange,
+ * yellow, green, cyan, blue, purple, black
+ */
+ if (sscanf(buf, "%hhu %hhu %hhu %u", &cfg->enable, &cfg->self_timing,
+ &cfg->dynamic_rate, &cfg->pattern) != 4) {
+ return -EINVAL;
+ }
+
+ if (cfg->pattern > 9 || cfg->enable > 1 || cfg->self_timing > 1)
+ return -EINVAL;
+
+ ret = drm_dev_enter(&priv->dev, &idx);
+ if (!ret)
+ return -ENODEV;
+
+ hibmc_dp_set_cbar(&priv->dp, cfg);
+
+ drm_dev_exit(idx);
+
+ return count;
+}
+
+static int hibmc_dp_dbgfs_show(struct seq_file *m, void *arg)
+{
+ struct hibmc_drm_private *priv = m->private;
+ struct hibmc_dp_cbar_cfg *cfg = &priv->dp.cfg;
+ int idx;
+
+ if (!drm_dev_enter(&priv->dev, &idx))
+ return -ENODEV;
+
+ seq_printf(m, "hibmc dp colorbar cfg: %u %u %u %u\n", cfg->enable, cfg->self_timing,
+ cfg->dynamic_rate, cfg->pattern);
+
+ drm_dev_exit(idx);
+
+ return 0;
+}
+
+static int hibmc_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, hibmc_dp_dbgfs_show, inode->i_private);
+}
+
+static const struct file_operations hibmc_dbg_fops = {
+ .owner = THIS_MODULE,
+ .write = hibmc_control_write,
+ .read = seq_read,
+ .open = hibmc_open,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void hibmc_debugfs_init(struct drm_connector *connector, struct dentry *root)
+{
+ struct drm_device *dev = connector->dev;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
+
+ /* create the file in the drm debugfs directory, so we don't need to remove it manually */
+ debugfs_create_file("colorbar-cfg", 0200,
+ root, priv, &hibmc_dbg_fops);
+}
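For reference, a hedged userspace sketch of exercising the new control file; the exact debugfs path depends on the DRM minor and connector name, so the path below is only an assumption:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* enable=1, internal self timing=1, static image=0, pattern=2 (red) */
		int fd = open("/sys/kernel/debug/dri/0/DP-1/colorbar-cfg", O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, "1 1 0 2", sizeof("1 1 0 2") - 1);
		close(fd);
		return 0;
	}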
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
index 603d6b198a54..d06832e62e96 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
@@ -13,27 +13,64 @@
#include "hibmc_drm_drv.h"
#include "dp/dp_hw.h"
+#define DP_MASKED_SINK_HPD_PLUG_INT BIT(2)
+
static int hibmc_dp_connector_get_modes(struct drm_connector *connector)
{
+ const struct drm_edid *drm_edid;
int count;
- count = drm_add_modes_noedid(connector, connector->dev->mode_config.max_width,
- connector->dev->mode_config.max_height);
- drm_set_preferred_mode(connector, 1024, 768); // temporary implementation
+ drm_edid = drm_edid_read(connector);
+
+ drm_edid_connector_update(connector, drm_edid);
+
+ count = drm_edid_connector_add_modes(connector);
+
+ drm_edid_free(drm_edid);
return count;
}
+static int hibmc_dp_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
+{
+ mdelay(200);
+
+ return drm_connector_helper_detect_from_ddc(connector, ctx, force);
+}
+
static const struct drm_connector_helper_funcs hibmc_dp_conn_helper_funcs = {
.get_modes = hibmc_dp_connector_get_modes,
+ .detect_ctx = hibmc_dp_detect,
};
+static int hibmc_dp_late_register(struct drm_connector *connector)
+{
+ struct hibmc_dp *dp = to_hibmc_dp(connector);
+
+ hibmc_dp_enable_int(dp);
+
+ return drm_dp_aux_register(&dp->aux);
+}
+
+static void hibmc_dp_early_unregister(struct drm_connector *connector)
+{
+ struct hibmc_dp *dp = to_hibmc_dp(connector);
+
+ drm_dp_aux_unregister(&dp->aux);
+
+ hibmc_dp_disable_int(dp);
+}
+
static const struct drm_connector_funcs hibmc_dp_conn_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .late_register = hibmc_dp_late_register,
+ .early_unregister = hibmc_dp_early_unregister,
+ .debugfs_init = hibmc_debugfs_init,
};
static inline int hibmc_dp_prepare(struct hibmc_dp *dp, struct drm_display_mode *mode)
@@ -74,6 +111,31 @@ static const struct drm_encoder_helper_funcs hibmc_dp_encoder_helper_funcs = {
.atomic_disable = hibmc_dp_encoder_disable,
};
+irqreturn_t hibmc_dp_hpd_isr(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return IRQ_NONE;
+
+ if (priv->dp.irq_status & DP_MASKED_SINK_HPD_PLUG_INT) {
+ drm_dbg_dp(&priv->dev, "HPD IN isr occur!\n");
+ hibmc_dp_hpd_cfg(&priv->dp);
+ } else {
+ drm_dbg_dp(&priv->dev, "HPD OUT isr occur!\n");
+ hibmc_dp_reset_link(&priv->dp);
+ }
+
+ if (dev->registered)
+ drm_connector_helper_hpd_irq_event(&priv->dp.connector);
+
+ drm_dev_exit(idx);
+
+ return IRQ_HANDLED;
+}
+
int hibmc_dp_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
@@ -103,8 +165,8 @@ int hibmc_dp_init(struct hibmc_drm_private *priv)
drm_encoder_helper_add(encoder, &hibmc_dp_encoder_helper_funcs);
- ret = drm_connector_init(dev, connector, &hibmc_dp_conn_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
+ ret = drm_connector_init_with_ddc(dev, connector, &hibmc_dp_conn_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort, &dp->aux.ddc);
if (ret) {
drm_err(dev, "init dp connector failed: %d\n", ret);
return ret;
@@ -114,5 +176,7 @@ int hibmc_dp_init(struct hibmc_drm_private *priv)
drm_connector_attach_encoder(connector, encoder);
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
return 0;
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index e6de6d5edf6b..768b97f9e74a 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -28,12 +28,12 @@
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
-#define HIBMC_DP_HOST_SERDES_CTRL 0x1f001c
-#define HIBMC_DP_HOST_SERDES_CTRL_VAL 0x8a00
-#define HIBMC_DP_HOST_SERDES_CTRL_MASK 0x7ffff
+#include "dp/dp_reg.h"
DEFINE_DRM_GEM_FOPS(hibmc_fops);
+static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "vblank", "hpd" };
+
static irqreturn_t hibmc_interrupt(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
@@ -51,6 +51,22 @@ static irqreturn_t hibmc_interrupt(int irq, void *arg)
return IRQ_HANDLED;
}
+static irqreturn_t hibmc_dp_interrupt(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
+ u32 status;
+
+ status = readl(priv->mmio + HIBMC_DP_INTSTAT);
+ if (status) {
+ priv->dp.irq_status = status;
+ writel(status, priv->mmio + HIBMC_DP_INTCLR);
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_HANDLED;
+}
+
static int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
@@ -121,9 +137,12 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
return ret;
}
- /* if DP existed, init DP */
- if ((readl(priv->mmio + HIBMC_DP_HOST_SERDES_CTRL) &
- HIBMC_DP_HOST_SERDES_CTRL_MASK) == HIBMC_DP_HOST_SERDES_CTRL_VAL) {
+ /*
+ * If the serdes register is readable and non-zero, the DP
+ * block exists, so initialize it.
+ */
+ ret = readl(priv->mmio + HIBMC_DP_HOST_SERDES_CTRL);
+ if (ret) {
ret = hibmc_dp_init(priv);
if (ret)
drm_err(dev, "failed to init dp: %d\n", ret);
@@ -250,15 +269,48 @@ static int hibmc_hw_init(struct hibmc_drm_private *priv)
return 0;
}
-static int hibmc_unload(struct drm_device *dev)
+static void hibmc_unload(struct drm_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
drm_atomic_helper_shutdown(dev);
+}
- free_irq(pdev->irq, dev);
+static int hibmc_msi_init(struct drm_device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ char name[32] = {0};
+ int valid_irq_num;
+ int irq;
+ int ret;
- pci_disable_msi(to_pci_dev(dev->dev));
+ ret = pci_alloc_irq_vectors(pdev, HIBMC_MIN_VECTORS,
+ HIBMC_MAX_VECTORS, PCI_IRQ_MSI);
+ if (ret < 0) {
+ drm_err(dev, "enabling MSI failed: %d\n", ret);
+ return ret;
+ }
+
+ valid_irq_num = ret;
+
+ for (int i = 0; i < valid_irq_num; i++) {
+ snprintf(name, ARRAY_SIZE(name) - 1, "%s-%s-%s",
+ dev->driver->name, pci_name(pdev), g_irqs_names_map[i]);
+
+ irq = pci_irq_vector(pdev, i);
+
+ if (i)
+ /* PCI devices require shared interrupts. */
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ hibmc_dp_interrupt,
+ hibmc_dp_hpd_isr,
+ IRQF_SHARED, name, dev);
+ else
+ ret = devm_request_irq(&pdev->dev, irq, hibmc_interrupt,
+ IRQF_SHARED, name, dev);
+ if (ret) {
+ drm_err(dev, "install irq failed: %d\n", ret);
+ return ret;
+ }
+ }
return 0;
}
@@ -290,15 +342,10 @@ static int hibmc_load(struct drm_device *dev)
goto err;
}
- ret = pci_enable_msi(pdev);
+ ret = hibmc_msi_init(dev);
if (ret) {
- drm_warn(dev, "enabling MSI failed: %d\n", ret);
- } else {
- /* PCI devices require shared interrupts. */
- ret = request_irq(pdev->irq, hibmc_interrupt, IRQF_SHARED,
- dev->driver->name, dev);
- if (ret)
- drm_warn(dev, "install irq failed: %d\n", ret);
+ drm_err(dev, "hibmc msi init failed, ret:%d\n", ret);
+ goto err;
}
/* reset all the states of crtc/plane/encoder/connector */
@@ -374,7 +421,7 @@ static void hibmc_pci_remove(struct pci_dev *pdev)
static void hibmc_pci_shutdown(struct pci_dev *pdev)
{
- drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
+ hibmc_pci_remove(pdev);
}
static const struct pci_device_id hibmc_pci_table[] = {
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index d982f1e4b958..274feabe7df0 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -22,6 +22,9 @@
#include "dp/dp_hw.h"
+#define HIBMC_MIN_VECTORS 1
+#define HIBMC_MAX_VECTORS 2
+
struct hibmc_vdac {
struct drm_device *dev;
struct drm_encoder encoder;
@@ -47,6 +50,11 @@ static inline struct hibmc_vdac *to_hibmc_vdac(struct drm_connector *connector)
return container_of(connector, struct hibmc_vdac, connector);
}
+static inline struct hibmc_dp *to_hibmc_dp(struct drm_connector *connector)
+{
+ return container_of(connector, struct hibmc_dp, connector);
+}
+
static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
{
return container_of(dev, struct hibmc_drm_private, dev);
@@ -64,4 +72,8 @@ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *connector);
int hibmc_dp_init(struct hibmc_drm_private *priv);
+void hibmc_debugfs_init(struct drm_connector *connector, struct dentry *root);
+
+irqreturn_t hibmc_dp_hpd_isr(int irq, void *arg);
+
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 05e19ea4c9f9..e8a527ede854 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -60,6 +60,7 @@ static void hibmc_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs
hibmc_connector_helper_funcs = {
.get_modes = hibmc_connector_get_modes,
+ .detect_ctx = drm_connector_helper_detect_from_ddc,
};
static const struct drm_connector_funcs hibmc_connector_funcs = {
@@ -127,5 +128,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
drm_connector_attach_encoder(connector, encoder);
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
return 0;
}
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 2eea9fb0e76b..e80debdc4176 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -825,7 +825,6 @@ static const struct component_ops dsi_ops = {
static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
{
struct dsi_hw_ctx *ctx = dsi->ctx;
- struct resource *res;
ctx->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(ctx->pclk)) {
@@ -833,8 +832,7 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
return PTR_ERR(ctx->pclk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->base = devm_ioremap_resource(&pdev->dev, res);
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->base)) {
DRM_ERROR("failed to remap dsi io region\n");
return PTR_ERR(ctx->base);
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 2eb49177ac42..45c4eb008ad5 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -844,7 +844,6 @@ static struct drm_plane_funcs ade_plane_funcs = {
static void *ade_hw_ctx_alloc(struct platform_device *pdev,
struct drm_crtc *crtc)
{
- struct resource *res;
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
struct ade_hw_ctx *ctx = NULL;
@@ -856,8 +855,7 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev,
return ERR_PTR(-ENOMEM);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->base = devm_ioremap_resource(dev, res);
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->base)) {
DRM_ERROR("failed to remap ade io base\n");
return ERR_PTR(-EIO);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index ed05b131ed3a..e153686256c9 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -52,7 +52,6 @@ i915-y += \
i915-y += \
soc/intel_dram.o \
soc/intel_gmch.o \
- soc/intel_pch.o \
soc/intel_rom.o
# core library code
@@ -247,6 +246,7 @@ i915-y += \
display/intel_display_power_map.o \
display/intel_display_power_well.o \
display/intel_display_reset.o \
+ display/intel_display_rpm.o \
display/intel_display_rps.o \
display/intel_display_snapshot.o \
display/intel_display_wa.o \
@@ -281,6 +281,7 @@ i915-y += \
display/intel_modeset_setup.o \
display/intel_modeset_verify.o \
display/intel_overlay.o \
+ display/intel_pch.o \
display/intel_pch_display.o \
display/intel_pch_refclk.o \
display/intel_plane_initial.o \
@@ -408,7 +409,7 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o
#
# Enable locally for CONFIG_DRM_I915_WERROR=y. See also scripts/Makefile.build
ifdef CONFIG_DRM_I915_WERROR
- cmd_checkdoc = $(srctree)/scripts/kernel-doc -none -Werror $<
+ cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none -Werror $<
endif
# header test
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c
index 206818f9ad49..f10c0fb8d2c8 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c
@@ -25,6 +25,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
index 10ab3cc73e58..49f02aca818b 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
@@ -26,6 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c
index d9c3152d4338..0713b2709412 100644
--- a/drivers/gpu/drm/i915/display/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/display/dvo_ivch.c
@@ -29,6 +29,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index 92d32d6b5bce..80b71bd6a837 100644
--- a/drivers/gpu/drm/i915/display/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -26,6 +26,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
index b42c717085f3..017b617a8069 100644
--- a/drivers/gpu/drm/i915/display/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
@@ -26,6 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c
index 280699438526..ed560e3438db 100644
--- a/drivers/gpu/drm/i915/display/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c
@@ -25,6 +25,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 55b9e9bfcc4d..e0a98e6fd6d1 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -7,9 +7,11 @@
#include <linux/string_helpers.h>
+#include <drm/drm_print.h>
+
#include "g4x_dp.h"
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_audio.h"
#include "intel_backlight.h"
#include "intel_connector.h"
@@ -28,7 +30,6 @@
#include "intel_hotplug.h"
#include "intel_pch_display.h"
#include "intel_pps.h"
-#include "vlv_sideband.h"
static const struct dpll g4x_dpll[] = {
{ .dot = 162000, .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8, },
@@ -60,14 +61,13 @@ static void g4x_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct dpll *divisor = NULL;
int i, count = 0;
if (display->platform.g4x) {
divisor = g4x_dpll;
count = ARRAY_SIZE(g4x_dpll);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(display)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
} else if (display->platform.cherryview) {
@@ -93,7 +93,6 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
@@ -141,7 +140,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_ENHANCED_FRAMING;
intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
- } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
+ } else if (HAS_PCH_CPT(display) && port != PORT_A) {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
intel_de_rmw(display, TRANS_DP_CTL(crtc->pipe),
@@ -183,7 +182,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
static void assert_edp_pll(struct intel_display *display, bool state)
{
- bool cur_state = intel_de_read(display, DP_A) & DP_PLL_ENABLE;
+ bool cur_state = intel_de_read(display, DP_A) & EDP_PLL_ENABLE;
INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
@@ -205,12 +204,12 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm, "enabling eDP PLL for clock %d\n",
pipe_config->port_clock);
- intel_dp->DP &= ~DP_PLL_FREQ_MASK;
+ intel_dp->DP &= ~EDP_PLL_FREQ_MASK;
if (pipe_config->port_clock == 162000)
- intel_dp->DP |= DP_PLL_FREQ_162MHZ;
+ intel_dp->DP |= EDP_PLL_FREQ_162MHZ;
else
- intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ intel_dp->DP |= EDP_PLL_FREQ_270MHZ;
intel_de_write(display, DP_A, intel_dp->DP);
intel_de_posting_read(display, DP_A);
@@ -225,7 +224,7 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
if (display->platform.ironlake)
intel_wait_for_vblank_if_active(display, !crtc->pipe);
- intel_dp->DP |= DP_PLL_ENABLE;
+ intel_dp->DP |= EDP_PLL_ENABLE;
intel_de_write(display, DP_A, intel_dp->DP);
intel_de_posting_read(display, DP_A);
@@ -243,7 +242,7 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm, "disabling eDP PLL\n");
- intel_dp->DP &= ~DP_PLL_ENABLE;
+ intel_dp->DP &= ~EDP_PLL_ENABLE;
intel_de_write(display, DP_A, intel_dp->DP);
intel_de_posting_read(display, DP_A);
@@ -277,7 +276,6 @@ bool g4x_dp_port_enabled(struct intel_display *display,
i915_reg_t dp_reg, enum port port,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
bool ret;
u32 val;
@@ -287,13 +285,13 @@ bool g4x_dp_port_enabled(struct intel_display *display,
/* asserts want to know the pipe even if the port is disabled */
if (display->platform.ivybridge && port == PORT_A)
- *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
- else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
+ *pipe = REG_FIELD_GET(DP_PIPE_SEL_MASK_IVB, val);
+ else if (HAS_PCH_CPT(display) && port != PORT_A)
ret &= cpt_dp_port_selected(display, port, pipe);
else if (display->platform.cherryview)
- *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
+ *pipe = REG_FIELD_GET(DP_PIPE_SEL_MASK_CHV, val);
else
- *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
+ *pipe = REG_FIELD_GET(DP_PIPE_SEL_MASK, val);
return ret;
}
@@ -338,7 +336,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 tmp, flags = 0;
enum port port = encoder->port;
@@ -353,7 +350,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
- if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
+ if (HAS_PCH_CPT(display) && port != PORT_A) {
u32 trans_dp = intel_de_read(display,
TRANS_DP_CTL(crtc->pipe));
@@ -389,13 +386,12 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
if (display->platform.g4x && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
- pipe_config->lane_count =
- ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
+ pipe_config->lane_count = REG_FIELD_GET(DP_PORT_WIDTH_MASK, tmp) + 1;
g4x_dp_get_m_n(pipe_config);
if (port == PORT_A) {
- if ((intel_de_read(display, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
+ if ((intel_de_read(display, DP_A) & EDP_PLL_FREQ_MASK) == EDP_PLL_FREQ_162MHZ)
pipe_config->port_clock = 162000;
else
pipe_config->port_clock = 270000;
@@ -416,7 +412,6 @@ intel_dp_link_down(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
@@ -429,7 +424,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
drm_dbg_kms(display->drm, "\n");
if ((display->platform.ivybridge && port == PORT_A) ||
- (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
+ (HAS_PCH_CPT(display) && port != PORT_A)) {
intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
} else {
@@ -448,7 +443,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
* to transcoder A after disabling it to allow the
* matching HDMI port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
+ if (HAS_PCH_IBX(display) && crtc->pipe == PIPE_B && port != PORT_A) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -519,7 +514,7 @@ static void intel_disable_dp(struct intel_atomic_state *state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- intel_dp->link_trained = false;
+ intel_dp->link.active = false;
/*
* Make sure the panel is off before trying to change the mode.
@@ -581,16 +576,10 @@ static void chv_post_disable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
intel_dp_link_down(encoder, old_crtc_state);
- vlv_dpio_get(dev_priv);
-
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, old_crtc_state, true);
-
- vlv_dpio_put(dev_priv);
}
static void
@@ -1223,10 +1212,10 @@ static int g4x_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
int ret;
- if (HAS_PCH_SPLIT(i915) && encoder->port != PORT_A)
+ if (HAS_PCH_SPLIT(display) && encoder->port != PORT_A)
crtc_state->has_pch_encoder = true;
ret = intel_dp_compute_config(encoder, crtc_state, conn_state);
@@ -1279,7 +1268,6 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
bool g4x_dp_init(struct intel_display *display,
i915_reg_t output_reg, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
@@ -1353,7 +1341,7 @@ bool g4x_dp_init(struct intel_display *display,
intel_encoder->audio_disable = g4x_dp_audio_disable;
if ((display->platform.ivybridge && port == PORT_A) ||
- (HAS_PCH_CPT(dev_priv) && port != PORT_A))
+ (HAS_PCH_CPT(display) && port != PORT_A))
dig_port->dp.set_link_train = cpt_set_link_train;
else
dig_port->dp.set_link_train = g4x_set_link_train;
@@ -1370,7 +1358,7 @@ bool g4x_dp_init(struct intel_display *display,
intel_encoder->set_signal_levels = g4x_set_signal_levels;
if (display->platform.valleyview || display->platform.cherryview ||
- (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
+ (HAS_PCH_SPLIT(display) && port != PORT_A)) {
dig_port->dp.preemph_max = intel_dp_preemph_max_3;
dig_port->dp.voltage_max = intel_dp_voltage_max_3;
} else {
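
Two conversions recur throughout this file: the HAS_PCH_*() checks now take the struct intel_display derived via to_intel_display(), and open-coded mask-and-shift register reads become REG_FIELD_GET(), i915's u32-typed wrapper around the generic FIELD_GET(). A sketch of the latter with invented EXAMPLE_* names:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_PIPE_SEL_MASK	GENMASK(30, 29)

static unsigned int example_decode_pipe(u32 val)
{
	/* before: (val & EXAMPLE_PIPE_SEL_MASK) >> EXAMPLE_PIPE_SEL_SHIFT */
	/* FIELD_GET() derives the shift from the mask at compile time,
	 * so the separate *_SHIFT define becomes redundant. */
	return FIELD_GET(EXAMPLE_PIPE_SEL_MASK, val);
}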
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 3dc2c59a3df0..1d252432d729 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -5,8 +5,9 @@
* HDMI support for G4x,ILK,SNB,IVB,VLV,CHV (HSW+ handled by the DDI code).
*/
+#include <drm/drm_print.h>
+
#include "g4x_hdmi.h"
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -22,13 +23,11 @@
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_sdvo.h"
-#include "vlv_sideband.h"
static void intel_hdmi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -37,7 +36,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
hdmi_val = SDVO_ENCODING_HDMI;
- if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
+ if (!HAS_PCH_SPLIT(display) && crtc_state->limited_color_range)
hdmi_val |= HDMI_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
@@ -52,7 +51,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
if (crtc_state->has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else if (display->platform.cherryview)
hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
@@ -134,9 +133,8 @@ static int g4x_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_display *display = to_intel_display(encoder);
struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
crtc_state->has_pch_encoder = true;
if (!intel_fdi_compute_pipe_bpp(crtc_state))
return -EINVAL;
@@ -155,7 +153,6 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 tmp, flags = 0;
int dotclock;
@@ -186,7 +183,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_AUDIO_ENABLE)
pipe_config->has_audio = true;
- if (!HAS_PCH_SPLIT(dev_priv) &&
+ if (!HAS_PCH_SPLIT(display) &&
tmp & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@@ -383,7 +380,6 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_digital_port *dig_port =
hdmi_to_dig_port(intel_hdmi);
@@ -401,7 +397,7 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
* to transcoder A after disabling it to allow the
* matching DP port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
+ if (HAS_PCH_IBX(display) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -539,15 +535,8 @@ static void chv_hdmi_post_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- vlv_dpio_get(dev_priv);
-
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, old_crtc_state, true);
-
- vlv_dpio_put(dev_priv);
}
static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
@@ -682,7 +671,6 @@ static bool assert_hdmi_port_valid(struct intel_display *display, enum port port
bool g4x_hdmi_init(struct intel_display *display,
i915_reg_t hdmi_reg, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
@@ -724,7 +712,7 @@ bool g4x_hdmi_init(struct intel_display *display,
intel_encoder->hotplug = intel_hdmi_hotplug;
intel_encoder->compute_config = g4x_hdmi_compute_config;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
intel_encoder->disable = pch_disable_hdmi;
intel_encoder->post_disable = pch_post_disable_hdmi;
} else {
@@ -745,9 +733,9 @@ bool g4x_hdmi_init(struct intel_display *display,
intel_encoder->post_disable = vlv_hdmi_post_disable;
} else {
intel_encoder->pre_enable = intel_hdmi_pre_enable;
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
intel_encoder->enable = cpt_enable_hdmi;
- else if (HAS_PCH_IBX(dev_priv))
+ else if (HAS_PCH_IBX(display))
intel_encoder->enable = ibx_enable_hdmi;
else
intel_encoder->enable = g4x_enable_hdmi;
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 674a0e5f0858..4307e2ed03d9 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -10,6 +10,7 @@
#include "i915_reg.h"
#include "intel_color_regs.h"
#include "intel_de.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_pcode.h"
@@ -344,10 +345,9 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_crtc *crtc = m->private;
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
seq_printf(m, "Enabled by kernel parameter: %s\n",
str_yes_no(display->params.enable_ips));
@@ -361,7 +361,7 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
seq_puts(m, "Currently: disabled\n");
}
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
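
The hsw_ips hunk swaps the i915-wide runtime-PM wakeref for the display-local wrapper built from intel_display_rpm.o above; the reference is now tracked as a struct ref_tracker *. A sketch based only on the calls visible in this hunk (assumed API, surrounding names illustrative):

static int example_debugfs_show(struct intel_display *display)
{
	struct ref_tracker *wakeref;

	/* Keep the display powered while its registers are sampled. */
	wakeref = intel_display_rpm_get(display);

	/* ... read hardware state, seq_printf() it ... */

	intel_display_rpm_put(display, wakeref);
	return 0;
}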
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 013295f66d56..a2a6d52be0a5 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -7,9 +7,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic.h"
@@ -630,85 +631,85 @@ vlv_primary_async_flip(struct intel_dsb *dsb,
static void
bdw_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- bdw_enable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&display->irq.lock);
}
static void
bdw_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- bdw_disable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&display->irq.lock);
}
static void
ivb_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_enable_display_irq(display, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
ivb_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_disable_display_irq(display, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
ilk_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_enable_display_irq(display, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
ilk_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_disable_display_irq(display, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
vlv_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- i915_enable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&display->irq.lock);
}
static void
vlv_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- i915_disable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ i915_disable_pipestat(display, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&display->irq.lock);
}
static bool i9xx_plane_can_async_flip(u64 modifier)
@@ -820,7 +821,7 @@ unsigned int vlv_plane_min_alignment(struct intel_plane *plane,
{
struct intel_display *display = to_intel_display(plane);
- if (intel_plane_can_async_flip(plane, fb->modifier))
+ if (intel_plane_can_async_flip(plane, fb->format->format, fb->modifier))
return 256 * 1024;
/* FIXME undocumented so not sure what's actually needed */
@@ -844,7 +845,7 @@ static unsigned int g4x_primary_min_alignment(struct intel_plane *plane,
{
struct intel_display *display = to_intel_display(plane);
- if (intel_plane_can_async_flip(plane, fb->modifier))
+ if (intel_plane_can_async_flip(plane, fb->format->format, fb->modifier))
return 256 * 1024;
if (intel_scanout_needs_vtd_wa(display))
@@ -889,6 +890,7 @@ static const struct drm_plane_funcs i965_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = i965_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static const struct drm_plane_funcs i8xx_plane_funcs = {
@@ -898,6 +900,7 @@ static const struct drm_plane_funcs i8xx_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = i8xx_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
struct intel_plane *
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 7c80e37c1c5f..77876ef735b7 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -9,6 +9,7 @@
#include "i9xx_wm_regs.h"
#include "intel_atomic.h"
#include "intel_bo.h"
+#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_trace.h"
#include "intel_fb.h"
@@ -81,13 +82,14 @@ static const struct cxsr_latency cxsr_latency_table[] = {
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
-static const struct cxsr_latency *pnv_get_cxsr_latency(struct drm_i915_private *i915)
+static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int i;
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
const struct cxsr_latency *latency = &cxsr_latency_table[i];
- bool is_desktop = !IS_MOBILE(i915);
+ bool is_desktop = !display->platform.mobile;
if (is_desktop == latency->is_desktop &&
i915->is_ddr3 == latency->is_ddr3 &&
@@ -96,15 +98,16 @@ static const struct cxsr_latency *pnv_get_cxsr_latency(struct drm_i915_private *
return latency;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Could not find CxSR latency for DDR%s, FSB %u kHz, MEM %u kHz\n",
i915->is_ddr3 ? "3" : "2", i915->fsb_freq, i915->mem_freq);
return NULL;
}
-static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
+static void chv_set_memory_dvfs(struct intel_display *display, bool enable)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
vlv_punit_get(dev_priv);
@@ -120,14 +123,15 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"timed out waiting for Punit DDR DVFS request\n");
vlv_punit_put(dev_priv);
}
-static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
+static void chv_set_memory_pm5(struct intel_display *display, bool enable)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
vlv_punit_get(dev_priv);
@@ -145,53 +149,52 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
#define FW_WM(value, plane) \
(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
-static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+static bool _intel_set_memory_cxsr(struct intel_display *display, bool enable)
{
- struct intel_display *display = &dev_priv->display;
bool was_enabled;
u32 val;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
- } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
- } else if (IS_PINEVIEW(dev_priv)) {
- val = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ if (display->platform.valleyview || display->platform.cherryview) {
+ was_enabled = intel_de_read(display, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ intel_de_write(display, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ intel_de_posting_read(display, FW_BLC_SELF_VLV);
+ } else if (display->platform.g4x || display->platform.i965gm) {
+ was_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_de_write(display, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ intel_de_posting_read(display, FW_BLC_SELF);
+ } else if (display->platform.pineview) {
+ val = intel_de_read(display, DSPFW3(display));
was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
if (enable)
val |= PINEVIEW_SELF_REFRESH_EN;
else
val &= ~PINEVIEW_SELF_REFRESH_EN;
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), val);
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW3(dev_priv));
- } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_de_write(display, DSPFW3(display), val);
+ intel_de_posting_read(display, DSPFW3(display));
+ } else if (display->platform.i945g || display->platform.i945gm) {
+ was_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
- } else if (IS_I915GM(dev_priv)) {
+ intel_de_write(display, FW_BLC_SELF, val);
+ intel_de_posting_read(display, FW_BLC_SELF);
+ } else if (display->platform.i915gm) {
/*
* FIXME can't find a bit like this for 915G, and
* yet it does have the related watermark in
* FW_BLC_SELF. What's going on?
*/
- was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
+ was_enabled = intel_de_read(display, INSTPM) & INSTPM_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
- intel_uncore_write(&dev_priv->uncore, INSTPM, val);
- intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
+ intel_de_write(display, INSTPM, val);
+ intel_de_posting_read(display, INSTPM);
} else {
return false;
}
trace_intel_memory_cxsr(display, was_enabled, enable);
- drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
+ drm_dbg_kms(display->drm, "memory self-refresh is %s (was %s)\n",
str_enabled_disabled(enable),
str_enabled_disabled(was_enabled));
@@ -200,7 +203,7 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
/**
* intel_set_memory_cxsr - Configure CxSR state
- * @dev_priv: i915 device
+ * @display: display device
* @enable: Allow vs. disallow CxSR
*
* Allow or disallow the system to enter a special CxSR
@@ -235,17 +238,17 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
* the hardware w.r.t. HPLL SR when writing to plane registers.
* Disallowing just CxSR is sufficient.
*/
-bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+bool intel_set_memory_cxsr(struct intel_display *display, bool enable)
{
bool ret;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- ret = _intel_set_memory_cxsr(dev_priv, enable);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->display.wm.vlv.cxsr = enable;
- else if (IS_G4X(dev_priv))
- dev_priv->display.wm.g4x.cxsr = enable;
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
+ ret = _intel_set_memory_cxsr(display, enable);
+ if (display->platform.valleyview || display->platform.cherryview)
+ display->wm.vlv.cxsr = enable;
+ else if (display->platform.g4x)
+ display->wm.g4x.cxsr = enable;
+ mutex_unlock(&display->wm.wm_mutex);
return ret;
}
@@ -271,8 +274,8 @@ static const int pessimal_latency_ns = 5000;
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
enum pipe pipe = crtc->pipe;
int sprite0_start, sprite1_start;
@@ -280,22 +283,20 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
switch (pipe) {
case PIPE_A:
- dsparb = intel_uncore_read(&dev_priv->uncore,
- DSPARB(dev_priv));
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb = intel_de_read(display, DSPARB(display));
+ dsparb2 = intel_de_read(display, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
break;
case PIPE_B:
- dsparb = intel_uncore_read(&dev_priv->uncore,
- DSPARB(dev_priv));
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb = intel_de_read(display, DSPARB(display));
+ dsparb2 = intel_de_read(display, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
break;
case PIPE_C:
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
- dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
+ dsparb2 = intel_de_read(display, DSPARB2);
+ dsparb3 = intel_de_read(display, DSPARB3);
sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
break;
@@ -310,26 +311,26 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
fifo_state->plane[PLANE_CURSOR] = 63;
}
-static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i9xx_get_fifo_size(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
+ u32 dsparb = intel_de_read(display, DSPARB(display));
int size;
size = dsparb & 0x7f;
if (i9xx_plane == PLANE_B)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n",
dsparb, plane_name(i9xx_plane), size);
return size;
}
-static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i830_get_fifo_size(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
+ u32 dsparb = intel_de_read(display, DSPARB(display));
int size;
size = dsparb & 0x1ff;
@@ -337,22 +338,22 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
size >>= 1; /* Convert to cachelines */
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n",
dsparb, plane_name(i9xx_plane), size);
return size;
}
-static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i845_get_fifo_size(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
+ u32 dsparb = intel_de_read(display, DSPARB(display));
int size;
size = dsparb & 0x7f;
size >>= 2; /* Convert to cachelines */
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n",
dsparb, plane_name(i9xx_plane), size);
return size;
@@ -537,7 +538,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
/**
* intel_calculate_wm - calculate watermark level
- * @i915: the device
+ * @display: display device
* @pixel_rate: pixel clock
* @wm: chip FIFO params
* @fifo_size: size of the FIFO buffer
@@ -555,7 +556,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
* past the watermark point. If the FIFO drains completely, a FIFO underrun
* will occur, and a display engine hang could result.
*/
-static unsigned int intel_calculate_wm(struct drm_i915_private *i915,
+static unsigned int intel_calculate_wm(struct intel_display *display,
int pixel_rate,
const struct intel_watermark_params *wm,
int fifo_size, int cpp,
@@ -573,10 +574,10 @@ static unsigned int intel_calculate_wm(struct drm_i915_private *i915,
latency_ns / 100);
entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
wm->guard_size;
- drm_dbg_kms(&i915->drm, "FIFO entries required for mode: %d\n", entries);
+ drm_dbg_kms(display->drm, "FIFO entries required for mode: %d\n", entries);
wm_size = fifo_size - entries;
- drm_dbg_kms(&i915->drm, "FIFO watermark level: %d\n", wm_size);
+ drm_dbg_kms(display->drm, "FIFO watermark level: %d\n", wm_size);
/* Don't promote wm_size to unsigned... */
if (wm_size > wm->max_wm)
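
The arithmetic in this hunk is easiest to see with numbers: entries is the FIFO space consumed during the memory latency window, rounded up to cachelines plus a guard band, and the watermark level is whatever remains of the FIFO. A worked toy example (all values invented):

#include <linux/math.h>

static int example_wm_level(void)
{
	/* 300 bytes fetched per latency window, 64-byte cachelines,
	 * guard of 2 entries: DIV_ROUND_UP(300, 64) + 2 == 7 entries. */
	unsigned int entries = DIV_ROUND_UP(300, 64) + 2;
	int fifo_size = 96;

	/* Watermark: FIFO entries left over, here 96 - 7 == 89. */
	return fifo_size - entries;
}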
@@ -626,11 +627,11 @@ static bool intel_crtc_active(struct intel_crtc *crtc)
crtc->config->hw.adjusted_mode.crtc_clock;
}
-static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
+static struct intel_crtc *single_enabled_crtc(struct intel_display *display)
{
struct intel_crtc *crtc, *enabled = NULL;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
@@ -641,21 +642,21 @@ static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
return enabled;
}
-static void pnv_update_wm(struct drm_i915_private *dev_priv)
+static void pnv_update_wm(struct intel_display *display)
{
struct intel_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
unsigned int wm;
- latency = pnv_get_cxsr_latency(dev_priv);
+ latency = pnv_get_cxsr_latency(display);
if (!latency) {
- drm_dbg_kms(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
- intel_set_memory_cxsr(dev_priv, false);
+ drm_dbg_kms(display->drm, "Unknown FSB/MEM, disabling CxSR\n");
+ intel_set_memory_cxsr(display, false);
return;
}
- crtc = single_enabled_crtc(dev_priv);
+ crtc = single_enabled_crtc(display);
if (crtc) {
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
@@ -663,47 +664,46 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv)
int cpp = fb->format->cpp[0];
/* Display SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_display_wm,
pnv_display_wm.fifo_size,
cpp, latency->display_sr);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ reg = intel_de_read(display, DSPFW1(display));
reg &= ~DSPFW_SR_MASK;
reg |= FW_WM(wm, SR);
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv), reg);
- drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
+ intel_de_write(display, DSPFW1(display), reg);
+ drm_dbg_kms(display->drm, "DSPFW1 register is %x\n", reg);
/* cursor SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_cursor_wm,
pnv_display_wm.fifo_size,
4, latency->cursor_sr);
- intel_uncore_rmw(&dev_priv->uncore, DSPFW3(dev_priv),
- DSPFW_CURSOR_SR_MASK,
- FW_WM(wm, CURSOR_SR));
+ intel_de_rmw(display, DSPFW3(display),
+ DSPFW_CURSOR_SR_MASK, FW_WM(wm, CURSOR_SR));
/* Display HPLL off SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_display_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
cpp, latency->display_hpll_disable);
- intel_uncore_rmw(&dev_priv->uncore, DSPFW3(dev_priv),
- DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
+ intel_de_rmw(display, DSPFW3(display),
+ DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
/* cursor HPLL off SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_cursor_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
4, latency->cursor_hpll_disable);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ reg = intel_de_read(display, DSPFW3(display));
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= FW_WM(wm, HPLL_CURSOR);
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), reg);
- drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
+ intel_de_write(display, DSPFW3(display), reg);
+ drm_dbg_kms(display->drm, "DSPFW3 register is %x\n", reg);
- intel_set_memory_cxsr(dev_priv, true);
+ intel_set_memory_cxsr(display, true);
} else {
- intel_set_memory_cxsr(dev_priv, false);
+ intel_set_memory_cxsr(display, false);
}
}
@@ -794,53 +794,51 @@ static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
return max(0, tlb_miss);
}
-static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
+static void g4x_write_wm_values(struct intel_display *display,
const struct g4x_wm_values *wm)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
trace_g4x_wm(intel_crtc_for_pipe(display, pipe), wm);
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
- (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
- FW_WM(wm->sr.fbc, FBC_SR) |
- FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
- (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
- FW_WM(wm->sr.cursor, CURSOR_SR) |
- FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
- FW_WM(wm->hpll.plane, HPLL_SR));
-
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ intel_de_write(display, DSPFW1(display),
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ intel_de_write(display, DSPFW2(display),
+ (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
+ FW_WM(wm->sr.fbc, FBC_SR) |
+ FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ intel_de_write(display, DSPFW3(display),
+ (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
+ FW_WM(wm->sr.cursor, CURSOR_SR) |
+ FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
+ FW_WM(wm->hpll.plane, HPLL_SR));
+
+ intel_de_posting_read(display, DSPFW1(display));
}
#define FW_WM_VLV(value, plane) \
(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
-static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
+static void vlv_write_wm_values(struct intel_display *display,
const struct vlv_wm_values *wm)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
trace_vlv_wm(intel_crtc_for_pipe(display, pipe), wm);
- intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
- (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
- (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
- (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
- (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
+ intel_de_write(display, VLV_DDL(pipe),
+ (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
+ (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
}
/*
@@ -848,72 +846,72 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
* high order bits so that there are no out of bounds values
* present in the registers during the reprogramming.
*/
- intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
- intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
-
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
- FW_WM(wm->sr.cursor, CURSOR_SR));
-
- if (IS_CHERRYVIEW(dev_priv)) {
- intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
- intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
- intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ intel_de_write(display, DSPHOWM, 0);
+ intel_de_write(display, DSPHOWM1, 0);
+ intel_de_write(display, DSPFW4, 0);
+ intel_de_write(display, DSPFW5, 0);
+ intel_de_write(display, DSPFW6, 0);
+
+ intel_de_write(display, DSPFW1(display),
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ intel_de_write(display, DSPFW2(display),
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ intel_de_write(display, DSPFW3(display),
+ FW_WM(wm->sr.cursor, CURSOR_SR));
+
+ if (display->platform.cherryview) {
+ intel_de_write(display, DSPFW7_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ intel_de_write(display, DSPFW8_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
+ intel_de_write(display, DSPFW9_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
+ intel_de_write(display, DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
} else {
- intel_uncore_write(&dev_priv->uncore, DSPFW7,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ intel_de_write(display, DSPFW7,
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ intel_de_write(display, DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
}
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ intel_de_posting_read(display, DSPFW1(display));
}
#undef FW_WM_VLV
-static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void g4x_setup_wm_latency(struct intel_display *display)
{
/* all latencies in usec */
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
+ display->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
+ display->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
+ display->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
- dev_priv->display.wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
+ display->wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
}
static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
@@ -962,11 +960,11 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
- unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
+ unsigned int latency = display->wm.pri_latency[level] * 10;
unsigned int pixel_rate, htotal, cpp, width, wm;
if (latency == 0)
@@ -1017,10 +1015,10 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
bool dirty = false;
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
dirty |= raw->plane[plane_id] != value;
@@ -1033,13 +1031,13 @@ static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
int level, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
bool dirty = false;
/* NORMAL level doesn't have an FBC watermark */
level = max(level, G4X_WM_LEVEL_SR);
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
dirty |= raw->fbc != value;
@@ -1056,8 +1054,8 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum plane_id plane_id = plane->id;
bool dirty = false;
int level;
@@ -1069,7 +1067,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
goto out;
}
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
int wm, max_wm;
@@ -1109,7 +1107,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
plane->base.name,
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
@@ -1117,7 +1115,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
if (plane_id == PLANE_PRIMARY)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FBC watermarks: SR=%d, HPLL=%d\n",
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
@@ -1137,9 +1135,9 @@ static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
int level)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (level >= dev_priv->display.wm.num_levels)
+ if (level >= display->wm.num_levels)
return false;
return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
@@ -1281,7 +1279,7 @@ static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
@@ -1311,7 +1309,7 @@ static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
max(optimal->wm.plane[plane_id],
active->wm.plane[plane_id]);
- drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
+ drm_WARN_ON(display->drm, intermediate->wm.plane[plane_id] >
g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
}
@@ -1329,23 +1327,23 @@ static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
intermediate->hpll.fbc = max(optimal->hpll.fbc,
active->hpll.fbc);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
(intermediate->sr.plane >
g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
intermediate->sr.cursor >
g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
intermediate->cxsr);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
(intermediate->sr.plane >
g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
intermediate->sr.cursor >
g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
intermediate->hpll_en);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
intermediate->fbc_en && intermediate->cxsr);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
intermediate->fbc_en && intermediate->hpll_en);
@@ -1376,7 +1374,7 @@ static int g4x_compute_watermarks(struct intel_atomic_state *state,
return 0;
}
-static void g4x_merge_wm(struct drm_i915_private *dev_priv,
+static void g4x_merge_wm(struct intel_display *display,
struct g4x_wm_values *wm)
{
struct intel_crtc *crtc;
@@ -1386,7 +1384,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
wm->hpll_en = true;
wm->fbc_en = true;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
if (!crtc->active)
@@ -1408,7 +1406,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
wm->fbc_en = false;
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
enum pipe pipe = crtc->pipe;
@@ -1420,23 +1418,23 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
}
}
-static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
+static void g4x_program_watermarks(struct intel_display *display)
{
- struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
+ struct g4x_wm_values *old_wm = &display->wm.g4x;
struct g4x_wm_values new_wm = {};
- g4x_merge_wm(dev_priv, &new_wm);
+ g4x_merge_wm(display, &new_wm);
if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
return;
if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, false);
+ _intel_set_memory_cxsr(display, false);
- g4x_write_wm_values(dev_priv, &new_wm);
+ g4x_write_wm_values(display, &new_wm);
if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, true);
+ _intel_set_memory_cxsr(display, true);
*old_wm = new_wm;
}
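
The is_enabling()/is_disabling() helpers called above are defined earlier in i9xx_wm.c and fall outside this diff; reconstructed from their call sites, they are roughly:

static bool is_disabling(int old, int new, int threshold)
{
        return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
        return old < threshold && new >= threshold;
}

With a boolean threshold (true, as for cxsr) this reads naturally: is_disabling() means "was on, turning off", so self-refresh is dropped before smaller watermarks land and re-entered only after larger ones have been written.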
@@ -1444,30 +1442,30 @@ static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
static void g4x_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
- g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ g4x_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void g4x_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
- g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ g4x_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
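
g4x_initial_watermarks() and g4x_optimize_watermarks() are the two halves of the atomic commit: the first publishes the intermediate values (safe for both the old and the new plane configuration) before the plane update, the second switches to the optimal values once need_postvbl_update says the vblank has passed. A hypothetical condensed form of the shared shape — not code from the patch:

static void g4x_commit_wm(struct intel_display *display,
                          struct intel_crtc *crtc,
                          const struct g4x_wm_state *next)
{
        mutex_lock(&display->wm.wm_mutex);
        crtc->wm.active.g4x = *next;            /* publish per-crtc state */
        g4x_program_watermarks(display);        /* merge all crtcs, write regs */
        mutex_unlock(&display->wm.wm_mutex);
}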
/* latency must be in 0.1us units. */
@@ -1486,18 +1484,18 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
return ret;
}
-static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void vlv_setup_wm_latency(struct intel_display *display)
{
/* all latencies in usec */
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+ display->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
- dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
+ display->wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+ if (display->platform.cherryview) {
+ display->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+ display->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
- dev_priv->display.wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
+ display->wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
}
}
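
Note the latencies here are hard-coded (3 us for PM2, and on Cherryview 12 us for PM5 plus 33 us for DDR DVFS) rather than read back from hardware, and only Cherryview gets the two deeper levels. The check also shows the conversion's new platform idiom: IS_CHERRYVIEW(dev_priv) becomes display->platform.cherryview.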
@@ -1505,13 +1503,13 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
unsigned int pixel_rate, htotal, cpp, width, wm;
- if (dev_priv->display.wm.pri_latency[level] == 0)
+ if (display->wm.pri_latency[level] == 0)
return USHRT_MAX;
if (!intel_wm_plane_visible(crtc_state, plane_state))
@@ -1532,7 +1530,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
wm = 63;
} else {
wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
- dev_priv->display.wm.pri_latency[level] * 10);
+ display->wm.pri_latency[level] * 10);
}
return min_t(unsigned int, wm, USHRT_MAX);
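
The * 10 above converts pri_latency[] from microseconds into the 0.1 us units vlv_wm_method2() expects (the "latency must be in 0.1us units" comment above the vlv_wm_method2() hunk says as much). The helper's body sits above this diff's context window; reconstructed, it is approximately:

/* Approximate reconstruction of vlv_wm_method2() — not part of this hunk. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate, /* kHz */
                                   unsigned int htotal,
                                   unsigned int width,
                                   unsigned int cpp,
                                   unsigned int latency) /* 0.1 us units */
{
        unsigned int ret;

        /* scanlines needed to cover the latency... */
        ret = (latency * pixel_rate) / (htotal * 10000);
        /* ...converted to bytes fetched, plus one line of slack */
        ret = (ret + 1) * width * cpp;

        return ret;
}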
@@ -1546,8 +1544,8 @@ static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
@@ -1616,11 +1614,11 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
fifo_left -= plane_extra;
}
- drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
+ drm_WARN_ON(display->drm, active_planes != 0 && fifo_left != 0);
/* give it all to the first plane if none are active */
if (active_planes == 0) {
- drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
+ drm_WARN_ON(display->drm, fifo_left != fifo_size);
fifo_state->plane[PLANE_PRIMARY] = fifo_left;
}
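
vlv_compute_fifo() hands out the pipe's FIFO to the active planes in proportion to their PM2 demand, then distributes any remainder; the drm_WARN_ON pair above checks that nothing is left over when planes are active and that an idle pipe gives everything to the primary. A toy model of a rate-proportional split, with hypothetical names and none of the real function's per-plane minimums or cursor special-casing:

static void split_fifo(const unsigned int rate[], unsigned int fifo[],
                       int n, unsigned int fifo_size)
{
        unsigned int total = 0, used = 0;
        int i;

        for (i = 0; i < n; i++)
                total += rate[i];

        for (i = 0; i < n; i++) {
                fifo[i] = total ? fifo_size * rate[i] / total : 0;
                used += fifo[i];
        }

        /* rounding remainder (or, with no demand, everything) goes first */
        fifo[0] += fifo_size - used;
}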
@@ -1631,9 +1629,9 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
static void vlv_invalidate_wms(struct intel_crtc *crtc,
struct vlv_wm_state *wm_state, int level)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id)
@@ -1659,10 +1657,10 @@ static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
bool dirty = false;
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
dirty |= raw->plane[plane_id] != value;
@@ -1675,8 +1673,8 @@ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum plane_id plane_id = plane->id;
int level;
bool dirty = false;
@@ -1686,7 +1684,7 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
goto out;
}
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
@@ -1703,7 +1701,7 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
plane->base.name,
crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
@@ -1734,8 +1732,8 @@ static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
@@ -1745,7 +1743,7 @@ static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
int level;
/* initially allow all levels */
- wm_state->num_levels = dev_priv->display.wm.num_levels;
+ wm_state->num_levels = display->wm.num_levels;
/*
* Note that enabling cxsr with no primary/sprite planes
* enabled can wedge the pipe. Hence we only allow cxsr
@@ -1755,7 +1753,7 @@ static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for (level = 0; level < wm_state->num_levels; level++) {
const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
- const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
+ const int sr_fifo_size = INTEL_NUM_PIPES(display) * 512 - 1;
if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
break;
@@ -1855,6 +1853,7 @@ static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_uncore *uncore = &dev_priv->uncore;
const struct intel_crtc_state *crtc_state =
@@ -1871,8 +1870,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
- drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
- drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
+ drm_WARN_ON(display->drm, fifo_state->plane[PLANE_CURSOR] != 63);
+ drm_WARN_ON(display->drm, fifo_size != 511);
trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
@@ -1889,8 +1888,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
switch (crtc->pipe) {
case PIPE_A:
- dsparb = intel_uncore_read_fw(uncore, DSPARB(dev_priv));
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+ dsparb = intel_de_read_fw(display, DSPARB(display));
+ dsparb2 = intel_de_read_fw(display, DSPARB2);
dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
VLV_FIFO(SPRITEB, 0xff));
@@ -1902,12 +1901,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
- intel_uncore_write_fw(uncore, DSPARB(dev_priv), dsparb);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ intel_de_write_fw(display, DSPARB(display), dsparb);
+ intel_de_write_fw(display, DSPARB2, dsparb2);
break;
case PIPE_B:
- dsparb = intel_uncore_read_fw(uncore, DSPARB(dev_priv));
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+ dsparb = intel_de_read_fw(display, DSPARB(display));
+ dsparb2 = intel_de_read_fw(display, DSPARB2);
dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
VLV_FIFO(SPRITED, 0xff));
@@ -1919,12 +1918,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
- intel_uncore_write_fw(uncore, DSPARB(dev_priv), dsparb);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ intel_de_write_fw(display, DSPARB(display), dsparb);
+ intel_de_write_fw(display, DSPARB2, dsparb2);
break;
case PIPE_C:
- dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+ dsparb3 = intel_de_read_fw(display, DSPARB3);
+ dsparb2 = intel_de_read_fw(display, DSPARB2);
dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
VLV_FIFO(SPRITEF, 0xff));
@@ -1936,14 +1935,14 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
- intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ intel_de_write_fw(display, DSPARB3, dsparb3);
+ intel_de_write_fw(display, DSPARB2, dsparb2);
break;
default:
break;
}
- intel_uncore_posting_read_fw(uncore, DSPARB(dev_priv));
+ intel_de_read_fw(display, DSPARB(display));
spin_unlock(&uncore->lock);
}
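
Two things stand out in this hunk. First, the conversion keeps the raw _fw accessors — intel_uncore_*_fw() becomes intel_de_*_fw() — presumably because the sequence still runs inside the uncore->lock critical section left unconverted above, and the trailing intel_de_read_fw() of DSPARB keeps serving as the posting read. Second, each sprite FIFO start offset is nine bits (fifo_size is pinned at 511 by the WARN), split as a low byte in DSPARB/DSPARB3 and a ninth bit in the matching *_HI field of DSPARB2 — hence the 0xff-wide field masks paired with ">> 8" companions. A standalone illustration of that split (field positions here are invented; the real ones come from VLV_FIFO()):

#include <stdint.h>

static inline void pack_fifo_start(unsigned int start, /* 0..511 */
                                   uint32_t *lo_reg,   /* DSPARB/DSPARB3 byte */
                                   uint32_t *hi_reg)   /* DSPARB2 *_HI bit */
{
        *lo_reg |= start & 0xff;        /* low eight bits */
        *hi_reg |= start >> 8;          /* ninth bit */
}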
@@ -2018,16 +2017,16 @@ static int vlv_compute_watermarks(struct intel_atomic_state *state,
return 0;
}
-static void vlv_merge_wm(struct drm_i915_private *dev_priv,
+static void vlv_merge_wm(struct intel_display *display,
struct vlv_wm_values *wm)
{
struct intel_crtc *crtc;
int num_active_pipes = 0;
- wm->level = dev_priv->display.wm.num_levels - 1;
+ wm->level = display->wm.num_levels - 1;
wm->cxsr = true;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
if (!crtc->active)
@@ -2046,7 +2045,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
if (num_active_pipes > 1)
wm->level = VLV_WM_LEVEL_PM2;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
enum pipe pipe = crtc->pipe;
@@ -2061,35 +2060,35 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
}
}
-static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
+static void vlv_program_watermarks(struct intel_display *display)
{
- struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
+ struct vlv_wm_values *old_wm = &display->wm.vlv;
struct vlv_wm_values new_wm = {};
- vlv_merge_wm(dev_priv, &new_wm);
+ vlv_merge_wm(display, &new_wm);
if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
return;
if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
- chv_set_memory_dvfs(dev_priv, false);
+ chv_set_memory_dvfs(display, false);
if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
- chv_set_memory_pm5(dev_priv, false);
+ chv_set_memory_pm5(display, false);
if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, false);
+ _intel_set_memory_cxsr(display, false);
- vlv_write_wm_values(dev_priv, &new_wm);
+ vlv_write_wm_values(display, &new_wm);
if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, true);
+ _intel_set_memory_cxsr(display, true);
if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
- chv_set_memory_pm5(dev_priv, true);
+ chv_set_memory_pm5(display, true);
if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
- chv_set_memory_dvfs(dev_priv, true);
+ chv_set_memory_dvfs(display, true);
*old_wm = new_wm;
}
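
This is the same ordering invariant as the g4x path, extended across three thresholds: the deeper memory states are exited in one order (DDR DVFS, then PM5, then cxsr) before smaller watermarks are written, and re-entered in the reverse order afterwards, so the hardware never runs a deep state against stale, too-small watermarks. As a skeleton (illustrative, hypothetical names):

/*
 *      if (is_disabling(old, new, LEVEL))      exit_state(LEVEL);
 *      write_wm_values(new);
 *      if (is_enabling(old, new, LEVEL))       enter_state(LEVEL);
 */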
@@ -2097,33 +2096,33 @@ static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
static void vlv_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
- vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ vlv_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void vlv_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
- vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ vlv_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
-static void i965_update_wm(struct drm_i915_private *dev_priv)
+static void i965_update_wm(struct intel_display *display)
{
struct intel_crtc *crtc;
int srwm = 1;
@@ -2131,7 +2130,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
bool cxsr_enabled;
/* Calc sr entries for one plane configs */
- crtc = single_enabled_crtc(dev_priv);
+ crtc = single_enabled_crtc(display);
if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
@@ -2152,7 +2151,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
if (srwm < 0)
srwm = 1;
srwm &= 0x1ff;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"self-refresh entries: %d, wm: %d\n",
entries, srwm);
@@ -2167,7 +2166,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"self-refresh watermark: display plane %d "
"cursor %d\n", srwm, cursor_sr);
@@ -2175,39 +2174,38 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
} else {
cxsr_enabled = false;
/* Turn off self refresh if both pipes are enabled */
- intel_set_memory_cxsr(dev_priv, false);
+ intel_set_memory_cxsr(display, false);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
srwm);
/* 965 has limitations... */
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
- FW_WM(srwm, SR) |
- FW_WM(8, CURSORB) |
- FW_WM(8, PLANEB) |
- FW_WM(8, PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
- FW_WM(8, CURSORA) |
- FW_WM(8, PLANEC_OLD));
+ intel_de_write(display, DSPFW1(display),
+ FW_WM(srwm, SR) |
+ FW_WM(8, CURSORB) |
+ FW_WM(8, PLANEB) |
+ FW_WM(8, PLANEA));
+ intel_de_write(display, DSPFW2(display),
+ FW_WM(8, CURSORA) |
+ FW_WM(8, PLANEC_OLD));
/* update cursor SR watermark */
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
- FW_WM(cursor_sr, CURSOR_SR));
+ intel_de_write(display, DSPFW3(display),
+ FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
- intel_set_memory_cxsr(dev_priv, true);
+ intel_set_memory_cxsr(display, true);
}
#undef FW_WM
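
The #undef closes out FW_WM, defined further up the file (outside this diff). Judging from the _FW_WM extraction macros near the end of this patch, which reverse it with the same MASK/SHIFT pairs, it is roughly:

/* Reconstruction — the real definition lives earlier in the file. */
#define FW_WM(value, plane) \
        (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)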
-static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
+static struct intel_crtc *intel_crtc_for_plane(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- struct intel_display *display = &i915->display;
struct intel_plane *plane;
- for_each_intel_plane(&i915->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
if (plane->id == PLANE_PRIMARY &&
plane->i9xx_plane == i9xx_plane)
return intel_crtc_for_pipe(display, plane->pipe);
@@ -2216,7 +2214,7 @@ static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
return NULL;
}
-static void i9xx_update_wm(struct drm_i915_private *dev_priv)
+static void i9xx_update_wm(struct intel_display *display)
{
const struct intel_watermark_params *wm_info;
u32 fwater_lo;
@@ -2226,29 +2224,29 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
int planea_wm, planeb_wm;
struct intel_crtc *crtc;
- if (IS_I945GM(dev_priv))
+ if (display->platform.i945gm)
wm_info = &i945_wm_info;
- else if (DISPLAY_VER(dev_priv) != 2)
+ else if (DISPLAY_VER(display) != 2)
wm_info = &i915_wm_info;
else
wm_info = &i830_a_wm_info;
- if (DISPLAY_VER(dev_priv) == 2)
- fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
+ if (DISPLAY_VER(display) == 2)
+ fifo_size = i830_get_fifo_size(display, PLANE_A);
else
- fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
- crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
+ fifo_size = i9xx_get_fifo_size(display, PLANE_A);
+ crtc = intel_crtc_for_plane(display, PLANE_A);
if (intel_crtc_active(crtc)) {
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp;
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
cpp = 4;
else
cpp = fb->format->cpp[0];
- planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
+ planea_wm = intel_calculate_wm(display, crtc->config->pixel_rate,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
} else {
@@ -2257,25 +2255,25 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
planea_wm = wm_info->max_wm;
}
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
wm_info = &i830_bc_wm_info;
- if (DISPLAY_VER(dev_priv) == 2)
- fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
+ if (DISPLAY_VER(display) == 2)
+ fifo_size = i830_get_fifo_size(display, PLANE_B);
else
- fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
- crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
+ fifo_size = i9xx_get_fifo_size(display, PLANE_B);
+ crtc = intel_crtc_for_plane(display, PLANE_B);
if (intel_crtc_active(crtc)) {
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp;
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
cpp = 4;
else
cpp = fb->format->cpp[0];
- planeb_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
+ planeb_wm = intel_calculate_wm(display, crtc->config->pixel_rate,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
} else {
@@ -2284,11 +2282,11 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
planeb_wm = wm_info->max_wm;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
- crtc = single_enabled_crtc(dev_priv);
- if (IS_I915GM(dev_priv) && crtc) {
+ crtc = single_enabled_crtc(display);
+ if (display->platform.i915gm && crtc) {
struct drm_gem_object *obj;
obj = intel_fb_bo(crtc->base.primary->state->fb);
@@ -2304,10 +2302,10 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
cwm = 2;
/* Play safe and disable self-refresh before adjusting watermarks. */
- intel_set_memory_cxsr(dev_priv, false);
+ intel_set_memory_cxsr(display, false);
/* Calc sr entries for one plane configs */
- if (HAS_FW_BLC(dev_priv) && crtc) {
+ if (HAS_FW_BLC(display) && crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
const struct drm_display_mode *pipe_mode =
@@ -2320,7 +2318,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
int cpp;
int entries;
- if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+ if (display->platform.i915gm || display->platform.i945gm)
cpp = 4;
else
cpp = fb->format->cpp[0];
@@ -2328,20 +2326,20 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
entries = intel_wm_method2(pixel_rate, htotal, width, cpp,
sr_latency_ns / 100);
entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"self-refresh entries: %d\n", entries);
srwm = wm_info->fifo_size - entries;
if (srwm < 0)
srwm = 1;
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
- FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ if (display->platform.i945g || display->platform.i945gm)
+ intel_de_write(display, FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
else
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
+ intel_de_write(display, FW_BLC_SELF, srwm & 0x3f);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
planea_wm, planeb_wm, cwm, srwm);
@@ -2352,34 +2350,34 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
fwater_hi = fwater_hi | (1 << 8);
- intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
- intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
+ intel_de_write(display, FW_BLC, fwater_lo);
+ intel_de_write(display, FW_BLC2, fwater_hi);
if (crtc)
- intel_set_memory_cxsr(dev_priv, true);
+ intel_set_memory_cxsr(display, true);
}
-static void i845_update_wm(struct drm_i915_private *dev_priv)
+static void i845_update_wm(struct intel_display *display)
{
struct intel_crtc *crtc;
u32 fwater_lo;
int planea_wm;
- crtc = single_enabled_crtc(dev_priv);
+ crtc = single_enabled_crtc(display);
if (crtc == NULL)
return;
- planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
+ planea_wm = intel_calculate_wm(display, crtc->config->pixel_rate,
&i845_wm_info,
- i845_get_fifo_size(dev_priv, PLANE_A),
+ i845_get_fifo_size(display, PLANE_A),
4, pessimal_latency_ns);
- fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
+ fwater_lo = intel_de_read(display, FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting FIFO watermarks - A: %d\n", planea_wm);
- intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
+ intel_de_write(display, FW_BLC, fwater_lo);
}
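
Both i9xx_update_wm() and i845_update_wm() delegate to intel_calculate_wm(), whose body is outside this diff. A sketch of its core, reconstructed from the call sites (pixel rate in kHz, pessimal_latency_ns in nanoseconds); the guard_size field and the exact rounding are assumptions:

static int calculate_wm_sketch(int pixel_rate,
                               const struct intel_watermark_params *wm,
                               int fifo_size, int cpp,
                               unsigned int latency_ns)
{
        int entries, wm_size;

        /* bytes drained from the FIFO while one request is outstanding */
        entries = DIV_ROUND_UP(pixel_rate * cpp * (latency_ns / 100), 10000);
        /* round up to cachelines, plus an assumed guard margin */
        entries = DIV_ROUND_UP(entries, wm->cacheline_size) + wm->guard_size;

        wm_size = fifo_size - entries;
        if (wm_size > wm->max_wm)
                wm_size = wm->max_wm;

        return wm_size;
}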
/* latency must be in 0.1us units. */
@@ -2534,24 +2532,24 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
}
static unsigned int
-ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
+ilk_display_fifo_size(struct intel_display *display)
{
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
return 3072;
- else if (DISPLAY_VER(dev_priv) >= 7)
+ else if (DISPLAY_VER(display) >= 7)
return 768;
else
return 512;
}
static unsigned int
-ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
+ilk_plane_wm_reg_max(struct intel_display *display,
int level, bool is_sprite)
{
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
/* BDW primary/sprite plane watermarks */
return level == 0 ? 255 : 2047;
- else if (DISPLAY_VER(dev_priv) >= 7)
+ else if (DISPLAY_VER(display) >= 7)
/* IVB/HSW primary/sprite plane watermarks */
return level == 0 ? 127 : 1023;
else if (!is_sprite)
@@ -2563,30 +2561,30 @@ ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
}
static unsigned int
-ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
+ilk_cursor_wm_reg_max(struct intel_display *display, int level)
{
- if (DISPLAY_VER(dev_priv) >= 7)
+ if (DISPLAY_VER(display) >= 7)
return level == 0 ? 63 : 255;
else
return level == 0 ? 31 : 63;
}
-static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
+static unsigned int ilk_fbc_wm_reg_max(struct intel_display *display)
{
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
return 31;
else
return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
-static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
+static unsigned int ilk_plane_wm_max(struct intel_display *display,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
bool is_sprite)
{
- unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
+ unsigned int fifo_size = ilk_display_fifo_size(display);
/* if sprites aren't enabled, sprites get nothing */
if (is_sprite && !config->sprites_enabled)
@@ -2594,14 +2592,14 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
/* HSW allows LP1+ watermarks even with multiple pipes */
if (level == 0 || config->num_pipes_active > 1) {
- fifo_size /= INTEL_NUM_PIPES(dev_priv);
+ fifo_size /= INTEL_NUM_PIPES(display);
/*
* For some reason the non self refresh
* FIFO size is only half of the self
* refresh FIFO size on ILK/SNB.
*/
- if (DISPLAY_VER(dev_priv) < 7)
+ if (DISPLAY_VER(display) < 7)
fifo_size /= 2;
}
@@ -2617,11 +2615,11 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
}
/* clamp to max that the registers can hold */
- return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
+ return min(fifo_size, ilk_plane_wm_reg_max(display, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
-static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
+static unsigned int ilk_cursor_wm_max(struct intel_display *display,
int level,
const struct intel_wm_config *config)
{
@@ -2630,32 +2628,32 @@ static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
return 64;
/* otherwise just report max that registers can hold */
- return ilk_cursor_wm_reg_max(dev_priv, level);
+ return ilk_cursor_wm_reg_max(display, level);
}
-static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_maximums(struct intel_display *display,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
- max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
- max->cur = ilk_cursor_wm_max(dev_priv, level, config);
- max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+ max->pri = ilk_plane_wm_max(display, level, config, ddb_partitioning, false);
+ max->spr = ilk_plane_wm_max(display, level, config, ddb_partitioning, true);
+ max->cur = ilk_cursor_wm_max(display, level, config);
+ max->fbc = ilk_fbc_wm_reg_max(display);
}
-static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_reg_maximums(struct intel_display *display,
int level,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
- max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
- max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
- max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+ max->pri = ilk_plane_wm_reg_max(display, level, false);
+ max->spr = ilk_plane_wm_reg_max(display, level, true);
+ max->cur = ilk_cursor_wm_reg_max(display, level);
+ max->fbc = ilk_fbc_wm_reg_max(display);
}
-static bool ilk_validate_wm_level(struct drm_i915_private *i915,
+static bool ilk_validate_wm_level(struct intel_display *display,
int level,
const struct ilk_wm_maximums *max,
struct intel_wm_level *result)
@@ -2679,15 +2677,15 @@ static bool ilk_validate_wm_level(struct drm_i915_private *i915,
*/
if (level == 0 && !result->enable) {
if (result->pri_val > max->pri)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Primary WM%d too large %u (max %u)\n",
level, result->pri_val, max->pri);
if (result->spr_val > max->spr)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Sprite WM%d too large %u (max %u)\n",
level, result->spr_val, max->spr);
if (result->cur_val > max->cur)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Cursor WM%d too large %u (max %u)\n",
level, result->cur_val, max->cur);
@@ -2700,7 +2698,7 @@ static bool ilk_validate_wm_level(struct drm_i915_private *i915,
return ret;
}
-static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_level(struct intel_display *display,
const struct intel_crtc *crtc,
int level,
struct intel_crtc_state *crtc_state,
@@ -2709,9 +2707,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
- u16 pri_latency = dev_priv->display.wm.pri_latency[level];
- u16 spr_latency = dev_priv->display.wm.spr_latency[level];
- u16 cur_latency = dev_priv->display.wm.cur_latency[level];
+ u16 pri_latency = display->wm.pri_latency[level];
+ u16 spr_latency = display->wm.spr_latency[level];
+ u16 cur_latency = display->wm.cur_latency[level];
/* WM1+ latency values stored in 0.5us units */
if (level > 0) {
@@ -2735,11 +2733,12 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
result->enable = true;
}
-static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void hsw_read_wm_latency(struct intel_display *display, u16 wm[])
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u64 sskpd;
- i915->display.wm.num_levels = 5;
+ display->wm.num_levels = 5;
sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
@@ -2752,11 +2751,12 @@ static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
}
-static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void snb_read_wm_latency(struct intel_display *display, u16 wm[])
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 sskpd;
- i915->display.wm.num_levels = 4;
+ display->wm.num_levels = 4;
sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
@@ -2766,11 +2766,12 @@ static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
}
-static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void ilk_read_wm_latency(struct intel_display *display, u16 wm[])
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 mltr;
- i915->display.wm.num_levels = 3;
+ display->wm.num_levels = 3;
mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
@@ -2780,24 +2781,21 @@ static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
}
-static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5])
+static void intel_fixup_spr_wm_latency(struct intel_display *display, u16 wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
- if (DISPLAY_VER(dev_priv) == 5)
+ if (DISPLAY_VER(display) == 5)
wm[0] = 13;
}
-static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5])
+static void intel_fixup_cur_wm_latency(struct intel_display *display, u16 wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (DISPLAY_VER(dev_priv) == 5)
+ if (DISPLAY_VER(display) == 5)
wm[0] = 13;
}
-static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5], u16 min)
+static bool ilk_increase_wm_latency(struct intel_display *display, u16 wm[5], u16 min)
{
int level;
@@ -2805,13 +2803,13 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
return false;
wm[0] = max(wm[0], min);
- for (level = 1; level < dev_priv->display.wm.num_levels; level++)
+ for (level = 1; level < display->wm.num_levels; level++)
wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
return true;
}
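
snb_wm_latency_quirk() below calls this with min == 12, i.e. 1.2 us in the 0.1 us units WM0 uses. WM1+ latencies are stored in 0.5 us units (see the conversion comment in ilk_compute_wm_level() above), so the same floor becomes DIV_ROUND_UP(12, 5) = 3, i.e. 1.5 us once rounded up.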
-static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
+static void snb_wm_latency_quirk(struct intel_display *display)
{
bool changed;
@@ -2819,21 +2817,21 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
* The BIOS provided WM memory latency values are often
* inadequate for high resolution displays. Adjust them.
*/
- changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
+ changed = ilk_increase_wm_latency(display, display->wm.pri_latency, 12);
+ changed |= ilk_increase_wm_latency(display, display->wm.spr_latency, 12);
+ changed |= ilk_increase_wm_latency(display, display->wm.cur_latency, 12);
if (!changed)
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"WM latency values increased to avoid potential underruns\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+ intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+ intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+ intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
}
-static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
+static void snb_wm_lp3_irq_quirk(struct intel_display *display)
{
/*
* On some SNB machines (Thinkpad X220 Tablet at least)
@@ -2846,50 +2844,50 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
* interrupts only. To play it safe we disable LP3
* watermarks entirely.
*/
- if (dev_priv->display.wm.pri_latency[3] == 0 &&
- dev_priv->display.wm.spr_latency[3] == 0 &&
- dev_priv->display.wm.cur_latency[3] == 0)
+ if (display->wm.pri_latency[3] == 0 &&
+ display->wm.spr_latency[3] == 0 &&
+ display->wm.cur_latency[3] == 0)
return;
- dev_priv->display.wm.pri_latency[3] = 0;
- dev_priv->display.wm.spr_latency[3] = 0;
- dev_priv->display.wm.cur_latency[3] = 0;
+ display->wm.pri_latency[3] = 0;
+ display->wm.spr_latency[3] = 0;
+ display->wm.cur_latency[3] = 0;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"LP3 watermarks disabled due to potential for lost interrupts\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+ intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+ intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+ intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
}
-static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void ilk_setup_wm_latency(struct intel_display *display)
{
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
- else if (DISPLAY_VER(dev_priv) >= 6)
- snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ if (display->platform.broadwell || display->platform.haswell)
+ hsw_read_wm_latency(display, display->wm.pri_latency);
+ else if (DISPLAY_VER(display) >= 6)
+ snb_read_wm_latency(display, display->wm.pri_latency);
else
- ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ ilk_read_wm_latency(display, display->wm.pri_latency);
- memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
- sizeof(dev_priv->display.wm.pri_latency));
- memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
- sizeof(dev_priv->display.wm.pri_latency));
+ memcpy(display->wm.spr_latency, display->wm.pri_latency,
+ sizeof(display->wm.pri_latency));
+ memcpy(display->wm.cur_latency, display->wm.pri_latency,
+ sizeof(display->wm.pri_latency));
- intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
- intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
+ intel_fixup_spr_wm_latency(display, display->wm.spr_latency);
+ intel_fixup_cur_wm_latency(display, display->wm.cur_latency);
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+ intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+ intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+ intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
- if (DISPLAY_VER(dev_priv) == 6) {
- snb_wm_latency_quirk(dev_priv);
- snb_wm_lp3_irq_quirk(dev_priv);
+ if (DISPLAY_VER(display) == 6) {
+ snb_wm_latency_quirk(display);
+ snb_wm_lp3_irq_quirk(display);
}
}
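
So the setup path seeds the primary latency table from one hardware source — 64-bit SSKPD on HSW/BDW, 32-bit SSKPD on SNB/IVB, MLTR on ILK — copies it to the sprite and cursor tables, applies the ILK fixups, and only then layers the SNB quirks on top. The REG_FIELD_GET() helpers above extract a contiguous bitfield; a standalone equivalent, for illustration:

#include <stdint.h>

/* Illustrative stand-in for REG_FIELD_GET(): mask, then shift to bit 0. */
static inline uint32_t field_get(uint32_t mask, uint32_t val)
{
        return (val & mask) >> __builtin_ctz(mask);
}

/* e.g. field_get(0x0000ff00, 0x00012300) == 0x23 */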
-static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
+static bool ilk_validate_pipe_wm(struct intel_display *display,
struct intel_pipe_wm *pipe_wm)
{
/* LP0 watermark maximums depend on this pipe alone */
@@ -2901,11 +2899,11 @@ static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
struct ilk_wm_maximums max;
/* LP0 watermarks always use 1/2 DDB partitioning */
- ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_compute_wm_maximums(display, 0, &config, INTEL_DDB_PART_1_2, &max);
/* At least LP0 must be valid */
- if (!ilk_validate_wm_level(dev_priv, 0, &max, &pipe_wm->wm[0])) {
- drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
+ if (!ilk_validate_wm_level(display, 0, &max, &pipe_wm->wm[0])) {
+ drm_dbg_kms(display->drm, "LP0 watermark invalid\n");
return false;
}
@@ -2916,7 +2914,7 @@ static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_pipe_wm *pipe_wm;
@@ -2943,10 +2941,10 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
- usable_level = dev_priv->display.wm.num_levels - 1;
+ usable_level = display->wm.num_levels - 1;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (DISPLAY_VER(dev_priv) < 7 && pipe_wm->sprites_enabled)
+ if (DISPLAY_VER(display) < 7 && pipe_wm->sprites_enabled)
usable_level = 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
@@ -2954,18 +2952,18 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
usable_level = 0;
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
+ ilk_compute_wm_level(display, crtc, 0, crtc_state,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
- if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
+ if (!ilk_validate_pipe_wm(display, pipe_wm))
return -EINVAL;
- ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
+ ilk_compute_wm_reg_maximums(display, 1, &max);
for (level = 1; level <= usable_level; level++) {
struct intel_wm_level *wm = &pipe_wm->wm[level];
- ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
+ ilk_compute_wm_level(display, crtc, level, crtc_state,
pristate, sprstate, curstate, wm);
/*
@@ -2973,7 +2971,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
* register maximums since such watermarks are
* always invalid.
*/
- if (!ilk_validate_wm_level(dev_priv, level, &max, wm)) {
+ if (!ilk_validate_wm_level(display, level, &max, wm)) {
memset(wm, 0, sizeof(*wm));
break;
}
@@ -2990,7 +2988,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
@@ -3015,7 +3013,7 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
intermediate->sprites_enabled |= active->sprites_enabled;
intermediate->sprites_scaled |= active->sprites_scaled;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct intel_wm_level *intermediate_wm = &intermediate->wm[level];
const struct intel_wm_level *active_wm = &active->wm[level];
@@ -3036,7 +3034,7 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
* there's no safe way to transition from the old state to
* the new state, so we need to fail the atomic transaction.
*/
- if (!ilk_validate_pipe_wm(dev_priv, intermediate))
+ if (!ilk_validate_pipe_wm(display, intermediate))
return -EINVAL;
/*
@@ -3068,7 +3066,7 @@ static int ilk_compute_watermarks(struct intel_atomic_state *state,
/*
* Merge the watermarks from all active pipes for a specific level.
*/
-static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
+static void ilk_merge_wm_level(struct intel_display *display,
int level,
struct intel_wm_level *ret_wm)
{
@@ -3076,7 +3074,7 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
ret_wm->enable = true;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
const struct intel_wm_level *wm = &active->wm[level];
@@ -3101,31 +3099,31 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
/*
* Merge all low power watermarks for all active pipes.
*/
-static void ilk_wm_merge(struct drm_i915_private *dev_priv,
+static void ilk_wm_merge(struct intel_display *display,
const struct intel_wm_config *config,
const struct ilk_wm_maximums *max,
struct intel_pipe_wm *merged)
{
- int level, num_levels = dev_priv->display.wm.num_levels;
+ int level, num_levels = display->wm.num_levels;
int last_enabled_level = num_levels - 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
- if ((DISPLAY_VER(dev_priv) < 7 || IS_IVYBRIDGE(dev_priv)) &&
+ if ((DISPLAY_VER(display) < 7 || display->platform.ivybridge) &&
config->num_pipes_active > 1)
last_enabled_level = 0;
/* ILK: FBC WM must be disabled always */
- merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
+ merged->fbc_wm_enabled = DISPLAY_VER(display) >= 6;
/* merge each WM1+ level */
for (level = 1; level < num_levels; level++) {
struct intel_wm_level *wm = &merged->wm[level];
- ilk_merge_wm_level(dev_priv, level, wm);
+ ilk_merge_wm_level(display, level, wm);
if (level > last_enabled_level)
wm->enable = false;
- else if (!ilk_validate_wm_level(dev_priv, level, max, wm))
+ else if (!ilk_validate_wm_level(display, level, max, wm))
/* make sure all following levels get disabled */
last_enabled_level = level - 1;
@@ -3141,8 +3139,8 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
}
/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
- if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
- dev_priv->display.params.enable_fbc && !merged->fbc_wm_enabled) {
+ if (DISPLAY_VER(display) == 5 && HAS_FBC(display) &&
+ display->params.enable_fbc && !merged->fbc_wm_enabled) {
for (level = 2; level < num_levels; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -3158,16 +3156,16 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
}
/* The value we need to program into the WM_LPx latency field */
-static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
+static unsigned int ilk_wm_lp_latency(struct intel_display *display,
int level)
{
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (display->platform.haswell || display->platform.broadwell)
return 2 * level;
else
- return dev_priv->display.wm.pri_latency[level];
+ return display->wm.pri_latency[level];
}
-static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_results(struct intel_display *display,
const struct intel_pipe_wm *merged,
enum intel_ddb_partitioning partitioning,
struct ilk_wm_values *results)
@@ -3191,14 +3189,14 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* disabled. Doing otherwise could cause underruns.
*/
results->wm_lp[wm_lp - 1] =
- WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
+ WM_LP_LATENCY(ilk_wm_lp_latency(display, level)) |
WM_LP_PRIMARY(r->pri_val) |
WM_LP_CURSOR(r->cur_val);
if (r->enable)
results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
else
results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
@@ -3209,19 +3207,19 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
* level is disabled. Doing otherwise could cause underruns.
*/
- if (DISPLAY_VER(dev_priv) < 7 && r->spr_val) {
- drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
+ if (DISPLAY_VER(display) < 7 && r->spr_val) {
+ drm_WARN_ON(display->drm, wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
}
}
/* LP0 register values */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
enum pipe pipe = crtc->pipe;
const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
const struct intel_wm_level *r = &pipe_wm->wm[0];
- if (drm_WARN_ON(&dev_priv->drm, !r->enable))
+ if (drm_WARN_ON(display->drm, !r->enable))
continue;
results->wm_pipe[pipe] =
@@ -3236,13 +3234,13 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* case both are at the same level. Prefer r1 in case they're the same.
*/
static struct intel_pipe_wm *
-ilk_find_best_result(struct drm_i915_private *dev_priv,
+ilk_find_best_result(struct intel_display *display,
struct intel_pipe_wm *r1,
struct intel_pipe_wm *r2)
{
int level, level1 = 0, level2 = 0;
- for (level = 1; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 1; level < display->wm.num_levels; level++) {
if (r1->wm[level].enable)
level1 = level;
if (r2->wm[level].enable)
@@ -3268,7 +3266,7 @@ ilk_find_best_result(struct drm_i915_private *dev_priv,
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
-static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
+static unsigned int ilk_compute_wm_dirty(struct intel_display *display,
const struct ilk_wm_values *old,
const struct ilk_wm_values *new)
{
@@ -3276,7 +3274,7 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
enum pipe pipe;
int wm_lp;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
dirty |= WM_DIRTY_PIPE(pipe);
/* Must disable LP1+ watermarks too */
@@ -3314,25 +3312,25 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
return dirty;
}
-static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
+static bool _ilk_disable_lp_wm(struct intel_display *display,
unsigned int dirty)
{
- struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+ struct ilk_wm_values *previous = &display->wm.hw;
bool changed = false;
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
previous->wm_lp[2] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
+ intel_de_write(display, WM3_LP_ILK, previous->wm_lp[2]);
changed = true;
}
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
previous->wm_lp[1] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
+ intel_de_write(display, WM2_LP_ILK, previous->wm_lp[1]);
changed = true;
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
previous->wm_lp[0] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
+ intel_de_write(display, WM1_LP_ILK, previous->wm_lp[0]);
changed = true;
}
@@ -3348,73 +3346,73 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
* The spec says we shouldn't write when we don't need, because every write
* causes WMs to be re-evaluated, expending some power.
*/
-static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
+static void ilk_write_wm_values(struct intel_display *display,
struct ilk_wm_values *results)
{
- struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+ struct ilk_wm_values *previous = &display->wm.hw;
unsigned int dirty;
- dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
+ dirty = ilk_compute_wm_dirty(display, previous, results);
if (!dirty)
return;
- _ilk_disable_lp_wm(dev_priv, dirty);
+ _ilk_disable_lp_wm(display, dirty);
if (dirty & WM_DIRTY_PIPE(PIPE_A))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
+ intel_de_write(display, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
if (dirty & WM_DIRTY_PIPE(PIPE_B))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
+ intel_de_write(display, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
if (dirty & WM_DIRTY_PIPE(PIPE_C))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
+ intel_de_write(display, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
if (dirty & WM_DIRTY_DDB) {
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
- results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
- WM_MISC_DATA_PARTITION_5_6);
+ if (display->platform.haswell || display->platform.broadwell)
+ intel_de_rmw(display, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ WM_MISC_DATA_PARTITION_5_6);
else
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
- results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
- DISP_DATA_PARTITION_5_6);
+ intel_de_rmw(display, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ DISP_DATA_PARTITION_5_6);
}
if (dirty & WM_DIRTY_FBC)
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS,
- results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
+ intel_de_rmw(display, DISP_ARB_CTL, DISP_FBC_WM_DIS,
+ results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
if (dirty & WM_DIRTY_LP(1) &&
previous->wm_lp_spr[0] != results->wm_lp_spr[0])
- intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
+ intel_de_write(display, WM1S_LP_ILK, results->wm_lp_spr[0]);
- if (DISPLAY_VER(dev_priv) >= 7) {
+ if (DISPLAY_VER(display) >= 7) {
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
- intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
+ intel_de_write(display, WM2S_LP_IVB, results->wm_lp_spr[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
- intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
+ intel_de_write(display, WM3S_LP_IVB, results->wm_lp_spr[2]);
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
+ intel_de_write(display, WM1_LP_ILK, results->wm_lp[0]);
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
+ intel_de_write(display, WM2_LP_ILK, results->wm_lp[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
+ intel_de_write(display, WM3_LP_ILK, results->wm_lp[2]);
- dev_priv->display.wm.hw = *results;
+ display->wm.hw = *results;
}
-bool ilk_disable_cxsr(struct drm_i915_private *dev_priv)
+bool ilk_disable_cxsr(struct intel_display *display)
{
- return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
+ return _ilk_disable_lp_wm(display, WM_DIRTY_LP_ALL);
}
-static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_config(struct intel_display *display,
struct intel_wm_config *config)
{
struct intel_crtc *crtc;
/* Compute the currently _active_ config */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
if (!wm->pipe_enabled)
@@ -3426,7 +3424,7 @@ static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
}
}
-static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_program_watermarks(struct intel_display *display)
{
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
@@ -3434,18 +3432,18 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
struct ilk_wm_values results = {};
enum intel_ddb_partitioning partitioning;
- ilk_compute_wm_config(dev_priv, &config);
+ ilk_compute_wm_config(display, &config);
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
- ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
+ ilk_compute_wm_maximums(display, 1, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_wm_merge(display, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
- if (DISPLAY_VER(dev_priv) >= 7 &&
+ if (DISPLAY_VER(display) >= 7 &&
config.num_pipes_active == 1 && config.sprites_enabled) {
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
- ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
+ ilk_compute_wm_maximums(display, 1, &config, INTEL_DDB_PART_5_6, &max);
+ ilk_wm_merge(display, &config, &max, &lp_wm_5_6);
- best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
+ best_lp_wm = ilk_find_best_result(display, &lp_wm_1_2, &lp_wm_5_6);
} else {
best_lp_wm = &lp_wm_1_2;
}
@@ -3453,50 +3451,49 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
partitioning = (best_lp_wm == &lp_wm_1_2) ?
INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
- ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
+ ilk_compute_wm_results(display, best_lp_wm, partitioning, &results);
- ilk_write_wm_values(dev_priv, &results);
+ ilk_write_wm_values(display, &results);
}
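
On IVB+ with a single active pipe and sprites enabled, the merge is computed twice — once per DDB partitioning mode — and ilk_find_best_result() keeps whichever candidate sustains the deeper LP level (preferring the 1/2 split on a tie, per its comment). The write-out then derives the WM_MISC/DISP_ARB_CTL2 partitioning bit from which candidate won.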
static void ilk_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
- ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ ilk_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
- ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ ilk_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+ struct intel_display *display = to_intel_display(crtc);
+ struct ilk_wm_values *hw = &display->wm.hw;
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
enum pipe pipe = crtc->pipe;
- hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
+ hw->wm_pipe[pipe] = intel_de_read(display, WM0_PIPE_ILK(pipe));
memset(active, 0, sizeof(*active));
@@ -3523,7 +3520,7 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
* should be marked as enabled but zeroed,
* which is what we'd compute them to.
*/
- for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+ for (level = 0; level < display->wm.num_levels; level++)
active->wm[level].enable = true;
}
@@ -3572,7 +3569,7 @@ static int ilk_sanitize_watermarks_add_affected(struct drm_atomic_state *state)
* through the atomic check code to calculate new watermark values in the
* state object.
*/
-void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
+void ilk_wm_sanitize(struct intel_display *display)
{
struct drm_atomic_state *state;
struct intel_atomic_state *intel_state;
@@ -3583,14 +3580,14 @@ void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
int i;
/* Only supported on platforms that use atomic watermark design */
- if (!dev_priv->display.funcs.wm->optimize_watermarks)
+ if (!display->funcs.wm->optimize_watermarks)
return;
- if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 9))
+ if (drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 9))
return;
- state = drm_atomic_state_alloc(&dev_priv->drm);
- if (drm_WARN_ON(&dev_priv->drm, !state))
+ state = drm_atomic_state_alloc(display->drm);
+ if (drm_WARN_ON(display->drm, !state))
return;
intel_state = to_intel_atomic_state(state);
@@ -3606,14 +3603,14 @@ retry:
* intermediate watermarks (since we don't trust the current
* watermarks).
*/
- if (!HAS_GMCH(dev_priv))
+ if (!HAS_GMCH(display))
intel_state->skip_intermediate_wm = true;
ret = ilk_sanitize_watermarks_add_affected(state);
if (ret)
goto fail;
- ret = intel_atomic_check(&dev_priv->drm, state);
+ ret = intel_atomic_check(display->drm, state);
if (ret)
goto fail;
@@ -3643,7 +3640,7 @@ fail:
* If this actually happens, we'll have to just leave the
* BIOS-programmed watermarks untouched and hope for the best.
*/
- drm_WARN(&dev_priv->drm, ret,
+ drm_WARN(display->drm, ret,
"Could not determine valid watermarks for inherited state\n");
drm_atomic_state_put(state);
@@ -3657,18 +3654,18 @@ fail:
#define _FW_WM_VLV(value, plane) \
(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
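For illustration, assuming the non-VLV _FW_WM companion macro (defined above this hunk) has the same shape without the widened VLV mask, both helpers expand to a plain mask-and-shift field read:

/* e.g. _FW_WM_VLV(tmp, PLANEB) expands to:
 *   ((tmp & DSPFW_PLANEB_MASK_VLV) >> DSPFW_PLANEB_SHIFT)
 * and _FW_WM(tmp, SR) likewise reads the DSPFW_SR field.
 */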
-static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
+static void g4x_read_wm_values(struct intel_display *display,
struct g4x_wm_values *wm)
{
u32 tmp;
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ tmp = intel_de_read(display, DSPFW1(display));
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2(dev_priv));
+ tmp = intel_de_read(display, DSPFW2(display));
wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
wm->sr.fbc = _FW_WM(tmp, FBC_SR);
wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
@@ -3676,21 +3673,21 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ tmp = intel_de_read(display, DSPFW3(display));
wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
}
-static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
+static void vlv_read_wm_values(struct intel_display *display,
struct vlv_wm_values *wm)
{
enum pipe pipe;
u32 tmp;
- for_each_pipe(dev_priv, pipe) {
- tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
+ for_each_pipe(display, pipe) {
+ tmp = intel_de_read(display, VLV_DDL(pipe));
wm->ddl[pipe].plane[PLANE_PRIMARY] =
(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
@@ -3702,34 +3699,34 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
}
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ tmp = intel_de_read(display, DSPFW1(display));
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2(dev_priv));
+ tmp = intel_de_read(display, DSPFW2(display));
wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ tmp = intel_de_read(display, DSPFW3(display));
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
- if (IS_CHERRYVIEW(dev_priv)) {
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
+ if (display->platform.cherryview) {
+ tmp = intel_de_read(display, DSPFW7_CHV);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
+ tmp = intel_de_read(display, DSPFW8_CHV);
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
+ tmp = intel_de_read(display, DSPFW9_CHV);
wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+ tmp = intel_de_read(display, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
@@ -3741,11 +3738,11 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
} else {
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
+ tmp = intel_de_read(display, DSPFW7);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+ tmp = intel_de_read(display, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
@@ -3759,16 +3756,16 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
#undef _FW_WM
#undef _FW_WM_VLV
-static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void g4x_wm_get_hw_state(struct intel_display *display)
{
- struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
+ struct g4x_wm_values *wm = &display->wm.g4x;
struct intel_crtc *crtc;
- g4x_read_wm_values(dev_priv, wm);
+ g4x_read_wm_values(display, wm);
- wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ wm->cxsr = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct g4x_wm_state *active = &crtc->wm.active.g4x;
@@ -3833,7 +3830,7 @@ static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc_state->wm.g4x.optimal = *active;
crtc_state->wm.g4x.intermediate = *active;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
pipe_name(pipe),
wm->pipe[pipe].plane[PLANE_PRIMARY],
@@ -3841,26 +3838,25 @@ static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
wm->pipe[pipe].plane[PLANE_SPRITE0]);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
- drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
+ drm_dbg_kms(display->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
str_yes_no(wm->fbc_en));
}
-static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
+static void g4x_wm_sanitize(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
- for_each_intel_plane(&dev_priv->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_crtc *crtc =
intel_crtc_for_pipe(display, plane->pipe);
struct intel_crtc_state *crtc_state =
@@ -3873,7 +3869,7 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
if (plane_state->uapi.visible)
continue;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw =
&crtc_state->wm.g4x.raw[level];
@@ -3884,36 +3880,37 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
}
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
int ret;
ret = _g4x_compute_pipe_wm(crtc_state);
- drm_WARN_ON(&dev_priv->drm, ret);
+ drm_WARN_ON(display->drm, ret);
crtc_state->wm.g4x.intermediate =
crtc_state->wm.g4x.optimal;
crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
}
- g4x_program_watermarks(dev_priv);
+ g4x_program_watermarks(display);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ mutex_unlock(&display->wm.wm_mutex);
}
-static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void vlv_wm_get_hw_state(struct intel_display *display)
{
- struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct vlv_wm_values *wm = &display->wm.vlv;
struct intel_crtc *crtc;
u32 val;
- vlv_read_wm_values(dev_priv, wm);
+ vlv_read_wm_values(display, wm);
- wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ wm->cxsr = intel_de_read(display, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
wm->level = VLV_WM_LEVEL_PM2;
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
vlv_punit_get(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
@@ -3935,10 +3932,10 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Punit not acking DDR DVFS request, "
"assuming DDR DVFS is disabled\n");
- dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
+ display->wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
} else {
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
if ((val & FORCE_DDR_HIGH_FREQ) == 0)
@@ -3948,7 +3945,7 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
vlv_punit_put(dev_priv);
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct vlv_wm_state *active = &crtc->wm.active.vlv;
@@ -3988,7 +3985,7 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc_state->wm.vlv.optimal = *active;
crtc_state->wm.vlv.intermediate = *active;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
pipe_name(pipe),
wm->pipe[pipe].plane[PLANE_PRIMARY],
@@ -3997,20 +3994,19 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
wm->pipe[pipe].plane[PLANE_SPRITE1]);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
-static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
+static void vlv_wm_sanitize(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
- for_each_intel_plane(&dev_priv->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_crtc *crtc =
intel_crtc_for_pipe(display, plane->pipe);
struct intel_crtc_state *crtc_state =
@@ -4023,7 +4019,7 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
if (plane_state->uapi.visible)
continue;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[level];
@@ -4031,33 +4027,33 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
}
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
int ret;
ret = _vlv_compute_pipe_wm(crtc_state);
- drm_WARN_ON(&dev_priv->drm, ret);
+ drm_WARN_ON(display->drm, ret);
crtc_state->wm.vlv.intermediate =
crtc_state->wm.vlv.optimal;
crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
}
- vlv_program_watermarks(dev_priv);
+ vlv_program_watermarks(display);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ mutex_unlock(&display->wm.wm_mutex);
}
/*
* FIXME should probably kill this and improve
* the real watermark readout/sanitation instead
*/
-static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_init_lp_watermarks(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0);
- intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0);
- intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0);
+ intel_de_rmw(display, WM3_LP_ILK, WM_LP_ENABLE, 0);
+ intel_de_rmw(display, WM2_LP_ILK, WM_LP_ENABLE, 0);
+ intel_de_rmw(display, WM1_LP_ILK, WM_LP_ENABLE, 0);
/*
* Don't touch WM_LP_SPRITE_ENABLE here.
@@ -4065,37 +4061,37 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
*/
}
-static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void ilk_wm_get_hw_state(struct intel_display *display)
{
- struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+ struct ilk_wm_values *hw = &display->wm.hw;
struct intel_crtc *crtc;
- ilk_init_lp_watermarks(dev_priv);
+ ilk_init_lp_watermarks(display);
- for_each_intel_crtc(&dev_priv->drm, crtc)
+ for_each_intel_crtc(display->drm, crtc)
ilk_pipe_wm_get_hw_state(crtc);
- hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
- hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
- hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
+ hw->wm_lp[0] = intel_de_read(display, WM1_LP_ILK);
+ hw->wm_lp[1] = intel_de_read(display, WM2_LP_ILK);
+ hw->wm_lp[2] = intel_de_read(display, WM3_LP_ILK);
- hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
- if (DISPLAY_VER(dev_priv) >= 7) {
- hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
- hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
+ hw->wm_lp_spr[0] = intel_de_read(display, WM1S_LP_ILK);
+ if (DISPLAY_VER(display) >= 7) {
+ hw->wm_lp_spr[1] = intel_de_read(display, WM2S_LP_IVB);
+ hw->wm_lp_spr[2] = intel_de_read(display, WM3S_LP_IVB);
}
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) &
+ if (display->platform.haswell || display->platform.broadwell)
+ hw->partitioning = (intel_de_read(display, WM_MISC) &
WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
- else if (IS_IVYBRIDGE(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) &
+ else if (display->platform.ivybridge)
+ hw->partitioning = (intel_de_read(display, DISP_ARB_CTL2) &
DISP_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
hw->enable_fbc_wm =
- !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+ !(intel_de_read(display, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
static const struct intel_wm_funcs ilk_wm_funcs = {
@@ -4145,39 +4141,39 @@ static const struct intel_wm_funcs i845_wm_funcs = {
static const struct intel_wm_funcs nop_funcs = {
};
-void i9xx_wm_init(struct drm_i915_private *dev_priv)
+void i9xx_wm_init(struct intel_display *display)
{
/* For FIFO watermark updates */
- if (HAS_PCH_SPLIT(dev_priv)) {
- ilk_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &ilk_wm_funcs;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &vlv_wm_funcs;
- } else if (IS_G4X(dev_priv)) {
- g4x_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &g4x_wm_funcs;
- } else if (IS_PINEVIEW(dev_priv)) {
- if (!pnv_get_cxsr_latency(dev_priv)) {
- drm_info(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
+ if (HAS_PCH_SPLIT(display)) {
+ ilk_setup_wm_latency(display);
+ display->funcs.wm = &ilk_wm_funcs;
+ } else if (display->platform.valleyview || display->platform.cherryview) {
+ vlv_setup_wm_latency(display);
+ display->funcs.wm = &vlv_wm_funcs;
+ } else if (display->platform.g4x) {
+ g4x_setup_wm_latency(display);
+ display->funcs.wm = &g4x_wm_funcs;
+ } else if (display->platform.pineview) {
+ if (!pnv_get_cxsr_latency(display)) {
+ drm_info(display->drm, "Unknown FSB/MEM, disabling CxSR\n");
/* Disable CxSR and never update its watermark again */
- intel_set_memory_cxsr(dev_priv, false);
- dev_priv->display.funcs.wm = &nop_funcs;
+ intel_set_memory_cxsr(display, false);
+ display->funcs.wm = &nop_funcs;
} else {
- dev_priv->display.funcs.wm = &pnv_wm_funcs;
+ display->funcs.wm = &pnv_wm_funcs;
}
- } else if (DISPLAY_VER(dev_priv) == 4) {
- dev_priv->display.funcs.wm = &i965_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 3) {
- dev_priv->display.funcs.wm = &i9xx_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 2) {
- if (INTEL_NUM_PIPES(dev_priv) == 1)
- dev_priv->display.funcs.wm = &i845_wm_funcs;
+ } else if (DISPLAY_VER(display) == 4) {
+ display->funcs.wm = &i965_wm_funcs;
+ } else if (DISPLAY_VER(display) == 3) {
+ display->funcs.wm = &i9xx_wm_funcs;
+ } else if (DISPLAY_VER(display) == 2) {
+ if (INTEL_NUM_PIPES(display) == 1)
+ display->funcs.wm = &i845_wm_funcs;
else
- dev_priv->display.funcs.wm = &i9xx_wm_funcs;
+ display->funcs.wm = &i9xx_wm_funcs;
} else {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"unexpected fall-through in %s\n", __func__);
- dev_priv->display.funcs.wm = &nop_funcs;
+ display->funcs.wm = &nop_funcs;
}
}
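A minimal sketch of how the vtable selected in i9xx_wm_init() is consumed; the wrapper below is hypothetical, only the optimize_watermarks hook and the all-NULL nop_funcs behaviour are taken from this patch:

/* Hypothetical caller, for illustration only. */
static void example_optimize_watermarks(struct intel_atomic_state *state,
                                        struct intel_crtc *crtc)
{
        struct intel_display *display = to_intel_display(crtc);

        /* nop_funcs leaves every hook NULL, so callers must check first */
        if (display->funcs.wm->optimize_watermarks)
                display->funcs.wm->optimize_watermarks(state, crtc);
}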
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h
index 06ac37c6c94b..7bb363b2a756 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.h
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.h
@@ -8,28 +8,28 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
struct intel_plane_state;
#ifdef I915
-bool ilk_disable_cxsr(struct drm_i915_private *i915);
-void ilk_wm_sanitize(struct drm_i915_private *i915);
-bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable);
-void i9xx_wm_init(struct drm_i915_private *i915);
+bool ilk_disable_cxsr(struct intel_display *display);
+void ilk_wm_sanitize(struct intel_display *display);
+bool intel_set_memory_cxsr(struct intel_display *display, bool enable);
+void i9xx_wm_init(struct intel_display *display);
#else
-static inline bool ilk_disable_cxsr(struct drm_i915_private *i915)
+static inline bool ilk_disable_cxsr(struct intel_display *display)
{
return false;
}
-static inline void ilk_wm_sanitize(struct drm_i915_private *i915)
+static inline void ilk_wm_sanitize(struct intel_display *display)
{
}
-static inline bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable)
+static inline bool intel_set_memory_cxsr(struct intel_display *display, bool enable)
{
return false;
}
-static inline void i9xx_wm_init(struct drm_i915_private *i915)
+static inline void i9xx_wm_init(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 402b7b2e1829..ca7033251e91 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -29,6 +29,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "i915_reg.h"
@@ -1826,107 +1827,56 @@ static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
.transfer = gen11_dsi_host_transfer,
};
-#define ICL_PREPARE_CNT_MAX 0x7
-#define ICL_CLK_ZERO_CNT_MAX 0xf
-#define ICL_TRAIL_CNT_MAX 0x7
-#define ICL_TCLK_PRE_CNT_MAX 0x3
-#define ICL_TCLK_POST_CNT_MAX 0x7
-#define ICL_HS_ZERO_CNT_MAX 0xf
-#define ICL_EXIT_ZERO_CNT_MAX 0x7
-
static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
{
- struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns;
- u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
- u32 ths_prepare_ns, tclk_trail_ns;
- u32 hs_zero_cnt;
- u32 tclk_pre_cnt;
+ u32 tclk_prepare_esc_clk, tclk_zero_esc_clk, tclk_pre_esc_clk;
+ u32 ths_prepare_esc_clk, ths_zero_esc_clk, ths_exit_esc_clk;
tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
- tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
- ths_prepare_ns = max(mipi_config->ths_prepare,
- mipi_config->tclk_prepare);
-
/*
- * prepare cnt in escape clocks
- * this field represents a hexadecimal value with a precision
- * of 1.2 – i.e. the most significant bit is the integer
- * and the least significant 2 bits are fraction bits.
- * so, the field can represent a range of 0.25 to 1.75
+ * The clock and data lane prepare timing parameters are expressed in
+ * units of 1/4 escape clocks, and all the other timing parameters in
+ * escape clocks.
*/
- prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
- if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
- drm_dbg_kms(display->drm, "prepare_cnt out of range (%d)\n",
- prepare_cnt);
- prepare_cnt = ICL_PREPARE_CNT_MAX;
- }
+ tclk_prepare_esc_clk = DIV_ROUND_UP(mipi_config->tclk_prepare * 4, tlpx_ns);
+ tclk_prepare_esc_clk = min(tclk_prepare_esc_clk, 7);
- /* clk zero count in escape clocks */
- clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
- ths_prepare_ns, tlpx_ns);
- if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
- drm_dbg_kms(display->drm,
- "clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
- clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
- }
+ tclk_zero_esc_clk = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
+ mipi_config->tclk_prepare, tlpx_ns);
+ tclk_zero_esc_clk = min(tclk_zero_esc_clk, 15);
- /* trail cnt in escape clocks*/
- trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
- if (trail_cnt > ICL_TRAIL_CNT_MAX) {
- drm_dbg_kms(display->drm, "trail_cnt out of range (%d)\n",
- trail_cnt);
- trail_cnt = ICL_TRAIL_CNT_MAX;
- }
+ tclk_pre_esc_clk = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
+ tclk_pre_esc_clk = min(tclk_pre_esc_clk, 3);
- /* tclk pre count in escape clocks */
- tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
- if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
- drm_dbg_kms(display->drm,
- "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
- tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
- }
+ ths_prepare_esc_clk = DIV_ROUND_UP(mipi_config->ths_prepare * 4, tlpx_ns);
+ ths_prepare_esc_clk = min(ths_prepare_esc_clk, 7);
- /* hs zero cnt in escape clocks */
- hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
- ths_prepare_ns, tlpx_ns);
- if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
- drm_dbg_kms(display->drm, "hs_zero_cnt out of range (%d)\n",
- hs_zero_cnt);
- hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
- }
+ ths_zero_esc_clk = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
+ mipi_config->ths_prepare, tlpx_ns);
+ ths_zero_esc_clk = min(ths_zero_esc_clk, 15);
- /* hs exit zero cnt in escape clocks */
- exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
- if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
- drm_dbg_kms(display->drm,
- "exit_zero_cnt out of range (%d)\n",
- exit_zero_cnt);
- exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
- }
+ ths_exit_esc_clk = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
+ ths_exit_esc_clk = min(ths_exit_esc_clk, 7);
/* clock lane dphy timings */
intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
- CLK_PREPARE(prepare_cnt) |
+ CLK_PREPARE(tclk_prepare_esc_clk) |
CLK_ZERO_OVERRIDE |
- CLK_ZERO(clk_zero_cnt) |
+ CLK_ZERO(tclk_zero_esc_clk) |
CLK_PRE_OVERRIDE |
- CLK_PRE(tclk_pre_cnt) |
- CLK_TRAIL_OVERRIDE |
- CLK_TRAIL(trail_cnt));
+ CLK_PRE(tclk_pre_esc_clk));
/* data lanes dphy timings */
intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
- HS_PREPARE(prepare_cnt) |
+ HS_PREPARE(ths_prepare_esc_clk) |
HS_ZERO_OVERRIDE |
- HS_ZERO(hs_zero_cnt) |
- HS_TRAIL_OVERRIDE |
- HS_TRAIL(trail_cnt) |
+ HS_ZERO(ths_zero_esc_clk) |
HS_EXIT_OVERRIDE |
- HS_EXIT(exit_zero_cnt));
+ HS_EXIT(ths_exit_esc_clk));
intel_dsi_log_params(intel_dsi);
}
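To make the unit handling above concrete: the prepare fields carry two fraction bits, so a nanosecond value is multiplied by 4 before dividing by the escape clock period, yielding a count in quarter escape clocks capped by the 3-bit field. A standalone sketch with assumed example values:

/* Assumed numbers for illustration: tlpx = 50 ns, ths_prepare = 65 ns. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int tlpx_ns = 50, ths_prepare_ns = 65;
        /* 65 * 4 / 50 = 5.2, rounded up to 6 quarter clocks (1.5 esc clocks) */
        unsigned int cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);

        cnt = cnt > 7 ? 7 : cnt;        /* 3-bit register field maximum */
        printf("ths_prepare_esc_clk = %u\n", cnt);      /* prints 6 */
        return 0;
}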
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index 55f3ae1e68c9..c176bdbc19a3 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -5,12 +5,15 @@
#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+
#include "intel_alpm.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
+#include "intel_psr.h"
#include "intel_psr_regs.h"
bool intel_alpm_aux_wake_supported(struct intel_dp *intel_dp)
@@ -23,7 +26,7 @@ bool intel_alpm_aux_less_wake_supported(struct intel_dp *intel_dp)
return intel_dp->alpm_dpcd & DP_ALPM_AUX_LESS_CAP;
}
-void intel_alpm_init_dpcd(struct intel_dp *intel_dp)
+void intel_alpm_init(struct intel_dp *intel_dp)
{
u8 dpcd;
@@ -31,6 +34,7 @@ void intel_alpm_init_dpcd(struct intel_dp *intel_dp)
return;
intel_dp->alpm_dpcd = dpcd;
+ mutex_init(&intel_dp->alpm_parameters.lock);
}
/*
@@ -276,6 +280,14 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
int waketime_in_lines, first_sdp_position;
int context_latency, guardband;
+ if (intel_dp->alpm_parameters.lobf_disable_debug) {
+ drm_dbg_kms(display->drm, "LOBF is disabled by debug flag\n");
+ return;
+ }
+
+ if (intel_dp->alpm_parameters.sink_alpm_error)
+ return;
+
if (!intel_dp_is_edp(intel_dp))
return;
@@ -288,6 +300,10 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
if (crtc_state->has_psr)
return;
+ if (crtc_state->vrr.vmin != crtc_state->vrr.vmax ||
+ crtc_state->vrr.vmin != crtc_state->vrr.flipline)
+ return;
+
if (!(intel_alpm_aux_wake_supported(intel_dp) ||
intel_alpm_aux_less_wake_supported(intel_dp)))
return;
@@ -316,15 +332,16 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
enum port port = dp_to_dig_port(intel_dp)->base.port;
u32 alpm_ctl;
- if (DISPLAY_VER(display) < 20 ||
- (!intel_dp->psr.sel_update_enabled && !intel_dp_is_edp(intel_dp)))
+ if (DISPLAY_VER(display) < 20 || (!intel_psr_needs_alpm(intel_dp, crtc_state) &&
+ !crtc_state->has_lobf))
return;
+ mutex_lock(&intel_dp->alpm_parameters.lock);
/*
* Panel Replay on eDP is always using ALPM aux less. I.e. no need to
* check panel support at this point.
*/
- if ((intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) ||
+ if ((crtc_state->has_panel_replay && intel_dp_is_edp(intel_dp)) ||
(crtc_state->has_lobf && intel_alpm_aux_less_wake_supported(intel_dp))) {
alpm_ctl = ALPM_CTL_ALPM_ENABLE |
ALPM_CTL_ALPM_AUX_LESS_ENABLE |
@@ -353,18 +370,107 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
ALPM_CTL_EXTENDED_FAST_WAKE_TIME(intel_dp->alpm_parameters.fast_wake_lines);
}
- if (crtc_state->has_lobf)
+ if (crtc_state->has_lobf) {
alpm_ctl |= ALPM_CTL_LOBF_ENABLE;
+ drm_dbg_kms(display->drm, "Link off between frames (LOBF) enabled\n");
+ }
alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(intel_dp->alpm_parameters.check_entry_lines);
intel_de_write(display, ALPM_CTL(display, cpu_transcoder), alpm_ctl);
+ mutex_unlock(&intel_dp->alpm_parameters.lock);
}
void intel_alpm_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
lnl_alpm_configure(intel_dp, crtc_state);
+ intel_dp->alpm_parameters.transcoder = crtc_state->cpu_transcoder;
+}
+
+void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ struct intel_encoder *encoder;
+
+ if (DISPLAY_VER(display) < 20)
+ return;
+
+ if (crtc_state->has_lobf || crtc_state->has_lobf == old_crtc_state->has_lobf)
+ return;
+
+ for_each_intel_encoder_mask(display->drm, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp;
+
+ if (!intel_encoder_is_dp(encoder))
+ continue;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ if (!intel_dp_is_edp(intel_dp))
+ continue;
+
+ if (old_crtc_state->has_lobf) {
+ mutex_lock(&intel_dp->alpm_parameters.lock);
+ intel_de_write(display, ALPM_CTL(display, cpu_transcoder), 0);
+ drm_dbg_kms(display->drm, "Link off between frames (LOBF) disabled\n");
+ mutex_unlock(&intel_dp->alpm_parameters.lock);
+ }
+ }
+}
+
+void intel_alpm_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 val;
+
+ if (!intel_psr_needs_alpm(intel_dp, crtc_state) && !crtc_state->has_lobf)
+ return;
+
+ val = DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE;
+
+ if (crtc_state->has_panel_replay || (crtc_state->has_lobf &&
+ intel_alpm_aux_less_wake_supported(intel_dp)))
+ val |= DP_ALPM_MODE_AUX_LESS;
+
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
+}
+
+void intel_alpm_post_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_encoder *encoder;
+
+ if (crtc_state->has_psr || !crtc_state->has_lobf ||
+ crtc_state->has_lobf == old_crtc_state->has_lobf)
+ return;
+
+ for_each_intel_encoder_mask(display->drm, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp;
+
+ if (!intel_encoder_is_dp(encoder))
+ continue;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ if (intel_dp_is_edp(intel_dp)) {
+ intel_alpm_enable_sink(intel_dp, crtc_state);
+ intel_alpm_configure(intel_dp, crtc_state);
+ }
+ }
}
static int i915_edp_lobf_info_show(struct seq_file *m, void *data)
@@ -403,6 +509,32 @@ out:
DEFINE_SHOW_ATTRIBUTE(i915_edp_lobf_info);
+static int
+i915_edp_lobf_debug_get(void *data, u64 *val)
+{
+ struct intel_connector *connector = data;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+
+ *val = intel_dp->alpm_parameters.lobf_disable_debug;
+
+ return 0;
+}
+
+static int
+i915_edp_lobf_debug_set(void *data, u64 val)
+{
+ struct intel_connector *connector = data;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+
+ intel_dp->alpm_parameters.lobf_disable_debug = val;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_edp_lobf_debug_fops,
+ i915_edp_lobf_debug_get, i915_edp_lobf_debug_set,
+ "%llu\n");
+
void intel_alpm_lobf_debugfs_add(struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
@@ -412,6 +544,55 @@ void intel_alpm_lobf_debugfs_add(struct intel_connector *connector)
connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
return;
+ debugfs_create_file("i915_edp_lobf_debug", 0644, root,
+ connector, &i915_edp_lobf_debug_fops);
+
debugfs_create_file("i915_edp_lobf_info", 0444, root,
connector, &i915_edp_lobf_info_fops);
}
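An assumed usage example; the debugfs mount point and connector directory name are illustrative, not taken from the patch:

/* From userspace, toggling the knob registered above:
 *   echo 1 > /sys/kernel/debug/dri/0/eDP-1/i915_edp_lobf_debug
 * With the flag set, intel_alpm_lobf_compute_config() returns early and
 * logs "LOBF is disabled by debug flag".
 */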
+
+void intel_alpm_disable(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->alpm_parameters.transcoder;
+
+ if (DISPLAY_VER(display) < 20 || !intel_dp->alpm_dpcd)
+ return;
+
+ mutex_lock(&intel_dp->alpm_parameters.lock);
+
+ intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
+ ALPM_CTL_ALPM_ENABLE | ALPM_CTL_LOBF_ENABLE |
+ ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
+
+ intel_de_rmw(display,
+ PORT_ALPM_CTL(cpu_transcoder),
+ PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
+
+ drm_dbg_kms(display->drm, "Disabling ALPM\n");
+ mutex_unlock(&intel_dp->alpm_parameters.lock);
+}
+
+bool intel_alpm_get_error(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_dp_aux *aux = &intel_dp->aux;
+ u8 val;
+ int r;
+
+ r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
+ if (r != 1) {
+ drm_err(display->drm, "Error reading ALPM status\n");
+ return true;
+ }
+
+ if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
+ drm_dbg_kms(display->drm, "ALPM lock timeout error\n");
+
+ /* Clearing error */
+ drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.h b/drivers/gpu/drm/i915/display/intel_alpm.h
index 8c409b10dce6..c9fe21e3e72c 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.h
+++ b/drivers/gpu/drm/i915/display/intel_alpm.h
@@ -12,8 +12,10 @@ struct intel_dp;
struct intel_crtc_state;
struct drm_connector_state;
struct intel_connector;
+struct intel_atomic_state;
+struct intel_crtc;
-void intel_alpm_init_dpcd(struct intel_dp *intel_dp);
+void intel_alpm_init(struct intel_dp *intel_dp);
bool intel_alpm_compute_params(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
@@ -21,7 +23,15 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
struct drm_connector_state *conn_state);
void intel_alpm_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
+void intel_alpm_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_alpm_post_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void intel_alpm_lobf_debugfs_add(struct intel_connector *connector);
bool intel_alpm_aux_wake_supported(struct intel_dp *intel_dp);
bool intel_alpm_aux_less_wake_supported(struct intel_dp *intel_dp);
+void intel_alpm_disable(struct intel_dp *intel_dp);
+bool intel_alpm_get_error(struct intel_dp *intel_dp);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 03dc54c802d3..e83feca5c9c9 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -33,16 +33,17 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_cdclk.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dp_tunnel.h"
+#include "intel_fb.h"
#include "intel_global_state.h"
#include "intel_hdcp.h"
#include "intel_psr.h"
-#include "intel_fb.h"
#include "skl_universal_plane.h"
/**
@@ -59,17 +60,16 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
struct drm_property *property,
u64 *val)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
- if (property == dev_priv->display.properties.force_audio)
+ if (property == display->properties.force_audio)
*val = intel_conn_state->force_audio;
- else if (property == dev_priv->display.properties.broadcast_rgb)
+ else if (property == display->properties.broadcast_rgb)
*val = intel_conn_state->broadcast_rgb;
else {
- drm_dbg_atomic(&dev_priv->drm,
+ drm_dbg_atomic(display->drm,
"Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
return -EINVAL;
@@ -92,22 +92,21 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
struct drm_property *property,
u64 val)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
- if (property == dev_priv->display.properties.force_audio) {
+ if (property == display->properties.force_audio) {
intel_conn_state->force_audio = val;
return 0;
}
- if (property == dev_priv->display.properties.broadcast_rgb) {
+ if (property == display->properties.broadcast_rgb) {
intel_conn_state->broadcast_rgb = val;
return 0;
}
- drm_dbg_atomic(&dev_priv->drm, "Unknown property [PROP:%d:%s]\n",
+ drm_dbg_atomic(display->drm, "Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 7276179df878..1bcfa5f4fd63 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -174,11 +174,27 @@ bool intel_plane_needs_physical(struct intel_plane *plane)
DISPLAY_INFO(display)->cursor_needs_physical;
}
-bool intel_plane_can_async_flip(struct intel_plane *plane, u64 modifier)
+bool intel_plane_can_async_flip(struct intel_plane *plane, u32 format,
+ u64 modifier)
{
+ if (intel_format_info_is_yuv_semiplanar(drm_format_info(format), modifier) ||
+ format == DRM_FORMAT_C8)
+ return false;
+
return plane->can_async_flip && plane->can_async_flip(modifier);
}
+bool intel_plane_format_mod_supported_async(struct drm_plane *plane,
+ u32 format,
+ u64 modifier)
+{
+ if (!plane->funcs->format_mod_supported(plane, format, modifier))
+ return false;
+
+ return intel_plane_can_async_flip(to_intel_plane(plane),
+ format, modifier);
+}
+
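A hedged usage sketch of the new helper; the plane variable and the format choice are assumptions for illustration:

/* NV12 is YUV semi-planar, so intel_plane_can_async_flip() rejects it and
 * the helper returns false regardless of the plane's own format check. */
bool ok = intel_plane_format_mod_supported_async(plane, DRM_FORMAT_NV12,
                                                 DRM_FORMAT_MOD_LINEAR);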
unsigned int intel_adjusted_rate(const struct drm_rect *src,
const struct drm_rect *dst,
unsigned int rate)
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 6efac923dcbc..317320c32285 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -21,7 +21,8 @@ enum plane_id;
struct intel_plane *
intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id);
-bool intel_plane_can_async_flip(struct intel_plane *plane, u64 modifier);
+bool intel_plane_can_async_flip(struct intel_plane *plane, u32 format,
+ u64 modifier);
unsigned int intel_adjusted_rate(const struct drm_rect *src,
const struct drm_rect *dst,
unsigned int rate);
@@ -89,5 +90,8 @@ int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
int intel_atomic_check_planes(struct intel_atomic_state *state);
u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state);
+bool intel_plane_format_mod_supported_async(struct drm_plane *plane,
+ u32 format,
+ u64 modifier);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index ea935a5d94c8..40d8bbd8107d 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -27,9 +27,9 @@
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include <drm/intel/i915_component.h>
-#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_audio_regs.h"
@@ -587,19 +587,17 @@ static void ibx_audio_regs_init(struct intel_display *display,
enum pipe pipe,
struct ibx_audio_regs *regs)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (display->platform.valleyview || display->platform.cherryview) {
regs->hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
regs->aud_config = VLV_AUD_CFG(pipe);
regs->aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
regs->aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
- } else if (HAS_PCH_CPT(i915)) {
+ } else if (HAS_PCH_CPT(display)) {
regs->hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
regs->aud_config = CPT_AUD_CFG(pipe);
regs->aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
regs->aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
- } else if (HAS_PCH_IBX(i915)) {
+ } else if (HAS_PCH_IBX(display)) {
regs->hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
regs->aud_config = IBX_AUD_CFG(pipe);
regs->aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
@@ -889,12 +887,10 @@ static const struct intel_audio_funcs hsw_audio_funcs = {
*/
void intel_audio_hooks_init(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (display->platform.g4x)
display->funcs.audio = &g4x_audio_funcs;
else if (display->platform.valleyview || display->platform.cherryview ||
- HAS_PCH_CPT(i915) || HAS_PCH_IBX(i915))
+ HAS_PCH_CPT(display) || HAS_PCH_IBX(display))
display->funcs.audio = &ibx_audio_funcs;
else if (display->platform.haswell || DISPLAY_VER(display) >= 8)
display->funcs.audio = &hsw_audio_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 178dc6c8de80..5827da586003 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -10,12 +10,16 @@
#include <acpi/video.h>
-#include "i915_drv.h"
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_backlight.h"
#include "intel_backlight_regs.h"
#include "intel_connector.h"
#include "intel_de.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
#include "intel_dsi_dcs_backlight.h"
@@ -472,7 +476,6 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2;
@@ -485,7 +488,7 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1);
}
- if (HAS_PCH_LPT(i915))
+ if (HAS_PCH_LPT(display))
intel_de_rmw(display, SOUTH_CHICKEN2, LPT_PWM_GRANULARITY,
panel->backlight.alternate_pwm_increment ?
LPT_PWM_GRANULARITY : 0);
@@ -502,7 +505,7 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
pch_ctl1 |= BLM_PCH_POLARITY;
/* After LPT, override is the default. */
- if (HAS_PCH_LPT(i915))
+ if (HAS_PCH_LPT(display))
pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1);
@@ -901,11 +904,9 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- intel_wakeref_t wakeref;
int ret = 0;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ with_intel_display_rpm(display) {
u32 hw_level;
drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
@@ -1065,7 +1066,7 @@ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 mul, clock;
@@ -1074,7 +1075,7 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
else
mul = 128;
- if (HAS_PCH_LPT_H(i915))
+ if (HAS_PCH_LPT_H(display))
clock = MHz(135); /* LPT:H */
else
clock = MHz(24); /* LPT:LP */
@@ -1231,12 +1232,11 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
bool alt, cpu_mode;
- if (HAS_PCH_LPT(i915))
+ if (HAS_PCH_LPT(display))
alt = intel_de_read(display, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
else
alt = intel_de_read(display, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
@@ -1260,7 +1260,7 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
- cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(i915) &&
+ cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(display) &&
!(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
(cpu_ctl2 & BLM_PWM_ENABLE);
@@ -1467,15 +1467,13 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
static int cnp_num_backlight_controllers(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL)
return 2;
- if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
+ if (INTEL_PCH_TYPE(display) >= PCH_DG1)
return 1;
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
return 2;
return 1;
@@ -1483,14 +1481,12 @@ static int cnp_num_backlight_controllers(struct intel_display *display)
static bool cnp_backlight_controller_is_valid(struct intel_display *display, int controller)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (controller < 0 || controller >= cnp_num_backlight_controllers(display))
return false;
if (controller == 1 &&
- INTEL_PCH_TYPE(i915) >= PCH_ICP &&
- INTEL_PCH_TYPE(i915) <= PCH_ADP)
+ INTEL_PCH_TYPE(display) >= PCH_ICP &&
+ INTEL_PCH_TYPE(display) <= PCH_ADP)
return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;
@@ -1819,7 +1815,6 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
struct intel_connector *connector =
container_of(panel, struct intel_connector, panel);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
intel_dsi_dcs_init_backlight_funcs(connector) == 0)
@@ -1827,14 +1822,14 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
if (display->platform.geminilake || display->platform.broxton) {
panel->backlight.pwm_funcs = &bxt_pwm_funcs;
- } else if (INTEL_PCH_TYPE(i915) >= PCH_CNP) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_CNP) {
panel->backlight.pwm_funcs = &cnp_pwm_funcs;
- } else if (INTEL_PCH_TYPE(i915) >= PCH_LPT_H) {
- if (HAS_PCH_LPT(i915))
+ } else if (INTEL_PCH_TYPE(display) >= PCH_LPT_H) {
+ if (HAS_PCH_LPT(display))
panel->backlight.pwm_funcs = &lpt_pwm_funcs;
else
panel->backlight.pwm_funcs = &spt_pwm_funcs;
- } else if (HAS_PCH_SPLIT(i915)) {
+ } else if (HAS_PCH_SPLIT(display)) {
panel->backlight.pwm_funcs = &pch_pwm_funcs;
} else if (display->platform.valleyview || display->platform.cherryview) {
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index a8d08d7d82b3..ba7b8938b17c 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -37,6 +37,7 @@
#include "i915_drv.h"
#include "intel_display.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
@@ -2244,28 +2245,27 @@ static const u8 adlp_ddc_pin_map[] = {
static u8 map_ddc_pin(struct intel_display *display, u8 vbt_pin)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const u8 *ddc_pin_map;
int i, n_entries;
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL || display->platform.alderlake_p) {
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL || display->platform.alderlake_p) {
ddc_pin_map = adlp_ddc_pin_map;
n_entries = ARRAY_SIZE(adlp_ddc_pin_map);
} else if (display->platform.alderlake_s) {
ddc_pin_map = adls_ddc_pin_map;
n_entries = ARRAY_SIZE(adls_ddc_pin_map);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_DG1) {
return vbt_pin;
- } else if (display->platform.rocketlake && INTEL_PCH_TYPE(i915) == PCH_TGP) {
+ } else if (display->platform.rocketlake && INTEL_PCH_TYPE(display) == PCH_TGP) {
ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
- } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(display) == 9) {
+ } else if (HAS_PCH_TGP(display) && DISPLAY_VER(display) == 9) {
ddc_pin_map = gen9bc_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_ICP) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
- } else if (HAS_PCH_CNP(i915)) {
+ } else if (HAS_PCH_CNP(display)) {
ddc_pin_map = cnp_ddc_pin_map;
n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
} else {
@@ -2864,8 +2864,6 @@ parse_general_definitions(struct intel_display *display)
static void
init_vbt_defaults(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
display->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
/* general features */
@@ -2882,7 +2880,7 @@ init_vbt_defaults(struct intel_display *display)
* clock for LVDS.
*/
display->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(display,
- !HAS_PCH_SPLIT(i915));
+ !HAS_PCH_SPLIT(display));
drm_dbg_kms(display->drm, "Set default to SSC at %d kHz\n",
display->vbt.lvds_ssc_freq);
}
@@ -3115,7 +3113,6 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display
{
struct drm_i915_private *i915 = to_i915(display->drm);
const struct vbt_header *vbt = NULL;
- intel_wakeref_t wakeref;
vbt = firmware_get_vbt(display, sizep);
@@ -3126,12 +3123,12 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display
* If the OpRegion does not have VBT, look in SPI flash
* through MMIO or PCI mapping
*/
- if (!vbt && IS_DGFX(i915))
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ if (!vbt && display->platform.dgfx)
+ with_intel_display_rpm(display)
vbt = oprom_get_vbt(display, intel_rom_spi(i915), sizep, "SPI flash");
if (!vbt)
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_display_rpm(display)
vbt = oprom_get_vbt(display, intel_rom_pci(i915), sizep, "PCI ROM");
return vbt;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index f9841f0498c6..6cd7a011b8c4 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -24,7 +24,7 @@
/*
* Please use intel_vbt_defs.h for VBT private data, to hide and abstract away
* the VBT from the rest of the driver. Add the parsed, clean data to struct
- * intel_vbt_data within struct drm_i915_private.
+ * intel_vbt_data within struct intel_display.
*/
#ifndef _INTEL_BIOS_H_
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 98b898a1de8f..a5dd2932b852 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -39,14 +39,15 @@ struct intel_qgv_info {
u8 deinterleave;
};
-static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int dg1_mchbar_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 dclk_ratio, dclk_reference;
u32 val;
- val = intel_uncore_read(&dev_priv->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
+ val = intel_uncore_read(&i915->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
dclk_ratio = REG_FIELD_GET(DG1_QCLK_RATIO_MASK, val);
if (val & DG1_QCLK_REFERENCE)
dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */
@@ -54,18 +55,18 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */
sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000);
- val = intel_uncore_read(&dev_priv->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
+ val = intel_uncore_read(&i915->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
if (val & DG1_GEAR_TYPE)
sp->dclk *= 2;
if (sp->dclk == 0)
return -EINVAL;
- val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
+ val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
sp->t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK, val);
sp->t_rdpre = REG_FIELD_GET(DG1_DRAM_T_RDPRE_MASK, val);
- val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
+ val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
sp->t_rcd = REG_FIELD_GET(DG1_DRAM_T_RCD_MASK, val);
sp->t_ras = REG_FIELD_GET(DG1_DRAM_T_RAS_MASK, val);
@@ -74,22 +75,23 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
-static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int icl_pcode_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 val = 0, val2 = 0;
u16 dclk;
int ret;
- ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
&val, &val2);
if (ret)
return ret;
dclk = val & 0xffff;
- sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) >= 12 ? 500 : 0),
+ sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(display) >= 12 ? 500 : 0),
1000);
sp->t_rp = (val & 0xff0000) >> 16;
sp->t_rcd = (val & 0xff000000) >> 24;
@@ -102,14 +104,15 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
-static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
- struct intel_psf_gv_point *points)
+static int adls_pcode_read_psf_gv_point_info(struct intel_display *display,
+ struct intel_psf_gv_point *points)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 val = 0;
int ret;
int i;
- ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
if (ret)
return ret;
@@ -122,10 +125,10 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
-static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
+static u16 icl_qgv_points_mask(struct intel_display *display)
{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
u16 qgv_points = 0, psf_points = 0;
/*
@@ -142,49 +145,51 @@ static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
}
-static bool is_sagv_enabled(struct drm_i915_private *i915, u16 points_mask)
+static bool is_sagv_enabled(struct intel_display *display, u16 points_mask)
{
- return !is_power_of_2(~points_mask & icl_qgv_points_mask(i915) &
+ return !is_power_of_2(~points_mask & icl_qgv_points_mask(display) &
ICL_PCODE_REQ_QGV_PT_MASK);
}
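
The power-of-two test reads as: if exactly one QGV point would remain unmasked, SAGV is effectively disabled. A minimal sketch of the bit logic with assumed masks (the kernel version additionally restricts the comparison to ICL_PCODE_REQ_QGV_PT_MASK):

#include <stdbool.h>
#include <stdio.h>

static bool is_power_of_2(unsigned int v)
{
	return v && !(v & (v - 1));
}

int main(void)
{
	unsigned int all_points = 0x7;  /* assume 3 QGV points exposed */
	unsigned int points_mask = 0x6; /* request points 1 and 2 masked off */
	unsigned int remaining = ~points_mask & all_points;

	/* a single surviving point (0x1) is a power of two: SAGV disabled */
	printf("SAGV %s\n", is_power_of_2(remaining) ? "disabled" : "enabled");
	return 0;
}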
-int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+int icl_pcode_restrict_qgv_points(struct intel_display *display,
u32 points_mask)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return 0;
/* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(&dev_priv->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+ ret = skl_pcode_request(&i915->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
points_mask,
ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
1);
if (ret < 0) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to disable qgv points (0x%x) points: 0x%x\n",
ret, points_mask);
return ret;
}
- dev_priv->display.sagv.status = is_sagv_enabled(dev_priv, points_mask) ?
+ display->sagv.status = is_sagv_enabled(display, points_mask) ?
I915_SAGV_ENABLED : I915_SAGV_DISABLED;
return 0;
}
-static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int mtl_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp, int point)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 val, val2;
u16 dclk;
- val = intel_uncore_read(&dev_priv->uncore,
+ val = intel_uncore_read(&i915->uncore,
MTL_MEM_SS_INFO_QGV_POINT_LOW(point));
- val2 = intel_uncore_read(&dev_priv->uncore,
+ val2 = intel_uncore_read(&i915->uncore,
MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
sp->dclk = DIV_ROUND_CLOSEST(16667 * dclk, 1000);
@@ -200,29 +205,30 @@ static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
}
static int
-intel_read_qgv_point_info(struct drm_i915_private *dev_priv,
+intel_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
- if (DISPLAY_VER(dev_priv) >= 14)
- return mtl_read_qgv_point_info(dev_priv, sp, point);
- else if (IS_DG1(dev_priv))
- return dg1_mchbar_read_qgv_point_info(dev_priv, sp, point);
+ if (DISPLAY_VER(display) >= 14)
+ return mtl_read_qgv_point_info(display, sp, point);
+ else if (display->platform.dg1)
+ return dg1_mchbar_read_qgv_point_info(display, sp, point);
else
- return icl_pcode_read_qgv_point_info(dev_priv, sp, point);
+ return icl_pcode_read_qgv_point_info(display, sp, point);
}
-static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
+static int icl_get_qgv_points(struct intel_display *display,
struct intel_qgv_info *qi,
bool is_y_tile)
{
- const struct dram_info *dram_info = &dev_priv->dram_info;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ const struct dram_info *dram_info = &i915->dram_info;
int i, ret;
qi->num_points = dram_info->num_qgv_points;
qi->num_psf_points = dram_info->num_psf_gv_points;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
qi->t_bl = 4;
@@ -251,7 +257,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
MISSING_CASE(dram_info->type);
return -EINVAL;
}
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
qi->t_bl = is_y_tile ? 8 : 4;
@@ -266,7 +272,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->deinterleave = is_y_tile ? 1 : 2;
break;
case INTEL_DRAM_LPDDR4:
- if (IS_ROCKETLAKE(dev_priv)) {
+ if (display->platform.rocketlake) {
qi->t_bl = 8;
qi->max_numchannels = 4;
qi->channel_width = 32;
@@ -285,39 +291,39 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->max_numchannels = 1;
break;
}
- } else if (DISPLAY_VER(dev_priv) == 11) {
- qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
+ } else if (DISPLAY_VER(display) == 11) {
+ qi->t_bl = dram_info->type == INTEL_DRAM_DDR4 ? 4 : 8;
qi->max_numchannels = 1;
}
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
qi->num_points > ARRAY_SIZE(qi->points)))
qi->num_points = ARRAY_SIZE(qi->points);
for (i = 0; i < qi->num_points; i++) {
struct intel_qgv_point *sp = &qi->points[i];
- ret = intel_read_qgv_point_info(dev_priv, sp, i);
+ ret = intel_read_qgv_point_info(display, sp, i);
if (ret) {
- drm_dbg_kms(&dev_priv->drm, "Could not read QGV %d info\n", i);
+ drm_dbg_kms(display->drm, "Could not read QGV %d info\n", i);
return ret;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
sp->t_rcd, sp->t_rc);
}
if (qi->num_psf_points > 0) {
- ret = adls_pcode_read_psf_gv_point_info(dev_priv, qi->psf_points);
+ ret = adls_pcode_read_psf_gv_point_info(display, qi->psf_points);
if (ret) {
- drm_err(&dev_priv->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
+ drm_err(display->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
qi->num_psf_points = 0;
}
for (i = 0; i < qi->num_psf_points; i++)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSF GV %d: CLK=%d \n",
i, qi->psf_points[i].clk);
}
@@ -405,20 +411,28 @@ static const struct intel_sa_info xe2_hpd_ecc_sa_info = {
/* Other values not used by simplified algorithm */
};
-static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
+static const struct intel_sa_info xe3lpd_sa_info = {
+ .deburst = 32,
+ .deprogbwlimit = 65, /* GB/s */
+ .displayrtids = 256,
+ .derating = 10,
+};
+
+static int icl_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_qgv_info qi = {};
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
+ int num_channels = max_t(u8, 1, i915->dram_info.num_channels);
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw;
- int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
+ int num_groups = ARRAY_SIZE(display->bw.max);
int i, ret;
- ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
+ ret = icl_get_qgv_points(display, &qi, is_y_tile);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
@@ -429,7 +443,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
+ struct intel_bw_info *bi = &display->bw.max[i];
int clpchgroup;
int j;
@@ -456,7 +470,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
bi->deratedbw[j] = min(maxdebw,
bw * (100 - sa->derating) / 100);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
i, j, bi->num_planes, bi->deratedbw[j]);
}
@@ -467,44 +481,45 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
 * as it will fail and is pointless anyway.
*/
if (qi.num_points == 1)
- dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
else
- dev_priv->display.sagv.status = I915_SAGV_ENABLED;
+ display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
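
The derating step above trims each QGV point's raw bandwidth by sa->derating percent and caps it at the SA-derived limit. Worked numbers (assumed, for illustration; derating = 10 matches the xe3lpd_sa_info added above):

#include <stdio.h>

int main(void)
{
	unsigned int bw = 38400;      /* assumed raw bandwidth of one QGV point */
	unsigned int derating = 10;   /* percent, as in xe3lpd_sa_info */
	unsigned int maxdebw = 65000; /* assumed deprogbwlimit-derived cap */
	unsigned int deratedbw = bw * (100 - derating) / 100;

	if (deratedbw > maxdebw)
		deratedbw = maxdebw;
	printf("deratedbw = %u\n", deratedbw); /* 34560 */
	return 0;
}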
-static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
+static int tgl_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_qgv_info qi = {};
- const struct dram_info *dram_info = &dev_priv->dram_info;
+ const struct dram_info *dram_info = &i915->dram_info;
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
+ int num_channels = max_t(u8, 1, dram_info->num_channels);
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw, peakbw;
int clperchgroup;
- int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
+ int num_groups = ARRAY_SIZE(display->bw.max);
int i, ret;
- ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
+ ret = icl_get_qgv_points(display, &qi, is_y_tile);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
- if (DISPLAY_VER(dev_priv) < 14 &&
+ if (DISPLAY_VER(display) < 14 &&
(dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5))
num_channels *= 2;
qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
- if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12)
+ if (num_channels < qi.max_numchannels && DISPLAY_VER(display) >= 12)
qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);
- if (DISPLAY_VER(dev_priv) >= 12 && num_channels > qi.max_numchannels)
- drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels.");
+ if (DISPLAY_VER(display) >= 12 && num_channels > qi.max_numchannels)
+ drm_warn(display->drm, "Number of channels exceeds max number of channels.");
if (qi.max_numchannels != 0)
num_channels = min_t(u8, num_channels, qi.max_numchannels);
@@ -521,7 +536,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
+ struct intel_bw_info *bi = &display->bw.max[i];
struct intel_bw_info *bi_next;
int clpchgroup;
int j;
@@ -529,7 +544,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
if (i < num_groups - 1) {
- bi_next = &dev_priv->display.bw.max[i + 1];
+ bi_next = &display->bw.max[i + 1];
if (clpchgroup < clperchgroup)
bi_next->num_planes = (ipqdepth - clpchgroup) /
@@ -561,7 +576,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
num_channels *
qi.channel_width, 8);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BW%d / QGV %d: num_planes=%d deratedbw=%u peakbw: %u\n",
i, j, bi->num_planes, bi->deratedbw[j],
bi->peakbw[j]);
@@ -572,7 +587,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
bi->psf_bw[j] = adl_calc_psf_bw(sp->clk);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BW%d / PSF GV %d: num_planes=%d bw=%u\n",
i, j, bi->num_planes, bi->psf_bw[j]);
}
@@ -584,17 +599,17 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
 * as it will fail and is pointless anyway.
*/
if (qi.num_points == 1)
- dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
else
- dev_priv->display.sagv.status = I915_SAGV_ENABLED;
+ display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
-static void dg2_get_bw_info(struct drm_i915_private *i915)
+static void dg2_get_bw_info(struct intel_display *display)
{
- unsigned int deratedbw = IS_DG2_G11(i915) ? 38000 : 50000;
- int num_groups = ARRAY_SIZE(i915->display.bw.max);
+ unsigned int deratedbw = display->platform.dg2_g11 ? 38000 : 50000;
+ int num_groups = ARRAY_SIZE(display->bw.max);
int i;
/*
@@ -605,7 +620,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
* whereas DG2-G11 platforms have 38 GB/s.
*/
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &i915->display.bw.max[i];
+ struct intel_bw_info *bi = &display->bw.max[i];
bi->num_planes = 1;
/* Need only one dummy QGV point per group */
@@ -613,20 +628,21 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
bi->deratedbw[0] = deratedbw;
}
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
}
-static int xe2_hpd_get_bw_info(struct drm_i915_private *i915,
+static int xe2_hpd_get_bw_info(struct intel_display *display,
const struct intel_sa_info *sa)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_qgv_info qi = {};
int num_channels = i915->dram_info.num_channels;
int peakbw, maxdebw;
int ret, i;
- ret = icl_get_qgv_points(i915, &qi, true);
+ ret = icl_get_qgv_points(display, &qi, true);
if (ret) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
@@ -638,33 +654,33 @@ static int xe2_hpd_get_bw_info(struct drm_i915_private *i915,
const struct intel_qgv_point *point = &qi.points[i];
int bw = num_channels * (qi.channel_width / 8) * point->dclk;
- i915->display.bw.max[0].deratedbw[i] =
+ display->bw.max[0].deratedbw[i] =
min(maxdebw, (100 - sa->derating) * bw / 100);
- i915->display.bw.max[0].peakbw[i] = bw;
+ display->bw.max[0].peakbw[i] = bw;
- drm_dbg_kms(&i915->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
- i, i915->display.bw.max[0].deratedbw[i],
- i915->display.bw.max[0].peakbw[i]);
+ drm_dbg_kms(display->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
+ i, display->bw.max[0].deratedbw[i],
+ display->bw.max[0].peakbw[i]);
}
/* Bandwidth does not depend on # of planes; set all groups the same */
- i915->display.bw.max[0].num_planes = 1;
- i915->display.bw.max[0].num_qgv_points = qi.num_points;
- for (i = 1; i < ARRAY_SIZE(i915->display.bw.max); i++)
- memcpy(&i915->display.bw.max[i], &i915->display.bw.max[0],
- sizeof(i915->display.bw.max[0]));
+ display->bw.max[0].num_planes = 1;
+ display->bw.max[0].num_qgv_points = qi.num_points;
+ for (i = 1; i < ARRAY_SIZE(display->bw.max); i++)
+ memcpy(&display->bw.max[i], &display->bw.max[0],
+ sizeof(display->bw.max[0]));
/*
* Xe2_HPD should always have exactly two QGV points representing
* battery and plugged-in operation.
*/
- drm_WARN_ON(&i915->drm, qi.num_points != 2);
- i915->display.sagv.status = I915_SAGV_ENABLED;
+ drm_WARN_ON(display->drm, qi.num_points != 2);
+ display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
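
Here the per-point bandwidth is simply channels * bytes-per-clock * dclk before the usual derating. A sketch with assumed Xe2_HPD-like numbers:

#include <stdio.h>

int main(void)
{
	int num_channels = 8;   /* assumed */
	int channel_width = 16; /* bits, assumed */
	int dclk = 2000;        /* MHz, assumed */
	int bw = num_channels * (channel_width / 8) * dclk;

	printf("bw = %d\n", bw); /* 32000 */
	return 0;
}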
-static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
+static unsigned int icl_max_bw_index(struct intel_display *display,
int num_planes, int qgv_point)
{
int i;
@@ -674,9 +690,9 @@ static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
*/
num_planes = max(1, num_planes);
- for (i = 0; i < ARRAY_SIZE(dev_priv->display.bw.max); i++) {
+ for (i = 0; i < ARRAY_SIZE(display->bw.max); i++) {
const struct intel_bw_info *bi =
- &dev_priv->display.bw.max[i];
+ &display->bw.max[i];
/*
* Pcode will not expose all QGV points when
@@ -692,7 +708,7 @@ static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
return UINT_MAX;
}
-static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
+static unsigned int tgl_max_bw_index(struct intel_display *display,
int num_planes, int qgv_point)
{
int i;
@@ -702,9 +718,9 @@ static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
*/
num_planes = max(1, num_planes);
- for (i = ARRAY_SIZE(dev_priv->display.bw.max) - 1; i >= 0; i--) {
+ for (i = ARRAY_SIZE(display->bw.max) - 1; i >= 0; i--) {
const struct intel_bw_info *bi =
- &dev_priv->display.bw.max[i];
+ &display->bw.max[i];
/*
* Pcode will not expose all QGV points when
@@ -720,57 +736,59 @@ static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
return 0;
}
-static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
+static unsigned int adl_psf_bw(struct intel_display *display,
int psf_gv_point)
{
const struct intel_bw_info *bi =
- &dev_priv->display.bw.max[0];
+ &display->bw.max[0];
return bi->psf_bw[psf_gv_point];
}
-static unsigned int icl_qgv_bw(struct drm_i915_private *i915,
+static unsigned int icl_qgv_bw(struct intel_display *display,
int num_active_planes, int qgv_point)
{
unsigned int idx;
- if (DISPLAY_VER(i915) >= 12)
- idx = tgl_max_bw_index(i915, num_active_planes, qgv_point);
+ if (DISPLAY_VER(display) >= 12)
+ idx = tgl_max_bw_index(display, num_active_planes, qgv_point);
else
- idx = icl_max_bw_index(i915, num_active_planes, qgv_point);
+ idx = icl_max_bw_index(display, num_active_planes, qgv_point);
- if (idx >= ARRAY_SIZE(i915->display.bw.max))
+ if (idx >= ARRAY_SIZE(display->bw.max))
return 0;
- return i915->display.bw.max[idx].deratedbw[qgv_point];
+ return display->bw.max[idx].deratedbw[qgv_point];
}
-void intel_bw_init_hw(struct drm_i915_private *dev_priv)
+void intel_bw_init_hw(struct intel_display *display)
{
- const struct dram_info *dram_info = &dev_priv->dram_info;
+ const struct dram_info *dram_info = &to_i915(display->drm)->dram_info;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv) &&
+ if (DISPLAY_VER(display) >= 30)
+ tgl_get_bw_info(display, &xe3lpd_sa_info);
+ else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx &&
dram_info->type == INTEL_DRAM_GDDR_ECC)
- xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_ecc_sa_info);
- else if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv))
- xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_sa_info);
- else if (DISPLAY_VER(dev_priv) >= 14)
- tgl_get_bw_info(dev_priv, &mtl_sa_info);
- else if (IS_DG2(dev_priv))
- dg2_get_bw_info(dev_priv);
- else if (IS_ALDERLAKE_P(dev_priv))
- tgl_get_bw_info(dev_priv, &adlp_sa_info);
- else if (IS_ALDERLAKE_S(dev_priv))
- tgl_get_bw_info(dev_priv, &adls_sa_info);
- else if (IS_ROCKETLAKE(dev_priv))
- tgl_get_bw_info(dev_priv, &rkl_sa_info);
- else if (DISPLAY_VER(dev_priv) == 12)
- tgl_get_bw_info(dev_priv, &tgl_sa_info);
- else if (DISPLAY_VER(dev_priv) == 11)
- icl_get_bw_info(dev_priv, &icl_sa_info);
+ xe2_hpd_get_bw_info(display, &xe2_hpd_ecc_sa_info);
+ else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx)
+ xe2_hpd_get_bw_info(display, &xe2_hpd_sa_info);
+ else if (DISPLAY_VER(display) >= 14)
+ tgl_get_bw_info(display, &mtl_sa_info);
+ else if (display->platform.dg2)
+ dg2_get_bw_info(display);
+ else if (display->platform.alderlake_p)
+ tgl_get_bw_info(display, &adlp_sa_info);
+ else if (display->platform.alderlake_s)
+ tgl_get_bw_info(display, &adls_sa_info);
+ else if (display->platform.rocketlake)
+ tgl_get_bw_info(display, &rkl_sa_info);
+ else if (DISPLAY_VER(display) == 12)
+ tgl_get_bw_info(display, &tgl_sa_info);
+ else if (DISPLAY_VER(display) == 11)
+ icl_get_bw_info(display, &icl_sa_info);
}
static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
@@ -784,8 +802,8 @@ static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_stat
static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
unsigned int data_rate = 0;
enum plane_id plane_id;
@@ -799,7 +817,7 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_
data_rate += crtc_state->data_rate[plane_id];
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
data_rate += crtc_state->data_rate_y[plane_id];
}
@@ -807,39 +825,38 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_
}
/* "Maximum Pipe Read Bandwidth" */
-static int intel_bw_crtc_min_cdclk(const struct intel_crtc_state *crtc_state)
+static int intel_bw_crtc_min_cdclk(struct intel_display *display,
+ unsigned int data_rate)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-
- if (DISPLAY_VER(i915) < 12)
+ if (DISPLAY_VER(display) < 12)
return 0;
- return DIV_ROUND_UP_ULL(mul_u32_u32(intel_bw_crtc_data_rate(crtc_state), 10), 512);
+ return DIV_ROUND_UP_ULL(mul_u32_u32(data_rate, 10), 512);
}
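
After the refactor the "Maximum Pipe Read Bandwidth" limit is a pure function of the pipe's data rate, min_cdclk = ceil(data_rate * 10 / 512), which is what lets intel_bw_state_changed() recompute it on the fly instead of caching min_cdclk[] per pipe. Worked example (assumed rate):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long data_rate = 2500000; /* assumed pipe data rate */

	printf("min cdclk = %llu\n", DIV_ROUND_UP(data_rate * 10, 512)); /* 48829 */
	return 0;
}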
-static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
+static unsigned int intel_bw_num_active_planes(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
unsigned int num_active_planes = 0;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
num_active_planes += bw_state->num_active_planes[pipe];
return num_active_planes;
}
-static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
+static unsigned int intel_bw_data_rate(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
unsigned int data_rate = 0;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
data_rate += bw_state->data_rate[pipe];
- if (DISPLAY_VER(dev_priv) >= 13 && i915_vtd_active(dev_priv))
+ if (DISPLAY_VER(display) >= 13 && i915_vtd_active(i915))
data_rate = DIV_ROUND_UP(data_rate * 105, 100);
return data_rate;
@@ -848,10 +865,10 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->display.bw.obj);
+ bw_state = intel_atomic_get_old_global_obj_state(state, &display->bw.obj);
return to_intel_bw_state(bw_state);
}
@@ -859,10 +876,10 @@ intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
struct intel_bw_state *
intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->display.bw.obj);
+ bw_state = intel_atomic_get_new_global_obj_state(state, &display->bw.obj);
return to_intel_bw_state(bw_state);
}
@@ -870,27 +887,27 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.bw.obj);
+ bw_state = intel_atomic_get_global_obj_state(state, &display->bw.obj);
if (IS_ERR(bw_state))
return ERR_CAST(bw_state);
return to_intel_bw_state(bw_state);
}
-static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915,
+static unsigned int icl_max_bw_qgv_point_mask(struct intel_display *display,
int num_active_planes)
{
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
unsigned int max_bw_point = 0;
unsigned int max_bw = 0;
int i;
for (i = 0; i < num_qgv_points; i++) {
unsigned int max_data_rate =
- icl_qgv_bw(i915, num_active_planes, i);
+ icl_qgv_bw(display, num_active_planes, i);
/*
* We need to know which qgv point gives us
@@ -909,23 +926,23 @@ static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915,
return max_bw_point;
}
-static u16 icl_prepare_qgv_points_mask(struct drm_i915_private *i915,
+static u16 icl_prepare_qgv_points_mask(struct intel_display *display,
unsigned int qgv_points,
unsigned int psf_points)
{
return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
- ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(i915);
+ ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(display);
}
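
icl_prepare_qgv_points_mask() inverts "points with enough bandwidth" into "points to mask off", since the latter is what PCode accepts. Bit-level sketch with assumed widths (the real macros also shift the PSF bits into their field):

#include <stdio.h>

int main(void)
{
	unsigned int exposed = 0x7; /* assume 3 QGV points exposed */
	unsigned int usable = 0x3;  /* points 0 and 1 satisfy the data rate */
	unsigned int to_mask = ~usable & exposed;

	printf("mask sent to PCode = 0x%x\n", to_mask); /* 0x4: disable point 2 */
	return 0;
}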
-static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915)
+static unsigned int icl_max_bw_psf_gv_point_mask(struct intel_display *display)
{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
unsigned int max_bw_point_mask = 0;
unsigned int max_bw = 0;
int i;
for (i = 0; i < num_psf_gv_points; i++) {
- unsigned int max_data_rate = adl_psf_bw(i915, i);
+ unsigned int max_data_rate = adl_psf_bw(display, i);
if (max_data_rate > max_bw) {
max_bw_point_mask = BIT(i);
@@ -938,29 +955,29 @@ static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915)
return max_bw_point_mask;
}
-static void icl_force_disable_sagv(struct drm_i915_private *i915,
+static void icl_force_disable_sagv(struct intel_display *display,
struct intel_bw_state *bw_state)
{
- unsigned int qgv_points = icl_max_bw_qgv_point_mask(i915, 0);
- unsigned int psf_points = icl_max_bw_psf_gv_point_mask(i915);
+ unsigned int qgv_points = icl_max_bw_qgv_point_mask(display, 0);
+ unsigned int psf_points = icl_max_bw_psf_gv_point_mask(display);
- bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
qgv_points,
psf_points);
- drm_dbg_kms(&i915->drm, "Forcing SAGV disable: mask 0x%x\n",
+ drm_dbg_kms(display->drm, "Forcing SAGV disable: mask 0x%x\n",
bw_state->qgv_points_mask);
- icl_pcode_restrict_qgv_points(i915, bw_state->qgv_points_mask);
+ icl_pcode_restrict_qgv_points(display, bw_state->qgv_points_mask);
}
-static int mtl_find_qgv_points(struct drm_i915_private *i915,
+static int mtl_find_qgv_points(struct intel_display *display,
unsigned int data_rate,
unsigned int num_active_planes,
struct intel_bw_state *new_bw_state)
{
unsigned int best_rate = UINT_MAX;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
unsigned int qgv_peak_bw = 0;
int i;
int ret;
@@ -974,9 +991,9 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
* for qgv peak bw in PM Demand request. So assign UINT_MAX if SAGV is
* not enabled. PM Demand code will clamp the value for the register
*/
- if (!intel_can_enable_sagv(i915, new_bw_state)) {
+ if (!intel_can_enable_sagv(display, new_bw_state)) {
new_bw_state->qgv_point_peakbw = U16_MAX;
- drm_dbg_kms(&i915->drm, "No SAGV, use UINT_MAX as peak bw.");
+ drm_dbg_kms(display->drm, "No SAGV, use UINT_MAX as peak bw.");
return 0;
}
@@ -986,27 +1003,27 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
*/
for (i = 0; i < num_qgv_points; i++) {
unsigned int bw_index =
- tgl_max_bw_index(i915, num_active_planes, i);
+ tgl_max_bw_index(display, num_active_planes, i);
unsigned int max_data_rate;
- if (bw_index >= ARRAY_SIZE(i915->display.bw.max))
+ if (bw_index >= ARRAY_SIZE(display->bw.max))
continue;
- max_data_rate = i915->display.bw.max[bw_index].deratedbw[i];
+ max_data_rate = display->bw.max[bw_index].deratedbw[i];
if (max_data_rate < data_rate)
continue;
if (max_data_rate - data_rate < best_rate) {
best_rate = max_data_rate - data_rate;
- qgv_peak_bw = i915->display.bw.max[bw_index].peakbw[i];
+ qgv_peak_bw = display->bw.max[bw_index].peakbw[i];
}
- drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
+ drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
i, max_data_rate, data_rate, qgv_peak_bw);
}
- drm_dbg_kms(&i915->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
+ drm_dbg_kms(display->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
qgv_peak_bw, data_rate);
/*
@@ -1014,7 +1031,7 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
* satisfying the required data rate is found
*/
if (qgv_peak_bw == 0) {
- drm_dbg_kms(&i915->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
+ drm_dbg_kms(display->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
}
@@ -1025,14 +1042,14 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
return 0;
}
-static int icl_find_qgv_points(struct drm_i915_private *i915,
+static int icl_find_qgv_points(struct intel_display *display,
unsigned int data_rate,
unsigned int num_active_planes,
const struct intel_bw_state *old_bw_state,
struct intel_bw_state *new_bw_state)
{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
u16 psf_points = 0;
u16 qgv_points = 0;
int i;
@@ -1043,22 +1060,22 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
return ret;
for (i = 0; i < num_qgv_points; i++) {
- unsigned int max_data_rate = icl_qgv_bw(i915,
+ unsigned int max_data_rate = icl_qgv_bw(display,
num_active_planes, i);
if (max_data_rate >= data_rate)
qgv_points |= BIT(i);
- drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d\n",
+ drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d\n",
i, max_data_rate, data_rate);
}
for (i = 0; i < num_psf_gv_points; i++) {
- unsigned int max_data_rate = adl_psf_bw(i915, i);
+ unsigned int max_data_rate = adl_psf_bw(display, i);
if (max_data_rate >= data_rate)
psf_points |= BIT(i);
- drm_dbg_kms(&i915->drm, "PSF GV point %d: max bw %d"
+ drm_dbg_kms(display->drm, "PSF GV point %d: max bw %d"
" required %d\n",
i, max_data_rate, data_rate);
}
@@ -1069,14 +1086,14 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* reasons.
*/
if (qgv_points == 0) {
- drm_dbg_kms(&i915->drm, "No QGV points provide sufficient memory"
+ drm_dbg_kms(display->drm, "No QGV points provide sufficient memory"
" bandwidth %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
}
if (num_psf_gv_points > 0 && psf_points == 0) {
- drm_dbg_kms(&i915->drm, "No PSF GV points provide sufficient memory"
+ drm_dbg_kms(display->drm, "No PSF GV points provide sufficient memory"
" bandwidth %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
@@ -1087,9 +1104,9 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* we can't enable SAGV due to the increased memory latency it may
* cause.
*/
- if (!intel_can_enable_sagv(i915, new_bw_state)) {
- qgv_points = icl_max_bw_qgv_point_mask(i915, num_active_planes);
- drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point mask 0x%x\n",
+ if (!intel_can_enable_sagv(display, new_bw_state)) {
+ qgv_points = icl_max_bw_qgv_point_mask(display, num_active_planes);
+ drm_dbg_kms(display->drm, "No SAGV, using single QGV point mask 0x%x\n",
qgv_points);
}
@@ -1097,7 +1114,7 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* We store the ones which need to be masked as that is what PCode
* actually accepts as a parameter.
*/
- new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
qgv_points,
psf_points);
/*
@@ -1113,80 +1130,90 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
return 0;
}
-static int intel_bw_check_qgv_points(struct drm_i915_private *i915,
+static int intel_bw_check_qgv_points(struct intel_display *display,
const struct intel_bw_state *old_bw_state,
struct intel_bw_state *new_bw_state)
{
- unsigned int data_rate = intel_bw_data_rate(i915, new_bw_state);
+ unsigned int data_rate = intel_bw_data_rate(display, new_bw_state);
unsigned int num_active_planes =
- intel_bw_num_active_planes(i915, new_bw_state);
+ intel_bw_num_active_planes(display, new_bw_state);
data_rate = DIV_ROUND_UP(data_rate, 1000);
- if (DISPLAY_VER(i915) >= 14)
- return mtl_find_qgv_points(i915, data_rate, num_active_planes,
+ if (DISPLAY_VER(display) >= 14)
+ return mtl_find_qgv_points(display, data_rate, num_active_planes,
new_bw_state);
else
- return icl_find_qgv_points(i915, data_rate, num_active_planes,
+ return icl_find_qgv_points(display, data_rate, num_active_planes,
old_bw_state, new_bw_state);
}
-static bool intel_bw_state_changed(struct drm_i915_private *i915,
+static bool intel_dbuf_bw_changed(struct intel_display *display,
+ const struct intel_dbuf_bw *old_dbuf_bw,
+ const struct intel_dbuf_bw *new_dbuf_bw)
+{
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(display, slice) {
+ if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] ||
+ old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice])
+ return true;
+ }
+
+ return false;
+}
+
+static bool intel_bw_state_changed(struct intel_display *display,
const struct intel_bw_state *old_bw_state,
const struct intel_bw_state *new_bw_state)
{
enum pipe pipe;
- for_each_pipe(i915, pipe) {
- const struct intel_dbuf_bw *old_crtc_bw =
+ for_each_pipe(display, pipe) {
+ const struct intel_dbuf_bw *old_dbuf_bw =
&old_bw_state->dbuf_bw[pipe];
- const struct intel_dbuf_bw *new_crtc_bw =
+ const struct intel_dbuf_bw *new_dbuf_bw =
&new_bw_state->dbuf_bw[pipe];
- enum dbuf_slice slice;
- for_each_dbuf_slice(i915, slice) {
- if (old_crtc_bw->max_bw[slice] != new_crtc_bw->max_bw[slice] ||
- old_crtc_bw->active_planes[slice] != new_crtc_bw->active_planes[slice])
- return true;
- }
+ if (intel_dbuf_bw_changed(display, old_dbuf_bw, new_dbuf_bw))
+ return true;
- if (old_bw_state->min_cdclk[pipe] != new_bw_state->min_cdclk[pipe])
+ if (intel_bw_crtc_min_cdclk(display, old_bw_state->data_rate[pipe]) !=
+ intel_bw_crtc_min_cdclk(display, new_bw_state->data_rate[pipe]))
return true;
}
return false;
}
-static void skl_plane_calc_dbuf_bw(struct intel_bw_state *bw_state,
+static void skl_plane_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
struct intel_crtc *crtc,
enum plane_id plane_id,
const struct skl_ddb_entry *ddb,
unsigned int data_rate)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
- unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(i915, ddb);
+ struct intel_display *display = to_intel_display(crtc);
+ unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(display, ddb);
enum dbuf_slice slice;
/*
* The arbiter can only really guarantee an
* equal share of the total bw to each plane.
*/
- for_each_dbuf_slice_in_mask(i915, slice, dbuf_mask) {
- crtc_bw->max_bw[slice] = max(crtc_bw->max_bw[slice], data_rate);
- crtc_bw->active_planes[slice] |= BIT(plane_id);
+ for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) {
+ dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate);
+ dbuf_bw->active_planes[slice] |= BIT(plane_id);
}
}
-static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
+static void skl_crtc_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
enum plane_id plane_id;
- memset(crtc_bw, 0, sizeof(*crtc_bw));
+ memset(dbuf_bw, 0, sizeof(*dbuf_bw));
if (!crtc_state->hw.active)
return;
@@ -1199,12 +1226,12 @@ static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
if (plane_id == PLANE_CURSOR)
continue;
- skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
+ skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
&crtc_state->wm.skl.plane_ddb[plane_id],
crtc_state->data_rate[plane_id]);
- if (DISPLAY_VER(i915) < 11)
- skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
+ if (DISPLAY_VER(display) < 11)
+ skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
&crtc_state->wm.skl.plane_ddb_y[plane_id],
crtc_state->data_rate[plane_id]);
}
@@ -1212,13 +1239,13 @@ static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
/* "Maximum Data Buffer Bandwidth" */
static int
-intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
+intel_bw_dbuf_min_cdclk(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
unsigned int total_max_bw = 0;
enum dbuf_slice slice;
- for_each_dbuf_slice(i915, slice) {
+ for_each_dbuf_slice(display, slice) {
int num_active_planes = 0;
unsigned int max_bw = 0;
enum pipe pipe;
@@ -1227,11 +1254,11 @@ intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
* The arbiter can only really guarantee an
* equal share of the total bw to each plane.
*/
- for_each_pipe(i915, pipe) {
- const struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[pipe];
+ for_each_pipe(display, pipe) {
+ const struct intel_dbuf_bw *dbuf_bw = &bw_state->dbuf_bw[pipe];
- max_bw = max(crtc_bw->max_bw[slice], max_bw);
- num_active_planes += hweight8(crtc_bw->active_planes[slice]);
+ max_bw = max(dbuf_bw->max_bw[slice], max_bw);
+ num_active_planes += hweight8(dbuf_bw->active_planes[slice]);
}
max_bw *= num_active_planes;
@@ -1241,16 +1268,18 @@ intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
return DIV_ROUND_UP(total_max_bw, 64);
}
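
Because the dbuf arbiter only guarantees each plane an equal share of a slice, the worst-case slice bandwidth is its busiest plane times its active-plane count; the slice totals are summed and divided by 64 for the cdclk floor. Worked numbers (assumed):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int max_bw = 5000;         /* assumed busiest plane on a slice */
	unsigned int num_active_planes = 2; /* planes sharing that slice */
	unsigned int total_max_bw = max_bw * num_active_planes;

	printf("min cdclk = %u\n", DIV_ROUND_UP(total_max_bw, 64)); /* 157 */
	return 0;
}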
-int intel_bw_min_cdclk(struct drm_i915_private *i915,
+int intel_bw_min_cdclk(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
enum pipe pipe;
int min_cdclk;
- min_cdclk = intel_bw_dbuf_min_cdclk(i915, bw_state);
+ min_cdclk = intel_bw_dbuf_min_cdclk(display, bw_state);
- for_each_pipe(i915, pipe)
- min_cdclk = max(min_cdclk, bw_state->min_cdclk[pipe]);
+ for_each_pipe(display, pipe)
+ min_cdclk = max(min_cdclk,
+ intel_bw_crtc_min_cdclk(display,
+ bw_state->data_rate[pipe]));
return min_cdclk;
}
@@ -1258,42 +1287,49 @@ int intel_bw_min_cdclk(struct drm_i915_private *i915,
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
bool *need_cdclk_calc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_bw_state *new_bw_state = NULL;
const struct intel_bw_state *old_bw_state = NULL;
const struct intel_cdclk_state *cdclk_state;
- const struct intel_crtc_state *crtc_state;
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
int old_min_cdclk, new_min_cdclk;
struct intel_crtc *crtc;
int i;
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
return 0;
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct intel_dbuf_bw old_dbuf_bw, new_dbuf_bw;
+
+ skl_crtc_calc_dbuf_bw(&old_dbuf_bw, old_crtc_state);
+ skl_crtc_calc_dbuf_bw(&new_dbuf_bw, new_crtc_state);
+
+ if (!intel_dbuf_bw_changed(display, &old_dbuf_bw, &new_dbuf_bw))
+ continue;
+
new_bw_state = intel_atomic_get_bw_state(state);
if (IS_ERR(new_bw_state))
return PTR_ERR(new_bw_state);
old_bw_state = intel_atomic_get_old_bw_state(state);
- skl_crtc_calc_dbuf_bw(new_bw_state, crtc_state);
-
- new_bw_state->min_cdclk[crtc->pipe] =
- intel_bw_crtc_min_cdclk(crtc_state);
+ new_bw_state->dbuf_bw[crtc->pipe] = new_dbuf_bw;
}
if (!old_bw_state)
return 0;
- if (intel_bw_state_changed(dev_priv, old_bw_state, new_bw_state)) {
+ if (intel_bw_state_changed(display, old_bw_state, new_bw_state)) {
int ret = intel_atomic_lock_global_state(&new_bw_state->base);
if (ret)
return ret;
}
- old_min_cdclk = intel_bw_min_cdclk(dev_priv, old_bw_state);
- new_min_cdclk = intel_bw_min_cdclk(dev_priv, new_bw_state);
+ old_min_cdclk = intel_bw_min_cdclk(display, old_bw_state);
+ new_min_cdclk = intel_bw_min_cdclk(display, new_bw_state);
/*
* No need to check against the cdclk state if
@@ -1321,7 +1357,7 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
if (new_min_cdclk <= cdclk_state->bw_min_cdclk)
return 0;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n",
new_min_cdclk, cdclk_state->bw_min_cdclk);
*need_cdclk_calc = true;
@@ -1331,7 +1367,7 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
int i;
@@ -1365,7 +1401,7 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
*changed = true;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] data rate %u num active planes %u\n",
crtc->base.base.id, crtc->base.name,
new_bw_state->data_rate[crtc->pipe],
@@ -1375,16 +1411,103 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
return 0;
}
-int intel_bw_atomic_check(struct intel_atomic_state *state)
+static int intel_bw_modeset_checks(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_bw_state *old_bw_state;
+ struct intel_bw_state *new_bw_state;
+
+ if (DISPLAY_VER(display) < 9)
+ return 0;
+
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ new_bw_state->active_pipes =
+ intel_calc_active_pipes(state, old_bw_state->active_pipes);
+
+ if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
+ int ret;
+
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int intel_bw_check_sagv_mask(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ const struct intel_bw_state *old_bw_state = NULL;
+ struct intel_bw_state *new_bw_state = NULL;
+ struct intel_crtc *crtc;
+ int ret, i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (intel_crtc_can_enable_sagv(old_crtc_state) ==
+ intel_crtc_can_enable_sagv(new_crtc_state))
+ continue;
+
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ if (intel_crtc_can_enable_sagv(new_crtc_state))
+ new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
+ else
+ new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
+ }
+
+ if (!new_bw_state)
+ return 0;
+
+ if (intel_can_enable_sagv(display, new_bw_state) !=
+ intel_can_enable_sagv(display, old_bw_state)) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms)
+{
+ struct intel_display *display = to_intel_display(state);
bool changed = false;
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_bw_state *new_bw_state;
const struct intel_bw_state *old_bw_state;
int ret;
+ if (DISPLAY_VER(display) < 9)
+ return 0;
+
+ if (any_ms) {
+ ret = intel_bw_modeset_checks(state);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_bw_check_sagv_mask(state);
+ if (ret)
+ return ret;
+
/* FIXME earlier gens need some checks too */
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
return 0;
ret = intel_bw_check_data_rate(state, &changed);
@@ -1395,9 +1518,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
new_bw_state = intel_atomic_get_new_bw_state(state);
if (new_bw_state &&
- (intel_can_enable_sagv(i915, old_bw_state) !=
- intel_can_enable_sagv(i915, new_bw_state) ||
- new_bw_state->force_check_qgv))
+ intel_can_enable_sagv(display, old_bw_state) !=
+ intel_can_enable_sagv(display, new_bw_state))
changed = true;
/*
@@ -1407,28 +1529,25 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
if (!changed)
return 0;
- ret = intel_bw_check_qgv_points(i915, old_bw_state, new_bw_state);
+ ret = intel_bw_check_qgv_points(display, old_bw_state, new_bw_state);
if (ret)
return ret;
- new_bw_state->force_check_qgv = false;
-
return 0;
}
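
With the new any_ms parameter the caller is expected to forward its modeset flag, presumably along these lines in the atomic check path (sketch, not part of this patch):

	/* in intel_atomic_check(), once any_ms has been computed */
	ret = intel_bw_atomic_check(state, any_ms);
	if (ret)
		goto fail;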
static void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
bw_state->data_rate[crtc->pipe] =
intel_bw_crtc_data_rate(crtc_state);
bw_state->num_active_planes[crtc->pipe] =
intel_bw_crtc_num_active_planes(crtc_state);
- bw_state->force_check_qgv = true;
- drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
+ drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n",
pipe_name(crtc->pipe),
bw_state->data_rate[crtc->pipe],
bw_state->num_active_planes[crtc->pipe]);
@@ -1444,6 +1563,7 @@ void intel_bw_update_hw_state(struct intel_display *display)
return;
bw_state->active_pipes = 0;
+ bw_state->pipe_sagv_reject = 0;
for_each_intel_crtc(display->drm, crtc) {
const struct intel_crtc_state *crtc_state =
@@ -1455,6 +1575,11 @@ void intel_bw_update_hw_state(struct intel_display *display)
if (DISPLAY_VER(display) >= 11)
intel_bw_crtc_update(bw_state, crtc_state);
+
+ skl_crtc_calc_dbuf_bw(&bw_state->dbuf_bw[pipe], crtc_state);
+
+ /* initially SAGV has been forced off */
+ bw_state->pipe_sagv_reject |= BIT(pipe);
}
}
@@ -1470,6 +1595,7 @@ void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
+ memset(&bw_state->dbuf_bw[pipe], 0, sizeof(bw_state->dbuf_bw[pipe]));
}
static struct intel_global_state *
@@ -1495,9 +1621,8 @@ static const struct intel_global_state_funcs intel_bw_funcs = {
.atomic_destroy_state = intel_bw_destroy_state,
};
-int intel_bw_init(struct drm_i915_private *i915)
+int intel_bw_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_bw_state *state;
state = kzalloc(sizeof(*state), GFP_KERNEL);
@@ -1511,8 +1636,8 @@ int intel_bw_init(struct drm_i915_private *i915)
 * Limit this only if we have SAGV. For display version 14 onwards,
 * SAGV is handled through pmdemand requests
*/
- if (intel_has_sagv(i915) && IS_DISPLAY_VER(i915, 11, 13))
- icl_force_disable_sagv(i915, state);
+ if (intel_has_sagv(display) && IS_DISPLAY_VER(display, 11, 13))
+ icl_force_disable_sagv(display, state);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 3313e4eac4f0..eb2cc883e9c1 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -12,7 +12,6 @@
#include "intel_display_power.h"
#include "intel_global_state.h"
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -49,13 +48,6 @@ struct intel_bw_state {
*/
u16 qgv_points_mask;
- /*
- * Flag to force the QGV comparison in atomic check right after the
- * hw state readout
- */
- bool force_check_qgv;
-
- int min_cdclk[I915_MAX_PIPES];
unsigned int data_rate[I915_MAX_PIPES];
u8 num_active_planes[I915_MAX_PIPES];
};
@@ -72,14 +64,14 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state);
struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state);
-void intel_bw_init_hw(struct drm_i915_private *dev_priv);
-int intel_bw_init(struct drm_i915_private *dev_priv);
-int intel_bw_atomic_check(struct intel_atomic_state *state);
-int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+void intel_bw_init_hw(struct intel_display *display);
+int intel_bw_init(struct intel_display *display);
+int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms);
+int icl_pcode_restrict_qgv_points(struct intel_display *display,
u32 points_mask);
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
bool *need_cdclk_calc);
-int intel_bw_min_cdclk(struct drm_i915_private *i915,
+int intel_bw_min_cdclk(struct intel_display *display,
const struct intel_bw_state *bw_state);
void intel_bw_update_hw_state(struct intel_display *display);
void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 2a8749a0213e..b1718b491ffd 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1972,9 +1972,7 @@ int intel_mdclk_cdclk_ratio(struct intel_display *display,
static void xe2lpd_mdclk_cdclk_ratio_program(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- intel_dbuf_mdclk_cdclk_ratio_update(i915,
+ intel_dbuf_mdclk_cdclk_ratio_update(display,
intel_mdclk_cdclk_ratio(display, cdclk_config),
cdclk_config->joined_mbus);
}
@@ -2808,7 +2806,6 @@ static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_stat
static int intel_compute_min_cdclk(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_cdclk_state *cdclk_state =
intel_atomic_get_new_cdclk_state(state);
const struct intel_bw_state *bw_state;
@@ -2836,7 +2833,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
bw_state = intel_atomic_get_new_bw_state(state);
if (bw_state) {
- min_cdclk = intel_bw_min_cdclk(dev_priv, bw_state);
+ min_cdclk = intel_bw_min_cdclk(display, bw_state);
if (cdclk_state->bw_min_cdclk != min_cdclk) {
int ret;
@@ -3342,6 +3339,8 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
void intel_cdclk_update_hw_state(struct intel_display *display)
{
+ const struct intel_bw_state *bw_state =
+ to_intel_bw_state(display->bw.obj.state);
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(display->cdclk.obj.state);
struct intel_crtc *crtc;
@@ -3359,6 +3358,8 @@ void intel_cdclk_update_hw_state(struct intel_display *display)
cdclk_state->min_cdclk[pipe] = intel_crtc_compute_min_cdclk(crtc_state);
cdclk_state->min_voltage_level[pipe] = crtc_state->min_voltage_level;
}
+
+ cdclk_state->bw_min_cdclk = intel_bw_min_cdclk(display, bw_state);
}
void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc)
@@ -3493,7 +3494,6 @@ static int dg1_rawclk(struct intel_display *display)
static int cnp_rawclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int divider, fraction;
u32 rawclk;
@@ -3513,7 +3513,7 @@ static int cnp_rawclk(struct intel_display *display)
rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
fraction) - 1);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
rawclk |= ICP_RAWCLK_NUM(numerator);
}
@@ -3552,21 +3552,20 @@ static int i9xx_hrawclk(struct intel_display *display)
*/
u32 intel_read_rawclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 freq;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL)
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL)
/*
* MTL always uses a 38.4 MHz rawclk. The bspec tells us
* "RAWCLK_FREQ defaults to the values for 38.4 and does
* not need to be programmed."
*/
freq = 38400;
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ else if (INTEL_PCH_TYPE(display) >= PCH_DG1)
freq = dg1_rawclk(display);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ else if (INTEL_PCH_TYPE(display) >= PCH_CNP)
freq = cnp_rawclk(display);
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(display))
freq = pch_rawclk(display);
else if (display->platform.valleyview || display->platform.cherryview)
freq = vlv_hrawclk(display);
diff --git a/drivers/gpu/drm/i915/display/intel_cmtg.c b/drivers/gpu/drm/i915/display/intel_cmtg.c
index 07d7f4e8f60f..82606ebae1de 100644
--- a/drivers/gpu/drm/i915/display/intel_cmtg.c
+++ b/drivers/gpu/drm/i915/display/intel_cmtg.c
@@ -9,7 +9,6 @@
#include <drm/drm_device.h>
#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_crtc.h"
#include "intel_cmtg.h"
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index cfe14162231d..98dddf72c0eb 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -22,7 +22,9 @@
*
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
+#include "i915_utils.h"
#include "i9xx_plane_regs.h"
#include "intel_color.h"
#include "intel_color_regs.h"
@@ -405,14 +407,13 @@ static void icl_read_csc(struct intel_crtc_state *crtc_state)
static bool ilk_limited_range(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(display->drm);
/* icl+ have dedicated output CSC */
if (DISPLAY_VER(display) >= 11)
return false;
/* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */
- if (DISPLAY_VER(display) < 7 || IS_IVYBRIDGE(i915))
+ if (DISPLAY_VER(display) < 7 || display->platform.ivybridge)
return false;
return crtc_state->limited_color_range;
@@ -516,7 +517,6 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
static void ilk_assign_csc(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(display->drm);
bool limited_color_range = ilk_csc_limited_range(crtc_state);
if (crtc_state->hw.ctm) {
@@ -538,7 +538,7 @@ static void ilk_assign_csc(struct intel_crtc_state *crtc_state)
* LUT is needed but CSC is not we need to load an
* identity matrix.
*/
- drm_WARN_ON(display->drm, !IS_GEMINILAKE(i915));
+ drm_WARN_ON(display->drm, !display->platform.geminilake);
ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_identity);
} else {
@@ -3983,12 +3983,10 @@ int intel_color_init(struct intel_display *display)
void intel_color_init_hooks(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (HAS_GMCH(display)) {
- if (IS_CHERRYVIEW(i915))
+ if (display->platform.cherryview)
display->funcs.color = &chv_color_funcs;
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
display->funcs.color = &vlv_color_funcs;
else if (DISPLAY_VER(display) >= 4)
display->funcs.color = &i965_color_funcs;
@@ -4005,7 +4003,7 @@ void intel_color_init_hooks(struct intel_display *display)
display->funcs.color = &skl_color_funcs;
else if (DISPLAY_VER(display) == 8)
display->funcs.color = &bdw_color_funcs;
- else if (IS_HASWELL(i915))
+ else if (display->platform.haswell)
display->funcs.color = &hsw_color_funcs;
else if (DISPLAY_VER(display) == 7)
display->funcs.color = &ivb_color_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 17eea244cc83..f5cc38dbe559 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -3,6 +3,8 @@
* Copyright © 2018 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_combo_phy.h"
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index e42357bd9e80..6c81c9f2fd09 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -31,8 +31,10 @@
#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_backlight.h"
#include "intel_connector.h"
+#include "intel_display_core.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
@@ -154,13 +156,14 @@ void intel_connector_destroy(struct drm_connector *connector)
int intel_connector_register(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_i915_private *i915 = to_i915(connector->dev);
int ret;
ret = intel_backlight_device_register(intel_connector);
if (ret)
goto err;
- if (i915_inject_probe_failure(to_i915(connector->dev))) {
+ if (i915_inject_probe_failure(i915)) {
ret = -EFAULT;
goto err_backlight;
}
@@ -204,10 +207,10 @@ bool intel_connector_get_hw_state(struct intel_connector *connector)
enum pipe intel_connector_get_pipe(struct intel_connector *connector)
{
- struct drm_device *dev = connector->base.dev;
+ struct intel_display *display = to_intel_display(connector);
- drm_WARN_ON(dev,
- !drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ drm_WARN_ON(display->drm,
+ !drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
if (!connector->base.state->crtc)
return INVALID_PIPE;
@@ -264,20 +267,19 @@ static const struct drm_prop_enum_list force_audio_names[] = {
void
intel_attach_force_audio_property(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_property *prop;
- prop = dev_priv->display.properties.force_audio;
+ prop = display->properties.force_audio;
if (prop == NULL) {
- prop = drm_property_create_enum(dev, 0,
- "audio",
- force_audio_names,
- ARRAY_SIZE(force_audio_names));
+ prop = drm_property_create_enum(display->drm, 0,
+ "audio",
+ force_audio_names,
+ ARRAY_SIZE(force_audio_names));
if (prop == NULL)
return;
- dev_priv->display.properties.force_audio = prop;
+ display->properties.force_audio = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
}
@@ -291,20 +293,19 @@ static const struct drm_prop_enum_list broadcast_rgb_names[] = {
void
intel_attach_broadcast_rgb_property(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_property *prop;
- prop = dev_priv->display.properties.broadcast_rgb;
+ prop = display->properties.broadcast_rgb;
if (prop == NULL) {
- prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
- "Broadcast RGB",
- broadcast_rgb_names,
- ARRAY_SIZE(broadcast_rgb_names));
+ prop = drm_property_create_enum(display->drm, DRM_MODE_PROP_ENUM,
+ "Broadcast RGB",
+ broadcast_rgb_names,
+ ARRAY_SIZE(broadcast_rgb_names));
if (prop == NULL)
return;
- dev_priv->display.properties.broadcast_rgb = prop;
+ display->properties.broadcast_rgb = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
@@ -336,14 +337,14 @@ intel_attach_dp_colorspace_property(struct drm_connector *connector)
void
intel_attach_scaling_mode_property(struct drm_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
u32 scaling_modes;
scaling_modes = BIT(DRM_MODE_SCALE_ASPECT) |
BIT(DRM_MODE_SCALE_FULLSCREEN);
/* On GMCH platforms borders are only possible on the LVDS port */
- if (!HAS_GMCH(i915) || connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ if (!HAS_GMCH(display) || connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
scaling_modes |= BIT(DRM_MODE_SCALE_CENTER);
drm_connector_attach_scaling_mode_property(connector, scaling_modes);
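
Both property helpers above share one create-once-and-cache shape: consult the pointer cached on the display, create the property on first use, bail quietly if creation fails, then attach it to the connector. A self-contained sketch of that shape; the cache variable and create/attach helpers here are stand-ins for display->properties.* and the drm_property_create_enum()/drm_object_attach_property() pair:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct property {
	char name[32];
};

static struct property *cached_force_audio;	/* lives on the display struct in the driver */

static struct property *create_property(const char *name)
{
	struct property *p = calloc(1, sizeof(*p));

	if (p)
		snprintf(p->name, sizeof(p->name), "%s", name);
	return p;
}

static void attach_force_audio_property(void)
{
	struct property *prop = cached_force_audio;

	if (!prop) {
		prop = create_property("audio");
		if (!prop)
			return;	/* creation failed: silently skip the attach */
		cached_force_audio = prop;
	}
	printf("attached property '%s'\n", prop->name);
}

int main(void)
{
	attach_force_audio_property();	/* creates and attaches */
	attach_force_audio_property();	/* reuses the cached property */
	return 0;
}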
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 76ffb3f8467c..38b50a779b6b 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -31,9 +31,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_connector.h"
@@ -91,13 +91,12 @@ static struct intel_crt *intel_attached_crt(struct intel_connector *connector)
bool intel_crt_port_enabled(struct intel_display *display,
i915_reg_t adpa_reg, enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
val = intel_de_read(display, adpa_reg);
/* asserts want to know the pipe even if the port is disabled */
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
*pipe = REG_FIELD_GET(ADPA_PIPE_SEL_MASK_CPT, val);
else
*pipe = REG_FIELD_GET(ADPA_PIPE_SEL_MASK, val);
@@ -177,7 +176,6 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
int mode)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -194,14 +192,14 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
/* For CPT allow 3 pipe config, for others just use A or B */
- if (HAS_PCH_LPT(dev_priv))
+ if (HAS_PCH_LPT(display))
; /* Those bits don't exist here */
- else if (HAS_PCH_CPT(dev_priv))
+ else if (HAS_PCH_CPT(display))
adpa |= ADPA_PIPE_SEL_CPT(crtc->pipe);
else
adpa |= ADPA_PIPE_SEL(crtc->pipe);
- if (!HAS_PCH_SPLIT(dev_priv))
+ if (!HAS_PCH_SPLIT(display))
intel_de_write(display, BCLRPAT(display, crtc->pipe), 0);
switch (mode) {
@@ -356,7 +354,6 @@ intel_crt_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
int max_dotclk = display->cdclk.max_dotclk_freq;
enum drm_mode_status status;
int max_clock;
@@ -368,9 +365,9 @@ intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
- if (HAS_PCH_LPT(dev_priv))
+ if (HAS_PCH_LPT(display))
max_clock = 180000;
- else if (IS_VALLEYVIEW(dev_priv))
+ else if (display->platform.valleyview)
/*
* 270 MHz due to current DPLL limits,
* DAC limit supposedly 355 MHz.
@@ -387,7 +384,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
- if (HAS_PCH_LPT(dev_priv) &&
+ if (HAS_PCH_LPT(display) &&
ilk_get_lanes_required(mode->clock, 270000, 24) > 2)
return MODE_CLOCK_HIGH;
@@ -438,7 +435,6 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -457,7 +453,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
/* LPT FDI RX only supports 8bpc. */
- if (HAS_PCH_LPT(dev_priv)) {
+ if (HAS_PCH_LPT(display)) {
/* TODO: Check crtc_state->max_link_bpp_x16 instead of bw_constrained */
if (crtc_state->bw_constrained && crtc_state->pipe_bpp < 24) {
drm_dbg_kms(display->drm,
@@ -482,13 +478,12 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
u32 adpa;
bool ret;
/* The first time through, trigger an explicit detection cycle */
if (crt->force_hotplug_required) {
- bool turn_off_dac = HAS_PCH_SPLIT(dev_priv);
+ bool turn_off_dac = HAS_PCH_SPLIT(display);
u32 save_adpa;
crt->force_hotplug_required = false;
@@ -532,8 +527,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- bool reenable_hpd;
u32 adpa;
bool ret;
u32 save_adpa;
@@ -550,7 +543,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
*
* Just disable HPD interrupts here to prevent this
*/
- reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
+ intel_hpd_block(&crt->base);
save_adpa = adpa = intel_de_read(display, crt->adpa_reg);
drm_dbg_kms(display->drm,
@@ -577,8 +570,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
drm_dbg_kms(display->drm,
"valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
- if (reenable_hpd)
- intel_hpd_enable(dev_priv, crt->base.hpd_pin);
+ intel_hpd_clear_and_unblock(&crt->base);
return ret;
}
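
The switch from intel_hpd_disable()/intel_hpd_enable() plus a caller-tracked reenable flag to intel_hpd_block()/intel_hpd_clear_and_unblock() moves the bookkeeping into the HPD code itself. One plausible structure for such an interface, sketched standalone; the nesting counter and pending flag are assumptions for illustration, not the intel_hotplug.c implementation:

#include <stdbool.h>
#include <stdio.h>

struct hpd_pin {
	int blocked;	/* nesting count of blockers */
	bool pending;	/* an event arrived while blocked */
};

static void hpd_block(struct hpd_pin *pin)
{
	pin->blocked++;
}

static void hpd_event(struct hpd_pin *pin)
{
	if (pin->blocked)
		pin->pending = true;	/* defer while detection pokes the pin */
	else
		printf("process hotplug\n");
}

static void hpd_clear_and_unblock(struct hpd_pin *pin)
{
	if (--pin->blocked == 0)
		pin->pending = false;	/* drop noise generated by forced detection */
}

int main(void)
{
	struct hpd_pin pin = {0};

	hpd_block(&pin);
	hpd_event(&pin);		/* spurious event from the ADPA poke: deferred */
	hpd_clear_and_unblock(&pin);	/* deferred event discarded, not processed */
	hpd_event(&pin);		/* real event, processed */
	return 0;
}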
@@ -586,15 +578,14 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
u32 stat;
bool ret = false;
int i, tries = 0;
- if (HAS_PCH_SPLIT(dev_priv))
+ if (HAS_PCH_SPLIT(display))
return ilk_crt_detect_hotplug(connector);
- if (IS_VALLEYVIEW(dev_priv))
+ if (display->platform.valleyview)
return valleyview_crt_detect_hotplug(connector);
/*
@@ -602,14 +593,14 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
* to get a reliable result.
*/
- if (IS_G45(dev_priv))
+ if (display->platform.g45)
tries = 2;
else
tries = 1;
for (i = 0; i < tries ; i++) {
/* turn on the FORCE_DETECT */
- i915_hotplug_interrupt_update(dev_priv,
+ i915_hotplug_interrupt_update(display,
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
@@ -627,7 +618,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
intel_de_write(display, PORT_HOTPLUG_STAT(display),
CRT_HOTPLUG_INT_STATUS);
- i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
+ i915_hotplug_interrupt_update(display, CRT_HOTPLUG_FORCE_DETECT, 0);
return ret;
}
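
The retry count now comes from display->platform.g45, and each attempt arms the force-detect bit and waits for the hardware to clear it. The loop, reduced to a generic poll-with-retries sketch; the register layout and the instantly-completing hw_done() helper are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

static unsigned int reg;	/* stands in for the hotplug control register */
#define FORCE_DETECT	(1u << 3)

static bool hw_done(void)
{
	reg &= ~FORCE_DETECT;	/* a real device clears the bit itself */
	return true;
}

static bool detect_with_retries(int tries)
{
	for (int i = 0; i < tries; i++) {
		reg |= FORCE_DETECT;	/* kick off one detection cycle */
		if (!hw_done())
			return false;	/* timed out waiting for the bit to drop */
	}
	return true;
}

int main(void)
{
	bool is_g45 = true;

	/* some platforms need two cycles for a reliable result */
	printf("detected: %d\n", detect_with_retries(is_g45 ? 2 : 1));
	return 0;
}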
@@ -880,7 +871,7 @@ intel_crt_detect(struct drm_connector *connector,
wakeref = intel_display_power_get(display, encoder->power_domain);
- if (I915_HAS_HOTPLUG(display)) {
+ if (HAS_HOTPLUG(display)) {
/* We can not rely on the HPD pin always being correctly wired
* up, for example many KVM do not pass it through, and so
* only trust an assertion that the monitor is connected.
@@ -904,7 +895,7 @@ intel_crt_detect(struct drm_connector *connector,
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(display)) {
+ if (HAS_HOTPLUG(display)) {
status = connector_status_disconnected;
goto out;
}
@@ -943,7 +934,6 @@ out:
static int intel_crt_get_modes(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct intel_encoder *encoder = &crt->base;
intel_wakeref_t wakeref;
@@ -956,7 +946,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
wakeref = intel_display_power_get(display, encoder->power_domain);
ret = intel_crt_ddc_get_modes(connector, connector->ddc);
- if (ret || !IS_G4X(dev_priv))
+ if (ret || !display->platform.g4x)
goto out;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
@@ -1015,16 +1005,15 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
void intel_crt_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_connector *connector;
struct intel_crt *crt;
i915_reg_t adpa_reg;
u8 ddc_pin;
u32 adpa;
- if (HAS_PCH_SPLIT(dev_priv))
+ if (HAS_PCH_SPLIT(display))
adpa_reg = PCH_ADPA;
- else if (IS_VALLEYVIEW(dev_priv))
+ else if (display->platform.valleyview)
adpa_reg = VLV_ADPA;
else
adpa_reg = ADPA;
@@ -1072,7 +1061,7 @@ void intel_crt_init(struct intel_display *display)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = BIT(INTEL_OUTPUT_DVO) | BIT(INTEL_OUTPUT_HDMI);
- if (IS_I830(dev_priv))
+ if (display->platform.i830)
crt->base.pipe_mask = BIT(PIPE_A);
else
crt->base.pipe_mask = ~0;
@@ -1084,7 +1073,7 @@ void intel_crt_init(struct intel_display *display)
crt->base.power_domain = POWER_DOMAIN_PORT_CRT;
- if (I915_HAS_HOTPLUG(display) &&
+ if (HAS_HOTPLUG(display) &&
!dmi_check_system(intel_spurious_crt_detect)) {
crt->base.hpd_pin = HPD_CRT;
crt->base.hotplug = intel_encoder_hotplug;
@@ -1112,7 +1101,7 @@ void intel_crt_init(struct intel_display *display)
intel_ddi_buf_trans_init(&crt->base);
} else {
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
crt->base.compute_config = pch_crt_compute_config;
crt->base.disable = pch_disable_crt;
crt->base.post_disable = pch_post_disable_crt;
@@ -1134,7 +1123,7 @@ void intel_crt_init(struct intel_display *display)
* polarity and link reversal bits or not, instead of relying on the
* BIOS.
*/
- if (HAS_PCH_LPT(dev_priv)) {
+ if (HAS_PCH_LPT(display)) {
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 5b2603ef2ff7..29cfc38f12e0 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -124,7 +124,7 @@ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- crtc->block_dc_for_vblank = intel_psr_needs_block_dc_vblank(crtc_state);
+ crtc->vblank_psr_notify = intel_psr_needs_vblank_notification(crtc_state);
assert_vblank_disabled(&crtc->base);
drm_crtc_set_max_vblank_count(&crtc->base,
@@ -154,9 +154,9 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
drm_crtc_vblank_off(&crtc->base);
assert_vblank_disabled(&crtc->base);
- crtc->block_dc_for_vblank = false;
+ crtc->vblank_psr_notify = false;
- flush_work(&display->irq.vblank_dc_work);
+ flush_work(&display->irq.vblank_notify_work);
}
struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
@@ -305,7 +305,6 @@ static const struct drm_crtc_funcs i8xx_crtc_funcs = {
int intel_crtc_init(struct intel_display *display, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_plane *primary, *cursor;
const struct drm_crtc_funcs *funcs;
struct intel_crtc *crtc;
@@ -333,7 +332,7 @@ int intel_crtc_init(struct intel_display *display, enum pipe pipe)
for_each_sprite(display, pipe, sprite) {
struct intel_plane *plane;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
plane = skl_universal_plane_create(display, pipe, PLANE_2 + sprite);
else
plane = intel_sprite_plane_create(display, pipe, sprite);
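
In intel_crtc_vblank_off() above, vblank_psr_notify is cleared before flush_work() runs, so the notify worker can never fire against a disabled CRTC. A userspace sketch of that clear-then-flush ordering, with pthreads standing in for the kernel workqueue:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static bool notify;	/* analogous to crtc->vblank_psr_notify */
static bool queued;

static void *notify_work(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	if (notify)
		printf("notify PSR of the vblank\n");	/* may or may not run, depending on timing */
	queued = false;
	pthread_cond_signal(&done);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void flush_notify_work(void)
{
	pthread_mutex_lock(&lock);
	while (queued)
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	notify = true;	/* vblank_on: arm the notification */
	queued = true;
	pthread_create(&t, NULL, notify_work, NULL);

	/* vblank_off: clear the flag first ... */
	pthread_mutex_lock(&lock);
	notify = false;
	pthread_mutex_unlock(&lock);
	/* ... then wait out any work still in flight */
	flush_notify_work();

	pthread_join(&t, NULL);
	return 0;
}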
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 599ddce96371..0c7f91046996 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -5,9 +5,10 @@
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "intel_crtc_state_dump.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_hdmi.h"
#include "intel_vblank.h"
@@ -42,13 +43,13 @@ intel_dump_m_n_config(struct drm_printer *p,
}
static void
-intel_dump_infoframe(struct drm_i915_private *i915,
+intel_dump_infoframe(struct intel_display *display,
const union hdmi_infoframe *frame)
{
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- hdmi_infoframe_log(KERN_DEBUG, i915->drm.dev, frame);
+ hdmi_infoframe_log(KERN_DEBUG, display->drm->dev, frame);
}
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
@@ -136,7 +137,7 @@ static void intel_dump_plane_state(struct drm_printer *p,
}
static void
-ilk_dump_csc(struct drm_i915_private *i915,
+ilk_dump_csc(struct intel_display *display,
struct drm_printer *p,
const char *name,
const struct intel_csc_matrix *csc)
@@ -152,7 +153,7 @@ ilk_dump_csc(struct drm_i915_private *i915,
csc->coeff[3 * i + 1],
csc->coeff[3 * i + 2]);
- if (DISPLAY_VER(i915) < 7)
+ if (DISPLAY_VER(display) < 7)
return;
drm_printf(p, "%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name,
@@ -178,7 +179,6 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
{
struct intel_display *display = to_intel_display(pipe_config);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_plane_state *plane_state;
struct intel_plane *plane;
struct drm_printer p;
@@ -188,7 +188,7 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);
drm_printf(&p, "[CRTC:%d:%s] enable: %s [%s]\n",
crtc->base.base.id, crtc->base.name,
@@ -262,19 +262,19 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "GCP: 0x%x\n", pipe_config->infoframes.gcp);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
- intel_dump_infoframe(i915, &pipe_config->infoframes.avi);
+ intel_dump_infoframe(display, &pipe_config->infoframes.avi);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
- intel_dump_infoframe(i915, &pipe_config->infoframes.spd);
+ intel_dump_infoframe(display, &pipe_config->infoframes.spd);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
- intel_dump_infoframe(i915, &pipe_config->infoframes.hdmi);
+ intel_dump_infoframe(display, &pipe_config->infoframes.hdmi);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
- intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
+ intel_dump_infoframe(display, &pipe_config->infoframes.drm);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
- intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
+ intel_dump_infoframe(display, &pipe_config->infoframes.drm);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(DP_SDP_VSC))
drm_dp_vsc_sdp_log(&p, &pipe_config->infoframes.vsc);
@@ -294,8 +294,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
pipe_config->hw.adjusted_mode.crtc_vdisplay,
pipe_config->framestart_delay, pipe_config->msa_timing_delay);
- drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n",
+ drm_printf(&p, "vrr: %s, fixed rr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n",
str_yes_no(pipe_config->vrr.enable),
+ str_yes_no(intel_vrr_is_fixed_rr(pipe_config)),
pipe_config->vrr.vmin, pipe_config->vrr.vmax, pipe_config->vrr.flipline,
pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
pipe_config->vrr.vsync_start, pipe_config->vrr.vsync_end);
@@ -319,14 +320,14 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "linetime: %d, ips linetime: %d\n",
pipe_config->linetime, pipe_config->ips_linetime);
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
drm_printf(&p, "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n",
crtc->num_scalers,
pipe_config->scaler_state.scaler_users,
pipe_config->scaler_state.scaler_id,
pipe_config->hw.scaling_filter);
- if (HAS_GMCH(i915))
+ if (HAS_GMCH(display))
drm_printf(&p, "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
@@ -343,7 +344,7 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
intel_dpll_dump_hw_state(display, &p, &pipe_config->dpll_hw_state);
- if (IS_CHERRYVIEW(i915))
+ if (display->platform.cherryview)
drm_printf(&p, "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
pipe_config->cgm_mode, pipe_config->gamma_mode,
pipe_config->gamma_enable, pipe_config->csc_enable);
@@ -354,20 +355,20 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "pre csc lut: %s%d entries, post csc lut: %d entries\n",
pipe_config->pre_csc_lut && pipe_config->pre_csc_lut ==
- i915->display.color.glk_linear_degamma_lut ? "(linear) " : "",
+ display->color.glk_linear_degamma_lut ? "(linear) " : "",
pipe_config->pre_csc_lut ?
drm_color_lut_size(pipe_config->pre_csc_lut) : 0,
pipe_config->post_csc_lut ?
drm_color_lut_size(pipe_config->post_csc_lut) : 0);
- if (DISPLAY_VER(i915) >= 11)
- ilk_dump_csc(i915, &p, "output csc", &pipe_config->output_csc);
+ if (DISPLAY_VER(display) >= 11)
+ ilk_dump_csc(display, &p, "output csc", &pipe_config->output_csc);
- if (!HAS_GMCH(i915))
- ilk_dump_csc(i915, &p, "pipe csc", &pipe_config->csc);
- else if (IS_CHERRYVIEW(i915))
+ if (!HAS_GMCH(display))
+ ilk_dump_csc(display, &p, "pipe csc", &pipe_config->csc);
+ else if (display->platform.cherryview)
vlv_dump_csc(&p, "cgm csc", &pipe_config->csc);
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
vlv_dump_csc(&p, "wgc csc", &pipe_config->csc);
intel_vdsc_state_dump(&p, 0, pipe_config);
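
The dump path already routes all output through a struct drm_printer, which is why retargeting drm_dbg_printer() from &i915->drm to display->drm is a one-line change. For reference, a heavily simplified printer-style sink in the same spirit as drm_print.h (names and layout invented for the sketch):

#include <stdarg.h>
#include <stdio.h>

struct printer {
	void (*emit)(const char *line);
};

static void to_stderr(const char *line)
{
	fputs(line, stderr);
}

static void pr_printf(struct printer *p, const char *fmt, ...)
{
	char buf[256];
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);
	p->emit(buf);	/* the sink decides where output lands */
}

int main(void)
{
	struct printer p = { .emit = to_stderr };

	/* callers format once; the printer routes to dmesg, seq_file, ... */
	pr_printf(&p, "[CRTC:%d:%s] enable: %s\n", 51, "pipe A", "yes");
	return 0;
}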
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 3276a5b4a9b0..2fec5ba58373 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -9,10 +9,11 @@
#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_cursor.h"
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 22595766eac5..a82b93cbc81d 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -6,8 +6,10 @@
#include <linux/log2.h>
#include <linux/math64.h>
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_cx0_phy.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -2761,9 +2763,9 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
val |= XELPDP_FORWARD_CLOCK_UNGATE;
if (!is_dp && is_hdmi_frl(port_clock))
- val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
else
- val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
/* TODO: HDMI FRL */
/* DP2.0 10G and 20G rates enable MPLLA*/
@@ -2774,7 +2776,7 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
- XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLA |
+ XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_SSC_ENABLE_PLLA |
XELPDP_SSC_ENABLE_PLLB, val);
}
@@ -3097,10 +3099,7 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
- if (DISPLAY_VER(display) >= 30)
- clock = REG_FIELD_GET(XE3_DDI_CLOCK_SELECT_MASK, val);
- else
- clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
+ clock = XELPDP_DDI_CLOCK_SELECT_GET(display, val);
drm_WARN_ON(display->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE));
drm_WARN_ON(display->drm, !(val & XELPDP_TBT_CLOCK_REQUEST));
@@ -3168,13 +3167,9 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
* clock muxes, gating and SSC
*/
- if (DISPLAY_VER(display) >= 30) {
- mask = XE3_DDI_CLOCK_SELECT_MASK;
- val |= XE3_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
- } else {
- mask = XELPDP_DDI_CLOCK_SELECT_MASK;
- val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
- }
+ mask = XELPDP_DDI_CLOCK_SELECT_MASK(display);
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display,
+ intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
mask |= XELPDP_FORWARD_CLOCK_UNGATE;
val |= XELPDP_FORWARD_CLOCK_UNGATE;
@@ -3287,7 +3282,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
/* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_DDI_CLOCK_SELECT_MASK, 0);
+ XELPDP_DDI_CLOCK_SELECT_MASK(display), 0);
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_FORWARD_CLOCK_UNGATE, 0);
@@ -3336,7 +3331,7 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
* 5. Program PORT CLOCK CTRL register to disable and gate clocks
*/
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_DDI_CLOCK_SELECT_MASK |
+ XELPDP_DDI_CLOCK_SELECT_MASK(display) |
XELPDP_FORWARD_CLOCK_UNGATE, 0);
/* 6. Program DDI_CLK_VALFREQ to 0. */
@@ -3365,7 +3360,7 @@ intel_mtl_port_pll_type(struct intel_encoder *encoder,
* handling is done via the standard shared DPLL framework.
*/
val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
- clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
+ clock = XELPDP_DDI_CLOCK_SELECT_GET(display, val);
if (clock == XELPDP_DDI_CLOCK_SELECT_MAXPCLK ||
clock == XELPDP_DDI_CLOCK_SELECT_DIV18CLK)
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index 960f7f778fb8..59c22beaf1de 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -192,10 +192,17 @@
#define XELPDP_TBT_CLOCK_REQUEST REG_BIT(19)
#define XELPDP_TBT_CLOCK_ACK REG_BIT(18)
-#define XELPDP_DDI_CLOCK_SELECT_MASK REG_GENMASK(15, 12)
-#define XE3_DDI_CLOCK_SELECT_MASK REG_GENMASK(16, 12)
-#define XELPDP_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XELPDP_DDI_CLOCK_SELECT_MASK, val)
-#define XE3_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XE3_DDI_CLOCK_SELECT_MASK, val)
+#define _XELPDP_DDI_CLOCK_SELECT_MASK REG_GENMASK(15, 12)
+#define _XE3_DDI_CLOCK_SELECT_MASK REG_GENMASK(16, 12)
+#define XELPDP_DDI_CLOCK_SELECT_MASK(display) (DISPLAY_VER(display) >= 30 ? \
+ _XE3_DDI_CLOCK_SELECT_MASK : _XELPDP_DDI_CLOCK_SELECT_MASK)
+#define XELPDP_DDI_CLOCK_SELECT_PREP(display, val) (DISPLAY_VER(display) >= 30 ? \
+ REG_FIELD_PREP(_XE3_DDI_CLOCK_SELECT_MASK, (val)) : \
+ REG_FIELD_PREP(_XELPDP_DDI_CLOCK_SELECT_MASK, (val)))
+#define XELPDP_DDI_CLOCK_SELECT_GET(display, val) (DISPLAY_VER(display) >= 30 ? \
+ REG_FIELD_GET(_XE3_DDI_CLOCK_SELECT_MASK, (val)) : \
+ REG_FIELD_GET(_XELPDP_DDI_CLOCK_SELECT_MASK, (val)))
+
#define XELPDP_DDI_CLOCK_SELECT_NONE 0x0
#define XELPDP_DDI_CLOCK_SELECT_MAXPCLK 0x8
#define XELPDP_DDI_CLOCK_SELECT_DIV18CLK 0x9
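
The new XELPDP_DDI_CLOCK_SELECT_MASK/_PREP/_GET(display, ...) wrappers centralize the Xe3 widening of the field from bits 15:12 to 16:12, letting every call site in intel_cx0_phy.c drop its own DISPLAY_VER branch. A compilable sketch of such a version-keyed mask/prep/get trio, with GENMASK/FIELD helpers re-implemented locally so the example stands alone:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)		(((~0u) >> (31 - (h))) & (~0u << (l)))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define OLD_CLOCK_SELECT_MASK	GENMASK(15, 12)
#define XE3_CLOCK_SELECT_MASK	GENMASK(16, 12)

/* one branch on the display version, hidden behind the macros */
#define CLOCK_SELECT_MASK(ver)		((ver) >= 30 ? XE3_CLOCK_SELECT_MASK : OLD_CLOCK_SELECT_MASK)
#define CLOCK_SELECT_PREP(ver, v)	FIELD_PREP(CLOCK_SELECT_MASK(ver), (v))
#define CLOCK_SELECT_GET(ver, r)	FIELD_GET(CLOCK_SELECT_MASK(ver), (r))

int main(void)
{
	uint32_t reg = CLOCK_SELECT_PREP(30, 0x9);	/* DIV18CLK on Xe3 */

	/* the value round-trips regardless of display version */
	printf("mask=%#x field=%#x\n", CLOCK_SELECT_MASK(30), CLOCK_SELECT_GET(30, reg));
	return 0;
}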
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index f38c998935b9..74132c1d6385 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -30,11 +30,13 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_scdc_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_privacy_screen_consumer.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "icl_dsi.h"
+#include "intel_alpm.h"
#include "intel_audio.h"
#include "intel_audio_regs.h"
#include "intel_backlight.h"
@@ -78,6 +80,7 @@
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
+#include "intel_vrr.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
@@ -106,14 +109,14 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
return level;
}
-static bool has_buf_trans_select(struct drm_i915_private *i915)
+static bool has_buf_trans_select(struct intel_display *display)
{
- return DISPLAY_VER(i915) < 10 && !IS_BROXTON(i915);
+ return DISPLAY_VER(display) < 10 && !display->platform.broxton;
}
-static bool has_iboost(struct drm_i915_private *i915)
+static bool has_iboost(struct intel_display *display)
{
- return DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915);
+ return DISPLAY_VER(display) == 9 && !display->platform.broxton;
}
/*
@@ -124,25 +127,25 @@ static bool has_iboost(struct drm_i915_private *i915)
void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 iboost_bit = 0;
int i, n_entries;
enum port port = encoder->port;
const struct intel_ddi_buf_trans *trans;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
/* If we're boosting the current, set bit 31 of trans1 */
- if (has_iboost(dev_priv) &&
+ if (has_iboost(display) &&
intel_bios_dp_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
- intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i),
+ intel_de_write(display, DDI_BUF_TRANS_LO(port, i),
trans->entries[i].hsw.trans1 | iboost_bit);
- intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i),
+ intel_de_write(display, DDI_BUF_TRANS_HI(port, i),
trans->entries[i].hsw.trans2);
}
}
@@ -155,7 +158,7 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
int level = intel_ddi_level(encoder, crtc_state, 0);
u32 iboost_bit = 0;
int n_entries;
@@ -163,27 +166,25 @@ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
const struct intel_ddi_buf_trans *trans;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
/* If we're boosting the current, set bit 31 of trans1 */
- if (has_iboost(dev_priv) &&
+ if (has_iboost(display) &&
intel_bios_hdmi_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
- intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9),
+ intel_de_write(display, DDI_BUF_TRANS_LO(port, 9),
trans->entries[level].hsw.trans1 | iboost_bit);
- intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9),
+ intel_de_write(display, DDI_BUF_TRANS_HI(port, 9),
trans->entries[level].hsw.trans2);
}
static i915_reg_t intel_ddi_buf_status_reg(struct intel_display *display, enum port port)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 14)
- return XELPDP_PORT_BUF_CTL1(i915, port);
+ return XELPDP_PORT_BUF_CTL1(display, port);
else
return DDI_BUF_CTL(port);
}
@@ -346,7 +347,6 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
@@ -359,14 +359,14 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
if (dig_port->ddi_a_4_lanes)
intel_dp->DP |= DDI_A_4_LANES;
- if (DISPLAY_VER(i915) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
if (intel_dp_is_uhbr(crtc_state))
intel_dp->DP |= DDI_BUF_PORT_DATA_40BIT;
else
intel_dp->DP |= DDI_BUF_PORT_DATA_10BIT;
}
- if (IS_ALDERLAKE_P(i915) && intel_encoder_is_tc(encoder)) {
+ if (display->platform.alderlake_p && intel_encoder_is_tc(encoder)) {
intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock);
if (!intel_tc_port_in_tbt_alt_mode(dig_port))
intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
@@ -379,8 +379,7 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
}
}
-static int icl_calc_tbt_pll_link(struct intel_display *display,
- enum port port)
+static int icl_calc_tbt_pll_link(struct intel_display *display, enum port port)
{
u32 val = intel_de_read(display, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
@@ -414,15 +413,14 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 temp;
if (!intel_crtc_has_dp_encoder(crtc_state))
return;
- drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder));
+ drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder));
temp = DP_MSA_MISC_SYNC_CLOCK;
@@ -445,7 +443,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
}
/* nonsense combination */
- drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ drm_WARN_ON(display->drm, crtc_state->limited_color_range &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->limited_color_range)
@@ -468,7 +466,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
if (intel_dp_needs_vsc_sdp(crtc_state, conn_state))
temp |= DP_MSA_MISC_COLOR_VSC_SDP;
- intel_de_write(dev_priv, TRANS_MSA_MISC(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_MSA_MISC(display, cpu_transcoder),
temp);
}
@@ -507,8 +505,8 @@ static u32
intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
@@ -516,7 +514,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = TRANS_DDI_FUNC_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
temp |= TGL_TRANS_DDI_SELECT_PORT(port);
else
temp |= TRANS_DDI_SELECT_PORT(port);
@@ -578,7 +576,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= TRANS_DDI_HDMI_SCRAMBLING;
if (crtc_state->hdmi_high_tmds_clock_ratio)
temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
temp |= TRANS_DDI_PORT_WIDTH(crtc_state->lane_count);
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
@@ -591,11 +589,11 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
enum transcoder master;
master = crtc_state->mst_master_transcoder;
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
master == INVALID_TRANSCODER);
temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
}
@@ -604,7 +602,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
- if (IS_DISPLAY_VER(dev_priv, 8, 10) &&
+ if (IS_DISPLAY_VER(display, 8, 10) &&
crtc_state->master_transcoder != INVALID_TRANSCODER) {
u8 master_select =
bdw_trans_port_sync_master_select(crtc_state->master_transcoder);
@@ -619,11 +617,10 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
enum transcoder master_transcoder = crtc_state->master_transcoder;
u32 ctl2 = 0;
@@ -635,12 +632,12 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
PORT_SYNC_MODE_MASTER_SELECT(master_select);
}
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder),
+ intel_de_write(display,
+ TRANS_DDI_FUNC_CTL2(display, cpu_transcoder),
ctl2);
}
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
intel_ddi_transcoder_func_reg_val_get(encoder,
crtc_state));
}
@@ -654,8 +651,7 @@ void
intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 ctl;
@@ -663,7 +659,7 @@ intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
ctl &= ~TRANS_DDI_FUNC_ENABLE;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
ctl);
}
@@ -677,27 +673,26 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 ctl;
- if (DISPLAY_VER(dev_priv) >= 11)
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder),
+ if (DISPLAY_VER(display) >= 11)
+ intel_de_write(display,
+ TRANS_DDI_FUNC_CTL2(display, cpu_transcoder),
0);
- ctl = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ ctl = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
drm_WARN_ON(crtc->base.dev, ctl & TRANS_DDI_HDCP_SIGNALLING);
ctl &= ~TRANS_DDI_FUNC_ENABLE;
- if (IS_DISPLAY_VER(dev_priv, 8, 10))
+ if (IS_DISPLAY_VER(display, 8, 10))
ctl &= ~(TRANS_DDI_PORT_SYNC_ENABLE |
TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
if (!intel_dp_mst_is_master_trans(crtc_state)) {
ctl &= ~(TGL_TRANS_DDI_PORT_MASK |
TRANS_DDI_MODE_SELECT_MASK);
@@ -706,7 +701,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
ctl &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
}
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
ctl);
if (intel_dp_mst_is_slave_trans(crtc_state))
@@ -725,17 +720,15 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
bool enable, u32 hdcp_mask)
{
struct intel_display *display = to_intel_display(intel_encoder);
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
intel_wakeref_t wakeref;
int ret = 0;
wakeref = intel_display_power_get_if_enabled(display,
intel_encoder->power_domain);
- if (drm_WARN_ON(dev, !wakeref))
+ if (drm_WARN_ON(display->drm, !wakeref))
return -ENXIO;
- intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
hdcp_mask, enable ? hdcp_mask : 0);
intel_display_power_put(display, intel_encoder->power_domain, wakeref);
return ret;
@@ -744,7 +737,6 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
struct intel_display *display = to_intel_display(intel_connector);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
int type = intel_connector->base.connector_type;
enum port port = encoder->port;
@@ -765,12 +757,12 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
goto out;
}
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
+ if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
else
cpu_transcoder = (enum transcoder) pipe;
- ddi_mode = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) &
+ ddi_mode = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) &
TRANS_DDI_MODE_SELECT_MASK;
if (ddi_mode == TRANS_DDI_MODE_SELECT_HDMI ||
@@ -804,7 +796,6 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
u8 *pipe_mask, bool *is_dp_mst)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port = encoder->port;
intel_wakeref_t wakeref;
enum pipe p;
@@ -819,13 +810,13 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!wakeref)
return;
- tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+ tmp = intel_de_read(display, DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) {
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_EDP));
+ if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A) {
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
@@ -846,7 +837,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
goto out;
}
- for_each_pipe(dev_priv, p) {
+ for_each_pipe(display, p) {
enum transcoder cpu_transcoder = (enum transcoder)p;
u32 port_mask, ddi_select, ddi_mode;
intel_wakeref_t trans_wakeref;
@@ -856,7 +847,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!trans_wakeref)
continue;
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
port_mask = TGL_TRANS_DDI_PORT_MASK;
ddi_select = TGL_TRANS_DDI_SELECT_PORT(port);
} else {
@@ -864,8 +855,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
ddi_select = TRANS_DDI_SELECT_PORT(port);
}
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
intel_display_power_put(display, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
trans_wakeref);
@@ -883,12 +874,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (!*pipe_mask)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"No pipe for [ENCODER:%d:%s] found\n",
encoder->base.base.id, encoder->base.name);
if (!mst_pipe_mask && dp128b132b_pipe_mask) {
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
/*
* If we don't have 8b/10b MST, but have more than one
@@ -901,12 +892,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
* can assume it's SST.
*/
if (hweight8(dp128b132b_pipe_mask) > 1 ||
- intel_dp_mst_encoder_active_links(dig_port))
+ intel_dp_mst_active_streams(intel_dp))
mst_pipe_mask = dp128b132b_pipe_mask;
}
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
encoder->base.base.id, encoder->base.name,
*pipe_mask);
@@ -914,7 +905,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe masks: all %02x, MST %02x, 128b/132b %02x)\n",
encoder->base.base.id, encoder->base.name,
*pipe_mask, mst_pipe_mask, dp128b132b_pipe_mask);
@@ -922,12 +913,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
*is_dp_mst = mst_pipe_mask;
out:
- if (*pipe_mask && (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))) {
- tmp = intel_de_read(dev_priv, BXT_PHY_CTL(port));
+ if (*pipe_mask && (display->platform.geminilake || display->platform.broxton)) {
+ tmp = intel_de_read(display, BXT_PHY_CTL(port));
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"[ENCODER:%d:%s] enabled but PHY powered down? (PHY_CTL %08x)\n",
encoder->base.base.id, encoder->base.name, tmp);
}
@@ -1041,8 +1032,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
@@ -1050,53 +1040,53 @@ void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
if (cpu_transcoder == TRANSCODER_EDP)
return;
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
val = TGL_TRANS_CLK_SEL_PORT(phy);
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
val = TGL_TRANS_CLK_SEL_PORT(encoder->port);
else
val = TRANS_CLK_SEL_PORT(encoder->port);
- intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
+ intel_de_write(display, TRANS_CLK_SEL(cpu_transcoder), val);
}
void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val;
if (cpu_transcoder == TRANSCODER_EDP)
return;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
val = TGL_TRANS_CLK_SEL_DISABLED;
else
val = TRANS_CLK_SEL_DISABLED;
- intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
+ intel_de_write(display, TRANS_CLK_SEL(cpu_transcoder), val);
}
-static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+static void _skl_ddi_set_iboost(struct intel_display *display,
enum port port, u8 iboost)
{
u32 tmp;
- tmp = intel_de_read(dev_priv, DISPIO_CR_TX_BMU_CR0);
+ tmp = intel_de_read(display, DISPIO_CR_TX_BMU_CR0);
tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
if (iboost)
tmp |= iboost << BALANCE_LEG_SHIFT(port);
else
tmp |= BALANCE_LEG_DISABLE(port);
- intel_de_write(dev_priv, DISPIO_CR_TX_BMU_CR0, tmp);
+ intel_de_write(display, DISPIO_CR_TX_BMU_CR0, tmp);
}
static void skl_ddi_set_iboost(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int level)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u8 iboost;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
@@ -1109,7 +1099,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
int n_entries;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
iboost = trans->entries[level].hsw.i_boost;
@@ -1117,28 +1107,28 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
/* Make sure that the requested I_boost is valid */
if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
- drm_err(&dev_priv->drm, "Invalid I_boost value %u\n", iboost);
+ drm_err(display->drm, "Invalid I_boost value %u\n", iboost);
return;
}
- _skl_ddi_set_iboost(dev_priv, encoder->port, iboost);
+ _skl_ddi_set_iboost(display, encoder->port, iboost);
if (encoder->port == PORT_A && dig_port->max_lanes == 4)
- _skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
+ _skl_ddi_set_iboost(display, PORT_E, iboost);
}
static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int n_entries;
encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
+ if (drm_WARN_ON(display->drm, n_entries < 1))
n_entries = 1;
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
n_entries = ARRAY_SIZE(index_to_dp_signal_levels);
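
_skl_ddi_set_iboost() above is a classic per-port read-modify-write: clear the port's balance-leg field together with its disable bit, then program either the new boost value or the disable bit. The same shape in a standalone sketch; the shift/mask layout here is invented for illustration, not the DISPIO_CR_TX_BMU_CR0 encoding:

#include <stdio.h>
#include <stdint.h>

#define LEG_SHIFT(port)		((port) * 4)
#define LEG_MASK(port)		(0x7u << LEG_SHIFT(port))
#define LEG_DISABLE(port)	(0x8u << LEG_SHIFT(port))

static uint32_t bmu_cr0;	/* stands in for the shared balance-leg register */

static void set_iboost(int port, uint8_t iboost)
{
	uint32_t tmp = bmu_cr0;

	tmp &= ~(LEG_MASK(port) | LEG_DISABLE(port));
	if (iboost)
		tmp |= (uint32_t)iboost << LEG_SHIFT(port);
	else
		tmp |= LEG_DISABLE(port);	/* no boost: park the leg */

	bmu_cr0 = tmp;
}

int main(void)
{
	set_iboost(0, 0x3);	/* boost port A */
	set_iboost(1, 0);	/* disable balancing on port B */
	printf("reg=%#x\n", bmu_cr0);	/* 0x83 */
	return 0;
}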
@@ -1171,14 +1161,14 @@ static u32 icl_combo_phy_loadgen_select(const struct intel_crtc_state *crtc_stat
static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_encoder_to_phy(encoder);
int n_entries, ln;
u32 val;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) {
@@ -1186,25 +1176,25 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
val = EDP4K2K_MODE_OVRD_EN | EDP4K2K_MODE_OVRD_OPTIMIZED;
intel_dp->hobl_active = is_hobl_buf_trans(trans);
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), val,
+ intel_de_rmw(display, ICL_PORT_CL_DW10(phy), val,
intel_dp->hobl_active ? val : 0);
}
/* Set PORT_TX_DW5 */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
COEFF_POLARITY | CURSOR_PROGRAM |
TAP2_DISABLE | TAP3_DISABLE);
val |= SCALING_MODE_SEL(0x2);
val |= RTERM_SELECT(0x6);
val |= TAP3_DISABLE;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val);
/* Program PORT_TX_DW2 */
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW2_LN(ln, phy),
SWING_SEL_UPPER_MASK | SWING_SEL_LOWER_MASK | RCOMP_SCALAR_MASK,
SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel) |
SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel) |
@@ -1216,7 +1206,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW4_LN(ln, phy),
POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK,
POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1) |
POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2) |
@@ -1227,7 +1217,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW7_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW7_LN(ln, phy),
N_SCALAR_MASK,
N_SCALAR(trans->entries[level].icl.dw7_n_scalar));
}
@@ -1236,7 +1226,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
int ln;
@@ -1246,12 +1236,12 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
- val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_PCS_DW1_LN(0, phy));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
val &= ~COMMON_KEEPER_EN;
else
val |= COMMON_KEEPER_EN;
- intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_PCS_DW1_GRP(phy), val);
/* 2. Program loadgen select */
/*
@@ -1261,33 +1251,33 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln < 4; ln++) {
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW4_LN(ln, phy),
LOADGEN_SELECT,
icl_combo_phy_loadgen_select(crtc_state, ln));
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy),
+ intel_de_rmw(display, ICL_PORT_CL_DW5(phy),
0, SUS_CLOCK_CONFIG);
/* 4. Clear training enable to change swing values */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val);
/* 5. Program swing and de-emphasis */
icl_ddi_combo_vswing_program(encoder, crtc_state);
/* 6. Set training enable to trigger update */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
val |= TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val);
}
static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
const struct intel_ddi_buf_trans *trans;
int n_entries, ln;
@@ -1296,13 +1286,13 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
return;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_LINK_PARAMS(ln, tc_port),
CRI_USE_FS32, 0);
- intel_de_rmw(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_LINK_PARAMS(ln, tc_port),
CRI_USE_FS32, 0);
}
@@ -1312,13 +1302,13 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
- intel_de_rmw(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_SWINGCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_17_12_MASK,
CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12));
level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
- intel_de_rmw(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_SWINGCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_17_12_MASK,
CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12));
}
@@ -1329,7 +1319,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
- intel_de_rmw(dev_priv, MG_TX1_DRVCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_DRVCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK,
CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) |
@@ -1338,7 +1328,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
- intel_de_rmw(dev_priv, MG_TX2_DRVCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_DRVCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK,
CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) |
@@ -1354,21 +1344,21 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_CLKHUB(ln, tc_port),
+ intel_de_rmw(display, MG_CLKHUB(ln, tc_port),
CFG_LOW_RATE_LKREN_EN,
crtc_state->port_clock < 300000 ? CFG_LOW_RATE_LKREN_EN : 0);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_TX1_DCC(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_DCC(ln, tc_port),
CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK |
CFG_AMI_CK_DIV_OVERRIDE_EN,
crtc_state->port_clock > 500000 ?
CFG_AMI_CK_DIV_OVERRIDE_VAL(1) |
CFG_AMI_CK_DIV_OVERRIDE_EN : 0);
- intel_de_rmw(dev_priv, MG_TX2_DCC(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_DCC(ln, tc_port),
CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK |
CFG_AMI_CK_DIV_OVERRIDE_EN,
crtc_state->port_clock > 500000 ?
@@ -1378,9 +1368,9 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_PISO_READLOAD(ln, tc_port),
0, CRI_CALCINIT);
- intel_de_rmw(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_PISO_READLOAD(ln, tc_port),
0, CRI_CALCINIT);
}
}
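/*
 * A minimal sketch (not part of the patch) of the conversion pattern applied
 * throughout: derive struct intel_display directly from the encoder via
 * to_intel_display() instead of bouncing through drm_i915_private, then pass
 * it to the intel_de_*() accessors and use display->drm for the drm_*()
 * helpers. foo() is a hypothetical example function:
 */
static void foo(struct intel_encoder *encoder)
{
	/* was: struct drm_i915_private *i915 = to_i915(encoder->base.dev); */
	struct intel_display *display = to_intel_display(encoder);

	if (drm_WARN_ON(display->drm, encoder->port == PORT_NONE))
		return;

	/* was: intel_de_rmw(i915, ...) */
	intel_de_rmw(display, DDI_BUF_CTL(encoder->port), 0, 0);
}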
@@ -1490,12 +1480,12 @@ int intel_ddi_level(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int lane)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_ddi_buf_trans *trans;
int level, n_entries;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&i915->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return 0;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
@@ -1504,7 +1494,7 @@ int intel_ddi_level(struct intel_encoder *encoder,
level = intel_ddi_dp_level(enc_to_intel_dp(encoder), crtc_state,
lane);
- if (drm_WARN_ON_ONCE(&i915->drm, level >= n_entries))
+ if (drm_WARN_ON_ONCE(display->drm, level >= n_entries))
level = n_entries - 1;
return level;
@@ -1514,13 +1504,13 @@ static void
hsw_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int level = intel_ddi_level(encoder, crtc_state, 0);
enum port port = encoder->port;
u32 signal_levels;
- if (has_iboost(dev_priv))
+ if (has_iboost(display))
skl_ddi_set_iboost(encoder, crtc_state, level);
/* HDMI ignores the rest */
@@ -1529,46 +1519,46 @@ hsw_set_signal_levels(struct intel_encoder *encoder,
signal_levels = DDI_BUF_TRANS_SELECT(level);
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ drm_dbg_kms(display->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~DDI_BUF_EMP_MASK;
intel_dp->DP |= signal_levels;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
+ intel_de_write(display, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(display, DDI_BUF_CTL(port));
}
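/*
 * The write/posting-read pair above is the usual MMIO flush idiom: reading
 * the register back forces the posted write out to the hardware before the
 * caller proceeds. A generic sketch of the same idiom, for illustration only
 * (mmio_write_flushed() is not an existing helper):
 */
static void mmio_write_flushed(struct intel_display *display,
			       i915_reg_t reg, u32 val)
{
	intel_de_write(display, reg, val);
	intel_de_posting_read(display, reg);	/* flush the posted write */
}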
-static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+static void _icl_ddi_enable_clock(struct intel_display *display, i915_reg_t reg,
u32 clk_sel_mask, u32 clk_sel, u32 clk_off)
{
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, reg, clk_sel_mask, clk_sel);
+ intel_de_rmw(display, reg, clk_sel_mask, clk_sel);
/*
* "This step and the step before must be
* done with separate register writes."
*/
- intel_de_rmw(i915, reg, clk_off, 0);
+ intel_de_rmw(display, reg, clk_off, 0);
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
-static void _icl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+static void _icl_ddi_disable_clock(struct intel_display *display, i915_reg_t reg,
u32 clk_off)
{
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, reg, 0, clk_off);
+ intel_de_rmw(display, reg, 0, clk_off);
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
-static bool _icl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg,
+static bool _icl_ddi_is_clock_enabled(struct intel_display *display, i915_reg_t reg,
u32 clk_off)
{
- return !(intel_de_read(i915, reg) & clk_off);
+ return !(intel_de_read(display, reg) & clk_off);
}
static struct intel_shared_dpll *
@@ -1585,14 +1575,14 @@ _icl_ddi_get_pll(struct intel_display *display, i915_reg_t reg,
static void adls_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- _icl_ddi_enable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+ _icl_ddi_enable_clock(display, ADLS_DPCLKA_CFGCR(phy),
ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
pll->info->id << ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1600,19 +1590,19 @@ static void adls_ddi_enable_clock(struct intel_encoder *encoder,
static void adls_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+ _icl_ddi_disable_clock(display, ADLS_DPCLKA_CFGCR(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy),
+ return _icl_ddi_is_clock_enabled(display, ADLS_DPCLKA_CFGCR(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
@@ -1629,14 +1619,14 @@ static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_enable_clock(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1644,19 +1634,19 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
static void rkl_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_disable_clock(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+ return _icl_ddi_is_clock_enabled(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
@@ -1673,23 +1663,23 @@ static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
/*
 * If we fail this, something went very wrong: the first 2 PLLs should be
 * used by the first 2 PHYs and the last 2 PLLs by the last 2 PHYs
*/
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
(pll->info->id < DPLL_ID_DG1_DPLL2 && phy >= PHY_C) ||
(pll->info->id >= DPLL_ID_DG1_DPLL2 && phy < PHY_C)))
return;
- _icl_ddi_enable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+ _icl_ddi_enable_clock(display, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1697,19 +1687,19 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
static void dg1_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+ _icl_ddi_disable_clock(display, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy),
+ return _icl_ddi_is_clock_enabled(display, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
@@ -1739,14 +1729,14 @@ static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_enable_clock(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1754,19 +1744,19 @@ static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_disable_clock(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+ return _icl_ddi_is_clock_enabled(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
@@ -1783,39 +1773,39 @@ struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
static void jsl_ddi_tc_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
/*
* "For DDIC and DDID, program DDI_CLK_SEL to map the MG clock to the port.
* MG does not exist, but the programming is required to ungate DDIC and DDID."
*/
- intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
+ intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
icl_ddi_combo_enable_clock(encoder, crtc_state);
}
static void jsl_ddi_tc_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
icl_ddi_combo_disable_clock(encoder);
- intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+ intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
}
static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+ tmp = intel_de_read(display, DDI_CLK_SEL(port));
if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE)
return false;
@@ -1826,54 +1816,54 @@ static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- intel_de_write(i915, DDI_CLK_SEL(port),
+ intel_de_write(display, DDI_CLK_SEL(port),
icl_pll_to_ddi_clk_sel(encoder, crtc_state));
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
+ intel_de_rmw(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port), 0);
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
+ intel_de_rmw(display, ICL_DPCLKA_CFGCR0,
0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
- intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+ intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
}
static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+ tmp = intel_de_read(display, DDI_CLK_SEL(port));
if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE)
return false;
- tmp = intel_de_read(i915, ICL_DPCLKA_CFGCR0);
+ tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0);
return !(tmp & ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
}
@@ -1934,47 +1924,47 @@ static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
static void skl_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, DPLL_CTRL2,
+ intel_de_rmw(display, DPLL_CTRL2,
DPLL_CTRL2_DDI_CLK_OFF(port) |
DPLL_CTRL2_DDI_CLK_SEL_MASK(port),
DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static void skl_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, DPLL_CTRL2,
+ intel_de_rmw(display, DPLL_CTRL2,
0, DPLL_CTRL2_DDI_CLK_OFF(port));
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
/*
* FIXME Not sure if the override affects both
* the PLL selection and the CLK_OFF bit.
*/
- return !(intel_de_read(i915, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port));
+ return !(intel_de_read(display, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port));
}
static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
@@ -2002,30 +1992,30 @@ static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
void hsw_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- intel_de_write(i915, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
+ intel_de_write(display, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
}
void hsw_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- intel_de_write(i915, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+ intel_de_write(display, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
}
bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- return intel_de_read(i915, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE;
+ return intel_de_read(display, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE;
}
static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
@@ -2081,7 +2071,7 @@ void intel_ddi_disable_clock(struct intel_encoder *encoder)
void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 port_mask;
bool ddi_clk_needed;
@@ -2101,7 +2091,7 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* In the unlikely case that BIOS enables DP in MST mode, just
* warn since our MST HW readout is incomplete.
*/
- if (drm_WARN_ON(&i915->drm, is_mst))
+ if (drm_WARN_ON(display->drm, is_mst))
return;
}
@@ -2116,11 +2106,11 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* Sanity check that we haven't incorrectly registered another
* encoder using any of the ports of this DSI encoder.
*/
- for_each_intel_encoder(&i915->drm, other_encoder) {
+ for_each_intel_encoder(display->drm, other_encoder) {
if (other_encoder == encoder)
continue;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
port_mask & BIT(other_encoder->port)))
return;
}
@@ -2135,7 +2125,7 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
!encoder->is_clock_enabled(encoder))
return;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] is disabled/in DSI mode with an ungated DDI clock, gate it\n",
encoder->base.base.id, encoder->base.name);
@@ -2255,10 +2245,10 @@ tgl_dp_tp_transcoder(const struct intel_crtc_state *crtc_state)
i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (DISPLAY_VER(dev_priv) >= 12)
- return TGL_DP_TP_CTL(dev_priv,
+ if (DISPLAY_VER(display) >= 12)
+ return TGL_DP_TP_CTL(display,
tgl_dp_tp_transcoder(crtc_state));
else
return DP_TP_CTL(encoder->port);
@@ -2267,10 +2257,10 @@ i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
static i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (DISPLAY_VER(dev_priv) >= 12)
- return TGL_DP_TP_STATUS(dev_priv,
+ if (DISPLAY_VER(display) >= 12)
+ return TGL_DP_TP_STATUS(display,
tgl_dp_tp_transcoder(crtc_state));
else
return DP_TP_STATUS(encoder->port);
@@ -2445,14 +2435,14 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
static void intel_ddi_disable_fec(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
if (!crtc_state->fec_enable)
return;
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_FEC_ENABLE, 0);
- intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ intel_de_posting_read(display, dp_tp_ctl_reg(encoder, crtc_state));
}
static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
@@ -2474,11 +2464,11 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
* Splitter enable for eDP MSO is limited to certain pipes, on certain
* platforms.
*/
-static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
+static u8 intel_ddi_splitter_pipe_mask(struct intel_display *display)
{
- if (DISPLAY_VER(i915) > 20)
+ if (DISPLAY_VER(display) > 20)
return ~0;
- else if (IS_ALDERLAKE_P(i915))
+ else if (display->platform.alderlake_p)
return BIT(PIPE_A) | BIT(PIPE_B);
else
return BIT(PIPE_A);
@@ -2487,28 +2477,28 @@ static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(pipe_config);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 dss1;
- if (!HAS_MSO(i915))
+ if (!HAS_MSO(display))
return;
- dss1 = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe));
+ dss1 = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));
pipe_config->splitter.enable = dss1 & SPLITTER_ENABLE;
if (!pipe_config->splitter.enable)
return;
- if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) {
+ if (drm_WARN_ON(display->drm, !(intel_ddi_splitter_pipe_mask(display) & BIT(pipe)))) {
pipe_config->splitter.enable = false;
return;
}
switch (dss1 & SPLITTER_CONFIGURATION_MASK) {
default:
- drm_WARN(&i915->drm, true,
+ drm_WARN(display->drm, true,
"Invalid splitter configuration, dss1=0x%08x\n", dss1);
fallthrough;
case SPLITTER_CONFIGURATION_2_SEGMENT:
@@ -2524,12 +2514,12 @@ static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 dss1 = 0;
- if (!HAS_MSO(i915))
+ if (!HAS_MSO(display))
return;
if (crtc_state->splitter.enable) {
@@ -2541,7 +2531,7 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
dss1 |= SPLITTER_CONFIGURATION_4_SEGMENT;
}
- intel_de_rmw(i915, ICL_PIPE_DSS_CTL1(pipe),
+ intel_de_rmw(display, ICL_PIPE_DSS_CTL1(pipe),
SPLITTER_ENABLE | SPLITTER_CONFIGURATION_MASK |
OVERLAP_PIXELS_MASK, dss1);
}
@@ -2549,27 +2539,27 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
static void
mtl_ddi_enable_d2d(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
i915_reg_t reg;
u32 set_bits, wait_bits;
- if (DISPLAY_VER(dev_priv) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
- if (DISPLAY_VER(dev_priv) >= 20) {
+ if (DISPLAY_VER(display) >= 20) {
reg = DDI_BUF_CTL(port);
set_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
} else {
- reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
+ reg = XELPDP_PORT_BUF_CTL1(display, port);
set_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
}
- intel_de_rmw(dev_priv, reg, 0, set_bits);
- if (wait_for_us(intel_de_read(dev_priv, reg) & wait_bits, 100)) {
- drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
+ intel_de_rmw(display, reg, 0, set_bits);
+ if (wait_for_us(intel_de_read(display, reg) & wait_bits, 100)) {
+ drm_err(display->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
}
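/*
 * Condensed sketch of the handshake mtl_ddi_enable_d2d() performs: request
 * the D2D link with a read-modify-write, then poll the paired state bit with
 * a bounded wait. d2d_link_set() is illustrative only; the 100us budget
 * matches the wait_for_us() call above:
 */
static int d2d_link_set(struct intel_display *display, i915_reg_t reg,
			u32 enable_bit, u32 state_bit)
{
	intel_de_rmw(display, reg, 0, enable_bit);

	/* hardware acks by latching the state bit */
	if (wait_for_us(intel_de_read(display, reg) & state_bit, 100))
		return -ETIMEDOUT;

	return 0;
}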
@@ -2599,13 +2589,13 @@ static void mtl_port_buf_ctl_program(struct intel_encoder *encoder,
static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u32 val;
val = intel_tc_port_in_tbt_alt_mode(dig_port) ?
XELPDP_PORT_BUF_IO_SELECT_TBT : 0;
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port),
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
XELPDP_PORT_BUF_IO_SELECT_TBT, val);
}
@@ -2734,7 +2724,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int ret;
@@ -2778,7 +2767,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
}
@@ -2882,16 +2871,15 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
- if (DISPLAY_VER(dev_priv) < 11)
- drm_WARN_ON(&dev_priv->drm,
+ if (DISPLAY_VER(display) < 11)
+ drm_WARN_ON(display->drm,
is_mst && (port == PORT_A || port == PORT_E));
else
- drm_WARN_ON(&dev_priv->drm, is_mst && port == PORT_A);
+ drm_WARN_ON(display->drm, is_mst && port == PORT_A);
intel_dp_set_link_params(intel_dp,
crtc_state->port_clock,
@@ -2908,14 +2896,14 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_clock(encoder, crtc_state);
if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
}
icl_program_mg_dp_mode(dig_port, crtc_state);
- if (has_buf_trans_select(dev_priv))
+ if (has_buf_trans_select(display))
hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
encoder->set_signal_levels(encoder, crtc_state);
@@ -2931,7 +2919,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
crtc_state);
intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true);
intel_dp_start_link_train(state, intel_dp, crtc_state);
- if ((port != PORT_A || DISPLAY_VER(dev_priv) >= 9) &&
+ if ((port != PORT_A || DISPLAY_VER(display) >= 9) &&
!is_trans_port_sync_mode(crtc_state))
intel_dp_stop_link_train(intel_dp, crtc_state);
@@ -2979,12 +2967,11 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
intel_ddi_enable_clock(encoder, crtc_state);
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
@@ -3022,10 +3009,9 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder);
+ drm_WARN_ON(display->drm, crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
@@ -3050,27 +3036,27 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
static void
mtl_ddi_disable_d2d(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
i915_reg_t reg;
u32 clr_bits, wait_bits;
- if (DISPLAY_VER(dev_priv) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
- if (DISPLAY_VER(dev_priv) >= 20) {
+ if (DISPLAY_VER(display) >= 20) {
reg = DDI_BUF_CTL(port);
clr_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
} else {
- reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
+ reg = XELPDP_PORT_BUF_CTL1(display, port);
clr_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
}
- intel_de_rmw(dev_priv, reg, clr_bits, 0);
- if (wait_for_us(!(intel_de_read(dev_priv, reg) & wait_bits), 100))
- drm_err(&dev_priv->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
+ intel_de_rmw(display, reg, clr_bits, 0);
+ if (wait_for_us(!(intel_de_read(display, reg) & wait_bits), 100))
+ drm_err(display->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
@@ -3089,10 +3075,9 @@ static void intel_ddi_buf_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
+ intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
if (DISPLAY_VER(display) >= 14)
intel_wait_ddi_buf_idle(display, port);
@@ -3100,7 +3085,7 @@ static void intel_ddi_buf_disable(struct intel_encoder *encoder,
mtl_ddi_disable_d2d(encoder);
if (intel_crtc_has_dp_encoder(crtc_state)) {
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_ENABLE, 0);
}
@@ -3118,7 +3103,6 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &dig_port->dp;
intel_wakeref_t wakeref;
@@ -3135,12 +3119,12 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
*/
intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
if (is_mst || intel_dp_is_uhbr(old_crtc_state)) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- intel_de_rmw(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK,
0);
}
@@ -3160,7 +3144,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
* Configure Transcoder Clock select to direct no clock to the
* transcoder"
*/
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_pps_vdd_on(intel_dp);
@@ -3176,8 +3160,8 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_ddi_disable_clock(encoder);
/* De-select Thunderbolt */
- if (DISPLAY_VER(dev_priv) >= 14)
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, encoder->port),
+ if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
XELPDP_PORT_BUF_IO_SELECT_TBT, 0);
}
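/*
 * For illustration: the TBT de-select above is the inverse of
 * mtl_port_buf_ctl_io_selection() earlier, which sets
 * XELPDP_PORT_BUF_IO_SELECT_TBT while the port runs in TBT-alt mode. Both
 * directions reduce to a single rmw; xelpdp_set_tbt_io() is a hypothetical
 * helper sketching that:
 */
static void xelpdp_set_tbt_io(struct intel_display *display, enum port port,
			      bool select_tbt)
{
	intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port),
		     XELPDP_PORT_BUF_IO_SELECT_TBT,
		     select_tbt ? XELPDP_PORT_BUF_IO_SELECT_TBT : 0);
}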
@@ -3187,7 +3171,6 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
intel_wakeref_t wakeref;
@@ -3195,12 +3178,12 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
dig_port->set_infoframes(encoder, false,
old_crtc_state, old_conn_state);
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_ddi_buf_disable(encoder, old_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
wakeref = fetch_and_zero(&dig_port->ddi_io_wakeref);
@@ -3220,7 +3203,6 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *pipe_crtc;
bool is_hdmi = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI);
@@ -3249,6 +3231,8 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
drm_dp_dpcd_poll_act_handled(&intel_dp->aux, 0);
}
+ intel_vrr_transcoder_disable(old_crtc_state);
+
intel_ddi_disable_transcoder_func(old_crtc_state);
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
@@ -3257,7 +3241,7 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
intel_dsc_disable(old_pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_scaler_disable(old_pipe_crtc_state);
else
ilk_pfit_disable(old_pipe_crtc_state);
@@ -3359,12 +3343,12 @@ static void intel_ddi_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum port port = encoder->port;
- if (port == PORT_A && DISPLAY_VER(dev_priv) < 9)
+ if (port == PORT_A && DISPLAY_VER(display) < 9)
intel_dp_stop_link_train(intel_dp, crtc_state);
drm_connector_update_privacy_screen(conn_state);
@@ -3401,7 +3385,6 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
enum port port = encoder->port;
@@ -3410,11 +3393,11 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
crtc_state->hdmi_high_tmds_clock_ratio,
crtc_state->hdmi_scrambling))
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
connector->base.id, connector->name);
- if (has_buf_trans_select(dev_priv))
+ if (has_buf_trans_select(display))
hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state);
/* e. Enable D2D Link for C10/C20 Phy */
@@ -3423,7 +3406,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
encoder->set_signal_levels(encoder, crtc_state);
/* Display WA #1143: skl,kbl,cfl */
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
+ if (DISPLAY_VER(display) == 9 && !display->platform.broxton) {
/*
* For some reason these chicken bits have been
 * stuffed into a transcoder register, even though
@@ -3433,7 +3416,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
i915_reg_t reg = gen9_chicken_trans_reg_by_port(display, port);
u32 val;
- val = intel_de_read(dev_priv, reg);
+ val = intel_de_read(display, reg);
if (port == PORT_E)
val |= DDIE_TRAINING_OVERRIDE_ENABLE |
@@ -3442,8 +3425,8 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
val |= DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
udelay(1);
@@ -3454,7 +3437,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE);
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
}
intel_ddi_power_up_lanes(encoder, crtc_state);
@@ -3475,7 +3458,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
if (dig_port->ddi_a_4_lanes)
buf_ctl |= DDI_A_4_LANES;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
u32 port_buf = 0;
port_buf |= XELPDP_PORT_WIDTH(crtc_state->lane_count);
@@ -3483,15 +3466,15 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
if (dig_port->lane_reversal)
port_buf |= XELPDP_PORT_REVERSAL;
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port),
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port),
XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf);
buf_ctl |= DDI_PORT_WIDTH(crtc_state->lane_count);
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
- } else if (IS_ALDERLAKE_P(dev_priv) && intel_encoder_is_tc(encoder)) {
- drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port));
+ } else if (display->platform.alderlake_p && intel_encoder_is_tc(encoder)) {
+ drm_WARN_ON(display->drm, !intel_tc_port_in_legacy_mode(dig_port));
buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
@@ -3522,6 +3505,8 @@ static void intel_ddi_enable(struct intel_atomic_state *state,
intel_ddi_enable_transcoder_func(encoder, crtc_state);
+ intel_vrr_transcoder_enable(crtc_state);
+
/* Enable/Disable DP2.0 SDP split config before transcoder */
intel_audio_sdp_split_update(crtc_state);
@@ -3567,9 +3552,10 @@ static void intel_ddi_disable_dp(struct intel_atomic_state *state,
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- intel_dp->link_trained = false;
+ intel_dp->link.active = false;
intel_psr_disable(intel_dp, old_crtc_state);
+ intel_alpm_disable(intel_dp);
intel_edp_backlight_off(old_conn_state);
/* Disable the decompression in DP Sink */
intel_dp_sink_disable_decompression(state,
@@ -3584,12 +3570,12 @@ static void intel_ddi_disable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector *connector = old_conn_state->connector;
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
false, false))
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
connector->base.id, connector->name);
}
@@ -3653,16 +3639,16 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc *pipe_crtc;
/* FIXME: Add MTL pll_mgr */
- if (DISPLAY_VER(i915) >= 14 || !intel_encoder_is_tc(encoder))
+ if (DISPLAY_VER(display) >= 14 || !intel_encoder_is_tc(encoder))
return;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
intel_crtc_joined_pipe_mask(crtc_state))
intel_update_active_dpll(state, pipe_crtc, encoder);
}
@@ -3678,7 +3664,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_tc_port = intel_encoder_is_tc(encoder);
@@ -3697,7 +3683,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
* Type-C ports. Skip this step for TBT.
*/
intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
bxt_dpio_phy_set_lane_optim_mask(encoder,
crtc_state->lane_lat_optim_mask);
}
@@ -3765,10 +3751,9 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 dp_tp_ctl;
- dp_tp_ctl = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ dp_tp_ctl = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state));
drm_WARN_ON(display->drm, dp_tp_ctl & DP_TP_CTL_ENABLE);
@@ -3781,10 +3766,10 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
if (crtc_state->enhanced_framing)
dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
- intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ intel_de_write(display, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
+ intel_de_posting_read(display, dp_tp_ctl_reg(encoder, crtc_state));
- if (IS_ALDERLAKE_P(dev_priv) &&
+ if (display->platform.alderlake_p &&
(intel_tc_port_in_dp_alt_mode(dig_port) || intel_tc_port_in_legacy_mode(dig_port)))
adlp_tbt_to_dp_alt_switch_wa(encoder);
@@ -3796,11 +3781,11 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 temp;
- temp = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ temp = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state));
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
@@ -3821,17 +3806,17 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
break;
}
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), temp);
+ intel_de_write(display, dp_tp_ctl_reg(encoder, crtc_state), temp);
}
static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_LINK_TRAIN_MASK, DP_TP_CTL_LINK_TRAIN_IDLE);
/*
@@ -3841,28 +3826,26 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
 * In this case there is a requirement to wait for a minimum number of
* idle patterns to be sent.
*/
- if (port == PORT_A && DISPLAY_VER(dev_priv) < 12)
+ if (port == PORT_A && DISPLAY_VER(display) < 12)
return;
- if (intel_de_wait_for_set(dev_priv,
+ if (intel_de_wait_for_set(display,
dp_tp_status_reg(encoder, crtc_state),
DP_TP_STATUS_IDLE_DONE, 2))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timed out waiting for DP idle patterns\n");
}
-static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+static bool intel_ddi_is_audio_enabled(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- struct intel_display *display = &dev_priv->display;
-
if (cpu_transcoder == TRANSCODER_EDP)
return false;
if (!intel_display_power_is_enabled(display, POWER_DOMAIN_AUDIO_MMIO))
return false;
- return intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD) &
+ return intel_de_read(display, HSW_AUD_PIN_ELD_CP_VLD) &
AUDIO_OUTPUT_ENABLE(cpu_transcoder);
}
@@ -3892,34 +3875,34 @@ static int icl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state)
void intel_ddi_compute_min_voltage_level(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state);
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
crtc_state->min_voltage_level = tgl_ddi_min_voltage_level(crtc_state);
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
crtc_state->min_voltage_level = jsl_ddi_min_voltage_level(crtc_state);
- else if (DISPLAY_VER(dev_priv) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state);
}
-static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv,
+static enum transcoder bdw_transcoder_master_readout(struct intel_display *display,
enum transcoder cpu_transcoder)
{
u32 master_select;
- if (DISPLAY_VER(dev_priv) >= 11) {
- u32 ctl2 = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder));
+ if (DISPLAY_VER(display) >= 11) {
+ u32 ctl2 = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL2(display, cpu_transcoder));
if ((ctl2 & PORT_SYNC_MODE_ENABLE) == 0)
return INVALID_TRANSCODER;
master_select = REG_FIELD_GET(PORT_SYNC_MODE_MASTER_SELECT_MASK, ctl2);
} else {
- u32 ctl = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ u32 ctl = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
if ((ctl & TRANS_DDI_PORT_SYNC_ENABLE) == 0)
return INVALID_TRANSCODER;
@@ -3936,15 +3919,14 @@ static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *de
static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
enum transcoder cpu_transcoder;
crtc_state->master_transcoder =
- bdw_transcoder_master_readout(dev_priv, crtc_state->cpu_transcoder);
+ bdw_transcoder_master_readout(display, crtc_state->cpu_transcoder);
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ for_each_cpu_transcoder_masked(display, cpu_transcoder, transcoders) {
enum intel_display_power_domain power_domain;
intel_wakeref_t trans_wakeref;
@@ -3955,14 +3937,14 @@ static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
if (!trans_wakeref)
continue;
- if (bdw_transcoder_master_readout(dev_priv, cpu_transcoder) ==
+ if (bdw_transcoder_master_readout(display, cpu_transcoder) ==
crtc_state->cpu_transcoder)
crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
intel_display_power_put(display, power_domain, trans_wakeref);
}
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
crtc_state->master_transcoder != INVALID_TRANSCODER &&
crtc_state->sync_mode_slaves_mask);
}
@@ -4085,11 +4067,10 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 ddi_func_ctl, ddi_mode, flags = 0;
- ddi_func_ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ ddi_func_ctl = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
if (ddi_func_ctl & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -4131,13 +4112,13 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST) {
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) {
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
/*
 * If this is true, we know we're being called from an MST stream
 * encoder's ->get_config().
*/
- if (intel_dp_mst_encoder_active_links(dig_port))
+ if (intel_dp_mst_active_streams(intel_dp))
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
else
intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl);
@@ -4152,11 +4133,11 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
static void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
/* XXX: DSI transcoder paranoia */
- if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
+ if (drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder)))
return;
intel_ddi_read_func_ctl(encoder, pipe_config);
@@ -4164,14 +4145,14 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_mso_get_config(encoder, pipe_config);
pipe_config->has_audio =
- intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
+ intel_ddi_is_audio_enabled(display, cpu_transcoder);
if (encoder->type == INTEL_OUTPUT_EDP)
intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
ddi_dotclock_get(pipe_config);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_get_lane_lat_optim_mask(encoder);
@@ -4192,7 +4173,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
HDMI_INFOFRAME_TYPE_DRM,
&pipe_config->infoframes.drm);
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
bdw_get_trans_port_sync_config(pipe_config);
intel_psr_get_config(encoder, pipe_config);
@@ -4285,10 +4266,10 @@ static enum icl_port_dpll_id
icl_ddi_tc_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return ICL_PORT_DPLL_DEFAULT;
if (icl_ddi_tc_pll_is_tbt(pll))
@@ -4382,11 +4363,11 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
bool fastset = true;
if (intel_encoder_is_tc(encoder)) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.mode_changed = true;
fastset = false;
@@ -4421,12 +4402,12 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
int ret;
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
+ if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) {
@@ -4441,13 +4422,13 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
+ if (display->platform.haswell && crtc->pipe == PIPE_A &&
pipe_config->cpu_transcoder == TRANSCODER_EDP)
pipe_config->pch_pfit.force_thru =
pipe_config->pch_pfit.enabled ||
pipe_config->crc_enabled;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
@@ -4498,9 +4479,9 @@ static u8
intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
int tile_group_id)
{
+ struct intel_display *display = to_intel_display(ref_crtc_state);
struct drm_connector *connector;
const struct drm_connector_state *conn_state;
- struct drm_i915_private *dev_priv = to_i915(ref_crtc_state->uapi.crtc->dev);
struct intel_atomic_state *state =
to_intel_atomic_state(ref_crtc_state->uapi.state);
u8 transcoders = 0;
@@ -4510,7 +4491,7 @@ intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
* We don't enable port sync on BDW due to missing w/as and
* due to not having adjusted the modeset sequence appropriately.
*/
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
return 0;
if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
@@ -4542,11 +4523,11 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector *connector = conn_state->connector;
u8 port_sync_transcoders = 0;
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n",
encoder->base.base.id, encoder->base.name,
crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
@@ -4618,7 +4599,7 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_connector *connector;
enum port port = dig_port->base.port;
@@ -4627,7 +4608,7 @@ static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
return -ENOMEM;
dig_port->dp.output_reg = DDI_BUF_CTL(port);
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
dig_port->dp.prepare_link_retrain = mtl_ddi_prepare_link_retrain;
else
dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain;
@@ -4643,15 +4624,14 @@ static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
}
if (dig_port->base.type == INTEL_OUTPUT_EDP) {
- struct drm_device *dev = dig_port->base.base.dev;
struct drm_privacy_screen *privacy_screen;
- privacy_screen = drm_privacy_screen_get(dev->dev, NULL);
+ privacy_screen = drm_privacy_screen_get(display->drm->dev, NULL);
if (!IS_ERR(privacy_screen)) {
drm_connector_attach_privacy_screen_provider(&connector->base,
privacy_screen);
} else if (PTR_ERR(privacy_screen) != -ENODEV) {
- drm_warn(dev, "Error getting privacy-screen\n");
+ drm_warn(display->drm, "Error getting privacy-screen\n");
}
}
@@ -4662,7 +4642,6 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
struct intel_connector *connector = hdmi->attached_connector;
struct i2c_adapter *ddc = connector->base.ddc;
@@ -4675,7 +4654,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
if (connector->base.status != connector_status_connected)
return 0;
- ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex,
ctx);
if (ret)
return ret;
@@ -4692,7 +4671,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
crtc_state = to_intel_crtc_state(crtc->base.state);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
if (!crtc_state->hw.active)
@@ -4708,7 +4687,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
ret = drm_scdc_readb(ddc, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
- drm_err(&dev_priv->drm, "[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n",
+ drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n",
connector->base.base.id, connector->base.name, ret);
return 0;
}
@@ -4733,11 +4712,11 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
static void intel_ddi_link_check(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
/* TODO: Move checking the HDMI link state here as well. */
- drm_WARN_ON(&i915->drm, !dig_port->dp.attached_connector);
+ drm_WARN_ON(display->drm, !dig_port->dp.attached_connector);
intel_dp_link_check(encoder);
}
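
The hunks above and below all apply the same mechanical conversion: derive struct intel_display from the object at hand with to_intel_display() instead of reaching back through drm_i915_private. A minimal before/after sketch of the pattern, using only helpers visible in this diff (the sketch_* function names are illustrative, not from the patch):

/* Before: everything funnels through drm_i915_private. */
static void sketch_before(struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);

	drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s]\n",
		    encoder->base.base.id, encoder->base.name);
}

/* After: intel_display is derived directly from the display object. */
static void sketch_after(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	drm_dbg_kms(display->drm, "[ENCODER:%d:%s]\n",
		    encoder->base.base.id, encoder->base.name);
}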
@@ -4800,26 +4779,26 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
static bool lpt_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.pch_hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, SDEISR) & bit;
+ return intel_de_read(display, SDEISR) & bit;
}
static bool hsw_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, DEISR) & bit;
+ return intel_de_read(display, DEISR) & bit;
}
static bool bdw_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
+ return intel_de_read(display, GEN8_DE_PORT_ISR) & bit;
}
static int intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
@@ -4848,7 +4827,7 @@ static int intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
if (dig_port->base.port != PORT_A)
return false;
@@ -4859,7 +4838,7 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
/* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
* supported configuration
*/
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
return true;
return false;
@@ -4868,15 +4847,15 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
static int
intel_ddi_max_lanes(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
enum port port = dig_port->base.port;
int max_lanes = 4;
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return max_lanes;
if (port == PORT_A || port == PORT_E) {
- if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
max_lanes = port == PORT_A ? 4 : 0;
else
/* Both A and E share 2 lanes */
@@ -4889,7 +4868,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
* so we use the proper lane count for our calculations.
*/
if (intel_ddi_a_force_4_lanes(dig_port)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Forcing DDI_A_4_LANES for port A\n");
dig_port->ddi_a_4_lanes = true;
max_lanes = 4;
@@ -4898,8 +4877,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
return max_lanes;
}
-static enum hpd_pin xelpd_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin xelpd_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_D_XELPD)
return HPD_PORT_D + port - PORT_D_XELPD;
@@ -4909,8 +4887,7 @@ static enum hpd_pin xelpd_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin dg1_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_TC1)
return HPD_PORT_C + port - PORT_TC1;
@@ -4918,8 +4895,7 @@ static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin tgl_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_TC1)
return HPD_PORT_TC1 + port - PORT_TC1;
@@ -4927,11 +4903,10 @@ static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin rkl_hpd_pin(struct intel_display *display, enum port port)
{
- if (HAS_PCH_TGP(dev_priv))
- return tgl_hpd_pin(dev_priv, port);
+ if (HAS_PCH_TGP(display))
+ return tgl_hpd_pin(display, port);
if (port >= PORT_TC1)
return HPD_PORT_C + port - PORT_TC1;
@@ -4939,8 +4914,7 @@ static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin icl_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_C)
return HPD_PORT_TC1 + port - PORT_C;
@@ -4948,31 +4922,30 @@ static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin ehl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin ehl_hpd_pin(struct intel_display *display, enum port port)
{
if (port == PORT_D)
return HPD_PORT_A;
- if (HAS_PCH_TGP(dev_priv))
- return icl_hpd_pin(dev_priv, port);
+ if (HAS_PCH_TGP(display))
+ return icl_hpd_pin(display, port);
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin skl_hpd_pin(struct drm_i915_private *dev_priv, enum port port)
+static enum hpd_pin skl_hpd_pin(struct intel_display *display, enum port port)
{
- if (HAS_PCH_TGP(dev_priv))
- return icl_hpd_pin(dev_priv, port);
+ if (HAS_PCH_TGP(display))
+ return icl_hpd_pin(display, port);
return HPD_PORT_A + port - PORT_A;
}
-static bool intel_ddi_is_tc(struct drm_i915_private *i915, enum port port)
+static bool intel_ddi_is_tc(struct intel_display *display, enum port port)
{
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return port >= PORT_TC1;
- else if (DISPLAY_VER(i915) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
return port >= PORT_C;
else
return false;
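
The per-platform hpd_pin helpers above are plain arithmetic on the port enum. A worked example of the tgl mapping (a sketch, assuming the port and HPD pin enums increase contiguously, which the arithmetic above relies on):

/* Illustrative only: sanity checks for tgl_hpd_pin() as converted above. */
static void sketch_tgl_hpd_pins(struct intel_display *display)
{
	WARN_ON(tgl_hpd_pin(display, PORT_A) != HPD_PORT_A);
	WARN_ON(tgl_hpd_pin(display, PORT_TC1) != HPD_PORT_TC1);
	WARN_ON(tgl_hpd_pin(display, PORT_TC3) != HPD_PORT_TC1 + 2);
}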
@@ -5015,21 +4988,21 @@ static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder
#define port_tc_name(port) ((port) - PORT_TC1 + '1')
#define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1')
-static bool port_strap_detected(struct drm_i915_private *i915, enum port port)
+static bool port_strap_detected(struct intel_display *display, enum port port)
{
/* straps not used on skl+ */
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
return true;
switch (port) {
case PORT_A:
- return intel_de_read(i915, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
+ return intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
case PORT_B:
- return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIB_DETECTED;
+ return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDIB_DETECTED;
case PORT_C:
- return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIC_DETECTED;
+ return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDIC_DETECTED;
case PORT_D:
- return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDID_DETECTED;
+ return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDID_DETECTED;
case PORT_E:
return true; /* no strap for DDI-E */
default:
@@ -5043,18 +5016,18 @@ static bool need_aux_ch(struct intel_encoder *encoder, bool init_dp)
return init_dp || intel_encoder_is_tc(encoder);
}
-static bool assert_has_icl_dsi(struct drm_i915_private *i915)
+static bool assert_has_icl_dsi(struct intel_display *display)
{
- return !drm_WARN(&i915->drm, !IS_ALDERLAKE_P(i915) &&
- !IS_TIGERLAKE(i915) && DISPLAY_VER(i915) != 11,
+ return !drm_WARN(display->drm, !display->platform.alderlake_p &&
+ !display->platform.tigerlake && DISPLAY_VER(display) != 11,
"Platform does not support DSI\n");
}
-static bool port_in_use(struct drm_i915_private *i915, enum port port)
+static bool port_in_use(struct intel_display *display, enum port port)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
/* FIXME what about second port for dual link DSI? */
if (encoder->port == port)
return true;
@@ -5066,7 +5039,6 @@ static bool port_in_use(struct drm_i915_private *i915, enum port port)
void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port;
struct intel_encoder *encoder;
bool init_hdmi, init_dp;
@@ -5078,8 +5050,8 @@ void intel_ddi_init(struct intel_display *display,
if (port == PORT_NONE)
return;
- if (!port_strap_detected(dev_priv, port)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!port_strap_detected(display, port)) {
+ drm_dbg_kms(display->drm,
"Port %c strap not detected\n", port_name(port));
return;
}
@@ -5087,15 +5059,15 @@ void intel_ddi_init(struct intel_display *display,
if (!assert_port_valid(display, port))
return;
- if (port_in_use(dev_priv, port)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (port_in_use(display, port)) {
+ drm_dbg_kms(display->drm,
"Port %c already claimed\n", port_name(port));
return;
}
if (intel_bios_encoder_supports_dsi(devdata)) {
/* BXT/GLK handled elsewhere, for now at least */
- if (!assert_has_icl_dsi(dev_priv))
+ if (!assert_has_icl_dsi(display))
return;
icl_dsi_init(display, devdata);
@@ -5111,7 +5083,7 @@ void intel_ddi_init(struct intel_display *display,
* outputs.
*/
if (intel_hti_uses_phy(display, phy)) {
- drm_dbg_kms(&dev_priv->drm, "PORT %c / PHY %c reserved by HTI\n",
+ drm_dbg_kms(display->drm, "PORT %c / PHY %c reserved by HTI\n",
port_name(port), phy_name(phy));
return;
}
@@ -5128,20 +5100,20 @@ void intel_ddi_init(struct intel_display *display,
*/
init_dp = true;
init_hdmi = false;
- drm_dbg_kms(&dev_priv->drm, "VBT says port %c has lspcon\n",
+ drm_dbg_kms(display->drm, "VBT says port %c has lspcon\n",
port_name(port));
}
if (!init_dp && !init_hdmi) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
port_name(port));
return;
}
if (intel_phy_is_snps(display, phy) &&
- dev_priv->display.snps.phy_failed_calibration & BIT(phy)) {
- drm_dbg_kms(&dev_priv->drm,
+ display->snps.phy_failed_calibration & BIT(phy)) {
+ drm_dbg_kms(display->drm,
"SNPS PHY %c failed to calibrate, proceeding anyway\n",
phy_name(phy));
}
@@ -5155,26 +5127,26 @@ void intel_ddi_init(struct intel_display *display,
encoder = &dig_port->base;
encoder->devdata = devdata;
- if (DISPLAY_VER(dev_priv) >= 13 && port >= PORT_D_XELPD) {
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD) {
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %c/PHY %c",
port_name(port - PORT_D_XELPD + PORT_D),
phy_name(phy));
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
enum tc_port tc_port = intel_port_to_tc(display, port);
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %s%c/PHY %s%c",
port >= PORT_TC1 ? "TC" : "",
port >= PORT_TC1 ? port_tc_name(port) : port_name(port),
tc_port != TC_PORT_NONE ? "TC" : "",
tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
enum tc_port tc_port = intel_port_to_tc(display, port);
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %c%s/PHY %s%c",
port_name(port),
@@ -5182,7 +5154,7 @@ void intel_ddi_init(struct intel_display *display,
tc_port != TC_PORT_NONE ? "TC" : "",
tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
} else {
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %c/PHY %c", port_name(port), phy_name(phy));
}
@@ -5218,32 +5190,32 @@ void intel_ddi_init(struct intel_display *display,
encoder->cloneable = 0;
encoder->pipe_mask = ~0;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
encoder->enable_clock = intel_mtl_pll_enable;
encoder->disable_clock = intel_mtl_pll_disable;
encoder->port_pll_type = intel_mtl_port_pll_type;
encoder->get_config = mtl_ddi_get_config;
- } else if (IS_DG2(dev_priv)) {
+ } else if (display->platform.dg2) {
encoder->enable_clock = intel_mpllb_enable;
encoder->disable_clock = intel_mpllb_disable;
encoder->get_config = dg2_ddi_get_config;
- } else if (IS_ALDERLAKE_S(dev_priv)) {
+ } else if (display->platform.alderlake_s) {
encoder->enable_clock = adls_ddi_enable_clock;
encoder->disable_clock = adls_ddi_disable_clock;
encoder->is_clock_enabled = adls_ddi_is_clock_enabled;
encoder->get_config = adls_ddi_get_config;
- } else if (IS_ROCKETLAKE(dev_priv)) {
+ } else if (display->platform.rocketlake) {
encoder->enable_clock = rkl_ddi_enable_clock;
encoder->disable_clock = rkl_ddi_disable_clock;
encoder->is_clock_enabled = rkl_ddi_is_clock_enabled;
encoder->get_config = rkl_ddi_get_config;
- } else if (IS_DG1(dev_priv)) {
+ } else if (display->platform.dg1) {
encoder->enable_clock = dg1_ddi_enable_clock;
encoder->disable_clock = dg1_ddi_disable_clock;
encoder->is_clock_enabled = dg1_ddi_is_clock_enabled;
encoder->get_config = dg1_ddi_get_config;
- } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
- if (intel_ddi_is_tc(dev_priv, port)) {
+ } else if (display->platform.jasperlake || display->platform.elkhartlake) {
+ if (intel_ddi_is_tc(display, port)) {
encoder->enable_clock = jsl_ddi_tc_enable_clock;
encoder->disable_clock = jsl_ddi_tc_disable_clock;
encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled;
@@ -5255,8 +5227,8 @@ void intel_ddi_init(struct intel_display *display,
encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
encoder->get_config = icl_ddi_combo_get_config;
}
- } else if (DISPLAY_VER(dev_priv) >= 11) {
- if (intel_ddi_is_tc(dev_priv, port)) {
+ } else if (DISPLAY_VER(display) >= 11) {
+ if (intel_ddi_is_tc(display, port)) {
encoder->enable_clock = icl_ddi_tc_enable_clock;
encoder->disable_clock = icl_ddi_tc_disable_clock;
encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled;
@@ -5268,36 +5240,36 @@ void intel_ddi_init(struct intel_display *display,
encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
encoder->get_config = icl_ddi_combo_get_config;
}
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
/* BXT/GLK have fixed PLL->port mapping */
encoder->get_config = bxt_ddi_get_config;
- } else if (DISPLAY_VER(dev_priv) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
encoder->enable_clock = skl_ddi_enable_clock;
encoder->disable_clock = skl_ddi_disable_clock;
encoder->is_clock_enabled = skl_ddi_is_clock_enabled;
encoder->get_config = skl_ddi_get_config;
- } else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ } else if (display->platform.broadwell || display->platform.haswell) {
encoder->enable_clock = hsw_ddi_enable_clock;
encoder->disable_clock = hsw_ddi_disable_clock;
encoder->is_clock_enabled = hsw_ddi_is_clock_enabled;
encoder->get_config = hsw_ddi_get_config;
}
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
encoder->set_signal_levels = intel_cx0_phy_set_signal_levels;
- } else if (IS_DG2(dev_priv)) {
+ } else if (display->platform.dg2) {
encoder->set_signal_levels = intel_snps_phy_set_signal_levels;
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
if (intel_encoder_is_combo(encoder))
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
else
encoder->set_signal_levels = tgl_dkl_phy_set_signal_levels;
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
if (intel_encoder_is_combo(encoder))
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
else
encoder->set_signal_levels = icl_mg_phy_set_signal_levels;
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
encoder->set_signal_levels = bxt_dpio_phy_set_signal_levels;
} else {
encoder->set_signal_levels = hsw_set_signal_levels;
@@ -5305,29 +5277,29 @@ void intel_ddi_init(struct intel_display *display,
intel_ddi_buf_trans_init(encoder);
- if (DISPLAY_VER(dev_priv) >= 13)
- encoder->hpd_pin = xelpd_hpd_pin(dev_priv, port);
- else if (IS_DG1(dev_priv))
- encoder->hpd_pin = dg1_hpd_pin(dev_priv, port);
- else if (IS_ROCKETLAKE(dev_priv))
- encoder->hpd_pin = rkl_hpd_pin(dev_priv, port);
- else if (DISPLAY_VER(dev_priv) >= 12)
- encoder->hpd_pin = tgl_hpd_pin(dev_priv, port);
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
- encoder->hpd_pin = ehl_hpd_pin(dev_priv, port);
- else if (DISPLAY_VER(dev_priv) == 11)
- encoder->hpd_pin = icl_hpd_pin(dev_priv, port);
- else if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- encoder->hpd_pin = skl_hpd_pin(dev_priv, port);
+ if (DISPLAY_VER(display) >= 13)
+ encoder->hpd_pin = xelpd_hpd_pin(display, port);
+ else if (display->platform.dg1)
+ encoder->hpd_pin = dg1_hpd_pin(display, port);
+ else if (display->platform.rocketlake)
+ encoder->hpd_pin = rkl_hpd_pin(display, port);
+ else if (DISPLAY_VER(display) >= 12)
+ encoder->hpd_pin = tgl_hpd_pin(display, port);
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
+ encoder->hpd_pin = ehl_hpd_pin(display, port);
+ else if (DISPLAY_VER(display) == 11)
+ encoder->hpd_pin = icl_hpd_pin(display, port);
+ else if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
+ encoder->hpd_pin = skl_hpd_pin(display, port);
else
encoder->hpd_pin = intel_hpd_pin_default(port);
- ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+ ddi_buf_ctl = intel_de_read(display, DDI_BUF_CTL(port));
dig_port->lane_reversal = intel_bios_encoder_lane_reversal(devdata) ||
ddi_buf_ctl & DDI_BUF_PORT_REVERSAL;
- dig_port->ddi_a_4_lanes = DISPLAY_VER(dev_priv) < 11 && ddi_buf_ctl & DDI_A_4_LANES;
+ dig_port->ddi_a_4_lanes = DISPLAY_VER(display) < 11 && ddi_buf_ctl & DDI_A_4_LANES;
dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
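
Ordering matters in the hpd_pin selection chain above: DG1 and RKL report display version 12+, and JSL/EHL report version 11, so the platform checks must run before the generic DISPLAY_VER() fallbacks. A condensed sketch of that dispatch (illustrative; only functions named in the chain above are assumed):

static enum hpd_pin sketch_hpd_pin(struct intel_display *display,
				   enum port port)
{
	if (display->platform.dg1)	/* ver 12, but its own mapping */
		return dg1_hpd_pin(display, port);
	if (DISPLAY_VER(display) >= 12)	/* generic tgl mapping */
		return tgl_hpd_pin(display, port);
	return intel_hpd_pin_default(port);
}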
@@ -5346,7 +5318,7 @@ void intel_ddi_init(struct intel_display *display,
if (!is_legacy && init_hdmi) {
is_legacy = !init_dp;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"VBT says port %c is non-legacy TC and has HDMI (with DP: %s), assume it's %s\n",
port_name(port),
str_yes_no(init_dp),
@@ -5363,24 +5335,24 @@ void intel_ddi_init(struct intel_display *display,
goto err;
}
- drm_WARN_ON(&dev_priv->drm, port > PORT_I);
+ drm_WARN_ON(display->drm, port > PORT_I);
dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(display, port);
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
if (intel_encoder_is_tc(encoder))
dig_port->connected = intel_tc_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
dig_port->connected = bdw_digital_port_connected;
- } else if (DISPLAY_VER(dev_priv) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
dig_port->connected = lpt_digital_port_connected;
- } else if (IS_BROADWELL(dev_priv)) {
+ } else if (display->platform.broadwell) {
if (port == PORT_A)
dig_port->connected = bdw_digital_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else if (IS_HASWELL(dev_priv)) {
+ } else if (display->platform.haswell) {
if (port == PORT_A)
dig_port->connected = hsw_digital_port_connected;
else
@@ -5396,7 +5368,7 @@ void intel_ddi_init(struct intel_display *display,
dig_port->hpd_pulse = intel_dp_hpd_pulse;
if (dig_port->dp.mso_link_count)
- encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
+ encoder->pipe_mask = intel_ddi_splitter_pipe_mask(display);
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index b7399e9d11cc..54ce3e4f8fd9 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -6,7 +6,6 @@
#ifndef __INTEL_DE_H__
#define __INTEL_DE_H__
-#include "intel_display_conversion.h"
#include "intel_display_core.h"
#include "intel_dmc_wl.h"
#include "intel_dsb.h"
@@ -19,7 +18,7 @@ static inline struct intel_uncore *__to_uncore(struct intel_display *display)
}
static inline u32
-__intel_de_read(struct intel_display *display, i915_reg_t reg)
+intel_de_read(struct intel_display *display, i915_reg_t reg)
{
u32 val;
@@ -31,7 +30,6 @@ __intel_de_read(struct intel_display *display, i915_reg_t reg)
return val;
}
-#define intel_de_read(p,...) __intel_de_read(__to_intel_display(p), __VA_ARGS__)
static inline u8
intel_de_read8(struct intel_display *display, i915_reg_t reg)
@@ -66,7 +64,7 @@ intel_de_read64_2x32(struct intel_display *display,
}
static inline void
-__intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
+intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
{
intel_dmc_wl_get(display, reg);
@@ -74,10 +72,9 @@ __intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
intel_dmc_wl_put(display, reg);
}
-#define intel_de_posting_read(p,...) __intel_de_posting_read(__to_intel_display(p), __VA_ARGS__)
static inline void
-__intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
+intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
{
intel_dmc_wl_get(display, reg);
@@ -85,7 +82,6 @@ __intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
intel_dmc_wl_put(display, reg);
}
-#define intel_de_write(p,...) __intel_de_write(__to_intel_display(p), __VA_ARGS__)
static inline u32
__intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg,
@@ -95,8 +91,7 @@ __intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg,
}
static inline u32
-__intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear,
- u32 set)
+intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, u32 set)
{
u32 val;
@@ -108,7 +103,6 @@ __intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear,
return val;
}
-#define intel_de_rmw(p,...) __intel_de_rmw(__to_intel_display(p), __VA_ARGS__)
static inline int
__intel_de_wait_for_register_nowl(struct intel_display *display,
@@ -181,20 +175,18 @@ intel_de_wait_custom(struct intel_display *display, i915_reg_t reg,
}
static inline int
-__intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
{
return intel_de_wait(display, reg, mask, mask, timeout);
}
-#define intel_de_wait_for_set(p,...) __intel_de_wait_for_set(__to_intel_display(p), __VA_ARGS__)
static inline int
-__intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
{
return intel_de_wait(display, reg, mask, 0, timeout);
}
-#define intel_de_wait_for_clear(p,...) __intel_de_wait_for_clear(__to_intel_display(p), __VA_ARGS__)
/*
* Unlocked mmio-accessors, think carefully before using these.
@@ -205,7 +197,7 @@ __intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
* a more localised lock guarding all access to that bank of registers.
*/
static inline u32
-__intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
+intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
{
u32 val;
@@ -214,15 +206,13 @@ __intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
return val;
}
-#define intel_de_read_fw(p,...) __intel_de_read_fw(__to_intel_display(p), __VA_ARGS__)
static inline void
-__intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val)
+intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val)
{
trace_i915_reg_rw(true, reg, val, sizeof(val), true);
intel_uncore_write_fw(__to_uncore(display), reg, val);
}
-#define intel_de_write_fw(p,...) __intel_de_write_fw(__to_intel_display(p), __VA_ARGS__)
static inline u32
intel_de_read_notrace(struct intel_display *display, i915_reg_t reg)
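
The intel_de.h change above removes a layer of polymorphism: each accessor used to be a macro that funneled any supported pointer type through __to_intel_display() into a __intel_de_*() inline; with every caller now passing struct intel_display * directly, the macros and the underscore-prefixed names are dropped. A sketch of the shape being removed (body simplified; __to_intel_display() is assumed to be a generic-selection helper, as its use here implies):

/* Old shape: a type-dispatching macro in front of the real inline. */
static inline u32 __intel_de_read(struct intel_display *display, i915_reg_t reg)
{
	return intel_uncore_read(__to_uncore(display), reg);
}
#define intel_de_read(p, ...) __intel_de_read(__to_intel_display(p), __VA_ARGS__)

/* New shape: a plain inline; callers must already hold intel_display. */
static inline u32 intel_de_read(struct intel_display *display, i915_reg_t reg)
{
	return intel_uncore_read(__to_uncore(display), reg);
}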
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 3b509c70fb58..6f0a0bc71b06 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -55,6 +55,7 @@
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "i9xx_wm.h"
+#include "intel_alpm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_audio.h"
@@ -73,6 +74,7 @@
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_power.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
@@ -663,7 +665,6 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
@@ -696,7 +697,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
* wait-for-vblank between disabling the plane and the pipe.
*/
if (HAS_GMCH(display) &&
- intel_set_memory_cxsr(dev_priv, false))
+ intel_set_memory_cxsr(display, false))
intel_plane_initial_vblank_wait(crtc);
/*
@@ -1043,19 +1044,16 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
- intel_psr_post_plane_update(state, crtc);
-
- intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
+ intel_frontbuffer_flip(display, new_crtc_state->fb_bits);
if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
intel_fbc_post_update(state, crtc);
@@ -1080,6 +1078,10 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (audio_enabling(old_crtc_state, new_crtc_state))
intel_encoders_audio_enable(state, crtc);
+
+ intel_alpm_post_plane_update(state, crtc);
+
+ intel_psr_post_plane_update(state, crtc);
}
static void intel_post_plane_update_after_readout(struct intel_atomic_state *state,
@@ -1168,13 +1170,15 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
+ intel_alpm_pre_plane_update(state, crtc);
+ intel_psr_pre_plane_update(state, crtc);
+
if (intel_crtc_vrr_disabling(state, crtc)) {
intel_vrr_disable(old_crtc_state);
intel_crtc_update_active_timings(old_crtc_state, false);
@@ -1185,8 +1189,6 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
intel_drrs_deactivate(old_crtc_state);
- intel_psr_pre_plane_update(state, crtc);
-
if (hsw_ips_pre_update(state, crtc))
intel_crtc_wait_for_next_vblank(crtc);
@@ -1222,7 +1224,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* wait-for-vblank between disabling the plane and the pipe.
*/
if (HAS_GMCH(display) && old_crtc_state->hw.active &&
- new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
+ new_crtc_state->disable_cxsr && intel_set_memory_cxsr(display, false))
intel_crtc_wait_for_next_vblank(crtc);
/*
@@ -1233,7 +1235,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* WaCxSRDisabledForSpriteScaling:ivb
*/
if (!HAS_GMCH(display) && old_crtc_state->hw.active &&
- new_crtc_state->disable_cxsr && ilk_disable_cxsr(dev_priv))
+ new_crtc_state->disable_cxsr && ilk_disable_cxsr(display))
intel_crtc_wait_for_next_vblank(crtc);
/*
@@ -1257,7 +1259,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
*/
if (!intel_initial_watermarks(state, crtc))
if (new_crtc_state->update_wm_pre)
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
}
/*
@@ -1282,7 +1284,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
unsigned int update_mask = new_crtc_state->update_planes;
@@ -1304,7 +1306,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
fb_bits |= plane->frontbuffer_bit;
}
- intel_frontbuffer_flip(dev_priv, fb_bits);
+ intel_frontbuffer_flip(display, fb_bits);
}
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
@@ -1511,7 +1513,6 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
if (drm_WARN_ON(display->drm, crtc->active))
@@ -1563,7 +1564,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_encoders_enable(state, crtc);
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
intel_wait_for_pipe_scanline_moving(crtc);
/*
@@ -1662,13 +1663,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_encoders_pre_pll_enable(state, crtc);
- for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
- const struct intel_crtc_state *pipe_crtc_state =
- intel_atomic_get_new_crtc_state(state, pipe_crtc);
-
- if (pipe_crtc_state->shared_dpll)
- intel_enable_shared_dpll(pipe_crtc_state);
- }
+ if (new_crtc_state->shared_dpll)
+ intel_enable_shared_dpll(new_crtc_state);
intel_encoders_pre_enable(state, crtc);
@@ -1779,8 +1775,6 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
intel_set_pch_fifo_underrun_reporting(display, pipe, true);
-
- intel_disable_shared_dpll(old_crtc_state);
}
static void hsw_crtc_disable(struct intel_atomic_state *state,
@@ -1799,12 +1793,7 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
intel_encoders_disable(state, crtc);
intel_encoders_post_disable(state, crtc);
- for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
- const struct intel_crtc_state *old_pipe_crtc_state =
- intel_atomic_get_old_crtc_state(state, pipe_crtc);
-
- intel_disable_shared_dpll(old_pipe_crtc_state);
- }
+ intel_disable_shared_dpll(old_crtc_state);
intel_encoders_post_pll_disable(state, crtc);
@@ -2083,7 +2072,6 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
if (drm_WARN_ON(display->drm, crtc->active))
@@ -2107,7 +2095,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
intel_color_modeset(new_crtc_state);
if (!intel_initial_watermarks(state, crtc))
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
intel_enable_transcoder(new_crtc_state);
intel_crtc_vblank_on(new_crtc_state);
@@ -2123,7 +2111,6 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
@@ -2147,9 +2134,9 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
if (display->platform.cherryview)
- chv_disable_pll(dev_priv, pipe);
+ chv_disable_pll(display, pipe);
else if (display->platform.valleyview)
- vlv_disable_pll(dev_priv, pipe);
+ vlv_disable_pll(display, pipe);
else
i9xx_disable_pll(old_crtc_state);
}
@@ -2160,7 +2147,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
if (!display->funcs.wm->initial_watermarks)
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
/* clock the pipe down to 640x480@60 to potentially save power */
if (display->platform.i830)
@@ -2343,7 +2330,6 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
intel_joiner_compute_pipe_src(crtc_state);
@@ -2362,7 +2348,7 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
}
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_is_dual_link_lvds(i915)) {
+ intel_is_dual_link_lvds(display)) {
drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
crtc->base.base.id, crtc->base.name);
@@ -2420,14 +2406,6 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
return 0;
}
-static bool intel_crtc_needs_wa_14015401596(const struct intel_crtc_state *crtc_state)
-{
- struct intel_display *display = to_intel_display(crtc_state);
-
- return intel_vrr_possible(crtc_state) && crtc_state->has_psr &&
- IS_DISPLAY_VER(display, 13, 14);
-}
-
static int intel_crtc_vblank_delay(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -2436,9 +2414,7 @@ static int intel_crtc_vblank_delay(const struct intel_crtc_state *crtc_state)
if (!HAS_DSB(display))
return 0;
- /* Wa_14015401596 */
- if (intel_crtc_needs_wa_14015401596(crtc_state))
- vblank_delay = max(vblank_delay, 1);
+ vblank_delay = max(vblank_delay, intel_psr_min_vblank_delay(crtc_state));
return vblank_delay;
}
@@ -2550,15 +2526,13 @@ intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
void intel_panel_sanitize_ssc(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
/*
* There may be no VBT; if the BIOS enabled SSC we can just keep
* using it to avoid unnecessary flicker. Conversely, if the BIOS
* isn't using it, don't assume it will work even if the VBT
* indicates as much.
*/
- if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display)) {
bool bios_lvds_use_ssc = intel_de_read(display,
PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE;
@@ -2639,6 +2613,15 @@ void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
PIPE_LINK_N2(display, transcoder));
}
+static bool
+transcoder_has_vrr(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ return HAS_VRR(display) && !transcoder_is_dsi(cpu_transcoder);
+}
+
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -2703,6 +2686,15 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
HSYNC_END(adjusted_mode->crtc_hsync_end - 1));
+ /*
+ * For platforms that always use the VRR Timing Generator, the
+ * VTOTAL.Vtotal bits are not required. Support for these bits is
+ * going to be deprecated on upcoming platforms, so avoid writing
+ * them on platforms that no longer use the legacy Timing Generator.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_vtotal = 1;
+
intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
@@ -2722,6 +2714,19 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
intel_de_write(display, TRANS_VTOTAL(display, pipe),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
+
+ if (DISPLAY_VER(display) >= 30) {
+ /*
+ * Address issues for high refresh rate resolutions with small
+ * Hblank, specifically where Hblank is smaller than one MTP.
+ * Simulations indicate this will address the jitter issues that
+ * currently cause BS to be immediately followed by BE, which
+ * DPRX devices are unable to handle.
+ * https://groups.vesa.org/wg/DP/document/20494
+ */
+ intel_de_write(display, DP_MIN_HBLANK_CTL(cpu_transcoder),
+ crtc_state->min_hblank);
+ }
}
static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state)
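
Two behavioural additions land in the hunk above: VTOTAL.Vtotal is parked at its minimum encoding when the VRR timing generator always owns the vertical total, and ver 30+ parts program the new DP_MIN_HBLANK_CTL register. A condensed sketch of the resulting write (illustrative wrapper; register macros exactly as used above):

static void sketch_vtotal_write(struct intel_display *display,
				enum transcoder cpu_transcoder,
				int crtc_vdisplay, int crtc_vtotal)
{
	if (intel_vrr_always_use_vrr_tg(display))
		crtc_vtotal = 1;	/* VTOTAL(crtc_vtotal - 1) encodes as 0 */

	intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
		       VACTIVE(crtc_vdisplay - 1) |
		       VTOTAL(crtc_vtotal - 1));
}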
@@ -2764,12 +2769,24 @@ static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc
VBLANK_START(crtc_vblank_start - 1) |
VBLANK_END(crtc_vblank_end - 1));
/*
+ * For platforms that always use the VRR Timing Generator, the
+ * VTOTAL.Vtotal bits are not required. Support for these bits is
+ * going to be deprecated on upcoming platforms, so avoid writing
+ * them on platforms that no longer use the legacy Timing Generator.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_vtotal = 1;
+
+ /*
* The double buffer latch point for TRANS_VTOTAL
* is the transcoder's undelayed vblank.
*/
intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
+
+ intel_vrr_set_fixed_rr_timings(crtc_state);
+ intel_vrr_transcoder_enable(crtc_state);
}
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
@@ -2853,6 +2870,10 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc,
adjusted_mode->crtc_vdisplay +
intel_de_read(display,
TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder));
+
+ if (DISPLAY_VER(display) >= 30)
+ pipe_config->min_hblank = intel_de_read(display,
+ DP_MIN_HBLANK_CTL(cpu_transcoder));
}
static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
@@ -3835,7 +3856,6 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
struct intel_display_power_domain_set *power_domain_set)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder;
enum port port;
u32 tmp;
@@ -3857,7 +3877,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
* registers/MIPI[BXT]. We can break out here early, since we
* need the same DSI PLL to be enabled for both DSI ports.
*/
- if (!bxt_dsi_pll_is_enabled(dev_priv))
+ if (!bxt_dsi_pll_is_enabled(display))
break;
/* XXX: this works for video mode only */
@@ -3920,7 +3940,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
DISPLAY_VER(display) >= 11)
intel_get_transcoder_timings(crtc, pipe_config);
- if (HAS_VRR(display) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
+ if (transcoder_has_vrr(pipe_config))
intel_vrr_get_config(pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
@@ -4147,8 +4167,6 @@ static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
int linetime_wm;
@@ -4161,7 +4179,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
/* Display WA #1135: BXT:ALL GLK:ALL */
if ((display->platform.geminilake || display->platform.broxton) &&
- skl_watermark_ipc_enabled(dev_priv))
+ skl_watermark_ipc_enabled(display))
linetime_wm /= 2;
return min(linetime_wm, 0x1ff);
@@ -5206,6 +5224,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(lane_count);
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
+ PIPE_CONF_CHECK_I(min_hblank);
+
if (HAS_DOUBLE_BUFFERED_M_N(display)) {
if (!fastset || !pipe_config->update_m_n)
PIPE_CONF_CHECK_M_N(dp_m_n);
@@ -5387,8 +5407,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(vrr.vmin);
PIPE_CONF_CHECK_I(vrr.vmax);
PIPE_CONF_CHECK_I(vrr.flipline);
- PIPE_CONF_CHECK_I(vrr.pipeline_full);
- PIPE_CONF_CHECK_I(vrr.guardband);
PIPE_CONF_CHECK_I(vrr.vsync_start);
PIPE_CONF_CHECK_I(vrr.vsync_end);
PIPE_CONF_CHECK_LLI(cmrr.cmrr_m);
@@ -5396,6 +5414,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(cmrr.enable);
}
+ if (!fastset || intel_vrr_always_use_vrr_tg(display)) {
+ PIPE_CONF_CHECK_I(vrr.pipeline_full);
+ PIPE_CONF_CHECK_I(vrr.guardband);
+ }
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_LLI
@@ -6007,22 +6030,16 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (!plane->async_flip)
continue;
- if (!intel_plane_can_async_flip(plane, new_plane_state->hw.fb->modifier)) {
+ if (!intel_plane_can_async_flip(plane, new_plane_state->hw.fb->format->format,
+ new_plane_state->hw.fb->modifier)) {
drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n",
+ "[PLANE:%d:%s] pixel format %p4cc / modifier 0x%llx does not support async flip\n",
plane->base.base.id, plane->base.name,
+ &new_plane_state->hw.fb->format->format,
new_plane_state->hw.fb->modifier);
return -EINVAL;
}
- if (intel_format_info_is_yuv_semiplanar(new_plane_state->hw.fb->format,
- new_plane_state->hw.fb->modifier)) {
- drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] Planar formats do not support async flips\n",
- plane->base.base.id, plane->base.name);
- return -EINVAL;
- }
-
/*
* We turn the first async flip request into a sync flip
* so that we can reconfigure the plane (e.g. change the modifier).
@@ -6429,7 +6446,7 @@ int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- ret = intel_bw_atomic_check(state);
+ ret = intel_bw_atomic_check(state, any_ms);
if (ret)
goto fail;
@@ -6533,7 +6550,6 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
{
struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/*
* Update pipe size and adjust fitter if needed: the reason for this is
@@ -6549,7 +6565,7 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
if (DISPLAY_VER(display) >= 9) {
if (new_crtc_state->pch_pfit.enabled)
skl_pfit_enable(new_crtc_state);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(display)) {
if (new_crtc_state->pch_pfit.enabled)
ilk_pfit_enable(new_crtc_state);
else if (old_crtc_state->pch_pfit.enabled)
@@ -6650,6 +6666,8 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
intel_crtc_update_active_timings(pipe_crtc_state, false);
}
+ intel_psr_notify_pipe_change(state, crtc, true);
+
display->funcs.display->crtc_enable(state, crtc);
/* vblanks work again, re-enable pipe CRC. */
@@ -6769,6 +6787,8 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
intel_crtc_joined_pipe_mask(old_crtc_state))
intel_crtc_disable_pipe_crc(pipe_crtc);
+ intel_psr_notify_pipe_change(state, crtc, false);
+
display->funcs.display->crtc_disable(state, crtc);
for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
@@ -7231,7 +7251,7 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
@@ -7445,7 +7465,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* toggling overhead at and above 60 FPS.
*/
intel_display_power_put_async_delay(display, POWER_DOMAIN_DC_OFF, wakeref, 17);
- intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ intel_display_rpm_put(display, state->wakeref);
/*
* Defer the cleanup of the old state to a separate worker to not
@@ -7517,10 +7537,9 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
{
struct intel_display *display = to_intel_display(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
- struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ state->wakeref = intel_display_rpm_get(display);
/*
* The intel_legacy_cursor_update() fast path takes care
@@ -7554,7 +7573,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
if (ret) {
drm_dbg_atomic(display->drm,
"Preparing state failed with %i\n", ret);
- intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ intel_display_rpm_put(display, state->wakeref);
return ret;
}
@@ -7564,7 +7583,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
if (ret) {
drm_atomic_helper_unprepare_planes(dev, &state->base);
- intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ intel_display_rpm_put(display, state->wakeref);
return ret;
}
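
The commit path above swaps intel_runtime_pm_get()/put() for display-local wrappers, so the display code no longer needs drm_i915_private just to hold a runtime PM reference. A usage sketch (types as they appear in this diff; the scoped form shows up in the debugfs hunk further below):

static void sketch_rpm_usage(struct intel_display *display)
{
	struct ref_tracker *wakeref;

	wakeref = intel_display_rpm_get(display);
	/* ... display hardware access ... */
	intel_display_rpm_put(display, wakeref);

	/* Scoped variant: the reference spans only the next statement. */
	with_intel_display_rpm(display)
		;	/* hardware access goes here */
}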
@@ -7626,15 +7645,13 @@ static bool ilk_has_edp_a(struct intel_display *display)
static bool intel_ddi_crt_present(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 9)
return false;
if (display->platform.haswell_ult || display->platform.broadwell_ult)
return false;
- if (HAS_PCH_LPT_H(dev_priv) &&
+ if (HAS_PCH_LPT_H(display) &&
intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
return false;
@@ -7656,7 +7673,6 @@ bool assert_port_valid(struct intel_display *display, enum port port)
void intel_setup_outputs(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
bool dpd_is_edp = false;
@@ -7672,8 +7688,8 @@ void intel_setup_outputs(struct intel_display *display)
intel_bios_for_each_encoder(display, intel_ddi_init);
if (display->platform.geminilake || display->platform.broxton)
- vlv_dsi_init(dev_priv);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ vlv_dsi_init(display);
+ } else if (HAS_PCH_SPLIT(display)) {
int found;
/*
@@ -7681,7 +7697,7 @@ void intel_setup_outputs(struct intel_display *display)
* to prevent the registration of both eDP and LVDS and the
* incorrect sharing of the PPS.
*/
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
dpd_is_edp = intel_dp_is_port_edp(display, PORT_D);
@@ -7756,15 +7772,15 @@ void intel_setup_outputs(struct intel_display *display)
g4x_hdmi_init(display, CHV_HDMID, PORT_D);
}
- vlv_dsi_init(dev_priv);
+ vlv_dsi_init(display);
} else if (display->platform.pineview) {
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
} else if (IS_DISPLAY_VER(display, 3, 4)) {
bool found = false;
if (display->platform.mobile)
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
@@ -7806,10 +7822,10 @@ void intel_setup_outputs(struct intel_display *display)
intel_tv_init(display);
} else if (DISPLAY_VER(display) == 2) {
if (display->platform.i85x)
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
- intel_dvo_init(dev_priv);
+ intel_dvo_init(display);
}
for_each_intel_encoder(display->drm, encoder) {
@@ -7819,7 +7835,7 @@ void intel_setup_outputs(struct intel_display *display)
intel_encoder_possible_clones(encoder);
}
- intel_init_pch_refclk(dev_priv);
+ intel_init_pch_refclk(display);
drm_helper_move_panel_connectors_to_head(display->drm);
}
@@ -8041,13 +8057,11 @@ static const struct intel_display_funcs i9xx_display_funcs = {
*/
void intel_init_display_hooks(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 9) {
display->funcs.display = &skl_display_funcs;
} else if (HAS_DDI(display)) {
display->funcs.display = &ddi_display_funcs;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(display)) {
display->funcs.display = &pch_split_display_funcs;
} else if (display->platform.cherryview ||
display->platform.valleyview) {
@@ -8083,6 +8097,9 @@ retry:
goto out;
}
+ if (!crtc_state->hw.active)
+ crtc_state->inherited = false;
+
if (crtc_state->hw.active) {
struct intel_encoder *encoder;
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index eeb7ae3eaea8..b4937e102360 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -21,17 +21,15 @@
#include "intel_display_limits.h"
#include "intel_display_params.h"
#include "intel_display_power.h"
+#include "intel_dmc_wl.h"
#include "intel_dpll_mgr.h"
#include "intel_fbc.h"
#include "intel_global_state.h"
#include "intel_gmbus.h"
#include "intel_opregion.h"
-#include "intel_dmc_wl.h"
+#include "intel_pch.h"
#include "intel_wm_types.h"
-struct task_struct;
-
-struct drm_i915_private;
struct drm_property;
struct drm_property_blob;
struct i915_audio_component;
@@ -52,6 +50,7 @@ struct intel_hotplug_funcs;
struct intel_initial_plane_config;
struct intel_opregion;
struct intel_overlay;
+struct task_struct;
/* Amount of SAGV/QGV points, BSpec precisely defines this */
#define I915_NUM_QGV_POINTS 8
@@ -80,7 +79,7 @@ struct intel_display_funcs {
/* functions used for watermark calcs for display. */
struct intel_wm_funcs {
/* update_wm is for legacy wm management */
- void (*update_wm)(struct drm_i915_private *dev_priv);
+ void (*update_wm)(struct intel_display *display);
int (*compute_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void (*initial_watermarks)(struct intel_atomic_state *state,
@@ -90,8 +89,8 @@ struct intel_wm_funcs {
void (*optimize_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
- void (*get_hw_state)(struct drm_i915_private *i915);
- void (*sanitize)(struct drm_i915_private *i915);
+ void (*get_hw_state)(struct intel_display *display);
+ void (*sanitize)(struct intel_display *display);
};
struct intel_audio_state {
@@ -160,6 +159,7 @@ struct intel_hotplug {
struct {
unsigned long last_jiffies;
int count;
+ int blocked_count;
enum {
HPD_ENABLED = 0,
HPD_DISABLED = 1,
@@ -170,8 +170,8 @@ struct intel_hotplug {
u32 retry_bits;
struct delayed_work reenable_work;
- u32 long_port_mask;
- u32 short_port_mask;
+ u32 long_hpd_pin_mask;
+ u32 short_hpd_pin_mask;
struct work_struct dig_port_work;
struct work_struct poll_init_work;
@@ -179,7 +179,7 @@ struct intel_hotplug {
/*
* Queuing of hotplug_work, reenable_work and poll_init_work is
- * enabled. Protected by drm_i915_private::irq_lock.
+ * enabled. Protected by intel_display::irq::lock.
*/
bool detection_work_enabled;
@@ -288,6 +288,9 @@ struct intel_display {
/* Platform (and subplatform, if any) identification */
struct intel_display_platforms platform;
+ /* Intel PCH: where the south display engine lives */
+ enum intel_pch pch_type;
+
/* Display functions */
struct {
/* Top level crtc-ish functions */
@@ -425,7 +428,7 @@ struct intel_display {
* reused when sending messages to the GSC CS.
* This is only populated post-Meteorlake.
*/
- struct intel_hdcp_gsc_message *hdcp_message;
+ struct intel_hdcp_gsc_context *gsc_context;
/* Mutex to protect the above hdcp related values. */
struct mutex hdcp_mutex;
} hdcp;
@@ -453,6 +456,9 @@ struct intel_display {
} ips;
struct {
+ /* protects the irq masks */
+ spinlock_t lock;
+
/*
* Most platforms treat the display irq block as an always-on
* power domain. vlv/chv can disable it at runtime and need
@@ -465,9 +471,9 @@ struct intel_display {
/* For i915gm/i945gm vblank irq workaround */
u8 vblank_enabled;
- int vblank_wa_num_pipes;
+ int vblank_enable_count;
- struct work_struct vblank_dc_work;
+ struct work_struct vblank_notify_work;
u32 de_irq_mask[I915_MAX_PIPES];
u32 pipestat_irq_mask[I915_MAX_PIPES];
@@ -574,6 +580,8 @@ struct intel_display {
struct intel_vbt_data vbt;
struct intel_dmc_wl wl;
struct intel_wm wm;
+
+ struct work_struct psr_dc5_dc6_wa_work;
};
#endif /* __INTEL_DISPLAY_CORE_H__ */
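
intel_display_core.h now embeds the irq lock that previously lived in drm_i915_private, alongside the renamed vblank bookkeeping. A sketch of how the relocated lock would be taken (an assumption: the comment above only says it protects the irq masks; the locking discipline is inferred, not shown in this diff):

static void sketch_mask_pipe_irq(struct intel_display *display,
				 enum pipe pipe, u32 bits)
{
	spin_lock_irq(&display->irq.lock);
	display->irq.de_irq_mask[pipe] |= bits;
	spin_unlock_irq(&display->irq.lock);
}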
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index fdedf65bee53..8d0a1779dd19 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -7,11 +7,12 @@
#include <linux/string_helpers.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
+#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include "hsw_ips.h"
-#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "i9xx_wm_regs.h"
@@ -24,6 +25,7 @@
#include "intel_display_debugfs_params.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
@@ -54,6 +56,8 @@ static int intel_display_caps(struct seq_file *m, void *data)
struct intel_display *display = node_to_intel_display(m->private);
struct drm_printer p = drm_seq_file_printer(m);
+ drm_printf(&p, "PCH type: %d\n", INTEL_PCH_TYPE(display));
+
intel_display_device_info_print(DISPLAY_INFO(display),
DISPLAY_RUNTIME_INFO(display), &p);
intel_display_params_dump(&display->params, display->drm->driver->name, &p);
@@ -81,7 +85,6 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
static int i915_sr_status(struct seq_file *m, void *unused)
{
struct intel_display *display = node_to_intel_display(m->private);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
intel_wakeref_t wakeref;
bool sr_enabled = false;
@@ -89,7 +92,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
if (DISPLAY_VER(display) >= 9)
/* no global SR status; inspect per-plane WM */;
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(display))
sr_enabled = intel_de_read(display, WM1_LP_ILK) & WM_LP_ENABLE;
else if (display->platform.i965gm || display->platform.g4x ||
display->platform.i945g || display->platform.i945gm)
@@ -554,6 +557,8 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
seq_printf(m, "\tpipe src=" DRM_RECT_FMT ", dither=%s, bpp=%d\n",
DRM_RECT_ARG(&crtc_state->pipe_src),
str_yes_no(crtc_state->dither), crtc_state->pipe_bpp);
+ seq_printf(m, "\tport_clock=%d, lane_count=%d\n",
+ crtc_state->port_clock, crtc_state->lane_count);
intel_scaler_info(m, crtc);
@@ -580,13 +585,12 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
static int i915_display_info(struct seq_file *m, void *unused)
{
struct intel_display *display = node_to_intel_display(m->private);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
drm_modeset_lock_all(display->drm);
@@ -605,18 +609,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
drm_modeset_unlock_all(display->drm);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_display_capabilities(struct seq_file *m, void *unused)
-{
- struct intel_display *display = node_to_intel_display(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
-
- intel_display_device_info_print(DISPLAY_INFO(display),
- DISPLAY_RUNTIME_INFO(display), &p);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
@@ -690,14 +683,11 @@ static bool
intel_lpsp_power_well_enabled(struct intel_display *display,
enum i915_power_well_id power_well_id)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
- intel_wakeref_t wakeref;
bool is_enabled;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- is_enabled = intel_display_power_well_is_enabled(display,
- power_well_id);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ with_intel_display_rpm(display)
+ is_enabled = intel_display_power_well_is_enabled(display,
+ power_well_id);
return is_enabled;
}
@@ -820,7 +810,6 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
- {"i915_display_capabilities", i915_display_capabilities, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_ddb_info", i915_ddb_info, 0},
@@ -829,7 +818,6 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
void intel_display_debugfs_register(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_fifo_underrun_reset", 0644, minor->debugfs_root,
@@ -844,10 +832,10 @@ void intel_display_debugfs_register(struct intel_display *display)
intel_dmc_debugfs_register(display);
intel_dp_test_debugfs_register(display);
intel_fbc_debugfs_register(display);
- intel_hpd_debugfs_register(i915);
+ intel_hpd_debugfs_register(display);
intel_opregion_debugfs_register(display);
intel_psr_debugfs_register(display);
- intel_wm_debugfs_register(i915);
+ intel_wm_debugfs_register(display);
intel_display_debugfs_params(display);
}
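
The debugfs hooks above drop the explicit intel_runtime_pm_get()/
intel_runtime_pm_put() pairs in favor of the display-local RPM helpers,
including the scoped with_intel_display_rpm() guard used in
intel_lpsp_power_well_enabled(). The macro definition is not part of
this diff; a plausible shape, assuming the kernel's usual for-loop
guard idiom, would be:

/* Assumed definition, mirroring with_intel_runtime_pm(); the real one
 * lives in intel_display_rpm.h and may differ.
 */
#define with_intel_display_rpm(display) \
	for (struct ref_tracker *wf__ = intel_display_rpm_get(display); \
	     wf__; \
	     intel_display_rpm_put((display), wf__), wf__ = NULL)

Written this way the body runs once while a wakeref is held, which is
why intel_lpsp_power_well_enabled() above no longer needs an explicit
put on any return path.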
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 738ae522c8f4..90d714598664 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -3,11 +3,13 @@
* Copyright © 2023 Intel Corporation
*/
-#include <drm/intel/pciids.h>
-#include <drm/drm_color_mgmt.h>
#include <linux/pci.h>
-#include "i915_drv.h"
+#include <drm/drm_color_mgmt.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
+#include <drm/intel/pciids.h>
+
#include "i915_reg.h"
#include "intel_cx0_phy_regs.h"
#include "intel_de.h"
@@ -1711,7 +1713,6 @@ void intel_display_device_remove(struct intel_display *display)
static void __intel_display_device_info_runtime_init(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(display);
enum pipe pipe;
@@ -1775,7 +1776,7 @@ static void __intel_display_device_info_runtime_init(struct intel_display *displ
goto display_fused_off;
}
- if (IS_DISPLAY_VER(display, 7, 8) && HAS_PCH_SPLIT(i915)) {
+ if (IS_DISPLAY_VER(display, 7, 8) && HAS_PCH_SPLIT(display)) {
u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
u32 sfuse_strap = intel_de_read(display, SFUSE_STRAP);
@@ -1790,7 +1791,7 @@ static void __intel_display_device_info_runtime_init(struct intel_display *displ
*/
if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
- (HAS_PCH_CPT(i915) &&
+ (HAS_PCH_CPT(display) &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
drm_info(display->drm,
"Display fused off, disabling\n");
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 7a3bb77c7af7..87c666792c0d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -143,9 +143,11 @@ struct intel_display_platforms {
#define HAS_4TILE(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
#define HAS_ASYNC_FLIPS(__display) (DISPLAY_VER(__display) >= 5)
+#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
#define HAS_BIGJOINER(__display) (DISPLAY_VER(__display) >= 11 && HAS_DSC(__display))
#define HAS_CDCLK_CRAWL(__display) (DISPLAY_INFO(__display)->has_cdclk_crawl)
#define HAS_CDCLK_SQUASH(__display) (DISPLAY_INFO(__display)->has_cdclk_squash)
+#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_CMTG(__display) (!(__display)->platform.dg2 && DISPLAY_VER(__display) >= 13)
#define HAS_CUR_FBC(__display) (!HAS_GMCH(__display) && IS_DISPLAY_VER(__display, 7, 13))
#define HAS_D12_PLANE_MINIMIZATION(__display) ((__display)->platform.rocketlake || (__display)->platform.alderlake_s)
@@ -156,9 +158,9 @@ struct intel_display_platforms {
#define HAS_DMC_WAKELOCK(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_DOUBLE_BUFFERED_M_N(__display) (DISPLAY_VER(__display) >= 9 || (__display)->platform.broadwell)
#define HAS_DOUBLE_WIDE(__display) (DISPLAY_VER(__display) < 4)
-#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst)
#define HAS_DP20(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
#define HAS_DPT(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst)
#define HAS_DSB(__display) (DISPLAY_INFO(__display)->has_dsb)
#define HAS_DSC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dsc)
#define HAS_DSC_3ENGINES(__display) (DISPLAY_VERx100(__display) == 1401 && HAS_DSC(__display))
@@ -167,9 +169,10 @@ struct intel_display_platforms {
#define HAS_FBC_DIRTY_RECT(__display) (DISPLAY_VER(__display) >= 30)
#define HAS_FPGA_DBG_UNCLAIMED(__display) (DISPLAY_INFO(__display)->has_fpga_dbg)
#define HAS_FW_BLC(__display) (DISPLAY_VER(__display) >= 3)
-#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
#define HAS_GMBUS_BURST_READ(__display) (DISPLAY_VER(__display) >= 10 || (__display)->platform.kabylake)
+#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
#define HAS_GMCH(__display) (DISPLAY_INFO(__display)->has_gmch)
+#define HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug)
#define HAS_HW_SAGV_WM(__display) (DISPLAY_VER(__display) >= 13 && !(__display)->platform.dgfx)
#define HAS_IPC(__display) (DISPLAY_INFO(__display)->has_ipc)
#define HAS_IPS(__display) ((__display)->platform.haswell_ult || (__display)->platform.broadwell)
@@ -190,10 +193,7 @@ struct intel_display_platforms {
((__display)->platform.dgfx && DISPLAY_VER(__display) == 14)) && \
HAS_DSC(__display))
#define HAS_VRR(__display) (DISPLAY_VER(__display) >= 11)
-#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
-#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20)
#define INTEL_NUM_PIPES(__display) (hweight8(DISPLAY_RUNTIME_INFO(__display)->pipe_mask))
-#define I915_HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug)
#define OVERLAY_NEEDS_PHYSICAL(__display) (DISPLAY_INFO(__display)->overlay_needs_physical)
#define SUPPORTS_TV(__display) (DISPLAY_INFO(__display)->supports_tv)
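
The header churn above is mostly alphabetization (HAS_AS_SDP(),
HAS_CMRR(), HAS_DP_MST() and HAS_GMBUS_IRQ() move to their sorted
positions) plus the I915_HAS_HOTPLUG() to HAS_HOTPLUG() rename. On the
caller side every feature test now keys off the display pointer; a
hypothetical example:

/* Hypothetical caller: all feature checks take struct intel_display *. */
static bool sketch_needs_hpd_work(struct intel_display *display)
{
	return HAS_DISPLAY(display) && HAS_HOTPLUG(display);
}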
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 31740a677dd8..411fe7b918a7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -54,6 +54,7 @@
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
+#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_vga.h"
#include "intel_wm.h"
@@ -82,7 +83,6 @@ bool intel_display_driver_probe_defer(struct pci_dev *pdev)
void intel_display_driver_init_hw(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_cdclk_state *cdclk_state;
if (!HAS_DISPLAY(display))
@@ -94,7 +94,7 @@ void intel_display_driver_init_hw(struct intel_display *display)
intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
cdclk_state->logical = cdclk_state->actual = display->cdclk.hw;
- intel_display_wa_apply(i915);
+ intel_display_wa_apply(display);
}
static const struct drm_mode_config_funcs intel_mode_funcs = {
@@ -181,7 +181,8 @@ static void intel_plane_possible_crtcs_init(struct intel_display *display)
void intel_display_driver_early_probe(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_pch_detect(display);
if (!HAS_DISPLAY(display))
return;
@@ -193,12 +194,12 @@ void intel_display_driver_early_probe(struct intel_display *display)
mutex_init(&display->pps.mutex);
mutex_init(&display->hdcp.hdcp_mutex);
- intel_display_irq_init(i915);
+ intel_display_irq_init(display);
intel_dkl_phy_init(display);
intel_color_init_hooks(display);
intel_init_cdclk_hooks(display);
intel_audio_hooks_init(display);
- intel_dpll_init_clock_hook(i915);
+ intel_dpll_init_clock_hook(display);
intel_init_display_hooks(display);
intel_fdi_init_hook(display);
intel_dmc_wl_init(display);
@@ -226,6 +227,8 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
if (ret)
goto cleanup_bios;
+ intel_psr_dc5_dc6_wa_init(display);
+
/* FIXME: completely on the wrong abstraction layer */
ret = intel_power_domains_init(display);
if (ret < 0)
@@ -241,31 +244,45 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
intel_dmc_init(display);
display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
+ if (!display->wq.modeset) {
+ ret = -ENOMEM;
+ goto cleanup_vga_client_pw_domain_dmc;
+ }
+
display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+ if (!display->wq.flip) {
+ ret = -ENOMEM;
+ goto cleanup_wq_modeset;
+ }
+
display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
+ if (!display->wq.cleanup) {
+ ret = -ENOMEM;
+ goto cleanup_wq_flip;
+ }
intel_mode_config_init(display);
ret = intel_cdclk_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_cleanup;
ret = intel_color_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_cleanup;
- ret = intel_dbuf_init(i915);
+ ret = intel_dbuf_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_cleanup;
- ret = intel_bw_init(i915);
+ ret = intel_bw_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_cleanup;
ret = intel_pmdemand_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_cleanup;
intel_init_quirks(display);
@@ -273,6 +290,12 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
return 0;
+cleanup_wq_cleanup:
+ destroy_workqueue(display->wq.cleanup);
+cleanup_wq_flip:
+ destroy_workqueue(display->wq.flip);
+cleanup_wq_modeset:
+ destroy_workqueue(display->wq.modeset);
cleanup_vga_client_pw_domain_dmc:
intel_dmc_fini(display);
intel_power_domains_driver_remove(display);
@@ -315,11 +338,9 @@ static void set_display_access(struct intel_display *display,
*/
void intel_display_driver_enable_user_access(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
set_display_access(display, true, NULL);
- intel_hpd_enable_detection_work(i915);
+ intel_hpd_enable_detection_work(display);
}
/**
@@ -341,9 +362,7 @@ void intel_display_driver_enable_user_access(struct intel_display *display)
*/
void intel_display_driver_disable_user_access(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- intel_hpd_disable_detection_work(i915);
+ intel_hpd_disable_detection_work(display);
set_display_access(display, false, current);
}
@@ -422,14 +441,13 @@ bool intel_display_driver_check_access(struct intel_display *display)
/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe;
int ret;
if (!HAS_DISPLAY(display))
return 0;
- intel_wm_init(i915);
+ intel_wm_init(display);
intel_panel_sanitize_ssc(display);
@@ -460,8 +478,6 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
intel_hti_init(display);
- /* Just disable it once at startup */
- intel_vga_disable(display);
intel_setup_outputs(display);
ret = intel_dp_tunnel_mgr_init(display);
@@ -471,7 +487,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
intel_display_driver_disable_user_access(display);
drm_modeset_lock_all(display->drm);
- intel_modeset_setup_hw_state(i915, display->drm->mode_config.acquire_ctx);
+ intel_modeset_setup_hw_state(display, display->drm->mode_config.acquire_ctx);
intel_acpi_assign_connector_fwnodes(display);
drm_modeset_unlock_all(display->drm);
@@ -483,7 +499,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
* since the watermark calculation done here will use pstate->fb.
*/
if (!HAS_GMCH(display))
- ilk_wm_sanitize(i915);
+ ilk_wm_sanitize(display);
return 0;
@@ -498,7 +514,6 @@ err_mode_config:
/* part #3: call after gem init */
int intel_display_driver_probe(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
if (!HAS_DISPLAY(display))
@@ -524,16 +539,15 @@ int intel_display_driver_probe(struct intel_display *display)
intel_overlay_setup(display);
/* Only enable hotplug handling once the fbdev is fully set up. */
- intel_hpd_init(i915);
+ intel_hpd_init(display);
- skl_watermark_ipc_init(i915);
+ skl_watermark_ipc_init(display);
return 0;
}
void intel_display_driver_register(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
"i915 display info:");
@@ -558,9 +572,9 @@ void intel_display_driver_register(struct intel_display *display)
* fbdev->async_cookie.
*/
drm_kms_helper_poll_init(display->drm);
- intel_hpd_poll_disable(i915);
+ intel_hpd_poll_disable(display);
- intel_fbdev_setup(i915);
+ intel_fbdev_setup(display);
intel_display_device_info_print(DISPLAY_INFO(display),
DISPLAY_RUNTIME_INFO(display), &p);
@@ -600,7 +614,7 @@ void intel_display_driver_remove_noirq(struct intel_display *display)
* Due to the hpd irq storm handling, the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
*/
- intel_hpd_poll_fini(i915);
+ intel_hpd_poll_fini(display);
intel_unregister_dsm_handler();
@@ -695,13 +709,11 @@ __intel_display_driver_resume(struct intel_display *display,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
int ret, i;
- intel_modeset_setup_hw_state(i915, ctx);
- intel_vga_redisable(display);
+ intel_modeset_setup_hw_state(display, ctx);
if (!state)
return 0;
@@ -733,7 +745,6 @@ __intel_display_driver_resume(struct intel_display *display,
void intel_display_driver_resume(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_atomic_state *state = display->restore.modeset_state;
struct drm_modeset_acquire_ctx ctx;
int ret;
@@ -761,7 +772,7 @@ void intel_display_driver_resume(struct intel_display *display)
if (!ret)
ret = __intel_display_driver_resume(display, state, &ctx);
- skl_watermark_ipc_update(i915);
+ skl_watermark_ipc_update(display);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
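
The probe path above now checks every workqueue allocation and unwinds
in reverse order through the new cleanup_wq_* labels instead of leaking
on failure. A self-contained sketch of the same shape, with
hypothetical names:

#include <linux/errno.h>
#include <linux/workqueue.h>

struct sketch_wqs {
	struct workqueue_struct *modeset, *flip, *cleanup;
};

static int sketch_probe_wqs(struct sketch_wqs *wq)
{
	wq->modeset = alloc_ordered_workqueue("sketch_modeset", 0);
	if (!wq->modeset)
		return -ENOMEM;

	wq->flip = alloc_workqueue("sketch_flip",
				   WQ_HIGHPRI | WQ_UNBOUND,
				   WQ_UNBOUND_MAX_ACTIVE);
	if (!wq->flip)
		goto err_modeset;

	wq->cleanup = alloc_workqueue("sketch_cleanup", WQ_HIGHPRI, 0);
	if (!wq->cleanup)
		goto err_flip;

	return 0;

err_flip:
	destroy_workqueue(wq->flip);
err_modeset:
	destroy_workqueue(wq->modeset);
	return -ENOMEM;
}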
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index aa23bb817805..3e73832e5e81 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -5,7 +5,6 @@
#include <drm/drm_vblank.h>
-#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -14,6 +13,8 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_rpm.h"
+#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_dmc_wl.h"
@@ -115,9 +116,8 @@ static void intel_pipe_fault_irq_handler(struct intel_display *display,
}
static void
-intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
+intel_handle_vblank(struct intel_display *display, enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
drm_crtc_handle_vblank(&crtc->base);
@@ -125,59 +125,59 @@ intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
/**
* ilk_update_display_irq - update DEIMR
- * @dev_priv: driver private
+ * @display: display device
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+void ilk_update_display_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
new_val = dev_priv->irq_mask;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != dev_priv->irq_mask &&
- !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
+ !drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) {
dev_priv->irq_mask = new_val;
intel_de_write(display, DEIMR, dev_priv->irq_mask);
intel_de_posting_read(display, DEIMR);
}
}
-void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
+void ilk_enable_display_irq(struct intel_display *display, u32 bits)
{
- ilk_update_display_irq(i915, bits, bits);
+ ilk_update_display_irq(display, bits, bits);
}
-void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
+void ilk_disable_display_irq(struct intel_display *display, u32 bits)
{
- ilk_update_display_irq(i915, bits, 0);
+ ilk_update_display_irq(display, bits, 0);
}
/**
* bdw_update_port_irq - update DE port interrupt
- * @dev_priv: driver private
+ * @display: display device
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void bdw_update_port_irq(struct drm_i915_private *dev_priv,
+void bdw_update_port_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
u32 old_val;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
old_val = intel_de_read(display, GEN8_DE_PORT_IMR);
@@ -194,93 +194,92 @@ void bdw_update_port_irq(struct drm_i915_private *dev_priv,
/**
* bdw_update_pipe_irq - update DE pipe interrupt
- * @dev_priv: driver private
+ * @display: display device
* @pipe: pipe whose interrupt to update
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
+static void bdw_update_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 interrupt_mask,
u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
- new_val = dev_priv->display.irq.de_irq_mask[pipe];
+ new_val = display->irq.de_irq_mask[pipe];
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) {
- dev_priv->display.irq.de_irq_mask[pipe] = new_val;
+ if (new_val != display->irq.de_irq_mask[pipe]) {
+ display->irq.de_irq_mask[pipe] = new_val;
intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]);
intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe));
}
}
-void bdw_enable_pipe_irq(struct drm_i915_private *i915,
+void bdw_enable_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 bits)
{
- bdw_update_pipe_irq(i915, pipe, bits, bits);
+ bdw_update_pipe_irq(display, pipe, bits, bits);
}
-void bdw_disable_pipe_irq(struct drm_i915_private *i915,
+void bdw_disable_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 bits)
{
- bdw_update_pipe_irq(i915, pipe, bits, 0);
+ bdw_update_pipe_irq(display, pipe, bits, 0);
}
/**
* ibx_display_interrupt_update - update SDEIMR
- * @dev_priv: driver private
+ * @display: display device
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+void ibx_display_interrupt_update(struct intel_display *display,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 sdeimr = intel_de_read(display, SDEIMR);
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
intel_de_write(display, SDEIMR, sdeimr);
intel_de_posting_read(display, SDEIMR);
}
-void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
+void ibx_enable_display_interrupt(struct intel_display *display, u32 bits)
{
- ibx_display_interrupt_update(i915, bits, bits);
+ ibx_display_interrupt_update(display, bits, bits);
}
-void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
+void ibx_disable_display_interrupt(struct intel_display *display, u32 bits)
{
- ibx_display_interrupt_update(i915, bits, 0);
+ ibx_display_interrupt_update(display, bits, 0);
}
u32 i915_pipestat_enable_mask(struct intel_display *display,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 status_mask = display->irq.pipestat_irq_mask[pipe];
u32 enable_mask = status_mask << 16;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if (DISPLAY_VER(display) < 5)
goto out;
@@ -318,48 +317,48 @@ out:
return enable_mask;
}
-void i915_enable_pipestat(struct drm_i915_private *dev_priv,
+void i915_enable_pipestat(struct intel_display *display,
enum pipe pipe, u32 status_mask)
{
- struct intel_display *display = &dev_priv->display;
- i915_reg_t reg = PIPESTAT(dev_priv, pipe);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ i915_reg_t reg = PIPESTAT(display, pipe);
u32 enable_mask;
- drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
"pipe %c: status_mask=0x%x\n",
pipe_name(pipe), status_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
- if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
+ if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
return;
- dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask;
+ display->irq.pipestat_irq_mask[pipe] |= status_mask;
enable_mask = i915_pipestat_enable_mask(display, pipe);
intel_de_write(display, reg, enable_mask | status_mask);
intel_de_posting_read(display, reg);
}
-void i915_disable_pipestat(struct drm_i915_private *dev_priv,
+void i915_disable_pipestat(struct intel_display *display,
enum pipe pipe, u32 status_mask)
{
- struct intel_display *display = &dev_priv->display;
- i915_reg_t reg = PIPESTAT(dev_priv, pipe);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ i915_reg_t reg = PIPESTAT(display, pipe);
u32 enable_mask;
- drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
"pipe %c: status_mask=0x%x\n",
pipe_name(pipe), status_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
- if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0)
+ if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == 0)
return;
- dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask;
+ display->irq.pipestat_irq_mask[pipe] &= ~status_mask;
enable_mask = i915_pipestat_enable_mask(display, pipe);
intel_de_write(display, reg, enable_mask | status_mask);
@@ -368,49 +367,41 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
static bool i915_has_legacy_blc_interrupt(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (IS_I85X(i915))
+ if (display->platform.i85x)
return true;
- if (IS_PINEVIEW(i915))
+ if (display->platform.pineview)
return true;
- return IS_DISPLAY_VER(display, 3, 4) && IS_MOBILE(i915);
+ return IS_DISPLAY_VER(display, 3, 4) && display->platform.mobile;
}
-/**
- * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
- * @dev_priv: i915 device private
- */
-void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
+/* enable ASLE pipestat for OpRegion */
+static void i915_enable_asle_pipestat(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
if (!intel_opregion_asle_present(display))
return;
if (!i915_has_legacy_blc_interrupt(display))
return;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
- if (DISPLAY_VER(dev_priv) >= 4)
- i915_enable_pipestat(dev_priv, PIPE_A,
+ i915_enable_pipestat(display, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
+ if (DISPLAY_VER(display) >= 4)
+ i915_enable_pipestat(display, PIPE_A,
PIPE_LEGACY_BLC_EVENT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
#if IS_ENABLED(CONFIG_DEBUG_FS)
-static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void display_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe,
u32 crc0, u32 crc1,
u32 crc2, u32 crc3,
u32 crc4)
{
- struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
@@ -427,7 +418,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
* don't trust that one either.
*/
if (pipe_crc->skipped <= 0 ||
- (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
+ (DISPLAY_VER(display) >= 8 && pipe_crc->skipped == 1)) {
pipe_crc->skipped++;
spin_unlock(&pipe_crc->lock);
return;
@@ -440,20 +431,19 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
}
#else
static inline void
-display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+display_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe,
u32 crc0, u32 crc1,
u32 crc2, u32 crc3,
u32 crc4) {}
#endif
-static void flip_done_handler(struct drm_i915_private *i915,
+static void flip_done_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &i915->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
- spin_lock(&i915->drm.event_lock);
+ spin_lock(&display->drm->event_lock);
if (crtc->flip_done_event) {
trace_intel_crtc_flip_done(crtc);
@@ -461,25 +451,21 @@ static void flip_done_handler(struct drm_i915_private *i915,
crtc->flip_done_event = NULL;
}
- spin_unlock(&i915->drm.event_lock);
+ spin_unlock(&display->drm->event_lock);
}
-static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void hsw_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
-
- display_pipe_crc_irq_handler(dev_priv, pipe,
+ display_pipe_crc_irq_handler(display, pipe,
intel_de_read(display, PIPE_CRC_RES_HSW(pipe)),
0, 0, 0, 0);
}
-static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void ivb_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
-
- display_pipe_crc_irq_handler(dev_priv, pipe,
+ display_pipe_crc_irq_handler(display, pipe,
intel_de_read(display, PIPE_CRC_RES_1_IVB(pipe)),
intel_de_read(display, PIPE_CRC_RES_2_IVB(pipe)),
intel_de_read(display, PIPE_CRC_RES_3_IVB(pipe)),
@@ -487,58 +473,55 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
intel_de_read(display, PIPE_CRC_RES_5_IVB(pipe)));
}
-static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void i9xx_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
u32 res1, res2;
- if (DISPLAY_VER(dev_priv) >= 3)
- res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(dev_priv, pipe));
+ if (DISPLAY_VER(display) >= 3)
+ res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(display, pipe));
else
res1 = 0;
- if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
- res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(dev_priv, pipe));
+ if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
+ res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(display, pipe));
else
res2 = 0;
- display_pipe_crc_irq_handler(dev_priv, pipe,
- intel_de_read(display, PIPE_CRC_RES_RED(dev_priv, pipe)),
- intel_de_read(display, PIPE_CRC_RES_GREEN(dev_priv, pipe)),
- intel_de_read(display, PIPE_CRC_RES_BLUE(dev_priv, pipe)),
+ display_pipe_crc_irq_handler(display, pipe,
+ intel_de_read(display, PIPE_CRC_RES_RED(display, pipe)),
+ intel_de_read(display, PIPE_CRC_RES_GREEN(display, pipe)),
+ intel_de_read(display, PIPE_CRC_RES_BLUE(display, pipe)),
res1, res2);
}
-static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
+static void i9xx_pipestat_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
intel_de_write(display,
- PIPESTAT(dev_priv, pipe),
+ PIPESTAT(display, pipe),
PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS);
- dev_priv->display.irq.pipestat_irq_mask[pipe] = 0;
+ display->irq.pipestat_irq_mask[pipe] = 0;
}
}
-void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
+void i9xx_pipestat_irq_ack(struct intel_display *display,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- spin_lock(&dev_priv->irq_lock);
+ spin_lock(&display->irq.lock);
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- !dev_priv->display.irq.vlv_display_irqs_enabled) {
- spin_unlock(&dev_priv->irq_lock);
+ if ((display->platform.valleyview || display->platform.cherryview) &&
+ !display->irq.vlv_display_irqs_enabled) {
+ spin_unlock(&display->irq.lock);
return;
}
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
i915_reg_t reg;
u32 status_mask, enable_mask, iir_bit = 0;
@@ -566,12 +549,12 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
break;
}
if (iir & iir_bit)
- status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe];
+ status_mask |= display->irq.pipestat_irq_mask[pipe];
if (!status_mask)
continue;
- reg = PIPESTAT(dev_priv, pipe);
+ reg = PIPESTAT(display, pipe);
pipe_stats[pipe] = intel_de_read(display, reg) & status_mask;
enable_mask = i915_pipestat_enable_mask(display, pipe);
@@ -589,25 +572,24 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
intel_de_write(display, reg, enable_mask);
}
}
- spin_unlock(&dev_priv->irq_lock);
+ spin_unlock(&display->irq.lock);
}
-void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+void i915_pipestat_irq_handler(struct intel_display *display,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
bool blc_event = false;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
@@ -617,22 +599,21 @@ void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
intel_opregion_asle_intr(display);
}
-void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+void i965_pipestat_irq_handler(struct intel_display *display,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
bool blc_event = false;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
@@ -645,21 +626,20 @@ void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
intel_gmbus_irq_handler(display);
}
-void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+void valleyview_pipestat_irq_handler(struct intel_display *display,
u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
- flip_done_handler(dev_priv, pipe);
+ flip_done_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
@@ -669,18 +649,17 @@ void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
intel_gmbus_irq_handler(display);
}
-static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+static void ibx_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ibx_hpd_irq_handler(display, hotplug_trigger);
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
- drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
+ drm_dbg(display->drm, "PCH audio power change on port %d\n",
port_name(port));
}
@@ -691,26 +670,26 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_gmbus_irq_handler(display);
if (pch_iir & SDE_AUDIO_HDCP_MASK)
- drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
+ drm_dbg(display->drm, "PCH HDCP audio interrupt\n");
if (pch_iir & SDE_AUDIO_TRANS_MASK)
- drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
+ drm_dbg(display->drm, "PCH transcoder audio interrupt\n");
if (pch_iir & SDE_POISON)
- drm_err(&dev_priv->drm, "PCH poison interrupt\n");
+ drm_err(display->drm, "PCH poison interrupt\n");
if (pch_iir & SDE_FDI_MASK) {
- for_each_pipe(dev_priv, pipe)
- drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
+ for_each_pipe(display, pipe)
+ drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
intel_de_read(display, FDI_RX_IIR(pipe)));
}
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
- drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
+ drm_dbg(display->drm, "PCH transcoder CRC done interrupt\n");
if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"PCH transcoder CRC error interrupt\n");
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
@@ -753,14 +732,13 @@ static const struct pipe_fault_handler ivb_pipe_fault_handlers[] = {
{}
};
-static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
+static void ivb_err_int_handler(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 err_int = intel_de_read(display, GEN7_ERR_INT);
enum pipe pipe;
if (err_int & ERR_INT_POISON)
- drm_err(&dev_priv->drm, "Poison interrupt\n");
+ drm_err(display->drm, "Poison interrupt\n");
if (err_int & ERR_INT_INVALID_GTT_PTE)
drm_err_ratelimited(display->drm, "Invalid GTT PTE\n");
@@ -768,17 +746,17 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
if (err_int & ERR_INT_INVALID_PTE_DATA)
drm_err_ratelimited(display->drm, "Invalid PTE data\n");
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
u32 fault_errors;
if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(display, pipe);
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
- if (IS_IVYBRIDGE(dev_priv))
- ivb_pipe_crc_irq_handler(dev_priv, pipe);
+ if (display->platform.ivybridge)
+ ivb_pipe_crc_irq_handler(display, pipe);
else
- hsw_pipe_crc_irq_handler(dev_priv, pipe);
+ hsw_pipe_crc_irq_handler(display, pipe);
}
fault_errors = err_int & ivb_err_int_pipe_fault_mask(pipe);
@@ -790,34 +768,32 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
intel_de_write(display, GEN7_ERR_INT, err_int);
}
-static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
+static void cpt_serr_int_handler(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 serr_int = intel_de_read(display, SERR_INT);
enum pipe pipe;
if (serr_int & SERR_INT_POISON)
- drm_err(&dev_priv->drm, "PCH poison interrupt\n");
+ drm_err(display->drm, "PCH poison interrupt\n");
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
intel_pch_fifo_underrun_irq_handler(display, pipe);
intel_de_write(display, SERR_INT, serr_int);
}
-static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+static void cpt_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ibx_hpd_irq_handler(display, hotplug_trigger);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
SDE_AUDIO_POWER_SHIFT_CPT);
- drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
+ drm_dbg(display->drm, "PCH audio power change on port %c\n",
port_name(port));
}
@@ -828,20 +804,20 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_gmbus_irq_handler(display);
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
- drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
+ drm_dbg(display->drm, "Audio CP request interrupt\n");
if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
- drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
+ drm_dbg(display->drm, "Audio CP change interrupt\n");
if (pch_iir & SDE_FDI_MASK_CPT) {
- for_each_pipe(dev_priv, pipe)
- drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
+ for_each_pipe(display, pipe)
+ drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
intel_de_read(display, FDI_RX_IIR(pipe)));
}
if (pch_iir & SDE_ERROR_CPT)
- cpt_serr_int_handler(dev_priv);
+ cpt_serr_int_handler(display);
}
static u32 ilk_gtt_fault_pipe_fault_mask(enum pipe pipe)
@@ -894,14 +870,13 @@ static void ilk_gtt_fault_irq_handler(struct intel_display *display)
}
}
-void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
+void ilk_display_irq_handler(struct intel_display *display, u32 de_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ilk_hpd_irq_handler(display, hotplug_trigger);
if (de_iir & DE_AUX_CHANNEL_A)
intel_dp_aux_irq_handler(display);
@@ -910,58 +885,57 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
intel_opregion_asle_intr(display);
if (de_iir & DE_POISON)
- drm_err(&dev_priv->drm, "Poison interrupt\n");
+ drm_err(display->drm, "Poison interrupt\n");
if (de_iir & DE_GTT_FAULT)
ilk_gtt_fault_irq_handler(display);
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe))
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (de_iir & DE_PLANE_FLIP_DONE(pipe))
- flip_done_handler(dev_priv, pipe);
+ flip_done_handler(display, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(display, pipe);
if (de_iir & DE_PIPE_CRC_DONE(pipe))
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
}
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
u32 pch_iir = intel_de_read(display, SDEIIR);
- if (HAS_PCH_CPT(dev_priv))
- cpt_irq_handler(dev_priv, pch_iir);
+ if (HAS_PCH_CPT(display))
+ cpt_irq_handler(display, pch_iir);
else
- ibx_irq_handler(dev_priv, pch_iir);
+ ibx_irq_handler(display, pch_iir);
/* should clear PCH hotplug event before clearing CPU irq */
intel_de_write(display, SDEIIR, pch_iir);
}
- if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
- gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
+ if (DISPLAY_VER(display) == 5 && de_iir & DE_PCU_EVENT)
+ ilk_display_rps_irq_handler(display);
}
-void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
+void ivb_display_irq_handler(struct intel_display *display, u32 de_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ilk_hpd_irq_handler(display, hotplug_trigger);
if (de_iir & DE_ERR_INT_IVB)
- ivb_err_int_handler(dev_priv);
+ ivb_err_int_handler(display);
if (de_iir & DE_EDP_PSR_INT_HSW) {
struct intel_encoder *encoder;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 psr_iir;
@@ -977,35 +951,35 @@ void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
if (de_iir & DE_GSE_IVB)
intel_opregion_asle_intr(display);
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
- flip_done_handler(dev_priv, pipe);
+ flip_done_handler(display, pipe);
}
/* check event from PCH */
- if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
+ if (!HAS_PCH_NOP(display) && (de_iir & DE_PCH_EVENT_IVB)) {
u32 pch_iir = intel_de_read(display, SDEIIR);
- cpt_irq_handler(dev_priv, pch_iir);
+ cpt_irq_handler(display, pch_iir);
/* clear PCH hotplug event before clearing CPU irq */
intel_de_write(display, SDEIIR, pch_iir);
}
}
-static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
+static u32 gen8_de_port_aux_mask(struct intel_display *display)
{
u32 mask;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
return 0;
- else if (DISPLAY_VER(dev_priv) >= 14)
+ else if (DISPLAY_VER(display) >= 14)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB;
- else if (DISPLAY_VER(dev_priv) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB |
TGL_DE_PORT_AUX_DDIC |
@@ -1015,7 +989,7 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
TGL_DE_PORT_AUX_USBC2 |
TGL_DE_PORT_AUX_USBC3 |
TGL_DE_PORT_AUX_USBC4;
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB |
TGL_DE_PORT_AUX_DDIC |
@@ -1027,12 +1001,12 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
TGL_DE_PORT_AUX_USBC6;
mask = GEN8_AUX_CHANNEL_A;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
mask |= GEN9_AUX_CHANNEL_B |
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (DISPLAY_VER(dev_priv) == 11) {
+ if (DISPLAY_VER(display) == 11) {
mask |= ICL_AUX_CHANNEL_F;
mask |= ICL_AUX_CHANNEL_E;
}
@@ -1040,10 +1014,8 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
return mask;
}
-static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
+static u32 gen8_de_pipe_fault_mask(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
if (DISPLAY_VER(display) >= 14)
return MTL_PIPEDMC_ATS_FAULT |
MTL_PLANE_ATS_FAULT |
@@ -1195,15 +1167,14 @@ gen8_pipe_fault_handlers(struct intel_display *display)
return bdw_pipe_fault_handlers;
}
-static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv)
+static void intel_pmdemand_irq_handler(struct intel_display *display)
{
- wake_up_all(&dev_priv->display.pmdemand.waitqueue);
+ wake_up_all(&display->pmdemand.waitqueue);
}
static void
-gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+gen8_de_misc_irq_handler(struct intel_display *display, u32 iir)
{
- struct intel_display *display = &dev_priv->display;
bool found = false;
if (HAS_DBUF_OVERLAP_DETECTION(display)) {
@@ -1213,20 +1184,20 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
}
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
if (iir & (XELPDP_PMDEMAND_RSP |
XELPDP_PMDEMAND_RSPTOUT_ERR)) {
if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Error waiting for Punit PM Demand Response\n");
- intel_pmdemand_irq_handler(dev_priv);
+ intel_pmdemand_irq_handler(display);
found = true;
}
if (iir & XELPDP_RM_TIMEOUT) {
u32 val = intel_de_read(display, RM_TIMEOUT_REG_CAPTURE);
- drm_warn(&dev_priv->drm, "Register Access Timeout = 0x%x\n", val);
+ drm_warn(display->drm, "Register Access Timeout = 0x%x\n", val);
found = true;
}
} else if (iir & GEN8_DE_MISC_GSE) {
@@ -1239,12 +1210,12 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
u32 psr_iir;
i915_reg_t iir_reg;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (DISPLAY_VER(dev_priv) >= 12)
- iir_reg = TRANS_PSR_IIR(dev_priv,
- intel_dp->psr.transcoder);
+ if (DISPLAY_VER(display) >= 12)
+ iir_reg = TRANS_PSR_IIR(display,
+ intel_dp->psr.transcoder);
else
iir_reg = EDP_PSR_IIR;
@@ -1256,19 +1227,18 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
intel_psr_irq_handler(intel_dp, psr_iir);
/* prior to GEN12 there is only one EDP PSR */
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
break;
}
}
if (!found)
- drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
+ drm_err(display->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
}
-static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
+static void gen11_dsi_te_interrupt_handler(struct intel_display *display,
u32 te_trigger)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe = INVALID_PIPE;
enum transcoder dsi_trans;
enum port port;
@@ -1278,7 +1248,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
* In case of dual link, TE comes from DSI_1;
* this is to check if dual link is enabled
*/
- val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(dev_priv, TRANSCODER_DSI_0));
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_DSI_0));
val &= PORT_SYNC_MODE_ENABLE;
/*
@@ -1294,12 +1264,12 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
val = val & OP_MODE_MASK;
if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
- drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n");
+ drm_err(display->drm, "DSI trancoder not configured in command mode\n");
return;
}
/* Get PIPE for handling VBLANK event */
- val = intel_de_read(display, TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, dsi_trans));
switch (val & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
pipe = PIPE_A;
@@ -1311,28 +1281,27 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
pipe = PIPE_C;
break;
default:
- drm_err(&dev_priv->drm, "Invalid PIPE\n");
+ drm_err(display->drm, "Invalid PIPE\n");
return;
}
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
/* clear TE in dsi IIR */
port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
}
-static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
+static u32 gen8_de_pipe_flip_done_mask(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
return GEN9_PIPE_PLANE1_FLIP_DONE;
else
return GEN8_PIPE_PRIMARY_FLIP_DONE;
}
-static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
+static void gen8_read_and_ack_pch_irqs(struct intel_display *display, u32 *pch_iir, u32 *pica_iir)
{
- struct intel_display *display = &i915->display;
u32 pica_ier = 0;
*pica_iir = 0;
@@ -1346,7 +1315,7 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
* their flags both in the PICA and SDE IIR.
*/
if (*pch_iir & SDE_PICAINTERRUPT) {
- drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL);
+ drm_WARN_ON(display->drm, INTEL_PCH_TYPE(display) < PCH_MTL);
pica_ier = intel_de_rmw(display, PICAINTERRUPT_IER, ~0, 0);
*pica_iir = intel_de_read(display, PICAINTERRUPT_IIR);
@@ -1359,32 +1328,31 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
intel_de_write(display, PICAINTERRUPT_IER, pica_ier);
}
-void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
+void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl)
{
- struct intel_display *display = &dev_priv->display;
u32 iir;
enum pipe pipe;
- drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
+ drm_WARN_ON_ONCE(display->drm, !HAS_DISPLAY(display));
if (master_ctl & GEN8_DE_MISC_IRQ) {
iir = intel_de_read(display, GEN8_DE_MISC_IIR);
if (iir) {
intel_de_write(display, GEN8_DE_MISC_IIR, iir);
- gen8_de_misc_irq_handler(dev_priv, iir);
+ gen8_de_misc_irq_handler(display, iir);
} else {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied (DE MISC)!\n");
}
}
- if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
+ if (DISPLAY_VER(display) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
iir = intel_de_read(display, GEN11_DE_HPD_IIR);
if (iir) {
intel_de_write(display, GEN11_DE_HPD_IIR, iir);
- gen11_hpd_irq_handler(dev_priv, iir);
+ gen11_hpd_irq_handler(display, iir);
} else {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied, (DE HPD)!\n");
}
}
@@ -1396,52 +1364,52 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
intel_de_write(display, GEN8_DE_PORT_IIR, iir);
- if (iir & gen8_de_port_aux_mask(dev_priv)) {
+ if (iir & gen8_de_port_aux_mask(display)) {
intel_dp_aux_irq_handler(display);
found = true;
}
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
if (hotplug_trigger) {
- bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
+ bxt_hpd_irq_handler(display, hotplug_trigger);
found = true;
}
- } else if (IS_BROADWELL(dev_priv)) {
+ } else if (display->platform.broadwell) {
u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
if (hotplug_trigger) {
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ilk_hpd_irq_handler(display, hotplug_trigger);
found = true;
}
}
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
(iir & BXT_DE_PORT_GMBUS)) {
intel_gmbus_irq_handler(display);
found = true;
}
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
if (te_trigger) {
- gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
+ gen11_dsi_te_interrupt_handler(display, te_trigger);
found = true;
}
}
if (!found)
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"Unexpected DE Port interrupt\n");
} else {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied (DE PORT)!\n");
}
}
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
u32 fault_errors;
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
@@ -1449,7 +1417,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
iir = intel_de_read(display, GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied (DE PIPE)!\n");
continue;
}
@@ -1457,36 +1425,36 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
intel_de_write(display, GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
- if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
- flip_done_handler(dev_priv, pipe);
+ if (iir & gen8_de_pipe_flip_done_mask(display))
+ flip_done_handler(display, pipe);
- if (HAS_DSB(dev_priv)) {
+ if (HAS_DSB(display)) {
if (iir & GEN12_DSB_INT(INTEL_DSB_0))
- intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_0);
+ intel_dsb_irq_handler(display, pipe, INTEL_DSB_0);
if (iir & GEN12_DSB_INT(INTEL_DSB_1))
- intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_1);
+ intel_dsb_irq_handler(display, pipe, INTEL_DSB_1);
if (iir & GEN12_DSB_INT(INTEL_DSB_2))
- intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_2);
+ intel_dsb_irq_handler(display, pipe, INTEL_DSB_2);
}
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
- hsw_pipe_crc_irq_handler(dev_priv, pipe);
+ hsw_pipe_crc_irq_handler(display, pipe);
if (iir & GEN8_PIPE_FIFO_UNDERRUN)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
- fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
+ fault_errors = iir & gen8_de_pipe_fault_mask(display);
if (fault_errors)
intel_pipe_fault_irq_handler(display,
gen8_pipe_fault_handlers(display),
pipe, fault_errors);
}
- if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
+ if (HAS_PCH_SPLIT(display) && !HAS_PCH_NOP(display) &&
master_ctl & GEN8_DE_PCH_IRQ) {
u32 pica_iir;
@@ -1495,31 +1463,30 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
* scheme also closed the SDE interrupt handling race we've seen
* on older pch-split platforms. But this needs testing.
*/
- gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir);
+ gen8_read_and_ack_pch_irqs(display, &iir, &pica_iir);
if (iir) {
if (pica_iir)
- xelpdp_pica_irq_handler(dev_priv, pica_iir);
+ xelpdp_pica_irq_handler(display, pica_iir);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_handler(dev_priv, iir);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
- spt_irq_handler(dev_priv, iir);
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ icp_irq_handler(display, iir);
+ else if (INTEL_PCH_TYPE(display) >= PCH_SPT)
+ spt_irq_handler(display, iir);
else
- cpt_irq_handler(dev_priv, iir);
+ cpt_irq_handler(display, iir);
} else {
/*
* Like on previous PCH there seems to be something
* fishy going on with forwarding PCH interrupts.
*/
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"The master control interrupt lied (SDE)!\n");
}
}
}
-u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
+u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl)
{
- struct intel_display *display = &i915->display;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -1532,20 +1499,17 @@ u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
return iir;
}
-void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
+void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir)
{
- struct intel_display *display = &i915->display;
-
if (iir & GEN11_GU_MISC_GSE)
intel_opregion_asle_intr(display);
}
-void gen11_display_irq_handler(struct drm_i915_private *i915)
+void gen11_display_irq_handler(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
u32 disp_ctl;
- disable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_display_rpm_assert_block(display);
/*
* GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
* for the display related bits.
@@ -1553,16 +1517,15 @@ void gen11_display_irq_handler(struct drm_i915_private *i915)
disp_ctl = intel_de_read(display, GEN11_DISPLAY_INT_CTL);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
- gen8_de_irq_handler(i915, disp_ctl);
+ gen8_de_irq_handler(display, disp_ctl);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- enable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_display_rpm_assert_unblock(display);
}
-static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
+static void i915gm_irq_cstate_wa_enable(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- lockdep_assert_held(&i915->drm.vblank_time_lock);
+ lockdep_assert_held(&display->drm->vblank_time_lock);
/*
* Vblank/CRC interrupts fail to wake the device up from C2+.
@@ -1570,117 +1533,116 @@ static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
* the problem. There is a small power cost so we do this
* only when vblank/CRC interrupts are actually enabled.
*/
- if (i915->display.irq.vblank_enabled++ == 0)
+ if (display->irq.vblank_enabled++ == 0)
intel_de_write(display, SCPD0,
_MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
-static void i915gm_irq_cstate_wa_disable(struct drm_i915_private *i915)
+static void i915gm_irq_cstate_wa_disable(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- lockdep_assert_held(&i915->drm.vblank_time_lock);
+ lockdep_assert_held(&display->drm->vblank_time_lock);
- if (--i915->display.irq.vblank_enabled == 0)
+ if (--display->irq.vblank_enabled == 0)
intel_de_write(display, SCPD0,
_MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
-void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable)
+void i915gm_irq_cstate_wa(struct intel_display *display, bool enable)
{
- spin_lock_irq(&i915->drm.vblank_time_lock);
+ spin_lock_irq(&display->drm->vblank_time_lock);
if (enable)
- i915gm_irq_cstate_wa_enable(i915);
+ i915gm_irq_cstate_wa_enable(display);
else
- i915gm_irq_cstate_wa_disable(i915);
+ i915gm_irq_cstate_wa_disable(display);
- spin_unlock_irq(&i915->drm.vblank_time_lock);
+ spin_unlock_irq(&display->drm->vblank_time_lock);
}
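
The i915gm workaround above is a refcount-guarded register toggle: only the 0 to 1 transition of vblank_enabled pays the power cost of disabling render clock gating, and only the 1 to 0 transition restores it. A minimal sketch of that pattern with hypothetical names (example_state, example_write and the EXAMPLE_* identifiers are not from this patch), assuming the caller holds the lock the lockdep assert checks:

static void example_wa_enable(struct example_state *st)
{
	lockdep_assert_held(&st->lock);

	/* Only the first enabler touches the hardware. */
	if (st->refcount++ == 0)
		example_write(st, EXAMPLE_SCPD0,
			      _MASKED_BIT_ENABLE(EXAMPLE_CLOCK_GATE_DISABLE));
}

static void example_wa_disable(struct example_state *st)
{
	lockdep_assert_held(&st->lock);

	/* Only the last disabler touches the hardware. */
	if (--st->refcount == 0)
		example_write(st, EXAMPLE_SCPD0,
			      _MASKED_BIT_DISABLE(EXAMPLE_CLOCK_GATE_DISABLE));
}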
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_enable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
return 0;
}
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_disable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
}
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
- i915gm_irq_cstate_wa_enable(i915);
+ i915gm_irq_cstate_wa_enable(display);
return i8xx_enable_vblank(crtc);
}
void i915gm_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
i8xx_disable_vblank(crtc);
- i915gm_irq_cstate_wa_disable(i915);
+ i915gm_irq_cstate_wa_disable(display);
}
int i965_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, pipe,
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_enable_pipestat(display, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
return 0;
}
void i965_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_disable_pipestat(display, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
}
int ilk_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
+ u32 bit = DISPLAY_VER(display) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_enable_display_irq(dev_priv, bit);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ ilk_enable_display_irq(display, bit);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
/* Even though there is no DMC, frame counter can get stuck when
* PSR is active as no frames are generated.
*/
- if (HAS_PSR(dev_priv))
+ if (HAS_PSR(display))
drm_crtc_vblank_restore(crtc);
return 0;
@@ -1688,15 +1650,15 @@ int ilk_enable_vblank(struct drm_crtc *crtc)
void ilk_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
+ u32 bit = DISPLAY_VER(display) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_disable_display_irq(dev_priv, bit);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ ilk_disable_display_irq(display, bit);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
}
static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
@@ -1722,44 +1684,36 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
return true;
}
-static void intel_display_vblank_dc_work(struct work_struct *work)
+static void intel_display_vblank_notify_work(struct work_struct *work)
{
struct intel_display *display =
- container_of(work, typeof(*display), irq.vblank_dc_work);
- int vblank_wa_num_pipes = READ_ONCE(display->irq.vblank_wa_num_pipes);
+ container_of(work, typeof(*display), irq.vblank_notify_work);
+ int vblank_enable_count = READ_ONCE(display->irq.vblank_enable_count);
- /*
- * NOTE: intel_display_power_set_target_dc_state is used only by PSR
- * code for DC3CO handling. DC3CO target state is currently disabled in
- * PSR code. If DC3CO is taken into use we need take that into account
- * here as well.
- */
- intel_display_power_set_target_dc_state(display, vblank_wa_num_pipes ? DC_STATE_DISABLE :
- DC_STATE_EN_UPTO_DC6);
+ intel_psr_notify_vblank_enable_disable(display, vblank_enable_count);
}
int bdw_enable_vblank(struct drm_crtc *_crtc)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
unsigned long irqflags;
if (gen11_dsi_configure_te(crtc, true))
return 0;
- if (crtc->block_dc_for_vblank && display->irq.vblank_wa_num_pipes++ == 0)
- schedule_work(&display->irq.vblank_dc_work);
+ if (crtc->vblank_psr_notify && display->irq.vblank_enable_count++ == 0)
+ schedule_work(&display->irq.vblank_notify_work);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
/* Even if there is no DMC, frame counter can get stuck when
* PSR is active as no frames are generated, so check only for PSR.
*/
- if (HAS_PSR(dev_priv))
+ if (HAS_PSR(display))
drm_crtc_vblank_restore(&crtc->base);
return 0;
@@ -1769,19 +1723,18 @@ void bdw_disable_vblank(struct drm_crtc *_crtc)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
unsigned long irqflags;
if (gen11_dsi_configure_te(crtc, false))
return;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
- if (crtc->block_dc_for_vblank && --display->irq.vblank_wa_num_pipes == 0)
- schedule_work(&display->irq.vblank_dc_work);
+ if (crtc->vblank_psr_notify && --display->irq.vblank_enable_count == 0)
+ schedule_work(&display->irq.vblank_notify_work);
}
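
The renamed vblank notify path keeps the same shape as the old DC workaround: the enable/disable hooks schedule the worker only on the 0 to 1 and 1 to 0 transitions of vblank_enable_count, and the worker samples the count once and hands it to the consumer (PSR, via intel_psr_notify_vblank_enable_disable, in this patch). A hedged sketch of the transition-plus-worker pattern, with hypothetical example_* names:

static void example_vblank_notify_work(struct work_struct *work)
{
	struct example_display *d =
		container_of(work, typeof(*d), vblank_notify_work);
	/* Sample once; the count may keep changing under us. */
	int count = READ_ONCE(d->vblank_enable_count);

	example_psr_notify(d, count);
}

static void example_enable_vblank(struct example_display *d)
{
	/* Only the 0 -> 1 transition needs to notify. */
	if (d->vblank_enable_count++ == 0)
		schedule_work(&d->vblank_notify_work);
}

static void example_disable_vblank(struct example_display *d)
{
	/* Only the 1 -> 0 transition needs to notify. */
	if (--d->vblank_enable_count == 0)
		schedule_work(&d->vblank_notify_work);
}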
static u32 vlv_dpinvgtt_pipe_fault_mask(enum pipe pipe)
@@ -1892,11 +1845,11 @@ void vlv_display_error_irq_handler(struct intel_display *display,
vlv_page_table_error_irq_handler(display, dpinvgtt);
}
-static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+static void _vlv_display_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
@@ -1904,31 +1857,60 @@ static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
gen2_error_reset(to_intel_uncore(display->drm),
VLV_ERROR_REGS);
- i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
- intel_de_rmw(display, PORT_HOTPLUG_STAT(dev_priv), 0, 0);
+ i915_hotplug_interrupt_update_locked(display, 0xffffffff, 0);
+ intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
- i9xx_pipestat_irq_reset(dev_priv);
+ i9xx_pipestat_irq_reset(display);
intel_display_irq_regs_reset(display, VLV_IRQ_REGS);
dev_priv->irq_mask = ~0u;
}
-void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+void vlv_display_irq_reset(struct intel_display *display)
{
- if (dev_priv->display.irq.vlv_display_irqs_enabled)
- _vlv_display_irq_reset(dev_priv);
+ spin_lock_irq(&display->irq.lock);
+ if (display->irq.vlv_display_irqs_enabled)
+ _vlv_display_irq_reset(display);
+ spin_unlock_irq(&display->irq.lock);
}
-void i9xx_display_irq_reset(struct drm_i915_private *i915)
+void i9xx_display_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
- if (I915_HAS_HOTPLUG(i915)) {
- i915_hotplug_interrupt_update(i915, 0xffffffff, 0);
- intel_de_rmw(display, PORT_HOTPLUG_STAT(i915), 0, 0);
+ if (HAS_HOTPLUG(display)) {
+ i915_hotplug_interrupt_update(display, 0xffffffff, 0);
+ intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
}
- i9xx_pipestat_irq_reset(i915);
+ i9xx_pipestat_irq_reset(display);
+}
+
+void i915_display_irq_postinstall(struct intel_display *display)
+{
+ /*
+ * Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked check happy.
+ */
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ spin_unlock_irq(&display->irq.lock);
+
+ i915_enable_asle_pipestat(display);
+}
+
+void i965_display_irq_postinstall(struct intel_display *display)
+{
+ /*
+ * Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked check happy.
+ */
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ spin_unlock_irq(&display->irq.lock);
+
+ i915_enable_asle_pipestat(display);
}
static u32 vlv_error_mask(void)
@@ -1937,17 +1919,14 @@ static u32 vlv_error_mask(void)
return VLV_ERROR_PAGE_TABLE;
}
-void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
+static void _vlv_display_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
- if (!dev_priv->display.irq.vlv_display_irqs_enabled)
- return;
-
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
intel_de_write(display, DPINVGTT,
DPINVGTT_STATUS_MASK_CHV |
DPINVGTT_EN_MASK_CHV);
@@ -1961,9 +1940,9 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- for_each_pipe(dev_priv, pipe)
- i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
+ i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ for_each_pipe(display, pipe)
+ i915_enable_pipestat(display, pipe, pipestat_mask);
enable_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -1972,53 +1951,76 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
I915_LPE_PIPE_B_INTERRUPT |
I915_MASTER_ERROR_INTERRUPT;
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT;
- drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
+ drm_WARN_ON(display->drm, dev_priv->irq_mask != ~0u);
dev_priv->irq_mask = ~enable_mask;
intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
}
-void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
+void vlv_display_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ spin_lock_irq(&display->irq.lock);
+ if (display->irq.vlv_display_irqs_enabled)
+ _vlv_display_irq_postinstall(display);
+ spin_unlock_irq(&display->irq.lock);
+}
+
+void ibx_display_irq_reset(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (HAS_PCH_NOP(i915))
+ return;
+
+ gen2_irq_reset(to_intel_uncore(display->drm), SDE_IRQ_REGS);
+
+ if (HAS_PCH_CPT(i915) || HAS_PCH_LPT(i915))
+ intel_de_write(display, SERR_INT, 0xffffffff);
+}
+
+void gen8_display_irq_reset(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
+
+ if (HAS_PCH_SPLIT(i915))
+ ibx_display_irq_reset(display);
}
-void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
+void gen11_display_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
enum transcoder trans;
- for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
+ for_each_cpu_transcoder_masked(display, trans, trans_mask) {
enum intel_display_power_domain domain;
domain = POWER_DOMAIN_TRANSCODER(trans);
@@ -2026,10 +2028,10 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
continue;
intel_de_write(display,
- TRANS_PSR_IMR(dev_priv, trans),
+ TRANS_PSR_IMR(display, trans),
0xffffffff);
intel_de_write(display,
- TRANS_PSR_IIR(dev_priv, trans),
+ TRANS_PSR_IIR(display, trans),
0xffffffff);
}
} else {
@@ -2037,7 +2039,7 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
}
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
@@ -2045,55 +2047,55 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
intel_display_irq_regs_reset(display, PICAINTERRUPT_IRQ_REGS);
else
intel_display_irq_regs_reset(display, GEN11_DE_HPD_IRQ_REGS);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
intel_display_irq_regs_reset(display, SDE_IRQ_REGS);
}
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+void gen8_irq_power_well_post_enable(struct intel_display *display,
u8 pipe_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
- gen8_de_pipe_flip_done_mask(dev_priv);
+ gen8_de_pipe_flip_done_mask(display);
enum pipe pipe;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
if (!intel_irqs_enabled(dev_priv)) {
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
return;
}
- for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+ for_each_pipe_masked(display, pipe, pipe_mask)
intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
- dev_priv->display.irq.de_irq_mask[pipe],
- ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);
+ display->irq.de_irq_mask[pipe],
+ ~display->irq.de_irq_mask[pipe] | extra_ier);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
-void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
+void gen8_irq_power_well_pre_disable(struct intel_display *display,
u8 pipe_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
if (!intel_irqs_enabled(dev_priv)) {
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
return;
}
- for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+ for_each_pipe_masked(display, pipe, pipe_mask)
intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
@@ -2110,17 +2112,16 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
* to avoid races with the irq handler, assuming we have MSI. Shared legacy
* interrupts could still race.
*/
-static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
+static void ibx_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 mask;
- if (HAS_PCH_NOP(dev_priv))
+ if (HAS_PCH_NOP(display))
return;
- if (HAS_PCH_IBX(dev_priv))
+ if (HAS_PCH_IBX(display))
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
- else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
+ else if (HAS_PCH_CPT(display) || HAS_PCH_LPT(display))
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
else
mask = SDE_GMBUS_CPT;
@@ -2128,40 +2129,50 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
-void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
+void valleyview_enable_display_irqs(struct intel_display *display)
{
- lockdep_assert_held(&dev_priv->irq_lock);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (dev_priv->display.irq.vlv_display_irqs_enabled)
- return;
+ spin_lock_irq(&display->irq.lock);
+
+ if (display->irq.vlv_display_irqs_enabled)
+ goto out;
- dev_priv->display.irq.vlv_display_irqs_enabled = true;
+ display->irq.vlv_display_irqs_enabled = true;
if (intel_irqs_enabled(dev_priv)) {
- _vlv_display_irq_reset(dev_priv);
- vlv_display_irq_postinstall(dev_priv);
+ _vlv_display_irq_reset(display);
+ _vlv_display_irq_postinstall(display);
}
+
+out:
+ spin_unlock_irq(&display->irq.lock);
}
-void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
+void valleyview_disable_display_irqs(struct intel_display *display)
{
- lockdep_assert_held(&dev_priv->irq_lock);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (!dev_priv->display.irq.vlv_display_irqs_enabled)
- return;
+ spin_lock_irq(&display->irq.lock);
+
+ if (!display->irq.vlv_display_irqs_enabled)
+ goto out;
- dev_priv->display.irq.vlv_display_irqs_enabled = false;
+ display->irq.vlv_display_irqs_enabled = false;
if (intel_irqs_enabled(dev_priv))
- _vlv_display_irq_reset(dev_priv);
+ _vlv_display_irq_reset(display);
+out:
+ spin_unlock_irq(&display->irq.lock);
}
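
Note the locking change in these two helpers: instead of asserting that the caller holds the IRQ lock, they now take display->irq.lock themselves around the vlv_display_irqs_enabled check and the reset/postinstall work, which is why the power-well code later in this patch can call them without any locking of its own. A minimal sketch of the self-locking guard, hypothetical names:

static void example_enable_irqs(struct example_display *d)
{
	spin_lock_irq(&d->irq_lock);

	/* Idempotent: a second enable is a no-op. */
	if (d->irqs_enabled)
		goto out;

	d->irqs_enabled = true;

	example_irq_reset(d);
	example_irq_postinstall(d);
out:
	spin_unlock_irq(&d->irq_lock);
}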
-void ilk_de_irq_postinstall(struct drm_i915_private *i915)
+void ilk_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
u32 display_mask, extra_mask;
- if (DISPLAY_VER(i915) >= 7) {
+ if (DISPLAY_VER(display) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
@@ -2182,59 +2193,57 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915)
DE_DP_A_HOTPLUG);
}
- if (IS_HASWELL(i915)) {
+ if (display->platform.haswell) {
intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
display_mask |= DE_EDP_PSR_INT_HSW;
}
- if (IS_IRONLAKE_M(i915))
+ if (display->platform.ironlake && display->platform.mobile)
extra_mask |= DE_PCU_EVENT;
i915->irq_mask = ~display_mask;
- ibx_irq_postinstall(i915);
+ ibx_irq_postinstall(display);
intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask,
display_mask | extra_mask);
}
-static void mtp_irq_postinstall(struct drm_i915_private *i915);
-static void icp_irq_postinstall(struct drm_i915_private *i915);
+static void mtp_irq_postinstall(struct intel_display *display);
+static void icp_irq_postinstall(struct intel_display *display);
-void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
+void gen8_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
- u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
+ u32 de_pipe_masked = gen8_de_pipe_fault_mask(display) |
GEN8_PIPE_CDCLK_CRC_DONE;
u32 de_pipe_enables;
- u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
+ u32 de_port_masked = gen8_de_port_aux_mask(display);
u32 de_port_enables;
u32 de_misc_masked = GEN8_DE_EDP_PSR;
u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
enum pipe pipe;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- if (DISPLAY_VER(dev_priv) >= 14)
- mtp_irq_postinstall(dev_priv);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_postinstall(dev_priv);
- else if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_postinstall(dev_priv);
+ if (DISPLAY_VER(display) >= 14)
+ mtp_irq_postinstall(display);
+ else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ icp_irq_postinstall(display);
+ else if (HAS_PCH_SPLIT(display))
+ ibx_irq_postinstall(display);
- if (DISPLAY_VER(dev_priv) < 11)
+ if (DISPLAY_VER(display) < 11)
de_misc_masked |= GEN8_DE_MISC_GSE;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
de_port_masked |= BXT_DE_PORT_GMBUS;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT;
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
enum port port;
if (intel_bios_is_dsi_present(display, &port))
@@ -2244,25 +2253,25 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (HAS_DBUF_OVERLAP_DETECTION(display))
de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED;
- if (HAS_DSB(dev_priv))
+ if (HAS_DSB(display))
de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) |
GEN12_DSB_INT(INTEL_DSB_1) |
GEN12_DSB_INT(INTEL_DSB_2);
de_pipe_enables = de_pipe_masked |
GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
- gen8_de_pipe_flip_done_mask(dev_priv);
+ gen8_de_pipe_flip_done_mask(display);
de_port_enables = de_port_masked;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
- else if (IS_BROADWELL(dev_priv))
+ else if (display->platform.broadwell)
de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
enum transcoder trans;
- for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
+ for_each_cpu_transcoder_masked(display, trans, trans_mask) {
enum intel_display_power_domain domain;
domain = POWER_DOMAIN_TRANSCODER(trans);
@@ -2270,19 +2279,19 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
continue;
intel_display_irq_regs_assert_irr_is_zero(display,
- TRANS_PSR_IIR(dev_priv, trans));
+ TRANS_PSR_IIR(display, trans));
}
} else {
intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
}
- for_each_pipe(dev_priv, pipe) {
- dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked;
+ for_each_pipe(display, pipe) {
+ display->irq.de_irq_mask[pipe] = ~de_pipe_masked;
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
- dev_priv->display.irq.de_irq_mask[pipe],
+ display->irq.de_irq_mask[pipe],
de_pipe_enables);
}
@@ -2291,7 +2300,7 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
intel_display_irq_regs_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked,
de_misc_masked);
- if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
+ if (IS_DISPLAY_VER(display, 11, 13)) {
u32 de_hpd_masked = 0;
u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
GEN11_DE_TBT_HOTPLUG_MASK;
@@ -2301,9 +2310,8 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
}
}
-static void mtp_irq_postinstall(struct drm_i915_private *i915)
+static void mtp_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
@@ -2315,43 +2323,68 @@ static void mtp_irq_postinstall(struct drm_i915_private *i915)
intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
}
-static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
+static void icp_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 mask = SDE_GMBUS_ICP;
intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
-void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
+void gen11_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- gen8_de_irq_postinstall(dev_priv);
+ gen8_de_irq_postinstall(display);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}
-void dg1_de_irq_postinstall(struct drm_i915_private *i915)
+void dg1_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- gen8_de_irq_postinstall(i915);
+ gen8_de_irq_postinstall(display);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}
-void intel_display_irq_init(struct drm_i915_private *i915)
+void intel_display_irq_init(struct intel_display *display)
{
- i915->drm.vblank_disable_immediate = true;
+ spin_lock_init(&display->irq.lock);
- intel_hotplug_irq_init(i915);
+ display->drm->vblank_disable_immediate = true;
+
+ intel_hotplug_irq_init(display);
+
+ INIT_WORK(&display->irq.vblank_notify_work,
+ intel_display_vblank_notify_work);
+}
+
+struct intel_display_irq_snapshot {
+ u32 derrmr;
+};
+
+struct intel_display_irq_snapshot *
+intel_display_irq_snapshot_capture(struct intel_display *display)
+{
+ struct intel_display_irq_snapshot *snapshot;
+
+ snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
+ if (!snapshot)
+ return NULL;
+
+ if (DISPLAY_VER(display) >= 6 && DISPLAY_VER(display) < 20 && !HAS_GMCH(display))
+ snapshot->derrmr = intel_de_read(display, DERRMR);
+
+ return snapshot;
+}
+
+void intel_display_irq_snapshot_print(const struct intel_display_irq_snapshot *snapshot,
+ struct drm_printer *p)
+{
+ if (!snapshot)
+ return;
- INIT_WORK(&i915->display.irq.vblank_dc_work,
- intel_display_vblank_dc_work);
+ drm_printf(p, "DERRMR: 0x%08x\n", snapshot->derrmr);
}
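
The new snapshot pair is built for error capture: capture allocates with GFP_ATOMIC so it can run from the error or IRQ path, reads DERRMR only where that register exists (display version 6 to 19, non-GMCH), and print tolerates a NULL snapshot. A hedged usage sketch; example_error_dump is hypothetical, the two intel_display_irq_snapshot_* calls are the ones added above:

static void example_error_dump(struct intel_display *display,
			       struct drm_printer *p)
{
	struct intel_display_irq_snapshot *snapshot;

	/* Atomic-safe capture; may legitimately return NULL. */
	snapshot = intel_display_irq_snapshot_capture(display);

	/* ... capture whatever other state the dump needs ... */

	/* NULL-tolerant, so no error handling is needed here. */
	intel_display_irq_snapshot_print(snapshot, p);

	kfree(snapshot);
}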
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h
index d9867cd0a220..c66db3851da4 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.h
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.h
@@ -12,28 +12,29 @@
enum pipe;
struct drm_crtc;
-struct drm_i915_private;
+struct drm_printer;
struct intel_display;
+struct intel_display_irq_snapshot;
-void valleyview_enable_display_irqs(struct drm_i915_private *i915);
-void valleyview_disable_display_irqs(struct drm_i915_private *i915);
+void valleyview_enable_display_irqs(struct intel_display *display);
+void valleyview_disable_display_irqs(struct intel_display *display);
-void ilk_update_display_irq(struct drm_i915_private *i915,
+void ilk_update_display_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask);
-void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits);
-void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits);
+void ilk_enable_display_irq(struct intel_display *display, u32 bits);
+void ilk_disable_display_irq(struct intel_display *display, u32 bits);
-void bdw_update_port_irq(struct drm_i915_private *i915, u32 interrupt_mask, u32 enabled_irq_mask);
-void bdw_enable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits);
-void bdw_disable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits);
+void bdw_update_port_irq(struct intel_display *display, u32 interrupt_mask, u32 enabled_irq_mask);
+void bdw_enable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits);
+void bdw_disable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits);
-void ibx_display_interrupt_update(struct drm_i915_private *i915,
+void ibx_display_interrupt_update(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask);
-void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits);
-void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits);
+void ibx_enable_display_interrupt(struct intel_display *display, u32 bits);
+void ibx_disable_display_interrupt(struct intel_display *display, u32 bits);
-void gen8_irq_power_well_post_enable(struct drm_i915_private *i915, u8 pipe_mask);
-void gen8_irq_power_well_pre_disable(struct drm_i915_private *i915, u8 pipe_mask);
+void gen8_irq_power_well_post_enable(struct intel_display *display, u8 pipe_mask);
+void gen8_irq_power_well_pre_disable(struct intel_display *display, u8 pipe_mask);
int i8xx_enable_vblank(struct drm_crtc *crtc);
int i915gm_enable_vblank(struct drm_crtc *crtc);
@@ -46,41 +47,46 @@ void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);
-void ivb_display_irq_handler(struct drm_i915_private *i915, u32 de_iir);
-void ilk_display_irq_handler(struct drm_i915_private *i915, u32 de_iir);
-void gen8_de_irq_handler(struct drm_i915_private *i915, u32 master_ctl);
-void gen11_display_irq_handler(struct drm_i915_private *i915);
+void ivb_display_irq_handler(struct intel_display *display, u32 de_iir);
+void ilk_display_irq_handler(struct intel_display *display, u32 de_iir);
+void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl);
+void gen11_display_irq_handler(struct intel_display *display);
-u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl);
-void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir);
+u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl);
+void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir);
-void i9xx_display_irq_reset(struct drm_i915_private *i915);
-void vlv_display_irq_reset(struct drm_i915_private *i915);
-void gen8_display_irq_reset(struct drm_i915_private *i915);
-void gen11_display_irq_reset(struct drm_i915_private *i915);
+void i9xx_display_irq_reset(struct intel_display *display);
+void ibx_display_irq_reset(struct intel_display *display);
+void vlv_display_irq_reset(struct intel_display *display);
+void gen8_display_irq_reset(struct intel_display *display);
+void gen11_display_irq_reset(struct intel_display *display);
-void vlv_display_irq_postinstall(struct drm_i915_private *i915);
-void ilk_de_irq_postinstall(struct drm_i915_private *i915);
-void gen8_de_irq_postinstall(struct drm_i915_private *i915);
-void gen11_de_irq_postinstall(struct drm_i915_private *i915);
-void dg1_de_irq_postinstall(struct drm_i915_private *i915);
+void i915_display_irq_postinstall(struct intel_display *display);
+void i965_display_irq_postinstall(struct intel_display *display);
+void vlv_display_irq_postinstall(struct intel_display *display);
+void ilk_de_irq_postinstall(struct intel_display *display);
+void gen8_de_irq_postinstall(struct intel_display *display);
+void gen11_de_irq_postinstall(struct intel_display *display);
+void dg1_de_irq_postinstall(struct intel_display *display);
u32 i915_pipestat_enable_mask(struct intel_display *display, enum pipe pipe);
-void i915_enable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
-void i915_disable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
-void i915_enable_asle_pipestat(struct drm_i915_private *i915);
+void i915_enable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask);
+void i915_disable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask);
-void i9xx_pipestat_irq_ack(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void i9xx_pipestat_irq_ack(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
-void i915_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
-void i965_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
-void valleyview_pipestat_irq_handler(struct drm_i915_private *i915, u32 pipe_stats[I915_MAX_PIPES]);
+void i915_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void i965_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void valleyview_pipestat_irq_handler(struct intel_display *display, u32 pipe_stats[I915_MAX_PIPES]);
void vlv_display_error_irq_ack(struct intel_display *display, u32 *eir, u32 *dpinvgtt);
void vlv_display_error_irq_handler(struct intel_display *display, u32 eir, u32 dpinvgtt);
-void intel_display_irq_init(struct drm_i915_private *i915);
+void intel_display_irq_init(struct intel_display *display);
-void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable);
+void i915gm_irq_cstate_wa(struct intel_display *display, bool enable);
+
+struct intel_display_irq_snapshot *intel_display_irq_snapshot_capture(struct intel_display *display);
+void intel_display_irq_snapshot_print(const struct intel_display_irq_snapshot *snapshot, struct drm_printer *p);
#endif /* __INTEL_DISPLAY_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index f7171e6932dc..16356523816f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -16,6 +16,7 @@
#include "intel_display_power.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
@@ -204,7 +205,7 @@ static bool __intel_display_power_is_enabled(struct intel_display *display,
struct i915_power_well *power_well;
bool is_enabled;
- if (pm_runtime_suspended(display->drm->dev))
+ if (intel_display_rpm_suspended(display))
return false;
is_enabled = true;
@@ -322,6 +323,35 @@ unlock:
mutex_unlock(&power_domains->lock);
}
+/**
+ * intel_display_power_get_current_dc_state - Get the current DC state.
+ * @display: display device
+ *
+ * Returns the DC state currently in effect: DC_STATE_DISABLE while the
+ * "DC off" power well is enabled, otherwise the target_dc_state that the
+ * "DC off" power well will allow once it is disabled.
+ */
+u32 intel_display_power_get_current_dc_state(struct intel_display *display)
+{
+ struct i915_power_well *power_well;
+ struct i915_power_domains *power_domains = &display->power.domains;
+ u32 current_dc_state = DC_STATE_DISABLE;
+
+ mutex_lock(&power_domains->lock);
+ power_well = lookup_power_well(display, SKL_DISP_DC_OFF);
+
+ if (drm_WARN_ON(display->drm, !power_well))
+ goto unlock;
+
+ current_dc_state = intel_power_well_is_enabled(display, power_well) ?
+ DC_STATE_DISABLE : power_domains->target_dc_state;
+
+unlock:
+ mutex_unlock(&power_domains->lock);
+
+ return current_dc_state;
+}
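
With the semantics above, a caller gets DC_STATE_DISABLE whenever the "DC off" power well is enabled and the target state otherwise. A hypothetical one-line consumer, assuming only the DC_STATE_EN_UPTO_DC6 bit already used elsewhere in this file:

static bool example_dc6_currently_allowed(struct intel_display *display)
{
	return intel_display_power_get_current_dc_state(display) &
		DC_STATE_EN_UPTO_DC6;
}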
+
static void __async_put_domains_mask(struct i915_power_domains *power_domains,
struct intel_power_domain_mask *mask)
{
@@ -455,7 +485,6 @@ static bool
intel_display_power_grab_async_put_ref(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
bool ret = false;
@@ -473,8 +502,8 @@ intel_display_power_grab_async_put_ref(struct intel_display *display,
goto out_verify;
cancel_async_put_work(power_domains, false);
- intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
- fetch_and_zero(&power_domains->async_put_wakeref));
+ intel_display_rpm_put_raw(display,
+ fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
verify_async_put_domains_state(power_domains);
@@ -512,9 +541,10 @@ __intel_display_power_get_domain(struct intel_display *display,
intel_wakeref_t intel_display_power_get(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ struct ref_tracker *wakeref;
+
+ wakeref = intel_display_rpm_get(display);
mutex_lock(&power_domains->lock);
__intel_display_power_get_domain(display, domain);
@@ -539,12 +569,11 @@ intel_wakeref_t
intel_display_power_get_if_enabled(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
bool is_enabled;
- wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get_if_in_use(display);
if (!wakeref)
return NULL;
@@ -560,7 +589,7 @@ intel_display_power_get_if_enabled(struct intel_display *display,
mutex_unlock(&power_domains->lock);
if (!is_enabled) {
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
wakeref = NULL;
}
@@ -623,12 +652,10 @@ release_async_put_domains(struct i915_power_domains *power_domains,
struct intel_display *display = container_of(power_domains,
struct intel_display,
power.domains);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
enum intel_display_power_domain domain;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
- wakeref = intel_runtime_pm_get_noresume(rpm);
+ wakeref = intel_display_rpm_get_noresume(display);
for_each_power_domain(domain, mask) {
/* Clear before put, so put's sanity check is happy. */
@@ -636,7 +663,7 @@ release_async_put_domains(struct i915_power_domains *power_domains,
__intel_display_power_put_domain(display, domain);
}
- intel_runtime_pm_put(rpm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
static void
@@ -644,11 +671,10 @@ intel_display_power_put_async_work(struct work_struct *work)
{
struct intel_display *display = container_of(work, struct intel_display,
power.domains.async_put_work.work);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
- intel_wakeref_t old_work_wakeref = NULL;
+ struct ref_tracker *new_work_wakeref, *old_work_wakeref = NULL;
+
+ new_work_wakeref = intel_display_rpm_get_raw(display);
mutex_lock(&power_domains->lock);
@@ -688,9 +714,9 @@ out_verify:
mutex_unlock(&power_domains->lock);
if (old_work_wakeref)
- intel_runtime_pm_put_raw(rpm, old_work_wakeref);
+ intel_display_rpm_put_raw(display, old_work_wakeref);
if (new_work_wakeref)
- intel_runtime_pm_put_raw(rpm, new_work_wakeref);
+ intel_display_rpm_put_raw(display, new_work_wakeref);
}
/**
@@ -711,10 +737,10 @@ void __intel_display_power_put_async(struct intel_display *display,
intel_wakeref_t wakeref,
int delay_ms)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- struct intel_runtime_pm *rpm = &i915->runtime_pm;
- intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
+ struct ref_tracker *work_wakeref;
+
+ work_wakeref = intel_display_rpm_get_raw(display);
delay_ms = delay_ms >= 0 ? delay_ms : 100;
@@ -746,9 +772,9 @@ out_verify:
mutex_unlock(&power_domains->lock);
if (work_wakeref)
- intel_runtime_pm_put_raw(rpm, work_wakeref);
+ intel_display_rpm_put_raw(display, work_wakeref);
- intel_runtime_pm_put(rpm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
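
The async-put conversion keeps the raw-wakeref handoff intact, only routed through the display RPM wrappers: the caller grabs a raw reference so the device stays awake until the delayed worker runs, and drops its own domain reference immediately. A sketch of the handoff under hypothetical example_* names, noting that the real function also merges into an already-queued work item:

static void example_put_async(struct example_dev *d, struct ref_tracker *ref)
{
	struct ref_tracker *work_ref = example_rpm_get_raw(d);

	if (example_queue_work(d, work_ref)) {
		/* Ownership of work_ref moved to the worker. */
		work_ref = NULL;
	}

	if (work_ref)
		example_rpm_put_raw(d, work_ref);

	/* The caller's reference is always dropped synchronously. */
	example_rpm_put(d, ref);
}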
/**
@@ -765,7 +791,6 @@ out_verify:
*/
void intel_display_power_flush_work(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
intel_wakeref_t work_wakeref;
@@ -786,7 +811,7 @@ out_verify:
mutex_unlock(&power_domains->lock);
if (work_wakeref)
- intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
+ intel_display_rpm_put_raw(display, work_wakeref);
}
/**
@@ -824,10 +849,8 @@ void intel_display_power_put(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
__intel_display_power_put(display, domain);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
#else
/**
@@ -846,10 +869,8 @@ void intel_display_power_put(struct intel_display *display,
void intel_display_power_put_unchecked(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
__intel_display_power_put(display, domain);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_display_rpm_put_unchecked(display);
}
#endif
@@ -1373,26 +1394,24 @@ static void hsw_restore_lcpll(struct intel_display *display)
*/
static void hsw_enable_pc8(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
drm_dbg_kms(display->drm, "Enabling package C8+\n");
- if (HAS_PCH_LPT_LP(dev_priv))
+ if (HAS_PCH_LPT_LP(display))
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
PCH_LP_PARTITION_LEVEL_DISABLE, 0);
- lpt_disable_clkout_dp(dev_priv);
+ lpt_disable_clkout_dp(display);
hsw_disable_lcpll(display, true, true);
}
static void hsw_disable_pc8(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
drm_dbg_kms(display->drm, "Disabling package C8+\n");
hsw_restore_lcpll(display);
- intel_init_pch_refclk(dev_priv);
+ intel_init_pch_refclk(display);
/* Many display registers don't survive PC8+ */
#ifdef I915 /* FIXME */
@@ -1423,14 +1442,13 @@ static void intel_pch_reset_handshake(struct intel_display *display,
static void skl_display_core_init(struct intel_display *display,
bool resume)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
gen9_set_dc_state(display, DC_STATE_DISABLE);
/* enable PCH reset handshake */
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
if (!HAS_DISPLAY(display))
return;
@@ -1632,20 +1650,19 @@ static void tgl_bw_buddy_init(struct intel_display *display)
static void icl_display_core_init(struct intel_display *display,
bool resume)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
gen9_set_dc_state(display, DC_STATE_DISABLE);
/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
- INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
+ if (INTEL_PCH_TYPE(display) >= PCH_TGP &&
+ INTEL_PCH_TYPE(display) < PCH_DG1)
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 0,
PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
/* 1. Enable PCH reset handshake. */
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
if (!HAS_DISPLAY(display))
return;
@@ -1916,7 +1933,6 @@ static void intel_power_domains_verify_state(struct intel_display *display);
*/
void intel_power_domains_init_hw(struct intel_display *display, bool resume)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
power_domains->initializing = true;
@@ -1940,9 +1956,9 @@ void intel_power_domains_init_hw(struct intel_display *display, bool resume)
assert_isp_power_gated(display);
} else if (display->platform.broadwell || display->platform.haswell) {
hsw_assert_cdclk(display);
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
} else if (display->platform.ivybridge) {
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
}
/*
@@ -1979,7 +1995,6 @@ void intel_power_domains_init_hw(struct intel_display *display, bool resume)
*/
void intel_power_domains_driver_remove(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&display->power.domains.init_wakeref);
@@ -1993,7 +2008,7 @@ void intel_power_domains_driver_remove(struct intel_display *display)
intel_power_domains_verify_state(display);
/* Keep the power well enabled, but cancel its rpm wakeref. */
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
/**
@@ -2238,8 +2253,6 @@ static void intel_power_domains_verify_state(struct intel_display *display)
void intel_display_power_suspend_late(struct intel_display *display, bool s2idle)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
intel_power_domains_suspend(display, s2idle);
if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
@@ -2250,14 +2263,12 @@ void intel_display_power_suspend_late(struct intel_display *display, bool s2idle
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
- if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
- intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
+ if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
+ intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}
void intel_display_power_resume_early(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
display->platform.broxton) {
gen9_sanitize_dc_state(display);
@@ -2267,8 +2278,8 @@ void intel_display_power_resume_early(struct intel_display *display)
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
- if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
- intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
+ if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
+ intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
intel_power_domains_resume(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 1b53d67f9b60..f8813b0e16df 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -183,6 +183,7 @@ void intel_display_power_suspend(struct intel_display *display);
void intel_display_power_resume(struct intel_display *display);
void intel_display_power_set_target_dc_state(struct intel_display *display,
u32 state);
+u32 intel_display_power_get_current_dc_state(struct intel_display *display);
bool intel_display_power_is_enabled(struct intel_display *display,
enum intel_display_power_domain domain);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index e80e1fd611ca..ab1163744bc5 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -1696,6 +1696,7 @@ I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_dc_off,
XE3LPD_PW_C_POWER_DOMAINS,
XE3LPD_PW_D_POWER_DOMAINS,
POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
POWER_DOMAIN_INIT);
static const struct i915_power_well_desc xe3lpd_power_wells_dcoff[] = {
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 8ec87ffd87d2..b104bce0e14d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -13,6 +13,7 @@
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_power_well.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
@@ -24,6 +25,7 @@
#include "intel_hotplug.h"
#include "intel_pcode.h"
#include "intel_pps.h"
+#include "intel_psr.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "skl_watermark.h"
@@ -186,22 +188,18 @@ int intel_power_well_refcount(struct i915_power_well *power_well)
static void hsw_power_well_post_enable(struct intel_display *display,
u8 irq_pipe_mask, bool has_vga)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (has_vga)
intel_vga_reset_io_mem(display);
if (irq_pipe_mask)
- gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
+ gen8_irq_power_well_post_enable(display, irq_pipe_mask);
}
static void hsw_power_well_pre_disable(struct intel_display *display,
u8 irq_pipe_mask)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (irq_pipe_mask)
- gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
+ gen8_irq_power_well_pre_disable(display, irq_pipe_mask);
}
#define ICL_AUX_PW_TO_PHY(pw_idx) \
@@ -752,8 +750,9 @@ void gen9_sanitize_dc_state(struct intel_display *display)
void gen9_set_dc_state(struct intel_display *display, u32 state)
{
struct i915_power_domains *power_domains = &display->power.domains;
- u32 val;
+ bool dc6_was_enabled, enable_dc6;
u32 mask;
+ u32 val;
if (!HAS_DISPLAY(display))
return;
@@ -762,6 +761,9 @@ void gen9_set_dc_state(struct intel_display *display, u32 state)
state & ~power_domains->allowed_dc_mask))
state &= power_domains->allowed_dc_mask;
+ if (!power_domains->initializing)
+ intel_psr_notify_dc5_dc6(display);
+
val = intel_de_read(display, DC_STATE_EN);
mask = gen9_dc_mask(display);
drm_dbg_kms(display->drm, "Setting DC state from %02x to %02x\n",
@@ -772,11 +774,19 @@ void gen9_set_dc_state(struct intel_display *display, u32 state)
drm_err(display->drm, "DC state mismatch (0x%x -> 0x%x)\n",
power_domains->dc_state, val & mask);
+ enable_dc6 = state & DC_STATE_EN_UPTO_DC6;
+ dc6_was_enabled = val & DC_STATE_EN_UPTO_DC6;
+ if (!dc6_was_enabled && enable_dc6)
+ intel_dmc_update_dc6_allowed_count(display, true);
+
val &= ~mask;
val |= state;
gen9_write_dc_state(display, val);
+ if (!enable_dc6 && dc6_was_enabled)
+ intel_dmc_update_dc6_allowed_count(display, false);
+
power_domains->dc_state = val & mask;
}
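The two intel_dmc_update_dc6_allowed_count() calls deliberately bracket gen9_write_dc_state(): the DC5 counter is snapshotted just before DC6 becomes allowed, and the accumulated delta is folded in only once DC6 has been disallowed again, so the tracked window covers exactly the span in which a DC5 entry could have been promoted to DC6.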
@@ -816,7 +826,8 @@ static void assert_can_enable_dc5(struct intel_display *display)
(intel_de_read(display, DC_STATE_EN) &
DC_STATE_EN_UPTO_DC5),
"DC5 already programmed to be enabled.\n");
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+
+ assert_display_rpm_held(display);
assert_dmc_loaded(display);
}
@@ -1201,7 +1212,6 @@ static void vlv_init_display_clock_gating(struct intel_display *display)
static void vlv_display_power_well_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
enum pipe pipe;
@@ -1225,9 +1235,7 @@ static void vlv_display_power_well_init(struct intel_display *display)
vlv_init_display_clock_gating(display);
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_enable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(display);
/*
* During driver initialization/resume we can avoid restoring the
@@ -1236,8 +1244,8 @@ static void vlv_display_power_well_init(struct intel_display *display)
if (display->power.domains.initializing)
return;
- intel_hpd_init(dev_priv);
- intel_hpd_poll_disable(dev_priv);
+ intel_hpd_init(display);
+ intel_hpd_poll_disable(display);
/* Re-enable the ADPA, if we have one */
for_each_intel_encoder(display->drm, encoder) {
@@ -1245,7 +1253,7 @@ static void vlv_display_power_well_init(struct intel_display *display)
intel_crt_reset(&encoder->base);
}
- intel_vga_redisable_power_on(display);
+ intel_vga_disable(display);
intel_pps_unlock_regs_wa(display);
}
@@ -1254,9 +1262,7 @@ static void vlv_display_power_well_deinit(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_disable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(display);
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
@@ -1265,7 +1271,7 @@ static void vlv_display_power_well_deinit(struct intel_display *display)
/* Prevent us from re-enabling polling by accident in late suspend */
if (!display->drm->dev->power.is_suspended)
- intel_hpd_poll_enable(dev_priv);
+ intel_hpd_poll_enable(display);
}
static void vlv_display_power_well_enable(struct intel_display *display,
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index 1f2798404f2c..1dbd3e841df3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -107,14 +107,14 @@ void intel_display_reset_finish(struct intel_display *display, bool test_only)
intel_display_driver_init_hw(display);
intel_clock_gating_init(i915);
intel_cx0_pll_power_save_wa(display);
- intel_hpd_init(i915);
+ intel_hpd_init(display);
ret = __intel_display_driver_resume(display, state, ctx);
if (ret)
drm_err(display->drm,
"Restoring old state failed with %i\n", ret);
- intel_hpd_poll_disable(i915);
+ intel_hpd_poll_disable(display);
}
drm_atomic_state_put(state);
diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.c b/drivers/gpu/drm/i915/display/intel_display_rpm.c
new file mode 100644
index 000000000000..48da67dd0136
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rpm.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include "i915_drv.h"
+#include "intel_display_rpm.h"
+#include "intel_runtime_pm.h"
+
+static struct intel_runtime_pm *display_to_rpm(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ return &i915->runtime_pm;
+}
+
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
+{
+ return intel_runtime_pm_get_raw(display_to_rpm(display));
+}
+
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ intel_runtime_pm_put_raw(display_to_rpm(display), wakeref);
+}
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
+{
+ return intel_runtime_pm_get(display_to_rpm(display));
+}
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
+{
+ return intel_runtime_pm_get_if_in_use(display_to_rpm(display));
+}
+
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
+{
+ return intel_runtime_pm_get_noresume(display_to_rpm(display));
+}
+
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ intel_runtime_pm_put(display_to_rpm(display), wakeref);
+}
+
+void intel_display_rpm_put_unchecked(struct intel_display *display)
+{
+ intel_runtime_pm_put_unchecked(display_to_rpm(display));
+}
+
+bool intel_display_rpm_suspended(struct intel_display *display)
+{
+ return intel_runtime_pm_suspended(display_to_rpm(display));
+}
+
+void assert_display_rpm_held(struct intel_display *display)
+{
+ assert_rpm_wakelock_held(display_to_rpm(display));
+}
+
+void intel_display_rpm_assert_block(struct intel_display *display)
+{
+ disable_rpm_wakeref_asserts(display_to_rpm(display));
+}
+
+void intel_display_rpm_assert_unblock(struct intel_display *display)
+{
+ enable_rpm_wakeref_asserts(display_to_rpm(display));
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.h b/drivers/gpu/drm/i915/display/intel_display_rpm.h
new file mode 100644
index 000000000000..6ef48515f84b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rpm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DISPLAY_RPM__
+#define __INTEL_DISPLAY_RPM__
+
+#include <linux/types.h>
+
+struct intel_display;
+struct ref_tracker;
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display);
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref);
+
+#define __with_intel_display_rpm(__display, __wakeref) \
+ for (struct ref_tracker *(__wakeref) = intel_display_rpm_get(__display); (__wakeref); \
+ intel_display_rpm_put((__display), (__wakeref)), (__wakeref) = NULL)
+
+#define with_intel_display_rpm(__display) \
+ __with_intel_display_rpm((__display), __UNIQUE_ID(wakeref))
+
+/* Only for special cases. */
+bool intel_display_rpm_suspended(struct intel_display *display);
+
+void assert_display_rpm_held(struct intel_display *display);
+void intel_display_rpm_assert_block(struct intel_display *display);
+void intel_display_rpm_assert_unblock(struct intel_display *display);
+
+/* Only for display power implementation. */
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display);
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref);
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display);
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display);
+void intel_display_rpm_put_unchecked(struct intel_display *display);
+
+#endif /* __INTEL_DISPLAY_RPM__ */
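
For orientation, a minimal usage sketch of the scoped-wakeref macro added above; the caller below is hypothetical and not part of this series. The hidden for loop takes the wakeref, runs the block once, and releases the wakeref when the block is left normally:

static u32 read_reg_awake(struct intel_display *display, i915_reg_t reg)
{
	u32 val = 0;

	with_intel_display_rpm(display)	/* get happens here... */
		val = intel_de_read(display, reg);
	/* ...and put happens in the hidden for loop's increment */

	return val;
}

Note that a return or break inside the block would skip the hidden intel_display_rpm_put(), so such paths still need an explicit get/put pair.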
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.c b/drivers/gpu/drm/i915/display/intel_display_rps.c
index 4074a1879828..678b24115951 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rps.c
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.c
@@ -8,6 +8,8 @@
#include "gt/intel_rps.h"
#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_display_irq.h"
#include "intel_display_rps.h"
#include "intel_display_types.h"
@@ -81,3 +83,24 @@ void intel_display_rps_mark_interactive(struct intel_display *display,
intel_rps_mark_interactive(&to_gt(i915)->rps, interactive);
state->rps_interactive = interactive;
}
+
+void ilk_display_rps_enable(struct intel_display *display)
+{
+ spin_lock(&display->irq.lock);
+ ilk_enable_display_irq(display, DE_PCU_EVENT);
+ spin_unlock(&display->irq.lock);
+}
+
+void ilk_display_rps_disable(struct intel_display *display)
+{
+ spin_lock(&display->irq.lock);
+ ilk_disable_display_irq(display, DE_PCU_EVENT);
+ spin_unlock(&display->irq.lock);
+}
+
+void ilk_display_rps_irq_handler(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ gen5_rps_irq_handler(&to_gt(i915)->rps);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.h b/drivers/gpu/drm/i915/display/intel_display_rps.h
index 556891edb2dd..183d154f2c7c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rps.h
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.h
@@ -13,10 +13,34 @@ struct drm_crtc;
struct intel_atomic_state;
struct intel_display;
+#ifdef I915
void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
struct dma_fence *fence);
void intel_display_rps_mark_interactive(struct intel_display *display,
struct intel_atomic_state *state,
bool interactive);
+void ilk_display_rps_enable(struct intel_display *display);
+void ilk_display_rps_disable(struct intel_display *display);
+void ilk_display_rps_irq_handler(struct intel_display *display);
+#else
+static inline void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence)
+{
+}
+static inline void intel_display_rps_mark_interactive(struct intel_display *display,
+ struct intel_atomic_state *state,
+ bool interactive)
+{
+}
+static inline void ilk_display_rps_enable(struct intel_display *display)
+{
+}
+static inline void ilk_display_rps_disable(struct intel_display *display)
+{
+}
+static inline void ilk_display_rps_irq_handler(struct intel_display *display)
+{
+}
+#endif
#endif /* __INTEL_DISPLAY_RPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_snapshot.c b/drivers/gpu/drm/i915/display/intel_display_snapshot.c
index 25ba043cbb65..66087302fdbc 100644
--- a/drivers/gpu/drm/i915/display/intel_display_snapshot.c
+++ b/drivers/gpu/drm/i915/display/intel_display_snapshot.c
@@ -7,6 +7,7 @@
#include "intel_display_core.h"
#include "intel_display_device.h"
+#include "intel_display_irq.h"
#include "intel_display_params.h"
#include "intel_display_snapshot.h"
#include "intel_dmc.h"
@@ -20,6 +21,7 @@ struct intel_display_snapshot {
struct intel_display_params params;
struct intel_overlay_snapshot *overlay;
struct intel_dmc_snapshot *dmc;
+ struct intel_display_irq_snapshot *irq;
};
struct intel_display_snapshot *intel_display_snapshot_capture(struct intel_display *display)
@@ -38,6 +40,7 @@ struct intel_display_snapshot *intel_display_snapshot_capture(struct intel_displ
intel_display_params_copy(&snapshot->params);
+ snapshot->irq = intel_display_irq_snapshot_capture(display);
snapshot->overlay = intel_overlay_snapshot_capture(display);
snapshot->dmc = intel_dmc_snapshot_capture(display);
@@ -57,6 +60,7 @@ void intel_display_snapshot_print(const struct intel_display_snapshot *snapshot,
intel_display_device_info_print(&snapshot->info, &snapshot->runtime_info, p);
intel_display_params_dump(&snapshot->params, display->drm->driver->name, p);
+ intel_display_irq_snapshot_print(snapshot->irq, p);
intel_overlay_snapshot_print(snapshot->overlay, p);
intel_dmc_snapshot_print(snapshot->dmc, p);
}
@@ -68,6 +72,7 @@ void intel_display_snapshot_free(struct intel_display_snapshot *snapshot)
intel_display_params_free(&snapshot->params);
+ kfree(snapshot->irq);
kfree(snapshot->overlay);
kfree(snapshot->dmc);
kfree(snapshot);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 99a6fd2900b9..d6d0440dcee9 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -581,7 +581,7 @@ struct dpll {
struct intel_atomic_state {
struct drm_atomic_state base;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct __intel_global_objs_state *global_objs;
int num_global_objs;
@@ -1114,6 +1114,7 @@ struct intel_crtc_state {
bool wm_level_disabled;
u32 dc3co_exitline;
u16 su_y_granularity;
+ u8 active_non_psr_pipes;
/*
* Frequency the dpll for the port should run at. Differs from the
@@ -1387,7 +1388,7 @@ struct intel_crtc {
/* armed event for DSB based updates */
struct drm_pending_vblank_event *dsb_event;
- /* Access to these should be protected by dev_priv->irq_lock. */
+ /* Access to these should be protected by display->irq.lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
@@ -1439,7 +1440,7 @@ struct intel_crtc {
struct intel_pipe_crc pipe_crc;
#endif
- bool block_dc_for_vblank;
+ bool vblank_psr_notify;
};
struct intel_plane_error {
@@ -1620,7 +1621,7 @@ struct intel_psr {
bool sink_support;
bool source_support;
bool enabled;
- bool paused;
+ int pause_counter;
enum pipe pipe;
enum transcoder transcoder;
bool active;
@@ -1650,6 +1651,8 @@ struct intel_psr {
u8 entry_setup_frames;
bool link_ok;
+
+ u8 active_non_psr_pipes;
};
struct intel_dp {
@@ -1658,7 +1661,6 @@ struct intel_dp {
int link_rate;
u8 lane_count;
u8 sink_count;
- bool link_trained;
bool needs_modeset_retry;
bool use_max_params;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
@@ -1683,6 +1685,7 @@ struct intel_dp {
int common_rates[DP_MAX_SUPPORTED_RATES];
struct {
/* TODO: move the rest of link specific fields to here */
+ bool active;
/* common rate,lane_count configs in bw order */
int num_configs;
#define INTEL_DP_MAX_LANE_COUNT 4
@@ -1739,7 +1742,7 @@ struct intel_dp {
struct {
struct intel_dp_mst_encoder *stream_encoders[I915_MAX_PIPES];
struct drm_dp_mst_topology_mgr mgr;
- int active_links;
+ int active_streams;
} mst;
u32 (*get_aux_clock_divider)(struct intel_dp *dp, int index);
@@ -1805,12 +1808,16 @@ struct intel_dp {
struct {
u8 io_wake_lines;
u8 fast_wake_lines;
+ enum transcoder transcoder;
+ struct mutex lock;
/* LNL and beyond */
u8 check_entry_lines;
u8 aux_less_wake_lines;
u8 silence_period_sym_clocks;
u8 lfps_half_cycle_num_of_syms;
+ bool lobf_disable_debug;
+ bool sink_alpm_error;
} alpm_parameters;
u8 alpm_dpcd;
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c
index e5a8022db664..da429c332914 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.c
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.c
@@ -3,38 +3,38 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_core.h"
#include "intel_display_wa.h"
-static void gen11_display_wa_apply(struct drm_i915_private *i915)
+static void gen11_display_wa_apply(struct intel_display *display)
{
/* Wa_14010594013 */
- intel_de_rmw(i915, GEN8_CHICKEN_DCPR_1, 0, ICL_DELAY_PMRSP);
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 0, ICL_DELAY_PMRSP);
}
-static void xe_d_display_wa_apply(struct drm_i915_private *i915)
+static void xe_d_display_wa_apply(struct intel_display *display)
{
/* Wa_14013723622 */
- intel_de_rmw(i915, CLKREQ_POLICY, CLKREQ_POLICY_MEM_UP_OVRD, 0);
+ intel_de_rmw(display, CLKREQ_POLICY, CLKREQ_POLICY_MEM_UP_OVRD, 0);
}
-static void adlp_display_wa_apply(struct drm_i915_private *i915)
+static void adlp_display_wa_apply(struct intel_display *display)
{
/* Wa_22011091694:adlp */
- intel_de_rmw(i915, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);
+ intel_de_rmw(display, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);
/* Bspec/49189 Initialize Sequence */
- intel_de_rmw(i915, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
}
-void intel_display_wa_apply(struct drm_i915_private *i915)
+void intel_display_wa_apply(struct intel_display *display)
{
- if (IS_ALDERLAKE_P(i915))
- adlp_display_wa_apply(i915);
- else if (DISPLAY_VER(i915) == 12)
- xe_d_display_wa_apply(i915);
- else if (DISPLAY_VER(i915) == 11)
- gen11_display_wa_apply(i915);
+ if (display->platform.alderlake_p)
+ adlp_display_wa_apply(display);
+ else if (DISPLAY_VER(display) == 12)
+ xe_d_display_wa_apply(display);
+ else if (DISPLAY_VER(display) == 11)
+ gen11_display_wa_apply(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h
index be644ab6ae00..babd9d16603d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.h
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.h
@@ -8,14 +8,17 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_display;
-void intel_display_wa_apply(struct drm_i915_private *i915);
+void intel_display_wa_apply(struct intel_display *display);
#ifdef I915
-static inline bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915) { return false; }
+static inline bool intel_display_needs_wa_16023588340(struct intel_display *display)
+{
+ return false;
+}
#else
-bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915);
+bool intel_display_needs_wa_16023588340(struct intel_display *display);
#endif
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.c b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
index 0813fb9b5823..dad7192132ad 100644
--- a/drivers/gpu/drm/i915/display/intel_dkl_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_device.h>
+#include <drm/drm_print.h>
#include "intel_de.h"
#include "intel_display.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index fa6944e55d95..b58189d24e7e 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -28,6 +28,8 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_rpm.h"
+#include "intel_display_power_well.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_step.h"
@@ -57,6 +59,10 @@ struct intel_dmc {
const char *fw_path;
u32 max_fw_size; /* bytes */
u32 version;
+ struct {
+ u32 dc5_start;
+ u32 count;
+ } dc6_allowed;
struct dmc_fw_info {
u32 mmio_count;
i915_reg_t mmioaddr[20];
@@ -167,7 +173,6 @@ MODULE_FIRMWARE(BXT_DMC_PATH);
static const char *dmc_firmware_default(struct intel_display *display, u32 *size)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const char *fw_path = NULL;
u32 max_fw_size = 0;
@@ -183,39 +188,39 @@ static const char *dmc_firmware_default(struct intel_display *display, u32 *size
} else if (DISPLAY_VERx100(display) == 1400) {
fw_path = MTL_DMC_PATH;
max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
- } else if (IS_DG2(i915)) {
+ } else if (display->platform.dg2) {
fw_path = DG2_DMC_PATH;
max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_P(i915)) {
+ } else if (display->platform.alderlake_p) {
fw_path = ADLP_DMC_PATH;
max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_S(i915)) {
+ } else if (display->platform.alderlake_s) {
fw_path = ADLS_DMC_PATH;
max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_DG1(i915)) {
+ } else if (display->platform.dg1) {
fw_path = DG1_DMC_PATH;
max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_ROCKETLAKE(i915)) {
+ } else if (display->platform.rocketlake) {
fw_path = RKL_DMC_PATH;
max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_TIGERLAKE(i915)) {
+ } else if (display->platform.tigerlake) {
fw_path = TGL_DMC_PATH;
max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
} else if (DISPLAY_VER(display) == 11) {
fw_path = ICL_DMC_PATH;
max_fw_size = ICL_DMC_MAX_FW_SIZE;
- } else if (IS_GEMINILAKE(i915)) {
+ } else if (display->platform.geminilake) {
fw_path = GLK_DMC_PATH;
max_fw_size = GLK_DMC_MAX_FW_SIZE;
- } else if (IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915) ||
- IS_COMETLAKE(i915)) {
+ } else if (display->platform.kabylake ||
+ display->platform.coffeelake ||
+ display->platform.cometlake) {
fw_path = KBL_DMC_PATH;
max_fw_size = KBL_DMC_MAX_FW_SIZE;
- } else if (IS_SKYLAKE(i915)) {
+ } else if (display->platform.skylake) {
fw_path = SKL_DMC_PATH;
max_fw_size = SKL_DMC_MAX_FW_SIZE;
- } else if (IS_BROXTON(i915)) {
+ } else if (display->platform.broxton) {
fw_path = BXT_DMC_PATH;
max_fw_size = BXT_DMC_MAX_FW_SIZE;
}
@@ -511,6 +516,54 @@ void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe)
intel_de_rmw(display, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
}
+/**
+ * intel_dmc_block_pkgc() - block PKG C-state
+ * @display: display instance
+ * @pipe: pipe whose register is used for blocking
+ * @block: block/unblock
+ *
+ * This interface is targeted at Wa_16025596647 usage, i.e. setting/clearing the
+ * PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS bit in the PIPEDMC_BLOCK_PKGC_SW register.
+ */
+void intel_dmc_block_pkgc(struct intel_display *display, enum pipe pipe,
+ bool block)
+{
+ intel_de_rmw(display, PIPEDMC_BLOCK_PKGC_SW(pipe),
+ PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS, block ?
+ PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS : 0);
+}
+
+/**
+ * intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank() - start of PKG
+ * C-state exit
+ * @display: display instance
+ * @pipe: pipe whose DMC event register is programmed
+ * @enable: enable/disable
+ *
+ * This interface is targeted at Wa_16025596647 usage, i.e. starting the package
+ * C-state exit at the start of the undelayed vblank.
+ */
+void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display,
+ enum pipe pipe, bool enable)
+{
+ u32 val;
+
+ if (enable)
+ val = DMC_EVT_CTL_ENABLE | DMC_EVT_CTL_RECURRING |
+ REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVT_CTL_EVENT_ID_VBLANK_A);
+ else
+ val = REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVT_CTL_EVENT_ID_FALSE) |
+ REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1);
+
+ intel_de_write(display, MTL_PIPEDMC_EVT_CTL_4(pipe),
+ val);
+}
+
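
Taken together, the two helpers above give callers both halves of Wa_16025596647. A hedged sketch of how they might be armed in tandem; the wrapper name and call site are assumptions, not part of this diff:

/* Hypothetical wrapper; the real call sites live outside this hunk. */
static void wa_16025596647_arm(struct intel_display *display,
			       enum pipe pipe, bool arm)
{
	/* Keep the package out of deeper C-states while armed... */
	intel_dmc_block_pkgc(display, pipe, arm);

	/* ...and let the pipe DMC start PKG C exit at undelayed vblank. */
	intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
							       pipe, arm);
}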
static bool is_dmc_evt_ctl_reg(struct intel_display *display,
enum intel_dmc_id dmc_id, i915_reg_t reg)
{
@@ -535,8 +588,6 @@ static bool disable_dmc_evt(struct intel_display *display,
enum intel_dmc_id dmc_id,
i915_reg_t reg, u32 data)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (!is_dmc_evt_ctl_reg(display, dmc_id, reg))
return false;
@@ -545,12 +596,12 @@ static bool disable_dmc_evt(struct intel_display *display,
return true;
/* also disable the flip queue event on the main DMC on TGL */
- if (IS_TIGERLAKE(i915) &&
+ if (display->platform.tigerlake &&
REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_CLK_MSEC)
return true;
/* also disable the HRR event on the main DMC on TGL/ADLS */
- if ((IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915)) &&
+ if ((display->platform.tigerlake || display->platform.alderlake_s) &&
REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_VBLANK_A)
return true;
@@ -582,7 +633,6 @@ static u32 dmc_mmiodata(struct intel_display *display,
*/
void intel_dmc_load_program(struct intel_display *display)
{
- struct drm_i915_private *i915 __maybe_unused = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_dmc *dmc = display_to_dmc(display);
enum intel_dmc_id dmc_id;
@@ -595,7 +645,7 @@ void intel_dmc_load_program(struct intel_display *display)
disable_all_event_handlers(display);
- assert_rpm_wakelock_held(&i915->runtime_pm);
+ assert_display_rpm_held(display);
preempt_disable();
@@ -1006,9 +1056,7 @@ static void intel_dmc_runtime_pm_put(struct intel_display *display)
static const char *dmc_fallback_path(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (IS_ALDERLAKE_P(i915))
+ if (display->platform.alderlake_p)
return ADLP_DMC_FALLBACK_PATH;
return NULL;
@@ -1232,18 +1280,56 @@ void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct
DMC_VERSION_MINOR(snapshot->version));
}
+void intel_dmc_update_dc6_allowed_count(struct intel_display *display,
+ bool start_tracking)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+ u32 dc5_cur_count;
+
+ if (DISPLAY_VER(dmc->display) < 14)
+ return;
+
+ dc5_cur_count = intel_de_read(dmc->display, DG1_DMC_DEBUG_DC5_COUNT);
+
+ if (!start_tracking)
+ dmc->dc6_allowed.count += dc5_cur_count - dmc->dc6_allowed.dc5_start;
+
+ dmc->dc6_allowed.dc5_start = dc5_cur_count;
+}
+
+static bool intel_dmc_get_dc6_allowed_count(struct intel_display *display, u32 *count)
+{
+ struct i915_power_domains *power_domains = &display->power.domains;
+ struct intel_dmc *dmc = display_to_dmc(display);
+ bool dc6_enabled;
+
+ if (DISPLAY_VER(display) < 14)
+ return false;
+
+ mutex_lock(&power_domains->lock);
+ dc6_enabled = intel_de_read(display, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC6;
+ if (dc6_enabled)
+ intel_dmc_update_dc6_allowed_count(display, false);
+
+ *count = dmc->dc6_allowed.count;
+ mutex_unlock(&power_domains->lock);
+
+ return true;
+}
+
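The helpers above infer the DC6-allowed count purely from the DC5 entry counter. A worked example with invented numbers:

/*
 * DC6 gets enabled while DG1_DMC_DEBUG_DC5_COUNT reads 100, so
 * dc6_allowed.dc5_start = 100.  When DC6 is later disabled (or the
 * debugfs reader queries while DC6 is still enabled) the counter reads
 * 140, so dc6_allowed.count += 140 - 100 = 40: every DC5 entry made
 * while DC6 was enabled is counted as a "DC6 allowed" event.
 */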
static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_display *display = m->private;
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_dmc *dmc = display_to_dmc(display);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;
+ u32 dc6_allowed_count;
if (!HAS_DMC(display))
return -ENODEV;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
seq_printf(m, "fw loaded: %s\n",
@@ -1254,7 +1340,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
seq_printf(m, "Pipe A fw loaded: %s\n",
str_yes_no(has_dmc_id_fw(display, DMC_FW_PIPEA)));
seq_printf(m, "Pipe B fw needed: %s\n",
- str_yes_no(IS_ALDERLAKE_P(i915) ||
+ str_yes_no(display->platform.alderlake_p ||
DISPLAY_VER(display) >= 14));
seq_printf(m, "Pipe B fw loaded: %s\n",
str_yes_no(has_dmc_id_fw(display, DMC_FW_PIPEB)));
@@ -1268,7 +1354,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
if (DISPLAY_VER(display) >= 12) {
i915_reg_t dc3co_reg;
- if (IS_DGFX(i915) || DISPLAY_VER(display) >= 14) {
+ if (display->platform.dgfx || DISPLAY_VER(display) >= 14) {
dc3co_reg = DG1_DMC_DEBUG3;
dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
} else {
@@ -1280,14 +1366,18 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
seq_printf(m, "DC3CO count: %d\n",
intel_de_read(display, dc3co_reg));
} else {
- dc5_reg = IS_BROXTON(i915) ? BXT_DMC_DC3_DC5_COUNT :
+ dc5_reg = display->platform.broxton ? BXT_DMC_DC3_DC5_COUNT :
SKL_DMC_DC3_DC5_COUNT;
- if (!IS_GEMINILAKE(i915) && !IS_BROXTON(i915))
+ if (!display->platform.geminilake && !display->platform.broxton)
dc6_reg = SKL_DMC_DC5_DC6_COUNT;
}
seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(display, dc5_reg));
- if (i915_mmio_reg_valid(dc6_reg))
+
+ if (intel_dmc_get_dc6_allowed_count(display, &dc6_allowed_count))
+ seq_printf(m, "DC5 -> DC6 allowed count: %d\n",
+ dc6_allowed_count);
+ else if (i915_mmio_reg_valid(dc6_reg))
seq_printf(m, "DC5 -> DC6 count: %d\n",
intel_de_read(display, dc6_reg));
@@ -1299,7 +1389,7 @@ out:
intel_de_read(display, DMC_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", intel_de_read(display, DMC_HTP_SKL));
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index 44cecef98e73..bd1c459b0075 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -18,6 +18,10 @@ void intel_dmc_load_program(struct intel_display *display);
void intel_dmc_disable_program(struct intel_display *display);
void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe);
void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe);
+void intel_dmc_block_pkgc(struct intel_display *display, enum pipe pipe,
+ bool block);
+void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display,
+ enum pipe pipe, bool enable);
void intel_dmc_fini(struct intel_display *display);
void intel_dmc_suspend(struct intel_display *display);
void intel_dmc_resume(struct intel_display *display);
@@ -26,6 +30,7 @@ void intel_dmc_debugfs_register(struct intel_display *display);
struct intel_dmc_snapshot *intel_dmc_snapshot_capture(struct intel_display *display);
void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct drm_printer *p);
+void intel_dmc_update_dc6_allowed_count(struct intel_display *display, bool start_tracking);
void assert_dmc_loaded(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index 1bf446f96a10..e16ea3f16ed8 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -21,6 +21,20 @@
#define MTL_PIPEDMC_CONTROL _MMIO(0x45250)
#define PIPEDMC_ENABLE_MTL(pipe) REG_BIT(((pipe) - PIPE_A) * 4)
+#define _MTL_PIPEDMC_EVT_CTL_4_A 0x5f044
+#define _MTL_PIPEDMC_EVT_CTL_4_B 0x5f444
+#define MTL_PIPEDMC_EVT_CTL_4(pipe) _MMIO_PIPE(pipe, \
+ _MTL_PIPEDMC_EVT_CTL_4_A, \
+ _MTL_PIPEDMC_EVT_CTL_4_B)
+
+#define PIPEDMC_BLOCK_PKGC_SW_A 0x5f1d0
+#define PIPEDMC_BLOCK_PKGC_SW_B 0x5f5d0
+#define PIPEDMC_BLOCK_PKGC_SW(pipe) _MMIO_PIPE(pipe, \
+ PIPEDMC_BLOCK_PKGC_SW_A, \
+ PIPEDMC_BLOCK_PKGC_SW_B)
+#define PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS BIT(31)
+#define PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_UNTIL_NEXT_FRAMESTART BIT(15)
+
#define _ADLP_PIPEDMC_REG_MMIO_BASE_A 0x5f000
#define _TGL_PIPEDMC_REG_MMIO_BASE_A 0x92000
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 392c3653d0d7..640c43bf62d4 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -45,12 +45,13 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "g4x_dp.h"
-#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -58,10 +59,12 @@
#include "intel_combo_phy_regs.h"
#include "intel_connector.h"
#include "intel_crtc.h"
+#include "intel_crtc_state_dump.h"
#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
@@ -87,12 +90,10 @@
#include "intel_pfit.h"
#include "intel_pps.h"
#include "intel_psr.h"
-#include "intel_runtime_pm.h"
#include "intel_quirks.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
-#include "intel_crtc_state_dump.h"
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
@@ -2523,6 +2524,7 @@ intel_dp_dsc_compute_pipe_bpp_limits(struct intel_dp *intel_dp,
bool
intel_dp_compute_config_limits(struct intel_dp *intel_dp,
+ struct intel_connector *connector,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits,
bool dsc,
@@ -2576,7 +2578,7 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
intel_dp_test_compute_config(intel_dp, crtc_state, limits);
return intel_dp_compute_config_link_bpp_limits(intel_dp,
- intel_dp->attached_connector,
+ connector,
crtc_state,
dsc,
limits);
@@ -2637,7 +2639,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
- !intel_dp_compute_config_limits(intel_dp, pipe_config,
+ !intel_dp_compute_config_limits(intel_dp, connector, pipe_config,
respect_downstream_limits,
false,
&limits);
@@ -2671,7 +2673,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
- if (!intel_dp_compute_config_limits(intel_dp, pipe_config,
+ if (!intel_dp_compute_config_limits(intel_dp, connector, pipe_config,
respect_downstream_limits,
true,
&limits))
@@ -3104,6 +3106,76 @@ intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
}
}
+int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ int symbol_size = intel_dp_is_uhbr(crtc_state) ? 32 : 8;
+ /*
+ * min symbol cycles is 3 (BS, VBID, BE) for 128b/132b and
+ * 5 (BS, VBID, MVID, MAUD, BE) for 8b/10b
+ */
+ int min_sym_cycles = intel_dp_is_uhbr(crtc_state) ? 3 : 5;
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ int num_joined_pipes = intel_crtc_num_joined_pipes(crtc_state);
+ int min_hblank;
+ int max_lane_count = 4;
+ int hactive_sym_cycles, htotal_sym_cycles;
+ int dsc_slices = 0;
+ int link_bpp_x16;
+
+ if (DISPLAY_VER(display) < 30)
+ return 0;
+
+ /* MIN_HBLANK should be set only for 8b/10b MST or for 128b/132b SST/MST */
+ if (!is_mst && !intel_dp_is_uhbr(crtc_state))
+ return 0;
+
+ if (crtc_state->dsc.compression_enable) {
+ dsc_slices = intel_dp_dsc_get_slice_count(connector,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay,
+ num_joined_pipes);
+ if (!dsc_slices) {
+ drm_dbg(display->drm, "failed to calculate dsc slice count\n");
+ return -EINVAL;
+ }
+ }
+
+ if (crtc_state->dsc.compression_enable)
+ link_bpp_x16 = crtc_state->dsc.compressed_bpp_x16;
+ else
+ link_bpp_x16 = fxp_q4_from_int(intel_dp_output_bpp(crtc_state->output_format,
+ crtc_state->pipe_bpp));
+
+ /* Calculate min Hblank Link Layer Symbol Cycle Count for 8b/10b MST & 128b/132b */
+ hactive_sym_cycles = drm_dp_link_symbol_cycles(max_lane_count,
+ adjusted_mode->hdisplay,
+ dsc_slices,
+ link_bpp_x16,
+ symbol_size, is_mst);
+ htotal_sym_cycles = adjusted_mode->htotal * hactive_sym_cycles /
+ adjusted_mode->hdisplay;
+
+ min_hblank = htotal_sym_cycles - hactive_sym_cycles;
+ /* minimum Hblank calculation: https://groups.vesa.org/wg/DP/document/20494 */
+ min_hblank = max(min_hblank, min_sym_cycles);
+
+ /*
+ * adjust the BlankingStart/BlankingEnd framing control from
+ * the calculated value
+ */
+ min_hblank = min_hblank - 2;
+
+ min_hblank = min(10, min_hblank);
+ crtc_state->min_hblank = min_hblank;
+
+ return 0;
+}
+
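A pass through the computation above with invented numbers, to make the clamp order concrete (128b/132b, so symbol_size = 32 and min_sym_cycles = 3):

/*
 * hdisplay = 3840, htotal = 4000, and assume drm_dp_link_symbol_cycles()
 * returns hactive_sym_cycles = 960.  Then:
 *   htotal_sym_cycles = 4000 * 960 / 3840 = 1000
 *   min_hblank        = 1000 - 960       = 40
 *   max(40, 3)  -> 40 (floor at min_sym_cycles)
 *   40 - 2      -> 38 (BlankingStart/End framing adjustment)
 *   min(10, 38) -> 10 (what lands in crtc_state->min_hblank)
 */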
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -3203,6 +3275,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
&pipe_config->dp_m_n);
}
+ ret = intel_dp_compute_min_hblank(pipe_config, conn_state);
+ if (ret)
+ return ret;
+
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count;
@@ -3223,7 +3299,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, int lane_count)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
- intel_dp->link_trained = false;
+ intel_dp->link.active = false;
intel_dp->needs_modeset_retry = false;
intel_dp->link_rate = link_rate;
intel_dp->lane_count = lane_count;
@@ -3587,7 +3663,7 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
if (crtc_state) {
intel_dp_reset_link_params(intel_dp);
intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count);
- intel_dp->link_trained = true;
+ intel_dp->link.active = true;
}
}
@@ -4456,6 +4532,23 @@ intel_dp_mst_disconnect(struct intel_dp *intel_dp)
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ /*
+ * Display WA for HSD #13013007775: mtl/arl/lnl
+ * Read the sink count and link service IRQ registers in separate
+ * transactions to prevent disconnecting the sink on a TBT link
+ * inadvertently.
+ */
+ if (IS_DISPLAY_VER(display, 14, 20) && !display->platform.battlemage) {
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 3) != 3)
+ return false;
+
+ /* DP_SINK_COUNT_ESI + 3 == DP_LINK_SERVICE_IRQ_VECTOR_ESI0 */
+ return drm_dp_dpcd_readb(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
+ &esi[3]) == 1;
+ }
+
return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4;
}
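
For reference, the split read works because the ESI bytes are contiguous in the DPCD (addresses per the drm DPCD defines), so the two transactions return the same data as the single 4-byte burst:

/*
 * esi[0] = DP_SINK_COUNT_ESI                 (0x2002)
 * esi[1] = DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 (0x2003)
 * esi[2] = DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 (0x2004)
 * esi[3] = DP_LINK_SERVICE_IRQ_VECTOR_ESI0   (0x2005)
 *
 * The WA merely breaks the burst between 0x2004 and 0x2005.
 */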
@@ -5005,8 +5098,6 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
bool link_ok = true;
bool reprobe_needed = false;
- drm_WARN_ON_ONCE(display->drm, intel_dp->mst.active_links < 0);
-
for (;;) {
u8 esi[4] = {};
u8 ack[4] = {};
@@ -5021,7 +5112,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
drm_dbg_kms(display->drm, "DPRX ESI: %4ph\n", esi);
- if (intel_dp->mst.active_links > 0 && link_ok &&
+ if (intel_dp_mst_active_streams(intel_dp) > 0 && link_ok &&
esi[3] & LINK_STATUS_CHANGED) {
if (!intel_dp_mst_link_status(intel_dp))
link_ok = false;
@@ -5082,7 +5173,7 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
u8 link_status[DP_LINK_STATUS_SIZE];
- if (!intel_dp->link_trained)
+ if (!intel_dp->link.active)
return false;
/*
@@ -5394,6 +5485,11 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
intel_psr_short_pulse(intel_dp);
+ if (intel_alpm_get_error(intel_dp)) {
+ intel_alpm_disable(intel_dp);
+ intel_dp->alpm_parameters.sink_alpm_error = true;
+ }
+
if (intel_dp_test_short_pulse(intel_dp))
reprobe_needed = true;
@@ -5829,20 +5925,21 @@ out_vdd_off:
}
static void
-intel_dp_force(struct drm_connector *connector)
+intel_dp_force(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ connector->base.base.id, connector->base.name);
if (!intel_display_driver_check_access(display))
return;
intel_dp_unset_edid(intel_dp);
- if (connector->status != connector_status_connected)
+ if (connector->base.status != connector_status_connected)
return;
intel_dp_set_edid(intel_dp);
@@ -5881,24 +5978,25 @@ static int intel_dp_get_modes(struct drm_connector *_connector)
}
static int
-intel_dp_connector_register(struct drm_connector *connector)
+intel_dp_connector_register(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
int ret;
- ret = intel_connector_register(connector);
+ ret = intel_connector_register(&connector->base);
if (ret)
return ret;
drm_dbg_kms(display->drm, "registering %s bus for %s\n",
- intel_dp->aux.name, connector->kdev->kobj.name);
+ intel_dp->aux.name, connector->base.kdev->kobj.name);
- intel_dp->aux.dev = connector->kdev;
+ intel_dp->aux.dev = connector->base.kdev;
ret = drm_dp_aux_register(&intel_dp->aux);
if (!ret)
- drm_dp_cec_register_connector(&intel_dp->aux, connector);
+ drm_dp_cec_register_connector(&intel_dp->aux, &connector->base);
if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
return ret;
@@ -5909,20 +6007,21 @@ intel_dp_connector_register(struct drm_connector *connector)
*/
if (intel_lspcon_init(dig_port)) {
if (intel_lspcon_detect_hdr_capability(dig_port))
- drm_connector_attach_hdr_output_metadata_property(connector);
+ drm_connector_attach_hdr_output_metadata_property(&connector->base);
}
return ret;
}
static void
-intel_dp_connector_unregister(struct drm_connector *connector)
+intel_dp_connector_unregister(struct drm_connector *_connector)
{
- struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
drm_dp_cec_unregister_connector(&intel_dp->aux);
drm_dp_aux_unregister(&intel_dp->aux);
- intel_connector_unregister(connector);
+ intel_connector_unregister(&connector->base);
}
void intel_dp_connector_sync_state(struct intel_connector *connector,
@@ -5983,21 +6082,21 @@ static int intel_modeset_tile_group(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(state);
struct drm_connector_list_iter conn_iter;
- struct drm_connector *connector;
+ struct intel_connector *connector;
int ret = 0;
drm_connector_list_iter_begin(display->drm, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
+ for_each_intel_connector_iter(connector, &conn_iter) {
struct drm_connector_state *conn_state;
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
- if (!connector->has_tile ||
- connector->tile_group->id != tile_group_id)
+ if (!connector->base.has_tile ||
+ connector->base.tile_group->id != tile_group_id)
continue;
conn_state = drm_atomic_get_connector_state(&state->base,
- connector);
+ &connector->base);
if (IS_ERR(conn_state)) {
ret = PTR_ERR(conn_state);
break;
@@ -6061,10 +6160,11 @@ static int intel_modeset_affected_transcoders(struct intel_atomic_state *state,
}
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
- struct drm_connector *connector)
+ struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
const struct drm_connector_state *old_conn_state =
- drm_atomic_get_old_connector_state(&state->base, connector);
+ drm_atomic_get_old_connector_state(&state->base, &connector->base);
const struct intel_crtc_state *old_crtc_state;
struct intel_crtc *crtc;
u8 transcoders;
@@ -6086,17 +6186,18 @@ static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
transcoders);
}
-static int intel_dp_connector_atomic_check(struct drm_connector *conn,
+static int intel_dp_connector_atomic_check(struct drm_connector *_connector,
struct drm_atomic_state *_state)
{
- struct intel_display *display = to_intel_display(conn->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
- struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn);
- struct intel_connector *intel_conn = to_intel_connector(conn);
- struct intel_dp *intel_dp = enc_to_intel_dp(intel_conn->encoder);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(_state, &connector->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
int ret;
- ret = intel_digital_connector_atomic_check(conn, &state->base);
+ ret = intel_digital_connector_atomic_check(&connector->base, &state->base);
if (ret)
return ret;
@@ -6106,12 +6207,12 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
return ret;
}
- if (!intel_connector_needs_modeset(state, conn))
+ if (!intel_connector_needs_modeset(state, &connector->base))
return 0;
ret = intel_dp_tunnel_atomic_check_state(state,
intel_dp,
- intel_conn);
+ connector);
if (ret)
return ret;
@@ -6122,26 +6223,26 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
if (DISPLAY_VER(display) < 9)
return 0;
- if (conn->has_tile) {
- ret = intel_modeset_tile_group(state, conn->tile_group->id);
+ if (connector->base.has_tile) {
+ ret = intel_modeset_tile_group(state, connector->base.tile_group->id);
if (ret)
return ret;
}
- return intel_modeset_synced_crtcs(state, conn);
+ return intel_modeset_synced_crtcs(state, &connector->base);
}
-static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
+static void intel_dp_oob_hotplug_event(struct drm_connector *_connector,
enum drm_connector_status hpd_state)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
bool hpd_high = hpd_state == connector_status_connected;
unsigned int hpd_pin = encoder->hpd_pin;
bool need_work = false;
- spin_lock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
if (hpd_high != test_bit(hpd_pin, &display->hotplug.oob_hotplug_last_state)) {
display->hotplug.event_bits |= BIT(hpd_pin);
@@ -6150,10 +6251,10 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
hpd_high);
need_work = true;
}
- spin_unlock_irq(&i915->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
if (need_work)
- intel_hpd_schedule_detection(i915);
+ intel_hpd_schedule_detection(display);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -6180,13 +6281,12 @@ enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_dp *intel_dp = &dig_port->dp;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
if (dig_port->base.type == INTEL_OUTPUT_EDP &&
(long_hpd ||
- intel_runtime_pm_suspended(&i915->runtime_pm) ||
+ intel_display_rpm_suspended(display) ||
!intel_pps_have_panel_power_or_vdd(intel_dp))) {
/*
* vdd off can generate a long/short pulse on eDP which
@@ -6283,36 +6383,37 @@ intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder)
}
static void
-intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
+intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
struct intel_display *display = to_intel_display(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->base.port;
if (!intel_dp_is_edp(intel_dp))
- drm_connector_attach_dp_subconnector_property(connector);
+ drm_connector_attach_dp_subconnector_property(&connector->base);
if (!display->platform.g4x && port != PORT_A)
- intel_attach_force_audio_property(connector);
+ intel_attach_force_audio_property(&connector->base);
- intel_attach_broadcast_rgb_property(connector);
+ intel_attach_broadcast_rgb_property(&connector->base);
if (HAS_GMCH(display))
- drm_connector_attach_max_bpc_property(connector, 6, 10);
+ drm_connector_attach_max_bpc_property(&connector->base, 6, 10);
else if (DISPLAY_VER(display) >= 5)
- drm_connector_attach_max_bpc_property(connector, 6, 12);
+ drm_connector_attach_max_bpc_property(&connector->base, 6, 12);
/* Register HDMI colorspace for case of lspcon */
if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) {
- drm_connector_attach_content_type_property(connector);
- intel_attach_hdmi_colorspace_property(connector);
+ drm_connector_attach_content_type_property(&connector->base);
+ intel_attach_hdmi_colorspace_property(&connector->base);
} else {
- intel_attach_dp_colorspace_property(connector);
+ intel_attach_dp_colorspace_property(&connector->base);
}
if (intel_dp_has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base))
- drm_connector_attach_hdr_output_metadata_property(connector);
+ drm_connector_attach_hdr_output_metadata_property(&connector->base);
if (HAS_VRR(display))
- drm_connector_attach_vrr_capable_property(connector);
+ drm_connector_attach_vrr_capable_property(&connector->base);
}
static void
@@ -6347,7 +6448,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct drm_display_mode *fixed_mode;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool has_dpcd;
@@ -6362,9 +6462,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* eDP and LVDS bail out early in this case to prevent interfering
* with an already powered-on LVDS power sequencer.
*/
- if (intel_get_lvds_encoder(dev_priv)) {
+ if (intel_get_lvds_encoder(display)) {
drm_WARN_ON(display->drm,
- !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+ !(HAS_PCH_IBX(display) || HAS_PCH_CPT(display)));
drm_info(display->drm,
"LVDS was detected, not registering eDP\n");
@@ -6395,7 +6495,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
*/
intel_hpd_enable_detection(encoder);
- intel_alpm_init_dpcd(intel_dp);
+ intel_alpm_init(intel_dp);
/* Cache DPCD and EDID for edp. */
has_dpcd = intel_edp_init_dpcd(intel_dp, connector);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 9189db4c2594..742ae26ac4a9 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -194,6 +194,7 @@ void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
int intel_dp_output_bpp(enum intel_output_format output_format, int bpp);
bool intel_dp_compute_config_limits(struct intel_dp *intel_dp,
+ struct intel_connector *connector,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits,
bool dsc,
@@ -208,5 +209,7 @@ bool intel_dp_has_connector(struct intel_dp *intel_dp,
const struct drm_connector_state *conn_state);
int intel_dp_dsc_max_src_input_bpc(struct intel_display *display);
int intel_dp_dsc_min_src_input_bpc(void);
+int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index ec27bbd70bcf..bf8e8e0cc19c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -3,8 +3,10 @@
* Copyright © 2020-2021 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
@@ -111,10 +113,9 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) {
+ if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(display)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
@@ -177,12 +178,11 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
u32 aux_clock_divider)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(intel_dp);
u32 timeout;
/* Max timeout value on G4x-BDW: 1.6ms */
- if (IS_BROADWELL(i915))
+ if (display->platform.broadwell)
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -247,7 +247,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
u32 aux_clock_divider;
enum intel_display_power_domain aux_domain;
intel_wakeref_t aux_wakeref;
- intel_wakeref_t pps_wakeref;
+ intel_wakeref_t pps_wakeref = NULL;
int i, ret, recv_bytes;
int try, clock = 0;
u32 status;
@@ -272,7 +272,20 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
aux_domain = intel_aux_power_domain(dig_port);
aux_wakeref = intel_display_power_get(display, aux_domain);
- pps_wakeref = intel_pps_lock(intel_dp);
+
+ /*
+ * The PPS state needs to be locked for:
+ * - eDP on all platforms, since AUX transfers on eDP need VDD power
+ * (either forced or via panel power) which depends on the PPS
+ * state.
+ * - non-eDP on platforms where the PPS is a pipe instance (VLV/CHV),
+ * since changing the PPS state (via a parallel modeset for
+ * instance) may interfere with the AUX transfers on a non-eDP
+ * output as well.
+ */
+ if (intel_dp_is_edp(intel_dp) ||
+ display->platform.valleyview || display->platform.cherryview)
+ pps_wakeref = intel_pps_lock(intel_dp);
/*
* We will be called with VDD already enabled for dpcd/edid/oui reads.
@@ -430,7 +443,9 @@ out:
if (vdd)
intel_pps_vdd_off_unlocked(intel_dp, false);
- intel_pps_unlock(intel_dp, pps_wakeref);
+ if (pps_wakeref)
+ intel_pps_unlock(intel_dp, pps_wakeref);
+
intel_display_power_put_async(display, aux_domain, aux_wakeref);
out_unlock:
intel_digital_port_unlock(encoder);
@@ -771,7 +786,6 @@ void intel_dp_aux_fini(struct intel_dp *intel_dp)
void intel_dp_aux_init(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -786,10 +800,10 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
} else if (DISPLAY_VER(display) >= 9) {
intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = skl_aux_data_reg;
- } else if (HAS_PCH_SPLIT(i915)) {
+ } else if (HAS_PCH_SPLIT(display)) {
intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
- } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ } else if (display->platform.valleyview || display->platform.cherryview) {
intel_dp->aux_ch_ctl_reg = vlv_aux_ctl_reg;
intel_dp->aux_ch_data_reg = vlv_aux_data_reg;
} else {
@@ -799,9 +813,9 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
if (DISPLAY_VER(display) >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
+ else if (display->platform.broadwell || display->platform.haswell)
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
- else if (HAS_PCH_SPLIT(i915))
+ else if (HAS_PCH_SPLIT(display))
intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
else
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 8173de8aec63..271b27c9de51 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -36,7 +36,6 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_backlight.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
@@ -149,7 +148,7 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
!(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
BIT(HDMI_STATIC_METADATA_TYPE1))) {
drm_info(display->drm,
- "[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
+ "[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d.\n",
connector->base.base.id, connector->base.name,
INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL);
return false;
@@ -663,7 +662,8 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_panel *panel = &connector->panel;
- if ((intel_dp->edp_dpcd[3] & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE)) {
+ if ((intel_dp->edp_dpcd[3] & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE) &&
+ (intel_dp->edp_dpcd[3] & DP_EDP_SMOOTH_BRIGHTNESS_CAPABLE)) {
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] AUX Luminance Based Backlight Control Supported!\n",
connector->base.base.id, connector->base.name);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 2966f5b39392..a479b63112ea 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -56,6 +56,8 @@
lt_dbg(_intel_dp, _dp_phy, "Sink disconnected: " _format, ## __VA_ARGS__); \
} while (0)
+#define MAX_SEQ_TRAIN_FAILURES 2
+
static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
{
memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
@@ -164,7 +166,7 @@ static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_
* resetting its internal state when the mode is changed from
* non-transparent to transparent.
*/
- if (intel_dp->link_trained) {
+ if (intel_dp->link.active) {
if (lttpr_count < 0 || intel_dp_lttpr_transparent_mode_enabled(intel_dp))
goto out_reset_lttpr_count;
@@ -711,8 +713,21 @@ void intel_dp_link_training_set_mode(struct intel_dp *intel_dp, int link_rate, b
static void intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ /*
+ * Currently, we set the MSA ignore bit based on vrr.in_range.
+ * We can't really read that out during driver load since we don't have
+ * the connector information read in yet. So if we do end up doing a
+ * modeset during initial_commit() we'll clear the MSA ignore bit.
+ * GOP likely wouldn't have set this bit so after the initial commit,
+ * if there are no modesets and we enable VRR mode seamlessly
+ * (without a full modeset), the MSA ignore bit might never get set.
+ *
+ * #TODO: Implement readout of vrr.in_range.
+ * We need fastset support for setting the MSA ignore bit in DPCD,
+ * especially on the first real commit when clearing the inherited flag.
+ */
intel_dp_link_training_set_mode(intel_dp,
- crtc_state->port_clock, crtc_state->vrr.flipline);
+ crtc_state->port_clock, crtc_state->vrr.in_range);
}
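
The MSA-ignore bit programmed by intel_dp_link_training_set_mode() lives in the sink's DP_DOWNSPREAD_CTRL DPCD register. A minimal sketch of such a write, assuming the standard defines from <drm/display/drm_dp.h> and hypothetical local flags (not taken from this patch):

	u8 downspread = 0;

	/* spread-spectrum clocking, if the source uses it (assumed flag) */
	if (source_uses_downspread)
		downspread |= DP_SPREAD_AMP_0_5;
	/*
	 * Tell the sink to ignore the MSA video timing parameters, so the
	 * source may vary the vertical timings for VRR
	 * (cf. crtc_state->vrr.in_range above).
	 */
	if (vrr_in_range)
		downspread |= DP_MSA_TIMING_PAR_IGNORE_EN;

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DOWNSPREAD_CTRL, downspread);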
void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
@@ -1110,7 +1125,10 @@ intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp,
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- intel_dp->link_trained = true;
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+ intel_dp->link.active = true;
intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
@@ -1120,6 +1138,15 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp,
wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n");
}
+
+ intel_hpd_unblock(encoder);
+
+ if (!display->hotplug.ignore_long_hpd &&
+ intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES) {
+ int delay_ms = intel_dp->link.seq_train_failures ? 0 : 2000;
+
+ intel_encoder_link_check_queue_work(encoder, delay_ms);
+ }
}
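
Taken together with the MAX_SEQ_TRAIN_FAILURES definition above, the re-check scheduling moved here from intel_dp_start_link_train() behaves roughly as in this sketch (a reading of the code, not part of the patch):

	static int link_check_delay_ms(const struct intel_dp *intel_dp)
	{
		/* clean training pass: verify the link again after 2 s */
		if (intel_dp->link.seq_train_failures == 0)
			return 2000;
		/* one failure so far: re-check immediately */
		if (intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES)
			return 0;
		/* two sequential failures: no re-check, fallback path instead */
		return -1;
	}

The queueing is additionally skipped when display->hotplug.ignore_long_hpd is set.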
static bool
@@ -1602,7 +1629,11 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
* non-transparent mode. During an earlier LTTPR detection this
* could've been prevented by an active link.
*/
- int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
+ int lttpr_count;
+
+ intel_hpd_block(encoder);
+
+ lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
if (lttpr_count < 0)
/* Still continue with enabling the port and link training. */
@@ -1620,7 +1651,6 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
lt_dbg(intel_dp, DP_PHY_DPRX, "Forcing link training failure\n");
} else if (passed) {
intel_dp->link.seq_train_failures = 0;
- intel_encoder_link_check_queue_work(encoder, 2000);
return;
}
@@ -1643,10 +1673,8 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
return;
}
- if (intel_dp->link.seq_train_failures < 2) {
- intel_encoder_link_check_queue_work(encoder, 0);
+ if (intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES)
return;
- }
if (intel_dp_schedule_fallback_link_training(state, intel_dp, crtc_state))
return;
@@ -1693,7 +1721,7 @@ static int i915_dp_force_link_rate_show(struct seq_file *m, void *data)
if (err)
return err;
- if (intel_dp->link_trained)
+ if (intel_dp->link.active)
current_rate = intel_dp->link_rate;
force_rate = intel_dp->link.force_rate;
@@ -1791,7 +1819,7 @@ static int i915_dp_force_lane_count_show(struct seq_file *m, void *data)
if (err)
return err;
- if (intel_dp->link_trained)
+ if (intel_dp->link.active)
current_lane_count = intel_dp->lane_count;
force_lane_count = intel_dp->link.force_lane_count;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 6dc2d31ccb5a..06f4ad8de591 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -27,10 +27,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
@@ -51,7 +52,9 @@
#include "intel_link_bw.h"
#include "intel_pfit.h"
#include "intel_psr.h"
+#include "intel_step.h"
#include "intel_vdsc.h"
+#include "intel_vrr.h"
#include "skl_scaler.h"
/*
@@ -104,6 +107,34 @@ static struct intel_dp *to_primary_dp(struct intel_encoder *encoder)
return &dig_port->dp;
}
+int intel_dp_mst_active_streams(struct intel_dp *intel_dp)
+{
+ return intel_dp->mst.active_streams;
+}
+
+static bool intel_dp_mst_dec_active_streams(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ drm_dbg_kms(display->drm, "active MST streams %d -> %d\n",
+ intel_dp->mst.active_streams, intel_dp->mst.active_streams - 1);
+
+ if (drm_WARN_ON(display->drm, intel_dp->mst.active_streams == 0))
+ return true;
+
+ return --intel_dp->mst.active_streams == 0;
+}
+
+static bool intel_dp_mst_inc_active_streams(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ drm_dbg_kms(display->drm, "active MST streams %d -> %d\n",
+ intel_dp->mst.active_streams, intel_dp->mst.active_streams + 1);
+
+ return intel_dp->mst.active_streams++ == 0;
+}
+
static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
bool dsc)
{
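
The new inc/dec helpers centralize the first-stream/last-stream bookkeeping that the enable/disable paths below previously open-coded against mst.active_links. A condensed sketch of the calling pattern, as used by the hunks that follow:

	/* enable path (cf. mst_stream_pre_enable() below): */
	first_mst_stream = intel_dp_mst_inc_active_streams(intel_dp);
	if (first_mst_stream)
		intel_dp_set_power(intel_dp, DP_SET_POWER_D0);

	/* disable path (cf. mst_stream_post_disable() below): */
	last_mst_stream = intel_dp_mst_dec_active_streams(intel_dp);
	/* last_mst_stream gates the link-wide teardown */

Note that intel_dp_mst_dec_active_streams() also returns true on an underflow WARN, so the last-stream teardown still runs rather than leaving link-wide state dangling.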
@@ -210,26 +241,6 @@ static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connec
num_joined_pipes);
}
-static void intel_dp_mst_compute_min_hblank(struct intel_crtc_state *crtc_state,
- int bpp_x16)
-{
- struct intel_display *display = to_intel_display(crtc_state);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
- int symbol_size = intel_dp_is_uhbr(crtc_state) ? 32 : 8;
- int hblank;
-
- if (DISPLAY_VER(display) < 20)
- return;
-
- /* Calculate min Hblank Link Layer Symbol Cycle Count for 8b/10b MST & 128b/132b */
- hblank = DIV_ROUND_UP((DIV_ROUND_UP
- (adjusted_mode->htotal - adjusted_mode->hdisplay, 4) * bpp_x16),
- symbol_size);
-
- crtc_state->min_hblank = hblank;
-}
-
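
The formula removed above (the computation moves into intel_dp_compute_min_hblank(), called from mst_stream_compute_config() below) is easiest to sanity-check with concrete numbers. A worked example with hypothetical values:

	/*
	 * htotal = 2200, hdisplay = 1920, link_bpp_x16 = 384
	 * (24 bpp in .4 fixed point), UHBR link => symbol_size = 32:
	 *
	 *   DIV_ROUND_UP(2200 - 1920, 4) = DIV_ROUND_UP(280, 4) = 70
	 *   70 * 384 = 26880
	 *   DIV_ROUND_UP(26880, 32) = 840 symbol cycles
	 */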
int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
@@ -300,8 +311,6 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
false, dsc_slice_count, link_bpp_x16);
- intel_dp_mst_compute_min_hblank(crtc_state, link_bpp_x16);
-
intel_dp_mst_compute_m_n(crtc_state,
local_bw_overhead,
link_bpp_x16,
@@ -590,12 +599,13 @@ adjust_limits_for_dsc_hblank_expansion_quirk(struct intel_dp *intel_dp,
static bool
mst_stream_compute_config_limits(struct intel_dp *intel_dp,
- const struct intel_connector *connector,
+ struct intel_connector *connector,
struct intel_crtc_state *crtc_state,
bool dsc,
struct link_config_limits *limits)
{
- if (!intel_dp_compute_config_limits(intel_dp, crtc_state, false, dsc,
+ if (!intel_dp_compute_config_limits(intel_dp, connector,
+ crtc_state, false, dsc,
limits))
return false;
@@ -710,6 +720,12 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
+ ret = intel_dp_compute_min_hblank(pipe_config, conn_state);
+ if (ret)
+ return ret;
+
+ intel_vrr_compute_config(pipe_config, conn_state);
+
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
intel_ddi_compute_min_voltage_level(pipe_config);
@@ -990,25 +1006,17 @@ static void mst_stream_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_display *display = to_intel_display(state);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- enum transcoder trans = old_crtc_state->cpu_transcoder;
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
-
- if (intel_dp->mst.active_links == 1)
- intel_dp->link_trained = false;
+ if (intel_dp_mst_active_streams(intel_dp) == 1)
+ intel_dp->link.active = false;
intel_hdcp_disable(intel_mst->connector);
intel_dp_sink_disable_decompression(state, connector, old_crtc_state);
-
- if (DISPLAY_VER(display) >= 20)
- intel_de_write(display, DP_MIN_HBLANK_CTL(trans), 0);
}
static void mst_stream_post_disable(struct intel_atomic_state *state,
@@ -1034,8 +1042,8 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
bool last_mst_stream;
int i;
- intel_dp->mst.active_links--;
- last_mst_stream = intel_dp->mst.active_links == 0;
+ last_mst_stream = intel_dp_mst_dec_active_streams(intel_dp);
+
drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && last_mst_stream &&
!intel_dp_mst_is_master_trans(old_crtc_state));
@@ -1062,6 +1070,8 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
drm_dp_remove_payload_part2(&intel_dp->mst.mgr, new_mst_state,
old_payload, new_payload);
+ intel_vrr_transcoder_disable(old_crtc_state);
+
intel_ddi_disable_transcoder_func(old_crtc_state);
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
@@ -1104,8 +1114,6 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
primary_encoder->post_disable(state, primary_encoder,
old_crtc_state, NULL);
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
}
static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
@@ -1116,7 +1124,7 @@ static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
struct intel_dp *intel_dp = to_primary_dp(encoder);
- if (intel_dp->mst.active_links == 0 &&
+ if (intel_dp_mst_active_streams(intel_dp) == 0 &&
primary_encoder->post_pll_disable)
primary_encoder->post_pll_disable(state, primary_encoder, old_crtc_state, old_conn_state);
}
@@ -1129,7 +1137,7 @@ static void mst_stream_pre_pll_enable(struct intel_atomic_state *state,
struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
struct intel_dp *intel_dp = to_primary_dp(encoder);
- if (intel_dp->mst.active_links == 0)
+ if (intel_dp_mst_active_streams(intel_dp) == 0)
primary_encoder->pre_pll_enable(state, primary_encoder,
pipe_config, NULL);
else
@@ -1189,13 +1197,11 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state,
*/
connector->encoder = encoder;
intel_mst->connector = connector;
- first_mst_stream = intel_dp->mst.active_links == 0;
+
+ first_mst_stream = intel_dp_mst_inc_active_streams(intel_dp);
drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && first_mst_stream &&
!intel_dp_mst_is_master_trans(pipe_config));
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
-
if (first_mst_stream)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
@@ -1210,8 +1216,6 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state,
intel_mst_reprobe_topology(intel_dp, pipe_config);
}
- intel_dp->mst.active_links++;
-
ret = drm_dp_add_payload_part1(&intel_dp->mst.mgr, mst_state,
drm_atomic_get_mst_payload_state(mst_state, connector->mst.port));
if (ret < 0)
@@ -1279,9 +1283,9 @@ static void mst_stream_enable(struct intel_atomic_state *state,
struct drm_dp_mst_topology_state *mst_state =
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
- bool first_mst_stream = intel_dp->mst.active_links == 1;
+ bool first_mst_stream = intel_dp_mst_active_streams(intel_dp) == 1;
struct intel_crtc *pipe_crtc;
- int ret, i, min_hblank;
+ int ret, i;
drm_WARN_ON(display->drm, pipe_config->has_pch_encoder);
@@ -1296,41 +1300,17 @@ static void mst_stream_enable(struct intel_atomic_state *state,
TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
}
- if (DISPLAY_VER(display) >= 20) {
- /*
- * adjust the BlankingStart/BlankingEnd framing control from
- * the calculated value
- */
- min_hblank = pipe_config->min_hblank - 2;
-
- /* Maximum value to be programmed is limited to 0x10 */
- min_hblank = min(0x10, min_hblank);
-
- /*
- * Minimum hblank accepted for 128b/132b would be 5 and for
- * 8b/10b would be 3 symbol count
- */
- if (intel_dp_is_uhbr(pipe_config))
- min_hblank = max(min_hblank, 5);
- else
- min_hblank = max(min_hblank, 3);
-
- intel_de_write(display, DP_MIN_HBLANK_CTL(trans),
- min_hblank);
- }
-
enable_bs_jitter_was(pipe_config);
intel_ddi_enable_transcoder_func(encoder, pipe_config);
+ intel_vrr_transcoder_enable(pipe_config);
+
intel_ddi_clear_act_sent(encoder, pipe_config);
intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, trans), 0,
TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
-
intel_ddi_wait_for_act_sent(encoder, pipe_config);
drm_dp_check_act_status(&intel_dp->mst.mgr);
@@ -1870,12 +1850,6 @@ mst_stream_encoders_create(struct intel_digital_port *dig_port)
}
int
-intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
-{
- return dig_port->dp.mst.active_links;
-}
-
-int
intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
{
struct intel_display *display = to_intel_display(dig_port);
@@ -2101,7 +2075,7 @@ void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp)
u8 rate_select;
u8 link_bw;
- if (intel_dp->link_trained)
+ if (intel_dp->link.active)
return;
if (intel_mst_probed_link_params_valid(intel_dp, link_rate, lane_count))
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index c1bbfeb02ca9..ab09b487c6bb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -18,7 +18,7 @@ struct intel_link_bw_limits;
int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port);
-int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port);
+int intel_dp_mst_active_streams(struct intel_dp *intel_dp);
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_source_support(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 429f89543789..69f242139420 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -222,9 +222,7 @@ static const struct bxt_dpio_phy_info glk_dpio_phy_info[] = {
static const struct bxt_dpio_phy_info *
bxt_get_phy_list(struct intel_display *display, int *count)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (IS_GEMINILAKE(dev_priv)) {
+ if (display->platform.geminilake) {
*count = ARRAY_SIZE(glk_dpio_phy_info);
return glk_dpio_phy_info;
} else {
@@ -808,9 +806,9 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
vlv_dpio_put(dev_priv);
}
-void chv_data_lane_soft_reset(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- bool reset)
+static void __chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
@@ -853,6 +851,17 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
}
}
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool reset)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ vlv_dpio_get(i915);
+ __chv_data_lane_soft_reset(encoder, crtc_state, reset);
+ vlv_dpio_put(i915);
+}
+
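
This follows the usual kernel split between a double-underscore variant that assumes the caller already holds the lock and a plain-named wrapper that takes it, so chv_phy_pre_pll_enable() and chv_phy_pre_encoder_enable() can keep calling the locked variant inside their existing vlv_dpio_get()/vlv_dpio_put() sections. A generic sketch of the pattern, with hypothetical names:

	struct foo { struct mutex lock; };

	static void __do_thing(struct foo *foo)
	{
		lockdep_assert_held(&foo->lock);
		/* ... touch the hardware ... */
	}

	static void do_thing(struct foo *foo)
	{
		mutex_lock(&foo->lock);		/* cf. vlv_dpio_get() */
		__do_thing(foo);
		mutex_unlock(&foo->lock);	/* cf. vlv_dpio_put() */
	}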
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
@@ -880,7 +889,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
vlv_dpio_get(dev_priv);
/* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, crtc_state, true);
+ __chv_data_lane_soft_reset(encoder, crtc_state, true);
/* program left/right clock distribution */
if (pipe != PIPE_B) {
@@ -1008,7 +1017,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
}
/* Deassert data lane reset */
- chv_data_lane_soft_reset(encoder, crtc_state, false);
+ __chv_data_lane_soft_reset(encoder, crtc_state, false);
vlv_dpio_put(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 08a30e5aafce..a9e9b98d0bf9 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -373,14 +373,14 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
- return i915->display.vbt.lvds_ssc_freq;
- else if (HAS_PCH_SPLIT(i915))
+ return display->vbt.lvds_ssc_freq;
+ else if (HAS_PCH_SPLIT(display))
return 120000;
- else if (DISPLAY_VER(i915) != 2)
+ else if (DISPLAY_VER(display) != 2)
return 96000;
else
return 48000;
@@ -389,27 +389,27 @@ static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
struct intel_dpll_hw_state *dpll_hw_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
- if (DISPLAY_VER(dev_priv) >= 4) {
+ if (DISPLAY_VER(display) >= 4) {
u32 tmp;
/* No way to read it out on pipes B and C */
- if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
- tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
+ if (display->platform.cherryview && crtc->pipe != PIPE_A)
+ tmp = display->state.chv_dpll_md[crtc->pipe];
else
- tmp = intel_de_read(dev_priv,
- DPLL_MD(dev_priv, crtc->pipe));
+ tmp = intel_de_read(display,
+ DPLL_MD(display, crtc->pipe));
hw_state->dpll_md = tmp;
}
- hw_state->dpll = intel_de_read(dev_priv, DPLL(dev_priv, crtc->pipe));
+ hw_state->dpll = intel_de_read(display, DPLL(display, crtc->pipe));
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- hw_state->fp0 = intel_de_read(dev_priv, FP0(crtc->pipe));
- hw_state->fp1 = intel_de_read(dev_priv, FP1(crtc->pipe));
+ if (!display->platform.valleyview && !display->platform.cherryview) {
+ hw_state->fp0 = intel_de_read(display, FP0(crtc->pipe));
+ hw_state->fp1 = intel_de_read(display, FP1(crtc->pipe));
} else {
/* Mask out read-only status bits. */
hw_state->dpll &= ~(DPLL_LOCK_VLV |
@@ -421,8 +421,8 @@ void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
/* Returns the clock of the currently programmed mode of the given pipe. */
void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
u32 dpll = hw_state->dpll;
u32 fp;
@@ -436,7 +436,7 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
fp = hw_state->fp1;
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
- if (IS_PINEVIEW(dev_priv)) {
+ if (display->platform.pineview) {
clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
} else {
@@ -444,8 +444,8 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
- if (DISPLAY_VER(dev_priv) != 2) {
- if (IS_PINEVIEW(dev_priv))
+ if (DISPLAY_VER(display) != 2) {
+ if (display->platform.pineview)
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
else
@@ -462,23 +462,23 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
7 : 14;
break;
default:
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Unknown DPLL mode %08x in programmed "
"mode\n", (int)(dpll & DPLL_MODE_MASK));
return;
}
- if (IS_PINEVIEW(dev_priv))
+ if (display->platform.pineview)
port_clock = pnv_calc_dpll_params(refclk, &clock);
else
port_clock = i9xx_calc_dpll_params(refclk, &clock);
} else {
enum pipe lvds_pipe;
- if (IS_I85X(dev_priv) &&
- intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
+ if (display->platform.i85x &&
+ intel_lvds_port_enabled(display, LVDS, &lvds_pipe) &&
lvds_pipe == crtc->pipe) {
- u32 lvds = intel_de_read(dev_priv, LVDS);
+ u32 lvds = intel_de_read(display, LVDS);
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
@@ -577,7 +577,7 @@ void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
* Returns whether the given set of divisors are valid for a given refclk with
* the given connectors.
*/
-static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
+static bool intel_pll_is_valid(struct intel_display *display,
const struct intel_limit *limit,
const struct dpll *clock)
{
@@ -590,14 +590,14 @@ static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
return false;
- if (!IS_PINEVIEW(dev_priv) &&
- !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv))
+ if (!display->platform.pineview &&
+ !display->platform.valleyview && !display->platform.cherryview &&
+ !display->platform.broxton && !display->platform.geminilake)
if (clock->m1 <= clock->m2)
return false;
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
+ if (!display->platform.valleyview && !display->platform.cherryview &&
+ !display->platform.broxton && !display->platform.geminilake) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
return false;
if (clock->m < limit->m.min || limit->m.max < clock->m)
@@ -620,7 +620,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
const struct intel_crtc_state *crtc_state,
int target)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
@@ -628,7 +628,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
* We haven't figured out how to reliably set up different
* single/dual channel state, if we even can.
*/
- if (intel_is_dual_link_lvds(dev_priv))
+ if (intel_is_dual_link_lvds(display))
return limit->p2.p2_fast;
else
return limit->p2.p2_slow;
@@ -656,7 +656,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
int err = target;
@@ -677,7 +677,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
@@ -714,7 +714,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
int err = target;
@@ -733,7 +733,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
int this_err;
pnv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
@@ -770,7 +770,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
int max_n;
bool found = false;
@@ -794,7 +794,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
@@ -817,7 +817,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
* Check if the calculated PLL configuration is more optimal compared to the
* best configuration and error found so far. Return the calculated error.
*/
-static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
+static bool vlv_PLL_is_optimal(struct intel_display *display, int target_freq,
const struct dpll *calculated_clock,
const struct dpll *best_clock,
unsigned int best_error_ppm,
@@ -827,13 +827,13 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
* For CHV ignore the error and consider only the P value.
* Prefer a bigger P value based on HW requirements.
*/
- if (IS_CHERRYVIEW(to_i915(dev))) {
+ if (display->platform.cherryview) {
*error_ppm = 0;
return calculated_clock->p > best_clock->p;
}
- if (drm_WARN_ON_ONCE(dev, !target_freq))
+ if (drm_WARN_ON_ONCE(display->drm, !target_freq))
return false;
*error_ppm = div_u64(1000000ULL *
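
The metric computed here is the deviation from the target dot clock in parts per million, which is why bestppm is initialized to 1000000 in vlv_find_best_dpll() below. A worked example with hypothetical numbers:

	/*
	 * target_freq = 148500, calculated_clock->dot = 148350:
	 *
	 *   error_ppm = 1000000 * |148500 - 148350| / 148500
	 *             = 1000000 * 150 / 148500
	 *             ~= 1010 ppm
	 */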
@@ -864,8 +864,7 @@ vlv_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
unsigned int bestppm = 1000000;
/* min update 19.2 MHz */
@@ -889,12 +888,12 @@ vlv_find_best_dpll(const struct intel_limit *limit,
vlv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
- if (!vlv_PLL_is_optimal(dev, target,
+ if (!vlv_PLL_is_optimal(display, target,
&clock,
best_clock,
bestppm, &ppm))
@@ -922,8 +921,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
+ struct intel_display *display = to_intel_display(crtc_state);
unsigned int best_error_ppm;
struct dpll clock;
u64 m2;
@@ -958,10 +956,10 @@ chv_find_best_dpll(const struct intel_limit *limit,
chv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
+ if (!intel_pll_is_valid(display, limit, &clock))
continue;
- if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
+ if (!vlv_PLL_is_optimal(display, target, &clock, best_clock,
best_error_ppm, &error_ppm))
continue;
@@ -1005,8 +1003,6 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
const struct dpll *reduced_clock)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
@@ -1016,8 +1012,8 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+ if (display->platform.i945g || display->platform.i945gm ||
+ display->platform.g33 || display->platform.pineview) {
dpll |= (crtc_state->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
@@ -1030,10 +1026,10 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- if (IS_G4X(dev_priv)) {
+ if (display->platform.g4x) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- } else if (IS_PINEVIEW(dev_priv)) {
+ } else if (display->platform.pineview) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
WARN_ON(reduced_clock->p1 != clock->p1);
} else {
@@ -1057,7 +1053,7 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
}
WARN_ON(reduced_clock->p2 != clock->p2);
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
if (crtc_state->sdvo_tv_clock)
@@ -1075,11 +1071,10 @@ static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
const struct dpll *clock,
const struct dpll *reduced_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
- if (IS_PINEVIEW(dev_priv)) {
+ if (display->platform.pineview) {
hw_state->fp0 = pnv_dpll_compute_fp(clock);
hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock);
} else {
@@ -1089,7 +1084,7 @@ static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock);
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
hw_state->dpll_md = i965_dpll_md(crtc_state);
}
@@ -1098,8 +1093,6 @@ static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
const struct dpll *reduced_clock)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
@@ -1129,7 +1122,7 @@ static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
* both DPLLS. The spec says we should disable the DVO 2X clock
* when not needed, but this seems to work fine in practice.
*/
- if (IS_I830(dev_priv) ||
+ if (display->platform.i830 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
@@ -1157,14 +1150,14 @@ static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
int ret;
- if (DISPLAY_VER(dev_priv) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
@@ -1186,13 +1179,13 @@ static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
- if (DISPLAY_VER(dev_priv) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
@@ -1241,12 +1234,10 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- ((intel_panel_use_ssc(display) && i915->display.vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(i915))))
+ ((intel_panel_use_ssc(display) && display->vbt.lvds_ssc_freq == 100000) ||
+ (HAS_PCH_IBX(display) && intel_is_dual_link_lvds(display))))
return 25;
if (crtc_state->sdvo_tv_clock)
@@ -1276,8 +1267,6 @@ static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
const struct dpll *reduced_clock)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
dpll = DPLL_VCO_ENABLE;
@@ -1311,7 +1300,7 @@ static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
* clear if it's a win or loss power wise. No point in doing
* this on ILK at all since it has a fixed DPLL<->pipe mapping.
*/
- if (INTEL_NUM_PIPES(dev_priv) == 3 &&
+ if (INTEL_NUM_PIPES(display) == 3 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
dpll |= DPLL_SDVO_HIGH_SPEED;
@@ -1362,7 +1351,6 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1375,13 +1363,13 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
- dev_priv->display.vbt.lvds_ssc_freq);
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
+ display->vbt.lvds_ssc_freq);
+ refclk = display->vbt.lvds_ssc_freq;
}
- if (intel_is_dual_link_lvds(dev_priv)) {
+ if (intel_is_dual_link_lvds(display)) {
if (refclk == 100000)
limit = &ilk_limits_dual_lvds_100m;
else
@@ -1539,7 +1527,6 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1547,13 +1534,13 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
- if (intel_is_dual_link_lvds(dev_priv))
+ if (intel_is_dual_link_lvds(display))
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
@@ -1589,7 +1576,6 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1597,8 +1583,8 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
@@ -1628,7 +1614,6 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1636,8 +1621,8 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
@@ -1669,7 +1654,6 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1677,8 +1661,8 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
@@ -1751,12 +1735,12 @@ static const struct intel_dpll_funcs i8xx_dpll_funcs = {
int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
- drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
+ drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@@ -1764,9 +1748,9 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->hw.enable)
return 0;
- ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc);
+ ret = display->funcs.dpll->crtc_compute_clock(state, crtc);
if (ret) {
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
crtc->base.base.id, crtc->base.name);
return ret;
}
@@ -1777,23 +1761,23 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
- drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
- drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
+ drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
+ drm_WARN_ON(display->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
if (!crtc_state->hw.enable || crtc_state->shared_dpll)
return 0;
- if (!i915->display.funcs.dpll->crtc_get_shared_dpll)
+ if (!display->funcs.dpll->crtc_get_shared_dpll)
return 0;
- ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc);
+ ret = display->funcs.dpll->crtc_get_shared_dpll(state, crtc);
if (ret) {
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
crtc->base.base.id, crtc->base.name);
return ret;
}
@@ -1802,43 +1786,42 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
}
void
-intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 14)
- dev_priv->display.funcs.dpll = &mtl_dpll_funcs;
- else if (IS_DG2(dev_priv))
- dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
- else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
- dev_priv->display.funcs.dpll = &hsw_dpll_funcs;
- else if (HAS_PCH_SPLIT(dev_priv))
- dev_priv->display.funcs.dpll = &ilk_dpll_funcs;
- else if (IS_CHERRYVIEW(dev_priv))
- dev_priv->display.funcs.dpll = &chv_dpll_funcs;
- else if (IS_VALLEYVIEW(dev_priv))
- dev_priv->display.funcs.dpll = &vlv_dpll_funcs;
- else if (IS_G4X(dev_priv))
- dev_priv->display.funcs.dpll = &g4x_dpll_funcs;
- else if (IS_PINEVIEW(dev_priv))
- dev_priv->display.funcs.dpll = &pnv_dpll_funcs;
- else if (DISPLAY_VER(dev_priv) != 2)
- dev_priv->display.funcs.dpll = &i9xx_dpll_funcs;
+intel_dpll_init_clock_hook(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) >= 14)
+ display->funcs.dpll = &mtl_dpll_funcs;
+ else if (display->platform.dg2)
+ display->funcs.dpll = &dg2_dpll_funcs;
+ else if (DISPLAY_VER(display) >= 9 || HAS_DDI(display))
+ display->funcs.dpll = &hsw_dpll_funcs;
+ else if (HAS_PCH_SPLIT(display))
+ display->funcs.dpll = &ilk_dpll_funcs;
+ else if (display->platform.cherryview)
+ display->funcs.dpll = &chv_dpll_funcs;
+ else if (display->platform.valleyview)
+ display->funcs.dpll = &vlv_dpll_funcs;
+ else if (display->platform.g4x)
+ display->funcs.dpll = &g4x_dpll_funcs;
+ else if (display->platform.pineview)
+ display->funcs.dpll = &pnv_dpll_funcs;
+ else if (DISPLAY_VER(display) != 2)
+ display->funcs.dpll = &i9xx_dpll_funcs;
else
- dev_priv->display.funcs.dpll = &i8xx_dpll_funcs;
+ display->funcs.dpll = &i8xx_dpll_funcs;
}
-static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
+static bool i9xx_has_pps(struct intel_display *display)
{
- if (IS_I830(dev_priv))
+ if (display->platform.i830)
return false;
- return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+ return display->platform.pineview || display->platform.mobile;
}
void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
int i;
@@ -1846,27 +1829,27 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
- if (i9xx_has_pps(dev_priv))
+ if (i9xx_has_pps(display))
assert_pps_unlocked(display, pipe);
- intel_de_write(dev_priv, FP0(pipe), hw_state->fp0);
- intel_de_write(dev_priv, FP1(pipe), hw_state->fp1);
+ intel_de_write(display, FP0(pipe), hw_state->fp0);
+ intel_de_write(display, FP1(pipe), hw_state->fp1);
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- intel_de_write(dev_priv, DPLL(dev_priv, pipe),
+ intel_de_write(display, DPLL(display, pipe),
hw_state->dpll & ~DPLL_VGA_MODE_DIS);
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
/* Wait for the clocks to stabilize. */
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_posting_read(display, DPLL(display, pipe));
udelay(150);
- if (DISPLAY_VER(dev_priv) >= 4) {
- intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe),
+ if (DISPLAY_VER(display) >= 4) {
+ intel_de_write(display, DPLL_MD(display, pipe),
hw_state->dpll_md);
} else {
/* The pixel multiplier can only be updated once the
@@ -1874,20 +1857,21 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
*
* So write it again.
*/
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
}
/* We do this three times for luck */
for (i = 0; i < 3; i++) {
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
+ intel_de_posting_read(display, DPLL(display, pipe));
udelay(150); /* wait for warmup */
}
}
-static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
+static void vlv_pllb_recal_opamp(struct intel_display *display,
enum dpio_phy phy, enum dpio_channel ch)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
/*
@@ -1916,6 +1900,7 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct dpll *clock = &crtc_state->dpll;
@@ -1930,7 +1915,7 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
/* PLL B needs special handling */
if (pipe == PIPE_B)
- vlv_pllb_recal_opamp(dev_priv, phy, ch);
+ vlv_pllb_recal_opamp(display, phy, ch);
/* Set up Tx target for periodic Rcomp update */
vlv_dpio_write(dev_priv, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
@@ -2003,24 +1988,23 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
+ intel_de_posting_read(display, DPLL(display, pipe));
udelay(150);
- if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1))
- drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
+ if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
+ drm_err(display->drm, "DPLL %d failed to lock\n", pipe);
}
void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
@@ -2030,7 +2014,7 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
assert_pps_unlocked(display, pipe);
/* Enable Refclk */
- intel_de_write(dev_priv, DPLL(dev_priv, pipe),
+ intel_de_write(display, DPLL(display, pipe),
hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
if (hw_state->dpll & DPLL_VCO_ENABLE) {
@@ -2038,8 +2022,8 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
_vlv_enable_pll(crtc_state);
}
- intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe), hw_state->dpll_md);
- intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe));
+ intel_de_write(display, DPLL_MD(display, pipe), hw_state->dpll_md);
+ intel_de_posting_read(display, DPLL_MD(display, pipe));
}
static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
@@ -2133,6 +2117,7 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -2156,18 +2141,17 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
udelay(1);
/* Enable PLL */
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
/* Check PLL is locked */
- if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1))
- drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
+ if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
+ drm_err(display->drm, "PLL %d failed to lock\n", pipe);
}
void chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
@@ -2177,7 +2161,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
assert_pps_unlocked(display, pipe);
/* Enable Refclk and SSC */
- intel_de_write(dev_priv, DPLL(dev_priv, pipe),
+ intel_de_write(display, DPLL(display, pipe),
hw_state->dpll & ~DPLL_VCO_ENABLE);
if (hw_state->dpll & DPLL_VCO_ENABLE) {
@@ -2192,29 +2176,29 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
* DPLLCMD is AWOL. Use chicken bits to propagate
* the value from DPLLBMD to either pipe B or C.
*/
- intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
- intel_de_write(dev_priv, DPLL_MD(dev_priv, PIPE_B),
+ intel_de_write(display, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
+ intel_de_write(display, DPLL_MD(display, PIPE_B),
hw_state->dpll_md);
- intel_de_write(dev_priv, CBR4_VLV, 0);
- dev_priv->display.state.chv_dpll_md[pipe] = hw_state->dpll_md;
+ intel_de_write(display, CBR4_VLV, 0);
+ display->state.chv_dpll_md[pipe] = hw_state->dpll_md;
/*
* DPLLB VGA mode also seems to cause problems.
* We should always have it disabled.
*/
- drm_WARN_ON(&dev_priv->drm,
- (intel_de_read(dev_priv, DPLL(dev_priv, PIPE_B)) &
+ drm_WARN_ON(display->drm,
+ (intel_de_read(display, DPLL(display, PIPE_B)) &
DPLL_VGA_MODE_DIS) == 0);
} else {
- intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe),
+ intel_de_write(display, DPLL_MD(display, pipe),
hw_state->dpll_md);
- intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe));
+ intel_de_posting_read(display, DPLL_MD(display, pipe));
}
}
/**
* vlv_force_pll_on - forcibly enable just the PLL
- * @dev_priv: i915 private structure
+ * @display: display device
* @pipe: pipe PLL to enable
* @dpll: PLL configuration
*
@@ -2222,10 +2206,9 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
* in cases where we need the PLL enabled even when @pipe is not going to
* be enabled.
*/
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+int vlv_force_pll_on(struct intel_display *display, enum pipe pipe,
const struct dpll *dpll)
{
- struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
struct intel_crtc_state *crtc_state;
@@ -2238,7 +2221,7 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
crtc_state->dpll = *dpll;
crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
chv_compute_dpll(crtc_state);
chv_enable_pll(crtc_state);
} else {
@@ -2251,9 +2234,8 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
return 0;
}
-void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+void vlv_disable_pll(struct intel_display *display, enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
u32 val;
/* Make sure the pipe isn't still relying on us */
@@ -2268,9 +2250,9 @@ void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_de_posting_read(display, DPLL(display, pipe));
}
-void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+void chv_disable_pll(struct intel_display *display, enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
@@ -2316,18 +2298,18 @@ void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
/**
* vlv_force_pll_off - forcibly disable just the PLL
- * @dev_priv: i915 private structure
+ * @display: display device
* @pipe: pipe PLL to disable
*
* Disable the PLL for @pipe. To be used in cases where the PLL was
* force-enabled even though @pipe was not going to be enabled.
*/
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
+void vlv_force_pll_off(struct intel_display *display, enum pipe pipe)
{
- if (IS_CHERRYVIEW(dev_priv))
- chv_disable_pll(dev_priv, pipe);
+ if (display->platform.cherryview)
+ chv_disable_pll(display, pipe);
else
- vlv_disable_pll(dev_priv, pipe);
+ vlv_disable_pll(display, pipe);
}
/* Only for pre-ILK configs */
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index 21d06cbd2ce7..280e90a57c87 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -8,16 +8,15 @@
#include <linux/types.h>
+enum pipe;
struct dpll;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
struct intel_dpll_hw_state;
-enum pipe;
-void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
+void intel_dpll_init_clock_hook(struct intel_display *display);
int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
@@ -29,14 +28,14 @@ void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
void vlv_compute_dpll(struct intel_crtc_state *crtc_state);
void chv_compute_dpll(struct intel_crtc_state *crtc_state);
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+int vlv_force_pll_on(struct intel_display *display, enum pipe pipe,
const struct dpll *dpll);
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
+void vlv_force_pll_off(struct intel_display *display, enum pipe pipe);
void chv_enable_pll(const struct intel_crtc_state *crtc_state);
-void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void chv_disable_pll(struct intel_display *display, enum pipe pipe);
void vlv_enable_pll(const struct intel_crtc_state *crtc_state);
-void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void vlv_disable_pll(struct intel_display *display, enum pipe pipe);
void i9xx_enable_pll(const struct intel_crtc_state *crtc_state);
void i9xx_disable_pll(const struct intel_crtc_state *crtc_state);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index c825a507b905..9da051a3f455 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -24,9 +24,11 @@
#include <linux/math.h>
#include <linux/string_helpers.h>
+#include <drm/drm_print.h>
+
#include "bxt_dpio_phy_regs.h"
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_cx0_phy.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -38,6 +40,7 @@
#include "intel_hti.h"
#include "intel_mg_phy_regs.h"
#include "intel_pch_refclk.h"
+#include "intel_step.h"
#include "intel_tc.h"
/**
@@ -257,7 +260,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- unsigned int pipe_mask = BIT(crtc->pipe);
+ unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
unsigned int old_mask;
if (drm_WARN_ON(display->drm, !pll))
@@ -303,7 +306,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- unsigned int pipe_mask = BIT(crtc->pipe);
+ unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
/* PCH only available on ILK+ */
if (DISPLAY_VER(display) < 5)
@@ -609,13 +612,12 @@ static int ibx_get_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_shared_dpll *pll;
enum intel_dpll_id id;
- if (HAS_PCH_IBX(i915)) {
+ if (HAS_PCH_IBX(display)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
id = (enum intel_dpll_id) crtc->pipe;
pll = intel_get_shared_dpll_by_id(display, id);
@@ -715,7 +717,6 @@ static void hsw_ddi_spll_enable(struct intel_display *display,
static void hsw_ddi_wrpll_disable(struct intel_display *display,
struct intel_shared_dpll *pll)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const enum intel_dpll_id id = pll->info->id;
intel_de_rmw(display, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
@@ -726,13 +727,12 @@ static void hsw_ddi_wrpll_disable(struct intel_display *display,
* that depend on it have been shut down.
*/
if (display->dpll.pch_ssc_use & BIT(id))
- intel_init_pch_refclk(i915);
+ intel_init_pch_refclk(display);
}
static void hsw_ddi_spll_disable(struct intel_display *display,
struct intel_shared_dpll *pll)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
enum intel_dpll_id id = pll->info->id;
intel_de_rmw(display, SPLL_CTL, SPLL_PLL_ENABLE, 0);
@@ -743,7 +743,7 @@ static void hsw_ddi_spll_disable(struct intel_display *display,
* that depend on it have been shut down.
*/
if (display->dpll.pch_ssc_use & BIT(id))
- intel_init_pch_refclk(i915);
+ intel_init_pch_refclk(display);
}
static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display,
@@ -2606,10 +2606,8 @@ ehl_combo_pll_div_frac_wa_needed(struct intel_display *display)
{
return ((display->platform.elkhartlake &&
IS_DISPLAY_STEP(display, STEP_B0, STEP_FOREVER)) ||
- display->platform.tigerlake ||
- display->platform.alderlake_s ||
- display->platform.alderlake_p) &&
- display->dpll.ref_clks.nssc == 38400;
+ DISPLAY_VER(display) >= 12) &&
+ display->dpll.ref_clks.nssc == 38400;
}
struct icl_combo_pll_params {
@@ -4309,7 +4307,6 @@ static const struct intel_dpll_mgr adlp_pll_mgr = {
*/
void intel_shared_dpll_init(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_dpll_mgr *dpll_mgr = NULL;
const struct dpll_info *dpll_info;
int i;
@@ -4339,7 +4336,7 @@ void intel_shared_dpll_init(struct intel_display *display)
dpll_mgr = &skl_pll_mgr;
else if (HAS_DDI(display))
dpll_mgr = &hsw_pll_mgr;
- else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
+ else if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display))
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr)
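
The hunks above show the conversion pattern used throughout this series:
display code derives struct intel_display with to_intel_display() from
whatever display object is at hand instead of going through struct
drm_i915_private, which is what lets the "i915_drv.h" include be dropped.
A minimal sketch of the pattern, assuming a hypothetical helper name
(example_log_encoder is not part of the patch):

static void example_log_encoder(struct intel_encoder *encoder)
{
	/* before: struct drm_i915_private *i915 = to_i915(encoder->base.dev); */
	struct intel_display *display = to_intel_display(encoder);

	drm_dbg_kms(display->drm, "[ENCODER:%d:%s]\n",
		    encoder->base.base.id, encoder->base.name);
}
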
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 0d8ebe38226e..43bd97e4f589 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -9,6 +9,7 @@
#include "gt/gen8_ppgtt.h"
#include "i915_drv.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
@@ -127,7 +128,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
struct drm_i915_private *i915 = vm->i915;
struct intel_display *display = &i915->display;
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct i915_vma *vma;
void __iomem *iomem;
struct i915_gem_ww_ctx ww;
@@ -137,7 +138,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
if (i915_gem_object_is_stolen(dpt->obj))
pin_flags |= PIN_MAPPABLE;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
atomic_inc(&display->restore.pending_fb_pin);
for_i915_gem_ww(&ww, err, true) {
@@ -169,7 +170,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
dpt->obj->mm.dirty = true;
atomic_dec(&display->restore.pending_fb_pin);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return err ? ERR_PTR(err) : vma;
}
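
Alongside the struct conversion, runtime PM references in intel_dpt.c are
now taken through the display-local wrappers instead of touching
i915->runtime_pm directly, and the cookie type changes from intel_wakeref_t
to struct ref_tracker *. A minimal usage sketch, assuming only the calls
visible in the hunks above:

	struct ref_tracker *wakeref;

	wakeref = intel_display_rpm_get(display);
	/* ... access display hardware ... */
	intel_display_rpm_put(display, wakeref);
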
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.c b/drivers/gpu/drm/i915/display/intel_dpt_common.c
index d2dede0a5229..ce5aa0ca0fa5 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt_common.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt_common.c
@@ -3,7 +3,6 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -12,9 +11,9 @@
void intel_dpt_configure(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- if (DISPLAY_VER(i915) == 14) {
+ if (DISPLAY_VER(display) == 14) {
enum pipe pipe = crtc->pipe;
enum plane_id plane_id;
@@ -22,15 +21,15 @@ void intel_dpt_configure(struct intel_crtc *crtc)
if (plane_id == PLANE_CURSOR)
continue;
- intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id),
+ intel_de_rmw(display, PLANE_CHICKEN(pipe, plane_id),
PLANE_CHICKEN_DISABLE_DPT,
- i915->display.params.enable_dpt ? 0 :
+ display->params.enable_dpt ? 0 :
PLANE_CHICKEN_DISABLE_DPT);
}
- } else if (DISPLAY_VER(i915) == 13) {
- intel_de_rmw(i915, CHICKEN_MISC_2,
+ } else if (DISPLAY_VER(display) == 13) {
+ intel_de_rmw(display, CHICKEN_MISC_2,
CHICKEN_MISC_DISABLE_DPT,
- i915->display.params.enable_dpt ? 0 :
+ display->params.enable_dpt ? 0 :
CHICKEN_MISC_DISABLE_DPT);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 9fc4003d1579..481488d1fe67 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -4,13 +4,15 @@
*
*/
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
#include "intel_dsb_buffer.h"
@@ -142,10 +144,10 @@ static int dsb_vtotal(struct intel_atomic_state *state,
static int dsb_dewake_scanline_start(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *crtc_state =
intel_pre_commit_crtc_state(state, crtc);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- unsigned int latency = skl_watermark_max_latency(i915, 0);
+ unsigned int latency = skl_watermark_max_latency(display, 0);
return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode) -
intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, latency);
@@ -795,22 +797,22 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
enum intel_dsb_id dsb_id,
unsigned int max_cmds)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- intel_wakeref_t wakeref;
+ struct intel_display *display = to_intel_display(state);
+ struct ref_tracker *wakeref;
struct intel_dsb *dsb;
unsigned int size;
- if (!HAS_DSB(i915))
+ if (!HAS_DSB(display))
return NULL;
- if (!i915->display.params.enable_dsb)
+ if (!display->params.enable_dsb)
return NULL;
dsb = kzalloc(sizeof(*dsb), GFP_KERNEL);
if (!dsb)
goto out;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
/* ~1 qword per instruction, full cachelines */
size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
@@ -818,7 +820,7 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size))
goto out_put_rpm;
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
dsb->id = dsb_id;
dsb->crtc = crtc;
@@ -831,10 +833,10 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
return dsb;
out_put_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
kfree(dsb);
out:
- drm_info_once(&i915->drm,
+ drm_info_once(display->drm,
"[CRTC:%d:%s] DSB %d queue setup failed, will fallback to MMIO for display HW programming\n",
crtc->base.base.id, crtc->base.name, dsb_id);
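
intel_dsb_prepare() intentionally returns NULL on every failure path so
callers can fall back to MMIO programming, and the command buffer is sized
at roughly one qword per instruction rounded up to whole cachelines. A
hedged caller-side sketch (the 128-command budget is an arbitrary example;
INTEL_DSB_0 is the first id from enum intel_dsb_id):

	struct intel_dsb *dsb;

	dsb = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 128);
	if (!dsb)
		return; /* no DSB available: program the hardware via MMIO */
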
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 403151175a87..a8f012119165 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -4,8 +4,9 @@
*/
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_dsi.h"
#include "intel_panel.h"
@@ -116,14 +117,14 @@ struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
enum drm_panel_orientation
intel_dsi_get_panel_orientation(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
enum drm_panel_orientation orientation;
orientation = connector->panel.vbt.dsi.orientation;
if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return orientation;
- orientation = dev_priv->display.vbt.orientation;
+ orientation = display->vbt.orientation;
if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return orientation;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 049443245310..b3c453bf7d5c 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -24,9 +24,10 @@
*/
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
#include <video/mipi_display.h>
-#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_dcs_backlight.h"
@@ -162,7 +163,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
static int dcs_setup_backlight(struct intel_connector *connector,
enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
if (panel->vbt.backlight.brightness_precision_bits > 8)
@@ -172,7 +173,7 @@ static int dcs_setup_backlight(struct intel_connector *connector,
panel->backlight.level = panel->backlight.max;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using DCS for backlight control\n",
connector->base.base.id, connector->base.name);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 7b2ffd14ae6e..29c920983413 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -31,16 +31,16 @@
#include <linux/pinctrl/machine.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
-
#include <linux/unaligned.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <video/mipi_display.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
@@ -102,13 +102,13 @@ static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi,
static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
const u8 *data)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct mipi_dsi_device *dsi_device;
u8 type, flags, seq_port;
u16 len;
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
flags = *data++;
type = *data++;
@@ -120,12 +120,12 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
port = intel_dsi_seq_port_to_port(intel_dsi, seq_port);
- if (drm_WARN_ON(&dev_priv->drm, !intel_dsi->dsi_hosts[port]))
+ if (drm_WARN_ON(display->drm, !intel_dsi->dsi_hosts[port]))
goto out;
dsi_device = intel_dsi->dsi_hosts[port]->device;
if (!dsi_device) {
- drm_dbg_kms(&dev_priv->drm, "no dsi device for port %c\n",
+ drm_dbg_kms(display->drm, "no dsi device for port %c\n",
port_name(port));
goto out;
}
@@ -150,8 +150,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
- drm_dbg(&dev_priv->drm,
- "Generic Read not yet implemented or used\n");
+ drm_dbg_kms(display->drm, "Generic Read not yet implemented or used\n");
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
mipi_dsi_generic_write(dsi_device, data, len);
@@ -163,15 +162,14 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
break;
case MIPI_DSI_DCS_READ:
- drm_dbg(&dev_priv->drm,
- "DCS Read not yet implemented or used\n");
+ drm_dbg_kms(display->drm, "DCS Read not yet implemented or used\n");
break;
case MIPI_DSI_DCS_LONG_WRITE:
mipi_dsi_dcs_write_buffer(dsi_device, data, len);
break;
}
- if (DISPLAY_VER(dev_priv) < 11)
+ if (DISPLAY_VER(display) < 11)
vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
out:
@@ -182,10 +180,10 @@ out:
static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
u32 delay = *((const u32 *) data);
- drm_dbg_kms(&i915->drm, "%d usecs\n", delay);
+ drm_dbg_kms(display->drm, "%d usecs\n", delay);
usleep_range(delay, delay + 10);
data += 4;
@@ -196,7 +194,7 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index,
const char *con_id, u8 idx, bool value)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
/* XXX: this table is a quick ugly hack. */
static struct gpio_desc *soc_gpio_table[U8_MAX + 1];
struct gpio_desc *gpio_desc = soc_gpio_table[gpio_index];
@@ -204,10 +202,10 @@ static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index,
if (gpio_desc) {
gpiod_set_value(gpio_desc, value);
} else {
- gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, con_id, idx,
+ gpio_desc = devm_gpiod_get_index(display->drm->dev, con_id, idx,
value ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW);
if (IS_ERR(gpio_desc)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"GPIO index %u request failed (%pe)\n",
gpio_index, gpio_desc);
return;
@@ -242,16 +240,16 @@ static void soc_opaque_gpio_set_value(struct intel_connector *connector,
static void vlv_gpio_set_value(struct intel_connector *connector,
u8 gpio_source, u8 gpio_index, bool value)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
/* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
if (connector->panel.vbt.dsi.seq_version < 3) {
if (gpio_source == 1) {
- drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n");
+ drm_dbg_kms(display->drm, "SC gpio not supported\n");
return;
}
if (gpio_source > 1) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"unknown gpio source %u\n", gpio_source);
return;
}
@@ -264,7 +262,7 @@ static void vlv_gpio_set_value(struct intel_connector *connector,
static void chv_gpio_set_value(struct intel_connector *connector,
u8 gpio_source, u8 gpio_index, bool value)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
if (connector->panel.vbt.dsi.seq_version >= 3) {
if (gpio_index >= CHV_GPIO_IDX_START_SE) {
@@ -284,13 +282,13 @@ static void chv_gpio_set_value(struct intel_connector *connector,
} else {
/* XXX: The spec is unclear about CHV GPIO on seq v2 */
if (gpio_source != 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"unknown gpio source %u\n", gpio_source);
return;
}
if (gpio_index >= CHV_GPIO_IDX_START_E) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"invalid gpio index %u for GPIO N\n",
gpio_index);
return;
@@ -320,13 +318,12 @@ enum {
MIPI_VIO_EN_2,
};
-static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
+static void icl_native_gpio_set_value(struct intel_display *display,
int gpio, bool value)
{
- struct intel_display *display = &dev_priv->display;
int index;
- if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2))
+ if (drm_WARN_ON(display->drm, DISPLAY_VER(display) == 11 && gpio >= MIPI_RESET_2))
return;
switch (gpio) {
@@ -343,25 +340,25 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
* The locking protects against concurrent SHOTPLUG_CTL_DDI
* modifications in irq setup and handling.
*/
- spin_lock_irq(&dev_priv->irq_lock);
- intel_de_rmw(dev_priv, SHOTPLUG_CTL_DDI,
+ spin_lock_irq(&display->irq.lock);
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
SHOTPLUG_CTL_DDI_HPD_ENABLE(index) |
SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index),
value ? SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index) : 0);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
break;
case MIPI_AVDD_EN_1:
case MIPI_AVDD_EN_2:
index = gpio == MIPI_AVDD_EN_1 ? 0 : 1;
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, index), PANEL_POWER_ON,
+ intel_de_rmw(display, PP_CONTROL(display, index), PANEL_POWER_ON,
value ? PANEL_POWER_ON : 0);
break;
case MIPI_BKLT_EN_1:
case MIPI_BKLT_EN_2:
index = gpio == MIPI_BKLT_EN_1 ? 0 : 1;
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, index), EDP_BLC_ENABLE,
+ intel_de_rmw(display, PP_CONTROL(display, index), EDP_BLC_ENABLE,
value ? EDP_BLC_ENABLE : 0);
break;
case MIPI_AVEE_EN_1:
@@ -389,13 +386,12 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
u8 gpio_source = 0, gpio_index = 0, gpio_number;
bool value;
int size;
- bool native = DISPLAY_VER(i915) >= 11;
+ bool native = DISPLAY_VER(display) >= 11;
if (connector->panel.vbt.dsi.seq_version >= 3) {
size = 3;
@@ -416,16 +412,16 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
gpio_source = (data[1] >> 1) & 3;
}
- drm_dbg_kms(&i915->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
+ drm_dbg_kms(display->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value));
if (native)
- icl_native_gpio_set_value(i915, gpio_number, value);
- else if (DISPLAY_VER(i915) >= 9)
+ icl_native_gpio_set_value(display, gpio_number, value);
+ else if (DISPLAY_VER(display) >= 9)
bxt_gpio_set_value(connector, gpio_index, value);
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
vlv_gpio_set_value(connector, gpio_source, gpio_number, value);
- else if (IS_CHERRYVIEW(i915))
+ else if (display->platform.cherryview)
chv_gpio_set_value(connector, gpio_source, gpio_number, value);
return data + size;
@@ -463,8 +459,8 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
const u16 target_addr)
{
- struct drm_device *drm_dev = intel_dsi->base.base.dev;
- struct acpi_device *adev = ACPI_COMPANION(drm_dev->dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
+ struct acpi_device *adev = ACPI_COMPANION(display->drm->dev);
struct i2c_adapter_lookup lookup = {
.target_addr = target_addr,
.intel_dsi = intel_dsi,
@@ -484,7 +480,7 @@ static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct i2c_adapter *adapter;
struct i2c_msg msg;
int ret;
@@ -494,7 +490,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
u8 payload_size = *(data + 6);
u8 *payload_data;
- drm_dbg_kms(&i915->drm, "bus %d target-addr 0x%02x reg 0x%02x data %*ph\n",
+ drm_dbg_kms(display->drm, "bus %d target-addr 0x%02x reg 0x%02x data %*ph\n",
vbt_i2c_bus_num, target_addr, reg_offset, payload_size, data + 7);
if (intel_dsi->i2c_bus_num < 0) {
@@ -504,7 +500,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
if (!adapter) {
- drm_err(&i915->drm, "Cannot find a valid i2c bus for xfer\n");
+ drm_err(display->drm, "Cannot find a valid i2c bus for xfer\n");
goto err_bus;
}
@@ -522,7 +518,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
ret = i2c_transfer(adapter, &msg, 1);
if (ret < 0)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Failed to xfer payload of size (%u) to reg (%u)\n",
payload_size, reg_offset);
@@ -535,16 +531,16 @@ err_bus:
static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
- drm_dbg_kms(&i915->drm, "Skipping SPI element execution\n");
+ drm_dbg_kms(display->drm, "Skipping SPI element execution\n");
return data + *(data + 5) + 6;
}
static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
#ifdef CONFIG_PMIC_OPREGION
u32 value, mask, reg_address;
u16 i2c_address;
@@ -560,9 +556,9 @@ static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
reg_address,
value, mask);
if (ret)
- drm_err(&i915->drm, "%s failed, error: %d\n", __func__, ret);
+ drm_err(display->drm, "%s failed, error: %d\n", __func__, ret);
#else
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
#endif
@@ -612,12 +608,12 @@ static const char *sequence_name(enum mipi_seq seq_id)
static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
const u8 *data;
fn_mipi_elem_exec mipi_elem_exec;
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
seq_id >= ARRAY_SIZE(connector->panel.vbt.dsi.sequence)))
return;
@@ -625,9 +621,9 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
if (!data)
return;
- drm_WARN_ON(&dev_priv->drm, *data != seq_id);
+ drm_WARN_ON(display->drm, *data != seq_id);
- drm_dbg_kms(&dev_priv->drm, "Starting MIPI sequence %d - %s\n",
+ drm_dbg_kms(display->drm, "Starting MIPI sequence %d - %s\n",
seq_id, sequence_name(seq_id));
/* Skip Sequence Byte. */
@@ -657,19 +653,19 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
/* Consistency check if we have size. */
if (operation_size && data != next) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Inconsistent operation size\n");
return;
}
} else if (operation_size) {
/* We have size, skip. */
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Unsupported MIPI operation byte %u\n",
operation_byte);
data += operation_size;
} else {
/* No size, can't skip without parsing. */
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Unsupported MIPI operation byte %u\n",
operation_byte);
return;
@@ -695,54 +691,44 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
void intel_dsi_log_params(struct intel_dsi *intel_dsi)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
-
- drm_dbg_kms(&i915->drm, "Pclk %d\n", intel_dsi->pclk);
- drm_dbg_kms(&i915->drm, "Pixel overlap %d\n",
- intel_dsi->pixel_overlap);
- drm_dbg_kms(&i915->drm, "Lane count %d\n", intel_dsi->lane_count);
- drm_dbg_kms(&i915->drm, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
- drm_dbg_kms(&i915->drm, "Video mode format %s\n",
- intel_dsi->video_mode == NON_BURST_SYNC_PULSE ?
- "non-burst with sync pulse" :
- intel_dsi->video_mode == NON_BURST_SYNC_EVENTS ?
- "non-burst with sync events" :
- intel_dsi->video_mode == BURST_MODE ?
- "burst" : "<unknown>");
- drm_dbg_kms(&i915->drm, "Burst mode ratio %d\n",
- intel_dsi->burst_mode_ratio);
- drm_dbg_kms(&i915->drm, "Reset timer %d\n", intel_dsi->rst_timer_val);
- drm_dbg_kms(&i915->drm, "Eot %s\n",
- str_enabled_disabled(intel_dsi->eotp_pkt));
- drm_dbg_kms(&i915->drm, "Clockstop %s\n",
- str_enabled_disabled(!intel_dsi->clock_stop));
- drm_dbg_kms(&i915->drm, "Mode %s\n",
- intel_dsi->operation_mode ? "command" : "video");
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
+ struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
+ "DSI parameters:");
+
+ drm_printf(&p, "Pclk %d\n", intel_dsi->pclk);
+ drm_printf(&p, "Pixel overlap %d\n", intel_dsi->pixel_overlap);
+ drm_printf(&p, "Lane count %d\n", intel_dsi->lane_count);
+ drm_printf(&p, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
+ drm_printf(&p, "Video mode format %s\n",
+ intel_dsi->video_mode == NON_BURST_SYNC_PULSE ?
+ "non-burst with sync pulse" :
+ intel_dsi->video_mode == NON_BURST_SYNC_EVENTS ?
+ "non-burst with sync events" :
+ intel_dsi->video_mode == BURST_MODE ?
+ "burst" : "<unknown>");
+ drm_printf(&p, "Burst mode ratio %d\n", intel_dsi->burst_mode_ratio);
+ drm_printf(&p, "Reset timer %d\n", intel_dsi->rst_timer_val);
+ drm_printf(&p, "Eot %s\n", str_enabled_disabled(intel_dsi->eotp_pkt));
+ drm_printf(&p, "Clockstop %s\n", str_enabled_disabled(!intel_dsi->clock_stop));
+ drm_printf(&p, "Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
- drm_dbg_kms(&i915->drm,
- "Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
+ drm_printf(&p, "Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
- drm_dbg_kms(&i915->drm,
- "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
+ drm_printf(&p, "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
else
- drm_dbg_kms(&i915->drm, "Dual link: NONE\n");
- drm_dbg_kms(&i915->drm, "Pixel Format %d\n", intel_dsi->pixel_format);
- drm_dbg_kms(&i915->drm, "TLPX %d\n", intel_dsi->escape_clk_div);
- drm_dbg_kms(&i915->drm, "LP RX Timeout 0x%x\n",
- intel_dsi->lp_rx_timeout);
- drm_dbg_kms(&i915->drm, "Turnaround Timeout 0x%x\n",
- intel_dsi->turn_arnd_val);
- drm_dbg_kms(&i915->drm, "Init Count 0x%x\n", intel_dsi->init_count);
- drm_dbg_kms(&i915->drm, "HS to LP Count 0x%x\n",
- intel_dsi->hs_to_lp_count);
- drm_dbg_kms(&i915->drm, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
- drm_dbg_kms(&i915->drm, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
- drm_dbg_kms(&i915->drm, "LP to HS Clock Count 0x%x\n",
- intel_dsi->clk_lp_to_hs_count);
- drm_dbg_kms(&i915->drm, "HS to LP Clock Count 0x%x\n",
- intel_dsi->clk_hs_to_lp_count);
- drm_dbg_kms(&i915->drm, "BTA %s\n",
- str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
+ drm_printf(&p, "Dual link: NONE\n");
+ drm_printf(&p, "Pixel Format %d\n", intel_dsi->pixel_format);
+ drm_printf(&p, "TLPX %d\n", intel_dsi->escape_clk_div);
+ drm_printf(&p, "LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
+ drm_printf(&p, "Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
+ drm_printf(&p, "Init Count 0x%x\n", intel_dsi->init_count);
+ drm_printf(&p, "HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
+ drm_printf(&p, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
+ drm_printf(&p, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
+ drm_printf(&p, "LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
+ drm_printf(&p, "HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
+ drm_printf(&p, "BTA %s\n",
+ str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
}
static enum mipi_dsi_pixel_format vbt_to_dsi_pixel_format(unsigned int format)
@@ -764,8 +750,7 @@ static enum mipi_dsi_pixel_format vbt_to_dsi_pixel_format(unsigned int format)
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
struct mipi_pps_data *pps = connector->panel.vbt.dsi.pps;
@@ -773,7 +758,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
u16 burst_mode_ratio;
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
@@ -819,7 +804,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
u32 bitrate;
if (mipi_config->target_burst_mode_freq == 0) {
- drm_err(&dev_priv->drm, "Burst mode target is not set\n");
+ drm_err(display->drm, "Burst mode target is not set\n");
return false;
}
@@ -836,7 +821,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
mipi_config->target_burst_mode_freq = bitrate;
if (mipi_config->target_burst_mode_freq < bitrate) {
- drm_err(&dev_priv->drm, "Burst mode freq is less than computed\n");
+ drm_err(display->drm, "Burst mode freq is less than computed\n");
return false;
}
@@ -900,8 +885,7 @@ static const struct pinctrl_map soc_pwm_pinctrl_map[] = {
void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
@@ -911,13 +895,13 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
struct pinctrl *pinctrl;
int ret;
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
mipi_config->pwm_blc == PPS_BLC_PMIC) {
gpiod_lookup_table = &pmic_panel_gpio_table;
want_panel_gpio = true;
}
- if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
+ if (display->platform.valleyview && mipi_config->pwm_blc == PPS_BLC_SOC) {
gpiod_lookup_table = &soc_panel_gpio_table;
want_panel_gpio = true;
want_backlight_gpio = true;
@@ -926,12 +910,12 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
ret = pinctrl_register_mappings(soc_pwm_pinctrl_map,
ARRAY_SIZE(soc_pwm_pinctrl_map));
if (ret)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to register pwm0 pinmux mapping\n");
- pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0");
+ pinctrl = devm_pinctrl_get_select(display->drm->dev, "soc_pwm0");
if (IS_ERR(pinctrl))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to set pinmux to PWM\n");
}
@@ -939,9 +923,9 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
gpiod_add_lookup_table(gpiod_lookup_table);
if (want_panel_gpio) {
- intel_dsi->gpio_panel = devm_gpiod_get(dev->dev, "panel", flags);
+ intel_dsi->gpio_panel = devm_gpiod_get(display->drm->dev, "panel", flags);
if (IS_ERR(intel_dsi->gpio_panel)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to own gpio for panel control\n");
intel_dsi->gpio_panel = NULL;
}
@@ -949,9 +933,9 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
if (want_backlight_gpio) {
intel_dsi->gpio_backlight =
- devm_gpiod_get(dev->dev, "backlight", flags);
+ devm_gpiod_get(display->drm->dev, "backlight", flags);
if (IS_ERR(intel_dsi->gpio_backlight)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to own gpio for backlight control\n");
intel_dsi->gpio_backlight = NULL;
}
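
The intel_dsi_log_params() rewrite above replaces a long run of individual
drm_dbg_kms() calls with one drm_printer, so every line of the parameter
dump shares a single prefix and debug category. A minimal sketch of the
idiom, using only the drm_print API already pulled in by the hunk:

	struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
					       "DSI parameters:");

	drm_printf(&p, "Pclk %d\n", intel_dsi->pclk);
	drm_printf(&p, "Lane count %d\n", intel_dsi->lane_count);
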
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index c16fb34b737d..b61520353c92 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -31,10 +31,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_driver.h"
@@ -129,13 +130,13 @@ static struct intel_dvo *intel_attached_dvo(struct intel_connector *connector)
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DVO(port));
+ tmp = intel_de_read(display, DVO(port));
if (!(tmp & DVO_ENABLE))
return false;
@@ -146,11 +147,11 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DVO(port));
+ tmp = intel_de_read(display, DVO(port));
*pipe = REG_FIELD_GET(DVO_PIPE_SEL_MASK, tmp);
@@ -160,13 +161,13 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
u32 tmp, flags = 0;
pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO);
- tmp = intel_de_read(i915, DVO(port));
+ tmp = intel_de_read(display, DVO(port));
if (tmp & DVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -186,14 +187,14 @@ static void intel_disable_dvo(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
enum port port = encoder->port;
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
- intel_de_rmw(i915, DVO(port), DVO_ENABLE, 0);
- intel_de_posting_read(i915, DVO(port));
+ intel_de_rmw(display, DVO(port), DVO_ENABLE, 0);
+ intel_de_posting_read(display, DVO(port));
}
static void intel_enable_dvo(struct intel_atomic_state *state,
@@ -201,7 +202,7 @@ static void intel_enable_dvo(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
enum port port = encoder->port;
@@ -209,8 +210,8 @@ static void intel_enable_dvo(struct intel_atomic_state *state,
&pipe_config->hw.mode,
&pipe_config->hw.adjusted_mode);
- intel_de_rmw(i915, DVO(port), 0, DVO_ENABLE);
- intel_de_posting_read(i915, DVO(port));
+ intel_de_rmw(display, DVO(port), 0, DVO_ENABLE);
+ intel_de_posting_read(display, DVO(port));
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
@@ -288,7 +289,7 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
enum port port = encoder->port;
@@ -296,7 +297,7 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
u32 dvo_val;
/* Save the active data order, since I don't know what it should be set to. */
- dvo_val = intel_de_read(i915, DVO(port)) &
+ dvo_val = intel_de_read(display, DVO(port)) &
(DVO_DEDICATED_INT_ENABLE |
DVO_PRESERVE_MASK | DVO_ACT_DATA_ORDER_MASK);
dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
@@ -309,10 +310,10 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
- intel_de_write(i915, DVO_SRCDIM(port),
+ intel_de_write(display, DVO_SRCDIM(port),
DVO_SRCDIM_HORIZONTAL(adjusted_mode->crtc_hdisplay) |
DVO_SRCDIM_VERTICAL(adjusted_mode->crtc_vdisplay));
- intel_de_write(i915, DVO(port), dvo_val);
+ intel_de_write(display, DVO(port), dvo_val);
}
static enum drm_connector_status
@@ -320,10 +321,9 @@ intel_dvo_detect(struct drm_connector *_connector, bool force)
{
struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.base.id, connector->base.name);
if (!intel_display_device_enabled(display))
@@ -414,11 +414,10 @@ static int intel_dvo_connector_type(const struct intel_dvo_device *dvo)
}
}
-static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
+static bool intel_dvo_init_dev(struct intel_display *display,
struct intel_dvo *intel_dvo,
const struct intel_dvo_device *dvo)
{
- struct intel_display *display = &dev_priv->display;
struct i2c_adapter *i2c;
u32 dpll[I915_MAX_PIPES];
enum pipe pipe;
@@ -458,15 +457,15 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
* the clock enabled before we attempt to initialize
* the device.
*/
- for_each_pipe(dev_priv, pipe)
- dpll[pipe] = intel_de_rmw(dev_priv, DPLL(dev_priv, pipe), 0,
+ for_each_pipe(display, pipe)
+ dpll[pipe] = intel_de_rmw(display, DPLL(display, pipe), 0,
DPLL_DVO_2X_MODE);
ret = dvo->dev_ops->init(&intel_dvo->dev, i2c);
/* restore the DVO 2x clock state to original */
- for_each_pipe(dev_priv, pipe) {
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll[pipe]);
+ for_each_pipe(display, pipe) {
+ intel_de_write(display, DPLL(display, pipe), dpll[pipe]);
}
intel_gmbus_force_bit(i2c, false);
@@ -474,14 +473,14 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
return ret;
}
-static bool intel_dvo_probe(struct drm_i915_private *i915,
+static bool intel_dvo_probe(struct intel_display *display,
struct intel_dvo *intel_dvo)
{
int i;
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
- if (intel_dvo_init_dev(i915, intel_dvo,
+ if (intel_dvo_init_dev(display, intel_dvo,
&intel_dvo_devices[i]))
return true;
}
@@ -489,9 +488,8 @@ static bool intel_dvo_probe(struct drm_i915_private *i915,
return false;
}
-void intel_dvo_init(struct drm_i915_private *i915)
+void intel_dvo_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_connector *connector;
struct intel_encoder *encoder;
struct intel_dvo *intel_dvo;
@@ -518,7 +516,7 @@ void intel_dvo_init(struct drm_i915_private *i915)
encoder->pre_enable = intel_dvo_pre_enable;
connector->get_hw_state = intel_dvo_connector_get_hw_state;
- if (!intel_dvo_probe(i915, intel_dvo)) {
+ if (!intel_dvo_probe(display, intel_dvo)) {
kfree(intel_dvo);
intel_connector_free(connector);
return;
@@ -535,12 +533,12 @@ void intel_dvo_init(struct drm_i915_private *i915)
encoder->cloneable = BIT(INTEL_OUTPUT_ANALOG) |
BIT(INTEL_OUTPUT_DVO);
- drm_encoder_init(&i915->drm, &encoder->base,
+ drm_encoder_init(display->drm, &encoder->base,
&intel_dvo_enc_funcs,
intel_dvo_encoder_type(&intel_dvo->dev),
"DVO %c", port_name(encoder->port));
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] detected %s\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] detected %s\n",
encoder->base.base.id, encoder->base.name,
intel_dvo->dev.name);
@@ -549,7 +547,7 @@ void intel_dvo_init(struct drm_i915_private *i915)
DRM_CONNECTOR_POLL_DISCONNECT;
connector->base.polled = connector->polled;
- drm_connector_init_with_ddc(&i915->drm, &connector->base,
+ drm_connector_init_with_ddc(display->drm, &connector->base,
&intel_dvo_connector_funcs,
intel_dvo_connector_type(&intel_dvo->dev),
intel_gmbus_get_adapter(display, GMBUS_PIN_DPC));
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.h b/drivers/gpu/drm/i915/display/intel_dvo.h
index bf7a356422ab..83776552fc87 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo.h
@@ -6,12 +6,12 @@
#ifndef __INTEL_DVO_H__
#define __INTEL_DVO_H__
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-void intel_dvo_init(struct drm_i915_private *dev_priv);
+void intel_dvo_init(struct intel_display *display);
#else
-static inline void intel_dvo_init(struct drm_i915_private *dev_priv)
+static inline void intel_dvo_init(struct intel_display *display)
{
}
#endif
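
The header only deals in pointers, so a forward declaration of struct
intel_display is enough and no new include is needed, while the #else stub
keeps builds without the i915 display code compiling. The same shape,
sketched for a hypothetical intel_foo.h (intel_foo_init is illustrative
only):

	struct intel_display;

	#ifdef I915
	void intel_foo_init(struct intel_display *display);
	#else
	static inline void intel_foo_init(struct intel_display *display)
	{
	}
	#endif
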
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 2b0e0f220442..05393bd60c98 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -560,11 +560,11 @@ static bool plane_has_modifier(struct intel_display *display,
return false;
if (md->modifier == I915_FORMAT_MOD_4_TILED_BMG_CCS &&
- (GRAPHICS_VER(i915) < 20 || !IS_DGFX(i915)))
+ (GRAPHICS_VER(i915) < 20 || !display->platform.dgfx))
return false;
if (md->modifier == I915_FORMAT_MOD_4_TILED_LNL_CCS &&
- (GRAPHICS_VER(i915) < 20 || IS_DGFX(i915)))
+ (GRAPHICS_VER(i915) < 20 || display->platform.dgfx))
return false;
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 30ac9b089ad6..c648ab8a93d7 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -12,6 +12,7 @@
#include "i915_drv.h"
#include "intel_atomic_plane.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
@@ -117,7 +118,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_gem_object *_obj = intel_fb_bo(fb);
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
unsigned int pinctl;
@@ -136,7 +137,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
* intel_runtime_pm_put(), so it is correct to wrap only the
* pin/unpin/fence and not more.
*/
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
atomic_inc(&display->restore.pending_fb_pin);
@@ -215,7 +216,7 @@ err:
vma = ERR_PTR(ret);
atomic_dec(&display->restore.pending_fb_pin);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return vma;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index b6978135e8ad..bed2bba20b55 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -55,6 +55,7 @@
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_device.h"
+#include "intel_display_rpm.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_display_wa.h"
@@ -251,9 +252,12 @@ static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_s
* Gen9 hw miscalculates cfb stride for linear as
* PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
* we always need to use the override there.
+ *
+ * wa_14022269668: For BMG, always program FBC_STRIDE before enabling FBC
*/
if (stride != stride_aligned ||
- (DISPLAY_VER(display) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR))
+ (DISPLAY_VER(display) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR) ||
+ display->platform.battlemage)
return stride_aligned * 4 / 64;
return 0;
@@ -519,6 +523,20 @@ static void ilk_fbc_activate(struct intel_fbc *fbc)
DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}
+static void fbc_compressor_clkgate_disable_wa(struct intel_fbc *fbc,
+ bool disable)
+{
+ struct intel_display *display = fbc->display;
+
+ if (display->platform.dg2)
+ intel_de_rmw(display, GEN9_CLKGATE_DIS_4, DG2_DPFC_GATING_DIS,
+ disable ? DG2_DPFC_GATING_DIS : 0);
+ else if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, MTL_PIPE_CLKGATE_DIS2(fbc->id),
+ MTL_DPFC_GATING_DIS,
+ disable ? MTL_DPFC_GATING_DIS : 0);
+}
+
static void ilk_fbc_deactivate(struct intel_fbc *fbc)
{
struct intel_display *display = fbc->display;
@@ -532,6 +550,10 @@ static void ilk_fbc_deactivate(struct intel_fbc *fbc)
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
+
+ /* wa_18038517565 Enable DPFC clock gating after FBC disable */
+ if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
+ fbc_compressor_clkgate_disable_wa(fbc, false);
}
}
@@ -921,6 +943,10 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
if (DISPLAY_VER(display) >= 11 && !display->platform.dg2)
intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
+
+ /* wa_18038517565 Disable DPFC clock gating before FBC enable */
+ if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
+ fbc_compressor_clkgate_disable_wa(fbc, true);
}
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
@@ -1436,7 +1462,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (intel_display_needs_wa_16023588340(i915)) {
+ if (intel_display_needs_wa_16023588340(display)) {
plane_state->no_fbc_reason = "Wa_16023588340";
return 0;
}
@@ -1464,14 +1490,15 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* Recommendation is to keep this combination disabled
* Bspec: 50422 HSD: 14010260002
*
- * In Xe3, PSR2 selective fetch and FBC dirty rect feature cannot
- * coexist. So if PSR2 selective fetch is supported then mark that
- * FBC is not supported.
- * TODO: Need a logic to decide between PSR2 and FBC Dirty rect
+ * TODO: Implement logic to select between PSR2 selective fetch and
+ * FBC based on Bspec: 68881 on xe2lpd onwards.
+ *
+ * As we still see some strange underruns on those platforms while
+ * disabling PSR2, keep FBC disabled while selective update is on,
+ * until the selection logic is implemented.
*/
- if ((IS_DISPLAY_VER(display, 12, 14) || HAS_FBC_DIRTY_RECT(display)) &&
- crtc_state->has_sel_update && !crtc_state->has_panel_replay) {
- plane_state->no_fbc_reason = "PSR2 enabled";
+ if (DISPLAY_VER(display) >= 12 && crtc_state->has_sel_update) {
+ plane_state->no_fbc_reason = "Selective update enabled";
return 0;
}
@@ -2120,13 +2147,12 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_fbc *fbc = m->private;
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_plane *plane;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
drm_modeset_lock_all(display->drm);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
mutex_lock(&fbc->lock);
if (fbc->active) {
@@ -2151,7 +2177,7 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
}
mutex_unlock(&fbc->lock);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
drm_modeset_unlock_all(display->drm);
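
The workaround helpers above are built on intel_de_rmw(), which reads the
register, clears the bits given in the third argument, ORs in the fourth,
writes the result back, and returns the old value. Sketched as a hedged
equivalent of the DG2 case:

	/* old = read(reg); write(reg, (old & ~clear) | set); return old */
	intel_de_rmw(display, GEN9_CLKGATE_DIS_4, DG2_DPFC_GATING_DIS,
		     disable ? DG2_DPFC_GATING_DIS : 0);
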
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index adc19d5607de..2dc4029d71ed 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -47,9 +47,10 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_bo.h"
+#include "intel_display_core.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
@@ -65,9 +66,9 @@ struct intel_fbdev {
static struct intel_fbdev *to_intel_fbdev(struct drm_fb_helper *fb_helper)
{
- struct drm_i915_private *i915 = to_i915(fb_helper->client.dev);
+ struct intel_display *display = to_intel_display(fb_helper->client.dev);
- return i915->display.fbdev.fbdev;
+ return display->fbdev.fbdev;
}
static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
@@ -209,11 +210,10 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct intel_display *display = to_intel_display(helper->dev);
struct intel_fbdev *ifbdev = to_intel_fbdev(helper);
struct intel_framebuffer *fb = ifbdev->fb;
- struct drm_device *dev = helper->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct fb_info *info;
struct i915_vma *vma;
unsigned long flags = 0;
@@ -226,7 +226,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
if (fb &&
(sizes->fb_width > fb->base.width ||
sizes->fb_height > fb->base.height)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BIOS fb too small (%dx%d), we require (%dx%d),"
" releasing it\n",
fb->base.width, fb->base.height,
@@ -234,20 +234,20 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
drm_framebuffer_put(&fb->base);
fb = NULL;
}
- if (!fb || drm_WARN_ON(dev, !intel_fb_bo(&fb->base))) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!fb || drm_WARN_ON(display->drm, !intel_fb_bo(&fb->base))) {
+ drm_dbg_kms(display->drm,
"no BIOS fb, allocating a new one\n");
fb = intel_fbdev_fb_alloc(helper, sizes);
if (IS_ERR(fb))
return PTR_ERR(fb);
} else {
- drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n");
+ drm_dbg_kms(display->drm, "re-using BIOS fb\n");
prealloc = true;
sizes->fb_width = fb->base.width;
sizes->fb_height = fb->base.height;
}
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
@@ -265,7 +265,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
info = drm_fb_helper_alloc_info(helper);
if (IS_ERR(info)) {
- drm_err(&dev_priv->drm, "Failed to allocate fb_info (%pe)\n", info);
+ drm_err(display->drm, "Failed to allocate fb_info (%pe)\n", info);
ret = PTR_ERR(info);
goto out_unpin;
}
@@ -277,11 +277,11 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
obj = intel_fb_bo(&fb->base);
- ret = intel_fbdev_fb_fill_info(dev_priv, info, obj, vma);
+ ret = intel_fbdev_fb_fill_info(display, info, obj, vma);
if (ret)
goto out_unpin;
- drm_fb_helper_fill_info(info, dev->fb_helper, sizes);
+ drm_fb_helper_fill_info(info, display->drm->fb_helper, sizes);
/* If the object is shmemfs backed, it will have given us zeroed pages.
* If the object is stolen however, it will be full of whatever
@@ -292,21 +292,22 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- drm_dbg_kms(&dev_priv->drm, "allocated %dx%d fb: 0x%08x\n",
+ drm_dbg_kms(display->drm, "allocated %dx%d fb: 0x%08x\n",
fb->base.width, fb->base.height,
i915_ggtt_offset(vma));
ifbdev->fb = fb;
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
out_unpin:
intel_fb_unpin_vma(vma, flags);
out_unlock:
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
+
return ret;
}
@@ -319,16 +320,15 @@ out_unlock:
* Note we only support a single fb shared across pipes for boot (mostly for
* fbcon), so we just find the biggest and use that.
*/
-static bool intel_fbdev_init_bios(struct drm_device *dev,
+static bool intel_fbdev_init_bios(struct intel_display *display,
struct intel_fbdev *ifbdev)
{
- struct drm_i915_private *i915 = to_i915(dev);
struct intel_framebuffer *fb = NULL;
struct intel_crtc *crtc;
unsigned int max_size = 0;
/* Find the largest fb */
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
@@ -338,21 +338,21 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
struct drm_gem_object *obj = intel_fb_bo(plane_state->uapi.fb);
if (!crtc_state->uapi.active) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] not active, skipping\n",
crtc->base.base.id, crtc->base.name);
continue;
}
if (!obj) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] no fb, skipping\n",
plane->base.base.id, plane->base.name);
continue;
}
if (obj->size > max_size) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"found possible fb from [PLANE:%d:%s]\n",
plane->base.base.id, plane->base.name);
fb = to_intel_framebuffer(plane_state->uapi.fb);
@@ -361,13 +361,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
}
if (!fb) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"no active fbs found, not using BIOS config\n");
goto out;
}
/* Now make sure all the pipes will fit into it */
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
@@ -375,13 +375,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
unsigned int cur_size;
if (!crtc_state->uapi.active) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] not active, skipping\n",
crtc->base.base.id, crtc->base.name);
continue;
}
- drm_dbg_kms(&i915->drm, "checking [PLANE:%d:%s] for BIOS fb\n",
+ drm_dbg_kms(display->drm, "checking [PLANE:%d:%s] for BIOS fb\n",
plane->base.base.id, plane->base.name);
/*
@@ -392,7 +392,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = crtc_state->uapi.adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.format->cpp[0];
if (fb->base.pitches[0] < cur_size) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"fb not wide enough for [PLANE:%d:%s] (%d vs %d)\n",
plane->base.base.id, plane->base.name,
cur_size, fb->base.pitches[0]);
@@ -403,7 +403,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
cur_size *= fb->base.pitches[0];
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] area: %dx%d, bpp: %d, size: %d\n",
crtc->base.base.id, crtc->base.name,
crtc_state->uapi.adjusted_mode.crtc_hdisplay,
@@ -412,7 +412,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size);
if (cur_size > max_size) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"fb not big enough for [PLANE:%d:%s] (%d vs %d)\n",
plane->base.base.id, plane->base.name,
cur_size, max_size);
@@ -420,14 +420,14 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
break;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"fb big enough [PLANE:%d:%s] (%d >= %d)\n",
plane->base.base.id, plane->base.name,
max_size, cur_size);
}
if (!fb) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"BIOS fb not suitable for all pipes, not using\n");
goto out;
}
@@ -437,7 +437,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
drm_framebuffer_get(&ifbdev->fb->base);
/* Final pass to check if any active pipes don't have fbs */
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
@@ -448,13 +448,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
if (!crtc_state->uapi.active)
continue;
- drm_WARN(dev, !plane_state->uapi.fb,
+ drm_WARN(display->drm, !plane_state->uapi.fb,
"re-used BIOS config but lost an fb on [PLANE:%d:%s]\n",
plane->base.base.id, plane->base.name);
}
- drm_dbg_kms(&i915->drm, "using BIOS fb for initial console\n");
+ drm_dbg_kms(display->drm, "using BIOS fb for initial console\n");
return true;
out:
@@ -479,26 +479,25 @@ static unsigned int intel_fbdev_color_mode(const struct drm_format_info *info)
}
}
-void intel_fbdev_setup(struct drm_i915_private *i915)
+void intel_fbdev_setup(struct intel_display *display)
{
- struct drm_device *dev = &i915->drm;
struct intel_fbdev *ifbdev;
unsigned int preferred_bpp = 0;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- ifbdev = drmm_kzalloc(dev, sizeof(*ifbdev), GFP_KERNEL);
+ ifbdev = drmm_kzalloc(display->drm, sizeof(*ifbdev), GFP_KERNEL);
if (!ifbdev)
return;
- i915->display.fbdev.fbdev = ifbdev;
- if (intel_fbdev_init_bios(dev, ifbdev))
+ display->fbdev.fbdev = ifbdev;
+ if (intel_fbdev_init_bios(display, ifbdev))
preferred_bpp = intel_fbdev_color_mode(ifbdev->fb->base.format);
if (!preferred_bpp)
preferred_bpp = 32;
- drm_client_setup_with_color_mode(dev, preferred_bpp);
+ drm_client_setup_with_color_mode(display->drm, preferred_bpp);
}
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
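For context, a minimal sketch of how the reworked intel_fbdev_setup() is reached after this change; the call site below is illustrative only and not part of the patch (the helper name is hypothetical):

/* Illustrative caller: fbdev setup now keys off intel_display only. */
static void example_display_register(struct intel_display *display)
{
    /* no drm_i915_private needed on this path anymore */
    intel_fbdev_setup(display);
}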
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index 89bad3a2b01a..a15e3e222a0c 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -10,7 +10,7 @@
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
-struct drm_i915_private;
+struct intel_display;
struct intel_fbdev;
struct intel_framebuffer;
@@ -19,14 +19,14 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes);
#define INTEL_FBDEV_DRIVER_OPS \
.fbdev_probe = intel_fbdev_driver_fbdev_probe
-void intel_fbdev_setup(struct drm_i915_private *dev_priv);
+void intel_fbdev_setup(struct intel_display *display);
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev);
#else
#define INTEL_FBDEV_DRIVER_OPS \
.fbdev_probe = NULL
-static inline void intel_fbdev_setup(struct drm_i915_private *dev_priv)
+static inline void intel_fbdev_setup(struct intel_display *display)
{
}
static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
index 4991c35a2632..5f4cb3328265 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
@@ -15,9 +15,9 @@
struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct intel_display *display = to_intel_display(helper->dev);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct drm_framebuffer *fb;
- struct drm_device *dev = helper->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
int size;
@@ -50,14 +50,14 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
*
* Also skip stolen on MTL as Wa_22018444074 mitigation.
*/
- if (!(IS_METEORLAKE(dev_priv)) && size * 2 < dev_priv->dsm.usable_size)
+ if (!display->platform.meteorlake && size * 2 < dev_priv->dsm.usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
}
if (IS_ERR(obj)) {
- drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj);
+ drm_err(display->drm, "failed to allocate framebuffer (%pe)\n", obj);
return ERR_PTR(-ENOMEM);
}
@@ -67,9 +67,10 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
return to_intel_framebuffer(fb);
}
-int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
+int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
struct drm_gem_object *_obj, struct i915_vma *vma)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
struct i915_gem_ww_ctx ww;
void __iomem *vaddr;
@@ -101,7 +102,7 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
ret = PTR_ERR(vaddr);
continue;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
index e502ae375fc0..cb7957272715 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
@@ -9,13 +9,13 @@
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
struct drm_gem_object;
-struct drm_i915_private;
struct fb_info;
struct i915_vma;
+struct intel_display;
struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes);
-int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
+int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
struct drm_gem_object *obj, struct i915_vma *vma);
#endif
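The hunks above all follow one conversion idiom: derive struct intel_display first, and only peel drm_i915_private off it where GEM-side calls still need it. A standalone sketch of that idiom (function name hypothetical):

static void example_probe_idiom(struct drm_fb_helper *helper)
{
    struct intel_display *display = to_intel_display(helper->dev);
    struct drm_i915_private *i915 = to_i915(display->drm);

    /* display-facing calls take display ... */
    drm_dbg_kms(display->drm, "fbdev probe\n");

    /* ... while GEM allocations still take i915 */
    (void)i915;
}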
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 40deee0769ae..169bbe154b5c 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -6,15 +6,16 @@
#include <linux/string_helpers.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
-#include "intel_dp.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
#include "intel_link_bw.h"
@@ -464,7 +465,6 @@ static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_st
void intel_fdi_normal_train(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -483,7 +483,7 @@ void intel_fdi_normal_train(struct intel_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(display, reg);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_NORMAL_CPT;
} else {
@@ -607,7 +607,6 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, i, retry;
@@ -647,7 +646,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(display, reg);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
@@ -698,7 +697,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(display, reg);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
} else {
@@ -1077,7 +1076,6 @@ void ilk_fdi_pll_disable(struct intel_crtc *crtc)
void ilk_fdi_disable(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -1096,7 +1094,7 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev_priv))
+ if (HAS_PCH_IBX(display))
intel_de_write(display, FDI_RX_CHICKEN(pipe),
FDI_RX_PHASE_SYNC_POINTER_OVR);
@@ -1106,7 +1104,7 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(display, reg);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
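The FDI hunks apply the same substitution throughout: PCH checks now take the display pointer derived from the crtc, so the local dev_priv can go away. Restated as a sketch, with the register programming elided:

static void example_fdi_train_step(struct intel_crtc *crtc)
{
    struct intel_display *display = to_intel_display(crtc);
    u32 temp = 0;

    /* HAS_PCH_CPT() now accepts intel_display directly */
    if (HAS_PCH_CPT(display))
        temp |= FDI_LINK_TRAIN_NORMAL_CPT;

    (void)temp;	/* register write elided in this sketch */
}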
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 7a8fbff39be0..2a787897b2d3 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -25,7 +25,8 @@
*
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_irq.h"
@@ -57,11 +58,10 @@
static bool ivb_can_enable_err_int(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
enum pipe pipe;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
for_each_pipe(display, pipe) {
crtc = intel_crtc_for_pipe(display, pipe);
@@ -75,11 +75,10 @@ static bool ivb_can_enable_err_int(struct intel_display *display)
static bool cpt_can_enable_serr_int(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
struct intel_crtc *crtc;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
for_each_pipe(display, pipe) {
crtc = intel_crtc_for_pipe(display, pipe);
@@ -94,11 +93,10 @@ static bool cpt_can_enable_serr_int(struct intel_display *display)
static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
i915_reg_t reg = PIPESTAT(display, crtc->pipe);
u32 enable_mask;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if ((intel_de_read(display, reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
return;
@@ -115,10 +113,9 @@ static void i9xx_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe,
bool enable, bool old)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
i915_reg_t reg = PIPESTAT(display, pipe);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if (enable) {
u32 enable_mask = i915_pipestat_enable_mask(display, pipe);
@@ -136,24 +133,22 @@ static void i9xx_set_fifo_underrun_reporting(struct intel_display *display,
static void ilk_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 bit = (pipe == PIPE_A) ?
DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN;
if (enable)
- ilk_enable_display_irq(dev_priv, bit);
+ ilk_enable_display_irq(display, bit);
else
- ilk_disable_display_irq(dev_priv, bit);
+ ilk_disable_display_irq(display, bit);
}
static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 err_int = intel_de_read(display, GEN7_ERR_INT);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
return;
@@ -169,7 +164,6 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable,
bool old)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
if (enable) {
intel_de_write(display, GEN7_ERR_INT,
ERR_INT_FIFO_UNDERRUN(pipe));
@@ -177,9 +171,9 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display,
if (!ivb_can_enable_err_int(display))
return;
- ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ ilk_enable_display_irq(display, DE_ERR_INT_IVB);
} else {
- ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ ilk_disable_display_irq(display, DE_ERR_INT_IVB);
if (old &&
intel_de_read(display, GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
@@ -193,36 +187,32 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display,
static void bdw_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (enable)
- bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
+ bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_FIFO_UNDERRUN);
else
- bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
+ bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_FIFO_UNDERRUN);
}
static void ibx_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pch_transcoder,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 bit = (pch_transcoder == PIPE_A) ?
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
if (enable)
- ibx_enable_display_interrupt(dev_priv, bit);
+ ibx_enable_display_interrupt(display, bit);
else
- ibx_disable_display_interrupt(dev_priv, bit);
+ ibx_disable_display_interrupt(display, bit);
}
static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pch_transcoder = crtc->pipe;
u32 serr_int = intel_de_read(display, SERR_INT);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
return;
@@ -240,8 +230,6 @@ static void cpt_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pch_transcoder,
bool enable, bool old)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (enable) {
intel_de_write(display, SERR_INT,
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
@@ -249,9 +237,9 @@ static void cpt_set_fifo_underrun_reporting(struct intel_display *display,
if (!cpt_can_enable_serr_int(display))
return;
- ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+ ibx_enable_display_interrupt(display, SDE_ERROR_CPT);
} else {
- ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+ ibx_disable_display_interrupt(display, SDE_ERROR_CPT);
if (old && intel_de_read(display, SERR_INT) &
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
@@ -265,11 +253,10 @@ static void cpt_set_fifo_underrun_reporting(struct intel_display *display,
static bool __intel_set_cpu_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
bool old;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
old = !crtc->cpu_fifo_underrun_disabled;
crtc->cpu_fifo_underrun_disabled = !enable;
@@ -305,13 +292,12 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct intel_display *displa
bool intel_set_cpu_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
unsigned long flags;
bool ret;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ spin_lock_irqsave(&display->irq.lock, flags);
ret = __intel_set_cpu_fifo_underrun_reporting(display, pipe, enable);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ spin_unlock_irqrestore(&display->irq.lock, flags);
return ret;
}
@@ -334,7 +320,6 @@ bool intel_set_pch_fifo_underrun_reporting(struct intel_display *display,
enum pipe pch_transcoder,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pch_transcoder);
unsigned long flags;
bool old;
@@ -348,12 +333,12 @@ bool intel_set_pch_fifo_underrun_reporting(struct intel_display *display,
* crtc on LPT won't cause issues.
*/
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ spin_lock_irqsave(&display->irq.lock, flags);
old = !crtc->pch_fifo_underrun_disabled;
crtc->pch_fifo_underrun_disabled = !enable;
- if (HAS_PCH_IBX(dev_priv))
+ if (HAS_PCH_IBX(display))
ibx_set_fifo_underrun_reporting(display,
pch_transcoder,
enable);
@@ -362,7 +347,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct intel_display *display,
pch_transcoder,
enable, old);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ spin_unlock_irqrestore(&display->irq.lock, flags);
return old;
}
@@ -429,10 +414,9 @@ void intel_pch_fifo_underrun_irq_handler(struct intel_display *display,
*/
void intel_check_cpu_fifo_underruns(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
for_each_intel_crtc(display->drm, crtc) {
if (crtc->cpu_fifo_underrun_disabled)
@@ -444,7 +428,7 @@ void intel_check_cpu_fifo_underruns(struct intel_display *display)
ivb_check_fifo_underruns(crtc);
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
/**
@@ -457,28 +441,25 @@ void intel_check_cpu_fifo_underruns(struct intel_display *display)
*/
void intel_check_pch_fifo_underruns(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
for_each_intel_crtc(display->drm, crtc) {
if (crtc->pch_fifo_underrun_disabled)
continue;
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
cpt_check_pch_fifo_underruns(crtc);
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
void intel_init_fifo_underrun_reporting(struct intel_display *display,
struct intel_crtc *crtc,
bool enable)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
crtc->cpu_fifo_underrun_disabled = !enable;
/*
@@ -490,6 +471,6 @@ void intel_init_fifo_underrun_reporting(struct intel_display *display,
* PCH transcoders B and C would prevent enabling the south
* error interrupt (see cpt_can_enable_serr_int()).
*/
- if (intel_has_pch_trancoder(i915, crtc->pipe))
+ if (intel_has_pch_trancoder(display, crtc->pipe))
crtc->pch_fifo_underrun_disabled = !enable;
}
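Throughout this file the lock simply moves from dev_priv->irq_lock to display->irq.lock with the call pattern otherwise unchanged; a condensed sketch, assuming the field layout shown in the hunks:

static bool example_toggle_underruns(struct intel_display *display,
                                     enum pipe pipe, bool enable)
{
    unsigned long flags;
    bool ret;

    /* __intel_set_cpu_fifo_underrun_reporting() is file-local, so this
     * sketch only makes sense inside intel_fifo_underrun.c itself.
     */
    spin_lock_irqsave(&display->irq.lock, flags);
    ret = __intel_set_cpu_fifo_underrun_reporting(display, pipe, enable);
    spin_unlock_irqrestore(&display->irq.lock, flags);

    return ret;
}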
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index ba2f88ca6117..43be5377ddc1 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -58,7 +58,6 @@
#include <drm/drm_gem.h>
#include "i915_active.h"
-#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_bo.h"
#include "intel_display_trace.h"
@@ -72,7 +71,7 @@
/**
* frontbuffer_flush - flush frontbuffer
- * @i915: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the flush
*
@@ -82,16 +81,14 @@
*
* Can be called without any locks held.
*/
-static void frontbuffer_flush(struct drm_i915_private *i915,
+static void frontbuffer_flush(struct intel_display *display,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
- struct intel_display *display = &i915->display;
-
/* Delay flushing when rings are still busy. */
- spin_lock(&i915->display.fb_tracking.lock);
- frontbuffer_bits &= ~i915->display.fb_tracking.busy_bits;
- spin_unlock(&i915->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
+ frontbuffer_bits &= ~display->fb_tracking.busy_bits;
+ spin_unlock(&display->fb_tracking.lock);
if (!frontbuffer_bits)
return;
@@ -107,7 +104,7 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
/**
* intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @i915: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. The actual
@@ -117,19 +114,19 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
+void intel_frontbuffer_flip_prepare(struct intel_display *display,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->display.fb_tracking.lock);
- i915->display.fb_tracking.flip_bits |= frontbuffer_bits;
+ spin_lock(&display->fb_tracking.lock);
+ display->fb_tracking.flip_bits |= frontbuffer_bits;
/* Remove stale busy bits due to the old buffer. */
- i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->display.fb_tracking.lock);
+ display->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&display->fb_tracking.lock);
}
/**
* intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
- * @i915: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after the flip has been latched and will complete
@@ -137,22 +134,22 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
+void intel_frontbuffer_flip_complete(struct intel_display *display,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
/* Mask any cancelled flips. */
- frontbuffer_bits &= i915->display.fb_tracking.flip_bits;
- i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->display.fb_tracking.lock);
+ frontbuffer_bits &= display->fb_tracking.flip_bits;
+ display->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&display->fb_tracking.lock);
if (frontbuffer_bits)
- frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
+ frontbuffer_flush(display, frontbuffer_bits, ORIGIN_FLIP);
}
/**
* intel_frontbuffer_flip - synchronous frontbuffer flip
- * @i915: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. This is for
@@ -161,15 +158,15 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip(struct drm_i915_private *i915,
+void intel_frontbuffer_flip(struct intel_display *display,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
/* Remove stale busy bits due to the old buffer. */
- i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->display.fb_tracking.lock);
+ display->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&display->fb_tracking.lock);
- frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
+ frontbuffer_flush(display, frontbuffer_bits, ORIGIN_FLIP);
}
void __intel_fb_invalidate(struct intel_frontbuffer *front,
@@ -198,7 +195,6 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
unsigned int frontbuffer_bits)
{
struct intel_display *display = to_intel_display(front->obj->dev);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (origin == ORIGIN_CS) {
spin_lock(&display->fb_tracking.lock);
@@ -209,7 +205,7 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
}
if (frontbuffer_bits)
- frontbuffer_flush(i915, frontbuffer_bits, origin);
+ frontbuffer_flush(display, frontbuffer_bits, origin);
}
static void intel_frontbuffer_flush_work(struct work_struct *work)
@@ -280,7 +276,7 @@ static void frontbuffer_release(struct kref *ref)
struct intel_frontbuffer *
intel_frontbuffer_get(struct drm_gem_object *obj)
{
- struct drm_i915_private *i915 = to_i915(obj->dev);
+ struct intel_display *display = to_intel_display(obj->dev);
struct intel_frontbuffer *front, *cur;
front = intel_bo_get_frontbuffer(obj);
@@ -300,9 +296,9 @@ intel_frontbuffer_get(struct drm_gem_object *obj)
I915_ACTIVE_RETIRE_SLEEPS);
INIT_WORK(&front->flush_work, intel_frontbuffer_flush_work);
- spin_lock(&i915->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
cur = intel_bo_set_frontbuffer(obj, front);
- spin_unlock(&i915->display.fb_tracking.lock);
+ spin_unlock(&display->fb_tracking.lock);
if (cur != front)
kfree(front);
return cur;
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index 6237780a9f68..2fee12eaf9b6 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -31,7 +31,7 @@
#include "i915_active_types.h"
struct drm_gem_object;
-struct drm_i915_private;
+struct intel_display;
enum fb_op_origin {
ORIGIN_CPU = 0,
@@ -68,11 +68,11 @@ struct intel_frontbuffer {
GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
+void intel_frontbuffer_flip_prepare(struct intel_display *display,
unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
+void intel_frontbuffer_flip_complete(struct intel_display *display,
unsigned frontbuffer_bits);
-void intel_frontbuffer_flip(struct drm_i915_private *i915,
+void intel_frontbuffer_flip(struct intel_display *display,
unsigned frontbuffer_bits);
void intel_frontbuffer_put(struct intel_frontbuffer *front);
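With the header now taking intel_display, an asynchronous page-flip path brackets the flip as before, just with the new first argument; a sketch assuming the caller already holds a display pointer:

static void example_async_flip(struct intel_display *display,
                               unsigned int frontbuffer_bits)
{
    /* before arming the flip in hardware */
    intel_frontbuffer_flip_prepare(display, frontbuffer_bits);

    /* ... program and latch the flip ... */

    /* after the flip has been latched */
    intel_frontbuffer_flip_complete(display, frontbuffer_bits);
}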
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
index 8a49e2bb37fa..000a898c9480 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.c
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -3,10 +3,13 @@
* Copyright © 2020 Intel Corporation
*/
+#include <linux/pci.h>
#include <linux/string.h>
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "intel_atomic.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_global_state.h"
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index abf457e68ee9..d55cc77650b7 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -152,32 +152,31 @@ static const struct gmbus_pin gmbus_pins_mtp[] = {
static const struct gmbus_pin *get_gmbus_pin(struct intel_display *display,
unsigned int pin)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct gmbus_pin *pins;
size_t size;
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL) {
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL) {
pins = gmbus_pins_mtp;
size = ARRAY_SIZE(gmbus_pins_mtp);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_DG2) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_DG2) {
pins = gmbus_pins_dg2;
size = ARRAY_SIZE(gmbus_pins_dg2);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_DG1) {
pins = gmbus_pins_dg1;
size = ARRAY_SIZE(gmbus_pins_dg1);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_ICP) {
pins = gmbus_pins_icp;
size = ARRAY_SIZE(gmbus_pins_icp);
- } else if (HAS_PCH_CNP(i915)) {
+ } else if (HAS_PCH_CNP(display)) {
pins = gmbus_pins_cnp;
size = ARRAY_SIZE(gmbus_pins_cnp);
- } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
pins = gmbus_pins_bxt;
size = ARRAY_SIZE(gmbus_pins_bxt);
} else if (DISPLAY_VER(display) == 9) {
pins = gmbus_pins_skl;
size = ARRAY_SIZE(gmbus_pins_skl);
- } else if (IS_BROADWELL(i915)) {
+ } else if (display->platform.broadwell) {
pins = gmbus_pins_bdw;
size = ARRAY_SIZE(gmbus_pins_bdw);
} else {
@@ -240,11 +239,10 @@ static void bxt_gmbus_clock_gating(struct intel_display *display,
static u32 get_reserved(struct intel_gmbus *bus)
{
struct intel_display *display = bus->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
- if (!IS_I830(i915) && !IS_I845G(i915))
+ if (!display->platform.i830 && !display->platform.i845g)
reserved = intel_de_read_notrace(display, bus->gpio_reg) &
(GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE);
@@ -314,11 +312,10 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct intel_display *display = bus->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_gmbus_reset(display);
- if (IS_PINEVIEW(i915))
+ if (display->platform.pineview)
pnv_gmbus_clock_gating(display, false);
set_data(bus, 1);
@@ -332,12 +329,11 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct intel_display *display = bus->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
set_data(bus, 1);
set_clock(bus, 1);
- if (IS_PINEVIEW(i915))
+ if (display->platform.pineview)
pnv_gmbus_clock_gating(display, true);
}
@@ -630,14 +626,13 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct intel_display *display = bus->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
int i = 0, inc, try = 0;
int ret = 0;
/* Display WA #0868: skl,bxt,kbl,cfl,glk */
- if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ if (display->platform.geminilake || display->platform.broxton)
bxt_gmbus_clock_gating(display, false);
- else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
+ else if (HAS_PCH_SPT(display) || HAS_PCH_CNP(display))
pch_gmbus_clock_gating(display, false);
retry:
@@ -748,9 +743,9 @@ timeout:
out:
/* Display WA #0868: skl,bxt,kbl,cfl,glk */
- if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ if (display->platform.geminilake || display->platform.broxton)
bxt_gmbus_clock_gating(display, true);
- else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
+ else if (HAS_PCH_SPT(display) || HAS_PCH_CNP(display))
pch_gmbus_clock_gating(display, true);
return ret;
@@ -873,12 +868,11 @@ static const struct i2c_lock_operations gmbus_lock_ops = {
*/
int intel_gmbus_setup(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
unsigned int pin;
int ret;
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ if (display->platform.valleyview || display->platform.cherryview)
display->gmbus.mmio_base = VLV_DISPLAY_BASE;
else if (!HAS_GMCH(display))
/*
@@ -925,7 +919,7 @@ int intel_gmbus_setup(struct intel_display *display)
bus->reg0 = pin | GMBUS_RATE_100KHZ;
/* gmbus seems to be broken on i830 */
- if (IS_I830(i915))
+ if (display->platform.i830)
bus->force_bit = 1;
intel_gpio_setup(bus, GPIO(display, gmbus_pin->gpio));
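The gmbus hunks replace the IS_*() macros with the per-platform booleans cached on intel_display; the mapping is mechanical. A side-by-side sketch (the old form is shown for comparison only):

static bool example_needs_gmbus_quirk(struct intel_display *display)
{
    /* formerly: IS_GEMINILAKE(i915) || IS_BROXTON(i915) */
    return display->platform.geminilake || display->platform.broxton;
}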
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 1bf424a822f3..3e3038f4ee1f 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -22,13 +22,18 @@
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_dp_mst.h"
#include "intel_hdcp.h"
#include "intel_hdcp_gsc.h"
+#include "intel_hdcp_gsc_message.h"
#include "intel_hdcp_regs.h"
#include "intel_hdcp_shim.h"
#include "intel_pcode.h"
+#define USE_HDCP_GSC(__display) (DISPLAY_VER(__display) >= 14)
+
#define KEY_LOAD_TRIES 5
#define HDCP2_LC_RETRY_CNT 3
@@ -136,7 +141,7 @@ intel_hdcp_required_content_stream(struct intel_atomic_state *state,
data->k++;
/* if there is only one active stream */
- if (dig_port->dp.mst.active_links <= 1)
+ if (intel_dp_mst_active_streams(&dig_port->dp) <= 1)
break;
}
drm_connector_list_iter_end(&conn_iter);
@@ -248,8 +253,8 @@ static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
return false;
/* If MTL+ make sure gsc is loaded and proxy is set up */
- if (intel_hdcp_gsc_cs_required(display)) {
- if (!intel_hdcp_gsc_check_status(display))
+ if (USE_HDCP_GSC(display)) {
+ if (!intel_hdcp_gsc_check_status(display->drm))
return false;
}
@@ -334,9 +339,7 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
static bool hdcp_key_loadable(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
enum i915_power_well_id id;
- intel_wakeref_t wakeref;
bool enabled = false;
/*
@@ -349,7 +352,7 @@ static bool hdcp_key_loadable(struct intel_display *display)
id = SKL_DISP_PW_1;
/* PG1 (power well #1) needs to be enabled */
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_display_rpm(display)
enabled = intel_display_power_well_is_enabled(display, id);
/*
@@ -2339,7 +2342,7 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
static bool is_hdcp2_supported(struct intel_display *display)
{
- if (intel_hdcp_gsc_cs_required(display))
+ if (USE_HDCP_GSC(display))
return true;
if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
@@ -2363,7 +2366,7 @@ void intel_hdcp_component_init(struct intel_display *display)
display->hdcp.comp_added = true;
mutex_unlock(&display->hdcp.hdcp_mutex);
- if (intel_hdcp_gsc_cs_required(display))
+ if (USE_HDCP_GSC(display))
ret = intel_hdcp_gsc_init(display);
else
ret = component_add_typed(display->drm->dev, &i915_hdcp_ops,
@@ -2638,7 +2641,7 @@ void intel_hdcp_component_fini(struct intel_display *display)
display->hdcp.comp_added = false;
mutex_unlock(&display->hdcp.hdcp_mutex);
- if (intel_hdcp_gsc_cs_required(display))
+ if (USE_HDCP_GSC(display))
intel_hdcp_gsc_fini(display);
else
component_del(display->drm->dev, &i915_hdcp_ops);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
index 55965844d829..6a22862d6be1 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
@@ -11,27 +11,22 @@
#include "i915_drv.h"
#include "i915_utils.h"
#include "intel_hdcp_gsc.h"
-#include "intel_hdcp_gsc_message.h"
-struct intel_hdcp_gsc_message {
+struct intel_hdcp_gsc_context {
+ struct drm_i915_private *i915;
struct i915_vma *vma;
void *hdcp_cmd_in;
void *hdcp_cmd_out;
};
-bool intel_hdcp_gsc_cs_required(struct intel_display *display)
+bool intel_hdcp_gsc_check_status(struct drm_device *drm)
{
- return DISPLAY_VER(display) >= 14;
-}
-
-bool intel_hdcp_gsc_check_status(struct intel_display *display)
-{
- struct drm_i915_private *i915 = to_i915(display->drm);
+ struct drm_i915_private *i915 = to_i915(drm);
struct intel_gt *gt = i915->media_gt;
struct intel_gsc_uc *gsc = gt ? &gt->uc.gsc : NULL;
if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) {
- drm_dbg_kms(display->drm,
+ drm_dbg_kms(&i915->drm,
"GSC components required for HDCP2.2 are not ready\n");
return false;
}
@@ -41,7 +36,7 @@ bool intel_hdcp_gsc_check_status(struct intel_display *display)
/* This function helps allocate memory for the command that we will send to gsc cs */
static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
- struct intel_hdcp_gsc_message *hdcp_message)
+ struct intel_hdcp_gsc_context *gsc_context)
{
struct intel_gt *gt = i915->media_gt;
struct drm_i915_gem_object *obj = NULL;
@@ -78,9 +73,10 @@ static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
memset(cmd_in, 0, obj->base.size);
- hdcp_message->hdcp_cmd_in = cmd_in;
- hdcp_message->hdcp_cmd_out = cmd_out;
- hdcp_message->vma = vma;
+ gsc_context->hdcp_cmd_in = cmd_in;
+ gsc_context->hdcp_cmd_out = cmd_out;
+ gsc_context->vma = vma;
+ gsc_context->i915 = i915;
return 0;
@@ -91,80 +87,37 @@ out_unpin:
return err;
}
-static const struct i915_hdcp_ops gsc_hdcp_ops = {
- .initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
- .verify_receiver_cert_prepare_km =
- intel_hdcp_gsc_verify_receiver_cert_prepare_km,
- .verify_hprime = intel_hdcp_gsc_verify_hprime,
- .store_pairing_info = intel_hdcp_gsc_store_pairing_info,
- .initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
- .verify_lprime = intel_hdcp_gsc_verify_lprime,
- .get_session_key = intel_hdcp_gsc_get_session_key,
- .repeater_check_flow_prepare_ack =
- intel_hdcp_gsc_repeater_check_flow_prepare_ack,
- .verify_mprime = intel_hdcp_gsc_verify_mprime,
- .enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
- .close_hdcp_session = intel_hdcp_gsc_close_session,
-};
-
-static int intel_hdcp_gsc_hdcp2_init(struct intel_display *display)
+struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
- struct intel_hdcp_gsc_message *hdcp_message;
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct intel_hdcp_gsc_context *gsc_context;
int ret;
- hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);
-
- if (!hdcp_message)
- return -ENOMEM;
+ gsc_context = kzalloc(sizeof(*gsc_context), GFP_KERNEL);
+ if (!gsc_context)
+ return ERR_PTR(-ENOMEM);
/*
* NOTE: No need to lock the comp mutex here as it is already
* going to be taken before this function is called
*/
- display->hdcp.hdcp_message = hdcp_message;
- ret = intel_hdcp_gsc_initialize_message(i915, hdcp_message);
-
- if (ret)
- drm_err(display->drm, "Could not initialize hdcp_message\n");
-
- return ret;
-}
-
-static void intel_hdcp_gsc_free_message(struct intel_display *display)
-{
- struct intel_hdcp_gsc_message *hdcp_message =
- display->hdcp.hdcp_message;
+ ret = intel_hdcp_gsc_initialize_message(i915, gsc_context);
+ if (ret) {
+ drm_err(&i915->drm, "Could not initialize gsc_context\n");
+ kfree(gsc_context);
+ gsc_context = ERR_PTR(ret);
+ }
- hdcp_message->hdcp_cmd_in = NULL;
- hdcp_message->hdcp_cmd_out = NULL;
- i915_vma_unpin_and_release(&hdcp_message->vma, I915_VMA_RELEASE_MAP);
- kfree(hdcp_message);
+ return gsc_context;
}
-int intel_hdcp_gsc_init(struct intel_display *display)
+void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context)
{
- struct i915_hdcp_arbiter *data;
- int ret;
-
- data = kzalloc(sizeof(struct i915_hdcp_arbiter), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ if (!gsc_context)
+ return;
- mutex_lock(&display->hdcp.hdcp_mutex);
- display->hdcp.arbiter = data;
- display->hdcp.arbiter->hdcp_dev = display->drm->dev;
- display->hdcp.arbiter->ops = &gsc_hdcp_ops;
- ret = intel_hdcp_gsc_hdcp2_init(display);
- mutex_unlock(&display->hdcp.hdcp_mutex);
-
- return ret;
-}
-
-void intel_hdcp_gsc_fini(struct intel_display *display)
-{
- intel_hdcp_gsc_free_message(display);
- kfree(display->hdcp.arbiter);
+ i915_vma_unpin_and_release(&gsc_context->vma, I915_VMA_RELEASE_MAP);
+ kfree(gsc_context);
}
static int intel_gsc_send_sync(struct drm_i915_private *i915,
@@ -211,18 +164,18 @@ static int intel_gsc_send_sync(struct drm_i915_private *i915,
/*
* This function can now be used for sending requests and will also handle
* receipt of reply messages, hence no different function of message retrieval
- * is required. We will initialize intel_hdcp_gsc_message structure then add
+ * is required. We will initialize the intel_hdcp_gsc_context structure, then add
* gsc cs memory header as stated in specs after which the normal HDCP payload
* will follow
*/
-ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
- size_t msg_in_len, u8 *msg_out,
- size_t msg_out_len)
+ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len)
{
+ struct drm_i915_private *i915 = gsc_context->i915;
struct intel_gt *gt = i915->media_gt;
struct intel_gsc_mtl_header *header_in, *header_out;
const size_t max_msg_size = PAGE_SIZE - sizeof(*header_in);
- struct intel_hdcp_gsc_message *hdcp_message;
u64 addr_in, addr_out, host_session_id;
u32 reply_size, msg_size_in, msg_size_out;
int ret, tries = 0;
@@ -235,10 +188,9 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
msg_size_in = msg_in_len + sizeof(*header_in);
msg_size_out = msg_out_len + sizeof(*header_out);
- hdcp_message = i915->display.hdcp.hdcp_message;
- header_in = hdcp_message->hdcp_cmd_in;
- header_out = hdcp_message->hdcp_cmd_out;
- addr_in = i915_ggtt_offset(hdcp_message->vma);
+ header_in = gsc_context->hdcp_cmd_in;
+ header_out = gsc_context->hdcp_cmd_out;
+ addr_in = i915_ggtt_offset(gsc_context->vma);
addr_out = addr_in + PAGE_SIZE;
memset(header_in, 0, msg_size_in);
@@ -246,7 +198,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
get_random_bytes(&host_session_id, sizeof(u64));
intel_gsc_uc_heci_cmd_emit_mtl_header(header_in, HECI_MEADDRESS_HDCP,
msg_size_in, host_session_id);
- memcpy(hdcp_message->hdcp_cmd_in + sizeof(*header_in), msg_in, msg_in_len);
+ memcpy(gsc_context->hdcp_cmd_in + sizeof(*header_in), msg_in, msg_in_len);
/*
* Keep sending the request in case the pending bit is set; no need to add
@@ -280,7 +232,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
reply_size, (u32)msg_out_len);
}
- memcpy(msg_out, hdcp_message->hdcp_cmd_out + sizeof(*header_out), msg_out_len);
+ memcpy(msg_out, gsc_context->hdcp_cmd_out + sizeof(*header_out), msg_out_len);
err:
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
index 5695a5e4f609..9305c14aaffe 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
@@ -6,19 +6,17 @@
#ifndef __INTEL_HDCP_GSC_H__
#define __INTEL_HDCP_GSC_H__
-#include <linux/err.h>
#include <linux/types.h>
-struct drm_i915_private;
-struct intel_display;
-struct intel_hdcp_gsc_message;
+struct drm_device;
+struct intel_hdcp_gsc_context;
-bool intel_hdcp_gsc_cs_required(struct intel_display *display);
-ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
- size_t msg_in_len, u8 *msg_out,
- size_t msg_out_len);
-int intel_hdcp_gsc_init(struct intel_display *display);
-void intel_hdcp_gsc_fini(struct intel_display *display);
-bool intel_hdcp_gsc_check_status(struct intel_display *display);
+ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len);
+bool intel_hdcp_gsc_check_status(struct drm_device *drm);
+
+struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm);
+void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context);
#endif /* __INTEL_HDCP_GSC_H__ */
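The new header boils the GSC transport down to three entry points; a sketch of the intended lifecycle, with error handling abbreviated and the request/response payloads purely hypothetical:

static int example_gsc_roundtrip(struct drm_device *drm)
{
    struct intel_hdcp_gsc_context *ctx;
    u8 req[64] = {}, rsp[64] = {};	/* placeholder payloads */
    ssize_t ret;

    if (!intel_hdcp_gsc_check_status(drm))
        return -ENODEV;

    ctx = intel_hdcp_gsc_context_alloc(drm);
    if (IS_ERR(ctx))
        return PTR_ERR(ctx);

    ret = intel_hdcp_gsc_msg_send(ctx, req, sizeof(req), rsp, sizeof(rsp));

    intel_hdcp_gsc_context_free(ctx);
    return ret < 0 ? ret : 0;
}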
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
index 129104fa9b16..98967bb148e3 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
@@ -4,20 +4,23 @@
*/
#include <linux/err.h>
+
+#include <drm/drm_print.h>
#include <drm/intel/i915_hdcp_interface.h>
-#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
+#include "intel_hdcp_gsc.h"
#include "intel_hdcp_gsc_message.h"
-int
+static int
intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_init *ake_data)
{
struct wired_cmd_initiate_hdcp2_session_in session_init_in = {};
struct wired_cmd_initiate_hdcp2_session_out session_init_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !ake_data)
@@ -28,7 +31,7 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
session_init_in.header.api_version = HDCP_API_VERSION;
session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION;
@@ -41,9 +44,9 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
session_init_in.protocol = data->protocol;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_init_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &session_init_in,
sizeof(session_init_in),
- (u8 *)&session_init_out,
+ &session_init_out,
sizeof(session_init_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -64,7 +67,7 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
return 0;
}
-int
+static int
intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_ake_send_cert *rx_cert,
@@ -75,8 +78,8 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
{
struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = {};
struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz)
@@ -87,7 +90,7 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
verify_rxcert_in.header.api_version = HDCP_API_VERSION;
verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT;
@@ -103,9 +106,9 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN);
memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_rxcert_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &verify_rxcert_in,
sizeof(verify_rxcert_in),
- (u8 *)&verify_rxcert_out,
+ &verify_rxcert_out,
sizeof(verify_rxcert_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte);
@@ -134,14 +137,14 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
return 0;
}
-int
+static int
intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_send_hprime *rx_hprime)
{
struct wired_cmd_ake_send_hprime_in send_hprime_in = {};
struct wired_cmd_ake_send_hprime_out send_hprime_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_hprime)
@@ -152,7 +155,7 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
send_hprime_in.header.api_version = HDCP_API_VERSION;
send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME;
@@ -166,9 +169,9 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
memcpy(send_hprime_in.h_prime, rx_hprime->h_prime,
HDCP_2_2_H_PRIME_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&send_hprime_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &send_hprime_in,
sizeof(send_hprime_in),
- (u8 *)&send_hprime_out,
+ &send_hprime_out,
sizeof(send_hprime_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -184,14 +187,14 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
return 0;
}
-int
+static int
intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_send_pairing_info *pairing_info)
{
struct wired_cmd_ake_send_pairing_info_in pairing_info_in = {};
struct wired_cmd_ake_send_pairing_info_out pairing_info_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !pairing_info)
@@ -202,7 +205,7 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
pairing_info_in.header.api_version = HDCP_API_VERSION;
pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO;
@@ -217,9 +220,9 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat
memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km,
HDCP_2_2_E_KH_KM_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&pairing_info_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &pairing_info_in,
sizeof(pairing_info_in),
- (u8 *)&pairing_info_out,
+ &pairing_info_out,
sizeof(pairing_info_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -236,15 +239,15 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat
return 0;
}
-int
+static int
intel_hdcp_gsc_initiate_locality_check(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_lc_init *lc_init_data)
{
struct wired_cmd_init_locality_check_in lc_init_in = {};
struct wired_cmd_init_locality_check_out lc_init_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !lc_init_data)
@@ -255,7 +258,7 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
lc_init_in.header.api_version = HDCP_API_VERSION;
lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK;
@@ -266,8 +269,8 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev,
lc_init_in.port.physical_port = (u8)data->hdcp_ddi;
lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&lc_init_in, sizeof(lc_init_in),
- (u8 *)&lc_init_out, sizeof(lc_init_out));
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &lc_init_in, sizeof(lc_init_in),
+ &lc_init_out, sizeof(lc_init_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
@@ -285,14 +288,14 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev,
return 0;
}
-int
+static int
intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_lc_send_lprime *rx_lprime)
{
struct wired_cmd_validate_locality_in verify_lprime_in = {};
struct wired_cmd_validate_locality_out verify_lprime_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_lprime)
@@ -303,7 +306,7 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
verify_lprime_in.header.api_version = HDCP_API_VERSION;
verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY;
@@ -318,9 +321,9 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime,
HDCP_2_2_L_PRIME_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_lprime_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &verify_lprime_in,
sizeof(verify_lprime_in),
- (u8 *)&verify_lprime_out,
+ &verify_lprime_out,
sizeof(verify_lprime_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -337,14 +340,15 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
return 0;
}
-int intel_hdcp_gsc_get_session_key(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ske_send_eks *ske_data)
+static int
+intel_hdcp_gsc_get_session_key(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ske_send_eks *ske_data)
{
struct wired_cmd_get_session_key_in get_skey_in = {};
struct wired_cmd_get_session_key_out get_skey_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !ske_data)
@@ -355,7 +359,7 @@ int intel_hdcp_gsc_get_session_key(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
get_skey_in.header.api_version = HDCP_API_VERSION;
get_skey_in.header.command_id = WIRED_GET_SESSION_KEY;
@@ -366,8 +370,8 @@ int intel_hdcp_gsc_get_session_key(struct device *dev,
get_skey_in.port.physical_port = (u8)data->hdcp_ddi;
get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&get_skey_in, sizeof(get_skey_in),
- (u8 *)&get_skey_out, sizeof(get_skey_out));
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &get_skey_in, sizeof(get_skey_in),
+ &get_skey_out, sizeof(get_skey_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
@@ -387,7 +391,7 @@ int intel_hdcp_gsc_get_session_key(struct device *dev,
return 0;
}
-int
+static int
intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_rep_send_receiverid_list
@@ -397,8 +401,8 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
{
struct wired_cmd_verify_repeater_in verify_repeater_in = {};
struct wired_cmd_verify_repeater_out verify_repeater_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !rep_topology || !rep_send_ack || !data)
@@ -409,7 +413,7 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
verify_repeater_in.header.api_version = HDCP_API_VERSION;
verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER;
@@ -430,9 +434,9 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids,
HDCP_2_2_RECEIVER_IDS_MAX_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_repeater_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &verify_repeater_in,
sizeof(verify_repeater_in),
- (u8 *)&verify_repeater_out,
+ &verify_repeater_out,
sizeof(verify_repeater_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -453,14 +457,15 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
return 0;
}
-int intel_hdcp_gsc_verify_mprime(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_rep_stream_ready *stream_ready)
+static int
+intel_hdcp_gsc_verify_mprime(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_stream_ready *stream_ready)
{
struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in;
struct wired_cmd_repeater_auth_stream_req_out verify_mprime_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
size_t cmd_size;
@@ -472,7 +477,7 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
cmd_size = struct_size(verify_mprime_in, streams, data->k);
if (cmd_size == SIZE_MAX)
@@ -499,8 +504,8 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
verify_mprime_in->k = cpu_to_be16(data->k);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)verify_mprime_in, cmd_size,
- (u8 *)&verify_mprime_out,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, verify_mprime_in, cmd_size,
+ &verify_mprime_out,
sizeof(verify_mprime_out));
kfree(verify_mprime_in);
if (byte < 0) {
@@ -518,13 +523,13 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
return 0;
}
-int intel_hdcp_gsc_enable_authentication(struct device *dev,
- struct hdcp_port_data *data)
+static int intel_hdcp_gsc_enable_authentication(struct device *dev,
+ struct hdcp_port_data *data)
{
struct wired_cmd_enable_auth_in enable_auth_in = {};
struct wired_cmd_enable_auth_out enable_auth_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data)
@@ -535,7 +540,7 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
enable_auth_in.header.api_version = HDCP_API_VERSION;
enable_auth_in.header.command_id = WIRED_ENABLE_AUTH;
@@ -547,9 +552,9 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev,
enable_auth_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
enable_auth_in.stream_type = data->streams[0].stream_type;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&enable_auth_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &enable_auth_in,
sizeof(enable_auth_in),
- (u8 *)&enable_auth_out,
+ &enable_auth_out,
sizeof(enable_auth_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -565,13 +570,13 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev,
return 0;
}
-int
+static int
intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
{
struct wired_cmd_close_session_in session_close_in = {};
struct wired_cmd_close_session_out session_close_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data)
@@ -582,7 +587,7 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
session_close_in.header.api_version = HDCP_API_VERSION;
session_close_in.header.command_id = WIRED_CLOSE_SESSION;
@@ -594,9 +599,9 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
session_close_in.port.physical_port = (u8)data->hdcp_ddi;
session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_close_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &session_close_in,
sizeof(session_close_in),
- (u8 *)&session_close_out,
+ &session_close_out,
sizeof(session_close_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -611,3 +616,57 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
return 0;
}
+
+static const struct i915_hdcp_ops gsc_hdcp_ops = {
+ .initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
+ .verify_receiver_cert_prepare_km =
+ intel_hdcp_gsc_verify_receiver_cert_prepare_km,
+ .verify_hprime = intel_hdcp_gsc_verify_hprime,
+ .store_pairing_info = intel_hdcp_gsc_store_pairing_info,
+ .initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
+ .verify_lprime = intel_hdcp_gsc_verify_lprime,
+ .get_session_key = intel_hdcp_gsc_get_session_key,
+ .repeater_check_flow_prepare_ack =
+ intel_hdcp_gsc_repeater_check_flow_prepare_ack,
+ .verify_mprime = intel_hdcp_gsc_verify_mprime,
+ .enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
+ .close_hdcp_session = intel_hdcp_gsc_close_session,
+};
+
+int intel_hdcp_gsc_init(struct intel_display *display)
+{
+ struct intel_hdcp_gsc_context *gsc_context;
+ struct i915_hdcp_arbiter *arbiter;
+ int ret = 0;
+
+ arbiter = kzalloc(sizeof(*arbiter), GFP_KERNEL);
+ if (!arbiter)
+ return -ENOMEM;
+
+ mutex_lock(&display->hdcp.hdcp_mutex);
+
+ gsc_context = intel_hdcp_gsc_context_alloc(display->drm);
+ if (IS_ERR(gsc_context)) {
+ ret = PTR_ERR(gsc_context);
+ kfree(arbiter);
+ goto out;
+ }
+
+ display->hdcp.arbiter = arbiter;
+ display->hdcp.arbiter->hdcp_dev = display->drm->dev;
+ display->hdcp.arbiter->ops = &gsc_hdcp_ops;
+ display->hdcp.gsc_context = gsc_context;
+
+out:
+ mutex_unlock(&display->hdcp.hdcp_mutex);
+
+ return ret;
+}
+
+void intel_hdcp_gsc_fini(struct intel_display *display)
+{
+ intel_hdcp_gsc_context_free(display->hdcp.gsc_context);
+ display->hdcp.gsc_context = NULL;
+ kfree(display->hdcp.arbiter);
+ display->hdcp.arbiter = NULL;
+}
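
With the ops table and the init/fini pair above, the GSC backend becomes self-contained: intel_hdcp_gsc_init() publishes the arbiter and context to display->hdcp only when both allocations succeed, and intel_hdcp_gsc_fini() tears them down again. A hedged sketch of the expected caller shape; the bind/unbind functions below are hypothetical, not part of this patch:

/* Hypothetical caller bracketing the display's HDCP lifetime. */
static int demo_hdcp_backend_bind(struct intel_display *display)
{
	int ret;

	ret = intel_hdcp_gsc_init(display);
	if (ret) {
		drm_dbg_kms(display->drm, "HDCP GSC init failed: %d\n", ret);
		return ret;
	}

	/* HDCP 2.x flows can now dispatch through gsc_hdcp_ops. */
	return 0;
}

static void demo_hdcp_backend_unbind(struct intel_display *display)
{
	/* Called only after a successful bind in this sketch. */
	intel_hdcp_gsc_fini(display);
}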
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h
index 2d597f27e931..9f54157a4a3e 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h
@@ -6,68 +6,9 @@
#ifndef __INTEL_HDCP_GSC_MESSAGE_H__
#define __INTEL_HDCP_GSC_MESSAGE_H__
-#include <linux/types.h>
-
-struct device;
-struct drm_i915_private;
-struct hdcp_port_data;
-struct hdcp2_ake_init;
-struct hdcp2_ake_send_cert;
-struct hdcp2_ake_no_stored_km;
-struct hdcp2_ake_send_hprime;
-struct hdcp2_ake_send_pairing_info;
-struct hdcp2_lc_init;
-struct hdcp2_lc_send_lprime;
-struct hdcp2_ske_send_eks;
-struct hdcp2_rep_send_receiverid_list;
-struct hdcp2_rep_send_ack;
-struct hdcp2_rep_stream_ready;
struct intel_display;
-ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
- size_t msg_in_len, u8 *msg_out,
- size_t msg_out_len);
-bool intel_hdcp_gsc_check_status(struct intel_display *display);
-int
-intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
- struct hdcp2_ake_init *ake_data);
-int
-intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ake_send_cert *rx_cert,
- bool *km_stored,
- struct hdcp2_ake_no_stored_km
- *ek_pub_km,
- size_t *msg_sz);
-int
-intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
- struct hdcp2_ake_send_hprime *rx_hprime);
-int
-intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
- struct hdcp2_ake_send_pairing_info *pairing_info);
-int
-intel_hdcp_gsc_initiate_locality_check(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_lc_init *lc_init_data);
-int
-intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
- struct hdcp2_lc_send_lprime *rx_lprime);
-int intel_hdcp_gsc_get_session_key(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ske_send_eks *ske_data);
-int
-intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_rep_send_receiverid_list
- *rep_topology,
- struct hdcp2_rep_send_ack
- *rep_send_ack);
-int intel_hdcp_gsc_verify_mprime(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_rep_stream_ready *stream_ready);
-int intel_hdcp_gsc_enable_authentication(struct device *dev,
- struct hdcp_port_data *data);
-int
-intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data);
+int intel_hdcp_gsc_init(struct intel_display *display);
+void intel_hdcp_gsc_fini(struct intel_display *display);
#endif /* __INTEL_HDCP_GSC_MESSAGE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 33b8d5229db0..98033471902c 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -38,14 +38,15 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/intel/intel_lpe_audio.h>
#include <media/cec-notifier.h>
#include "g4x_hdmi.h"
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
@@ -64,6 +65,7 @@
#include "intel_panel.h"
#include "intel_pfit.h"
#include "intel_snps_phy.h"
+#include "intel_vrr.h"
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
@@ -714,7 +716,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- struct drm_connector *connector = conn_state->connector;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
int ret;
if (!crtc_state->has_infoframe)
@@ -723,7 +725,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
crtc_state->infoframes.enable |=
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
- ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector,
+ ret = drm_hdmi_avi_infoframe_from_display_mode(frame, &connector->base,
adjusted_mode);
if (ret)
return false;
@@ -742,7 +744,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
- drm_hdmi_avi_infoframe_quant_range(frame, connector,
+ drm_hdmi_avi_infoframe_quant_range(frame, &connector->base,
adjusted_mode,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
@@ -768,7 +770,7 @@ intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct hdmi_spd_infoframe *frame = &crtc_state->infoframes.spd.spd;
int ret;
@@ -778,7 +780,7 @@ intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
crtc_state->infoframes.enable |=
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD);
- if (IS_DGFX(i915))
+ if (display->platform.dgfx)
ret = hdmi_spd_infoframe_init(frame, "Intel", "Discrete gfx");
else
ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx");
@@ -978,7 +980,6 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
@@ -988,9 +989,9 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
if (HAS_DDI(display))
reg = HSW_TVIDEO_DIP_GCP(display, crtc_state->cpu_transcoder);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ else if (display->platform.valleyview || display->platform.cherryview)
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(display))
reg = TVIDEO_DIP_GCP(crtc->pipe);
else
return false;
@@ -1004,7 +1005,6 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
@@ -1014,9 +1014,9 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
if (HAS_DDI(display))
reg = HSW_TVIDEO_DIP_GCP(display, crtc_state->cpu_transcoder);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ else if (display->platform.valleyview || display->platform.cherryview)
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(display))
reg = TVIDEO_DIP_GCP(crtc->pipe);
else
return;
@@ -1028,9 +1028,9 @@ static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (IS_G4X(dev_priv) || !crtc_state->has_infoframe)
+ if (display->platform.g4x || !crtc_state->has_infoframe)
return;
crtc_state->infoframes.enable |=
@@ -1538,7 +1538,6 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
struct intel_display *display = to_intel_display(dig_port);
struct intel_hdmi *hdmi = &dig_port->hdmi;
struct intel_connector *connector = hdmi->attached_connector;
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int ret;
if (!enable)
@@ -1557,7 +1556,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
* WA: To fix incorrect positioning of the window of
* opportunity and enc_en signalling in KABYLAKE.
*/
- if (IS_KABYLAKE(dev_priv) && enable)
+ if (display->platform.kabylake && enable)
return kbl_repositioning_enc_en_signal(connector,
cpu_transcoder);
@@ -1569,7 +1568,6 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
int ret;
@@ -1582,15 +1580,15 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
if (ret)
return false;
- intel_de_write(i915, HDCP_RPRIME(i915, cpu_transcoder, port), ri.reg);
+ intel_de_write(display, HDCP_RPRIME(display, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
+ if (wait_for((intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
drm_dbg_kms(display->drm, "Ri' mismatch detected (%x)\n",
- intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder,
- port)));
+ intel_de_read(display, HDCP_STATUS(display, cpu_transcoder,
+ port)));
return false;
}
return true;
@@ -1813,14 +1811,13 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int max_tmds_clock, vbt_max_tmds_clock;
- if (DISPLAY_VER(display) >= 13 || IS_ALDERLAKE_S(dev_priv))
+ if (DISPLAY_VER(display) >= 13 || display->platform.alderlake_s)
max_tmds_clock = 600000;
else if (DISPLAY_VER(display) >= 10)
max_tmds_clock = 594000;
- else if (DISPLAY_VER(display) >= 8 || IS_HASWELL(dev_priv))
+ else if (DISPLAY_VER(display) >= 8 || display->platform.haswell)
max_tmds_clock = 300000;
else if (DISPLAY_VER(display) >= 5)
max_tmds_clock = 225000;
@@ -1879,7 +1876,6 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
bool has_hdmi_sink)
{
struct intel_display *display = to_intel_display(hdmi);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
if (clock < 25000)
@@ -1889,16 +1885,16 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_CLOCK_HIGH;
/* GLK DPLL can't generate 446-480 MHz */
- if (IS_GEMINILAKE(dev_priv) && clock > 446666 && clock < 480000)
+ if (display->platform.geminilake && clock > 446666 && clock < 480000)
return MODE_CLOCK_RANGE;
/* BXT/GLK DPLL can't generate 223-240 MHz */
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
clock > 223333 && clock < 240000)
return MODE_CLOCK_RANGE;
/* CHV DPLL can't generate 216-240 MHz */
- if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000)
+ if (display->platform.cherryview && clock > 216000 && clock < 240000)
return MODE_CLOCK_RANGE;
/* ICL+ combo PHY PLL can't generate 500-533.2 MHz */
@@ -1942,11 +1938,12 @@ static bool intel_hdmi_source_bpc_possible(struct intel_display *display, int bp
}
}
-static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
+static bool intel_hdmi_sink_bpc_possible(struct drm_connector *_connector,
int bpc, bool has_hdmi_sink,
enum intel_output_format sink_format)
{
- const struct drm_display_info *info = &connector->display_info;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ const struct drm_display_info *info = &connector->base.display_info;
const struct drm_hdmi_info *hdmi = &info->hdmi;
switch (bpc) {
@@ -1975,12 +1972,13 @@ static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
}
static enum drm_mode_status
-intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
+intel_hdmi_mode_clock_valid(struct drm_connector *_connector, int clock,
bool has_hdmi_sink,
enum intel_output_format sink_format)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
enum drm_mode_status status = MODE_OK;
int bpc;
@@ -1995,7 +1993,8 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
if (!intel_hdmi_source_bpc_possible(display, bpc))
continue;
- if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, sink_format))
+ if (!intel_hdmi_sink_bpc_possible(&connector->base, bpc, has_hdmi_sink,
+ sink_format))
continue;
status = hdmi_port_clock_valid(hdmi, tmds_clock, true, has_hdmi_sink);
@@ -2010,15 +2009,16 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
}
static enum drm_mode_status
-intel_hdmi_mode_valid(struct drm_connector *connector,
+intel_hdmi_mode_valid(struct drm_connector *_connector,
const struct drm_display_mode *mode)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
enum drm_mode_status status;
int clock = mode->clock;
- int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
- bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
+ int max_dotclk = display->cdclk.max_dotclk_freq;
+ bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->base.state);
bool ycbcr_420_only;
enum intel_output_format sink_format;
@@ -2047,22 +2047,23 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
if (clock > 600000)
return MODE_CLOCK_HIGH;
- ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, mode);
+ ycbcr_420_only = drm_mode_is_420_only(&connector->base.display_info, mode);
if (ycbcr_420_only)
sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
else
sink_format = INTEL_OUTPUT_FORMAT_RGB;
- status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, sink_format);
+ status = intel_hdmi_mode_clock_valid(&connector->base, clock, has_hdmi_sink, sink_format);
if (status != MODE_OK) {
if (ycbcr_420_only ||
- !connector->ycbcr_420_allowed ||
- !drm_mode_is_420_also(&connector->display_info, mode))
+ !connector->base.ycbcr_420_allowed ||
+ !drm_mode_is_420_also(&connector->base.display_info, mode))
return status;
sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
- status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, sink_format);
+ status = intel_hdmi_mode_clock_valid(&connector->base, clock, has_hdmi_sink,
+ sink_format);
if (status != MODE_OK)
return status;
}
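
The retry above encodes the usual HDMI bandwidth escape hatch: validate the mode in RGB first, and only when no bpc fits, retry as YCbCr 4:2:0 if the mode is 420-also and the connector allows it. As a rough worked example (the 4:2:0 halving and bpc/8 scaling below are general HDMI assumptions, not constants quoted from this file): a 594000 kHz 4K60 mode at 12 bpc RGB needs 594000 * 12 / 8 = 891000 kHz of TMDS clock, beyond every source limit listed earlier, while 4:2:0 halves the pixel rate to 297000 kHz, i.e. 445500 kHz at 12 bpc, back under the 600000 kHz ceiling:

/* Back-of-envelope TMDS clock estimate, kHz in and out. */
static int demo_tmds_clock(int pixel_clock_khz, int bpc, bool ycbcr420)
{
	if (ycbcr420)
		pixel_clock_khz /= 2;	/* 4:2:0 halves the TMDS rate */

	return pixel_clock_khz * bpc / 8;
}

/*
 * demo_tmds_clock(594000, 12, false) -> 891000 kHz: rejected.
 * demo_tmds_clock(594000, 12, true)  -> 445500 kHz: accepted.
 */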
@@ -2073,16 +2074,16 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state,
int bpc, bool has_hdmi_sink)
{
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct drm_connector_state *connector_state;
- struct drm_connector *connector;
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_digital_connector_state *connector_state;
+ struct intel_connector *connector;
int i;
- for_each_new_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc_state->uapi.crtc)
+ for_each_new_intel_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->base.crtc != crtc_state->uapi.crtc)
continue;
- if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink,
+ if (!intel_hdmi_sink_bpc_possible(&connector->base, bpc, has_hdmi_sink,
crtc_state->sink_format))
return false;
}
@@ -2210,7 +2211,7 @@ static bool intel_hdmi_has_audio(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_connector *connector = conn_state->connector;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
@@ -2218,7 +2219,7 @@ static bool intel_hdmi_has_audio(struct intel_encoder *encoder,
return false;
if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
- return connector->display_info.has_audio;
+ return connector->base.display_info.has_audio;
else
return intel_conn_state->force_audio == HDMI_AUDIO_ON;
}
@@ -2322,14 +2323,14 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- struct drm_connector *connector = conn_state->connector;
- struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_scdc *scdc = &connector->base.display_info.hdmi.scdc;
int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- if (!connector->interlace_allowed &&
+ if (!connector->base.interlace_allowed &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return -EINVAL;
@@ -2384,6 +2385,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
}
}
+ intel_vrr_compute_config(pipe_config, conn_state);
+
intel_hdmi_compute_gcp_infoframe(encoder, pipe_config,
conn_state);
@@ -2422,25 +2425,26 @@ void intel_hdmi_encoder_shutdown(struct intel_encoder *encoder)
}
static void
-intel_hdmi_unset_edid(struct drm_connector *connector)
+intel_hdmi_unset_edid(struct drm_connector *_connector)
{
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE;
intel_hdmi->dp_dual_mode.max_tmds_clock = 0;
- drm_edid_free(to_intel_connector(connector)->detect_edid);
- to_intel_connector(connector)->detect_edid = NULL;
+ drm_edid_free(connector->detect_edid);
+ connector->detect_edid = NULL;
}
static void
-intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
+intel_hdmi_dp_dual_mode_detect(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
- struct i2c_adapter *ddc = connector->ddc;
+ struct i2c_adapter *ddc = connector->base.ddc;
enum drm_dp_dual_mode_type type;
type = drm_dp_dual_mode_detect(display->drm, ddc);
@@ -2455,7 +2459,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
* if the port is a dual mode capable DP port.
*/
if (type == DRM_DP_DUAL_MODE_UNKNOWN) {
- if (!connector->force &&
+ if (!connector->base.force &&
intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) {
drm_dbg_kms(display->drm,
"Assuming DP dual mode adaptor presence based on VBT\n");
@@ -2478,7 +2482,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
hdmi->dp_dual_mode.max_tmds_clock);
/* Older VBTs are often buggy and can't be trusted :( Play it safe. */
- if ((DISPLAY_VER(display) >= 8 || IS_HASWELL(dev_priv)) &&
+ if ((DISPLAY_VER(display) >= 8 || display->platform.haswell) &&
!intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) {
drm_dbg_kms(display->drm,
"Ignoring DP dual mode adaptor max TMDS clock for native HDMI port\n");
@@ -2487,34 +2491,35 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
}
static bool
-intel_hdmi_set_edid(struct drm_connector *connector)
+intel_hdmi_set_edid(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
- struct i2c_adapter *ddc = connector->ddc;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct i2c_adapter *ddc = connector->base.ddc;
intel_wakeref_t wakeref;
const struct drm_edid *drm_edid;
bool connected = false;
wakeref = intel_display_power_get(display, POWER_DOMAIN_GMBUS);
- drm_edid = drm_edid_read_ddc(connector, ddc);
+ drm_edid = drm_edid_read_ddc(&connector->base, ddc);
if (!drm_edid && !intel_gmbus_is_forced_bit(ddc)) {
drm_dbg_kms(display->drm,
"HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(ddc, true);
- drm_edid = drm_edid_read_ddc(connector, ddc);
+ drm_edid = drm_edid_read_ddc(&connector->base, ddc);
intel_gmbus_force_bit(ddc, false);
}
/* Below we depend on display info having been updated */
- drm_edid_connector_update(connector, drm_edid);
+ drm_edid_connector_update(&connector->base, drm_edid);
- to_intel_connector(connector)->detect_edid = drm_edid;
+ connector->detect_edid = drm_edid;
if (drm_edid_is_digital(drm_edid)) {
- intel_hdmi_dp_dual_mode_detect(connector);
+ intel_hdmi_dp_dual_mode_detect(&connector->base);
connected = true;
}
@@ -2522,28 +2527,29 @@ intel_hdmi_set_edid(struct drm_connector *connector)
intel_display_power_put(display, POWER_DOMAIN_GMBUS, wakeref);
cec_notifier_set_phys_addr(intel_hdmi->cec_notifier,
- connector->display_info.source_physical_address);
+ connector->base.display_info.source_physical_address);
return connected;
}
static enum drm_connector_status
-intel_hdmi_detect(struct drm_connector *connector, bool force)
+intel_hdmi_detect(struct drm_connector *_connector, bool force)
{
- struct intel_display *display = to_intel_display(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
enum drm_connector_status status = connector_status_disconnected;
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
intel_wakeref_t wakeref;
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ connector->base.base.id, connector->base.name);
if (!intel_display_device_enabled(display))
return connector_status_disconnected;
if (!intel_display_driver_check_access(display))
- return connector->status;
+ return connector->base.status;
wakeref = intel_display_power_get(display, POWER_DOMAIN_GMBUS);
@@ -2551,9 +2557,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
!intel_digital_port_connected(encoder))
goto out;
- intel_hdmi_unset_edid(connector);
+ intel_hdmi_unset_edid(&connector->base);
- if (intel_hdmi_set_edid(connector))
+ if (intel_hdmi_set_edid(&connector->base))
status = connector_status_connected;
out:
@@ -2566,49 +2572,54 @@ out:
}
static void
-intel_hdmi_force(struct drm_connector *connector)
+intel_hdmi_force(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ connector->base.base.id, connector->base.name);
if (!intel_display_driver_check_access(display))
return;
- intel_hdmi_unset_edid(connector);
+ intel_hdmi_unset_edid(&connector->base);
- if (connector->status != connector_status_connected)
+ if (connector->base.status != connector_status_connected)
return;
- intel_hdmi_set_edid(connector);
+ intel_hdmi_set_edid(&connector->base);
}
-static int intel_hdmi_get_modes(struct drm_connector *connector)
+static int intel_hdmi_get_modes(struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
+
/* drm_edid_connector_update() done in ->detect() or ->force() */
- return drm_edid_connector_add_modes(connector);
+ return drm_edid_connector_add_modes(&connector->base);
}
static int
-intel_hdmi_connector_register(struct drm_connector *connector)
+intel_hdmi_connector_register(struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
int ret;
- ret = intel_connector_register(connector);
+ ret = intel_connector_register(&connector->base);
if (ret)
return ret;
return ret;
}
-static void intel_hdmi_connector_unregister(struct drm_connector *connector)
+static void intel_hdmi_connector_unregister(struct drm_connector *_connector)
{
- struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct cec_notifier *n = intel_attached_hdmi(connector)->cec_notifier;
cec_notifier_conn_unregister(n);
- intel_connector_unregister(connector);
+ intel_connector_unregister(&connector->base);
}
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
@@ -2624,15 +2635,16 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
-static int intel_hdmi_connector_atomic_check(struct drm_connector *connector,
+static int intel_hdmi_connector_atomic_check(struct drm_connector *_connector,
struct drm_atomic_state *state)
{
- struct intel_display *display = to_intel_display(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
if (HAS_DDI(display))
- return intel_digital_connector_atomic_check(connector, state);
+ return intel_digital_connector_atomic_check(&connector->base, state);
else
- return g4x_hdmi_connector_atomic_check(connector, state);
+ return g4x_hdmi_connector_atomic_check(&connector->base, state);
}
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
@@ -2642,22 +2654,23 @@ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs
};
static void
-intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
+intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
struct intel_display *display = to_intel_display(intel_hdmi);
- intel_attach_force_audio_property(connector);
- intel_attach_broadcast_rgb_property(connector);
- intel_attach_aspect_ratio_property(connector);
+ intel_attach_force_audio_property(&connector->base);
+ intel_attach_broadcast_rgb_property(&connector->base);
+ intel_attach_aspect_ratio_property(&connector->base);
- intel_attach_hdmi_colorspace_property(connector);
- drm_connector_attach_content_type_property(connector);
+ intel_attach_hdmi_colorspace_property(&connector->base);
+ drm_connector_attach_content_type_property(&connector->base);
if (DISPLAY_VER(display) >= 10)
- drm_connector_attach_hdr_output_metadata_property(connector);
+ drm_connector_attach_hdr_output_metadata_property(&connector->base);
if (!HAS_GMCH(display))
- drm_connector_attach_max_bpc_property(connector, 8, 12);
+ drm_connector_attach_max_bpc_property(&connector->base, 8, 12);
}
/*
@@ -2679,25 +2692,26 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
* True on success, false on failure.
*/
bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
- struct drm_connector *connector,
+ struct drm_connector *_connector,
bool high_tmds_clock_ratio,
bool scrambling)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
struct intel_display *display = to_intel_display(encoder);
struct drm_scrambling *sink_scrambling =
- &connector->display_info.hdmi.scdc.scrambling;
+ &connector->base.display_info.hdmi.scdc.scrambling;
if (!sink_scrambling->supported)
return true;
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
- connector->base.id, connector->name,
+ connector->base.base.id, connector->base.name,
str_yes_no(scrambling), high_tmds_clock_ratio ? 40 : 10);
/* Set TMDS bit clock ratio to 1/40 or 1/10, and enable/disable scrambling */
- return drm_scdc_set_high_tmds_clock_ratio(connector, high_tmds_clock_ratio) &&
- drm_scdc_set_scrambling(connector, scrambling);
+ return drm_scdc_set_high_tmds_clock_ratio(&connector->base, high_tmds_clock_ratio) &&
+ drm_scdc_set_scrambling(&connector->base, scrambling);
}
static u8 chv_encoder_to_ddc_pin(struct intel_encoder *encoder)
@@ -2808,7 +2822,7 @@ static u8 mcc_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
WARN_ON(encoder->port == PORT_C);
@@ -2819,7 +2833,7 @@ static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
* combo outputs. With CMP, the traditional DDI A-D pins are used for
* all outputs.
*/
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && phy >= PHY_C)
+ if (INTEL_PCH_TYPE(display) >= PCH_TGP && phy >= PHY_C)
return GMBUS_PIN_9_TC1_ICP + phy - PHY_C;
return GMBUS_PIN_1_BXT + phy;
@@ -2828,7 +2842,6 @@ static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 gen9bc_tgp_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum phy phy = intel_encoder_to_phy(encoder);
drm_WARN_ON(display->drm, encoder->port == PORT_A);
@@ -2839,7 +2852,7 @@ static u8 gen9bc_tgp_encoder_to_ddc_pin(struct intel_encoder *encoder)
* combo outputs. With CMP, the traditional DDI A-D pins are used for
* all outputs.
*/
- if (INTEL_PCH_TYPE(i915) >= PCH_TGP && phy >= PHY_C)
+ if (INTEL_PCH_TYPE(display) >= PCH_TGP && phy >= PHY_C)
return GMBUS_PIN_9_TC1_ICP + phy - PHY_C;
return GMBUS_PIN_1_BXT + phy;
@@ -2892,27 +2905,26 @@ static u8 g4x_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u8 ddc_pin;
- if (IS_ALDERLAKE_S(dev_priv))
+ if (display->platform.alderlake_s)
ddc_pin = adls_encoder_to_ddc_pin(encoder);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ else if (INTEL_PCH_TYPE(display) >= PCH_DG1)
ddc_pin = dg1_encoder_to_ddc_pin(encoder);
- else if (IS_ROCKETLAKE(dev_priv))
+ else if (display->platform.rocketlake)
ddc_pin = rkl_encoder_to_ddc_pin(encoder);
- else if (DISPLAY_VER(display) == 9 && HAS_PCH_TGP(dev_priv))
+ else if (DISPLAY_VER(display) == 9 && HAS_PCH_TGP(display))
ddc_pin = gen9bc_tgp_encoder_to_ddc_pin(encoder);
- else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
- HAS_PCH_TGP(dev_priv))
+ else if ((display->platform.jasperlake || display->platform.elkhartlake) &&
+ HAS_PCH_TGP(display))
ddc_pin = mcc_encoder_to_ddc_pin(encoder);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
ddc_pin = icl_encoder_to_ddc_pin(encoder);
- else if (HAS_PCH_CNP(dev_priv))
+ else if (HAS_PCH_CNP(display))
ddc_pin = cnp_encoder_to_ddc_pin(encoder);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
ddc_pin = bxt_encoder_to_ddc_pin(encoder);
- else if (IS_CHERRYVIEW(dev_priv))
+ else if (display->platform.cherryview)
ddc_pin = chv_encoder_to_ddc_pin(encoder);
else
ddc_pin = g4x_encoder_to_ddc_pin(encoder);
@@ -2986,15 +2998,13 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
void intel_infoframe_init(struct intel_digital_port *dig_port)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_i915_private *dev_priv =
- to_i915(dig_port->base.base.dev);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
dig_port->write_infoframe = vlv_write_infoframe;
dig_port->read_infoframe = vlv_read_infoframe;
dig_port->set_infoframes = vlv_set_infoframes;
dig_port->infoframes_enabled = vlv_infoframes_enabled;
- } else if (IS_G4X(dev_priv)) {
+ } else if (display->platform.g4x) {
dig_port->write_infoframe = g4x_write_infoframe;
dig_port->read_infoframe = g4x_read_infoframe;
dig_port->set_infoframes = g4x_set_infoframes;
@@ -3011,7 +3021,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
dig_port->set_infoframes = hsw_set_infoframes;
dig_port->infoframes_enabled = hsw_infoframes_enabled;
}
- } else if (HAS_PCH_IBX(dev_priv)) {
+ } else if (HAS_PCH_IBX(display)) {
dig_port->write_infoframe = ibx_write_infoframe;
dig_port->read_infoframe = ibx_read_infoframe;
dig_port->set_infoframes = ibx_set_infoframes;
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 00d7b1ccf190..fc5d8928c37e 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -30,6 +30,7 @@
#include "i915_irq.h"
#include "intel_connector.h"
#include "intel_display_power.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
@@ -118,7 +119,7 @@ intel_connector_hpd_pin(struct intel_connector *connector)
/**
* intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
- * @dev_priv: private driver data pointer
+ * @display: display device
* @pin: the pin to gather stats on
* @long_hpd: whether the HPD IRQ was long or short
*
@@ -127,13 +128,13 @@ intel_connector_hpd_pin(struct intel_connector *connector)
* responsible for further action.
*
* The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
- * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to
+ * stored in @display->hotplug.hpd_storm_threshold, which defaults to
* @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
* short IRQs count as +1. If this threshold is exceeded, it's considered an
* IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
*
* By default, most systems will only count long IRQs towards
- * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also
+ * &display->hotplug.hpd_storm_threshold. However, some older systems also
* suffer from short IRQ storms and must also track these. Because short IRQ
* storms are naturally caused by sideband interactions with DP MST devices,
* short IRQ detection is only enabled for systems without DP MST support.
@@ -145,10 +146,10 @@ intel_connector_hpd_pin(struct intel_connector *connector)
*
* Return true if an IRQ storm was detected on @pin.
*/
-static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
+static bool intel_hpd_irq_storm_detect(struct intel_display *display,
enum hpd_pin pin, bool long_hpd)
{
- struct intel_hotplug *hpd = &dev_priv->display.hotplug;
+ struct intel_hotplug *hpd = &display->hotplug;
unsigned long start = hpd->stats[pin].last_jiffies;
unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
const int increment = long_hpd ? 10 : 1;
@@ -156,7 +157,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
bool storm = false;
if (!threshold ||
- (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled))
+ (!long_hpd && !display->hotplug.hpd_short_storm_enabled))
return false;
if (!time_in_range(jiffies, start, end)) {
@@ -167,11 +168,11 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
hpd->stats[pin].count += increment;
if (hpd->stats[pin].count > threshold) {
hpd->stats[pin].state = HPD_MARK_DISABLED;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"HPD interrupt storm detected on PIN %d\n", pin);
storm = true;
} else {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Received HPD interrupt on PIN %d - cnt: %d\n",
pin,
hpd->stats[pin].count);
@@ -180,56 +181,62 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
return storm;
}
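
The detector above is a windowed counter per pin: last_jiffies anchors an HPD_STORM_DETECT_PERIOD window, long pulses add 10 and short pulses add 1, and crossing hpd_storm_threshold inside the window flags a storm. A self-contained sketch of the same accounting, with generic names and an assumed window length:

#include <linux/jiffies.h>

#define DEMO_STORM_PERIOD_MS	1000	/* assumed window, for illustration */

struct demo_pin_stats {
	unsigned long last_jiffies;
	int count;
};

static bool demo_storm_detect(struct demo_pin_stats *s, int threshold,
			      bool long_pulse)
{
	unsigned long end = s->last_jiffies +
			    msecs_to_jiffies(DEMO_STORM_PERIOD_MS);

	/* Outside the window: restart it and reset the counter. */
	if (!time_in_range(jiffies, s->last_jiffies, end)) {
		s->last_jiffies = jiffies;
		s->count = 0;
	}

	/* A long pulse weighs ten short ones, as in the code above. */
	s->count += long_pulse ? 10 : 1;

	return s->count > threshold;
}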
-static bool detection_work_enabled(struct drm_i915_private *i915)
+static bool detection_work_enabled(struct intel_display *display)
{
- lockdep_assert_held(&i915->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- return i915->display.hotplug.detection_work_enabled;
+ return display->hotplug.detection_work_enabled;
}
static bool
-mod_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
+mod_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
- lockdep_assert_held(&i915->irq_lock);
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (!detection_work_enabled(i915))
+ lockdep_assert_held(&display->irq.lock);
+
+ if (!detection_work_enabled(display))
return false;
return mod_delayed_work(i915->unordered_wq, work, delay);
}
static bool
-queue_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
+queue_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
- lockdep_assert_held(&i915->irq_lock);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ lockdep_assert_held(&display->irq.lock);
- if (!detection_work_enabled(i915))
+ if (!detection_work_enabled(display))
return false;
return queue_delayed_work(i915->unordered_wq, work, delay);
}
static bool
-queue_detection_work(struct drm_i915_private *i915, struct work_struct *work)
+queue_detection_work(struct intel_display *display, struct work_struct *work)
{
- lockdep_assert_held(&i915->irq_lock);
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (!detection_work_enabled(i915))
+ lockdep_assert_held(&display->irq.lock);
+
+ if (!detection_work_enabled(display))
return false;
return queue_work(i915->unordered_wq, work);
}
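
The three wrappers above funnel every detection-work submission through one flag: while detection_work_enabled() is false (e.g. around suspend), nothing new lands on the workqueue, and since the flag and the submissions are both guarded by display->irq.lock, disabling detection cannot race an in-flight queue attempt. The driver's helpers assert the lock is already held by the caller; the sketch below takes it itself so it stands alone (names hypothetical):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Hypothetical gate mirroring the lock-check-queue shape above. */
static bool demo_queue_if_enabled(spinlock_t *lock, const bool *enabled,
				  struct workqueue_struct *wq,
				  struct work_struct *work)
{
	bool queued = false;

	spin_lock_irq(lock);
	if (*enabled)
		queued = queue_work(wq, work);
	spin_unlock_irq(lock);

	return queued;
}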
static void
-intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
+intel_hpd_irq_storm_switch_to_polling(struct intel_display *display)
{
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
bool hpd_disabled = false;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@@ -238,15 +245,15 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
- dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED)
+ display->hotplug.stats[pin].state != HPD_MARK_DISABLED)
continue;
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"HPD interrupt storm detected on connector %s: "
"switching from hotplug detection to polling\n",
connector->base.name);
- dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
+ display->hotplug.stats[pin].state = HPD_DISABLED;
connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
hpd_disabled = true;
@@ -255,36 +262,35 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
- drm_kms_helper_poll_reschedule(&dev_priv->drm);
- mod_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.reenable_work,
+ drm_kms_helper_poll_reschedule(display->drm);
+ mod_delayed_detection_work(display,
+ &display->hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
}
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv),
- display.hotplug.reenable_work.work);
+ struct intel_display *display =
+ container_of(work, typeof(*display), hotplug.reenable_work.work);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
enum hpd_pin pin;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
- dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED)
+ display->hotplug.stats[pin].state != HPD_DISABLED)
continue;
if (connector->base.polled != connector->polled)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Reenabling HPD on connector %s\n",
connector->base.name);
connector->base.polled = connector->polled;
@@ -292,15 +298,15 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
drm_connector_list_iter_end(&conn_iter);
for_each_hpd_pin(pin) {
- if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
- dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
+ if (display->hotplug.stats[pin].state == HPD_DISABLED)
+ display->hotplug.stats[pin].state = HPD_ENABLED;
}
- intel_hpd_irq_setup(dev_priv);
+ intel_hpd_irq_setup(display);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
static enum intel_hotplug_state
@@ -349,32 +355,72 @@ static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
enc_to_dig_port(encoder)->hpd_pulse != NULL;
}
+static bool hpd_pin_has_pulse(struct intel_display *display, enum hpd_pin pin)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(display->drm, encoder) {
+ if (encoder->hpd_pin != pin)
+ continue;
+
+ if (intel_encoder_has_hpd_pulse(encoder))
+ return true;
+ }
+
+ return false;
+}
+
+static bool hpd_pin_is_blocked(struct intel_display *display, enum hpd_pin pin)
+{
+ lockdep_assert_held(&display->irq.lock);
+
+ return display->hotplug.stats[pin].blocked_count;
+}
+
+static u32 get_blocked_hpd_pin_mask(struct intel_display *display)
+{
+ enum hpd_pin pin;
+ u32 hpd_pin_mask = 0;
+
+ for_each_hpd_pin(pin) {
+ if (hpd_pin_is_blocked(display, pin))
+ hpd_pin_mask |= BIT(pin);
+ }
+
+ return hpd_pin_mask;
+}
+
static void i915_digport_work_func(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, display.hotplug.dig_port_work);
- u32 long_port_mask, short_port_mask;
+ struct intel_display *display =
+ container_of(work, struct intel_display, hotplug.dig_port_work);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ u32 long_hpd_pin_mask, short_hpd_pin_mask;
struct intel_encoder *encoder;
+ u32 blocked_hpd_pin_mask;
u32 old_bits = 0;
- spin_lock_irq(&dev_priv->irq_lock);
- long_port_mask = dev_priv->display.hotplug.long_port_mask;
- dev_priv->display.hotplug.long_port_mask = 0;
- short_port_mask = dev_priv->display.hotplug.short_port_mask;
- dev_priv->display.hotplug.short_port_mask = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
+ long_hpd_pin_mask = hotplug->long_hpd_pin_mask & ~blocked_hpd_pin_mask;
+ hotplug->long_hpd_pin_mask &= ~long_hpd_pin_mask;
+ short_hpd_pin_mask = hotplug->short_hpd_pin_mask & ~blocked_hpd_pin_mask;
+ hotplug->short_hpd_pin_mask &= ~short_hpd_pin_mask;
+
+ spin_unlock_irq(&display->irq.lock);
+
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_digital_port *dig_port;
- enum port port = encoder->port;
+ enum hpd_pin pin = encoder->hpd_pin;
bool long_hpd, short_hpd;
enum irqreturn ret;
if (!intel_encoder_has_hpd_pulse(encoder))
continue;
- long_hpd = long_port_mask & BIT(port);
- short_hpd = short_port_mask & BIT(port);
+ long_hpd = long_hpd_pin_mask & BIT(pin);
+ short_hpd = short_hpd_pin_mask & BIT(pin);
if (!long_hpd && !short_hpd)
continue;
@@ -384,16 +430,16 @@ static void i915_digport_work_func(struct work_struct *work)
ret = dig_port->hpd_pulse(dig_port, long_hpd);
if (ret == IRQ_NONE) {
/* fall back to old school hpd */
- old_bits |= BIT(encoder->hpd_pin);
+ old_bits |= BIT(pin);
}
}
if (old_bits) {
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->display.hotplug.event_bits |= old_bits;
- queue_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.hotplug_work, 0);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ display->hotplug.event_bits |= old_bits;
+ queue_delayed_detection_work(display,
+ &display->hotplug.hotplug_work, 0);
+ spin_unlock_irq(&display->irq.lock);
}
}
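
The rework above also settles the bottom half's locking shape: the pending long/short pin masks are snapshotted and cleared under display->irq.lock, any pin with a non-zero blocked_count is filtered out and left pending, and only then are the encoders walked with the lock dropped so ->hpd_pulse() is free to sleep. A compact sketch of just the snapshot step, reusing the names this patch introduces:

/* Snapshot-and-clear of the pending masks, as in the worker above. */
static void demo_take_pending(struct intel_display *display,
			      u32 *long_out, u32 *short_out)
{
	struct intel_hotplug *hotplug = &display->hotplug;
	u32 blocked;

	spin_lock_irq(&display->irq.lock);

	blocked = get_blocked_hpd_pin_mask(display);

	/* Consume only unblocked bits; blocked pins stay pending. */
	*long_out = hotplug->long_hpd_pin_mask & ~blocked;
	hotplug->long_hpd_pin_mask &= ~*long_out;
	*short_out = hotplug->short_hpd_pin_mask & ~blocked;
	hotplug->short_hpd_pin_mask &= ~*short_out;

	spin_unlock_irq(&display->irq.lock);
}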
@@ -406,13 +452,17 @@ static void i915_digport_work_func(struct work_struct *work)
*/
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ struct intel_encoder *encoder = &dig_port->base;
+
+ spin_lock_irq(&display->irq.lock);
- spin_lock_irq(&i915->irq_lock);
- i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port);
- spin_unlock_irq(&i915->irq_lock);
+ hotplug->short_hpd_pin_mask |= BIT(encoder->hpd_pin);
+ if (!hpd_pin_is_blocked(display, encoder->hpd_pin))
+ queue_work(hotplug->dp_wq, &hotplug->dig_port_work);
- queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work);
+ spin_unlock_irq(&display->irq.lock);
}
/*
@@ -420,9 +470,9 @@ void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
*/
static void i915_hotplug_work_func(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private,
- display.hotplug.hotplug_work.work);
+ struct intel_display *display =
+ container_of(work, struct intel_display, hotplug.hotplug_work.work);
+ struct intel_hotplug *hotplug = &display->hotplug;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
u32 changed = 0, retry = 0;
@@ -430,30 +480,32 @@ static void i915_hotplug_work_func(struct work_struct *work)
u32 hpd_retry_bits;
struct drm_connector *first_changed_connector = NULL;
int changed_connectors = 0;
+ u32 blocked_hpd_pin_mask;
- mutex_lock(&dev_priv->drm.mode_config.mutex);
- drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");
+ mutex_lock(&display->drm->mode_config.mutex);
+ drm_dbg_kms(display->drm, "running encoder hotplug functions\n");
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- hpd_event_bits = dev_priv->display.hotplug.event_bits;
- dev_priv->display.hotplug.event_bits = 0;
- hpd_retry_bits = dev_priv->display.hotplug.retry_bits;
- dev_priv->display.hotplug.retry_bits = 0;
+ blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
+ hpd_event_bits = hotplug->event_bits & ~blocked_hpd_pin_mask;
+ hotplug->event_bits &= ~hpd_event_bits;
+ hpd_retry_bits = hotplug->retry_bits & ~blocked_hpd_pin_mask;
+ hotplug->retry_bits &= ~hpd_retry_bits;
/* Enable polling for connectors which had HPD IRQ storms */
- intel_hpd_irq_storm_switch_to_polling(dev_priv);
+ intel_hpd_irq_storm_switch_to_polling(display);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
/* Skip calling encoder hotplug handlers if ignore long HPD set */
- if (dev_priv->display.hotplug.ignore_long_hpd) {
- drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ if (display->hotplug.ignore_long_hpd) {
+ drm_dbg_kms(display->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
+ mutex_unlock(&display->drm->mode_config.mutex);
return;
}
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
u32 hpd_bit;
@@ -472,7 +524,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
else
connector->hotplug_retries++;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Connector %s (pin %i) received hotplug event. (retry %d)\n",
connector->base.name, pin,
connector->hotplug_retries);
@@ -495,12 +547,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
}
}
drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
if (changed_connectors == 1)
drm_kms_helper_connector_hotplug_event(first_changed_connector);
else if (changed_connectors > 0)
- drm_kms_helper_hotplug_event(&dev_priv->drm);
+ drm_kms_helper_hotplug_event(display->drm);
if (first_changed_connector)
drm_connector_put(first_changed_connector);
@@ -508,20 +560,20 @@ static void i915_hotplug_work_func(struct work_struct *work)
/* Remove shared HPD pins that have changed */
retry &= ~changed;
if (retry) {
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->display.hotplug.retry_bits |= retry;
+ spin_lock_irq(&display->irq.lock);
+ display->hotplug.retry_bits |= retry;
- mod_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.hotplug_work,
+ mod_delayed_detection_work(display,
+ &display->hotplug.hotplug_work,
msecs_to_jiffies(HPD_RETRY_DELAY));
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
}
/**
* intel_hpd_irq_handler - main hotplug irq handler
- * @dev_priv: drm_i915_private
+ * @display: display device
* @pin_mask: a mask of hpd pins that have triggered the irq
* @long_mask: a mask of hpd pins that may be long hpd pulses
*
@@ -535,7 +587,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
* Here, we do hotplug irq storm detection and mitigation, and pass further
* processing to appropriate bottom halves.
*/
-void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+void intel_hpd_irq_handler(struct intel_display *display,
u32 pin_mask, u32 long_mask)
{
struct intel_encoder *encoder;
@@ -548,7 +600,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!pin_mask)
return;
- spin_lock(&dev_priv->irq_lock);
+ spin_lock(&display->irq.lock);
/*
* Determine whether ->hpd_pulse() exists for each pin, and
@@ -556,8 +608,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* as each pin may have up to two encoders (HDMI and DP) and
* only one of them (DP) will have ->hpd_pulse().
*/
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- enum port port = encoder->port;
+ for_each_intel_encoder(display->drm, encoder) {
bool long_hpd;
pin = encoder->hpd_pin;
@@ -569,18 +620,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
long_hpd = long_mask & BIT(pin);
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"digital hpd on [ENCODER:%d:%s] - %s\n",
encoder->base.base.id, encoder->base.name,
long_hpd ? "long" : "short");
- queue_dig = true;
+
+ if (!hpd_pin_is_blocked(display, pin))
+ queue_dig = true;
if (long_hpd) {
long_hpd_pulse_mask |= BIT(pin);
- dev_priv->display.hotplug.long_port_mask |= BIT(port);
+ display->hotplug.long_hpd_pin_mask |= BIT(pin);
} else {
short_hpd_pulse_mask |= BIT(pin);
- dev_priv->display.hotplug.short_port_mask |= BIT(port);
+ display->hotplug.short_hpd_pin_mask |= BIT(pin);
}
}
@@ -591,20 +644,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!(BIT(pin) & pin_mask))
continue;
- if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) {
+ if (display->hotplug.stats[pin].state == HPD_DISABLED) {
/*
* On GMCH platforms the interrupt mask bits only
* prevent irq generation, not the setting of the
* hotplug bits itself. So only WARN about unexpected
* interrupts on saner platforms.
*/
- drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
+ drm_WARN_ONCE(display->drm, !HAS_GMCH(display),
"Received HPD interrupt on pin %d although disabled\n",
pin);
continue;
}
- if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED)
+ if (display->hotplug.stats[pin].state != HPD_ENABLED)
continue;
/*
@@ -615,13 +668,15 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
long_hpd = long_hpd_pulse_mask & BIT(pin);
} else {
- dev_priv->display.hotplug.event_bits |= BIT(pin);
+ display->hotplug.event_bits |= BIT(pin);
long_hpd = true;
- queue_hp = true;
+
+ if (!hpd_pin_is_blocked(display, pin))
+ queue_hp = true;
}
- if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
- dev_priv->display.hotplug.event_bits &= ~BIT(pin);
+ if (intel_hpd_irq_storm_detect(display, pin, long_hpd)) {
+ display->hotplug.event_bits &= ~BIT(pin);
storm_detected = true;
queue_hp = true;
}
@@ -632,7 +687,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* happens later in our hotplug work.
*/
if (storm_detected)
- intel_hpd_irq_setup(dev_priv);
+ intel_hpd_irq_setup(display);
/*
* Our hotplug handler can grab modeset locks (by calling down into the
@@ -641,17 +696,17 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* deadlock.
*/
if (queue_dig)
- queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
+ queue_work(display->hotplug.dp_wq, &display->hotplug.dig_port_work);
if (queue_hp)
- queue_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.hotplug_work, 0);
+ queue_delayed_detection_work(display,
+ &display->hotplug.hotplug_work, 0);
- spin_unlock(&dev_priv->irq_lock);
+ spin_unlock(&display->irq.lock);
}
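
intel_hpd_irq_handler() deliberately takes pre-decoded masks, so a platform's top half only has to translate its status register into BIT(pin) values before calling it. The decode below is purely illustrative; which hardware bit maps to which hpd_pin is platform-specific:

/* Illustrative top-half fragment; the register decode is made up. */
static void demo_hpd_irq(struct intel_display *display, u32 hw_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (hw_status & BIT(0)) {		/* hypothetical status bit */
		pin_mask |= BIT(HPD_PORT_A);
		long_mask |= BIT(HPD_PORT_A);	/* report a long pulse */
	}

	if (pin_mask)
		intel_hpd_irq_handler(display, pin_mask, long_mask);
}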
/**
* intel_hpd_init - initializes and enables hpd support
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* This function enables the hotplug support. It requires that interrupts have
* already been enabled with intel_irq_init_hw(). From this point on hotplug and
@@ -663,40 +718,40 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
*
* Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
*/
-void intel_hpd_init(struct drm_i915_private *dev_priv)
+void intel_hpd_init(struct intel_display *display)
{
int i;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
for_each_hpd_pin(i) {
- dev_priv->display.hotplug.stats[i].count = 0;
- dev_priv->display.hotplug.stats[i].state = HPD_ENABLED;
+ display->hotplug.stats[i].count = 0;
+ display->hotplug.stats[i].state = HPD_ENABLED;
}
/*
* Interrupt setup is already guaranteed to be single-threaded; this is
* just to make the assert_spin_locked checks happy.
*/
- spin_lock_irq(&dev_priv->irq_lock);
- intel_hpd_irq_setup(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ intel_hpd_irq_setup(display);
+ spin_unlock_irq(&display->irq.lock);
}
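/*
 * Editorial sketch, not part of the patch: the pairing implied by the
 * kerneldoc above. intel_hpd_init() is called once interrupts are live
 * (per the doc, after intel_irq_init_hw()), and intel_hpd_cancel_work()
 * is its teardown counterpart before interrupts are removed. The
 * function name below is hypothetical.
 */
static void example_hpd_lifecycle(struct intel_display *display)
{
	/* Interrupts are assumed to be enabled already at this point. */
	intel_hpd_init(display);

	/* ... normal operation: IRQs feed intel_hpd_irq_handler() ... */

	/* On teardown, flush and cancel all HPD work before irq removal. */
	intel_hpd_cancel_work(display);
}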
-static void i915_hpd_poll_detect_connectors(struct drm_i915_private *i915)
+static void i915_hpd_poll_detect_connectors(struct intel_display *display)
{
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
struct intel_connector *first_changed_connector = NULL;
int changed = 0;
- mutex_lock(&i915->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
- if (!i915->drm.mode_config.poll_enabled)
+ if (!display->drm->mode_config.poll_enabled)
goto out;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (!(connector->base.polled & DRM_CONNECTOR_POLL_HPD))
continue;
@@ -714,7 +769,7 @@ static void i915_hpd_poll_detect_connectors(struct drm_i915_private *i915)
drm_connector_list_iter_end(&conn_iter);
out:
- mutex_unlock(&i915->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
if (!changed)
return;
@@ -722,25 +777,23 @@ out:
if (changed == 1)
drm_kms_helper_connector_hotplug_event(&first_changed_connector->base);
else
- drm_kms_helper_hotplug_event(&i915->drm);
+ drm_kms_helper_hotplug_event(display->drm);
drm_connector_put(&first_changed_connector->base);
}
static void i915_hpd_poll_init_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private,
- display.hotplug.poll_init_work);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display =
+ container_of(work, typeof(*display), hotplug.poll_init_work);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
intel_wakeref_t wakeref;
bool enabled;
- mutex_lock(&dev_priv->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
- enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled);
+ enabled = READ_ONCE(display->hotplug.poll_enabled);
/*
* Prevent taking a power reference from this sequence of
* i915_hpd_poll_init_work() -> drm_helper_hpd_irq_event() ->
@@ -750,14 +803,14 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (!enabled) {
wakeref = intel_display_power_get(display,
POWER_DOMAIN_DISPLAY_CORE);
- drm_WARN_ON(&dev_priv->drm,
- READ_ONCE(dev_priv->display.hotplug.poll_enabled));
- cancel_work(&dev_priv->display.hotplug.poll_init_work);
+ drm_WARN_ON(display->drm,
+ READ_ONCE(display->hotplug.poll_enabled));
+ cancel_work(&display->hotplug.poll_init_work);
}
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@@ -765,7 +818,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (pin == HPD_NONE)
continue;
- if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
+ if (display->hotplug.stats[pin].state == HPD_DISABLED)
continue;
connector->base.polled = connector->polled;
@@ -776,19 +829,19 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
}
drm_connector_list_iter_end(&conn_iter);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
if (enabled)
- drm_kms_helper_poll_reschedule(&dev_priv->drm);
+ drm_kms_helper_poll_reschedule(display->drm);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
/*
* We might have missed any hotplugs that happened while we were
* in the middle of disabling polling
*/
if (!enabled) {
- i915_hpd_poll_detect_connectors(dev_priv);
+ i915_hpd_poll_detect_connectors(display);
intel_display_power_put(display,
POWER_DOMAIN_DISPLAY_CORE,
@@ -798,7 +851,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
/**
* intel_hpd_poll_enable - enable polling for connectors with hpd
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* This function enables polling for all connectors which support HPD.
* Under certain conditions HPD may not be functional. On most Intel GPUs,
@@ -812,15 +865,12 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
*
* Also see: intel_hpd_init() and intel_hpd_poll_disable().
*/
-void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
+void intel_hpd_poll_enable(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
- if (!HAS_DISPLAY(dev_priv) ||
- !intel_display_device_enabled(display))
+ if (!HAS_DISPLAY(display) || !intel_display_device_enabled(display))
return;
- WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true);
+ WRITE_ONCE(display->hotplug.poll_enabled, true);
/*
* We might already be holding dev->mode_config.mutex, so do this in a
@@ -828,15 +878,15 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
* As well, there's no issue if we race here since we always reschedule
* this worker anyway
*/
- spin_lock_irq(&dev_priv->irq_lock);
- queue_detection_work(dev_priv,
- &dev_priv->display.hotplug.poll_init_work);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ queue_detection_work(display,
+ &display->hotplug.poll_init_work);
+ spin_unlock_irq(&display->irq.lock);
}
/**
* intel_hpd_poll_disable - disable polling for connectors with hpd
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* This function disables polling for all connectors which support HPD.
* Under certain conditions HPD may not be functional. On most Intel GPUs,
@@ -853,26 +903,26 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
*
* Also see: intel_hpd_init() and intel_hpd_poll_enable().
*/
-void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
+void intel_hpd_poll_disable(struct intel_display *display)
{
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
+ WRITE_ONCE(display->hotplug.poll_enabled, false);
- spin_lock_irq(&dev_priv->irq_lock);
- queue_detection_work(dev_priv,
- &dev_priv->display.hotplug.poll_init_work);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ queue_detection_work(display,
+ &display->hotplug.poll_init_work);
+ spin_unlock_irq(&display->irq.lock);
}
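/*
 * Editorial sketch, not part of the patch: how the enable/disable pair
 * above is typically toggled around a power state in which HPD
 * interrupts are unavailable, per the "Also see" notes in both
 * kerneldocs. The suspend/resume hook names are hypothetical.
 */
static void example_display_suspend(struct intel_display *display)
{
	/* IRQ-based HPD is going away; fall back to periodic polling. */
	intel_hpd_poll_enable(display);
}

static void example_display_resume(struct intel_display *display)
{
	/* Re-arm IRQ-based HPD first, then drop the polling fallback. */
	intel_hpd_init(display);
	intel_hpd_poll_disable(display);
}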
-void intel_hpd_poll_fini(struct drm_i915_private *i915)
+void intel_hpd_poll_fini(struct intel_display *display)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
/* Kill all the work that may have been queued by hpd. */
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
intel_connector_cancel_modeset_retry_work(connector);
intel_hdcp_cancel_works(connector);
@@ -880,157 +930,261 @@ void intel_hpd_poll_fini(struct drm_i915_private *i915)
drm_connector_list_iter_end(&conn_iter);
}
-void intel_hpd_init_early(struct drm_i915_private *i915)
+void intel_hpd_init_early(struct intel_display *display)
{
- INIT_DELAYED_WORK(&i915->display.hotplug.hotplug_work,
+ INIT_DELAYED_WORK(&display->hotplug.hotplug_work,
i915_hotplug_work_func);
- INIT_WORK(&i915->display.hotplug.dig_port_work, i915_digport_work_func);
- INIT_WORK(&i915->display.hotplug.poll_init_work, i915_hpd_poll_init_work);
- INIT_DELAYED_WORK(&i915->display.hotplug.reenable_work,
+ INIT_WORK(&display->hotplug.dig_port_work, i915_digport_work_func);
+ INIT_WORK(&display->hotplug.poll_init_work, i915_hpd_poll_init_work);
+ INIT_DELAYED_WORK(&display->hotplug.reenable_work,
intel_hpd_irq_storm_reenable_work);
- i915->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ display->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
/* If we have MST support, we want to avoid doing short HPD IRQ storm
* detection, as short HPD storms will occur as a natural part of
* sideband messaging with MST.
* On older platforms however, IRQ storms can occur with both long and
* short pulses, as seen on some G4x systems.
*/
- i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915);
+ display->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(display);
}
-static bool cancel_all_detection_work(struct drm_i915_private *i915)
+static bool cancel_all_detection_work(struct intel_display *display)
{
bool was_pending = false;
- if (cancel_delayed_work_sync(&i915->display.hotplug.hotplug_work))
+ if (cancel_delayed_work_sync(&display->hotplug.hotplug_work))
was_pending = true;
- if (cancel_work_sync(&i915->display.hotplug.poll_init_work))
+ if (cancel_work_sync(&display->hotplug.poll_init_work))
was_pending = true;
- if (cancel_delayed_work_sync(&i915->display.hotplug.reenable_work))
+ if (cancel_delayed_work_sync(&display->hotplug.reenable_work))
was_pending = true;
return was_pending;
}
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+void intel_hpd_cancel_work(struct intel_display *display)
{
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+
+ drm_WARN_ON(display->drm, get_blocked_hpd_pin_mask(display));
- dev_priv->display.hotplug.long_port_mask = 0;
- dev_priv->display.hotplug.short_port_mask = 0;
- dev_priv->display.hotplug.event_bits = 0;
- dev_priv->display.hotplug.retry_bits = 0;
+ display->hotplug.long_hpd_pin_mask = 0;
+ display->hotplug.short_hpd_pin_mask = 0;
+ display->hotplug.event_bits = 0;
+ display->hotplug.retry_bits = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
- cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);
+ cancel_work_sync(&display->hotplug.dig_port_work);
/*
* All other work triggered by hotplug events should be canceled by
* now.
*/
- if (cancel_all_detection_work(dev_priv))
- drm_dbg_kms(&dev_priv->drm, "Hotplug detection work still active\n");
+ if (cancel_all_detection_work(display))
+ drm_dbg_kms(display->drm, "Hotplug detection work still active\n");
}
-bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+static void queue_work_for_missed_irqs(struct intel_display *display)
{
- bool ret = false;
+ struct intel_hotplug *hotplug = &display->hotplug;
+ bool queue_hp_work = false;
+ u32 blocked_hpd_pin_mask;
+ enum hpd_pin pin;
- if (pin == HPD_NONE)
- return false;
+ lockdep_assert_held(&display->irq.lock);
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) {
- dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
- ret = true;
+ blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
+ if ((hotplug->event_bits | hotplug->retry_bits) & ~blocked_hpd_pin_mask)
+ queue_hp_work = true;
+
+ for_each_hpd_pin(pin) {
+ switch (display->hotplug.stats[pin].state) {
+ case HPD_MARK_DISABLED:
+ queue_hp_work = true;
+ break;
+ case HPD_DISABLED:
+ case HPD_ENABLED:
+ break;
+ default:
+ MISSING_CASE(display->hotplug.stats[pin].state);
+ }
}
- spin_unlock_irq(&dev_priv->irq_lock);
- return ret;
+ if ((hotplug->long_hpd_pin_mask | hotplug->short_hpd_pin_mask) & ~blocked_hpd_pin_mask)
+ queue_work(hotplug->dp_wq, &hotplug->dig_port_work);
+
+ if (queue_hp_work)
+ queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
}
-void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+static bool block_hpd_pin(struct intel_display *display, enum hpd_pin pin)
{
- if (pin == HPD_NONE)
+ struct intel_hotplug *hotplug = &display->hotplug;
+
+ lockdep_assert_held(&display->irq.lock);
+
+ hotplug->stats[pin].blocked_count++;
+
+ return hotplug->stats[pin].blocked_count == 1;
+}
+
+static bool unblock_hpd_pin(struct intel_display *display, enum hpd_pin pin)
+{
+ struct intel_hotplug *hotplug = &display->hotplug;
+
+ lockdep_assert_held(&display->irq.lock);
+
+ if (drm_WARN_ON(display->drm, hotplug->stats[pin].blocked_count == 0))
+ return true;
+
+ hotplug->stats[pin].blocked_count--;
+
+ return hotplug->stats[pin].blocked_count == 0;
+}
+
+/**
+ * intel_hpd_block - Block handling of HPD IRQs on an HPD pin
+ * @encoder: Encoder to block the HPD handling for
+ *
+ * Blocks the handling of HPD IRQs on the HPD pin of @encoder.
+ *
+ * On return:
+ *
+ * - It's guaranteed that the blocked encoders' HPD pulse handlers
+ * (via intel_digital_port::hpd_pulse()) are not running.
+ * - The hotplug event handling (via intel_encoder::hotplug()) of an
+ * HPD IRQ pending at the time this function is called may still be
+ * running.
+ * - Detection on the encoder's connector (via
+ * drm_connector_helper_funcs::detect_ctx(),
+ * drm_connector_funcs::detect()) remains allowed, for instance as part of
+ * userspace connector probing or the DRM core's connector polling.
+ *
+ * The call must be followed by calling intel_hpd_unblock(), or
+ * intel_hpd_clear_and_unblock().
+ *
+ * Note that the handling of HPD IRQs for another encoder using the same HPD
+ * pin as that of @encoder will also be blocked.
+ */
+void intel_hpd_block(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ bool do_flush = false;
+
+ if (encoder->hpd_pin == HPD_NONE)
+ return;
+
+ spin_lock_irq(&display->irq.lock);
+
+ if (block_hpd_pin(display, encoder->hpd_pin))
+ do_flush = true;
+
+ spin_unlock_irq(&display->irq.lock);
+
+ if (do_flush && hpd_pin_has_pulse(display, encoder->hpd_pin))
+ flush_work(&hotplug->dig_port_work);
+}
+
+/**
+ * intel_hpd_unblock - Unblock handling of HPD IRQs on an HPD pin
+ * @encoder: Encoder to unblock the HPD handling for
+ *
+ * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was
+ * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the
+ * HPD pin while it was blocked will be handled for @encoder and for any
+ * other encoder sharing the same HPD pin.
+ */
+void intel_hpd_unblock(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ if (encoder->hpd_pin == HPD_NONE)
return;
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+
+ if (unblock_hpd_pin(display, encoder->hpd_pin))
+ queue_work_for_missed_irqs(display);
+
+ spin_unlock_irq(&display->irq.lock);
}
-static void queue_work_for_missed_irqs(struct drm_i915_private *i915)
+/**
+ * intel_hpd_clear_and_unblock - Unblock handling of new HPD IRQs on an HPD pin
+ * @encoder: Encoder to unblock the HPD handling for
+ *
+ * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was
+ * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the
+ * HPD pin while it was blocked will be cleared, so that only newly
+ * raised IRQs are handled.
+ */
+void intel_hpd_clear_and_unblock(struct intel_encoder *encoder)
{
- bool queue_work = false;
- enum hpd_pin pin;
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ enum hpd_pin pin = encoder->hpd_pin;
- lockdep_assert_held(&i915->irq_lock);
+ if (pin == HPD_NONE)
+ return;
- if (i915->display.hotplug.event_bits ||
- i915->display.hotplug.retry_bits)
- queue_work = true;
+ spin_lock_irq(&display->irq.lock);
- for_each_hpd_pin(pin) {
- switch (i915->display.hotplug.stats[pin].state) {
- case HPD_MARK_DISABLED:
- queue_work = true;
- break;
- case HPD_ENABLED:
- break;
- default:
- MISSING_CASE(i915->display.hotplug.stats[pin].state);
- }
+ if (unblock_hpd_pin(display, pin)) {
+ hotplug->event_bits &= ~BIT(pin);
+ hotplug->retry_bits &= ~BIT(pin);
+ hotplug->short_hpd_pin_mask &= ~BIT(pin);
+ hotplug->long_hpd_pin_mask &= ~BIT(pin);
}
- if (queue_work)
- queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
+ spin_unlock_irq(&display->irq.lock);
}
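/*
 * Editorial sketch, not part of the patch: the calling convention the
 * kerneldocs above describe. Every intel_hpd_block() must be paired
 * with intel_hpd_unblock() (replay IRQs that arrived while blocked) or
 * intel_hpd_clear_and_unblock() (discard them). The helper name below
 * is hypothetical.
 */
static void example_encoder_quiesce(struct intel_encoder *encoder)
{
	/* On return, the encoder's hpd_pulse handler is not running. */
	intel_hpd_block(encoder);

	/* ... reprogram the port; HPD IRQs raised here stay pending ... */

	/* The reprogramming itself caused spurious IRQs, so drop them. */
	intel_hpd_clear_and_unblock(encoder);
}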
-void intel_hpd_enable_detection_work(struct drm_i915_private *i915)
+void intel_hpd_enable_detection_work(struct intel_display *display)
{
- spin_lock_irq(&i915->irq_lock);
- i915->display.hotplug.detection_work_enabled = true;
- queue_work_for_missed_irqs(i915);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ display->hotplug.detection_work_enabled = true;
+ queue_work_for_missed_irqs(display);
+ spin_unlock_irq(&display->irq.lock);
}
-void intel_hpd_disable_detection_work(struct drm_i915_private *i915)
+void intel_hpd_disable_detection_work(struct intel_display *display)
{
- spin_lock_irq(&i915->irq_lock);
- i915->display.hotplug.detection_work_enabled = false;
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ display->hotplug.detection_work_enabled = false;
+ spin_unlock_irq(&display->irq.lock);
- cancel_all_detection_work(i915);
+ cancel_all_detection_work(display);
}
-bool intel_hpd_schedule_detection(struct drm_i915_private *i915)
+bool intel_hpd_schedule_detection(struct intel_display *display)
{
unsigned long flags;
bool ret;
- spin_lock_irqsave(&i915->irq_lock, flags);
- ret = queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
- spin_unlock_irqrestore(&i915->irq_lock, flags);
+ spin_lock_irqsave(&display->irq.lock, flags);
+ ret = queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
+ spin_unlock_irqrestore(&display->irq.lock, flags);
return ret;
}
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
- struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ struct intel_display *display = m->private;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_hotplug *hotplug = &display->hotplug;
/* Synchronize with everything first in case there's been an HPD
* storm, but we haven't finished handling it in the kernel yet
*/
intel_synchronize_irq(dev_priv);
- flush_work(&dev_priv->display.hotplug.dig_port_work);
- flush_delayed_work(&dev_priv->display.hotplug.hotplug_work);
+ flush_work(&display->hotplug.dig_port_work);
+ flush_delayed_work(&display->hotplug.hotplug_work);
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
seq_printf(m, "Detected: %s\n",
@@ -1044,8 +1198,8 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ struct intel_display *display = m->private;
+ struct intel_hotplug *hotplug = &display->hotplug;
unsigned int new_threshold;
int i;
char *newline;
@@ -1070,21 +1224,21 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
return -EINVAL;
if (new_threshold > 0)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting HPD storm detection threshold to %d\n",
new_threshold);
else
- drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
+ drm_dbg_kms(display->drm, "Disabling HPD storm detection\n");
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
hotplug->hpd_storm_threshold = new_threshold;
/* Reset the HPD storm stats so we don't accidentally trigger a storm */
for_each_hpd_pin(i)
hotplug->stats[i].count = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
/* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
+ flush_delayed_work(&display->hotplug.reenable_work);
return len;
}
@@ -1105,10 +1259,10 @@ static const struct file_operations i915_hpd_storm_ctl_fops = {
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
seq_printf(m, "Enabled: %s\n",
- str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled));
+ str_yes_no(display->hotplug.hpd_short_storm_enabled));
return 0;
}
@@ -1125,8 +1279,8 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ struct intel_display *display = m->private;
+ struct intel_hotplug *hotplug = &display->hotplug;
char *newline;
char tmp[16];
int i;
@@ -1147,22 +1301,22 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
/* Reset to the "default" state for this system */
if (strcmp(tmp, "reset") == 0)
- new_state = !HAS_DP_MST(dev_priv);
+ new_state = !HAS_DP_MST(display);
else if (kstrtobool(tmp, &new_state) != 0)
return -EINVAL;
- drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
+ drm_dbg_kms(display->drm, "%sabling HPD short storm detection\n",
new_state ? "En" : "Dis");
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
hotplug->hpd_short_storm_enabled = new_state;
/* Reset the HPD storm stats so we don't accidentally trigger a storm */
for_each_hpd_pin(i)
hotplug->stats[i].count = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
/* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
+ flush_delayed_work(&display->hotplug.reenable_work);
return len;
}
@@ -1176,14 +1330,14 @@ static const struct file_operations i915_hpd_short_storm_ctl_fops = {
.write = i915_hpd_short_storm_ctl_write,
};
-void intel_hpd_debugfs_register(struct drm_i915_private *i915)
+void intel_hpd_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root,
- i915, &i915_hpd_storm_ctl_fops);
+ display, &i915_hpd_storm_ctl_fops);
debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root,
- i915, &i915_hpd_short_storm_ctl_fops);
+ display, &i915_hpd_short_storm_ctl_fops);
debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root,
- &i915->display.hotplug.ignore_long_hpd);
+ &display->hotplug.ignore_long_hpd);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index d6986902b054..edc41c9d3d65 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -8,30 +8,31 @@
#include <linux/types.h>
-struct drm_i915_private;
+enum port;
struct intel_connector;
struct intel_digital_port;
+struct intel_display;
struct intel_encoder;
-enum port;
-void intel_hpd_poll_enable(struct drm_i915_private *dev_priv);
-void intel_hpd_poll_disable(struct drm_i915_private *dev_priv);
-void intel_hpd_poll_fini(struct drm_i915_private *i915);
+void intel_hpd_poll_enable(struct intel_display *display);
+void intel_hpd_poll_disable(struct intel_display *display);
+void intel_hpd_poll_fini(struct intel_display *display);
enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector);
-void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+void intel_hpd_irq_handler(struct intel_display *display,
u32 pin_mask, u32 long_mask);
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port);
-void intel_hpd_init(struct drm_i915_private *dev_priv);
-void intel_hpd_init_early(struct drm_i915_private *i915);
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+void intel_hpd_init(struct intel_display *display);
+void intel_hpd_init_early(struct intel_display *display);
+void intel_hpd_cancel_work(struct intel_display *display);
enum hpd_pin intel_hpd_pin_default(enum port port);
-bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
-void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
-void intel_hpd_debugfs_register(struct drm_i915_private *i915);
+void intel_hpd_block(struct intel_encoder *encoder);
+void intel_hpd_unblock(struct intel_encoder *encoder);
+void intel_hpd_clear_and_unblock(struct intel_encoder *encoder);
+void intel_hpd_debugfs_register(struct intel_display *display);
-void intel_hpd_enable_detection_work(struct drm_i915_private *i915);
-void intel_hpd_disable_detection_work(struct drm_i915_private *i915);
-bool intel_hpd_schedule_detection(struct drm_i915_private *i915);
+void intel_hpd_enable_detection_work(struct intel_display *display);
+void intel_hpd_disable_detection_work(struct intel_display *display);
+bool intel_hpd_schedule_detection(struct intel_display *display);
#endif /* __INTEL_HOTPLUG_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 2137ac7b882a..c024b42369c8 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -3,8 +3,10 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_types.h"
@@ -131,68 +133,67 @@ static const u32 hpd_mtp[HPD_NUM_PINS] = {
[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
};
-static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
+static void intel_hpd_init_pins(struct intel_display *display)
{
- struct intel_hotplug *hpd = &dev_priv->display.hotplug;
+ struct intel_hotplug *hpd = &display->hotplug;
- if (HAS_GMCH(dev_priv)) {
- if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv))
+ if (HAS_GMCH(display)) {
+ if (display->platform.g4x || display->platform.valleyview ||
+ display->platform.cherryview)
hpd->hpd = hpd_status_g4x;
else
hpd->hpd = hpd_status_i915;
return;
}
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
hpd->hpd = hpd_xelpdp;
- else if (DISPLAY_VER(dev_priv) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
hpd->hpd = hpd_gen11;
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
hpd->hpd = hpd_bxt;
- else if (DISPLAY_VER(dev_priv) == 9)
+ else if (DISPLAY_VER(display) == 9)
hpd->hpd = NULL; /* no north HPD on SKL */
- else if (DISPLAY_VER(dev_priv) >= 8)
+ else if (DISPLAY_VER(display) >= 8)
hpd->hpd = hpd_bdw;
- else if (DISPLAY_VER(dev_priv) >= 7)
+ else if (DISPLAY_VER(display) >= 7)
hpd->hpd = hpd_ivb;
else
hpd->hpd = hpd_ilk;
- if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
- (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
+ if ((INTEL_PCH_TYPE(display) < PCH_DG1) &&
+ (!HAS_PCH_SPLIT(display) || HAS_PCH_NOP(display)))
return;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL)
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL)
hpd->pch_hpd = hpd_mtp;
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ else if (INTEL_PCH_TYPE(display) >= PCH_DG1)
hpd->pch_hpd = hpd_sde_dg1;
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
hpd->pch_hpd = hpd_icp;
- else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
+ else if (HAS_PCH_CNP(display) || HAS_PCH_SPT(display))
hpd->pch_hpd = hpd_spt;
- else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
+ else if (HAS_PCH_LPT(display) || HAS_PCH_CPT(display))
hpd->pch_hpd = hpd_cpt;
- else if (HAS_PCH_IBX(dev_priv))
+ else if (HAS_PCH_IBX(display))
hpd->pch_hpd = hpd_ibx;
else
- MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
+ MISSING_CASE(INTEL_PCH_TYPE(display));
}
/* For display hotplug interrupt */
-void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
+void i915_hotplug_interrupt_update_locked(struct intel_display *display,
u32 mask, u32 bits)
{
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, bits & ~mask);
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, bits & ~mask);
- intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN(dev_priv), mask,
- bits);
+ intel_de_rmw(display, PORT_HOTPLUG_EN(display), mask, bits);
}
/**
* i915_hotplug_interrupt_update - update hotplug interrupt enable
- * @dev_priv: driver private
+ * @display: display device instance
* @mask: bits to update
* @bits: bits to enable
* NOTE: the HPD enable bits are modified both inside and outside
@@ -202,13 +203,13 @@ void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
* held already, this function acquires the lock itself. A non-locking
* version is also available.
*/
-void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
+void i915_hotplug_interrupt_update(struct intel_display *display,
u32 mask,
u32 bits)
{
- spin_lock_irq(&dev_priv->irq_lock);
- i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ i915_hotplug_interrupt_update_locked(display, mask, bits);
+ spin_unlock_irq(&display->irq.lock);
}
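/*
 * Editorial sketch, not part of the patch, contrasting the two
 * variants documented above: the plain wrapper takes display->irq.lock
 * itself, while the _locked variant requires the caller to hold it.
 */
static void example_update_hotplug_en(struct intel_display *display,
				      u32 mask, u32 bits)
{
	/* From plain process context: the wrapper handles locking. */
	i915_hotplug_interrupt_update(display, mask, bits);

	/* Within an already-locked section: use the _locked variant. */
	spin_lock_irq(&display->irq.lock);
	i915_hotplug_interrupt_update_locked(display, mask, bits);
	spin_unlock_irq(&display->irq.lock);
}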
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
@@ -339,7 +340,7 @@ static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
*
* Note that the caller is expected to zero out the masks initially.
*/
-static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
+static void intel_get_hpd_pins(struct intel_display *display,
u32 *pin_mask, u32 *long_mask,
u32 hotplug_trigger, u32 dig_hotplug_reg,
const u32 hpd[HPD_NUM_PINS],
@@ -359,37 +360,37 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
*long_mask |= BIT(pin);
}
- drm_dbg(&dev_priv->drm,
- "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
- hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
+ drm_dbg_kms(display->drm,
+ "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
+ hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
-static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
+static u32 intel_hpd_enabled_irqs(struct intel_display *display,
const u32 hpd[HPD_NUM_PINS])
{
struct intel_encoder *encoder;
u32 enabled_irqs = 0;
- for_each_intel_encoder(&dev_priv->drm, encoder)
- if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
+ for_each_intel_encoder(display->drm, encoder)
+ if (display->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
enabled_irqs |= hpd[encoder->hpd_pin];
return enabled_irqs;
}
-static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
+static u32 intel_hpd_hotplug_irqs(struct intel_display *display,
const u32 hpd[HPD_NUM_PINS])
{
struct intel_encoder *encoder;
u32 hotplug_irqs = 0;
- for_each_intel_encoder(&dev_priv->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
hotplug_irqs |= hpd[encoder->hpd_pin];
return hotplug_irqs;
}
-static u32 intel_hpd_hotplug_mask(struct drm_i915_private *i915,
+static u32 intel_hpd_hotplug_mask(struct intel_display *display,
hotplug_mask_func hotplug_mask)
{
enum hpd_pin pin;
@@ -401,25 +402,25 @@ static u32 intel_hpd_hotplug_mask(struct drm_i915_private *i915,
return hotplug;
}
-static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
+static u32 intel_hpd_hotplug_enables(struct intel_display *display,
hotplug_enables_func hotplug_enables)
{
struct intel_encoder *encoder;
u32 hotplug = 0;
- for_each_intel_encoder(&i915->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
hotplug |= hotplug_enables(encoder);
return hotplug;
}
-u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
+u32 i9xx_hpd_irq_ack(struct intel_display *display)
{
u32 hotplug_status = 0, hotplug_status_mask;
int i;
- if (IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.g4x ||
+ display->platform.valleyview || display->platform.cherryview)
hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
else
@@ -435,53 +436,51 @@ u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
* bits can itself generate a new hotplug interrupt :(
*/
for (i = 0; i < 10; i++) {
- u32 tmp = intel_uncore_read(&dev_priv->uncore,
- PORT_HOTPLUG_STAT(dev_priv)) & hotplug_status_mask;
+ u32 tmp = intel_de_read(display,
+ PORT_HOTPLUG_STAT(display)) & hotplug_status_mask;
if (tmp == 0)
return hotplug_status;
hotplug_status |= tmp;
- intel_uncore_write(&dev_priv->uncore,
- PORT_HOTPLUG_STAT(dev_priv),
- hotplug_status);
+ intel_de_write(display, PORT_HOTPLUG_STAT(display),
+ hotplug_status);
}
- drm_WARN_ONCE(&dev_priv->drm, 1,
+ drm_WARN_ONCE(display->drm, 1,
"PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
- intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT(dev_priv)));
+ intel_de_read(display, PORT_HOTPLUG_STAT(display)));
return hotplug_status;
}
-void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status)
+void i9xx_hpd_irq_handler(struct intel_display *display, u32 hotplug_status)
{
- struct intel_display *display = &dev_priv->display;
u32 pin_mask = 0, long_mask = 0;
u32 hotplug_trigger;
- if (IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.g4x ||
+ display->platform.valleyview || display->platform.cherryview)
hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
else
hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
if (hotplug_trigger) {
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, hotplug_trigger,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
i9xx_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
- if ((IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.g4x ||
+ display->platform.valleyview || display->platform.cherryview) &&
hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
intel_dp_aux_irq_handler(display);
}
-void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
+void ibx_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
@@ -491,7 +490,7 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
* zero. Not acking leads to "The master control interrupt lied (SDE)!"
* errors.
*/
- dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
+ dig_hotplug_reg = intel_de_read(display, PCH_PORT_HOTPLUG);
if (!hotplug_trigger) {
u32 mask = PORTA_HOTPLUG_STATUS_MASK |
PORTD_HOTPLUG_STATUS_MASK |
@@ -500,63 +499,61 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
dig_hotplug_reg &= ~mask;
}
- intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ intel_de_write(display, PCH_PORT_HOTPLUG, dig_hotplug_reg);
if (!hotplug_trigger)
return;
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
pch_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
-void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir)
+void xelpdp_pica_irq_handler(struct intel_display *display, u32 iir)
{
- struct intel_display *display = &i915->display;
enum hpd_pin pin;
u32 hotplug_trigger = iir & (XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK);
u32 trigger_aux = iir & XELPDP_AUX_TC_MASK;
u32 pin_mask = 0, long_mask = 0;
- if (DISPLAY_VER(i915) >= 20)
+ if (DISPLAY_VER(display) >= 20)
trigger_aux |= iir & XE2LPD_AUX_DDI_MASK;
for (pin = HPD_PORT_TC1; pin <= HPD_PORT_TC4; pin++) {
u32 val;
- if (!(i915->display.hotplug.hpd[pin] & hotplug_trigger))
+ if (!(display->hotplug.hpd[pin] & hotplug_trigger))
continue;
pin_mask |= BIT(pin);
- val = intel_de_read(i915, XELPDP_PORT_HOTPLUG_CTL(pin));
- intel_de_write(i915, XELPDP_PORT_HOTPLUG_CTL(pin), val);
+ val = intel_de_read(display, XELPDP_PORT_HOTPLUG_CTL(pin));
+ intel_de_write(display, XELPDP_PORT_HOTPLUG_CTL(pin), val);
if (val & (XELPDP_DP_ALT_HPD_LONG_DETECT | XELPDP_TBT_HPD_LONG_DETECT))
long_mask |= BIT(pin);
}
if (pin_mask) {
- drm_dbg(&i915->drm,
- "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n",
- hotplug_trigger, pin_mask, long_mask);
+ drm_dbg_kms(display->drm,
+ "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n",
+ hotplug_trigger, pin_mask, long_mask);
- intel_hpd_irq_handler(i915, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
if (trigger_aux)
intel_dp_aux_irq_handler(display);
if (!pin_mask && !trigger_aux)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Unexpected DE HPD/AUX interrupt 0x%08x\n", iir);
}
-void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+void icp_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
u32 pin_mask = 0, long_mask = 0;
@@ -565,37 +562,36 @@ void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
u32 dig_hotplug_reg;
/* Locking due to DSI native GPIO sequences */
- spin_lock(&dev_priv->irq_lock);
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
- spin_unlock(&dev_priv->irq_lock);
+ spin_lock(&display->irq.lock);
+ dig_hotplug_reg = intel_de_rmw(display, SHOTPLUG_CTL_DDI, 0, 0);
+ spin_unlock(&display->irq.lock);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
ddi_hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
icp_ddi_port_hotplug_long_detect);
}
if (tc_hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, SHOTPLUG_CTL_TC, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
tc_hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
icp_tc_port_hotplug_long_detect);
}
if (pin_mask)
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
if (pch_iir & SDE_GMBUS_ICP)
intel_gmbus_irq_handler(display);
}
-void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+void spt_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
~SDE_PORTE_HOTPLUG_SPT;
u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
@@ -604,61 +600,61 @@ void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
spt_port_hotplug_long_detect);
}
if (hotplug2_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG2, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug2_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
spt_port_hotplug2_long_detect);
}
if (pin_mask)
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
if (pch_iir & SDE_GMBUS_CPT)
intel_gmbus_irq_handler(display);
}
-void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
+void ilk_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
ilk_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
-void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
+void bxt_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
bxt_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
-void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+void gen11_hpd_irq_handler(struct intel_display *display, u32 iir)
{
u32 pin_mask = 0, long_mask = 0;
u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
@@ -667,29 +663,29 @@ void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
if (trigger_tc) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
trigger_tc, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
gen11_port_hotplug_long_detect);
}
if (trigger_tbt) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
trigger_tbt, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
gen11_port_hotplug_long_detect);
}
if (pin_mask)
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
else
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Unexpected DE HPD interrupt 0x%08x\n", iir);
}
@@ -711,7 +707,7 @@ static u32 ibx_hotplug_mask(enum hpd_pin hpd_pin)
static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
switch (encoder->hpd_pin) {
case HPD_PORT_A:
@@ -719,7 +715,7 @@ static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
* When CPU and PCH are on the same package, port A
* HPD must be enabled in both north and south.
*/
- return HAS_PCH_LPT_LP(i915) ?
+ return HAS_PCH_LPT_LP(display) ?
PORTA_HOTPLUG_ENABLE : 0;
case HPD_PORT_B:
return PORTB_HOTPLUG_ENABLE |
@@ -735,37 +731,37 @@ static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
}
}
-static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void ibx_hpd_detection_setup(struct intel_display *display)
{
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec).
* The pulse duration bits are reserved on LPT+.
*/
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
- intel_hpd_hotplug_mask(dev_priv, ibx_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ intel_hpd_hotplug_mask(display, ibx_hotplug_mask),
+ intel_hpd_hotplug_enables(display, ibx_hotplug_enables));
}
static void ibx_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
- ibx_hotplug_mask(encoder->hpd_pin),
- ibx_hotplug_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ ibx_hotplug_mask(encoder->hpd_pin),
+ ibx_hotplug_enables(encoder));
}
-static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void ibx_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- ibx_hpd_detection_setup(dev_priv);
+ ibx_hpd_detection_setup(display);
}
static u32 icp_ddi_hotplug_mask(enum hpd_pin hpd_pin)
@@ -806,36 +802,36 @@ static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder)
return icp_tc_hotplug_mask(encoder->hpd_pin);
}
-static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void icp_ddi_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI,
- intel_hpd_hotplug_mask(dev_priv, icp_ddi_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
+ intel_hpd_hotplug_mask(display, icp_ddi_hotplug_mask),
+ intel_hpd_hotplug_enables(display, icp_ddi_hotplug_enables));
}
static void icp_ddi_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_DDI,
- icp_ddi_hotplug_mask(encoder->hpd_pin),
- icp_ddi_hotplug_enables(encoder));
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
+ icp_ddi_hotplug_mask(encoder->hpd_pin),
+ icp_ddi_hotplug_enables(encoder));
}
-static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void icp_tc_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC,
- intel_hpd_hotplug_mask(dev_priv, icp_tc_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
+ intel_hpd_hotplug_mask(display, icp_tc_hotplug_mask),
+ intel_hpd_hotplug_enables(display, icp_tc_hotplug_enables));
}
static void icp_tc_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_TC,
- icp_tc_hotplug_mask(encoder->hpd_pin),
- icp_tc_hotplug_enables(encoder));
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
+ icp_tc_hotplug_mask(encoder->hpd_pin),
+ icp_tc_hotplug_enables(encoder));
}
static void icp_hpd_enable_detection(struct intel_encoder *encoder)
@@ -844,23 +840,23 @@ static void icp_hpd_enable_detection(struct intel_encoder *encoder)
icp_tc_hpd_enable_detection(encoder);
}
-static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void icp_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
/*
* We reduce the value to 250us to be able to detect SHPD when an external display
* is connected. This is also required by DP1.4a Table 3-4.
*/
- intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
+ intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- icp_ddi_hpd_detection_setup(dev_priv);
- icp_tc_hpd_detection_setup(dev_priv);
+ icp_ddi_hpd_detection_setup(display);
+ icp_tc_hpd_detection_setup(display);
}
static u32 gen11_hotplug_mask(enum hpd_pin hpd_pin)
@@ -883,88 +879,88 @@ static u32 gen11_hotplug_enables(struct intel_encoder *encoder)
return gen11_hotplug_mask(encoder->hpd_pin);
}
-static void dg1_hpd_invert(struct drm_i915_private *i915)
+static void dg1_hpd_invert(struct intel_display *display)
{
u32 val = (INVERT_DDIA_HPD |
INVERT_DDIB_HPD |
INVERT_DDIC_HPD |
INVERT_DDID_HPD);
- intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val);
+ intel_de_rmw(display, SOUTH_CHICKEN1, 0, val);
}
static void dg1_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- dg1_hpd_invert(i915);
+ dg1_hpd_invert(display);
icp_hpd_enable_detection(encoder);
}
-static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void dg1_hpd_irq_setup(struct intel_display *display)
{
- dg1_hpd_invert(dev_priv);
- icp_hpd_irq_setup(dev_priv);
+ dg1_hpd_invert(display);
+ icp_hpd_irq_setup(display);
}
-static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void gen11_tc_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL,
- intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
+ intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL,
+ intel_hpd_hotplug_mask(display, gen11_hotplug_mask),
+ intel_hpd_hotplug_enables(display, gen11_hotplug_enables));
}
static void gen11_tc_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, GEN11_TC_HOTPLUG_CTL,
- gen11_hotplug_mask(encoder->hpd_pin),
- gen11_hotplug_enables(encoder));
+ intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL,
+ gen11_hotplug_mask(encoder->hpd_pin),
+ gen11_hotplug_enables(encoder));
}
-static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void gen11_tbt_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL,
- intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
+ intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL,
+ intel_hpd_hotplug_mask(display, gen11_hotplug_mask),
+ intel_hpd_hotplug_enables(display, gen11_hotplug_enables));
}
static void gen11_tbt_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, GEN11_TBT_HOTPLUG_CTL,
- gen11_hotplug_mask(encoder->hpd_pin),
- gen11_hotplug_enables(encoder));
+ intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL,
+ gen11_hotplug_mask(encoder->hpd_pin),
+ gen11_hotplug_enables(encoder));
}
static void gen11_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
gen11_tc_hpd_enable_detection(encoder);
gen11_tbt_hpd_enable_detection(encoder);
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
icp_hpd_enable_detection(encoder);
}
-static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void gen11_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs,
- ~enabled_irqs & hotplug_irqs);
- intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
+ intel_de_rmw(display, GEN11_DE_HPD_IMR, hotplug_irqs,
+ ~enabled_irqs & hotplug_irqs);
+ intel_de_posting_read(display, GEN11_DE_HPD_IMR);
- gen11_tc_hpd_detection_setup(dev_priv);
- gen11_tbt_hpd_detection_setup(dev_priv);
+ gen11_tc_hpd_detection_setup(display);
+ gen11_tbt_hpd_detection_setup(display);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_hpd_irq_setup(dev_priv);
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ icp_hpd_irq_setup(display);
}
static u32 mtp_ddi_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1001,39 +997,39 @@ static u32 mtp_tc_hotplug_enables(struct intel_encoder *encoder)
return mtp_tc_hotplug_mask(encoder->hpd_pin);
}
-static void mtp_ddi_hpd_detection_setup(struct drm_i915_private *i915)
+static void mtp_ddi_hpd_detection_setup(struct intel_display *display)
{
- intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
- intel_hpd_hotplug_mask(i915, mtp_ddi_hotplug_mask),
- intel_hpd_hotplug_enables(i915, mtp_ddi_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
+ intel_hpd_hotplug_mask(display, mtp_ddi_hotplug_mask),
+ intel_hpd_hotplug_enables(display, mtp_ddi_hotplug_enables));
}
static void mtp_ddi_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
mtp_ddi_hotplug_mask(encoder->hpd_pin),
mtp_ddi_hotplug_enables(encoder));
}
-static void mtp_tc_hpd_detection_setup(struct drm_i915_private *i915)
+static void mtp_tc_hpd_detection_setup(struct intel_display *display)
{
- intel_de_rmw(i915, SHOTPLUG_CTL_TC,
- intel_hpd_hotplug_mask(i915, mtp_tc_hotplug_mask),
- intel_hpd_hotplug_enables(i915, mtp_tc_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
+ intel_hpd_hotplug_mask(display, mtp_tc_hotplug_mask),
+ intel_hpd_hotplug_enables(display, mtp_tc_hotplug_enables));
}
static void mtp_tc_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
mtp_tc_hotplug_mask(encoder->hpd_pin),
mtp_tc_hotplug_enables(encoder));
}
-static void mtp_hpd_invert(struct drm_i915_private *i915)
+static void mtp_hpd_invert(struct intel_display *display)
{
u32 val = (INVERT_DDIA_HPD |
INVERT_DDIB_HPD |
@@ -1044,49 +1040,49 @@ static void mtp_hpd_invert(struct drm_i915_private *i915)
INVERT_TC4_HPD |
INVERT_DDID_HPD_MTP |
INVERT_DDIE_HPD);
- intel_de_rmw(i915, SOUTH_CHICKEN1, 0, val);
+ intel_de_rmw(display, SOUTH_CHICKEN1, 0, val);
}
static void mtp_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- mtp_hpd_invert(i915);
+ mtp_hpd_invert(display);
mtp_ddi_hpd_enable_detection(encoder);
mtp_tc_hpd_enable_detection(encoder);
}
-static void mtp_hpd_irq_setup(struct drm_i915_private *i915)
+static void mtp_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
/*
* Use 250us here to align with the DP1.4a (Table 3-4) spec as to what the
* SHPD_FILTER_CNT value should be.
*/
- intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
+ intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
- mtp_hpd_invert(i915);
- ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
+ mtp_hpd_invert(display);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- mtp_ddi_hpd_detection_setup(i915);
- mtp_tc_hpd_detection_setup(i915);
+ mtp_ddi_hpd_detection_setup(display);
+ mtp_tc_hpd_detection_setup(display);
}
-static void xe2lpd_sde_hpd_irq_setup(struct drm_i915_private *i915)
+static void xe2lpd_sde_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
- ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- mtp_ddi_hpd_detection_setup(i915);
- mtp_tc_hpd_detection_setup(i915);
+ mtp_ddi_hpd_detection_setup(display);
+ mtp_tc_hpd_detection_setup(display);
}
static bool is_xelpdp_pica_hpd_pin(enum hpd_pin hpd_pin)
@@ -1094,7 +1090,7 @@ static bool is_xelpdp_pica_hpd_pin(enum hpd_pin hpd_pin)
return hpd_pin >= HPD_PORT_TC1 && hpd_pin <= HPD_PORT_TC4;
}
-static void _xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915,
+static void _xelpdp_pica_hpd_detection_setup(struct intel_display *display,
enum hpd_pin hpd_pin, bool enable)
{
u32 mask = XELPDP_TBT_HOTPLUG_ENABLE |
@@ -1103,18 +1099,18 @@ static void _xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915,
if (!is_xelpdp_pica_hpd_pin(hpd_pin))
return;
- intel_de_rmw(i915, XELPDP_PORT_HOTPLUG_CTL(hpd_pin),
+ intel_de_rmw(display, XELPDP_PORT_HOTPLUG_CTL(hpd_pin),
mask, enable ? mask : 0);
}
static void xelpdp_pica_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- _xelpdp_pica_hpd_detection_setup(i915, encoder->hpd_pin, true);
+ _xelpdp_pica_hpd_detection_setup(display, encoder->hpd_pin, true);
}
-static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915)
+static void xelpdp_pica_hpd_detection_setup(struct intel_display *display)
{
struct intel_encoder *encoder;
u32 available_pins = 0;
@@ -1122,11 +1118,11 @@ static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915)
BUILD_BUG_ON(BITS_PER_TYPE(available_pins) < HPD_NUM_PINS);
- for_each_intel_encoder(&i915->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
available_pins |= BIT(encoder->hpd_pin);
for_each_hpd_pin(pin)
- _xelpdp_pica_hpd_detection_setup(i915, pin, available_pins & BIT(pin));
+ _xelpdp_pica_hpd_detection_setup(display, pin, available_pins & BIT(pin));
}
static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder)
@@ -1135,23 +1131,23 @@ static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder)
mtp_hpd_enable_detection(encoder);
}
-static void xelpdp_hpd_irq_setup(struct drm_i915_private *i915)
+static void xelpdp_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- intel_de_rmw(i915, PICAINTERRUPT_IMR, hotplug_irqs,
+ intel_de_rmw(display, PICAINTERRUPT_IMR, hotplug_irqs,
~enabled_irqs & hotplug_irqs);
- intel_uncore_posting_read(&i915->uncore, PICAINTERRUPT_IMR);
+ intel_de_posting_read(display, PICAINTERRUPT_IMR);
- xelpdp_pica_hpd_detection_setup(i915);
+ xelpdp_pica_hpd_detection_setup(display);
- if (INTEL_PCH_TYPE(i915) >= PCH_LNL)
- xe2lpd_sde_hpd_irq_setup(i915);
- else if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
- mtp_hpd_irq_setup(i915);
+ if (INTEL_PCH_TYPE(display) >= PCH_LNL)
+ xe2lpd_sde_hpd_irq_setup(display);
+ else if (INTEL_PCH_TYPE(display) >= PCH_MTL)
+ mtp_hpd_irq_setup(display);
}
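In xelpdp_hpd_irq_setup() above, a set bit in PICAINTERRUPT_IMR masks the interrupt, so the rmw clears the whole hotplug bit range and then re-sets exactly the bits that are not enabled: ~enabled_irqs & hotplug_irqs. A standalone model of that mask arithmetic (register values are illustrative):

#include <stdio.h>

typedef unsigned int u32;

/* same semantics as intel_de_rmw(): clear bits first, then set bits */
static u32 rmw(u32 reg, u32 clear, u32 set)
{
	return (reg & ~clear) | set;
}

int main(void)
{
	u32 imr = 0xffffffffu;          /* everything masked after reset */
	u32 hotplug_irqs = 0x000000ffu; /* illustrative hotplug bit range */
	u32 enabled_irqs = 0x00000015u; /* illustrative enabled subset */

	imr = rmw(imr, hotplug_irqs, ~enabled_irqs & hotplug_irqs);

	/* only the enabled hotplug bits are now unmasked (clear) */
	printf("IMR: 0x%08x\n", imr);
	return 0;
}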
static u32 spt_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1190,57 +1186,57 @@ static u32 spt_hotplug2_enables(struct intel_encoder *encoder)
return spt_hotplug2_mask(encoder->hpd_pin);
}
-static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void spt_hpd_detection_setup(struct intel_display *display)
{
/* Display WA #1179 WaHardHangonHotPlug: cnp */
- if (HAS_PCH_CNP(dev_priv)) {
- intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
- CHASSIS_CLK_REQ_DURATION(0xf));
+ if (HAS_PCH_CNP(display)) {
+ intel_de_rmw(display, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
+ CHASSIS_CLK_REQ_DURATION(0xf));
}
/* Enable digital hotplug on the PCH */
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
- intel_hpd_hotplug_mask(dev_priv, spt_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ intel_hpd_hotplug_mask(display, spt_hotplug_mask),
+ intel_hpd_hotplug_enables(display, spt_hotplug_enables));
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2,
- intel_hpd_hotplug_mask(dev_priv, spt_hotplug2_mask),
- intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG2,
+ intel_hpd_hotplug_mask(display, spt_hotplug2_mask),
+ intel_hpd_hotplug_enables(display, spt_hotplug2_enables));
}
static void spt_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
/* Display WA #1179 WaHardHangonHotPlug: cnp */
- if (HAS_PCH_CNP(i915)) {
- intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1,
- CHASSIS_CLK_REQ_DURATION_MASK,
- CHASSIS_CLK_REQ_DURATION(0xf));
+ if (HAS_PCH_CNP(display)) {
+ intel_de_rmw(display, SOUTH_CHICKEN1,
+ CHASSIS_CLK_REQ_DURATION_MASK,
+ CHASSIS_CLK_REQ_DURATION(0xf));
}
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
- spt_hotplug_mask(encoder->hpd_pin),
- spt_hotplug_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ spt_hotplug_mask(encoder->hpd_pin),
+ spt_hotplug_enables(encoder));
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG2,
- spt_hotplug2_mask(encoder->hpd_pin),
- spt_hotplug2_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG2,
+ spt_hotplug2_mask(encoder->hpd_pin),
+ spt_hotplug2_enables(encoder));
}
-static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void spt_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
- intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+ if (INTEL_PCH_TYPE(display) >= PCH_CNP)
+ intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- spt_hpd_detection_setup(dev_priv);
+ spt_hpd_detection_setup(display);
}
static u32 ilk_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1265,44 +1261,44 @@ static u32 ilk_hotplug_enables(struct intel_encoder *encoder)
}
}
-static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void ilk_hpd_detection_setup(struct intel_display *display)
{
/*
* Enable digital hotplug on the CPU, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec)
* The pulse duration bits are reserved on HSW+.
*/
- intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
- intel_hpd_hotplug_mask(dev_priv, ilk_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables));
+ intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL,
+ intel_hpd_hotplug_mask(display, ilk_hotplug_mask),
+ intel_hpd_hotplug_enables(display, ilk_hotplug_enables));
}
static void ilk_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
- ilk_hotplug_mask(encoder->hpd_pin),
- ilk_hotplug_enables(encoder));
+ intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL,
+ ilk_hotplug_mask(encoder->hpd_pin),
+ ilk_hotplug_enables(encoder));
ibx_hpd_enable_detection(encoder);
}
-static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void ilk_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- if (DISPLAY_VER(dev_priv) >= 8)
- bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+ if (DISPLAY_VER(display) >= 8)
+ bdw_update_port_irq(display, hotplug_irqs, enabled_irqs);
else
- ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
+ ilk_update_display_irq(display, hotplug_irqs, enabled_irqs);
- ilk_hpd_detection_setup(dev_priv);
+ ilk_hpd_detection_setup(display);
- ibx_hpd_irq_setup(dev_priv);
+ ibx_hpd_irq_setup(display);
}
static u32 bxt_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1344,80 +1340,80 @@ static u32 bxt_hotplug_enables(struct intel_encoder *encoder)
}
}
-static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void bxt_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
- intel_hpd_hotplug_mask(dev_priv, bxt_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ intel_hpd_hotplug_mask(display, bxt_hotplug_mask),
+ intel_hpd_hotplug_enables(display, bxt_hotplug_enables));
}
static void bxt_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
- bxt_hotplug_mask(encoder->hpd_pin),
- bxt_hotplug_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ bxt_hotplug_mask(encoder->hpd_pin),
+ bxt_hotplug_enables(encoder));
}
-static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void bxt_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+ bdw_update_port_irq(display, hotplug_irqs, enabled_irqs);
- bxt_hpd_detection_setup(dev_priv);
+ bxt_hpd_detection_setup(display);
}
-static void g45_hpd_peg_band_gap_wa(struct drm_i915_private *i915)
+static void g45_hpd_peg_band_gap_wa(struct intel_display *display)
{
/*
* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- intel_de_rmw(i915, PEG_BAND_GAP_DATA, 0xf, 0xd);
+ intel_de_rmw(display, PEG_BAND_GAP_DATA, 0xf, 0xd);
}
static void i915_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 hotplug_en = hpd_mask_i915[encoder->hpd_pin];
- if (IS_G45(i915))
- g45_hpd_peg_band_gap_wa(i915);
+ if (display->platform.g45)
+ g45_hpd_peg_band_gap_wa(display);
/* HPD sense and interrupt enable are one and the same */
- i915_hotplug_interrupt_update(i915, hotplug_en, hotplug_en);
+ i915_hotplug_interrupt_update(display, hotplug_en, hotplug_en);
}
-static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void i915_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_en;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
/*
* Note HDMI and DP share hotplug bits. Enable bits are the same for all
* generations.
*/
- hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
+ hotplug_en = intel_hpd_enabled_irqs(display, hpd_mask_i915);
/*
* Programming the CRT detection parameters tends to generate a spurious
* hotplug event about three seconds later. So just do it once.
*/
- if (IS_G4X(dev_priv))
+ if (display->platform.g4x)
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
- if (IS_G45(dev_priv))
- g45_hpd_peg_band_gap_wa(dev_priv);
+ if (display->platform.g45)
+ g45_hpd_peg_band_gap_wa(display);
/* Ignore TV since it's buggy */
- i915_hotplug_interrupt_update_locked(dev_priv,
+ i915_hotplug_interrupt_update_locked(display,
HOTPLUG_INT_EN_MASK |
CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
CRT_HOTPLUG_ACTIVATION_PERIOD_64,
@@ -1426,7 +1422,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
struct intel_hotplug_funcs {
/* Enable HPD sense and interrupts for all present encoders */
- void (*hpd_irq_setup)(struct drm_i915_private *i915);
+ void (*hpd_irq_setup)(struct intel_display *display);
/* Enable HPD sense for a single encoder */
void (*hpd_enable_detection)(struct intel_encoder *encoder);
};
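The HPD_FUNCS(ilk) use visible in the next hunk header pairs each platform's two callbacks into one const ops table. The macro body is not shown in this diff; a plausible sketch, assuming it simply stamps out a <platform>_hpd_funcs structure by token pasting:

struct intel_display;
struct intel_encoder;

struct intel_hotplug_funcs {
	void (*hpd_irq_setup)(struct intel_display *display);
	void (*hpd_enable_detection)(struct intel_encoder *encoder);
};

/* assumed expansion: HPD_FUNCS(ilk) would define ilk_hpd_funcs wired to
 * ilk_hpd_irq_setup() and ilk_hpd_enable_detection() */
#define HPD_FUNCS(platform)						\
static const struct intel_hotplug_funcs platform##_hpd_funcs = {	\
	.hpd_irq_setup = platform##_hpd_irq_setup,			\
	.hpd_enable_detection = platform##_hpd_enable_detection,	\
}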
@@ -1449,47 +1445,47 @@ HPD_FUNCS(ilk);
void intel_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (i915->display.funcs.hotplug)
- i915->display.funcs.hotplug->hpd_enable_detection(encoder);
+ if (display->funcs.hotplug)
+ display->funcs.hotplug->hpd_enable_detection(encoder);
}
-void intel_hpd_irq_setup(struct drm_i915_private *i915)
+void intel_hpd_irq_setup(struct intel_display *display)
{
- if ((IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
- !i915->display.irq.vlv_display_irqs_enabled)
+ if ((display->platform.valleyview || display->platform.cherryview) &&
+ !display->irq.vlv_display_irqs_enabled)
return;
- if (i915->display.funcs.hotplug)
- i915->display.funcs.hotplug->hpd_irq_setup(i915);
+ if (display->funcs.hotplug)
+ display->funcs.hotplug->hpd_irq_setup(display);
}
-void intel_hotplug_irq_init(struct drm_i915_private *i915)
+void intel_hotplug_irq_init(struct intel_display *display)
{
- intel_hpd_init_pins(i915);
+ intel_hpd_init_pins(display);
- intel_hpd_init_early(i915);
+ intel_hpd_init_early(display);
- if (HAS_GMCH(i915)) {
- if (I915_HAS_HOTPLUG(i915))
- i915->display.funcs.hotplug = &i915_hpd_funcs;
+ if (HAS_GMCH(display)) {
+ if (HAS_HOTPLUG(display))
+ display->funcs.hotplug = &i915_hpd_funcs;
} else {
- if (HAS_PCH_DG2(i915))
- i915->display.funcs.hotplug = &icp_hpd_funcs;
- else if (HAS_PCH_DG1(i915))
- i915->display.funcs.hotplug = &dg1_hpd_funcs;
- else if (DISPLAY_VER(i915) >= 14)
- i915->display.funcs.hotplug = &xelpdp_hpd_funcs;
- else if (DISPLAY_VER(i915) >= 11)
- i915->display.funcs.hotplug = &gen11_hpd_funcs;
- else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
- i915->display.funcs.hotplug = &bxt_hpd_funcs;
- else if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
- i915->display.funcs.hotplug = &icp_hpd_funcs;
- else if (INTEL_PCH_TYPE(i915) >= PCH_SPT)
- i915->display.funcs.hotplug = &spt_hpd_funcs;
+ if (HAS_PCH_DG2(display))
+ display->funcs.hotplug = &icp_hpd_funcs;
+ else if (HAS_PCH_DG1(display))
+ display->funcs.hotplug = &dg1_hpd_funcs;
+ else if (DISPLAY_VER(display) >= 14)
+ display->funcs.hotplug = &xelpdp_hpd_funcs;
+ else if (DISPLAY_VER(display) >= 11)
+ display->funcs.hotplug = &gen11_hpd_funcs;
+ else if (display->platform.geminilake || display->platform.broxton)
+ display->funcs.hotplug = &bxt_hpd_funcs;
+ else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ display->funcs.hotplug = &icp_hpd_funcs;
+ else if (INTEL_PCH_TYPE(display) >= PCH_SPT)
+ display->funcs.hotplug = &spt_hpd_funcs;
else
- i915->display.funcs.hotplug = &ilk_hpd_funcs;
+ display->funcs.hotplug = &ilk_hpd_funcs;
}
}
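Every hunk in this file follows the same conversion: helpers that took struct drm_i915_private * now take struct intel_display *, and encoder-scoped functions derive it with to_intel_display() instead of to_i915(). A minimal user-space model of that derivation, assuming each display object can reach the shared intel_display through its drm device (field names are illustrative, not the kernel's):

#include <stdio.h>

struct intel_display { int ver; };

struct drm_device { struct intel_display *display; };

struct intel_encoder { struct drm_device *dev; };

/* stand-in for the kernel's to_intel_display() conversion */
static struct intel_display *to_intel_display(struct intel_encoder *encoder)
{
	return encoder->dev->display;
}

int main(void)
{
	struct intel_display display = { .ver = 14 };
	struct drm_device drm = { .display = &display };
	struct intel_encoder encoder = { .dev = &drm };

	printf("display ver: %d\n", to_intel_display(&encoder)->ver);
	return 0;
}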
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.h b/drivers/gpu/drm/i915/display/intel_hotplug_irq.h
index e4db752df096..9063bb02a2e9 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.h
@@ -8,28 +8,28 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_display;
struct intel_encoder;
-u32 i9xx_hpd_irq_ack(struct drm_i915_private *i915);
+u32 i9xx_hpd_irq_ack(struct intel_display *display);
-void i9xx_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_status);
-void ibx_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger);
-void ilk_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger);
-void gen11_hpd_irq_handler(struct drm_i915_private *i915, u32 iir);
-void bxt_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger);
-void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir);
-void icp_irq_handler(struct drm_i915_private *i915, u32 pch_iir);
-void spt_irq_handler(struct drm_i915_private *i915, u32 pch_iir);
+void i9xx_hpd_irq_handler(struct intel_display *display, u32 hotplug_status);
+void ibx_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger);
+void ilk_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger);
+void gen11_hpd_irq_handler(struct intel_display *display, u32 iir);
+void bxt_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger);
+void xelpdp_pica_irq_handler(struct intel_display *display, u32 iir);
+void icp_irq_handler(struct intel_display *display, u32 pch_iir);
+void spt_irq_handler(struct intel_display *display, u32 pch_iir);
-void i915_hotplug_interrupt_update_locked(struct drm_i915_private *i915,
+void i915_hotplug_interrupt_update_locked(struct intel_display *display,
u32 mask, u32 bits);
-void i915_hotplug_interrupt_update(struct drm_i915_private *i915,
+void i915_hotplug_interrupt_update(struct intel_display *display,
u32 mask, u32 bits);
void intel_hpd_enable_detection(struct intel_encoder *encoder);
-void intel_hpd_irq_setup(struct drm_i915_private *i915);
+void intel_hpd_irq_setup(struct intel_display *display);
-void intel_hotplug_irq_init(struct drm_i915_private *i915);
+void intel_hotplug_irq_init(struct intel_display *display);
#endif /* __INTEL_HOTPLUG_IRQ_H__ */
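The header side of the conversion is a one-line swap of the opaque forward declaration: prototypes only pass the pointer through, so they never need the full struct definition. A minimal model of why that is enough to compile:

struct intel_display;	/* opaque here; only files that dereference it
			 * need the real definition */

void intel_hpd_irq_setup(struct intel_display *display);	/* prototype only */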
diff --git a/drivers/gpu/drm/i915/display/intel_hti.c b/drivers/gpu/drm/i915/display/intel_hti.c
index fb6b84f6a81d..dc454420c134 100644
--- a/drivers/gpu/drm/i915/display/intel_hti.c
+++ b/drivers/gpu/drm/i915/display/intel_hti.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_device.h>
+#include <drm/drm_print.h>
#include "intel_de.h"
#include "intel_display.h"
diff --git a/drivers/gpu/drm/i915/display/intel_load_detect.c b/drivers/gpu/drm/i915/display/intel_load_detect.c
index 86cc03a4413c..aad52d0d83e1 100644
--- a/drivers/gpu/drm/i915/display/intel_load_detect.c
+++ b/drivers/gpu/drm/i915/display/intel_load_detect.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_print.h>
#include "intel_atomic.h"
#include "intel_crtc.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 59551c8414c2..666148a14522 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -179,7 +179,7 @@ static int lpe_audio_irq_init(struct intel_display *display)
handle_simple_irq,
"hdmi_lpe_audio_irq_handler");
- return irq_set_chip_data(irq, dev_priv);
+ return 0;
}
static bool lpe_audio_detect(struct intel_display *display)
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 63c1afa30b05..f94b7eeae20f 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -27,6 +27,7 @@
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include "i915_reg.h"
#include "i915_utils.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 19f52d1659fa..8ce7c630da52 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -37,9 +37,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
@@ -84,15 +84,15 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct intel_encoder *encoder)
return container_of(encoder, struct intel_lvds_encoder, base);
}
-bool intel_lvds_port_enabled(struct drm_i915_private *i915,
+bool intel_lvds_port_enabled(struct intel_display *display,
i915_reg_t lvds_reg, enum pipe *pipe)
{
u32 val;
- val = intel_de_read(i915, lvds_reg);
+ val = intel_de_read(display, lvds_reg);
/* asserts want to know the pipe even if the port is disabled */
- if (HAS_PCH_CPT(i915))
+ if (HAS_PCH_CPT(display))
*pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val);
else
*pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK, val);
@@ -104,7 +104,6 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
intel_wakeref_t wakeref;
bool ret;
@@ -113,7 +112,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
if (!wakeref)
return false;
- ret = intel_lvds_port_enabled(i915, lvds_encoder->reg, pipe);
+ ret = intel_lvds_port_enabled(display, lvds_encoder->reg, pipe);
intel_display_power_put(display, encoder->power_domain, wakeref);
@@ -123,13 +122,13 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
u32 tmp, flags = 0;
crtc_state->output_types |= BIT(INTEL_OUTPUT_LVDS);
- tmp = intel_de_read(dev_priv, lvds_encoder->reg);
+ tmp = intel_de_read(display, lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
flags |= DRM_MODE_FLAG_NHSYNC;
else
@@ -141,13 +140,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
crtc_state->hw.adjusted_mode.flags |= flags;
- if (DISPLAY_VER(dev_priv) < 5)
+ if (DISPLAY_VER(display) < 5)
crtc_state->gmch_pfit.lvds_border_bits =
tmp & LVDS_BORDER_ENABLE;
/* gen2/3 store dither state in pfit control, needs to match */
- if (DISPLAY_VER(dev_priv) < 4) {
- tmp = intel_de_read(dev_priv, PFIT_CONTROL(dev_priv));
+ if (DISPLAY_VER(display) < 4) {
+ tmp = intel_de_read(display, PFIT_CONTROL(display));
crtc_state->gmch_pfit.control |= tmp & PFIT_PANEL_8TO6_DITHER_ENABLE;
}
@@ -155,24 +154,24 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock;
}
-static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
+static void intel_lvds_pps_get_hw_state(struct intel_display *display,
struct intel_lvds_pps *pps)
{
u32 val;
- pps->powerdown_on_reset = intel_de_read(dev_priv,
- PP_CONTROL(dev_priv, 0)) & PANEL_POWER_RESET;
+ pps->powerdown_on_reset = intel_de_read(display,
+ PP_CONTROL(display, 0)) & PANEL_POWER_RESET;
- val = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, 0));
+ val = intel_de_read(display, PP_ON_DELAYS(display, 0));
pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val);
pps->delays.power_up = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
pps->delays.backlight_on = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
- val = intel_de_read(dev_priv, PP_OFF_DELAYS(dev_priv, 0));
+ val = intel_de_read(display, PP_OFF_DELAYS(display, 0));
pps->delays.power_down = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
pps->delays.backlight_off = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
- val = intel_de_read(dev_priv, PP_DIVISOR(dev_priv, 0));
+ val = intel_de_read(display, PP_DIVISOR(display, 0));
pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val);
val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val);
/*
@@ -185,12 +184,12 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
/* Convert from 100ms to 100us units */
pps->delays.power_cycle = val * 1000;
- if (DISPLAY_VER(dev_priv) < 5 &&
+ if (DISPLAY_VER(display) < 5 &&
pps->delays.power_up == 0 &&
pps->delays.backlight_on == 0 &&
pps->delays.power_down == 0 &&
pps->delays.backlight_off == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Panel power timings uninitialized, "
"setting defaults\n");
/* Set T2 to 40ms and T5 to 200ms in 100 usec units */
@@ -201,7 +200,7 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->delays.backlight_off = 200 * 10;
}
- drm_dbg(&dev_priv->drm, "LVDS PPS:power_up %d power_down %d power_cycle %d backlight_on %d backlight_off %d "
+ drm_dbg(display->drm, "LVDS PPS:power_up %d power_down %d power_cycle %d backlight_on %d backlight_off %d "
"divider %d port %d powerdown_on_reset %d\n",
pps->delays.power_up, pps->delays.power_down,
pps->delays.power_cycle, pps->delays.backlight_on,
@@ -209,28 +208,28 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->port, pps->powerdown_on_reset);
}
-static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
+static void intel_lvds_pps_init_hw(struct intel_display *display,
struct intel_lvds_pps *pps)
{
u32 val;
- val = intel_de_read(dev_priv, PP_CONTROL(dev_priv, 0));
- drm_WARN_ON(&dev_priv->drm,
+ val = intel_de_read(display, PP_CONTROL(display, 0));
+ drm_WARN_ON(display->drm,
(val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS);
if (pps->powerdown_on_reset)
val |= PANEL_POWER_RESET;
- intel_de_write(dev_priv, PP_CONTROL(dev_priv, 0), val);
+ intel_de_write(display, PP_CONTROL(display, 0), val);
- intel_de_write(dev_priv, PP_ON_DELAYS(dev_priv, 0),
+ intel_de_write(display, PP_ON_DELAYS(display, 0),
REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->delays.power_up) |
REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->delays.backlight_on));
- intel_de_write(dev_priv, PP_OFF_DELAYS(dev_priv, 0),
+ intel_de_write(display, PP_OFF_DELAYS(display, 0),
REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->delays.power_down) |
REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->delays.backlight_off));
- intel_de_write(dev_priv, PP_DIVISOR(dev_priv, 0),
+ intel_de_write(display, PP_DIVISOR(display, 0),
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
DIV_ROUND_UP(pps->delays.power_cycle, 1000) + 1));
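The unit handling in the two PPS functions above is easy to miss: PP_DIVISOR stores the power-cycle delay in 100 ms units, while software keeps all delays in 100 us units, hence the val * 1000 on readout and the DIV_ROUND_UP(..., 1000) + 1 on writeback. A standalone check of that round trip (the field value is illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int hw_field = 5;			/* 5 * 100 ms from PP_DIVISOR */
	unsigned int power_cycle = hw_field * 1000;	/* now in 100 us units */

	/* writeback: to 100 ms units, rounded up, plus one as in the diff */
	unsigned int encoded = DIV_ROUND_UP(power_cycle, 1000) + 1;

	printf("power_cycle = %u (x100us), re-encoded field = %u\n",
	       power_cycle, encoded);
	return 0;
}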
@@ -243,25 +242,24 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(state);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
enum pipe pipe = crtc->pipe;
u32 temp;
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
assert_fdi_rx_pll_disabled(display, pipe);
assert_shared_dpll_disabled(display, crtc_state->shared_dpll);
} else {
assert_pll_disabled(display, pipe);
}
- intel_lvds_pps_init_hw(i915, &lvds_encoder->init_pps);
+ intel_lvds_pps_init_hw(display, &lvds_encoder->init_pps);
temp = lvds_encoder->init_lvds_val;
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(i915)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~LVDS_PIPE_SEL_MASK_CPT;
temp |= LVDS_PIPE_SEL_CPT(pipe);
} else {
@@ -296,7 +294,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
* special lvds dither control bit on pch-split platforms, dithering is
* only controlled through the TRANSCONF reg.
*/
- if (DISPLAY_VER(i915) == 4) {
+ if (DISPLAY_VER(display) == 4) {
/*
* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels.
@@ -312,7 +310,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
- intel_de_write(i915, lvds_encoder->reg, temp);
+ intel_de_write(display, lvds_encoder->reg, temp);
}
/*
@@ -323,16 +321,16 @@ static void intel_enable_lvds(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_de_rmw(dev_priv, lvds_encoder->reg, 0, LVDS_PORT_EN);
+ intel_de_rmw(display, lvds_encoder->reg, 0, LVDS_PORT_EN);
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, 0), 0, PANEL_POWER_ON);
- intel_de_posting_read(dev_priv, lvds_encoder->reg);
+ intel_de_rmw(display, PP_CONTROL(display, 0), 0, PANEL_POWER_ON);
+ intel_de_posting_read(display, lvds_encoder->reg);
- if (intel_de_wait_for_set(dev_priv, PP_STATUS(dev_priv, 0), PP_ON, 5000))
- drm_err(&dev_priv->drm,
+ if (intel_de_wait_for_set(display, PP_STATUS(display, 0), PP_ON, 5000))
+ drm_err(display->drm,
"timed out waiting for panel to power on\n");
intel_backlight_enable(crtc_state, conn_state);
@@ -343,16 +341,16 @@ static void intel_disable_lvds(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, 0), PANEL_POWER_ON, 0);
- if (intel_de_wait_for_clear(dev_priv, PP_STATUS(dev_priv, 0), PP_ON, 1000))
- drm_err(&dev_priv->drm,
+ intel_de_rmw(display, PP_CONTROL(display, 0), PANEL_POWER_ON, 0);
+ if (intel_de_wait_for_clear(display, PP_STATUS(display, 0), PP_ON, 1000))
+ drm_err(display->drm,
"timed out waiting for panel to power off\n");
- intel_de_rmw(dev_priv, lvds_encoder->reg, LVDS_PORT_EN, 0);
- intel_de_posting_read(dev_priv, lvds_encoder->reg);
+ intel_de_rmw(display, lvds_encoder->reg, LVDS_PORT_EN, 0);
+ intel_de_posting_read(display, lvds_encoder->reg);
}
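intel_enable_lvds() and intel_disable_lvds() above share one shape: flip the PANEL_POWER_ON bit in PP_CONTROL, then poll PP_STATUS until PP_ON reaches the matching state or a timeout fires. A standalone simulation of the power-off half (register behaviour is faked; bit positions and the timeout are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define PANEL_POWER_ON	(1u << 31)
#define PP_ON		(1u << 31)

static unsigned int pp_control = PANEL_POWER_ON;
static unsigned int pp_status = PP_ON;

static void de_rmw(unsigned int *reg, unsigned int clear, unsigned int set)
{
	*reg = (*reg & ~clear) | set;
	/* fake hardware: status tracks the control bit immediately */
	pp_status = (pp_control & PANEL_POWER_ON) ? PP_ON : 0;
}

/* returns true on timeout, like intel_de_wait_for_clear(); each loop
 * iteration stands in for one millisecond of polling */
static bool wait_for_clear(const unsigned int *reg, unsigned int bits,
			   int timeout_ms)
{
	for (int ms = 0; ms < timeout_ms; ms++)
		if (!(*reg & bits))
			return false;
	return true;
}

int main(void)
{
	de_rmw(&pp_control, PANEL_POWER_ON, 0);
	if (wait_for_clear(&pp_status, PP_ON, 1000))
		printf("timed out waiting for panel to power off\n");
	else
		printf("panel powered off\n");
	return 0;
}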
static void gmch_disable_lvds(struct intel_atomic_state *state,
@@ -384,10 +382,10 @@ static void pch_post_disable_lvds(struct intel_atomic_state *state,
static void intel_lvds_shutdown(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (intel_de_wait_for_clear(dev_priv, PP_STATUS(dev_priv, 0), PP_CYCLE_DELAY_ACTIVE, 5000))
- drm_err(&dev_priv->drm,
+ if (intel_de_wait_for_clear(display, PP_STATUS(display, 0), PP_CYCLE_DELAY_ACTIVE, 5000))
+ drm_err(display->drm,
"timed out waiting for panel power cycle delay\n");
}
@@ -420,7 +418,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
struct intel_connector *connector = lvds_encoder->attached_connector;
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -429,12 +427,12 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
int ret;
/* Should never happen!! */
- if (DISPLAY_VER(i915) < 4 && crtc->pipe == 0) {
- drm_err(&i915->drm, "Can't support LVDS on pipe A\n");
+ if (DISPLAY_VER(display) < 4 && crtc->pipe == 0) {
+ drm_err(display->drm, "Can't support LVDS on pipe A\n");
return -EINVAL;
}
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
crtc_state->has_pch_encoder = true;
if (!intel_fdi_compute_pipe_bpp(crtc_state))
return -EINVAL;
@@ -447,7 +445,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
/* TODO: Check crtc_state->max_link_bpp_x16 instead of bw_constrained */
if (lvds_bpp != crtc_state->pipe_bpp && !crtc_state->bw_constrained) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"forcing display bpp (was %d) to LVDS (%d)\n",
crtc_state->pipe_bpp, lvds_bpp);
crtc_state->pipe_bpp = lvds_bpp;
@@ -775,11 +773,11 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
{ } /* terminating entry */
};
-struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915)
+struct intel_encoder *intel_get_lvds_encoder(struct intel_display *display)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
if (encoder->type == INTEL_OUTPUT_LVDS)
return encoder;
}
@@ -787,24 +785,24 @@ struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915)
return NULL;
}
-bool intel_is_dual_link_lvds(struct drm_i915_private *i915)
+bool intel_is_dual_link_lvds(struct intel_display *display)
{
- struct intel_encoder *encoder = intel_get_lvds_encoder(i915);
+ struct intel_encoder *encoder = intel_get_lvds_encoder(display);
return encoder && to_lvds_encoder(encoder)->is_dual_link;
}
static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
{
- struct drm_i915_private *i915 = to_i915(lvds_encoder->base.base.dev);
+ struct intel_display *display = to_intel_display(&lvds_encoder->base);
struct intel_connector *connector = lvds_encoder->attached_connector;
const struct drm_display_mode *fixed_mode =
intel_panel_preferred_fixed_mode(connector);
unsigned int val;
/* use the module option value if specified */
- if (i915->display.params.lvds_channel_mode > 0)
- return i915->display.params.lvds_channel_mode == 2;
+ if (display->params.lvds_channel_mode > 0)
+ return display->params.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
if (fixed_mode->clock > 112999)
@@ -819,8 +817,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
* we need to check "the value to be set" in VBT when LVDS
* register is uninitialized.
*/
- val = intel_de_read(i915, lvds_encoder->reg);
- if (HAS_PCH_CPT(i915))
+ val = intel_de_read(display, lvds_encoder->reg);
+ if (HAS_PCH_CPT(display))
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
else
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
@@ -837,14 +835,13 @@ static void intel_lvds_add_properties(struct drm_connector *connector)
/**
* intel_lvds_init - setup LVDS connectors on this device
- * @i915: i915 device
+ * @display: display device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
-void intel_lvds_init(struct drm_i915_private *i915)
+void intel_lvds_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_lvds_encoder *lvds_encoder;
struct intel_connector *connector;
const struct drm_edid *drm_edid;
@@ -855,25 +852,25 @@ void intel_lvds_init(struct drm_i915_private *i915)
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds)) {
- drm_WARN(&i915->drm, !i915->display.vbt.int_lvds_support,
+ drm_WARN(display->drm, !display->vbt.int_lvds_support,
"Useless DMI match. Internal LVDS support disabled by VBT\n");
return;
}
- if (!i915->display.vbt.int_lvds_support) {
- drm_dbg_kms(&i915->drm,
+ if (!display->vbt.int_lvds_support) {
+ drm_dbg_kms(display->drm,
"Internal LVDS support disabled by VBT\n");
return;
}
- if (HAS_PCH_SPLIT(i915))
+ if (HAS_PCH_SPLIT(display))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
- lvds = intel_de_read(i915, lvds_reg);
+ lvds = intel_de_read(display, lvds_reg);
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
}
@@ -881,11 +878,11 @@ void intel_lvds_init(struct drm_i915_private *i915)
ddc_pin = GMBUS_PIN_PANEL;
if (!intel_bios_is_lvds_present(display, &ddc_pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"LVDS is not present in VBT\n");
return;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"LVDS is not present in VBT, but enabled anyway\n");
}
@@ -902,18 +899,18 @@ void intel_lvds_init(struct drm_i915_private *i915)
lvds_encoder->attached_connector = connector;
encoder = &lvds_encoder->base;
- drm_connector_init_with_ddc(&i915->drm, &connector->base,
+ drm_connector_init_with_ddc(display->drm, &connector->base,
&intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS,
intel_gmbus_get_adapter(display, ddc_pin));
- drm_encoder_init(&i915->drm, &encoder->base, &intel_lvds_enc_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS, "LVDS");
encoder->enable = intel_enable_lvds;
encoder->pre_enable = intel_pre_enable_lvds;
encoder->compute_config = intel_lvds_compute_config;
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
encoder->disable = pch_disable_lvds;
encoder->post_disable = pch_post_disable_lvds;
} else {
@@ -931,7 +928,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
encoder->port = PORT_NONE;
encoder->cloneable = 0;
- if (DISPLAY_VER(i915) < 4)
+ if (DISPLAY_VER(display) < 4)
encoder->pipe_mask = BIT(PIPE_B);
else
encoder->pipe_mask = ~0;
@@ -943,7 +940,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
intel_lvds_add_properties(&connector->base);
- intel_lvds_pps_get_hw_state(i915, &lvds_encoder->init_pps);
+ intel_lvds_pps_get_hw_state(display, &lvds_encoder->init_pps);
lvds_encoder->init_lvds_val = lvds;
/*
@@ -958,7 +955,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- mutex_lock(&i915->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC)
drm_edid = drm_edid_read_switcheroo(&connector->base, connector->base.ddc);
else
@@ -991,7 +988,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
if (!intel_panel_preferred_fixed_mode(connector))
intel_panel_add_encoder_fixed_mode(connector, encoder);
- mutex_unlock(&i915->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
/* If we still don't have a mode after all that, give up. */
if (!intel_panel_preferred_fixed_mode(connector))
@@ -1002,7 +999,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
intel_backlight_setup(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
- drm_dbg_kms(&i915->drm, "detected %s-link lvds configuration\n",
+ drm_dbg_kms(display->drm, "detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
@@ -1010,7 +1007,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
return;
failed:
- drm_dbg_kms(&i915->drm, "No LVDS modes found, disabling.\n");
+ drm_dbg_kms(display->drm, "No LVDS modes found, disabling.\n");
drm_connector_cleanup(&connector->base);
drm_encoder_cleanup(&encoder->base);
kfree(lvds_encoder);
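compute_is_dual_link_lvds() above decides the link mode in steps: honour the lvds_channel_mode module parameter if set, force dual link above the ~112 MHz single-channel limit, and otherwise consult DMI quirks and the LVDS register state. A sketch of the first two steps only; below the clock limit it simply assumes single link, omitting the DMI and register fallbacks shown in the diff:

#include <stdbool.h>
#include <stdio.h>

/* lvds_channel_mode: 0 = auto, 1 = single, 2 = dual, as in the diff */
static bool is_dual_link(int lvds_channel_mode, int fixed_mode_clock_khz)
{
	/* use the module option value if specified */
	if (lvds_channel_mode > 0)
		return lvds_channel_mode == 2;

	/* single channel LVDS is limited to 112 MHz */
	return fixed_mode_clock_khz > 112999;
}

int main(void)
{
	printf("140 MHz panel: %s-link\n",
	       is_dual_link(0, 140000) ? "dual" : "single");
	printf("forced single: %s-link\n",
	       is_dual_link(1, 140000) ? "dual" : "single");
	return 0;
}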
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.h b/drivers/gpu/drm/i915/display/intel_lvds.h
index 7ad5fa9c0434..a6db1706a97c 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.h
+++ b/drivers/gpu/drm/i915/display/intel_lvds.h
@@ -11,28 +11,28 @@
#include "i915_reg_defs.h"
enum pipe;
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_lvds_port_enabled(struct intel_display *display,
i915_reg_t lvds_reg, enum pipe *pipe);
-void intel_lvds_init(struct drm_i915_private *dev_priv);
-struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv);
-bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv);
+void intel_lvds_init(struct intel_display *display);
+struct intel_encoder *intel_get_lvds_encoder(struct intel_display *display);
+bool intel_is_dual_link_lvds(struct intel_display *display);
#else
-static inline bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+static inline bool intel_lvds_port_enabled(struct intel_display *display,
i915_reg_t lvds_reg, enum pipe *pipe)
{
return false;
}
-static inline void intel_lvds_init(struct drm_i915_private *dev_priv)
+static inline void intel_lvds_init(struct intel_display *display)
{
}
-static inline struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
+static inline struct intel_encoder *intel_get_lvds_encoder(struct intel_display *display)
{
return NULL;
}
-static inline bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv)
+static inline bool intel_is_dual_link_lvds(struct intel_display *display)
{
return false;
}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 312b21b1ab59..0325b0c9506d 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -6,11 +6,11 @@
* state.
*/
-#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
@@ -31,13 +31,14 @@
#include "intel_pmdemand.h"
#include "intel_tc.h"
#include "intel_vblank.h"
+#include "intel_vga.h"
#include "intel_wm.h"
#include "skl_watermark.h"
static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
@@ -48,7 +49,7 @@ static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
if (!crtc_state->hw.active)
return;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -56,9 +57,9 @@ static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
intel_plane_disable_noatomic(crtc, plane);
}
- state = drm_atomic_state_alloc(&i915->drm);
+ state = drm_atomic_state_alloc(display->drm);
if (!state) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"failed to disable [CRTC:%d:%s], out of memory",
crtc->base.base.id, crtc->base.name);
return;
@@ -68,7 +69,7 @@ static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
to_intel_atomic_state(state)->internal = true;
/* Everything's already locked, -EDEADLK can't happen. */
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc,
BIT(pipe) |
intel_crtc_joiner_secondary_pipes(crtc_state)) {
struct intel_crtc_state *temp_crtc_state =
@@ -77,14 +78,14 @@ static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
ret = drm_atomic_add_affected_connectors(state, &temp_crtc->base);
- drm_WARN_ON(&i915->drm, IS_ERR(temp_crtc_state) || ret);
+ drm_WARN_ON(display->drm, IS_ERR(temp_crtc_state) || ret);
}
- i915->display.funcs.display->crtc_disable(to_intel_atomic_state(state), crtc);
+ display->funcs.display->crtc_disable(to_intel_atomic_state(state), crtc);
drm_atomic_state_put(state);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
crtc->base.base.id, crtc->base.name);
@@ -118,13 +119,12 @@ static void set_encoder_for_connector(struct intel_connector *connector,
static void reset_encoder_connector_state(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_pmdemand_state *pmdemand_state =
- to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
+ to_intel_pmdemand_state(display->pmdemand.obj.state);
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->base.encoder != &encoder->base)
continue;
@@ -143,10 +143,10 @@ static void reset_encoder_connector_state(struct intel_encoder *encoder)
static void reset_crtc_encoder_state(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_encoder *encoder;
- for_each_encoder_on_crtc(&i915->drm, &crtc->base, encoder) {
+ for_each_encoder_on_crtc(display->drm, &crtc->base, encoder) {
reset_encoder_connector_state(encoder);
encoder->base.crtc = NULL;
}
@@ -155,9 +155,8 @@ static void reset_crtc_encoder_state(struct intel_crtc *crtc)
static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_pmdemand_state *pmdemand_state =
- to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
+ to_intel_pmdemand_state(display->pmdemand.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum pipe pipe = crtc->pipe;
@@ -169,7 +168,7 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
reset_crtc_encoder_state(crtc);
intel_fbc_disable(crtc);
- intel_update_watermarks(i915);
+ intel_update_watermarks(display);
intel_display_power_put_all_in_set(display, &crtc->enabled_power_domains);
@@ -184,13 +183,13 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
* Return all the pipes using a transcoder in @transcoder_mask.
* For joiner configs return only the joiner primary.
*/
-static u8 get_transcoder_pipes(struct drm_i915_private *i915,
+static u8 get_transcoder_pipes(struct intel_display *display,
u8 transcoder_mask)
{
struct intel_crtc *temp_crtc;
u8 pipes = 0;
- for_each_intel_crtc(&i915->drm, temp_crtc) {
+ for_each_intel_crtc(display->drm, temp_crtc) {
struct intel_crtc_state *temp_crtc_state =
to_intel_crtc_state(temp_crtc->base.state);
@@ -215,7 +214,6 @@ static void get_portsync_pipes(struct intel_crtc *crtc,
u8 *master_pipe_mask, u8 *slave_pipes_mask)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_crtc *master_crtc;
@@ -234,20 +232,20 @@ static void get_portsync_pipes(struct intel_crtc *crtc,
else
master_transcoder = crtc_state->master_transcoder;
- *master_pipe_mask = get_transcoder_pipes(i915, BIT(master_transcoder));
- drm_WARN_ON(&i915->drm, !is_power_of_2(*master_pipe_mask));
+ *master_pipe_mask = get_transcoder_pipes(display, BIT(master_transcoder));
+ drm_WARN_ON(display->drm, !is_power_of_2(*master_pipe_mask));
master_crtc = intel_crtc_for_pipe(display, ffs(*master_pipe_mask) - 1);
master_crtc_state = to_intel_crtc_state(master_crtc->base.state);
- *slave_pipes_mask = get_transcoder_pipes(i915, master_crtc_state->sync_mode_slaves_mask);
+ *slave_pipes_mask = get_transcoder_pipes(display, master_crtc_state->sync_mode_slaves_mask);
}
-static u8 get_joiner_secondary_pipes(struct drm_i915_private *i915, u8 primary_pipes_mask)
+static u8 get_joiner_secondary_pipes(struct intel_display *display, u8 primary_pipes_mask)
{
struct intel_crtc *primary_crtc;
u8 pipes = 0;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, primary_crtc, primary_pipes_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, primary_crtc, primary_pipes_mask) {
struct intel_crtc_state *primary_crtc_state =
to_intel_crtc_state(primary_crtc->base.state);
@@ -260,45 +258,45 @@ static u8 get_joiner_secondary_pipes(struct drm_i915_private *i915, u8 primary_p
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_crtc *temp_crtc;
u8 portsync_master_mask;
u8 portsync_slaves_mask;
u8 joiner_secondaries_mask;
- struct intel_crtc *temp_crtc;
/* TODO: Add support for MST */
get_portsync_pipes(crtc, &portsync_master_mask, &portsync_slaves_mask);
- joiner_secondaries_mask = get_joiner_secondary_pipes(i915,
+ joiner_secondaries_mask = get_joiner_secondary_pipes(display,
portsync_master_mask |
portsync_slaves_mask);
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
portsync_master_mask & portsync_slaves_mask ||
portsync_master_mask & joiner_secondaries_mask ||
portsync_slaves_mask & joiner_secondaries_mask);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, joiner_secondaries_mask)
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc, joiner_secondaries_mask)
intel_crtc_disable_noatomic_begin(temp_crtc, ctx);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, portsync_slaves_mask)
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc, portsync_slaves_mask)
intel_crtc_disable_noatomic_begin(temp_crtc, ctx);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, portsync_master_mask)
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc, portsync_master_mask)
intel_crtc_disable_noatomic_begin(temp_crtc, ctx);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc,
joiner_secondaries_mask |
portsync_slaves_mask |
portsync_master_mask)
intel_crtc_disable_noatomic_complete(temp_crtc);
}
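The teardown ordering in intel_crtc_disable_noatomic() above matters: joiner secondaries go first, then port-sync slaves, then port-sync masters, and only then do all of them run the _complete step. A standalone model of that sequencing over pipe bitmasks (the masks are illustrative):

#include <stdio.h>

typedef unsigned int u32;

static void begin(int pipe)    { printf("begin pipe %d\n", pipe); }
static void complete(int pipe) { printf("complete pipe %d\n", pipe); }

static void for_each_pipe_in_mask(u32 mask, void (*fn)(int))
{
	for (int pipe = 0; pipe < 32; pipe++)
		if (mask & (1u << pipe))
			fn(pipe);
}

int main(void)
{
	u32 joiner_secondaries = 1u << 2;	/* illustrative masks */
	u32 portsync_slaves = 1u << 1;
	u32 portsync_master = 1u << 0;

	/* dependents first, masters last, mirroring the diff */
	for_each_pipe_in_mask(joiner_secondaries, begin);
	for_each_pipe_in_mask(portsync_slaves, begin);
	for_each_pipe_in_mask(portsync_master, begin);

	for_each_pipe_in_mask(joiner_secondaries | portsync_slaves |
			      portsync_master, complete);
	return 0;
}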
-static void intel_modeset_update_connector_atomic_state(struct drm_i915_private *i915)
+static void intel_modeset_update_connector_atomic_state(struct intel_display *display)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
struct drm_connector_state *conn_state = connector->base.state;
struct intel_encoder *encoder =
@@ -320,7 +318,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_i915_private
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (intel_crtc_is_joiner_secondary(crtc_state))
return;
@@ -333,7 +331,7 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
- if (DISPLAY_INFO(i915)->color.degamma_lut_size) {
+ if (DISPLAY_INFO(display)->color.degamma_lut_size) {
/* assume 1:1 mapping */
drm_property_replace_blob(&crtc_state->hw.degamma_lut,
crtc_state->pre_csc_lut);
@@ -348,7 +346,7 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
* to gamma_lut as that is the only valid source of LUTs
* in the uapi.
*/
- drm_WARN_ON(&i915->drm, crtc_state->post_csc_lut &&
+ drm_WARN_ON(display->drm, crtc_state->post_csc_lut &&
crtc_state->pre_csc_lut);
drm_property_replace_blob(&crtc_state->hw.degamma_lut,
@@ -367,15 +365,14 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
}
static void
-intel_sanitize_plane_mapping(struct drm_i915_private *i915)
+intel_sanitize_plane_mapping(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_crtc *crtc;
- if (DISPLAY_VER(i915) >= 4)
+ if (DISPLAY_VER(display) >= 4)
return;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_crtc *plane_crtc;
@@ -387,7 +384,7 @@ intel_sanitize_plane_mapping(struct drm_i915_private *i915)
if (pipe == crtc->pipe)
continue;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
plane->base.base.id, plane->base.name);
@@ -424,12 +421,12 @@ static bool intel_crtc_needs_link_reset(struct intel_crtc *crtc)
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
struct intel_connector *found_connector = NULL;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (&encoder->base == connector->base.encoder) {
found_connector = connector;
@@ -467,7 +464,7 @@ static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state
static bool intel_sanitize_crtc(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
bool needs_link_reset;
@@ -475,7 +472,7 @@ static bool intel_sanitize_crtc(struct intel_crtc *crtc,
struct intel_plane *plane;
/* Disable everything but the primary plane */
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -516,7 +513,7 @@ static bool intel_sanitize_crtc(struct intel_crtc *crtc,
return true;
}
-static void intel_sanitize_all_crtcs(struct drm_i915_private *i915,
+static void intel_sanitize_all_crtcs(struct intel_display *display,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_crtc *crtc;
@@ -531,7 +528,7 @@ static void intel_sanitize_all_crtcs(struct drm_i915_private *i915,
for (;;) {
u32 old_mask = crtcs_forced_off;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
u32 crtc_mask = drm_crtc_mask(&crtc->base);
if (crtcs_forced_off & crtc_mask)
@@ -544,7 +541,7 @@ static void intel_sanitize_all_crtcs(struct drm_i915_private *i915,
break;
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -554,7 +551,7 @@ static void intel_sanitize_all_crtcs(struct drm_i915_private *i915,
static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
/*
* Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
@@ -566,7 +563,7 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
* without several WARNs, but for now let's take the easy
* road.
*/
- return IS_SANDYBRIDGE(i915) &&
+ return display->platform.sandybridge &&
crtc_state->hw.active &&
crtc_state->shared_dpll &&
crtc_state->port_clock == 0;
@@ -575,13 +572,12 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_connector *connector;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_crtc_state *crtc_state = crtc ?
to_intel_crtc_state(crtc->base.state) : NULL;
struct intel_pmdemand_state *pmdemand_state =
- to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
+ to_intel_pmdemand_state(display->pmdemand.obj.state);
/*
* We need to check both for a crtc link (meaning that the encoder is
@@ -592,7 +588,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
crtc_state->hw.active;
if (crtc_state && has_bogus_dpll_config(crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"BIOS has misprogrammed the hardware. Disabling pipe %c\n",
pipe_name(crtc->pipe));
has_active_crtc = false;
@@ -600,7 +596,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
connector = intel_encoder_find_connector(encoder);
if (connector && !has_active_crtc) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] has active connectors but no active pipe!\n",
encoder->base.base.id,
encoder->base.name);
@@ -617,7 +613,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
if (crtc_state) {
struct drm_encoder *best_encoder;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] manually disabled\n",
encoder->base.base.id,
encoder->base.name);
@@ -651,18 +647,17 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
/* notify opregion of the sanitized encoder state */
intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
- if (HAS_DDI(i915))
+ if (HAS_DDI(display))
intel_ddi_sanitize_encoder_pll_mapping(encoder);
}
/* FIXME read out full plane state for all planes */
-static void readout_plane_state(struct drm_i915_private *i915)
+static void readout_plane_state(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
- for_each_intel_plane(&i915->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
struct intel_crtc_state *crtc_state;
@@ -676,13 +671,13 @@ static void readout_plane_state(struct drm_i915_private *i915)
intel_set_plane_visible(crtc_state, plane_state, visible);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
plane->base.base.id, plane->base.name,
str_enabled_disabled(visible), pipe_name(pipe));
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -690,18 +685,17 @@ static void readout_plane_state(struct drm_i915_private *i915)
}
}
-static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
+static void intel_modeset_readout_hw_state(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_pmdemand_state *pmdemand_state =
- to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
+ to_intel_pmdemand_state(display->pmdemand.obj.state);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -716,15 +710,15 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
crtc->base.enabled = crtc_state->hw.enable;
crtc->active = crtc_state->hw.active;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
str_enabled_disabled(crtc_state->hw.active));
}
- readout_plane_state(i915);
+ readout_plane_state(display);
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_crtc_state *crtc_state = NULL;
pipe = 0;
@@ -743,7 +737,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
/* encoder readout should be linked to the joiner primary */
WARN_ON(intel_crtc_is_joiner_secondary(crtc_state));
- for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, secondary_crtc,
intel_crtc_joiner_secondary_pipes(crtc_state)) {
struct intel_crtc_state *secondary_crtc_state;
@@ -766,7 +760,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
if (encoder->sync_state)
encoder->sync_state(encoder, crtc_state);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
encoder->base.base.id, encoder->base.name,
str_enabled_disabled(encoder->base.crtc),
@@ -775,7 +769,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
intel_dpll_readout_hw_state(display);
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
struct intel_crtc_state *crtc_state = NULL;
@@ -809,37 +803,37 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
if (connector->sync_state)
connector->sync_state(connector, crtc_state);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] hw state readout: %s\n",
connector->base.base.id, connector->base.name,
str_enabled_disabled(connector->base.encoder));
}
drm_connector_list_iter_end(&conn_iter);
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
- if (crtc_state->hw.active) {
- /*
- * The initial mode needs to be set in order to keep
- * the atomic core happy. It wants a valid mode if the
- * crtc's enabled, so we do the above call.
- *
- * But we don't set all the derived state fully, hence
- * set a flag to indicate that a full recalculation is
- * needed on the next commit.
- */
- crtc_state->inherited = true;
+ /*
+ * The initial mode needs to be set in order to keep
+ * the atomic core happy. It wants a valid mode if the
+ * crtc's enabled, so we do the above call.
+ *
+ * But we don't set all the derived state fully, hence
+ * set a flag to indicate that a full recalculation is
+ * needed on the next commit.
+ */
+ crtc_state->inherited = true;
+ if (crtc_state->hw.active) {
intel_crtc_update_active_timings(crtc_state,
crtc_state->vrr.enable);
intel_crtc_copy_hw_to_uapi_state(crtc_state);
}
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -855,14 +849,14 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
* use plane->min_cdclk() :(
*/
if (plane_state->uapi.visible && plane->min_cdclk) {
- if (crtc_state->double_wide || DISPLAY_VER(i915) >= 10)
+ if (crtc_state->double_wide || DISPLAY_VER(display) >= 10)
crtc_state->min_cdclk[plane->id] =
DIV_ROUND_UP(crtc_state->pixel_rate, 2);
else
crtc_state->min_cdclk[plane->id] =
crtc_state->pixel_rate;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] min_cdclk %d kHz\n",
plane->base.base.id, plane->base.name,
crtc_state->min_cdclk[plane->id]);
@@ -874,7 +868,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
/* TODO move here (or even earlier?) on all platforms */
if (DISPLAY_VER(display) >= 9)
- intel_wm_get_hw_state(i915);
+ intel_wm_get_hw_state(display);
intel_bw_update_hw_state(display);
intel_cdclk_update_hw_state(display);
@@ -883,11 +877,11 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
}
static void
-get_encoder_power_domains(struct drm_i915_private *i915)
+get_encoder_power_domains(struct intel_display *display)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_crtc_state *crtc_state;
if (!encoder->get_power_domains)
@@ -905,49 +899,51 @@ get_encoder_power_domains(struct drm_i915_private *i915)
}
}
-static void intel_early_display_was(struct drm_i915_private *i915)
+static void intel_early_display_was(struct intel_display *display)
{
/*
* Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
* Also known as Wa_14010480278.
*/
- if (IS_DISPLAY_VER(i915, 10, 12))
- intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS);
+ if (IS_DISPLAY_VER(display, 10, 12))
+ intel_de_rmw(display, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS);
/*
* WaRsPkgCStateDisplayPMReq:hsw
* System hang if this isn't done before disabling all planes!
*/
- if (IS_HASWELL(i915))
- intel_de_rmw(i915, CHICKEN_PAR1_1, 0, FORCE_ARB_IDLE_PLANES);
+ if (display->platform.haswell)
+ intel_de_rmw(display, CHICKEN_PAR1_1, 0, FORCE_ARB_IDLE_PLANES);
- if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
+ if (display->platform.kabylake || display->platform.coffeelake ||
+ display->platform.cometlake) {
/* Display WA #1142:kbl,cfl,cml */
- intel_de_rmw(i915, CHICKEN_PAR1_1,
+ intel_de_rmw(display, CHICKEN_PAR1_1,
KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
- intel_de_rmw(i915, CHICKEN_MISC_2,
+ intel_de_rmw(display, CHICKEN_MISC_2,
KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
KBL_ARB_FILL_SPARE_14);
}
}
-void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
+void intel_modeset_setup_hw_state(struct intel_display *display,
struct drm_modeset_acquire_ctx *ctx)
{
- struct intel_display *display = &i915->display;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
intel_wakeref_t wakeref;
wakeref = intel_display_power_get(display, POWER_DOMAIN_INIT);
- intel_early_display_was(i915);
- intel_modeset_readout_hw_state(i915);
+ intel_early_display_was(display);
+ intel_vga_disable(display);
+
+ intel_modeset_readout_hw_state(display);
/* HW state is read out, now we need to sanitize this mess. */
- get_encoder_power_domains(i915);
+ get_encoder_power_domains(display);
- intel_pch_sanitize(i915);
+ intel_pch_sanitize(display);
intel_cmtg_sanitize(display);
@@ -955,7 +951,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
* intel_sanitize_plane_mapping() may need to do vblank
* waits, so we need vblank interrupts restored beforehand.
*/
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -969,35 +965,35 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
}
}
- intel_fbc_sanitize(&i915->display);
+ intel_fbc_sanitize(display);
- intel_sanitize_plane_mapping(i915);
+ intel_sanitize_plane_mapping(display);
- for_each_intel_encoder(&i915->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
intel_sanitize_encoder(encoder);
/*
* Sanitizing CRTCs needs their connector atomic state to be
* up-to-date, so ensure that already here.
*/
- intel_modeset_update_connector_atomic_state(i915);
+ intel_modeset_update_connector_atomic_state(display);
- intel_sanitize_all_crtcs(i915, ctx);
+ intel_sanitize_all_crtcs(display, ctx);
intel_dpll_sanitize_state(display);
/* TODO move earlier on all platforms */
if (DISPLAY_VER(display) < 9)
- intel_wm_get_hw_state(i915);
- intel_wm_sanitize(i915);
+ intel_wm_get_hw_state(display);
+ intel_wm_sanitize(display);
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_power_domain_mask put_domains;
intel_modeset_get_crtc_power_domains(crtc_state, &put_domains);
- if (drm_WARN_ON(&i915->drm, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
+ if (drm_WARN_ON(display->drm, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
intel_modeset_put_crtc_power_domains(crtc, &put_domains);
}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.h b/drivers/gpu/drm/i915/display/intel_modeset_setup.h
index 3beff67b33d0..f5e6f3ae9572 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.h
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.h
@@ -6,10 +6,10 @@
#ifndef __INTEL_MODESET_SETUP_H__
#define __INTEL_MODESET_SETUP_H__
-struct drm_i915_private;
struct drm_modeset_acquire_ctx;
+struct intel_display;
-void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
+void intel_modeset_setup_hw_state(struct intel_display *display,
struct drm_modeset_acquire_ctx *ctx);
#endif /* __INTEL_MODESET_SETUP_H__ */
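
The conversion pattern in the hunks above repeats throughout the series: derive struct intel_display from the nearest display object with to_intel_display(), then reach the drm_device as display->drm rather than &i915->drm. A minimal sketch of the idiom (the function below is illustrative only, not part of the patch):

	static void example_log_crtc(struct intel_crtc *crtc)
	{
		/* replaces: struct drm_i915_private *i915 = to_i915(crtc->base.dev); */
		struct intel_display *display = to_intel_display(crtc);

		/* the drm_device is reached via display->drm, not &i915->drm */
		drm_dbg_kms(display->drm, "[CRTC:%d:%s]\n",
			    crtc->base.base.id, crtc->base.name);
	}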
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index a008412fdd04..766a9983665a 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -6,13 +6,14 @@
*/
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_cx0_phy.h"
#include "intel_display.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_modeset_verify.h"
@@ -28,9 +29,8 @@ static void intel_connector_verify_state(const struct intel_crtc_state *crtc_sta
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.base.id, connector->base.name);
if (connector->get_hw_state(connector)) {
@@ -91,7 +91,6 @@ verify_connector_state(struct intel_atomic_state *state,
static void intel_pipe_config_sanity_check(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
if (crtc_state->has_pch_encoder) {
int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(display, crtc_state),
@@ -103,7 +102,7 @@ static void intel_pipe_config_sanity_check(const struct intel_crtc_state *crtc_s
* Yell if the encoder disagrees. Allow for slight
* rounding differences.
*/
- drm_WARN(&i915->drm, abs(fdi_dotclock - dotclock) > 1,
+ drm_WARN(display->drm, abs(fdi_dotclock - dotclock) > 1,
"FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
fdi_dotclock, dotclock);
}
@@ -113,17 +112,16 @@ static void
verify_encoder_state(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_encoder *encoder;
struct drm_connector *connector;
const struct drm_connector_state *old_conn_state, *new_conn_state;
int i;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
bool enabled = false, found = false;
enum pipe pipe;
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s]\n",
encoder->base.base.id,
encoder->base.name);
@@ -166,7 +164,6 @@ verify_crtc_state(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_crtc_state *sw_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc_state *hw_crtc_state;
@@ -185,7 +182,7 @@ verify_crtc_state(struct intel_atomic_state *state,
intel_crtc_get_pipe_config(hw_crtc_state);
/* we keep both pipes enabled on 830 */
- if (IS_I830(i915) && hw_crtc_state->hw.active)
+ if (display->platform.i830 && hw_crtc_state->hw.active)
hw_crtc_state->hw.active = sw_crtc_state->hw.active;
INTEL_DISPLAY_STATE_WARN(display,
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index aff9a3455c1b..12308495afa5 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -291,7 +291,6 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
struct i915_vma *vma)
{
struct intel_display *display = overlay->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe = overlay->crtc->pipe;
struct intel_frontbuffer *frontbuffer = NULL;
@@ -307,7 +306,7 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
intel_frontbuffer_put(overlay->frontbuffer);
overlay->frontbuffer = frontbuffer;
- intel_frontbuffer_flip_prepare(i915, INTEL_FRONTBUFFER_OVERLAY(pipe));
+ intel_frontbuffer_flip_prepare(display, INTEL_FRONTBUFFER_OVERLAY(pipe));
overlay->old_vma = overlay->vma;
if (vma)
@@ -359,14 +358,13 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
{
struct intel_display *display = overlay->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_vma *vma;
vma = fetch_and_zero(&overlay->old_vma);
if (drm_WARN_ON(display->drm, !vma))
return;
- intel_frontbuffer_flip_complete(i915, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+ intel_frontbuffer_flip_complete(display, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
i915_vma_unpin(vma);
i915_vma_put(vma);
diff --git a/drivers/gpu/drm/i915/display/intel_pch.c b/drivers/gpu/drm/i915/display/intel_pch.c
new file mode 100644
index 000000000000..469e8a3cfb49
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pch.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Intel Corporation.
+ */
+
+#include <drm/drm_print.h>
+
+#include "i915_utils.h"
+#include "intel_display_core.h"
+#include "intel_pch.h"
+
+#define INTEL_PCH_DEVICE_ID_MASK 0xff80
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
+#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
+#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
+#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
+#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
+#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
+#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
+#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
+#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380
+#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
+#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880
+#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
+#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
+#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
+#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
+#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
+#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
+#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
+#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480
+#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
+#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
+#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
+
+/*
+ * Check for platforms where the south display is on the same PCI device or SoC
+ * die as the north display. The PCH (if it even exists) is not involved in
+ * display. Return a fake PCH type for south display handling on these
+ * platforms, without actually detecting the PCH, and PCH_NONE otherwise.
+ */
+static enum intel_pch intel_pch_fake_for_south_display(struct intel_display *display)
+{
+ enum intel_pch pch_type = PCH_NONE;
+
+ if (DISPLAY_VER(display) >= 20)
+ pch_type = PCH_LNL;
+ else if (display->platform.battlemage || display->platform.meteorlake)
+ pch_type = PCH_MTL;
+ else if (display->platform.dg2)
+ pch_type = PCH_DG2;
+ else if (display->platform.dg1)
+ pch_type = PCH_DG1;
+
+ return pch_type;
+}
+
+/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
+static enum intel_pch
+intel_pch_type(const struct intel_display *display, unsigned short id)
+{
+ switch (id) {
+ case INTEL_PCH_IBX_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Ibex Peak PCH\n");
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) != 5);
+ return PCH_IBX;
+ case INTEL_PCH_CPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found CougarPoint PCH\n");
+ drm_WARN_ON(display->drm,
+ DISPLAY_VER(display) != 6 &&
+ !display->platform.ivybridge);
+ return PCH_CPT;
+ case INTEL_PCH_PPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found PantherPoint PCH\n");
+ drm_WARN_ON(display->drm,
+ DISPLAY_VER(display) != 6 &&
+ !display->platform.ivybridge);
+ /* PPT is CPT compatible */
+ return PCH_CPT;
+ case INTEL_PCH_LPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found LynxPoint PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell &&
+ !display->platform.broadwell);
+ drm_WARN_ON(display->drm,
+ display->platform.haswell_ult ||
+ display->platform.broadwell_ult);
+ return PCH_LPT_H;
+ case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found LynxPoint LP PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell &&
+ !display->platform.broadwell);
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell_ult &&
+ !display->platform.broadwell_ult);
+ return PCH_LPT_LP;
+ case INTEL_PCH_WPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found WildcatPoint PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell &&
+ !display->platform.broadwell);
+ drm_WARN_ON(display->drm,
+ display->platform.haswell_ult ||
+ display->platform.broadwell_ult);
+ /* WPT is LPT compatible */
+ return PCH_LPT_H;
+ case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found WildcatPoint LP PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell &&
+ !display->platform.broadwell);
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell_ult &&
+ !display->platform.broadwell_ult);
+ /* WPT is LPT compatible */
+ return PCH_LPT_LP;
+ case INTEL_PCH_SPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found SunrisePoint PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.skylake &&
+ !display->platform.kabylake &&
+ !display->platform.coffeelake);
+ return PCH_SPT;
+ case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found SunrisePoint LP PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.skylake &&
+ !display->platform.kabylake &&
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ return PCH_SPT;
+ case INTEL_PCH_KBP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Kaby Lake PCH (KBP)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.skylake &&
+ !display->platform.kabylake &&
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ /* KBP is SPT compatible */
+ return PCH_SPT;
+ case INTEL_PCH_CNP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Cannon Lake PCH (CNP)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ return PCH_CNP;
+ case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm,
+ "Found Cannon Lake LP PCH (CNP-LP)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ return PCH_CNP;
+ case INTEL_PCH_CMP_DEVICE_ID_TYPE:
+ case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Comet Lake PCH (CMP)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.coffeelake &&
+ !display->platform.cometlake &&
+ !display->platform.rocketlake);
+ /* CMP is CNP compatible */
+ return PCH_CNP;
+ case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Comet Lake V PCH (CMP-V)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ /* CMP-V is based on KBP, which is SPT compatible */
+ return PCH_SPT;
+ case INTEL_PCH_ICP_DEVICE_ID_TYPE:
+ case INTEL_PCH_ICP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Ice Lake PCH\n");
+ drm_WARN_ON(display->drm, !display->platform.icelake);
+ return PCH_ICP;
+ case INTEL_PCH_MCC_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Mule Creek Canyon PCH\n");
+ drm_WARN_ON(display->drm, !(display->platform.jasperlake ||
+ display->platform.elkhartlake));
+ /* MCC is TGP compatible */
+ return PCH_TGP;
+ case INTEL_PCH_TGP_DEVICE_ID_TYPE:
+ case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Tiger Lake LP PCH\n");
+ drm_WARN_ON(display->drm, !display->platform.tigerlake &&
+ !display->platform.rocketlake &&
+ !display->platform.skylake &&
+ !display->platform.kabylake &&
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ return PCH_TGP;
+ case INTEL_PCH_JSP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Jasper Lake PCH\n");
+ drm_WARN_ON(display->drm, !(display->platform.jasperlake ||
+ display->platform.elkhartlake));
+ /* JSP is ICP compatible */
+ return PCH_ICP;
+ case INTEL_PCH_ADP_DEVICE_ID_TYPE:
+ case INTEL_PCH_ADP2_DEVICE_ID_TYPE:
+ case INTEL_PCH_ADP3_DEVICE_ID_TYPE:
+ case INTEL_PCH_ADP4_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Alder Lake PCH\n");
+ drm_WARN_ON(display->drm, !display->platform.alderlake_s &&
+ !display->platform.alderlake_p);
+ return PCH_ADP;
+ default:
+ return PCH_NONE;
+ }
+}
+
+static bool intel_is_virt_pch(unsigned short id,
+ unsigned short svendor, unsigned short sdevice)
+{
+ return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
+ id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
+ (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
+ svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
+ sdevice == PCI_SUBDEVICE_ID_QEMU));
+}
+
+static void
+intel_virt_detect_pch(const struct intel_display *display,
+ unsigned short *pch_id, enum intel_pch *pch_type)
+{
+ unsigned short id = 0;
+
+ /*
+ * In a virtualized passthrough environment we can end up in a
+ * setup where the ISA bridge cannot be passed through. In this
+ * case, a south bridge may be emulated and we have to make an
+ * educated guess as to which PCH is really there.
+ */
+
+ if (display->platform.alderlake_s || display->platform.alderlake_p)
+ id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
+ else if (display->platform.tigerlake || display->platform.rocketlake)
+ id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
+ id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
+ else if (display->platform.icelake)
+ id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
+ else if (display->platform.coffeelake ||
+ display->platform.cometlake)
+ id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+ else if (display->platform.kabylake || display->platform.skylake)
+ id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
+ else if (display->platform.haswell_ult ||
+ display->platform.broadwell_ult)
+ id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+ else if (display->platform.haswell || display->platform.broadwell)
+ id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
+ else if (DISPLAY_VER(display) == 6 || display->platform.ivybridge)
+ id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+ else if (DISPLAY_VER(display) == 5)
+ id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
+
+ if (id)
+ drm_dbg_kms(display->drm, "Assuming PCH ID %04x\n", id);
+ else
+ drm_dbg_kms(display->drm, "Assuming no PCH\n");
+
+ *pch_type = intel_pch_type(display, id);
+
+ /* Sanity check virtual PCH id */
+ if (drm_WARN_ON(display->drm,
+ id && *pch_type == PCH_NONE))
+ id = 0;
+
+ *pch_id = id;
+}
+
+void intel_pch_detect(struct intel_display *display)
+{
+ struct pci_dev *pch = NULL;
+ unsigned short id;
+ enum intel_pch pch_type;
+
+ pch_type = intel_pch_fake_for_south_display(display);
+ if (pch_type != PCH_NONE) {
+ display->pch_type = pch_type;
+ drm_dbg_kms(display->drm,
+ "PCH not involved in display, using fake PCH type %d for south display\n",
+ pch_type);
+ return;
+ }
+
+ /*
+ * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
+ * make graphics device passthrough easy for the VMM, which then
+ * only needs to expose the ISA bridge to let the driver know the
+ * real hardware underneath. This is a requirement from the
+ * virtualization team.
+ *
+ * In some virtualized environments (e.g. XEN), there may be an
+ * irrelevant ISA bridge in the system. To work reliably, we should
+ * scan through all the ISA bridge devices and check for the first
+ * match, instead of only checking the first one.
+ */
+ while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
+ if (pch->vendor != PCI_VENDOR_ID_INTEL)
+ continue;
+
+ id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+
+ pch_type = intel_pch_type(display, id);
+ if (pch_type != PCH_NONE) {
+ display->pch_type = pch_type;
+ break;
+ } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
+ pch->subsystem_device)) {
+ intel_virt_detect_pch(display, &id, &pch_type);
+ display->pch_type = pch_type;
+ break;
+ }
+ }
+
+ /*
+ * Use PCH_NOP (PCH but no South Display) for PCH platforms without
+ * display.
+ */
+ if (pch && !HAS_DISPLAY(display)) {
+ drm_dbg_kms(display->drm,
+ "Display disabled, reverting to NOP PCH\n");
+ display->pch_type = PCH_NOP;
+ } else if (!pch) {
+ if (i915_run_as_guest() && HAS_DISPLAY(display)) {
+ intel_virt_detect_pch(display, &id, &pch_type);
+ display->pch_type = pch_type;
+ } else {
+ drm_dbg_kms(display->drm, "No PCH found.\n");
+ }
+ }
+
+ pci_dev_put(pch);
+}
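
A minimal sketch of how the new interface is consumed (the probe hook below is hypothetical; intel_pch_detect() and the HAS_PCH_* predicates are the ones declared in intel_pch.h, added next):

	/* Hypothetical caller: detect the PCH once at display probe time,
	 * then branch on the cached display->pch_type via the predicates. */
	static void example_display_probe(struct intel_display *display)
	{
		intel_pch_detect(display);

		if (HAS_PCH_SPLIT(display) && !HAS_PCH_NOP(display)) {
			/* south display present: PCH transcoder paths apply */
		}
	}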
diff --git a/drivers/gpu/drm/i915/display/intel_pch.h b/drivers/gpu/drm/i915/display/intel_pch.h
new file mode 100644
index 000000000000..cf4dab1b98bf
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pch.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Intel Corporation.
+ */
+
+#ifndef __INTEL_PCH__
+#define __INTEL_PCH__
+
+#include "intel_display_conversion.h"
+
+struct intel_display;
+
+/*
+ * Sorted by south display engine compatibility.
+ * If a new PCH comes with a south display engine that is not
+ * inherited from the latest item, please do not add it to the
+ * end. Instead, add it right after its "parent" PCH.
+ */
+enum intel_pch {
+ PCH_NOP = -1, /* PCH without south display */
+ PCH_NONE = 0, /* No PCH present */
+ PCH_IBX, /* Ibexpeak PCH */
+ PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
+ PCH_LPT_H, /* Lynxpoint/Wildcatpoint H PCH */
+ PCH_LPT_LP, /* Lynxpoint/Wildcatpoint LP PCH */
+ PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
+ PCH_CNP, /* Cannon/Comet Lake PCH */
+ PCH_ICP, /* Ice Lake/Jasper Lake PCH */
+ PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */
+ PCH_ADP, /* Alder Lake PCH */
+
+ /* Fake PCHs, functionality handled on the same PCI dev */
+ PCH_DG1 = 1024,
+ PCH_DG2,
+ PCH_MTL,
+ PCH_LNL,
+};
+
+#define INTEL_PCH_TYPE(_display) (__to_intel_display(_display)->pch_type)
+#define HAS_PCH_DG2(display) (INTEL_PCH_TYPE(display) == PCH_DG2)
+#define HAS_PCH_ADP(display) (INTEL_PCH_TYPE(display) == PCH_ADP)
+#define HAS_PCH_DG1(display) (INTEL_PCH_TYPE(display) == PCH_DG1)
+#define HAS_PCH_TGP(display) (INTEL_PCH_TYPE(display) == PCH_TGP)
+#define HAS_PCH_ICP(display) (INTEL_PCH_TYPE(display) == PCH_ICP)
+#define HAS_PCH_CNP(display) (INTEL_PCH_TYPE(display) == PCH_CNP)
+#define HAS_PCH_SPT(display) (INTEL_PCH_TYPE(display) == PCH_SPT)
+#define HAS_PCH_LPT_H(display) (INTEL_PCH_TYPE(display) == PCH_LPT_H)
+#define HAS_PCH_LPT_LP(display) (INTEL_PCH_TYPE(display) == PCH_LPT_LP)
+#define HAS_PCH_LPT(display) (INTEL_PCH_TYPE(display) == PCH_LPT_H || \
+ INTEL_PCH_TYPE(display) == PCH_LPT_LP)
+#define HAS_PCH_CPT(display) (INTEL_PCH_TYPE(display) == PCH_CPT)
+#define HAS_PCH_IBX(display) (INTEL_PCH_TYPE(display) == PCH_IBX)
+#define HAS_PCH_NOP(display) (INTEL_PCH_TYPE(display) == PCH_NOP)
+#define HAS_PCH_SPLIT(display) (INTEL_PCH_TYPE(display) != PCH_NONE)
+
+void intel_pch_detect(struct intel_display *display);
+
+#endif /* __INTEL_PCH__ */
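
As a worked example of the device-id masking in intel_pch.c above: INTEL_PCH_DEVICE_ID_MASK (0xff80) keeps the bits that identify the PCH family and drops the per-SKU low bits, which is how QEMU's q35 ISA bridge (device id 0x2918) matches INTEL_PCH_QEMU_DEVICE_ID_TYPE:

	unsigned short id = 0x2918 & INTEL_PCH_DEVICE_ID_MASK;	/* 0x2918 & 0xff80 */
	/* id == 0x2900 == INTEL_PCH_QEMU_DEVICE_ID_TYPE */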
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 99f6d6f53fa7..1743ebf551cb 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -3,8 +3,9 @@
* Copyright © 2021 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "g4x_dp.h"
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_crt.h"
#include "intel_crt_regs.h"
@@ -20,28 +21,27 @@
#include "intel_pps.h"
#include "intel_sdvo.h"
-bool intel_has_pch_trancoder(struct drm_i915_private *i915,
+bool intel_has_pch_trancoder(struct intel_display *display,
enum pipe pch_transcoder)
{
- return HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915) ||
- (HAS_PCH_LPT_H(i915) && pch_transcoder == PIPE_A);
+ return HAS_PCH_IBX(display) || HAS_PCH_CPT(display) ||
+ (HAS_PCH_LPT_H(display) && pch_transcoder == PIPE_A);
}
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- if (HAS_PCH_LPT(i915))
+ if (HAS_PCH_LPT(display))
return PIPE_A;
else
return crtc->pipe;
}
-static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_dp_disabled(struct intel_display *display,
enum pipe pipe, enum port port,
i915_reg_t dp_reg)
{
- struct intel_display *display = &dev_priv->display;
enum pipe port_pipe;
bool state;
@@ -52,16 +52,15 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
port_name(port), pipe_name(pipe));
INTEL_DISPLAY_STATE_WARN(display,
- HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ HAS_PCH_IBX(display) && !state && port_pipe == PIPE_B,
"IBX PCH DP %c still using transcoder B\n",
port_name(port));
}
-static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_hdmi_disabled(struct intel_display *display,
enum pipe pipe, enum port port,
i915_reg_t hdmi_reg)
{
- struct intel_display *display = &dev_priv->display;
enum pipe port_pipe;
bool state;
@@ -72,20 +71,19 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
port_name(port), pipe_name(pipe));
INTEL_DISPLAY_STATE_WARN(display,
- HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ HAS_PCH_IBX(display) && !state && port_pipe == PIPE_B,
"IBX PCH HDMI %c still using transcoder B\n",
port_name(port));
}
-static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_ports_disabled(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
enum pipe port_pipe;
- assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
- assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
- assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
+ assert_pch_dp_disabled(display, pipe, PORT_B, PCH_DP_B);
+ assert_pch_dp_disabled(display, pipe, PORT_C, PCH_DP_C);
+ assert_pch_dp_disabled(display, pipe, PORT_D, PCH_DP_D);
INTEL_DISPLAY_STATE_WARN(display,
intel_crt_port_enabled(display, PCH_ADPA, &port_pipe) && port_pipe == pipe,
@@ -93,20 +91,19 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
INTEL_DISPLAY_STATE_WARN(display,
- intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && port_pipe == pipe,
+ intel_lvds_port_enabled(display, PCH_LVDS, &port_pipe) && port_pipe == pipe,
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
/* PCH SDVOB multiplex with HDMIB */
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
+ assert_pch_hdmi_disabled(display, pipe, PORT_B, PCH_HDMIB);
+ assert_pch_hdmi_disabled(display, pipe, PORT_C, PCH_HDMIC);
+ assert_pch_hdmi_disabled(display, pipe, PORT_D, PCH_HDMID);
}
-static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_transcoder_disabled(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
u32 val;
bool enabled;
@@ -117,45 +114,45 @@ static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
-static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
+static void ibx_sanitize_pch_hdmi_port(struct intel_display *display,
enum port port, i915_reg_t hdmi_reg)
{
- u32 val = intel_de_read(dev_priv, hdmi_reg);
+ u32 val = intel_de_read(display, hdmi_reg);
if (val & SDVO_ENABLE ||
(val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Sanitizing transcoder select for HDMI %c\n",
port_name(port));
val &= ~SDVO_PIPE_SEL_MASK;
val |= SDVO_PIPE_SEL(PIPE_A);
- intel_de_write(dev_priv, hdmi_reg, val);
+ intel_de_write(display, hdmi_reg, val);
}
-static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
+static void ibx_sanitize_pch_dp_port(struct intel_display *display,
enum port port, i915_reg_t dp_reg)
{
- u32 val = intel_de_read(dev_priv, dp_reg);
+ u32 val = intel_de_read(display, dp_reg);
if (val & DP_PORT_EN ||
(val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Sanitizing transcoder select for DP %c\n",
port_name(port));
val &= ~DP_PIPE_SEL_MASK;
val |= DP_PIPE_SEL(PIPE_A);
- intel_de_write(dev_priv, dp_reg, val);
+ intel_de_write(display, dp_reg, val);
}
-static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
+static void ibx_sanitize_pch_ports(struct intel_display *display)
{
/*
* The BIOS may select transcoder B on some of the PCH
@@ -168,14 +165,14 @@ static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
* (see intel_dp_link_down(), intel_disable_hdmi(),
* intel_disable_sdvo()).
*/
- ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
- ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
- ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
+ ibx_sanitize_pch_dp_port(display, PORT_B, PCH_DP_B);
+ ibx_sanitize_pch_dp_port(display, PORT_C, PCH_DP_C);
+ ibx_sanitize_pch_dp_port(display, PORT_D, PCH_DP_D);
/* PCH SDVOB multiplex with HDMIB */
- ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
- ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
- ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
+ ibx_sanitize_pch_hdmi_port(display, PORT_B, PCH_HDMIB);
+ ibx_sanitize_pch_hdmi_port(display, PORT_C, PCH_HDMIC);
+ ibx_sanitize_pch_hdmi_port(display, PORT_D, PCH_HDMID);
}
static void intel_pch_transcoder_set_m1_n1(struct intel_crtc *crtc,
@@ -225,32 +222,30 @@ void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
enum pipe pch_transcoder)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
- intel_de_read(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
- intel_de_read(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
- intel_de_read(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder)));
-
- intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VSYNCSHIFT(dev_priv, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_HTOTAL(pch_transcoder),
+ intel_de_read(display, TRANS_HTOTAL(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_HBLANK(pch_transcoder),
+ intel_de_read(display, TRANS_HBLANK(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_HSYNC(pch_transcoder),
+ intel_de_read(display, TRANS_HSYNC(display, cpu_transcoder)));
+
+ intel_de_write(display, PCH_TRANS_VTOTAL(pch_transcoder),
+ intel_de_read(display, TRANS_VTOTAL(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_VBLANK(pch_transcoder),
+ intel_de_read(display, TRANS_VBLANK(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_VSYNC(pch_transcoder),
+ intel_de_read(display, TRANS_VSYNC(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
+ intel_de_read(display, TRANS_VSYNCSHIFT(display, cpu_transcoder)));
}
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 val, pipeconf_val;
@@ -262,7 +257,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
assert_fdi_tx_enabled(display, pipe);
assert_fdi_rx_enabled(display, pipe);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
reg = TRANS_CHICKEN2(pipe);
val = intel_de_read(display, reg);
/*
@@ -280,7 +275,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val = intel_de_read(display, reg);
pipeconf_val = intel_de_read(display, TRANSCONF(display, pipe));
- if (HAS_PCH_IBX(dev_priv)) {
+ if (HAS_PCH_IBX(display)) {
/* Configure frame start delay to match the CPU */
val &= ~TRANS_FRAME_START_DELAY_MASK;
val |= TRANS_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
@@ -299,7 +294,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_ILK) == TRANSCONF_INTERLACE_IF_ID_ILK) {
- if (HAS_PCH_IBX(dev_priv) &&
+ if (HAS_PCH_IBX(display) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANS_INTERLACE_LEGACY_VSYNC_IBX;
else
@@ -317,7 +312,6 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
@@ -326,18 +320,18 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
assert_fdi_rx_disabled(display, pipe);
/* Ports must be off as well */
- assert_pch_ports_disabled(dev_priv, pipe);
+ assert_pch_ports_disabled(display, pipe);
reg = PCH_TRANSCONF(pipe);
- intel_de_rmw(dev_priv, reg, TRANS_ENABLE, 0);
+ intel_de_rmw(display, reg, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
- if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
- drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
+ if (intel_de_wait_for_clear(display, reg, TRANS_STATE_ENABLE, 50))
+ drm_err(display->drm, "failed to disable transcoder %c\n",
pipe_name(pipe));
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
/* Workaround: Clear the timing override chicken bit again. */
- intel_de_rmw(dev_priv, TRANS_CHICKEN2(pipe),
+ intel_de_rmw(display, TRANS_CHICKEN2(pipe),
TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
}
@@ -366,14 +360,13 @@ void ilk_pch_pre_enable(struct intel_atomic_state *state,
void ilk_pch_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
u32 temp;
- assert_pch_transcoder_disabled(dev_priv, pipe);
+ assert_pch_transcoder_disabled(display, pipe);
/* For PCH output, training FDI link */
intel_fdi_link_train(crtc, crtc_state);
@@ -382,7 +375,7 @@ void ilk_pch_enable(struct intel_atomic_state *state,
* We need to program the right clock selection
* before writing the pixel multiplier into the DPLL.
*/
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
u32 sel;
temp = intel_de_read(display, PCH_DPLL_SEL);
@@ -418,7 +411,7 @@ void ilk_pch_enable(struct intel_atomic_state *state,
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev_priv) &&
+ if (HAS_PCH_CPT(display) &&
intel_crtc_has_dp_encoder(crtc_state)) {
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -459,23 +452,27 @@ void ilk_pch_disable(struct intel_atomic_state *state,
void ilk_pch_post_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
ilk_disable_pch_transcoder(crtc);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
/* disable TRANS_DP_CTL */
- intel_de_rmw(dev_priv, TRANS_DP_CTL(pipe),
+ intel_de_rmw(display, TRANS_DP_CTL(pipe),
TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK,
TRANS_DP_PORT_SEL_NONE);
/* disable DPLL_SEL */
- intel_de_rmw(dev_priv, PCH_DPLL_SEL,
+ intel_de_rmw(display, PCH_DPLL_SEL,
TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe), 0);
}
ilk_fdi_pll_disable(crtc);
+
+ intel_disable_shared_dpll(old_crtc_state);
}
static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
@@ -497,9 +494,8 @@ static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum pipe pipe = crtc->pipe;
enum intel_dpll_id pll_id;
@@ -518,7 +514,7 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder,
&crtc_state->fdi_m_n);
- if (HAS_PCH_IBX(dev_priv)) {
+ if (HAS_PCH_IBX(display)) {
/*
* The pipe->pch transcoder and pch transcoder->pll
* mapping is fixed.
@@ -550,8 +546,6 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val, pipeconf_val;
@@ -559,49 +553,49 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
assert_fdi_tx_enabled(display, (enum pipe)cpu_transcoder);
assert_fdi_rx_enabled(display, PIPE_A);
- val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
+ val = intel_de_read(display, TRANS_CHICKEN2(PIPE_A));
/* Workaround: set timing override bit. */
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
+ intel_de_write(display, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
- pipeconf_val = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, cpu_transcoder));
+ pipeconf_val = intel_de_read(display,
+ TRANSCONF(display, cpu_transcoder));
if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_HSW) == TRANSCONF_INTERLACE_IF_ID_ILK)
val |= TRANS_INTERLACE_INTERLACED;
else
val |= TRANS_INTERLACE_PROGRESSIVE;
- intel_de_write(dev_priv, LPT_TRANSCONF, val);
- if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
+ intel_de_write(display, LPT_TRANSCONF, val);
+ if (intel_de_wait_for_set(display, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 100))
- drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
+ drm_err(display->drm, "Failed to enable PCH transcoder\n");
}
-static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+static void lpt_disable_pch_transcoder(struct intel_display *display)
{
- intel_de_rmw(dev_priv, LPT_TRANSCONF, TRANS_ENABLE, 0);
+ intel_de_rmw(display, LPT_TRANSCONF, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
- if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
+ if (intel_de_wait_for_clear(display, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 50))
- drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
+ drm_err(display->drm, "Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
- intel_de_rmw(dev_priv, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
+ intel_de_rmw(display, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
}
void lpt_pch_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- assert_pch_transcoder_disabled(dev_priv, PIPE_A);
+ assert_pch_transcoder_disabled(display, PIPE_A);
lpt_program_iclkip(crtc_state);
@@ -614,36 +608,36 @@ void lpt_pch_enable(struct intel_atomic_state *state,
void lpt_pch_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- lpt_disable_pch_transcoder(dev_priv);
+ lpt_disable_pch_transcoder(display);
- lpt_disable_iclkip(dev_priv);
+ lpt_disable_iclkip(display);
}
void lpt_pch_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- if ((intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) == 0)
+ if ((intel_de_read(display, LPT_TRANSCONF) & TRANS_ENABLE) == 0)
return;
crtc_state->has_pch_encoder = true;
- tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ tmp = intel_de_read(display, FDI_RX_CTL(PIPE_A));
crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder,
&crtc_state->fdi_m_n);
- crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
+ crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(display);
}
-void intel_pch_sanitize(struct drm_i915_private *i915)
+void intel_pch_sanitize(struct intel_display *display)
{
- if (HAS_PCH_IBX(i915))
- ibx_sanitize_pch_ports(i915);
+ if (HAS_PCH_IBX(display))
+ ibx_sanitize_pch_ports(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.h b/drivers/gpu/drm/i915/display/intel_pch_display.h
index 35f8288af3d1..cd6b3ed05887 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.h
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.h
@@ -9,14 +9,14 @@
#include <linux/types.h>
enum pipe;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_link_m_n;
#ifdef I915
-bool intel_has_pch_trancoder(struct drm_i915_private *i915,
+bool intel_has_pch_trancoder(struct intel_display *display,
enum pipe pch_transcoder);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
@@ -41,9 +41,9 @@ void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc,
void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
struct intel_link_m_n *m_n);
-void intel_pch_sanitize(struct drm_i915_private *i915);
+void intel_pch_sanitize(struct intel_display *display);
#else
-static inline bool intel_has_pch_trancoder(struct drm_i915_private *i915,
+static inline bool intel_has_pch_trancoder(struct intel_display *display,
enum pipe pch_transcoder)
{
return false;
@@ -90,7 +90,7 @@ static inline void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
}
-static inline void intel_pch_sanitize(struct drm_i915_private *i915)
+static inline void intel_pch_sanitize(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 33467de3d115..693b90e3dfc3 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -11,27 +11,28 @@
#include "intel_pch_refclk.h"
#include "intel_sbi.h"
-static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
+static void lpt_fdi_reset_mphy(struct intel_display *display)
{
- intel_de_rmw(dev_priv, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);
+ intel_de_rmw(display, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);
- if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ if (wait_for_us(intel_de_read(display, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS, 100))
- drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
+ drm_err(display->drm, "FDI mPHY reset assert timeout\n");
- intel_de_rmw(dev_priv, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);
+ intel_de_rmw(display, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);
- if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ if (wait_for_us((intel_de_read(display, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
- drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
+ drm_err(display->drm, "FDI mPHY reset de-assert timeout\n");
}
/* WaMPhyProgramming:hsw */
-static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
+static void lpt_fdi_program_mphy(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
- lpt_fdi_reset_mphy(dev_priv);
+ lpt_fdi_reset_mphy(display);
tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
tmp &= ~(0xFF << 24);
@@ -103,11 +104,12 @@ static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+void lpt_disable_iclkip(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 temp;
- intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
+ intel_de_write(display, PIXCLK_GATE, PIXCLK_GATE_GATE);
intel_sbi_lock(dev_priv);
@@ -175,24 +177,25 @@ int lpt_iclkip(const struct intel_crtc_state *crtc_state)
/* Program iCLKIP clock to the desired frequency */
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int clock = crtc_state->hw.adjusted_mode.crtc_clock;
struct iclkip_params p;
u32 temp;
- lpt_disable_iclkip(dev_priv);
+ lpt_disable_iclkip(display);
lpt_compute_iclkip(&p, clock);
- drm_WARN_ON(&dev_priv->drm, lpt_iclkip_freq(&p) != clock);
+ drm_WARN_ON(display->drm, lpt_iclkip_freq(&p) != clock);
/* This should not happen with any sane values */
- drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
+ drm_WARN_ON(display->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
- drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
+ drm_WARN_ON(display->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
~SBI_SSCDIVINTPHASE_INCVAL_MASK);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);
@@ -224,15 +227,16 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
/* Wait for initialization time */
udelay(24);
- intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+ intel_de_write(display, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
-int lpt_get_iclkip(struct drm_i915_private *dev_priv)
+int lpt_get_iclkip(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct iclkip_params p;
u32 temp;
- if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
+ if ((intel_de_read(display, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
return 0;
iclkip_params_init(&p);
@@ -268,15 +272,16 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
* - Sequence to enable CLKOUT_DP without spread
* - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
*/
-static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
+static void lpt_enable_clkout_dp(struct intel_display *display,
bool with_spread, bool with_fdi)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, tmp;
- if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
+ if (drm_WARN(display->drm, with_fdi && !with_spread,
"FDI requires downspread\n"))
with_spread = true;
- if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
+ if (drm_WARN(display->drm, HAS_PCH_LPT_LP(display) &&
with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
@@ -295,10 +300,10 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
if (with_fdi)
- lpt_fdi_program_mphy(dev_priv);
+ lpt_fdi_program_mphy(display);
}
- reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
+ reg = HAS_PCH_LPT_LP(display) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -307,13 +312,14 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
}
/* Sequence to disable CLKOUT_DP */
-void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
+void lpt_disable_clkout_dp(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, tmp;
intel_sbi_lock(dev_priv);
- reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
+ reg = HAS_PCH_LPT_LP(display) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -364,15 +370,16 @@ static const u16 sscdivintphase[] = {
* < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
* change in clock period = -(steps / 10) * 5.787 ps
*/
-static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
+static void lpt_bend_clkout_dp(struct intel_display *display, int steps)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
int idx = BEND_IDX(steps);
- if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
+ if (drm_WARN_ON(display->drm, steps % 5 != 0))
return;
- if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
+ if (drm_WARN_ON(display->drm, idx >= ARRAY_SIZE(sscdivintphase)))
return;
intel_sbi_lock(dev_priv);
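The bend formula in the comment above is easy to sanity-check with a quick calculation (a sketch assuming BEND_IDX() maps steps, in units of 5, into the sscdivintphase[] table):

	/* Hypothetical example, not part of the patch: steps = -45 (slow the
	 * clock down) changes the period by -(-45 / 10) * 5.787 ps = +26.04 ps.
	 * The nominal 135 MHz period is 1e12 / 135e6 ~= 7407.4 ps, so the bent
	 * clock runs at roughly 1e12 / 7433.4 ~= 134.5 MHz. */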
@@ -393,10 +400,10 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
#undef BEND_IDX
-static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
+static bool spll_uses_pch_ssc(struct intel_display *display)
{
- u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
- u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
+ u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
+ u32 ctl = intel_de_read(display, SPLL_CTL);
if ((ctl & SPLL_PLL_ENABLE) == 0)
return false;
@@ -405,18 +412,17 @@ static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
(fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
return true;
- if (IS_BROADWELL(dev_priv) &&
+ if (display->platform.broadwell &&
(ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
return true;
return false;
}
-static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
- enum intel_dpll_id id)
+static bool wrpll_uses_pch_ssc(struct intel_display *display, enum intel_dpll_id id)
{
- u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
- u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
+ u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
+ u32 ctl = intel_de_read(display, WRPLL_CTL(id));
if ((ctl & WRPLL_PLL_ENABLE) == 0)
return false;
@@ -424,7 +430,7 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
return true;
- if ((IS_BROADWELL(dev_priv) || IS_HASWELL_ULT(dev_priv)) &&
+ if ((display->platform.broadwell || display->platform.haswell_ult) &&
(ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
(fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
return true;
@@ -432,12 +438,12 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
return false;
}
-static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
+static void lpt_init_pch_refclk(struct intel_display *display)
{
struct intel_encoder *encoder;
bool has_fdi = false;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
has_fdi = true;
@@ -462,37 +468,36 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
* clock hierarchy. That would also allow us to do
* clock bending finally.
*/
- dev_priv->display.dpll.pch_ssc_use = 0;
+ display->dpll.pch_ssc_use = 0;
- if (spll_uses_pch_ssc(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
- dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL);
+ if (spll_uses_pch_ssc(display)) {
+ drm_dbg_kms(display->drm, "SPLL using PCH SSC\n");
+ display->dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL);
}
- if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
- drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
- dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
+ if (wrpll_uses_pch_ssc(display, DPLL_ID_WRPLL1)) {
+ drm_dbg_kms(display->drm, "WRPLL1 using PCH SSC\n");
+ display->dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
}
- if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
- drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
- dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
+ if (wrpll_uses_pch_ssc(display, DPLL_ID_WRPLL2)) {
+ drm_dbg_kms(display->drm, "WRPLL2 using PCH SSC\n");
+ display->dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
}
- if (dev_priv->display.dpll.pch_ssc_use)
+ if (display->dpll.pch_ssc_use)
return;
if (has_fdi) {
- lpt_bend_clkout_dp(dev_priv, 0);
- lpt_enable_clkout_dp(dev_priv, true, true);
+ lpt_bend_clkout_dp(display, 0);
+ lpt_enable_clkout_dp(display, true, true);
} else {
- lpt_disable_clkout_dp(dev_priv);
+ lpt_disable_clkout_dp(display);
}
}
-static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
+static void ilk_init_pch_refclk(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_encoder *encoder;
struct intel_shared_dpll *pll;
int i;
@@ -521,7 +526,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
}
- if (HAS_PCH_IBX(dev_priv)) {
+ if (HAS_PCH_IBX(display)) {
has_ck505 = display->vbt.display_clock_mode;
can_ssc = has_ck505;
} else {
@@ -607,7 +612,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* SSC must be turned on before enabling the CPU output */
if (intel_panel_use_ssc(display) && can_ssc) {
- drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
+ drm_dbg_kms(display->drm, "Using SSC on panel\n");
val |= DREF_SSC1_ENABLE;
} else {
val &= ~DREF_SSC1_ENABLE;
@@ -623,7 +628,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* Enable CPU source on CPU attached eDP */
if (has_cpu_edp) {
if (intel_panel_use_ssc(display) && can_ssc) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Using SSC on eDP\n");
val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
} else {
@@ -670,10 +675,10 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/*
* Initialize reference clocks when the driver loads
*/
-void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
+void intel_init_pch_refclk(struct intel_display *display)
{
- if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
- ilk_init_pch_refclk(dev_priv);
- else if (HAS_PCH_LPT(dev_priv))
- lpt_init_pch_refclk(dev_priv);
+ if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display))
+ ilk_init_pch_refclk(display);
+ else if (HAS_PCH_LPT(display))
+ lpt_init_pch_refclk(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.h b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
index ae3403c0ced8..25cc53c568bc 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.h
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
@@ -8,25 +8,25 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
#ifdef I915
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state);
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
-int lpt_get_iclkip(struct drm_i915_private *dev_priv);
+void lpt_disable_iclkip(struct intel_display *display);
+int lpt_get_iclkip(struct intel_display *display);
int lpt_iclkip(const struct intel_crtc_state *crtc_state);
-void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
-void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
+void intel_init_pch_refclk(struct intel_display *display);
+void lpt_disable_clkout_dp(struct intel_display *display);
#else
static inline void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
}
-static inline void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+static inline void lpt_disable_iclkip(struct intel_display *display)
{
}
-static inline int lpt_get_iclkip(struct drm_i915_private *dev_priv)
+static inline int lpt_get_iclkip(struct intel_display *display)
{
return 0;
}
@@ -34,10 +34,10 @@ static inline int lpt_iclkip(const struct intel_crtc_state *crtc_state)
{
return 0;
}
-static inline void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
+static inline void intel_init_pch_refclk(struct intel_display *display)
{
}
-static inline void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
+static inline void lpt_disable_clkout_dp(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 10e26c3db946..6182f484b5bd 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -75,7 +75,7 @@ static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+static void i9xx_pipe_crc_auto_source(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source)
{
@@ -85,8 +85,8 @@ static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
- drm_modeset_lock_all(&dev_priv->drm);
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ drm_modeset_lock_all(display->drm);
+ for_each_intel_encoder(display->drm, encoder) {
if (!encoder->base.crtc)
continue;
@@ -113,7 +113,7 @@ static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
*source = INTEL_PIPE_CRC_SOURCE_DP_D;
break;
default:
- drm_WARN(&dev_priv->drm, 1, "nonexisting DP port %c\n",
+ drm_WARN(display->drm, 1, "nonexisting DP port %c\n",
port_name(dig_port->base.port));
break;
}
@@ -122,10 +122,10 @@ static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
break;
}
}
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
}
-static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int vlv_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
@@ -133,7 +133,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
bool need_stable_symbols = false;
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+ i9xx_pipe_crc_auto_source(display, pipe, source);
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
@@ -148,7 +148,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
need_stable_symbols = true;
break;
case INTEL_PIPE_CRC_SOURCE_DP_D:
- if (!IS_CHERRYVIEW(dev_priv))
+ if (!display->platform.cherryview)
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
need_stable_symbols = true;
@@ -170,7 +170,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
* - DisplayPort scrambling: used for EMI reduction
*/
if (need_stable_symbols) {
- u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X(dev_priv));
+ u32 tmp = intel_de_read(display, PORT_DFT2_G4X(display));
tmp |= DC_BALANCE_RESET_VLV;
switch (pipe) {
@@ -186,26 +186,26 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
default:
return -EINVAL;
}
- intel_de_write(dev_priv, PORT_DFT2_G4X(dev_priv), tmp);
+ intel_de_write(display, PORT_DFT2_G4X(display), tmp);
}
return 0;
}
-static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int i9xx_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+ i9xx_pipe_crc_auto_source(display, pipe, source);
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
break;
case INTEL_PIPE_CRC_SOURCE_TV:
- if (!SUPPORTS_TV(dev_priv))
+ if (!SUPPORTS_TV(display))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
break;
@@ -229,10 +229,10 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
-static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
+static void vlv_undo_pipe_scramble_reset(struct intel_display *display,
enum pipe pipe)
{
- u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X(dev_priv));
+ u32 tmp = intel_de_read(display, PORT_DFT2_G4X(display));
switch (pipe) {
case PIPE_A:
@@ -249,7 +249,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
}
if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
tmp &= ~DC_BALANCE_RESET_VLV;
- intel_de_write(dev_priv, PORT_DFT2_G4X(dev_priv), tmp);
+ intel_de_write(display, PORT_DFT2_G4X(display), tmp);
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -281,18 +281,18 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
static void
intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *pipe_config;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
int ret;
- if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
- i915gm_irq_cstate_wa(dev_priv, enable);
+ if (display->platform.i945gm || display->platform.i915gm)
+ i915gm_irq_cstate_wa(display, enable);
drm_modeset_acquire_init(&ctx, 0);
- state = drm_atomic_state_alloc(&dev_priv->drm);
+ state = drm_atomic_state_alloc(display->drm);
if (!state) {
ret = -ENOMEM;
goto unlock;
@@ -311,7 +311,7 @@ retry:
pipe_config->uapi.mode_changed = pipe_config->has_psr;
pipe_config->crc_enabled = enable;
- if (IS_HASWELL(dev_priv) &&
+ if (display->platform.haswell &&
pipe_config->hw.active && crtc->pipe == PIPE_A &&
pipe_config->cpu_transcoder == TRANSCODER_EDP)
pipe_config->uapi.mode_changed = true;
@@ -327,13 +327,13 @@ put_state:
drm_atomic_state_put(state);
unlock:
- drm_WARN(&dev_priv->drm, ret,
+ drm_WARN(display->drm, ret,
"Toggling workaround to %i returns %i\n", enable, ret);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
}
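For readers unfamiliar with the locking around the retry: label above, the function follows the canonical DRM acquire-context pattern; a minimal sketch under that assumption (the backoff path itself is outside the hunks shown here):

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_atomic_commit(state);          /* or any lock-taking step */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);   /* drop the half-built state */
		drm_modeset_backoff(&ctx);       /* wait for contended locks */
		goto retry;
	}
	drm_atomic_state_put(state);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);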
-static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int ivb_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
@@ -361,7 +361,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
-static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int skl_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
@@ -404,22 +404,22 @@ static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
-static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int get_new_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source, u32 *val)
{
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
return i8xx_pipe_crc_ctl_reg(source, val);
- else if (DISPLAY_VER(dev_priv) < 5)
- return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
- else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv))
+ else if (DISPLAY_VER(display) < 5)
+ return i9xx_pipe_crc_ctl_reg(display, pipe, source, val);
+ else if (display->platform.valleyview || display->platform.cherryview)
+ return vlv_pipe_crc_ctl_reg(display, pipe, source, val);
+ else if (display->platform.ironlake || display->platform.sandybridge)
return ilk_pipe_crc_ctl_reg(source, val);
- else if (DISPLAY_VER(dev_priv) < 9)
- return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+ else if (DISPLAY_VER(display) < 9)
+ return ivb_pipe_crc_ctl_reg(display, pipe, source, val);
else
- return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+ return skl_pipe_crc_ctl_reg(display, pipe, source, val);
}
static int
@@ -447,7 +447,7 @@ void intel_crtc_crc_init(struct intel_crtc *crtc)
spin_lock_init(&pipe_crc->lock);
}
-static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
+static int i8xx_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -459,7 +459,7 @@ static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
+static int i9xx_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -472,7 +472,7 @@ static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int vlv_crc_source_valid(struct drm_i915_private *dev_priv,
+static int vlv_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -487,7 +487,7 @@ static int vlv_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int ilk_crc_source_valid(struct drm_i915_private *dev_priv,
+static int ilk_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -501,7 +501,7 @@ static int ilk_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
+static int ivb_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -515,7 +515,7 @@ static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
+static int skl_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -535,21 +535,21 @@ static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
}
static int
-intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
+intel_is_valid_crc_source(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
- if (DISPLAY_VER(dev_priv) == 2)
- return i8xx_crc_source_valid(dev_priv, source);
- else if (DISPLAY_VER(dev_priv) < 5)
- return i9xx_crc_source_valid(dev_priv, source);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_crc_source_valid(dev_priv, source);
- else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv))
- return ilk_crc_source_valid(dev_priv, source);
- else if (DISPLAY_VER(dev_priv) < 9)
- return ivb_crc_source_valid(dev_priv, source);
+ if (DISPLAY_VER(display) == 2)
+ return i8xx_crc_source_valid(display, source);
+ else if (DISPLAY_VER(display) < 5)
+ return i9xx_crc_source_valid(display, source);
+ else if (display->platform.valleyview || display->platform.cherryview)
+ return vlv_crc_source_valid(display, source);
+ else if (display->platform.ironlake || display->platform.sandybridge)
+ return ilk_crc_source_valid(display, source);
+ else if (DISPLAY_VER(display) < 9)
+ return ivb_crc_source_valid(display, source);
else
- return skl_crc_source_valid(dev_priv, source);
+ return skl_crc_source_valid(display, source);
}
const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
@@ -562,16 +562,16 @@ const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
size_t *values_cnt)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum intel_pipe_crc_source source;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
- drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
+ drm_dbg_kms(display->drm, "unknown source %s\n", source_name);
return -EINVAL;
}
if (source == INTEL_PIPE_CRC_SOURCE_AUTO ||
- intel_is_valid_crc_source(dev_priv, source) == 0) {
+ intel_is_valid_crc_source(display, source) == 0) {
*values_cnt = 5;
return 0;
}
@@ -583,7 +583,6 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
@@ -594,14 +593,14 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
bool enable;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
- drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
+ drm_dbg_kms(display->drm, "unknown source %s\n", source_name);
return -EINVAL;
}
power_domain = POWER_DOMAIN_PIPE(pipe);
wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Trying to capture CRC while pipe is off\n");
return -EIO;
}
@@ -610,17 +609,17 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
if (enable)
intel_crtc_crc_setup_workarounds(crtc, true);
- ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val);
+ ret = get_new_crc_ctl_reg(display, pipe, &source, &val);
if (ret != 0)
goto out;
pipe_crc->source = source;
- intel_de_write(dev_priv, PIPE_CRC_CTL(dev_priv, pipe), val);
- intel_de_posting_read(dev_priv, PIPE_CRC_CTL(dev_priv, pipe));
+ intel_de_write(display, PIPE_CRC_CTL(display, pipe), val);
+ intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe));
if (!source) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_undo_pipe_scramble_reset(dev_priv, pipe);
+ if (display->platform.valleyview || display->platform.cherryview)
+ vlv_undo_pipe_scramble_reset(display, pipe);
}
pipe_crc->skipped = 0;
@@ -636,7 +635,7 @@ out:
void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
enum pipe pipe = crtc->pipe;
u32 val = 0;
@@ -644,19 +643,20 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
if (!crtc->base.crc.opened)
return;
- if (get_new_crc_ctl_reg(dev_priv, pipe, &pipe_crc->source, &val) < 0)
+ if (get_new_crc_ctl_reg(display, pipe, &pipe_crc->source, &val) < 0)
return;
/* Don't need pipe_crc->lock here, IRQs are not generated. */
pipe_crc->skipped = 0;
- intel_de_write(dev_priv, PIPE_CRC_CTL(dev_priv, pipe), val);
- intel_de_posting_read(dev_priv, PIPE_CRC_CTL(dev_priv, pipe));
+ intel_de_write(display, PIPE_CRC_CTL(display, pipe), val);
+ intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe));
}
void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
enum pipe pipe = crtc->pipe;
@@ -665,7 +665,7 @@ void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
pipe_crc->skipped = INT_MIN;
spin_unlock_irq(&pipe_crc->lock);
- intel_de_write(dev_priv, PIPE_CRC_CTL(dev_priv, pipe), 0);
- intel_de_posting_read(dev_priv, PIPE_CRC_CTL(dev_priv, pipe));
+ intel_de_write(display, PIPE_CRC_CTL(display, pipe), 0);
+ intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe));
intel_synchronize_irq(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index b1675b46e06c..c00d9184c586 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -52,44 +52,57 @@ intel_reuse_initial_plane_obj(struct intel_crtc *this,
return false;
}
+static enum intel_memory_type
+initial_plane_memory_type(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (display->platform.dgfx)
+ return INTEL_MEMORY_LOCAL;
+ else if (HAS_LMEMBAR_SMEM_STOLEN(i915))
+ return INTEL_MEMORY_STOLEN_LOCAL;
+ else
+ return INTEL_MEMORY_STOLEN_SYSTEM;
+}
+
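In plain terms, the new helper picks the memory region the firmware-programmed framebuffer must live in; a hedged reading of the three cases (the platform names are illustrative, not taken from the patch):

	/* Hypothetical examples:
	 *   discrete GPU (display->platform.dgfx)  -> INTEL_MEMORY_LOCAL
	 *   HAS_LMEMBAR_SMEM_STOLEN(i915) platform -> INTEL_MEMORY_STOLEN_LOCAL
	 *   everything else                        -> INTEL_MEMORY_STOLEN_SYSTEM
	 */

initial_plane_phys() below then cross-checks that choice against what the GGTT PTE actually reports via intel_ggtt_read_entry().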
static bool
-initial_plane_phys_lmem(struct intel_display *display,
- struct intel_initial_plane_config *plane_config)
+initial_plane_phys(struct intel_display *display,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_i915_private *i915 = to_i915(display->drm);
- gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct intel_memory_region *mem;
+ enum intel_memory_type mem_type;
+ bool is_present, is_local;
dma_addr_t dma_addr;
- gen8_pte_t pte;
u32 base;
+ mem_type = initial_plane_memory_type(display);
+ mem = intel_memory_region_by_type(i915, mem_type);
+ if (!mem) {
+ drm_dbg_kms(display->drm,
+ "Initial plane memory region (type %s) not initialized\n",
+ intel_memory_type_str(mem_type));
+ return false;
+ }
+
base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
- gte += base / I915_GTT_PAGE_SIZE;
+ dma_addr = intel_ggtt_read_entry(&ggtt->vm, base, &is_present, &is_local);
- pte = ioread64(gte);
- if (!(pte & GEN12_GGTT_PTE_LM)) {
+ if (!is_present) {
drm_err(display->drm,
- "Initial plane programming missing PTE_LM bit\n");
+ "Initial plane FB PTE not present\n");
return false;
}
- dma_addr = pte & GEN12_GGTT_PTE_ADDR_MASK;
-
- if (IS_DGFX(i915))
- mem = i915->mm.regions[INTEL_REGION_LMEM_0];
- else
- mem = i915->mm.stolen_region;
- if (!mem) {
- drm_dbg_kms(display->drm,
- "Initial plane memory region not initialized\n");
+ if (intel_memory_type_is_local(mem->type) != is_local) {
+ drm_err(display->drm,
+ "Initial plane FB PTE unsuitable for %s\n",
+ mem->region.name);
return false;
}
- /*
- * On lmem we don't currently expect this to
- * ever be placed in the stolen portion.
- */
if (dma_addr < mem->region.start || dma_addr > mem->region.end) {
drm_err(display->drm,
"Initial plane programming using invalid range, dma_addr=%pa (%s [%pa-%pa])\n",
@@ -107,42 +120,6 @@ initial_plane_phys_lmem(struct intel_display *display,
return true;
}
-static bool
-initial_plane_phys_smem(struct intel_display *display,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_i915_private *i915 = to_i915(display->drm);
- struct intel_memory_region *mem;
- u32 base;
-
- base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
-
- mem = i915->mm.stolen_region;
- if (!mem) {
- drm_dbg_kms(display->drm,
- "Initial plane memory region not initialized\n");
- return false;
- }
-
- /* FIXME get and validate the dma_addr from the PTE */
- plane_config->phys_base = base;
- plane_config->mem = mem;
-
- return true;
-}
-
-static bool
-initial_plane_phys(struct intel_display *display,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (IS_DGFX(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
- return initial_plane_phys_lmem(display, plane_config);
- else
- return initial_plane_phys_smem(display, plane_config);
-}
-
static struct i915_vma *
initial_plane_vma(struct intel_display *display,
struct intel_initial_plane_config *plane_config)
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index 63301a01906c..1253376c7654 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -5,6 +5,8 @@
#include <linux/bitops.h>
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
@@ -476,13 +478,34 @@ static bool intel_pmdemand_req_complete(struct intel_display *display)
XELPDP_PMDEMAND_REQ_ENABLE);
}
-static void intel_pmdemand_wait(struct intel_display *display)
+static void intel_pmdemand_poll(struct intel_display *display)
{
- if (!wait_event_timeout(display->pmdemand.waitqueue,
- intel_pmdemand_req_complete(display),
- msecs_to_jiffies_timeout(10)))
+ const unsigned int timeout_ms = 10;
+ u32 status;
+ int ret;
+
+ ret = intel_de_wait_custom(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
+ XELPDP_PMDEMAND_REQ_ENABLE, 0,
+ 50, timeout_ms, &status);
+
+ if (ret == -ETIMEDOUT)
drm_err(display->drm,
- "timed out waiting for Punit PM Demand Response\n");
+ "timed out waiting for Punit PM Demand Response within %ums (status 0x%08x)\n",
+ timeout_ms, status);
+}
+
+static void intel_pmdemand_wait(struct intel_display *display)
+{
+ /* Wa_14024400148: for LNL, use the polling method */
+ if (DISPLAY_VER(display) == 20) {
+ intel_pmdemand_poll(display);
+ } else {
+ if (!wait_event_timeout(display->pmdemand.waitqueue,
+ intel_pmdemand_req_complete(display),
+ msecs_to_jiffies_timeout(10)))
+ drm_err(display->drm,
+ "timed out waiting for Punit PM Demand Response\n");
+ }
}
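A note on the helper used above, assuming the usual intel_de_wait_custom() semantics of polling a register until (reg & mask) == value:

	/* The call polls XELPDP_INITIATE_PMDEMAND_REQUEST(1) on a ~50 us fast
	 * period until XELPDP_PMDEMAND_REQ_ENABLE reads back as 0, giving up
	 * after timeout_ms; 'status' holds the last value read, which is why
	 * the error message can print it. */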
/* Required to be programmed during Display Init Sequences. */
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 617ce4993172..05e1e5c7e8b7 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -91,7 +91,6 @@ static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
bool pll_enabled, release_cl_override = false;
@@ -134,7 +133,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
release_cl_override = display->platform.cherryview &&
!chv_phy_powergate_ch(display, phy, ch, true);
- if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(display))) {
+ if (vlv_force_pll_on(display, pipe, vlv_get_dpll(display))) {
drm_err(display->drm,
"Failed to force on PLL for pipe %c!\n",
pipe_name(pipe));
@@ -158,7 +157,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
intel_de_posting_read(display, intel_dp->output_reg);
if (!pll_enabled) {
- vlv_force_pll_off(dev_priv, pipe);
+ vlv_force_pll_off(display, pipe);
if (release_cl_override)
chv_phy_powergate_ch(display, phy, ch, false);
@@ -351,21 +350,19 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
static int intel_num_pps(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (display->platform.valleyview || display->platform.cherryview)
return 2;
if (display->platform.geminilake || display->platform.broxton)
return 2;
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL)
return 2;
- if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
+ if (INTEL_PCH_TYPE(display) >= PCH_DG1)
return 1;
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
return 2;
return 1;
@@ -374,11 +371,10 @@ static int intel_num_pps(struct intel_display *display)
static bool intel_pps_is_valid(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (intel_dp->pps.pps_idx == 1 &&
- INTEL_PCH_TYPE(i915) >= PCH_ICP &&
- INTEL_PCH_TYPE(i915) <= PCH_ADP)
+ INTEL_PCH_TYPE(display) >= PCH_ICP &&
+ INTEL_PCH_TYPE(display) <= PCH_ADP)
return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;
@@ -500,7 +496,6 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
struct pps_registers *regs)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int pps_idx;
memset(regs, 0, sizeof(*regs));
@@ -519,7 +514,7 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
if (display->platform.geminilake || display->platform.broxton ||
- INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ INTEL_PCH_TYPE(display) >= PCH_CNP)
regs->pp_div = INVALID_MMIO_REG;
else
regs->pp_div = PP_DIVISOR(display, pps_idx);
@@ -744,11 +739,11 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->pps.want_panel_vdd;
- lockdep_assert_held(&display->pps.mutex);
-
if (!intel_dp_is_edp(intel_dp))
return false;
+ lockdep_assert_held(&display->pps.mutex);
+
cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
intel_dp->pps.want_panel_vdd = true;
@@ -925,11 +920,11 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
{
struct intel_display *display = to_intel_display(intel_dp);
- lockdep_assert_held(&display->pps.mutex);
-
if (!intel_dp_is_edp(intel_dp))
return;
+ lockdep_assert_held(&display->pps.mutex);
+
INTEL_DISPLAY_STATE_WARN(display, !intel_dp->pps.want_panel_vdd,
"[ENCODER:%d:%s] %s VDD not forced on",
dp_to_dig_port(intel_dp)->base.base.base.id,
@@ -1592,7 +1587,6 @@ static void pps_init_delays(struct intel_dp *intel_dp)
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pp_on, pp_off, port_sel = 0;
int div = DISPLAY_RUNTIME_INFO(display)->rawclk_freq / 1000;
struct pps_registers regs;
@@ -1639,7 +1633,7 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
* power sequencer any more. */
if (display->platform.valleyview || display->platform.cherryview) {
port_sel = PANEL_PORT_SELECT_VLV(port);
- } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+ } else if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display)) {
switch (port) {
case PORT_A:
port_sel = PANEL_PORT_SELECT_DPA;
@@ -1792,9 +1786,7 @@ void intel_pps_unlock_regs_wa(struct intel_display *display)
void intel_pps_setup(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (HAS_PCH_SPLIT(i915) || display->platform.geminilake || display->platform.broxton)
+ if (HAS_PCH_SPLIT(display) || display->platform.geminilake || display->platform.broxton)
display->pps.mmio_base = PCH_PPS_BASE;
else if (display->platform.valleyview || display->platform.cherryview)
display->pps.mmio_base = VLV_PPS_BASE;
@@ -1837,7 +1829,6 @@ void intel_pps_connector_debugfs_add(struct intel_connector *connector)
void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
i915_reg_t pp_reg;
u32 val;
enum pipe panel_pipe = INVALID_PIPE;
@@ -1846,7 +1837,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
if (drm_WARN_ON(display->drm, HAS_DDI(display)))
return;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
u32 port_sel;
pp_reg = PP_CONTROL(display, 0);
@@ -1855,7 +1846,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
switch (port_sel) {
case PANEL_PORT_SELECT_LVDS:
- intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
+ intel_lvds_port_enabled(display, PCH_LVDS, &panel_pipe);
break;
case PANEL_PORT_SELECT_DPA:
g4x_dp_port_enabled(display, DP_A, PORT_A, &panel_pipe);
@@ -1883,7 +1874,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
drm_WARN_ON(display->drm,
port_sel != PANEL_PORT_SELECT_LVDS);
- intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
+ intel_lvds_port_enabled(display, LVDS, &panel_pipe);
}
val = intel_de_read(display, pp_reg);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 4e938bad808c..430ad4ef7146 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_vblank.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -36,7 +37,9 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_frontbuffer.h"
@@ -45,6 +48,7 @@
#include "intel_psr_regs.h"
#include "intel_snps_phy.h"
#include "intel_vblank.h"
+#include "intel_vrr.h"
#include "skl_universal_plane.h"
/**
@@ -463,8 +467,8 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
if (DISPLAY_VER(display) >= 9) {
u32 val;
- val = intel_de_rmw(dev_priv,
- PSR_EVENT(dev_priv, cpu_transcoder),
+ val = intel_de_rmw(display,
+ PSR_EVENT(display, cpu_transcoder),
0, 0);
psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
@@ -689,7 +693,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 aux_clock_divider, aux_ctl;
/* write DP_SET_POWER=D0 */
@@ -704,7 +707,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
- intel_de_write(dev_priv,
+ intel_de_write(display,
psr_aux_data_reg(display, cpu_transcoder, i >> 2),
intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
@@ -794,31 +797,10 @@ static void _psr_enable_sink(struct intel_dp *intel_dp,
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
}
-static void intel_psr_enable_sink_alpm(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- u8 val;
-
- /*
- * eDP Panel Replay uses always ALPM
- * PSR2 uses ALPM but PSR1 doesn't
- */
- if (!intel_dp_is_edp(intel_dp) || (!crtc_state->has_panel_replay &&
- !crtc_state->has_sel_update))
- return;
-
- val = DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE;
-
- if (crtc_state->has_panel_replay)
- val |= DP_ALPM_MODE_AUX_LESS;
-
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
-}
-
static void intel_psr_enable_sink(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- intel_psr_enable_sink_alpm(intel_dp, crtc_state);
+ intel_alpm_enable_sink(intel_dp, crtc_state);
crtc_state->has_panel_replay ?
_panel_replay_enable_sink(intel_dp, crtc_state) :
@@ -839,7 +821,6 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val = 0;
if (DISPLAY_VER(display) >= 11)
@@ -873,7 +854,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
* WA 0479: hsw,bdw
* "Do not skip both TP1 and TP2/TP3"
*/
- if (DISPLAY_VER(dev_priv) < 9 &&
+ if (DISPLAY_VER(display) < 9 &&
connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
val |= EDP_PSR_TP2_TP3_TIME_100us;
@@ -906,10 +887,21 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
return idle_frames;
}
+static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ u32 current_dc_state = intel_display_power_get_current_dc_state(display);
+ struct drm_vblank_crtc *vblank = &display->drm->vblank[intel_dp->psr.pipe];
+
+ return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
+ current_dc_state != DC_STATE_EN_UPTO_DC6) ||
+ intel_dp->psr.active_non_psr_pipes ||
+ READ_ONCE(vblank->enabled);
+}
+
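A hedged reading of the new predicate (the DC-state details are inferred from the code, not spelled out by the patch): DC5/DC6 entry counts as blocked when the currently-enabled DC state is neither up-to-DC5 nor up-to-DC6, when other non-PSR pipes are active, or when vblank interrupts are enabled on the PSR pipe; in those cases Wa_16025596647 drives the package-C exit from the undelayed vblank instead of relying on idle frames:

	/* Illustrative (hypothetical) cases:
	 *   DC_STATE_EN_UPTO_DC6, no other pipes, vblank off -> false
	 *   DC_STATE_EN_UPTO_DC5, second pipe active         -> true
	 *   DC_STATE_EN_UPTO_DC6, vblank irq enabled         -> true
	 */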
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 max_sleep_time = 0x1f;
u32 val = EDP_PSR_ENABLE;
@@ -919,7 +911,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (DISPLAY_VER(display) < 20)
val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
- if (IS_HASWELL(dev_priv))
+ if (display->platform.haswell)
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
if (intel_dp->psr.link_standby)
@@ -935,6 +927,14 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
+
+ /* Wa_16025596647 */
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ is_dc5_dc6_blocked(intel_dp))
+ intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
+ intel_dp->psr.pipe,
+ true);
}
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
@@ -1013,14 +1013,21 @@ static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val = EDP_PSR2_ENABLE;
u32 psr_val = 0;
+ u8 idle_frames;
- val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
+ /* Wa_16025596647 */
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ is_dc5_dc6_blocked(intel_dp))
+ idle_frames = 0;
+ else
+ idle_frames = psr_compute_idle_frames(intel_dp);
+ val |= EDP_PSR2_IDLE_FRAMES(idle_frames);
- if (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))
+ if (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)
val |= EDP_SU_TRACK_ENABLE;
if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
@@ -1038,7 +1045,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
}
/* Wa_22012278275:adl-p */
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
+ if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
static const u8 map[] = {
2, /* 5 lines */
1, /* 6 lines */
@@ -1103,9 +1110,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
static bool
transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
else if (DISPLAY_VER(display) >= 12)
return cpu_transcoder == TRANSCODER_A;
@@ -1183,10 +1188,9 @@ dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port = dig_port->base.port;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
return pipe <= PIPE_B && port <= PORT_B;
else
return pipe == PIPE_A && port == PORT_A;
@@ -1197,7 +1201,6 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
struct i915_power_domains *power_domains = &display->power.domains;
u32 exit_scanlines;
@@ -1223,7 +1226,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
return;
/* Wa_16011303918:adl-p */
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
+ if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
return;
/*
@@ -1264,7 +1267,6 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
@@ -1286,7 +1288,7 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
* For other platforms with SW tracking we can adjust the y coordinates
* to match sink requirement if multiple of 4.
*/
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
y_granularity = intel_dp->psr.su_y_granularity;
else if (intel_dp->psr.su_y_granularity <= 2)
y_granularity = 4;
@@ -1412,7 +1414,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
@@ -1421,20 +1422,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
/* JSL and EHL only supports eDP 1.3 */
- if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
+ if (display->platform.jasperlake || display->platform.elkhartlake) {
drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
return false;
}
/* Wa_16011181250 */
- if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
- IS_DG2(dev_priv)) {
+ if (display->platform.rocketlake || display->platform.alderlake_s ||
+ display->platform.dg2) {
drm_dbg_kms(display->drm,
"PSR2 is defeatured for this platform\n");
return false;
}
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
+ if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
drm_dbg_kms(display->drm,
"PSR2 not completely functional in this stepping\n");
return false;
@@ -1453,7 +1454,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* over PSR2.
*/
if (crtc_state->dsc.compression_enable &&
- (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
+ (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)) {
drm_dbg_kms(display->drm,
"PSR2 cannot be enabled since DSC is enabled\n");
return false;
@@ -1486,7 +1487,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
/* Wa_16011303918:adl-p */
if (crtc_state->vrr.enable &&
- IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
+ display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
drm_dbg_kms(display->drm,
"PSR2 not enabled, not compatible with HW stepping + VRR\n");
return false;
@@ -1604,6 +1605,12 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
return false;
}
+ if (crtc_state->crc_enabled) {
+ drm_dbg_kms(display->drm,
+ "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
+ return false;
+ }
+
if (!intel_dp_is_edp(intel_dp))
return true;
@@ -1634,12 +1641,6 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
if (!alpm_config_valid(intel_dp, crtc_state, true))
return false;
- if (crtc_state->crc_enabled) {
- drm_dbg_kms(display->drm,
- "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
- return false;
- }
-
return true;
}
@@ -1658,6 +1659,9 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
{
struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_crtc *crtc;
+ u8 active_pipes = 0;
if (!psr_global_enabled(intel_dp)) {
drm_dbg_kms(display->drm, "PSR disabled by flag\n");
@@ -1711,6 +1715,24 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm,
"PSR disabled to workaround PSR FSM hang issue\n");
}
+
+ /* The rest is for Wa_16025596647 */
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ /* Not needed by Panel Replay */
+ if (crtc_state->has_panel_replay)
+ return;
+
+ /* We ignore possible secondary PSR/Panel Replay capable eDP */
+ for_each_intel_crtc(display->drm, crtc)
+ active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
+
+ active_pipes = intel_calc_active_pipes(state, active_pipes);
+
+ crtc_state->active_non_psr_pipes = active_pipes &
+ ~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
}
void intel_psr_get_config(struct intel_encoder *encoder,
@@ -1827,7 +1849,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask = 0;
@@ -1866,7 +1887,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* As a workaround leave LPSP unmasked to prevent PSR entry
* when external displays are active.
*/
- if (DISPLAY_VER(display) >= 8 || IS_HASWELL_ULT(dev_priv))
+ if (DISPLAY_VER(display) >= 8 || display->platform.haswell_ult)
mask |= EDP_PSR_DEBUG_MASK_LPSP;
if (DISPLAY_VER(display) < 20)
@@ -1880,7 +1901,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
/* allow PSR with sprite enabled */
- if (IS_HASWELL(dev_priv))
+ if (display->platform.haswell)
mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
}
@@ -1903,9 +1924,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
- if (intel_dp_is_edp(intel_dp))
- intel_alpm_configure(intel_dp, crtc_state);
-
/*
* Wa_16013835468
* Wa_14015648006
@@ -1925,7 +1943,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
*/
if (!intel_dp->psr.panel_replay_enabled &&
(IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
- IS_ALDERLAKE_P(dev_priv)))
+ display->platform.alderlake_p))
intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
0, ADLP_1_BASED_X_GRANULARITY);
@@ -1936,10 +1954,18 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
0,
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
- else if (IS_ALDERLAKE_P(dev_priv))
+ else if (display->platform.alderlake_p)
intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
CLKGATE_DIS_MISC_DMASC_GATING_DIS);
}
+
+ /* Wa_16025596647 */
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ !intel_dp->psr.panel_replay_enabled)
+ intel_dmc_block_pkgc(display, intel_dp->psr.pipe, true);
+
+ intel_alpm_configure(intel_dp, crtc_state);
}
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
@@ -1995,6 +2021,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
intel_dp->psr.req_psr2_sdp_prior_scanline =
crtc_state->req_psr2_sdp_prior_scanline;
+ intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
if (!psr_interrupt_error_check(intel_dp))
return;
@@ -2006,8 +2033,9 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.sel_update_enabled ? "2" : "1");
/*
- * Enabling here only for PSR. Panel Replay enable bit is already
- * written at this point. See
+ * Sink PSR/Panel Replay is enabled here only for PSR; the Panel Replay
+ * enable bit has already been written at this point. Sink ALPM is enabled
+ * here for both PSR and Panel Replay. See
* intel_psr_panel_replay_enable_sink. Modifiers/options:
* - Selective Update
* - Region Early Transport
@@ -2024,7 +2052,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_psr_enable_source(intel_dp, crtc_state);
intel_dp->psr.enabled = true;
- intel_dp->psr.paused = false;
+ intel_dp->psr.pause_counter = 0;
/*
* Link_ok is sticky and set here on PSR enable. We can assume link
@@ -2070,6 +2098,12 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
} else {
+ if (DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
+ intel_dp->psr.pipe,
+ false);
+
val = intel_de_rmw(display,
psr_ctl_reg(display, cpu_transcoder),
EDP_PSR_ENABLE, 0);
@@ -2104,7 +2138,6 @@ static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
lockdep_assert_held(&intel_dp->psr.lock);
@@ -2136,7 +2169,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_de_rmw(display,
MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
- else if (IS_ALDERLAKE_P(dev_priv))
+ else if (display->platform.alderlake_p)
intel_de_rmw(display, CLKGATE_DIS_MISC,
CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
}
@@ -2144,16 +2177,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
if (intel_dp_is_edp(intel_dp))
intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
- /* Panel Replay on eDP is always using ALPM aux less. */
- if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
- intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
- ALPM_CTL_ALPM_ENABLE |
- ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
-
- intel_de_rmw(display,
- PORT_ALPM_CTL(cpu_transcoder),
- PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
- }
+ if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp))
+ intel_alpm_disable(intel_dp);
/* Disable PSR on Sink */
if (!intel_dp->psr.panel_replay_enabled) {
@@ -2164,12 +2189,19 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
DP_RECEIVER_ALPM_CONFIG, 0);
}
+ /* Wa_16025596647 */
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ !intel_dp->psr.panel_replay_enabled)
+ intel_dmc_block_pkgc(display, intel_dp->psr.pipe, false);
+
intel_dp->psr.enabled = false;
intel_dp->psr.panel_replay_enabled = false;
intel_dp->psr.sel_update_enabled = false;
intel_dp->psr.psr2_sel_fetch_enabled = false;
intel_dp->psr.su_region_et_enabled = false;
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
+ intel_dp->psr.active_non_psr_pipes = 0;
}
/**
@@ -2210,7 +2242,6 @@ void intel_psr_disable(struct intel_dp *intel_dp,
*/
void intel_psr_pause(struct intel_dp *intel_dp)
{
- struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
@@ -2223,12 +2254,10 @@ void intel_psr_pause(struct intel_dp *intel_dp)
return;
}
- /* If we ever hit this, we will need to add refcount to pause/resume */
- drm_WARN_ON(display->drm, psr->paused);
-
- intel_psr_exit(intel_dp);
- intel_psr_wait_exit_locked(intel_dp);
- psr->paused = true;
+ if (intel_dp->psr.pause_counter++ == 0) {
+ intel_psr_exit(intel_dp);
+ intel_psr_wait_exit_locked(intel_dp);
+ }
mutex_unlock(&psr->lock);
@@ -2244,6 +2273,7 @@ void intel_psr_pause(struct intel_dp *intel_dp)
*/
void intel_psr_resume(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
@@ -2251,28 +2281,36 @@ void intel_psr_resume(struct intel_dp *intel_dp)
mutex_lock(&psr->lock);
- if (!psr->paused)
- goto unlock;
+ if (!psr->enabled)
+ goto out;
- psr->paused = false;
- intel_psr_activate(intel_dp);
+ if (!psr->pause_counter) {
+ drm_warn(display->drm, "Unbalanced PSR pause/resume!\n");
+ goto out;
+ }
-unlock:
+ if (--intel_dp->psr.pause_counter == 0)
+ intel_psr_activate(intel_dp);
+
+out:
mutex_unlock(&psr->lock);
}
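The switch from a boolean 'paused' flag to 'pause_counter' makes nested pausing legal; a minimal usage sketch of the new semantics:

	intel_psr_pause(intel_dp);   /* 0 -> 1: PSR exits, waits for idle */
	intel_psr_pause(intel_dp);   /* 1 -> 2: already paused, no HW access */
	intel_psr_resume(intel_dp);  /* 2 -> 1: still paused */
	intel_psr_resume(intel_dp);  /* 1 -> 0: intel_psr_activate() runs */

An extra, unbalanced resume now hits the "Unbalanced PSR pause/resume!" warning instead of silently re-activating PSR.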
/**
- * intel_psr_needs_block_dc_vblank - Check if block dc entry is needed
+ * intel_psr_needs_vblank_notification - Check if PSR needs vblank enable/disable
+ * notification.
* @crtc_state: CRTC status
*
* We need to block DC6 entry in case of Panel Replay as enabling VBI doesn't
* prevent it in case of Panel Replay. Panel Replay switches main link off on
* DC entry. This means vblank interrupts are not fired, which is a problem if
- * user-space is polling for vblank events.
+ * user-space is polling for vblank events. Wa_16025596647 also needs to
+ * know when vblank is enabled or disabled.
*/
-bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state)
+bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_encoder *encoder;
for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
@@ -2283,8 +2321,15 @@ bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state)
intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp_is_edp(intel_dp) &&
- CAN_PANEL_REPLAY(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
+ continue;
+
+ if (CAN_PANEL_REPLAY(intel_dp))
+ return true;
+
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ CAN_PSR(intel_dp))
return true;
}
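A minimal sketch of the rewritten predicate, as standalone C (struct sink and
needs_vblank_notification() are hypothetical; the two conditions mirror the
hunk above): Panel Replay always wants the notification, and on the
Wa_16025596647 platforms (LunarLake, and PantherLake A0 stepping) plain PSR
wants it too.

#include <stdbool.h>
#include <stdio.h>

struct sink {
	bool is_edp;
	bool can_panel_replay;
	bool can_psr;
};

static bool needs_vblank_notification(const struct sink *s, int display_ver,
				      bool affected_stepping)
{
	if (!s->is_edp)
		return false;

	/* Panel Replay switches the main link off on DC entry */
	if (s->can_panel_replay)
		return true;

	/* Wa_16025596647 needs the notification for plain PSR as well */
	return (display_ver == 20 || affected_stepping) && s->can_psr;
}

int main(void)
{
	struct sink edp_pr = { .is_edp = true, .can_panel_replay = true };
	struct sink edp_psr = { .is_edp = true, .can_psr = true };

	printf("%d\n", needs_vblank_notification(&edp_pr, 14, false));	/* 1 */
	printf("%d\n", needs_vblank_notification(&edp_psr, 14, false));	/* 0 */
	printf("%d\n", needs_vblank_notification(&edp_psr, 20, false));	/* 1 */
	return 0;
}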
@@ -2312,37 +2357,76 @@ void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
CURSURFLIVE(display, crtc->pipe), 0);
}
-static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
+/**
+ * intel_psr_min_vblank_delay - Minimum vblank delay needed by PSR
+ * @crtc_state: the crtc state
+ *
+ * Return the minimum vblank delay needed by PSR.
+ */
+int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (!crtc_state->has_psr)
+ return 0;
+
+ /* Wa_14015401596 */
+ if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
+ return 1;
+
+ /* The rest handles SRD_STATUS, which is needed on LunarLake and onwards */
+ if (DISPLAY_VER(display) < 20)
+ return 0;
+
+ /*
+ * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
+ *
+ * To deterministically capture the transition of the state machine
+ * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
+ * one line after the non-delayed V. Blank.
+ *
+ * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
+ * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
+ * - TRANS_VTOTAL[ Vertical Active ])
+ *
+ * SRD_STATUS is used only by PSR1 on PantherLake.
+ * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
+ */
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? 0 :
+ if (DISPLAY_VER(display) >= 30 && (crtc_state->has_panel_replay ||
+ crtc_state->has_sel_update))
+ return 0;
+ else if (DISPLAY_VER(display) < 30 && (crtc_state->has_sel_update ||
+ intel_crtc_has_type(crtc_state,
+ INTEL_OUTPUT_EDP)))
+ return 0;
+ else
+ return 1;
+}
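The decision tree above condenses into a small standalone function. This is an
illustrative model, not driver code (the struct and its fields are
hypothetical), but the returned values follow the patch: one extra line of
vblank delay for Wa_14015401596 and for configurations that rely on
SRD_STATUS, zero otherwise.

#include <stdbool.h>
#include <stdio.h>

struct cfg {
	bool has_psr;
	bool vrr_possible;
	bool has_panel_replay;
	bool has_sel_update;
	bool is_edp;
	int display_ver;
};

static int min_vblank_delay(const struct cfg *c)
{
	if (!c->has_psr)
		return 0;

	/* Wa_14015401596 */
	if (c->vrr_possible && c->display_ver >= 13 && c->display_ver <= 14)
		return 1;

	/* SRD_STATUS only matters on LunarLake and onwards */
	if (c->display_ver < 20)
		return 0;

	/* Modes that don't consume SRD_STATUS need no extra line */
	if (c->display_ver >= 30 &&
	    (c->has_panel_replay || c->has_sel_update))
		return 0;
	if (c->display_ver < 30 && (c->has_sel_update || c->is_edp))
		return 0;

	return 1;
}

int main(void)
{
	struct cfg pr_dp_lnl = {
		.has_psr = true, .has_panel_replay = true, .display_ver = 20,
	};
	struct cfg psr2_lnl = {
		.has_psr = true, .has_sel_update = true,
		.is_edp = true, .display_ver = 20,
	};

	printf("%d\n", min_vblank_delay(&pr_dp_lnl));	/* 1: uses SRD_STATUS */
	printf("%d\n", min_vblank_delay(&psr2_lnl));	/* 0 */
	return 0;
}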
+
+static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
+{
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 0 :
PSR2_MAN_TRK_CTL_ENABLE;
}
static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
}
static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
}
static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
}
@@ -2405,8 +2489,6 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
bool full_update)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val = man_trk_ctl_enable_bit_get(display);
/* SF partial frame enable has to be set even on full update */
@@ -2420,7 +2502,7 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
if (crtc_state->psr2_su_area.y1 == -1)
goto exit;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) {
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) {
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
} else {
@@ -2474,13 +2556,12 @@ static void clip_area_update(struct drm_rect *overlap_damage_area,
static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
u16 y_alignment;
/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
if (crtc_state->dsc.compression_enable &&
- (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14))
+ (display->platform.alderlake_p || DISPLAY_VER(display) >= 14))
y_alignment = vdsc_cfg->slice_height;
else
y_alignment = crtc_state->su_y_granularity;
@@ -2601,12 +2682,11 @@ static void
intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
/* Wa_14014971492 */
if (!crtc_state->has_panel_replay &&
((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
- IS_ALDERLAKE_P(i915) || IS_TIGERLAKE(i915))) &&
+ display->platform.alderlake_p || display->platform.tigerlake)) &&
crtc_state->splitter.enable)
crtc_state->psr2_su_area.y1 = 0;
@@ -2807,7 +2887,6 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
@@ -2839,7 +2918,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
new_crtc_state->has_sel_update != psr->sel_update_enabled ||
new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled ||
new_crtc_state->has_panel_replay != psr->panel_replay_enabled ||
- (DISPLAY_VER(i915) < 11 && new_crtc_state->wm_level_disabled))
+ (DISPLAY_VER(display) < 11 && new_crtc_state->wm_level_disabled))
intel_psr_disable_locked(intel_dp);
else if (new_crtc_state->wm_level_disabled)
/* Wa_14015648006 */
@@ -3322,7 +3401,7 @@ void intel_psr_flush(struct intel_display *display,
* we have to ensure that the PSR is not activated until
* intel_psr_resume() is called.
*/
- if (intel_dp->psr.paused)
+ if (intel_dp->psr.pause_counter)
goto unlock;
if (origin == ORIGIN_FLIP ||
@@ -3419,29 +3498,14 @@ static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
static void psr_alpm_check(struct intel_dp *intel_dp)
{
- struct intel_display *display = to_intel_display(intel_dp);
- struct drm_dp_aux *aux = &intel_dp->aux;
struct intel_psr *psr = &intel_dp->psr;
- u8 val;
- int r;
if (!psr->sel_update_enabled)
return;
- r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
- if (r != 1) {
- drm_err(display->drm, "Error reading ALPM status\n");
- return;
- }
-
- if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
+ if (intel_alpm_get_error(intel_dp)) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
- drm_dbg_kms(display->drm,
- "ALPM lock timeout error, disabling PSR\n");
-
- /* Clearing error */
- drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
}
}
@@ -3626,6 +3690,168 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
}
}
+/* Wa_16025596647 */
+static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ bool dc5_dc6_blocked;
+
+ if (!intel_dp->psr.active)
+ return;
+
+ dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp);
+
+ if (intel_dp->psr.sel_update_enabled)
+ psr2_program_idle_frames(intel_dp, dc5_dc6_blocked ? 0 :
+ psr_compute_idle_frames(intel_dp));
+ else
+ intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
+ intel_dp->psr.pipe,
+ dc5_dc6_blocked);
+}
+
+static void psr_dc5_dc6_wa_work(struct work_struct *work)
+{
+ struct intel_display *display = container_of(work, typeof(*display),
+ psr_dc5_dc6_wa_work);
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock(&intel_dp->psr.lock);
+
+ if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled)
+ intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
+
+ mutex_unlock(&intel_dp->psr.lock);
+ }
+}
+
+/**
+ * intel_psr_notify_dc5_dc6 - Notify PSR about DC5/DC6 being enabled/disabled
+ * @display: intel display struct
+ *
+ * This is targeted at the underrun on idle PSR HW bug (Wa_16025596647): it
+ * schedules psr_dc5_dc6_wa_work, which applies/removes the workaround.
+ */
+void intel_psr_notify_dc5_dc6(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ schedule_work(&display->psr_dc5_dc6_wa_work);
+}
+
+/**
+ * intel_psr_dc5_dc6_wa_init - Init work for underrun on idle PSR HW bug wa
+ * @display: intel display struct
+ *
+ * This is targeted at the underrun on idle PSR HW bug (Wa_16025596647): it
+ * initializes psr_dc5_dc6_wa_work, which is used to apply the workaround.
+ */
+void intel_psr_dc5_dc6_wa_init(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ INIT_WORK(&display->psr_dc5_dc6_wa_work, psr_dc5_dc6_wa_work);
+}
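What applying or removing Wa_16025596647 amounts to can be sketched in a few
lines of standalone C (illustrative only; program_idle_frames() and
set_pkgc_exit_at_vblank_start() are hypothetical stand-ins for the PSR2
idle-frame programming and the DMC package-C handshake used above):

#include <stdbool.h>
#include <stdio.h>

static void program_idle_frames(int frames)
{
	printf("PSR2 idle frames = %d\n", frames);
}

static void set_pkgc_exit_at_vblank_start(bool enable)
{
	printf("PkgC exit at start of undelayed vblank: %s\n",
	       enable ? "on" : "off");
}

static void apply_underrun_on_idle_wa(bool sel_update_enabled,
				      bool dc5_dc6_blocked,
				      int computed_idle_frames)
{
	if (sel_update_enabled)
		/* 0 idle frames while DC5/DC6 is blocked, mirroring the patch */
		program_idle_frames(dc5_dc6_blocked ? 0 : computed_idle_frames);
	else
		set_pkgc_exit_at_vblank_start(dc5_dc6_blocked);
}

int main(void)
{
	apply_underrun_on_idle_wa(true, true, 6);	/* idle frames = 0 */
	apply_underrun_on_idle_wa(true, false, 6);	/* idle frames = 6 */
	apply_underrun_on_idle_wa(false, true, 0);	/* PkgC handshake on */
	return 0;
}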
+
+/**
+ * intel_psr_notify_pipe_change - Notify PSR about enable/disable of a pipe
+ * @state: intel atomic state
+ * @crtc: intel crtc
+ * @enable: enable/disable
+ *
+ * This is targeted at the underrun on idle PSR HW bug (Wa_16025596647) to
+ * apply/remove the workaround when a pipe is being enabled/disabled.
+ */
+void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
+ struct intel_crtc *crtc, bool enable)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_encoder *encoder;
+
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ u8 active_non_psr_pipes;
+
+ mutex_lock(&intel_dp->psr.lock);
+
+ if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
+ goto unlock;
+
+ active_non_psr_pipes = intel_dp->psr.active_non_psr_pipes;
+
+ if (enable)
+ active_non_psr_pipes |= BIT(crtc->pipe);
+ else
+ active_non_psr_pipes &= ~BIT(crtc->pipe);
+
+ if (active_non_psr_pipes == intel_dp->psr.active_non_psr_pipes)
+ goto unlock;
+
+ if ((enable && intel_dp->psr.active_non_psr_pipes) ||
+ (!enable && !intel_dp->psr.active_non_psr_pipes)) {
+ intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
+ goto unlock;
+ }
+
+ intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
+
+ intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
+unlock:
+ mutex_unlock(&intel_dp->psr.lock);
+ }
+}
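The active_non_psr_pipes bookkeeping is the subtle part of this hunk, so here
is a standalone model of it (reevaluate_wa() is a hypothetical stand-in for
intel_psr_apply_underrun_on_idle_wa_locked()): enabling a pipe while other
non-PSR pipes are already active only updates the mask; the remaining
transitions re-evaluate the workaround.

#include <stdbool.h>
#include <stdio.h>

static unsigned int psr_active_non_psr_pipes;

static void reevaluate_wa(void)
{
	printf("re-evaluate Wa_16025596647 (mask %#x)\n",
	       psr_active_non_psr_pipes);
}

static void notify_pipe_change(unsigned int pipe, bool enable)
{
	unsigned int mask = psr_active_non_psr_pipes;

	if (enable)
		mask |= 1u << pipe;
	else
		mask &= ~(1u << pipe);

	if (mask == psr_active_non_psr_pipes)
		return;

	if ((enable && psr_active_non_psr_pipes) ||
	    (!enable && !psr_active_non_psr_pipes)) {
		psr_active_non_psr_pipes = mask;	/* bookkeeping only */
		return;
	}

	psr_active_non_psr_pipes = mask;
	reevaluate_wa();
}

int main(void)
{
	notify_pipe_change(1, true);	/* 0 -> 0x2: re-evaluate */
	notify_pipe_change(2, true);	/* 0x2 -> 0x6: bookkeeping only */
	notify_pipe_change(2, false);	/* 0x6 -> 0x2: re-evaluate */
	notify_pipe_change(1, false);	/* 0x2 -> 0: re-evaluate */
	return 0;
}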
+
+/**
+ * intel_psr_notify_vblank_enable_disable - Notify PSR about enable/disable of vblank
+ * @display: intel display struct
+ * @enable: enable/disable
+ *
+ * This is targeted at the underrun on idle PSR HW bug (Wa_16025596647) to
+ * apply/remove the workaround when vblank is being enabled/disabled.
+ */
+void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
+ bool enable)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock(&intel_dp->psr.lock);
+ if (intel_dp->psr.panel_replay_enabled) {
+ mutex_unlock(&intel_dp->psr.lock);
+ break;
+ }
+
+ if (intel_dp->psr.enabled)
+ intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
+
+ mutex_unlock(&intel_dp->psr.lock);
+ return;
+ }
+
+ /*
+ * NOTE: intel_display_power_set_target_dc_state is used
+ * only by PSR code for DC3CO handling. DC3CO target
+ * state is currently disabled in PSR code. If DC3CO
+ * is taken into use, we need to take that into account
+ * here as well.
+ */
+ intel_display_power_set_target_dc_state(display, enable ? DC_STATE_DISABLE :
+ DC_STATE_EN_UPTO_DC6);
+}
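The fallback path above, taken when no enabled PSR encoder consumes the
notification, reduces to toggling the target DC state; a minimal standalone
sketch, with hypothetical enum values standing in for the real DC-state
definitions:

#include <stdbool.h>
#include <stdio.h>

enum dc_state { DC_STATE_DISABLE, DC_STATE_EN_UPTO_DC6 };

static enum dc_state target_dc_state;

static void notify_vblank(bool enable)
{
	/* Block DC6 while vblank interrupts are needed, allow it otherwise */
	target_dc_state = enable ? DC_STATE_DISABLE : DC_STATE_EN_UPTO_DC6;
	printf("vblank %s -> target DC state %s\n", enable ? "on" : "off",
	       target_dc_state == DC_STATE_DISABLE ? "DISABLE" : "UPTO_DC6");
}

int main(void)
{
	notify_vblank(true);
	notify_vblank(false);
	return 0;
}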
+
static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
@@ -3634,8 +3860,8 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
const char *status = "unknown";
u32 val, status_val;
- if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
- intel_dp->psr.panel_replay_enabled)) {
+ if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) &&
+ (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) {
static const char * const live_status[] = {
"IDLE",
"CAPTURE",
@@ -3728,10 +3954,9 @@ static void intel_psr_print_mode(struct intel_dp *intel_dp,
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct intel_psr *psr = &intel_dp->psr;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
bool enabled;
u32 val, psr2_ctl;
@@ -3740,7 +3965,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
if (!(psr->sink_support || psr->sink_panel_replay_support))
return 0;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
mutex_lock(&psr->lock);
intel_psr_print_mode(intel_dp, m);
@@ -3822,7 +4047,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
unlock:
mutex_unlock(&psr->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
@@ -3853,9 +4078,7 @@ static int
i915_edp_psr_debug_set(void *data, u64 val)
{
struct intel_display *display = data;
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
- intel_wakeref_t wakeref;
int ret = -ENODEV;
if (!HAS_PSR(display))
@@ -3866,12 +4089,9 @@ i915_edp_psr_debug_set(void *data, u64 val)
drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
// TODO: split to each transcoder's PSR debug state
- ret = intel_psr_debug_set(intel_dp, val);
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ with_intel_display_rpm(display)
+ ret = intel_psr_debug_set(intel_dp, val);
}
return ret;
@@ -4004,3 +4224,13 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector)
debugfs_create_file("i915_psr_status", 0444, root,
connector, &i915_psr_status_fops);
}
+
+bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * eDP Panel Replay always uses ALPM.
+ * PSR2 uses ALPM but PSR1 doesn't.
+ */
+ return intel_dp_is_edp(intel_dp) && (crtc_state->has_sel_update ||
+ crtc_state->has_panel_replay);
+}
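The rule is small enough to state as a standalone predicate (an illustrative
sketch, not driver code):

#include <stdbool.h>
#include <stdio.h>

static bool needs_alpm(bool is_edp, bool has_sel_update, bool has_panel_replay)
{
	return is_edp && (has_sel_update || has_panel_replay);
}

int main(void)
{
	printf("%d\n", needs_alpm(true, true, false));	/* eDP PSR2: 1 */
	printf("%d\n", needs_alpm(true, false, false));	/* eDP PSR1: 0 */
	printf("%d\n", needs_alpm(true, false, true));	/* eDP Panel Replay: 1 */
	return 0;
}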
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index a43a374cff55..73c3fa40844b 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -59,7 +59,13 @@ void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
-bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state);
+bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state);
+void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
+ struct intel_crtc *crtc, bool enable);
+void intel_psr_notify_dc5_dc6(struct intel_display *display);
+void intel_psr_dc5_dc6_wa_init(struct intel_display *display);
+void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
+ bool enable);
bool intel_psr_link_ok(struct intel_dp *intel_dp);
void intel_psr_lock(const struct intel_crtc_state *crtc_state);
@@ -67,7 +73,9 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state);
void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
struct intel_atomic_state *state,
struct intel_crtc *crtc);
+int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state);
void intel_psr_connector_debugfs_add(struct intel_connector *connector);
void intel_psr_debugfs_register(struct intel_display *display);
+bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 6e2d9929b4d7..8a38df2c0283 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -36,9 +36,9 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -214,18 +214,17 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
{
struct intel_display *display = to_intel_display(&intel_sdvo->base);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 bval = val, cval = val;
int i;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
intel_de_write(display, intel_sdvo->sdvo_reg, val);
intel_de_posting_read(display, intel_sdvo->sdvo_reg);
/*
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- if (HAS_PCH_IBX(dev_priv)) {
+ if (HAS_PCH_IBX(display)) {
intel_de_write(display, intel_sdvo->sdvo_reg, val);
intel_de_posting_read(display, intel_sdvo->sdvo_reg);
}
@@ -1360,14 +1359,13 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(conn_state->connector);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct drm_display_mode *mode = &pipe_config->hw.mode;
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
pipe_config->has_pch_encoder = true;
if (!intel_fdi_compute_pipe_bpp(pipe_config))
return -EINVAL;
@@ -1527,7 +1525,6 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(intel_encoder);
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
const struct intel_sdvo_connector_state *sdvo_state =
@@ -1634,7 +1631,7 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
@@ -1670,13 +1667,12 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
bool intel_sdvo_port_enabled(struct intel_display *display,
i915_reg_t sdvo_reg, enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
val = intel_de_read(display, sdvo_reg);
/* asserts want to know the pipe even if the port is disabled */
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
*pipe = (val & SDVO_PIPE_SEL_MASK_CPT) >> SDVO_PIPE_SEL_SHIFT_CPT;
else if (display->platform.cherryview)
*pipe = (val & SDVO_PIPE_SEL_MASK_CHV) >> SDVO_PIPE_SEL_SHIFT_CHV;
@@ -1841,7 +1837,6 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
@@ -1861,7 +1856,7 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
* to transcoder A after disabling it to allow the
* matching DP port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
+ if (HAS_PCH_IBX(display) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -2036,7 +2031,7 @@ static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
struct intel_display *display = to_intel_display(&intel_sdvo->base);
u16 hotplug;
- if (!I915_HAS_HOTPLUG(display))
+ if (!HAS_HOTPLUG(display))
return 0;
/*
@@ -3367,9 +3362,7 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo_ddc *ddc,
static bool is_sdvo_port_valid(struct intel_display *display, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (HAS_PCH_SPLIT(dev_priv))
+ if (HAS_PCH_SPLIT(display))
return port == PORT_B;
else
return port == PORT_B || port == PORT_C;
@@ -3384,7 +3377,6 @@ static bool assert_sdvo_port_valid(struct intel_display *display, enum port port
bool intel_sdvo_init(struct intel_display *display,
i915_reg_t sdvo_reg, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
int i;
@@ -3427,7 +3419,7 @@ bool intel_sdvo_init(struct intel_display *display,
}
intel_encoder->compute_config = intel_sdvo_compute_config;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
intel_encoder->disable = pch_disable_sdvo;
intel_encoder->post_disable = pch_post_disable_sdvo;
} else {
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index b9acd9fe160c..2b53ac9f4935 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -5,6 +5,8 @@
#include <linux/math.h>
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_ddi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 1ad6c8a94b3d..fd92e6b89b43 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -36,9 +36,10 @@
#include <drm/drm_blend.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/drm_rect.h>
-#include "i915_drv.h"
+#include "i915_utils.h"
#include "i9xx_plane.h"
#include "intel_atomic_plane.h"
#include "intel_de.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
index 1d0b84b464c1..4981cc34da05 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
@@ -3,21 +3,21 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
#include "intel_crtc.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_sprite_uapi.h"
-static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
+static bool has_dst_key_in_primary_plane(struct intel_display *display)
{
- return DISPLAY_VER(dev_priv) >= 9;
+ return DISPLAY_VER(display) >= 9;
}
static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
const struct drm_intel_sprite_colorkey *set)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
*key = *set;
@@ -34,7 +34,7 @@ static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
* On SKL+ we want dst key enabled on
* the primary and not on the sprite.
*/
- if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
+ if (DISPLAY_VER(display) >= 9 && plane->id != PLANE_PRIMARY &&
set->flags & I915_SET_COLORKEY_DESTINATION)
key->flags = 0;
}
@@ -43,7 +43,6 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct intel_display *display = to_intel_display(dev);
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_intel_sprite_colorkey *set = data;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
@@ -61,7 +60,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
@@ -74,7 +73,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
* Also multiple planes can't do destination keying on the same
* pipe simultaneously.
*/
- if (DISPLAY_VER(dev_priv) >= 9 &&
+ if (DISPLAY_VER(display) >= 9 &&
to_intel_plane(plane)->id >= PLANE_3 &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
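The colorkey restrictions checked above can be modelled as a standalone
validator (illustrative only; the flag macros are hypothetical stand-ins for
the I915_SET_COLORKEY_* uapi flags, and the three checks mirror the ioctl):
source and destination keying are mutually exclusive, VLV/CHV reject
destination keying entirely, and on SKL+ the destination key lives on the
primary plane, so high sprite planes reject it too.

#include <stdbool.h>
#include <stdio.h>

#define KEY_DESTINATION	(1u << 0)
#define KEY_SOURCE	(1u << 1)

static int check_colorkey(unsigned int flags, bool vlv_chv,
			  int display_ver, bool plane_is_high_sprite)
{
	if ((flags & (KEY_DESTINATION | KEY_SOURCE)) ==
	    (KEY_DESTINATION | KEY_SOURCE))
		return -1;	/* -EINVAL in the driver */

	if (vlv_chv && (flags & KEY_DESTINATION))
		return -1;

	if (display_ver >= 9 && plane_is_high_sprite &&
	    (flags & KEY_DESTINATION))
		return -1;

	return 0;
}

int main(void)
{
	printf("%d\n", check_colorkey(KEY_SOURCE, false, 9, false));	/* 0 */
	printf("%d\n", check_colorkey(KEY_DESTINATION, true, 8, false)); /* -1 */
	return 0;
}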
@@ -99,7 +98,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
* On some platforms we have to configure
* the dst colorkey on the primary plane.
*/
- if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
+ if (!ret && has_dst_key_in_primary_plane(display)) {
struct intel_crtc *crtc =
intel_crtc_for_pipe(display,
to_intel_plane(plane)->pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index b8d14ed8a56e..c1014e74791f 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -3,8 +3,10 @@
* Copyright © 2019 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -92,11 +94,6 @@ static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
return dig_port->tc;
}
-static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
-{
- return to_i915(tc->dig_port->base.base.dev);
-}
-
static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
enum tc_port_mode mode)
{
@@ -219,10 +216,11 @@ __tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain doma
static void
tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
{
+ struct intel_display __maybe_unused *display = to_intel_display(tc->dig_port);
enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
- drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain);
+ drm_WARN_ON(display->drm, tc->lock_power_domain != domain);
#endif
__tc_cold_unblock(tc, domain, wakeref);
}
@@ -266,13 +264,13 @@ assert_tc_port_power_enabled(struct intel_tc_port *tc)
static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
u32 lane_mask;
- lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
+ lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
- drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
+ drm_WARN_ON(display->drm, lane_mask == 0xffffffff);
assert_tc_cold_blocked(tc);
lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
@@ -281,13 +279,13 @@ static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
u32 pin_mask;
- pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia));
+ pin_mask = intel_de_read(display, PORT_TX_DFLEXPA1(tc->phy_fia));
- drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
+ drm_WARN_ON(display->drm, pin_mask == 0xffffffff);
assert_tc_cold_blocked(tc);
return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
@@ -297,13 +295,12 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
intel_wakeref_t wakeref;
u32 val, pin_assignment;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
+ val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
pin_assignment =
REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);
@@ -369,7 +366,7 @@ static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT)
@@ -377,10 +374,10 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
assert_tc_cold_blocked(tc);
- if (DISPLAY_VER(i915) >= 20)
+ if (DISPLAY_VER(display) >= 20)
return lnl_tc_port_get_max_lane_count(dig_port);
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return mtl_tc_port_get_max_lane_count(dig_port);
return intel_tc_port_get_max_lane_count(dig_port);
@@ -389,20 +386,20 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
bool lane_reversal = dig_port->lane_reversal;
u32 val;
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
lane_reversal && tc->mode != TC_PORT_LEGACY);
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);
switch (required_lanes) {
@@ -423,16 +420,16 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
MISSING_CASE(required_lanes);
}
- intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
+ intel_de_write(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
}
static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
u32 live_status_mask)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 valid_hpd_mask;
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);
if (hweight32(live_status_mask) != 1)
return;
@@ -447,7 +444,7 @@ static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
return;
/* If live status mismatches the VBT flag, trust the live status. */
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
tc->port_name, live_status_mask, valid_hpd_mask);
@@ -490,21 +487,20 @@ icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
struct intel_digital_port *dig_port = tc->dig_port;
- u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
+ u32 isr_bit = display->hotplug.pch_hpd[dig_port->base.hpd_pin];
intel_wakeref_t wakeref;
u32 fia_isr;
u32 pch_isr;
u32 mask = 0;
with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref) {
- fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
- pch_isr = intel_de_read(i915, SDEISR);
+ fia_isr = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
+ pch_isr = intel_de_read(display, SDEISR);
}
if (fia_isr == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, nothing connected\n",
tc->port_name);
return mask;
@@ -531,14 +527,14 @@ static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
*/
static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPPMS(tc->phy_fia));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, assuming not ready\n",
tc->port_name);
return false;
@@ -550,14 +546,14 @@ static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
bool take)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, can't %s ownership\n",
tc->port_name, take ? "take" : "release");
@@ -568,21 +564,21 @@ static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
if (take)
val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
- intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);
+ intel_de_write(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);
return true;
}
static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, assume not owned\n",
tc->port_name);
return false;
@@ -619,30 +615,30 @@ static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
int max_lanes;
max_lanes = intel_tc_port_max_lane_count(dig_port);
if (tc->mode == TC_PORT_LEGACY) {
- drm_WARN_ON(&i915->drm, max_lanes != 4);
+ drm_WARN_ON(display->drm, max_lanes != 4);
return true;
}
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_DP_ALT);
/*
* Now we have to re-check the live state, in case the port recently
* became disconnected. Not necessary for legacy mode.
*/
if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
+ drm_dbg_kms(display->drm, "Port %s: PHY sudden disconnect\n",
tc->port_name);
return false;
}
if (max_lanes < required_lanes) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY max lanes %d < required lanes %d\n",
tc->port_name,
max_lanes, required_lanes);
@@ -655,7 +651,7 @@ static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
static bool icl_tc_phy_connect(struct intel_tc_port *tc,
int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
tc->lock_wakeref = tc_cold_block(tc);
@@ -664,8 +660,8 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc,
if ((!tc_phy_is_ready(tc) ||
!icl_tc_phy_take_ownership(tc, true)) &&
- !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
- drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n",
+ !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
+ drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership (ready %s)\n",
tc->port_name,
str_yes_no(tc_phy_is_ready(tc)));
goto out_unblock_tc_cold;
@@ -733,14 +729,13 @@ tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
intel_wakeref_t wakeref;
u32 val;
with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref)
- val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1));
+ val = intel_de_read(display, PORT_TX_DFLEXDPSP(FIA1));
- drm_WARN_ON(&i915->drm, val == 0xffffffff);
+ drm_WARN_ON(display->drm, val == 0xffffffff);
tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
}
@@ -775,19 +770,18 @@ adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
struct intel_digital_port *dig_port = tc->dig_port;
enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
- u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin];
- u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
+ u32 cpu_isr_bits = display->hotplug.hpd[hpd_pin];
+ u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
intel_wakeref_t wakeref;
u32 cpu_isr;
u32 pch_isr;
u32 mask = 0;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
- cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR);
- pch_isr = intel_de_read(i915, SDEISR);
+ cpu_isr = intel_de_read(display, GEN11_DE_HPD_ISR);
+ pch_isr = intel_de_read(display, SDEISR);
}
if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
@@ -810,15 +804,15 @@ static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
*/
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
u32 val;
assert_display_core_power_enabled(tc);
- val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
+ val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, assuming not ready\n",
tc->port_name);
return false;
@@ -830,12 +824,12 @@ static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
bool take)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
assert_tc_port_power_enabled(tc);
- intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
+ intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);
return true;
@@ -843,13 +837,13 @@ static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
u32 val;
assert_tc_port_power_enabled(tc);
- val = intel_de_read(i915, DDI_BUF_CTL(port));
+ val = intel_de_read(display, DDI_BUF_CTL(port));
return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
@@ -872,7 +866,6 @@ static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
enum intel_display_power_domain port_power_domain =
tc_port_power_domain(tc);
intel_wakeref_t port_wakeref;
@@ -885,15 +878,15 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
port_wakeref = intel_display_power_get(display, port_power_domain);
if (!adlp_tc_phy_take_ownership(tc, true) &&
- !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
- drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership\n",
+ !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
+ drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership\n",
tc->port_name);
goto out_put_port_power;
}
if (!tc_phy_is_ready(tc) &&
- !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
+ !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
+ drm_dbg_kms(display->drm, "Port %s: PHY not ready\n",
tc->port_name);
goto out_release_phy;
}
@@ -965,19 +958,18 @@ static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
struct intel_digital_port *dig_port = tc->dig_port;
enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
- u32 pica_isr_bits = i915->display.hotplug.hpd[hpd_pin];
- u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
+ u32 pica_isr_bits = display->hotplug.hpd[hpd_pin];
+ u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
intel_wakeref_t wakeref;
u32 pica_isr;
u32 pch_isr;
u32 mask = 0;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
- pica_isr = intel_de_read(i915, PICAINTERRUPT_ISR);
- pch_isr = intel_de_read(i915, SDEISR);
+ pica_isr = intel_de_read(display, PICAINTERRUPT_ISR);
+ pch_isr = intel_de_read(display, SDEISR);
}
if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK))
@@ -994,22 +986,22 @@ static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
static bool
xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
- i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
assert_tc_cold_blocked(tc);
- return intel_de_read(i915, reg) & XELPDP_TCSS_POWER_STATE;
+ return intel_de_read(display, reg) & XELPDP_TCSS_POWER_STATE;
}
static bool
xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: timeout waiting for TCSS power to get %s\n",
str_enabled_disabled(enabled),
tc->port_name);
@@ -1069,7 +1061,7 @@ static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool ena
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
__xelpdp_tc_phy_enable_tcss_power(tc, enable);
@@ -1082,7 +1074,7 @@ static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enabl
return true;
out_disable:
- if (drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY))
+ if (drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY))
return false;
if (!enable)
@@ -1096,35 +1088,35 @@ out_disable:
static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
- i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, reg);
+ val = intel_de_read(display, reg);
if (take)
val |= XELPDP_TC_PHY_OWNERSHIP;
else
val &= ~XELPDP_TC_PHY_OWNERSHIP;
- intel_de_write(i915, reg, val);
+ intel_de_write(display, reg, val);
}
static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
- i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
assert_tc_cold_blocked(tc);
- return intel_de_read(i915, reg) & XELPDP_TC_PHY_OWNERSHIP;
+ return intel_de_read(display, reg) & XELPDP_TC_PHY_OWNERSHIP;
}
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
intel_wakeref_t tc_cold_wref;
enum intel_display_power_domain domain;
@@ -1134,7 +1126,7 @@ static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
if (tc->mode != TC_PORT_DISCONNECTED)
tc->lock_wakeref = tc_cold_block(tc);
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
(tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
!xelpdp_tc_phy_tcss_power_is_enabled(tc));
@@ -1207,13 +1199,13 @@ tc_phy_cold_off_domain(struct intel_tc_port *tc)
static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 mask;
mask = tc->phy_ops->hpd_live_status(tc);
/* The sink can be connected only in a single mode. */
- drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1);
+ drm_WARN_ON_ONCE(display->drm, hweight32(mask) > 1);
return mask;
}
@@ -1236,9 +1228,9 @@ static void tc_phy_get_hw_state(struct intel_tc_port *tc)
static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
bool phy_is_ready, bool phy_is_owned)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
- drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready);
+ drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready);
return phy_is_ready && phy_is_owned;
}
@@ -1246,8 +1238,7 @@ static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
static bool tc_phy_is_connected(struct intel_tc_port *tc,
enum icl_port_dpll_id port_pll_type)
{
- struct intel_encoder *encoder = &tc->dig_port->base;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(tc->dig_port);
bool phy_is_ready = tc_phy_is_ready(tc);
bool phy_is_owned = tc_phy_is_owned(tc);
bool is_connected;
@@ -1257,7 +1248,7 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc,
else
is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
tc->port_name,
str_yes_no(is_connected),
@@ -1270,10 +1261,10 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc,
static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
if (wait_for(tc_phy_is_ready(tc), 500)) {
- drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
+ drm_err(display->drm, "Port %s: timeout waiting for PHY ready\n",
tc->port_name);
return false;
@@ -1343,7 +1334,7 @@ get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
static enum tc_port_mode
tc_phy_get_current_mode(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
bool phy_is_ready;
bool phy_is_owned;
@@ -1363,11 +1354,11 @@ tc_phy_get_current_mode(struct intel_tc_port *tc)
if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
} else {
- drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT);
+ drm_WARN_ON(display->drm, live_mode == TC_PORT_TBT_ALT);
mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
tc->port_name,
tc_port_mode_name(mode),
@@ -1407,7 +1398,7 @@ tc_phy_get_target_mode(struct intel_tc_port *tc)
static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 live_status_mask = tc_phy_hpd_live_status(tc);
bool connected;
@@ -1421,7 +1412,7 @@ static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
connected = tc->phy_ops->connect(tc, required_lanes);
}
- drm_WARN_ON(&i915->drm, !connected);
+ drm_WARN_ON(display->drm, !connected);
}
static void tc_phy_disconnect(struct intel_tc_port *tc)
@@ -1491,12 +1482,12 @@ static void __intel_tc_port_put_link(struct intel_tc_port *tc)
static bool tc_port_is_enabled(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
assert_tc_port_power_enabled(tc);
- return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
+ return intel_de_read(display, DDI_BUF_CTL(dig_port->base.port)) &
DDI_BUF_CTL_ENABLE;
}
@@ -1509,15 +1500,15 @@ static bool tc_port_is_enabled(struct intel_tc_port *tc)
*/
void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
bool update_mode = false;
mutex_lock(&tc->lock);
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
- drm_WARN_ON(&i915->drm, tc->lock_wakeref);
- drm_WARN_ON(&i915->drm, tc->link_refcount);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);
+ drm_WARN_ON(display->drm, tc->lock_wakeref);
+ drm_WARN_ON(display->drm, tc->link_refcount);
tc_phy_get_hw_state(tc);
/*
@@ -1540,8 +1531,8 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
if (!tc_port_is_enabled(tc)) {
update_mode = true;
} else if (tc->mode == TC_PORT_DISCONNECTED) {
- drm_WARN_ON(&i915->drm, !tc->legacy_port);
- drm_err(&i915->drm,
+ drm_WARN_ON(display->drm, !tc->legacy_port);
+ drm_err(display->drm,
"Port %s: PHY disconnected on enabled port, connecting it\n",
tc->port_name);
update_mode = true;
@@ -1556,28 +1547,28 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
mutex_unlock(&tc->lock);
}
-static bool tc_port_has_active_links(struct intel_tc_port *tc,
- const struct intel_crtc_state *crtc_state)
+static bool tc_port_has_active_streams(struct intel_tc_port *tc,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
- int active_links = 0;
+ int active_streams = 0;
if (dig_port->dp.is_mst) {
/* TODO: get the PLL type for MST, once HW readout is done for it. */
- active_links = intel_dp_mst_encoder_active_links(dig_port);
+ active_streams = intel_dp_mst_active_streams(&dig_port->dp);
} else if (crtc_state && crtc_state->hw.active) {
pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
- active_links = 1;
+ active_streams = 1;
}
- if (active_links && !tc_phy_is_connected(tc, pll_type))
- drm_err(&i915->drm,
- "Port %s: PHY disconnected with %d active link(s)\n",
- tc->port_name, active_links);
+ if (active_streams && !tc_phy_is_connected(tc, pll_type))
+ drm_err(display->drm,
+ "Port %s: PHY disconnected with %d active stream(s)\n",
+ tc->port_name, active_streams);
- return active_links;
+ return active_streams;
}
/**
@@ -1595,13 +1586,13 @@ static bool tc_port_has_active_links(struct intel_tc_port *tc,
void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
mutex_lock(&tc->lock);
- drm_WARN_ON(&i915->drm, tc->link_refcount != 1);
- if (!tc_port_has_active_links(tc, crtc_state)) {
+ drm_WARN_ON(display->drm, tc->link_refcount != 1);
+ if (!tc_port_has_active_streams(tc, crtc_state)) {
/*
* TBT-alt is the default mode in any case the PHY ownership is not
* held (regardless of the sink's connected live state), so
@@ -1610,7 +1601,7 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
*/
if (tc->init_mode != TC_PORT_TBT_ALT &&
tc->init_mode != TC_PORT_DISCONNECTED)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
tc->port_name,
tc_port_mode_name(tc->init_mode));
@@ -1618,7 +1609,7 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
__intel_tc_port_put_link(tc);
}
- drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
+ drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s)\n",
tc->port_name,
tc_port_mode_name(tc->mode));
@@ -1637,12 +1628,12 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
*/
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_tc_port *tc = to_tc_port(dig_port);
u32 mask = ~0;
- drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port));
+ drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port));
if (tc->mode != TC_PORT_DISCONNECTED)
mask = BIT(tc->mode);
@@ -1677,14 +1668,14 @@ static int reset_link_commit(struct intel_tc_port *tc,
struct intel_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base);
struct intel_crtc *crtc;
u8 pipe_mask;
int ret;
- ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex, ctx);
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex, ctx);
if (ret)
return ret;
@@ -1695,7 +1686,7 @@ static int reset_link_commit(struct intel_tc_port *tc,
if (!pipe_mask)
return 0;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
struct intel_crtc_state *crtc_state;
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
@@ -1713,13 +1704,13 @@ static int reset_link_commit(struct intel_tc_port *tc,
static int reset_link(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *_state;
struct intel_atomic_state *state;
int ret;
- _state = drm_atomic_state_alloc(&i915->drm);
+ _state = drm_atomic_state_alloc(display->drm);
if (!_state)
return -ENOMEM;
@@ -1738,21 +1729,21 @@ static void intel_tc_port_link_reset_work(struct work_struct *work)
{
struct intel_tc_port *tc =
container_of(work, struct intel_tc_port, link_reset_work.work);
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
int ret;
if (!__intel_tc_port_link_needs_reset(tc))
return;
- mutex_lock(&i915->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: TypeC DP-alt sink disconnected, resetting link\n",
tc->port_name);
ret = reset_link(tc);
- drm_WARN_ON(&i915->drm, ret);
+ drm_WARN_ON(display->drm, ret);
- mutex_unlock(&i915->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
}
bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
@@ -1780,7 +1771,7 @@ void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
static void __intel_tc_port_lock(struct intel_tc_port *tc,
int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
mutex_lock(&tc->lock);
@@ -1790,9 +1781,8 @@ static void __intel_tc_port_lock(struct intel_tc_port *tc,
intel_tc_port_update_mode(tc, required_lanes,
false);
- drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED);
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT &&
- !tc_phy_is_owned(tc));
+ drm_WARN_ON(display->drm, tc->mode == TC_PORT_DISCONNECTED);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_TBT_ALT && !tc_phy_is_owned(tc));
}
void intel_tc_port_lock(struct intel_digital_port *dig_port)
@@ -1885,12 +1875,12 @@ void intel_tc_port_put_link(struct intel_digital_port *dig_port)
int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc;
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
- if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
+ if (drm_WARN_ON(display->drm, tc_port == TC_PORT_NONE))
return -EINVAL;
tc = kzalloc(sizeof(*tc), GFP_KERNEL);
@@ -1900,11 +1890,11 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
dig_port->tc = tc;
tc->dig_port = dig_port;
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
tc->phy_ops = &xelpdp_tc_phy_ops;
- else if (DISPLAY_VER(i915) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
tc->phy_ops = &adlp_tc_phy_ops;
- else if (DISPLAY_VER(i915) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
tc->phy_ops = &tgl_tc_phy_ops;
else
tc->phy_ops = &icl_tc_phy_ops;
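The conversions above all follow one pattern; a minimal sketch on a hypothetical function (names illustrative, not part of the patch):

static void example_conversion(struct intel_digital_port *dig_port)
{
	/* old: struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); */
	struct intel_display *display = to_intel_display(dig_port);

	/* &i915->drm becomes display->drm, which is already a pointer */
	drm_dbg_kms(display->drm, "example conversion\n");
}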
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 5dbe857ea85b..acf0b3733908 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -33,15 +33,15 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
-#include "intel_display_irq.h"
#include "intel_display_driver.h"
+#include "intel_display_irq.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
@@ -1585,19 +1585,17 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
{
struct intel_display *display = to_intel_display(connector->dev);
struct intel_crtc *crtc = to_intel_crtc(connector->state->crtc);
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
int type;
/* Disable TV interrupts around load detect or we'll recurse */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- spin_lock_irq(&dev_priv->irq_lock);
- i915_disable_pipestat(dev_priv, 0,
+ spin_lock_irq(&display->irq.lock);
+ i915_disable_pipestat(display, 0,
PIPE_HOTPLUG_INTERRUPT_STATUS |
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
save_tv_dac = tv_dac = intel_de_read(display, TV_DAC);
@@ -1668,11 +1666,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, 0,
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, 0,
PIPE_HOTPLUG_INTERRUPT_STATUS |
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
return type;
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 7b240ce681a0..139fa5deba80 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -224,12 +224,13 @@ int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
*/
if (DISPLAY_VER(display) >= 20 || display->platform.battlemage)
return 1;
- else if (DISPLAY_VER(display) == 2)
- return -1;
- else if (HAS_DDI(display) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return 2;
- else
+ else if (DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell)
+ return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ? 2 : 1;
+ else if (DISPLAY_VER(display) >= 3)
return 1;
+ else
+ return -1;
}
/*
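The ladder above only selects the per-platform offset; a minimal sketch of how such an offset is typically applied to the raw scanline readback (hypothetical helper, assuming the usual wrap-around at vtotal):

static int example_apply_scanline_offset(int hw_scanline, int offset, int vtotal)
{
	/* "+ vtotal" keeps the sum non-negative for the offset == -1 case */
	return (hw_scanline + vtotal + offset) % vtotal;
}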
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 3ed64c17bdff..8e799e225af1 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -9,6 +9,7 @@
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include "i915_utils.h"
#include "intel_crtc.h"
@@ -259,6 +260,15 @@ static int intel_dsc_slice_dimensions_valid(struct intel_crtc_state *pipe_config
return 0;
}
+static bool is_dsi_dsc_1_1(struct intel_crtc_state *crtc_state)
+{
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+
+ return vdsc_cfg->dsc_version_major == 1 &&
+ vdsc_cfg->dsc_version_minor == 1 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI);
+}
+
int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(pipe_config);
@@ -317,8 +327,19 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
* From XE_LPD onwards we support compression bpps in steps of 1
* up to uncompressed bpp-1, hence add calculations for all the rc
* parameters
+ *
+ * We don't want to calculate all rc parameters when the panel
+ * is MIPI DSI and it's using DSC 1.1. The reason is that some
+ * DSI panel vendors have hardcoded PPS params in the VBT, causing
+ * the parameters sent from the source, which are derived through
+ * interpolation, to differ from the params the panel expects.
+ * This causes visible noise on the display.
+ * Furthermore, for DSI panels we currently use a bits_per_pixel
+ * (compressed bpp) hardcoded from the VBT (unlike other encoders,
+ * where we find the optimum compressed bpp), so we don't need to
+ * rely on interpolation, as we can get the required rc parameters
+ * from the tables.
*/
- if (DISPLAY_VER(display) >= 13) {
+ if (DISPLAY_VER(display) >= 13 && !is_dsi_dsc_1_1(pipe_config)) {
calculate_rc_params(vdsc_cfg);
} else {
if ((compressed_bpp == 8 ||
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index 684b5d1bc87c..05d140c8032d 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -4,15 +4,20 @@
*/
#include <linux/delay.h>
+#include <linux/pci.h>
#include <linux/vgaarb.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
#include <video/vga.h>
+
#include "soc/intel_gmch.h"
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display.h"
#include "intel_vga.h"
+#include "intel_vga_regs.h"
static i915_reg_t intel_vga_cntrl_reg(struct intel_display *display)
{
@@ -24,16 +29,42 @@ static i915_reg_t intel_vga_cntrl_reg(struct intel_display *display)
return VGACNTRL;
}
+static bool has_vga_pipe_sel(struct intel_display *display)
+{
+ if (display->platform.i845g ||
+ display->platform.i865g)
+ return false;
+
+ if (display->platform.valleyview ||
+ display->platform.cherryview)
+ return true;
+
+ return DISPLAY_VER(display) < 7;
+}
+
/* Disable the VGA plane that we never use */
void intel_vga_disable(struct intel_display *display)
{
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
i915_reg_t vga_reg = intel_vga_cntrl_reg(display);
+ enum pipe pipe;
+ u32 tmp;
u8 sr1;
- if (intel_de_read(display, vga_reg) & VGA_DISP_DISABLE)
+ tmp = intel_de_read(display, vga_reg);
+ if (tmp & VGA_DISP_DISABLE)
return;
+ if (display->platform.cherryview)
+ pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK_CHV, tmp);
+ else if (has_vga_pipe_sel(display))
+ pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK, tmp);
+ else
+ pipe = PIPE_A;
+
+ drm_dbg_kms(display->drm, "Disabling VGA plane on pipe %c\n",
+ pipe_name(pipe));
+
/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
outb(0x01, VGA_SEQ_I);
@@ -46,39 +77,6 @@ void intel_vga_disable(struct intel_display *display)
intel_de_posting_read(display, vga_reg);
}
-void intel_vga_redisable_power_on(struct intel_display *display)
-{
- i915_reg_t vga_reg = intel_vga_cntrl_reg(display);
-
- if (!(intel_de_read(display, vga_reg) & VGA_DISP_DISABLE)) {
- drm_dbg_kms(display->drm,
- "Something enabled VGA plane, disabling it\n");
- intel_vga_disable(display);
- }
-}
-
-void intel_vga_redisable(struct intel_display *display)
-{
- intel_wakeref_t wakeref;
-
- /*
- * This function can be called both from intel_modeset_setup_hw_state or
- * at a very early point in our resume sequence, where the power well
- * structures are not yet restored. Since this function is at a very
- * paranoid "someone might have enabled VGA while we were not looking"
- * level, just check if the power well is enabled instead of trying to
- * follow the "don't touch the power well if we don't need it" policy
- * the rest of the driver uses.
- */
- wakeref = intel_display_power_get_if_enabled(display, POWER_DOMAIN_VGA);
- if (!wakeref)
- return;
-
- intel_vga_redisable_power_on(display);
-
- intel_display_power_put(display, POWER_DOMAIN_VGA, wakeref);
-}
-
void intel_vga_reset_io_mem(struct intel_display *display)
{
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
diff --git a/drivers/gpu/drm/i915/display/intel_vga.h b/drivers/gpu/drm/i915/display/intel_vga.h
index 824dfc32a199..16d699f3b641 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.h
+++ b/drivers/gpu/drm/i915/display/intel_vga.h
@@ -10,8 +10,6 @@ struct intel_display;
void intel_vga_reset_io_mem(struct intel_display *display);
void intel_vga_disable(struct intel_display *display);
-void intel_vga_redisable(struct intel_display *display);
-void intel_vga_redisable_power_on(struct intel_display *display);
int intel_vga_register(struct intel_display *display);
void intel_vga_unregister(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_vga_regs.h b/drivers/gpu/drm/i915/display/intel_vga_regs.h
new file mode 100644
index 000000000000..cbacced1a69f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vga_regs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_VGA_REGS_H__
+#define __INTEL_VGA_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define VGACNTRL _MMIO(0x71400)
+#define VLV_VGACNTRL _MMIO(VLV_DISPLAY_BASE + 0x71400)
+#define CPU_VGACNTRL _MMIO(0x41000)
+#define VGA_DISP_DISABLE REG_BIT(31)
+#define VGA_2X_MODE REG_BIT(30) /* pre-ilk */
+#define VGA_PIPE_SEL_MASK REG_BIT(29) /* pre-ivb */
+#define VGA_PIPE_SEL(pipe) REG_FIELD_PREP(VGA_PIPE_SEL_MASK, (pipe))
+#define VGA_PIPE_SEL_MASK_CHV REG_GENMASK(29, 28) /* chv */
+#define VGA_PIPE_SEL_CHV(pipe) REG_FIELD_PREP(VGA_PIPE_SEL_MASK_CHV, (pipe))
+#define VGA_BORDER_ENABLE REG_BIT(26)
+#define VGA_PIPE_CSC_ENABLE REG_BIT(24) /* ilk+ */
+#define VGA_CENTERING_ENABLE_MASK REG_GENMASK(25, 24) /* pre-ilk */
+#define VGA_PALETTE_READ_SEL REG_BIT(23) /* pre-ivb */
+#define VGA_PALETTE_A_WRITE_DISABLE REG_BIT(22) /* pre-ivb */
+#define VGA_PALETTE_B_WRITE_DISABLE REG_BIT(21) /* pre-ivb */
+#define VGA_LEGACY_8BIT_PALETTE_ENABLE REG_BIT(20)
+#define VGA_PALETTE_BYPASS REG_BIT(19)
+#define VGA_NINE_DOT_DISABLE REG_BIT(18)
+#define VGA_PALETTE_READ_SEL_HI_CHV REG_BIT(15) /* chv */
+#define VGA_PALETTE_C_WRITE_DISABLE_CHV REG_BIT(14) /* chv */
+#define VGA_ACTIVE_THROTTLING_MASK REG_GENMASK(15, 12) /* ilk+ */
+#define VGA_BLANK_THROTTLING_MASK REG_GENMASK(11, 8) /* ilk+ */
+#define VGA_BLINK_DUTY_CYCLE_MASK REG_GENMASK(7, 6)
+#define VGA_VSYNC_BLINK_RATE_MASK REG_GENMASK(5, 0)
+
+#endif /* __INTEL_VGA_REGS_H__ */
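A worked decode against these masks (register value purely illustrative): with tmp = 0x20000000 read from VGACNTRL on CHV, VGA_DISP_DISABLE (bit 31) is clear, so the VGA plane is still enabled, and REG_FIELD_GET(VGA_PIPE_SEL_MASK_CHV, tmp) extracts bits 29:28 = 0x2, i.e. pipe C.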
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index cac49319026d..c6565baf815a 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -4,6 +4,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -32,6 +34,8 @@ bool intel_vrr_is_capable(struct intel_connector *connector)
return false;
fallthrough;
case DRM_MODE_CONNECTOR_DisplayPort:
+ if (connector->mst.dp)
+ return false;
intel_dp = intel_attached_dp(connector);
if (!drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd))
@@ -182,7 +186,8 @@ is_cmrr_frac_required(struct intel_crtc_state *crtc_state)
int calculated_refresh_k, actual_refresh_k, pixel_clock_per_line;
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- if (!HAS_CMRR(display))
+ /* Avoid CMRR for now until we have VRR with fixed timings working */
+ if (!HAS_CMRR(display) || true)
return false;
actual_refresh_k =
@@ -222,6 +227,121 @@ cmrr_get_vtotal(struct intel_crtc_state *crtc_state, bool video_mode_required)
return vtotal;
}
+static
+void intel_vrr_compute_cmrr_timings(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->cmrr.enable = true;
+ /*
+ * TODO: Compute precise target refresh rate to determine
+ * if video_mode_required should be true. Currently set to
+ * false due to uncertainty about the precise target
+ * refresh rate.
+ */
+ crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false);
+ crtc_state->vrr.vmin = crtc_state->vrr.vmax;
+ crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
+
+static
+void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->vrr.enable = true;
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
+
+/*
+ * For fixed refresh rate mode Vmin, Vmax and Flipline all are set to
+ * Vtotal value.
+ */
+static
+int intel_vrr_fixed_rr_vtotal(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int crtc_vtotal = crtc_state->hw.adjusted_mode.crtc_vtotal;
+
+ if (DISPLAY_VER(display) >= 13)
+ return crtc_vtotal;
+ else
+ return crtc_vtotal -
+ intel_vrr_real_vblank_delay(crtc_state);
+}
+
+static
+int intel_vrr_fixed_rr_vmax(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_fixed_rr_vtotal(crtc_state);
+}
+
+static
+int intel_vrr_fixed_rr_vmin(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return intel_vrr_fixed_rr_vtotal(crtc_state) -
+ intel_vrr_flipline_offset(display);
+}
+
+static
+int intel_vrr_fixed_rr_flipline(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_fixed_rr_vtotal(crtc_state);
+}
+
+void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!intel_vrr_possible(crtc_state))
+ return;
+
+ intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
+ intel_vrr_fixed_rr_vmin(crtc_state) - 1);
+ intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
+ intel_vrr_fixed_rr_vmax(crtc_state) - 1);
+ intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
+ intel_vrr_fixed_rr_flipline(crtc_state) - 1);
+}
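Note the minus-one programming convention in the writes above: TRANS_VRR_VMIN/VMAX/FLIPLINE hold the line count minus one, so for example a vmin of 1125 lines is written as 1124, and the readback path in intel_vrr_get_config() further down adds the one back.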
+
+static
+void intel_vrr_compute_fixed_rr_timings(struct intel_crtc_state *crtc_state)
+{
+ /*
+ * For fixed rr, vmin = vmax = flipline.
+ * vmin is already set to crtc_vtotal; set vmax and flipline to the same.
+ */
+ crtc_state->vrr.vmax = crtc_state->hw.adjusted_mode.crtc_vtotal;
+ crtc_state->vrr.flipline = crtc_state->hw.adjusted_mode.crtc_vtotal;
+}
+
+static
+int intel_vrr_compute_vmin(struct intel_crtc_state *crtc_state)
+{
+ /*
+ * To make fixed rr and vrr work seamlessly, the guardband/pipeline full
+ * should be set such that it satisfies both the fixed and variable
+ * timings.
+ * For this, set vmin to crtc_vtotal. With this we never need to
+ * change anything to do with the guardband.
+ */
+ return crtc_state->hw.adjusted_mode.crtc_vtotal;
+}
+
+static
+int intel_vrr_compute_vmax(struct intel_connector *connector,
+ const struct drm_display_mode *adjusted_mode)
+{
+ const struct drm_display_info *info = &connector->base.display_info;
+ int vmax;
+
+ vmax = adjusted_mode->crtc_clock * 1000 /
+ (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq);
+ vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal);
+
+ return vmax;
+}
+
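To make the vmax computation concrete, a worked example with illustrative numbers (not taken from the patch): for crtc_clock = 148500 kHz, crtc_htotal = 2200 and a monitor min_vfreq of 48 Hz, vmax = 148500 * 1000 / (2200 * 48) = 1406 lines. Stretching vtotal from a nominal 1125 lines up to 1406 takes the refresh rate from 60 Hz down to the panel's 48 Hz floor, and the max_t() clamp guarantees vmax never drops below crtc_vtotal.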
void
intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -232,14 +352,9 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct intel_dp *intel_dp = intel_attached_dp(connector);
bool is_edp = intel_dp_is_edp(intel_dp);
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- const struct drm_display_info *info = &connector->base.display_info;
int vmin, vmax;
- /*
- * FIXME all joined pipes share the same transcoder.
- * Need to account for that during VRR toggle/push/etc.
- */
- if (crtc_state->joiner_pipes)
+ if (!HAS_VRR(display))
return;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -247,28 +362,40 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
crtc_state->vrr.in_range =
intel_vrr_is_in_range(connector, drm_mode_vrefresh(adjusted_mode));
- if (!crtc_state->vrr.in_range)
- return;
-
- if (HAS_LRR(display))
- crtc_state->update_lrr = true;
- vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000,
- adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq);
- vmax = adjusted_mode->crtc_clock * 1000 /
- (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq);
+ /*
+ * Allow fixed refresh rate with the VRR Timing Generator.
+ * For now force vrr.in_range to false, which allows fixed_rr but
+ * skips actual VRR and LRR.
+ * TODO: For actual VRR with joiner, we need to figure out how to
+ * correctly sequence transcoder level stuff vs. pipe level stuff
+ * in the commit.
+ */
+ if (crtc_state->joiner_pipes)
+ crtc_state->vrr.in_range = false;
- vmin = max_t(int, vmin, adjusted_mode->crtc_vtotal);
- vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal);
+ vmin = intel_vrr_compute_vmin(crtc_state);
- if (vmin >= vmax)
- return;
+ if (crtc_state->vrr.in_range) {
+ if (HAS_LRR(display))
+ crtc_state->update_lrr = true;
+ vmax = intel_vrr_compute_vmax(connector, adjusted_mode);
+ } else {
+ vmax = vmin;
+ }
crtc_state->vrr.vmin = vmin;
crtc_state->vrr.vmax = vmax;
crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+ if (crtc_state->uapi.vrr_enabled && vmin < vmax)
+ intel_vrr_compute_vrr_timings(crtc_state);
+ else if (is_cmrr_frac_required(crtc_state) && is_edp)
+ intel_vrr_compute_cmrr_timings(crtc_state);
+ else
+ intel_vrr_compute_fixed_rr_timings(crtc_state);
+
/*
* flipline determines the min vblank length the hardware will
* generate, and on ICL/TGL flipline>=vmin+1, hence we reduce
@@ -276,29 +403,6 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
*/
crtc_state->vrr.vmin -= intel_vrr_flipline_offset(display);
- /*
- * When panel is VRR capable and userspace has
- * not enabled adaptive sync mode then Fixed Average
- * Vtotal mode should be enabled.
- */
- if (crtc_state->uapi.vrr_enabled) {
- crtc_state->vrr.enable = true;
- crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
- } else if (is_cmrr_frac_required(crtc_state) && is_edp) {
- crtc_state->vrr.enable = true;
- crtc_state->cmrr.enable = true;
- /*
- * TODO: Compute precise target refresh rate to determine
- * if video_mode_required should be true. Currently set to
- * false due to uncertainty about the precise target
- * refresh Rate.
- */
- crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false);
- crtc_state->vrr.vmin = crtc_state->vrr.vmax;
- crtc_state->vrr.flipline = crtc_state->vrr.vmin;
- crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
- }
-
if (HAS_AS_SDP(display)) {
crtc_state->vrr.vsync_start =
(crtc_state->hw.adjusted_mode.crtc_vtotal -
@@ -340,7 +444,10 @@ static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- if (DISPLAY_VER(display) >= 13)
+ if (DISPLAY_VER(display) >= 14)
+ return VRR_CTL_FLIP_LINE_EN |
+ XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
+ else if (DISPLAY_VER(display) >= 13)
return VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
else
@@ -380,14 +487,11 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
lower_32_bits(crtc_state->cmrr.cmrr_n));
}
- intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
- crtc_state->vrr.vmin - 1);
- intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
- crtc_state->vrr.vmax - 1);
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- trans_vrr_ctl(crtc_state));
- intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
- crtc_state->vrr.flipline - 1);
+ intel_vrr_set_fixed_rr_timings(crtc_state);
+
+ if (!intel_vrr_always_use_vrr_tg(display) && !crtc_state->vrr.enable)
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ trans_vrr_ctl(crtc_state));
if (HAS_AS_SDP(display))
intel_de_write(display,
@@ -461,6 +565,17 @@ bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state)
return intel_de_read(display, TRANS_PUSH(display, cpu_transcoder)) & TRANS_PUSH_SEND;
}
+bool intel_vrr_always_use_vrr_tg(struct intel_display *display)
+{
+ if (!HAS_VRR(display))
+ return false;
+
+ if (DISPLAY_VER(display) >= 30)
+ return true;
+
+ return false;
+}
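With this helper, ownership of TRANS_VRR_CTL splits in the hunks that follow: where it returns true (DISPLAY_VER >= 30), the timing generator is enabled once in intel_vrr_transcoder_enable() and left running, so intel_vrr_enable()/intel_vrr_disable() only reprogram vmin/vmax/flipline and the push bit; on older platforms the pre-existing enable/disable paths keep writing the control register.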
+
void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -469,16 +584,25 @@ void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
if (!crtc_state->vrr.enable)
return;
+ intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
+ crtc_state->vrr.vmin - 1);
+ intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
+ crtc_state->vrr.vmax - 1);
+ intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
+ crtc_state->vrr.flipline - 1);
+
intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
TRANS_PUSH_EN);
- if (crtc_state->cmrr.enable) {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
- trans_vrr_ctl(crtc_state));
- } else {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+ if (!intel_vrr_always_use_vrr_tg(display)) {
+ if (crtc_state->cmrr.enable) {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
+ trans_vrr_ctl(crtc_state));
+ } else {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+ }
}
}
@@ -490,24 +614,77 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
if (!old_crtc_state->vrr.enable)
return;
+ if (!intel_vrr_always_use_vrr_tg(display)) {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ trans_vrr_ctl(old_crtc_state));
+ intel_de_wait_for_clear(display,
+ TRANS_VRR_STATUS(display, cpu_transcoder),
+ VRR_STATUS_VRR_EN_LIVE, 1000);
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
+ }
+
+ intel_vrr_set_fixed_rr_timings(old_crtc_state);
+}
+
+void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!HAS_VRR(display))
+ return;
+
+ if (!intel_vrr_possible(crtc_state))
+ return;
+
+ if (!intel_vrr_always_use_vrr_tg(display)) {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ trans_vrr_ctl(crtc_state));
+ return;
+ }
+
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
+ TRANS_PUSH_EN);
+
intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- trans_vrr_ctl(old_crtc_state));
- intel_de_wait_for_clear(display,
- TRANS_VRR_STATUS(display, cpu_transcoder),
+ VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+}
+
+void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!HAS_VRR(display))
+ return;
+
+ if (!intel_vrr_possible(crtc_state))
+ return;
+
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), 0);
+
+ intel_de_wait_for_clear(display, TRANS_VRR_STATUS(display, cpu_transcoder),
VRR_STATUS_VRR_EN_LIVE, 1000);
intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
}
+bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->vrr.flipline &&
+ crtc_state->vrr.flipline == crtc_state->vrr.vmax &&
+ crtc_state->vrr.flipline == intel_vrr_vmin_flipline(crtc_state);
+}
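As a reading of the check: this predicate recovers from the programmed registers exactly the state written by intel_vrr_set_fixed_rr_timings() above, since flipline == vmax together with flipline matching the vmin-derived flipline only holds when vmin, vmax and flipline have all collapsed onto vtotal.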
+
void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 trans_vrr_ctl, trans_vrr_vsync;
+ bool vrr_enable;
trans_vrr_ctl = intel_de_read(display,
TRANS_VRR_CTL(display, cpu_transcoder));
- crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
if (HAS_CMRR(display))
crtc_state->cmrr.enable = (trans_vrr_ctl & VRR_CTL_CMRR_ENABLE);
@@ -536,6 +713,16 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
crtc_state->vrr.vmin = intel_de_read(display,
TRANS_VRR_VMIN(display, cpu_transcoder)) + 1;
+ /*
+ * For platforms that always use the VRR Timing Generator, the
+ * VTOTAL.Vtotal bits are not filled. Since on these platforms
+ * TRANS_VRR_VMIN is always programmed with crtc_vtotal, read it
+ * back to recover the vtotal for adjusted_mode.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_state->hw.adjusted_mode.crtc_vtotal =
+ intel_vrr_vmin_vtotal(crtc_state);
+
if (HAS_AS_SDP(display)) {
trans_vrr_vsync =
intel_de_read(display,
@@ -547,6 +734,18 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
}
}
+ vrr_enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
+
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_state->vrr.enable = vrr_enable && !intel_vrr_is_fixed_rr(crtc_state);
+ else
+ crtc_state->vrr.enable = vrr_enable;
+
+ /*
+ * TODO: For both VRR and CMRR the I915_MODE_FLAG_VRR flag is set in
+ * mode_flags. Since CMRR is currently disabled, set this flag only
+ * for VRR for now. Keep this in mind when re-enabling CMRR.
+ */
if (crtc_state->vrr.enable)
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index 514822577e8a..38bf9996b883 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -13,6 +13,7 @@ struct intel_atomic_state;
struct intel_connector;
struct intel_crtc_state;
struct intel_dsb;
+struct intel_display;
bool intel_vrr_is_capable(struct intel_connector *connector);
bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh);
@@ -35,5 +36,10 @@ int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state);
+bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state);
+void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state);
+void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state);
+void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state);
+bool intel_vrr_always_use_vrr_tg(struct intel_display *display);
#endif /* __INTEL_VRR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c
index f00f4cfc58e5..bba82e888db2 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.c
+++ b/drivers/gpu/drm/i915/display/intel_wm.c
@@ -5,15 +5,18 @@
#include <linux/debugfs.h>
-#include "i915_drv.h"
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
#include "i9xx_wm.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_wm.h"
#include "skl_watermark.h"
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
- * @i915: i915 device
+ * @display: display device
*
* Calculate watermark values for the various WM regs based on current mode
* and plane configuration.
@@ -44,10 +47,10 @@
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
*/
-void intel_update_watermarks(struct drm_i915_private *i915)
+void intel_update_watermarks(struct intel_display *display)
{
- if (i915->display.funcs.wm->update_wm)
- i915->display.funcs.wm->update_wm(i915);
+ if (display->funcs.wm->update_wm)
+ display->funcs.wm->update_wm(display);
}
int intel_wm_compute(struct intel_atomic_state *state,
@@ -64,10 +67,10 @@ int intel_wm_compute(struct intel_atomic_state *state,
bool intel_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->initial_watermarks) {
- i915->display.funcs.wm->initial_watermarks(state, crtc);
+ if (display->funcs.wm->initial_watermarks) {
+ display->funcs.wm->initial_watermarks(state, crtc);
return true;
}
@@ -77,41 +80,41 @@ bool intel_initial_watermarks(struct intel_atomic_state *state,
void intel_atomic_update_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->atomic_update_watermarks)
- i915->display.funcs.wm->atomic_update_watermarks(state, crtc);
+ if (display->funcs.wm->atomic_update_watermarks)
+ display->funcs.wm->atomic_update_watermarks(state, crtc);
}
void intel_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->optimize_watermarks)
- i915->display.funcs.wm->optimize_watermarks(state, crtc);
+ if (display->funcs.wm->optimize_watermarks)
+ display->funcs.wm->optimize_watermarks(state, crtc);
}
int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->compute_global_watermarks)
- return i915->display.funcs.wm->compute_global_watermarks(state);
+ if (display->funcs.wm->compute_global_watermarks)
+ return display->funcs.wm->compute_global_watermarks(state);
return 0;
}
-void intel_wm_get_hw_state(struct drm_i915_private *i915)
+void intel_wm_get_hw_state(struct intel_display *display)
{
- if (i915->display.funcs.wm->get_hw_state)
- return i915->display.funcs.wm->get_hw_state(i915);
+ if (display->funcs.wm->get_hw_state)
+ return display->funcs.wm->get_hw_state(display);
}
-void intel_wm_sanitize(struct drm_i915_private *i915)
+void intel_wm_sanitize(struct intel_display *display)
{
- if (i915->display.funcs.wm->sanitize)
- return i915->display.funcs.wm->sanitize(i915);
+ if (display->funcs.wm->sanitize)
+ return display->funcs.wm->sanitize(display);
}
bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
@@ -137,16 +140,16 @@ bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
return plane_state->uapi.visible;
}
-void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+void intel_print_wm_latency(struct intel_display *display,
const char *name, const u16 wm[])
{
int level;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
unsigned int latency = wm[level];
if (latency == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s WM%d latency not provided\n",
name, level);
continue;
@@ -156,43 +159,43 @@ void intel_print_wm_latency(struct drm_i915_private *dev_priv,
* - latencies are in us on gen9.
* - before then, WM1+ latency values are in 0.5us units
*/
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
latency *= 10;
else if (level > 0)
latency *= 5;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s WM%d latency %u (%u.%u usec)\n", name, level,
wm[level], latency / 10, latency % 10);
}
}
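A worked example of the unit scaling above (raw values illustrative): for raw wm[2] = 8, a pre-gen9 platform treats WM1+ values as 0.5 usec units, so latency = 8 * 5 = 40 tenths and the line prints "8 (4.0 usec)"; on gen9+ the raw value is already in usec, so 8 * 10 = 80 and it prints "8 (8.0 usec)".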
-void intel_wm_init(struct drm_i915_private *i915)
+void intel_wm_init(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 9)
- skl_wm_init(i915);
+ if (DISPLAY_VER(display) >= 9)
+ skl_wm_init(display);
else
- i9xx_wm_init(i915);
+ i9xx_wm_init(display);
}
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
int level;
- drm_modeset_lock_all(&dev_priv->drm);
+ drm_modeset_lock_all(display->drm);
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
unsigned int latency = wm[level];
/*
* - WM1+ latency values in 0.5us units
* - latencies are in us on gen9/vlv/chv
*/
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_G4X(dev_priv))
+ if (DISPLAY_VER(display) >= 9 ||
+ display->platform.valleyview ||
+ display->platform.cherryview ||
+ display->platform.g4x)
latency *= 10;
else if (level > 0)
latency *= 5;
@@ -201,18 +204,18 @@ static void wm_latency_show(struct seq_file *m, const u16 wm[8])
level, wm[level], latency / 10, latency % 10);
}
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
}
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
const u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.pri_latency;
+ latencies = display->wm.pri_latency;
wm_latency_show(m, latencies);
@@ -221,13 +224,13 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
static int spr_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
const u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.spr_latency;
+ latencies = display->wm.spr_latency;
wm_latency_show(m, latencies);
@@ -236,13 +239,13 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
static int cur_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
const u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.cur_latency;
+ latencies = display->wm.cur_latency;
wm_latency_show(m, latencies);
@@ -251,39 +254,39 @@ static int cur_wm_latency_show(struct seq_file *m, void *data)
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct intel_display *display = inode->i_private;
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ if (DISPLAY_VER(display) < 5 && !display->platform.g4x)
return -ENODEV;
- return single_open(file, pri_wm_latency_show, dev_priv);
+ return single_open(file, pri_wm_latency_show, display);
}
static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct intel_display *display = inode->i_private;
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
return -ENODEV;
- return single_open(file, spr_wm_latency_show, dev_priv);
+ return single_open(file, spr_wm_latency_show, display);
}
static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct intel_display *display = inode->i_private;
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
return -ENODEV;
- return single_open(file, cur_wm_latency_show, dev_priv);
+ return single_open(file, cur_wm_latency_show, display);
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp, u16 wm[8])
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 new[8] = {};
int level;
int ret;
@@ -300,15 +303,15 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
&new[0], &new[1], &new[2], &new[3],
&new[4], &new[5], &new[6], &new[7]);
- if (ret != dev_priv->display.wm.num_levels)
+ if (ret != display->wm.num_levels)
return -EINVAL;
- drm_modeset_lock_all(&dev_priv->drm);
+ drm_modeset_lock_all(display->drm);
- for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+ for (level = 0; level < display->wm.num_levels; level++)
wm[level] = new[level];
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
return len;
}
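As a usage sketch (file names per the debugfs registration below, path layout assumed): on a platform with eight watermark levels, writing "2 4 6 8 10 12 14 16" to i915_pri_wm_latency replaces all eight primary latencies under the modeset locks; supplying anything other than exactly display->wm.num_levels values fails the sscanf count check and returns -EINVAL.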
@@ -317,13 +320,13 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.pri_latency;
+ latencies = display->wm.pri_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -332,13 +335,13 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.spr_latency;
+ latencies = display->wm.spr_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -347,13 +350,13 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.cur_latency;
+ latencies = display->wm.cur_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -385,18 +388,18 @@ static const struct file_operations i915_cur_wm_latency_fops = {
.write = cur_wm_latency_write
};
-void intel_wm_debugfs_register(struct drm_i915_private *i915)
+void intel_wm_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root,
- i915, &i915_pri_wm_latency_fops);
+ display, &i915_pri_wm_latency_fops);
debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root,
- i915, &i915_spr_wm_latency_fops);
+ display, &i915_spr_wm_latency_fops);
debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root,
- i915, &i915_cur_wm_latency_fops);
+ display, &i915_cur_wm_latency_fops);
- skl_watermark_debugfs_register(i915);
+ skl_watermark_debugfs_register(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_wm.h b/drivers/gpu/drm/i915/display/intel_wm.h
index 7d3a447054b3..9ad4e9eae5ca 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.h
+++ b/drivers/gpu/drm/i915/display/intel_wm.h
@@ -8,13 +8,13 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_plane_state;
-void intel_update_watermarks(struct drm_i915_private *i915);
+void intel_update_watermarks(struct intel_display *display);
int intel_wm_compute(struct intel_atomic_state *state,
struct intel_crtc *crtc);
bool intel_initial_watermarks(struct intel_atomic_state *state,
@@ -24,13 +24,13 @@ void intel_atomic_update_watermarks(struct intel_atomic_state *state,
void intel_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_compute_global_watermarks(struct intel_atomic_state *state);
-void intel_wm_get_hw_state(struct drm_i915_private *i915);
-void intel_wm_sanitize(struct drm_i915_private *i915);
+void intel_wm_get_hw_state(struct intel_display *display);
+void intel_wm_sanitize(struct intel_display *display);
bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
-void intel_print_wm_latency(struct drm_i915_private *i915,
+void intel_print_wm_latency(struct intel_display *display,
const char *name, const u16 wm[]);
-void intel_wm_init(struct drm_i915_private *i915);
-void intel_wm_debugfs_register(struct drm_i915_private *i915);
+void intel_wm_init(struct intel_display *display);
+void intel_wm_debugfs_register(struct intel_display *display);
#endif /* __INTEL_WM_H__ */
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index ee81220a7c88..c855426544cf 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -3,8 +3,10 @@
* Copyright © 2020 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 70e550539bb2..c7b336359a5e 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -601,7 +601,7 @@ static u32 tgl_plane_min_alignment(struct intel_plane *plane,
* Figure out what's going on here...
*/
if (display->platform.alderlake_p &&
- intel_plane_can_async_flip(plane, fb->modifier))
+ intel_plane_can_async_flip(plane, fb->format->format, fb->modifier))
return mult * 16 * 1024;
switch (fb->modifier) {
@@ -2666,6 +2666,7 @@ static const struct drm_plane_funcs skl_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = skl_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static const struct drm_plane_funcs icl_plane_funcs = {
@@ -2675,6 +2676,7 @@ static const struct drm_plane_funcs icl_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = icl_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static const struct drm_plane_funcs tgl_plane_funcs = {
@@ -2684,28 +2686,29 @@ static const struct drm_plane_funcs tgl_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = tgl_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static void
skl_plane_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ bdw_enable_pipe_irq(display, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&display->irq.lock);
}
static void
skl_plane_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ bdw_disable_pipe_irq(display, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&display->irq.lock);
}
static bool skl_plane_has_rc_ccs(struct intel_display *display,
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 621e97943542..8080f777910a 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -19,6 +19,7 @@
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fixed.h"
@@ -34,7 +35,7 @@
*/
#define DSB_EXE_TIME 100
-static void skl_sagv_disable(struct drm_i915_private *i915);
+static void skl_sagv_disable(struct intel_display *display);
/* Stores plane specific WM parameters */
struct skl_wm_params {
@@ -69,23 +70,21 @@ u8 intel_enabled_dbuf_slices_mask(struct intel_display *display)
* FIXME: We still don't have the proper code to detect if we need to apply the WA,
* so assume we'll always need it in order to avoid underruns.
*/
-static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
+static bool skl_needs_memory_bw_wa(struct intel_display *display)
{
- return DISPLAY_VER(i915) == 9;
+ return DISPLAY_VER(display) == 9;
}
bool
-intel_has_sagv(struct drm_i915_private *i915)
+intel_has_sagv(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
return HAS_SAGV(display) && display->sagv.status != I915_SAGV_NOT_CONTROLLED;
}
static u32
-intel_sagv_block_time(struct drm_i915_private *i915)
+intel_sagv_block_time(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (DISPLAY_VER(display) >= 14) {
u32 val;
@@ -115,10 +114,8 @@ intel_sagv_block_time(struct drm_i915_private *i915)
}
}
-static void intel_sagv_init(struct drm_i915_private *i915)
+static void intel_sagv_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
if (!HAS_SAGV(display))
display->sagv.status = I915_SAGV_NOT_CONTROLLED;
@@ -127,14 +124,14 @@ static void intel_sagv_init(struct drm_i915_private *i915)
* For icl+ this was already determined by intel_bw_init_hw().
*/
if (DISPLAY_VER(display) < 11)
- skl_sagv_disable(i915);
+ skl_sagv_disable(display);
drm_WARN_ON(display->drm, display->sagv.status == I915_SAGV_UNKNOWN);
- display->sagv.block_time_us = intel_sagv_block_time(i915);
+ display->sagv.block_time_us = intel_sagv_block_time(display);
drm_dbg_kms(display->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
- str_yes_no(intel_has_sagv(i915)), display->sagv.block_time_us);
+ str_yes_no(intel_has_sagv(display)), display->sagv.block_time_us);
/* avoid overflow when adding with wm0 latency/etc. */
if (drm_WARN(display->drm, display->sagv.block_time_us > U16_MAX,
@@ -142,7 +139,7 @@ static void intel_sagv_init(struct drm_i915_private *i915)
display->sagv.block_time_us))
display->sagv.block_time_us = 0;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
display->sagv.block_time_us = 0;
}
@@ -157,17 +154,18 @@ static void intel_sagv_init(struct drm_i915_private *i915)
* - All planes can enable watermarks for latencies >= SAGV engine block time
* - We're not using an interlaced display configuration
*/
-static void skl_sagv_enable(struct drm_i915_private *i915)
+static void skl_sagv_enable(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (i915->display.sagv.status == I915_SAGV_ENABLED)
+ if (display->sagv.status == I915_SAGV_ENABLED)
return;
- drm_dbg_kms(&i915->drm, "Enabling SAGV\n");
+ drm_dbg_kms(display->drm, "Enabling SAGV\n");
ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_ENABLE);
@@ -177,29 +175,30 @@ static void skl_sagv_enable(struct drm_i915_private *i915)
* Some skl systems, pre-release machines in particular,
* don't actually have SAGV.
*/
- if (IS_SKYLAKE(i915) && ret == -ENXIO) {
- drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ if (display->platform.skylake && ret == -ENXIO) {
+ drm_dbg(display->drm, "No SAGV found on system, ignoring\n");
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
return;
} else if (ret < 0) {
- drm_err(&i915->drm, "Failed to enable SAGV\n");
+ drm_err(display->drm, "Failed to enable SAGV\n");
return;
}
- i915->display.sagv.status = I915_SAGV_ENABLED;
+ display->sagv.status = I915_SAGV_ENABLED;
}
-static void skl_sagv_disable(struct drm_i915_private *i915)
+static void skl_sagv_disable(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (i915->display.sagv.status == I915_SAGV_DISABLED)
+ if (display->sagv.status == I915_SAGV_DISABLED)
return;
- drm_dbg_kms(&i915->drm, "Disabling SAGV\n");
+ drm_dbg_kms(display->drm, "Disabling SAGV\n");
/* bspec says to keep retrying for at least 1 ms */
ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_DISABLE,
@@ -209,47 +208,47 @@ static void skl_sagv_disable(struct drm_i915_private *i915)
* Some skl systems, pre-release machines in particular,
* don't actually have SAGV.
*/
- if (IS_SKYLAKE(i915) && ret == -ENXIO) {
- drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ if (display->platform.skylake && ret == -ENXIO) {
+ drm_dbg(display->drm, "No SAGV found on system, ignoring\n");
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
return;
} else if (ret < 0) {
- drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret);
+ drm_err(display->drm, "Failed to disable SAGV (%d)\n", ret);
return;
}
- i915->display.sagv.status = I915_SAGV_DISABLED;
+ display->sagv.status = I915_SAGV_DISABLED;
}
static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *new_bw_state =
intel_atomic_get_new_bw_state(state);
if (!new_bw_state)
return;
- if (!intel_can_enable_sagv(i915, new_bw_state))
- skl_sagv_disable(i915);
+ if (!intel_can_enable_sagv(display, new_bw_state))
+ skl_sagv_disable(display);
}
static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *new_bw_state =
intel_atomic_get_new_bw_state(state);
if (!new_bw_state)
return;
- if (intel_can_enable_sagv(i915, new_bw_state))
- skl_sagv_enable(i915);
+ if (intel_can_enable_sagv(display, new_bw_state))
+ skl_sagv_enable(display);
}
static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *old_bw_state =
intel_atomic_get_old_bw_state(state);
const struct intel_bw_state *new_bw_state =
@@ -267,7 +266,7 @@ static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
WARN_ON(!new_bw_state->base.changed);
- drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
+ drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
old_mask, new_mask);
/*
@@ -276,12 +275,12 @@ static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
* time. Also masking should be done before updating the configuration
* and unmasking afterwards.
*/
- icl_pcode_restrict_qgv_points(i915, new_mask);
+ icl_pcode_restrict_qgv_points(display, new_mask);
}
static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *old_bw_state =
intel_atomic_get_old_bw_state(state);
const struct intel_bw_state *new_bw_state =
@@ -299,7 +298,7 @@ static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
WARN_ON(!new_bw_state->base.changed);
- drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
+ drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
old_mask, new_mask);
/*
@@ -308,12 +307,12 @@ static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
* time. Also masking should be done before updating the configuration
* and unmasking afterwards.
*/
- icl_pcode_restrict_qgv_points(i915, new_mask);
+ icl_pcode_restrict_qgv_points(display, new_mask);
}
void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
/*
* Just return if we can't control SAGV or don't have it.
@@ -322,10 +321,10 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
* disabled in a BIOS, we are not even allowed to send a PCode request,
* as it will throw an error. So have to check it here.
*/
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
icl_sagv_pre_plane_update(state);
else
skl_sagv_pre_plane_update(state);
@@ -333,7 +332,7 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
void intel_sagv_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
/*
* Just return if we can't control SAGV or don't have it.
@@ -342,10 +341,10 @@ void intel_sagv_post_plane_update(struct intel_atomic_state *state)
* disabled in a BIOS, we are not even allowed to send a PCode request,
* as it will throw an error. So have to check it here.
*/
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
icl_sagv_post_plane_update(state);
else
skl_sagv_post_plane_update(state);
@@ -353,12 +352,12 @@ void intel_sagv_post_plane_update(struct intel_atomic_state *state)
static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum plane_id plane_id;
int max_level = INT_MAX;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return false;
if (!crtc_state->hw.active)
@@ -377,7 +376,7 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
continue;
/* Find the highest enabled wm level for this plane */
- for (level = i915->display.wm.num_levels - 1;
+ for (level = display->wm.num_levels - 1;
!wm->wm[level].enable; --level)
{ }
@@ -423,104 +422,37 @@ static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
return true;
}
-static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (!i915->display.params.enable_sagv)
+ if (!display->params.enable_sagv)
return false;
- if (DISPLAY_VER(i915) >= 12)
+ /*
+ * SAGV is initially forced off because its current
+ * state can't be queried from pcode. Allow SAGV to
+ * be enabled upon the first real commit.
+ */
+ if (crtc_state->inherited)
+ return false;
+
+ if (DISPLAY_VER(display) >= 12)
return tgl_crtc_can_enable_sagv(crtc_state);
else
return skl_crtc_can_enable_sagv(crtc_state);
}
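The new inherited check deliberately reports SAGV as unusable for the state taken over from the BIOS, since the hardware's actual SAGV state cannot be read back from pcode; a minimal sketch of the resulting commit flow:

/* Commit 0 (takeover):   crtc_state->inherited == true
 *   -> intel_crtc_can_enable_sagv() returns false, SAGV stays off
 * Commit 1 (first real):  crtc_state->inherited == false
 *   -> the TGL/SKL helper decides, and SAGV may be enabled
 */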
-bool intel_can_enable_sagv(struct drm_i915_private *i915,
+bool intel_can_enable_sagv(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
return false;
return bw_state->pipe_sagv_reject == 0;
}
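Concretely, the pre-ICL restriction above permits SAGV only with zero or one active pipe; hypothetical masks for illustration:

/* Hypothetical active_pipes values on a pre-ICL (display ver < 11) part:
 *   0x0 (no pipes)      -> !active_pipes, SAGV allowed
 *   0x1 (pipe A only)   -> is_power_of_2(), SAGV allowed
 *   0x5 (pipes A and C) -> not a power of 2, SAGV rejected
 */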
-static int intel_compute_sagv_mask(struct intel_atomic_state *state)
-{
- struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- int ret;
- struct intel_crtc *crtc;
- struct intel_crtc_state *new_crtc_state;
- struct intel_bw_state *new_bw_state = NULL;
- const struct intel_bw_state *old_bw_state = NULL;
- int i;
-
- for_each_new_intel_crtc_in_state(state, crtc,
- new_crtc_state, i) {
- struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
-
- new_bw_state = intel_atomic_get_bw_state(state);
- if (IS_ERR(new_bw_state))
- return PTR_ERR(new_bw_state);
-
- old_bw_state = intel_atomic_get_old_bw_state(state);
-
- /*
- * We store use_sagv_wm in the crtc state rather than relying on
- * that bw state since we have no convenient way to get at the
- * latter from the plane commit hooks (especially in the legacy
- * cursor case).
- *
- * drm_atomic_check_only() gets upset if we pull more crtcs
- * into the state, so we have to calculate this based on the
- * individual intel_crtc_can_enable_sagv() rather than
- * the overall intel_can_enable_sagv(). Otherwise the
- * crtcs not included in the commit would not switch to the
- * SAGV watermarks when we are about to enable SAGV, and that
- * would lead to underruns. This does mean extra power draw
- * when only a subset of the crtcs are blocking SAGV as the
- * other crtcs can't be allowed to use the more optimal
- * normal (ie. non-SAGV) watermarks.
- */
- pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(display) &&
- DISPLAY_VER(i915) >= 12 &&
- intel_crtc_can_enable_sagv(new_crtc_state);
-
- if (intel_crtc_can_enable_sagv(new_crtc_state))
- new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
- else
- new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
- }
-
- if (!new_bw_state)
- return 0;
-
- new_bw_state->active_pipes =
- intel_calc_active_pipes(state, old_bw_state->active_pipes);
-
- if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- if (intel_can_enable_sagv(i915, new_bw_state) !=
- intel_can_enable_sagv(i915, old_bw_state)) {
- ret = intel_atomic_serialize_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
u16 start, u16 end)
{
@@ -530,17 +462,17 @@ static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
return end;
}
-static int intel_dbuf_slice_size(struct drm_i915_private *i915)
+static int intel_dbuf_slice_size(struct intel_display *display)
{
- return DISPLAY_INFO(i915)->dbuf.size /
- hweight8(DISPLAY_INFO(i915)->dbuf.slice_mask);
+ return DISPLAY_INFO(display)->dbuf.size /
+ hweight8(DISPLAY_INFO(display)->dbuf.slice_mask);
}
static void
-skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
+skl_ddb_entry_for_slices(struct intel_display *display, u8 slice_mask,
struct skl_ddb_entry *ddb)
{
- int slice_size = intel_dbuf_slice_size(i915);
+ int slice_size = intel_dbuf_slice_size(display);
if (!slice_mask) {
ddb->start = 0;
@@ -552,10 +484,10 @@ skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
ddb->end = fls(slice_mask) * slice_size;
WARN_ON(ddb->start >= ddb->end);
- WARN_ON(ddb->end > DISPLAY_INFO(i915)->dbuf.size);
+ WARN_ON(ddb->end > DISPLAY_INFO(display)->dbuf.size);
}
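A worked example of the two helpers above, using hypothetical dbuf geometry (the ddb->start assignment sits just outside the hunk's context):

/* Hypothetical: DISPLAY_INFO()->dbuf.size = 2048 blocks,
 * dbuf.slice_mask = 0xf (four slices):
 *
 *   slice_size = 2048 / hweight8(0xf) = 512 blocks
 *
 * A slice_mask of 0x6 (slices S2+S3) then maps to:
 *
 *   ddb->start = (ffs(0x6) - 1) * 512 =  512
 *   ddb->end   =  fls(0x6)      * 512 = 1536
 */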
-static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
+static unsigned int mbus_ddb_offset(struct intel_display *display, u8 slice_mask)
{
struct skl_ddb_entry ddb;
@@ -564,15 +496,15 @@ static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask
else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
slice_mask = BIT(DBUF_S3);
- skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
+ skl_ddb_entry_for_slices(display, slice_mask, &ddb);
return ddb.start;
}
-u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+u32 skl_ddb_dbuf_slice_mask(struct intel_display *display,
const struct skl_ddb_entry *entry)
{
- int slice_size = intel_dbuf_slice_size(i915);
+ int slice_size = intel_dbuf_slice_size(display);
enum dbuf_slice start_slice, end_slice;
u8 slice_mask = 0;
@@ -618,15 +550,14 @@ static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
unsigned int *weight_end,
unsigned int *weight_total)
{
- struct drm_i915_private *i915 =
- to_i915(dbuf_state->base.state->base.dev);
+ struct intel_display *display = to_intel_display(dbuf_state->base.state->base.dev);
enum pipe pipe;
*weight_start = 0;
*weight_end = 0;
*weight_total = 0;
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
int weight = dbuf_state->weight[pipe];
/*
@@ -652,7 +583,7 @@ static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
static int
skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
unsigned int weight_total, weight_start, weight_end;
const struct intel_dbuf_state *old_dbuf_state =
intel_atomic_get_old_dbuf_state(state);
@@ -674,8 +605,8 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
dbuf_slice_mask = new_dbuf_state->slices[pipe];
- skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices);
- mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask);
+ skl_ddb_entry_for_slices(display, dbuf_slice_mask, &ddb_slices);
+ mbus_offset = mbus_ddb_offset(display, dbuf_slice_mask);
ddb_range_size = skl_ddb_entry_size(&ddb_slices);
intel_crtc_dbuf_weights(new_dbuf_state, pipe,
@@ -709,7 +640,7 @@ out:
crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
crtc->base.base.id, crtc->base.name,
old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
@@ -734,10 +665,10 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */);
-static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level,
+static unsigned int skl_wm_latency(struct intel_display *display, int level,
const struct skl_wm_params *wp)
{
- unsigned int latency = i915->display.wm.skl_latency[level];
+ unsigned int latency = display->wm.skl_latency[level];
if (latency == 0)
return 0;
@@ -746,11 +677,11 @@ static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level,
* WaIncreaseLatencyIPCEnabled: kbl,cfl
* Display WA #1141: kbl,cfl
*/
- if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
- skl_watermark_ipc_enabled(i915))
+ if ((display->platform.kabylake || display->platform.coffeelake ||
+ display->platform.cometlake) && skl_watermark_ipc_enabled(display))
latency += 4;
- if (skl_needs_memory_bw_wa(i915) && wp && wp->x_tiled)
+ if (skl_needs_memory_bw_wa(display) && wp && wp->x_tiled)
latency += 15;
return latency;
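A hypothetical worked example of the latency adjustments above:

/* KBL with IPC enabled plus an X-tiled plane needing the memory
 * bandwidth WA, raw level latency of 19us (hypothetical):
 *
 *   19us + 4us (WA #1141) + 15us (bw WA) = 38us effective latency
 */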
@@ -760,8 +691,8 @@ static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
int num_active)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct skl_wm_level wm = {};
int ret, min_ddb_alloc = 0;
struct skl_wm_params wp;
@@ -772,10 +703,10 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
DRM_FORMAT_MOD_LINEAR,
DRM_MODE_ROTATE_0,
crtc_state->pixel_rate, &wp, 0, 0);
- drm_WARN_ON(&i915->drm, ret);
+ drm_WARN_ON(display->drm, ret);
- for (level = 0; level < i915->display.wm.num_levels; level++) {
- unsigned int latency = skl_wm_latency(i915, level, &wp);
+ for (level = 0; level < display->wm.num_levels; level++) {
+ unsigned int latency = skl_wm_latency(display, level, &wp);
skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
if (wm.min_ddb_alloc == U16_MAX)
@@ -797,14 +728,13 @@ static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
}
static void
-skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
+skl_ddb_get_hw_plane_state(struct intel_display *display,
const enum pipe pipe,
const enum plane_id plane_id,
struct skl_ddb_entry *ddb,
struct skl_ddb_entry *ddb_y,
u16 *min_ddb, u16 *interim_ddb)
{
- struct intel_display *display = &i915->display;
u32 val;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
@@ -837,7 +767,6 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
u16 *min_ddb, u16 *interim_ddb)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
@@ -849,7 +778,7 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
return;
for_each_plane_id_on_crtc(crtc, plane_id)
- skl_ddb_get_hw_plane_state(i915, pipe,
+ skl_ddb_get_hw_plane_state(display, pipe,
plane_id,
&ddb[plane_id],
&ddb_y[plane_id],
@@ -1367,16 +1296,16 @@ static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbu
static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- if (IS_DG2(i915))
+ if (display->platform.dg2)
return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(i915) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(i915) == 12)
+ else if (DISPLAY_VER(display) == 12)
return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(i915) == 11)
+ else if (DISPLAY_VER(display) == 11)
return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
/*
* For anything else just return one slice for now.
@@ -1416,8 +1345,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
static u64
skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum plane_id plane_id;
u64 data_rate = 0;
@@ -1427,7 +1356,7 @@ skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
data_rate += crtc_state->rel_data_rate[plane_id];
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
data_rate += crtc_state->rel_data_rate_y[plane_id];
}
@@ -1489,7 +1418,7 @@ skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
}
}
-static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level,
+static bool skl_need_wm_copy_wa(struct intel_display *display, int level,
const struct skl_plane_wm *wm)
{
/*
@@ -1543,7 +1472,6 @@ static int
skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_dbuf_state *dbuf_state =
@@ -1585,7 +1513,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* Find the highest watermark level for which we can satisfy the block
* requirement of active planes.
*/
- for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
+ for (level = display->wm.num_levels - 1; level >= 0; level--) {
blocks = 0;
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
@@ -1596,7 +1524,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.plane_ddb[plane_id];
if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
wm->wm[level].min_ddb_alloc != U16_MAX);
blocks = U32_MAX;
break;
@@ -1615,9 +1543,9 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
}
if (level < 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Requested display configuration exceeds system DDB limitations");
- drm_dbg_kms(&i915->drm, "minimum required %d/%d\n",
+ drm_dbg_kms(display->drm, "minimum required %d/%d\n",
blocks, iter.size);
return -EINVAL;
}
@@ -1645,7 +1573,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
if (plane_id == PLANE_CURSOR)
continue;
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
crtc_state->nv12_planes & BIT(plane_id)) {
skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
crtc_state->rel_data_rate_y[plane_id]);
@@ -1661,7 +1589,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
*interim_ddb = wm->sagv.wm0.min_ddb_alloc;
}
}
- drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);
+ drm_WARN_ON(display->drm, iter.size != 0 || iter.data_rate != 0);
/*
* When we calculated watermark values we didn't know how high
@@ -1669,7 +1597,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* all levels as "enabled." Go back now and disable the ones
* that aren't actually possible.
*/
- for (level++; level < i915->display.wm.num_levels; level++) {
+ for (level++; level < display->wm.num_levels; level++) {
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id];
@@ -1678,7 +1606,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
crtc_state->nv12_planes & BIT(plane_id))
skl_check_nv12_wm_level(&wm->wm[level],
&wm->uv_wm[level],
@@ -1686,7 +1614,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
else
skl_check_wm_level(&wm->wm[level], ddb);
- if (skl_need_wm_copy_wa(i915, level, wm)) {
+ if (skl_need_wm_copy_wa(display, level, wm)) {
wm->wm[level].blocks = wm->wm[level - 1].blocks;
wm->wm[level].lines = wm->wm[level - 1].lines;
wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines;
@@ -1708,7 +1636,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
crtc_state->nv12_planes & BIT(plane_id)) {
skl_check_wm_level(&wm->trans_wm, ddb_y);
} else {
@@ -1734,7 +1662,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
*/
static uint_fixed_16_16_t
-skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
+skl_wm_method1(struct intel_display *display, u32 pixel_rate,
u8 cpp, u32 latency, u32 dbuf_block_size)
{
u32 wm_intermediate_val;
@@ -1746,7 +1674,7 @@ skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
wm_intermediate_val = latency * pixel_rate * cpp;
ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
- if (DISPLAY_VER(i915) >= 10)
+ if (DISPLAY_VER(display) >= 10)
ret = add_fixed16_u32(ret, 1);
return ret;
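Method 1 above sizes the allocation to cover <latency> worth of fetched data; with hypothetical numbers (pixel_rate in kHz, as elsewhere in this file):

/* method1 = latency * pixel_rate * cpp / (1000 * dbuf_block_size)
 *
 * e.g. latency = 10us, pixel_rate = 540000 kHz, cpp = 4,
 *      dbuf_block_size = 512:
 *
 *   10 * 540000 * 4 / (1000 * 512) ~= 42.2 blocks
 *   (+1 on display ver >= 10, rounded up by the caller)
 */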
@@ -1772,7 +1700,7 @@ skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
u32 pixel_rate;
u32 crtc_htotal;
uint_fixed_16_16_t linetime_us;
@@ -1782,7 +1710,7 @@ intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
pixel_rate = crtc_state->pixel_rate;
- if (drm_WARN_ON(&i915->drm, pixel_rate == 0))
+ if (drm_WARN_ON(display->drm, pixel_rate == 0))
return u32_to_fixed16(0);
crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
@@ -1798,15 +1726,13 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
u32 plane_pixel_rate, struct skl_wm_params *wp,
int color_plane, unsigned int pan_x)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 interm_pbpl;
/* only planar format has two planes */
if (color_plane == 1 &&
!intel_format_info_is_yuv_semiplanar(format, modifier)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Non planar format have single plane\n");
return -EINVAL;
}
@@ -1824,7 +1750,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->cpp = format->cpp[color_plane];
wp->plane_pixel_rate = plane_pixel_rate;
- if (DISPLAY_VER(i915) >= 11 &&
+ if (DISPLAY_VER(display) >= 11 &&
modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
wp->dbuf_block_size = 256;
else
@@ -1849,7 +1775,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->y_min_scanlines = 4;
}
- if (skl_needs_memory_bw_wa(i915))
+ if (skl_needs_memory_bw_wa(display))
wp->y_min_scanlines *= 2;
wp->plane_bytes_per_line = wp->width * wp->cpp;
@@ -1860,7 +1786,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
if (DISPLAY_VER(display) >= 30)
interm_pbpl += (pan_x != 0);
- else if (DISPLAY_VER(i915) >= 10)
+ else if (DISPLAY_VER(display) >= 10)
interm_pbpl++;
wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
@@ -1869,7 +1795,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
wp->dbuf_block_size);
- if (!wp->x_tiled || DISPLAY_VER(i915) >= 10)
+ if (!wp->x_tiled || DISPLAY_VER(display) >= 10)
interm_pbpl++;
wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
@@ -1906,18 +1832,18 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
plane_state->uapi.src.x1);
}
-static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
+static bool skl_wm_has_lines(struct intel_display *display, int level)
{
- if (DISPLAY_VER(i915) >= 10)
+ if (DISPLAY_VER(display) >= 10)
return true;
/* The number of lines is ignored for the level 0 watermark. */
return level > 0;
}
-static int skl_wm_max_lines(struct drm_i915_private *i915)
+static int skl_wm_max_lines(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return 255;
else
return 31;
@@ -1938,7 +1864,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
u32 blocks, lines, min_ddb_alloc = 0;
@@ -1950,7 +1876,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
return;
}
- method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
+ method1 = skl_wm_method1(display, wp->plane_pixel_rate,
wp->cpp, latency, wp->dbuf_block_size);
method2 = skl_wm_method2(wp->plane_pixel_rate,
crtc_state->hw.pipe_mode.crtc_htotal,
@@ -1965,7 +1891,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
(wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
selected_result = method2;
} else if (latency >= wp->linetime_us) {
- if (DISPLAY_VER(i915) == 9)
+ if (DISPLAY_VER(display) == 9)
selected_result = min_fixed16(method1, method2);
else
selected_result = method2;
@@ -1975,7 +1901,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
blocks = fixed16_to_u32_round_up(selected_result);
- if (DISPLAY_VER(i915) < 30)
+ if (DISPLAY_VER(display) < 30)
blocks++;
/*
@@ -1994,13 +1920,13 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
* channels' impact on the level 0 memory latency and the relevant
* wm calculations.
*/
- if (skl_wm_has_lines(i915, level))
+ if (skl_wm_has_lines(display, level))
blocks = max(blocks,
fixed16_to_u32_round_up(wp->plane_blocks_per_line));
lines = div_round_up_fixed16(selected_result,
wp->plane_blocks_per_line);
- if (DISPLAY_VER(i915) == 9) {
+ if (DISPLAY_VER(display) == 9) {
/* Display WA #1125: skl,bxt,kbl */
if (level == 0 && wp->rc_surface)
blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
@@ -2025,7 +1951,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
}
- if (DISPLAY_VER(i915) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
if (wp->y_tiled) {
int extra_lines;
@@ -2042,10 +1968,10 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
}
- if (!skl_wm_has_lines(i915, level))
+ if (!skl_wm_has_lines(display, level))
lines = 0;
- if (lines > skl_wm_max_lines(i915)) {
+ if (lines > skl_wm_max_lines(display)) {
/* reject it */
result->min_ddb_alloc = U16_MAX;
return;
@@ -2064,8 +1990,8 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
result->enable = true;
result->auto_min_alloc_wm_enable = xe3_auto_min_alloc_capable(plane, level);
- if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
- result->can_sagv = latency >= i915->display.sagv.block_time_us;
+ if (DISPLAY_VER(display) < 12 && display->sagv.block_time_us)
+ result->can_sagv = latency >= display->sagv.block_time_us;
}
static void
@@ -2074,13 +2000,13 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wm_params,
struct skl_wm_level *levels)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct skl_wm_level *result_prev = &levels[0];
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct skl_wm_level *result = &levels[level];
- unsigned int latency = skl_wm_latency(i915, level, wm_params);
+ unsigned int latency = skl_wm_latency(display, level, wm_params);
skl_compute_plane_wm(crtc_state, plane, level, latency,
wm_params, result_prev, result);
@@ -2094,21 +2020,21 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wm_params,
struct skl_plane_wm *plane_wm)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
struct skl_wm_level *levels = plane_wm->wm;
unsigned int latency = 0;
- if (i915->display.sagv.block_time_us)
- latency = i915->display.sagv.block_time_us +
- skl_wm_latency(i915, 0, wm_params);
+ if (display->sagv.block_time_us)
+ latency = display->sagv.block_time_us +
+ skl_wm_latency(display, 0, wm_params);
skl_compute_plane_wm(crtc_state, plane, 0, latency,
wm_params, &levels[0],
sagv_wm);
}
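The SAGV watermark above is simply a level-0 watermark recomputed with the SAGV block time folded into the latency; hypothetical numbers:

/* sagv.block_time_us = 20, level-0 latency = 2us (hypothetical)
 *   -> the SAGV wm0 is computed as if memory latency were 22us,
 *      covering the worst-case QGV point switch
 */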
-static void skl_compute_transition_wm(struct drm_i915_private *i915,
+static void skl_compute_transition_wm(struct intel_display *display,
struct skl_wm_level *trans_wm,
const struct skl_wm_level *wm0,
const struct skl_wm_params *wp)
@@ -2117,23 +2043,23 @@ static void skl_compute_transition_wm(struct drm_i915_private *i915,
u16 wm0_blocks, trans_offset, blocks;
/* Transition WMs don't make any sense if IPC is disabled */
- if (!skl_watermark_ipc_enabled(i915))
+ if (!skl_watermark_ipc_enabled(display))
return;
/*
* WaDisableTWM:skl,kbl,cfl,bxt
* Transition WM are not recommended by HW team for GEN9
*/
- if (DISPLAY_VER(i915) == 9)
+ if (DISPLAY_VER(display) == 9)
return;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
trans_min = 4;
else
trans_min = 14;
/* Display WA #1140: glk,cnl */
- if (DISPLAY_VER(i915) == 10)
+ if (DISPLAY_VER(display) == 10)
trans_amount = 0;
else
trans_amount = 10; /* This is a configurable amount */
@@ -2175,8 +2101,7 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
struct intel_plane *plane, int color_plane)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
struct skl_wm_params wm_params;
int ret;
@@ -2188,13 +2113,13 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
- skl_compute_transition_wm(i915, &wm->trans_wm,
+ skl_compute_transition_wm(display, &wm->trans_wm,
&wm->wm[0], &wm_params);
- if (DISPLAY_VER(i915) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
- skl_compute_transition_wm(i915, &wm->sagv.trans_wm,
+ skl_compute_transition_wm(display, &wm->sagv.trans_wm,
&wm->sagv.wm0, &wm_params);
}
@@ -2254,8 +2179,8 @@ static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
int ret;
@@ -2269,9 +2194,9 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
if (plane_state->planar_linked_plane) {
const struct drm_framebuffer *fb = plane_state->hw.fb;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
!intel_wm_plane_visible(crtc_state, plane_state));
- drm_WARN_ON(&i915->drm, !fb->format->is_yuv ||
+ drm_WARN_ON(display->drm, !fb->format->is_yuv ||
fb->format->num_planes == 1);
ret = skl_build_plane_wm_single(crtc_state, plane_state,
@@ -2411,15 +2336,14 @@ static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
int wm0_lines)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
int level;
- for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
+ for (level = display->wm.num_levels - 1; level >= 0; level--) {
int latency;
/* FIXME should we care about the latency w/a's? */
- latency = skl_wm_latency(i915, level, NULL);
+ latency = skl_wm_latency(display, level, NULL);
if (latency == 0)
continue;
@@ -2436,8 +2360,8 @@ static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
int wm0_lines, level;
if (!crtc_state->hw.active)
@@ -2453,9 +2377,9 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
* PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_*
* based on whether we're limited by the vblank duration.
*/
- crtc_state->wm_level_disabled = level < i915->display.wm.num_levels - 1;
+ crtc_state->wm_level_disabled = level < display->wm.num_levels - 1;
- for (level++; level < i915->display.wm.num_levels; level++) {
+ for (level++; level < display->wm.num_levels; level++) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id) {
@@ -2471,10 +2395,10 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
}
}
- if (DISPLAY_VER(i915) >= 12 &&
- i915->display.sagv.block_time_us &&
+ if (DISPLAY_VER(display) >= 12 &&
+ display->sagv.block_time_us &&
skl_is_vblank_too_short(crtc_state, wm0_lines,
- i915->display.sagv.block_time_us)) {
+ display->sagv.block_time_us)) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id) {
@@ -2492,7 +2416,7 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
static int skl_build_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_plane_state *plane_state;
@@ -2508,7 +2432,7 @@ static int skl_build_pipe_wm(struct intel_atomic_state *state,
if (plane->pipe != crtc->pipe)
continue;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
ret = icl_build_plane_wm(crtc_state, plane_state);
else
ret = skl_build_plane_wm(crtc_state, plane_state);
@@ -2531,11 +2455,10 @@ static bool skl_wm_level_equals(const struct skl_wm_level *l1,
l1->auto_min_alloc_wm_enable == l2->auto_min_alloc_wm_enable;
}
-static bool skl_plane_wm_equals(struct drm_i915_private *i915,
+static bool skl_plane_wm_equals(struct intel_display *display,
const struct skl_plane_wm *wm1,
const struct skl_plane_wm *wm2)
{
- struct intel_display *display = &i915->display;
int level;
for (level = 0; level < display->wm.num_levels; level++) {
@@ -2590,14 +2513,14 @@ static int
skl_ddb_add_affected_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
struct intel_plane_state *plane_state;
enum plane_id plane_id = plane->id;
@@ -2608,7 +2531,7 @@ skl_ddb_add_affected_planes(struct intel_atomic_state *state,
continue;
if (new_crtc_state->do_async_flip) {
- drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Can't change DDB during async flip\n",
+ drm_dbg_kms(display->drm, "[PLANE:%d:%s] Can't change DDB during async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -2627,7 +2550,7 @@ skl_ddb_add_affected_planes(struct intel_atomic_state *state,
static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
{
- struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev);
+ struct intel_display *display = to_intel_display(dbuf_state->base.state->base.dev);
u8 enabled_slices;
enum pipe pipe;
@@ -2637,7 +2560,7 @@ static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
*/
enabled_slices = BIT(DBUF_S1);
- for_each_pipe(i915, pipe)
+ for_each_pipe(display, pipe)
enabled_slices |= dbuf_state->slices[pipe];
return enabled_slices;
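For illustration, with S1 kept on unconditionally the accumulation above behaves as follows (hypothetical per-pipe state):

/* Hypothetical: pipe A -> BIT(DBUF_S1), pipe B -> BIT(DBUF_S2),
 * pipe C -> 0 (inactive):
 *
 *   enabled_slices = BIT(DBUF_S1)                      (always)
 *                  | BIT(DBUF_S1) | BIT(DBUF_S2) | 0   (per pipe)
 *                  = BIT(DBUF_S1) | BIT(DBUF_S2)
 */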
@@ -2647,7 +2570,6 @@ static int
skl_compute_ddb(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_dbuf_state *old_dbuf_state;
struct intel_dbuf_state *new_dbuf_state = NULL;
struct intel_crtc_state *new_crtc_state;
@@ -2686,7 +2608,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
}
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
enum pipe pipe = crtc->pipe;
new_dbuf_state->slices[pipe] =
@@ -2709,11 +2631,11 @@ skl_compute_ddb(struct intel_atomic_state *state)
if (ret)
return ret;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
old_dbuf_state->enabled_slices,
new_dbuf_state->enabled_slices,
- DISPLAY_INFO(i915)->dbuf.slice_mask,
+ DISPLAY_INFO(display)->dbuf.slice_mask,
str_yes_no(old_dbuf_state->joined_mbus),
str_yes_no(new_dbuf_state->joined_mbus));
}
@@ -2731,7 +2653,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
ret = skl_crtc_allocate_ddb(state, crtc);
if (ret)
return ret;
@@ -2758,7 +2680,7 @@ static char enast(bool enable)
static void
skl_print_wm_changes(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state;
const struct intel_crtc_state *new_crtc_state;
struct intel_plane *plane;
@@ -2775,7 +2697,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
old_pipe_wm = &old_crtc_state->wm.skl.optimal;
new_pipe_wm = &new_crtc_state->wm.skl.optimal;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
enum plane_id plane_id = plane->id;
const struct skl_ddb_entry *old, *new;
@@ -2785,24 +2707,24 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_ddb_entry_equal(old, new))
continue;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
plane->base.base.id, plane->base.name,
old->start, old->end, new->start, new->end,
skl_ddb_entry_size(old), skl_ddb_entry_size(new));
}
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
enum plane_id plane_id = plane->id;
const struct skl_plane_wm *old_wm, *new_wm;
old_wm = &old_pipe_wm->planes[plane_id];
new_wm = &new_pipe_wm->planes[plane_id];
- if (skl_plane_wm_equals(i915, old_wm, new_wm))
+ if (skl_plane_wm_equals(display, old_wm, new_wm))
continue;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
" -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
plane->base.base.id, plane->base.name,
@@ -2821,7 +2743,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
enast(new_wm->sagv.wm0.enable),
enast(new_wm->sagv.trans_wm.enable));
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
" -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
plane->base.base.id, plane->base.name,
@@ -2848,7 +2770,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
" -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
plane->base.base.id, plane->base.name,
@@ -2867,7 +2789,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
new_wm->sagv.wm0.blocks,
new_wm->sagv.trans_wm.blocks);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
" -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
plane->base.base.id, plane->base.name,
@@ -2945,14 +2867,14 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
struct intel_plane_state *plane_state;
enum plane_id plane_id = plane->id;
@@ -2971,7 +2893,7 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
continue;
if (new_crtc_state->do_async_flip) {
- drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Can't change watermarks during async flip\n",
+ drm_dbg_kms(display->drm, "[PLANE:%d:%s] Can't change watermarks during async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -3002,7 +2924,6 @@ void
intel_program_dpkgc_latency(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_crtc *crtc;
struct intel_crtc_state *new_crtc_state;
u32 latency = LNL_PKG_C_LATENCY_MASK;
@@ -3028,7 +2949,7 @@ intel_program_dpkgc_latency(struct intel_atomic_state *state)
added_wake_time = DSB_EXE_TIME +
display->sagv.block_time_us;
- latency = skl_watermark_max_latency(i915, 1);
+ latency = skl_watermark_max_latency(display, 1);
/* Wa_22020432604 */
if ((DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30) && !latency) {
@@ -3055,6 +2976,7 @@ intel_program_dpkgc_latency(struct intel_atomic_state *state)
static int
skl_compute_wm(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
struct intel_crtc_state __maybe_unused *new_crtc_state;
int ret, i;
@@ -3069,16 +2991,35 @@ skl_compute_wm(struct intel_atomic_state *state)
if (ret)
return ret;
- ret = intel_compute_sagv_mask(state);
- if (ret)
- return ret;
-
/*
* skl_compute_ddb() will have adjusted the final watermarks
* based on how much ddb is available. Now we can actually
* check if the final watermarks changed.
*/
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
+
+ /*
+ * We store use_sagv_wm in the crtc state rather than relying on
+ * that bw state since we have no convenient way to get at the
+ * latter from the plane commit hooks (especially in the legacy
+ * cursor case).
+ *
+ * drm_atomic_check_only() gets upset if we pull more crtcs
+ * into the state, so we have to calculate this based on the
+ * individual intel_crtc_can_enable_sagv() rather than
+ * the overall intel_can_enable_sagv(). Otherwise the
+ * crtcs not included in the commit would not switch to the
+ * SAGV watermarks when we are about to enable SAGV, and that
+ * would lead to underruns. This does mean extra power draw
+ * when only a subset of the crtcs are blocking SAGV as the
+ * other crtcs can't be allowed to use the more optimal
+ * normal (ie. non-SAGV) watermarks.
+ */
+ pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(display) &&
+ DISPLAY_VER(display) >= 12 &&
+ intel_crtc_can_enable_sagv(new_crtc_state);
+
ret = skl_wm_add_affected_planes(state, crtc);
if (ret)
return ret;
@@ -3149,11 +3090,10 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
}
}
-static void skl_wm_get_hw_state(struct drm_i915_private *i915)
+static void skl_wm_get_hw_state(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ to_intel_dbuf_state(display->dbuf.obj.state);
struct intel_crtc *crtc;
if (HAS_MBUS_JOINING(display))
@@ -3193,7 +3133,7 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
if (!crtc_state->hw.active)
continue;
- skl_ddb_get_hw_plane_state(i915, crtc->pipe,
+ skl_ddb_get_hw_plane_state(display, crtc->pipe,
plane_id, ddb, ddb_y,
min_ddb, interim_ddb);
@@ -3209,13 +3149,13 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
*/
slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
dbuf_state->joined_mbus);
- mbus_offset = mbus_ddb_offset(i915, slices);
+ mbus_offset = mbus_ddb_offset(display, slices);
crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
/* The slices actually used by the planes on the pipe */
dbuf_state->slices[pipe] =
- skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);
+ skl_ddb_dbuf_slice_mask(display, &crtc_state->wm.skl.ddb);
drm_dbg_kms(display->drm,
"[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
@@ -3228,49 +3168,52 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
dbuf_state->enabled_slices = display->dbuf.enabled_slices;
}
-bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
+bool skl_watermark_ipc_enabled(struct intel_display *display)
{
- return i915->display.wm.ipc_enabled;
+ return display->wm.ipc_enabled;
}
-void skl_watermark_ipc_update(struct drm_i915_private *i915)
+void skl_watermark_ipc_update(struct intel_display *display)
{
- if (!HAS_IPC(i915))
+ if (!HAS_IPC(display))
return;
- intel_de_rmw(i915, DISP_ARB_CTL2, DISP_IPC_ENABLE,
- skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
+ intel_de_rmw(display, DISP_ARB_CTL2, DISP_IPC_ENABLE,
+ skl_watermark_ipc_enabled(display) ? DISP_IPC_ENABLE : 0);
}
-static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915)
+static bool skl_watermark_ipc_can_enable(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
/* Display WA #0477 WaDisableIPC: skl */
- if (IS_SKYLAKE(i915))
+ if (display->platform.skylake)
return false;
/* Display WA #1141: SKL:all KBL:all CFL */
- if (IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915) ||
- IS_COMETLAKE(i915))
+ if (display->platform.kabylake ||
+ display->platform.coffeelake ||
+ display->platform.cometlake)
return i915->dram_info.symmetric_memory;
return true;
}
-void skl_watermark_ipc_init(struct drm_i915_private *i915)
+void skl_watermark_ipc_init(struct intel_display *display)
{
- if (!HAS_IPC(i915))
+ if (!HAS_IPC(display))
return;
- i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915);
+ display->wm.ipc_enabled = skl_watermark_ipc_can_enable(display);
- skl_watermark_ipc_update(i915);
+ skl_watermark_ipc_update(display);
}
static void
-adjust_wm_latency(struct drm_i915_private *i915,
+adjust_wm_latency(struct intel_display *display,
u16 wm[], int num_levels, int read_latency)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
int i, level;
@@ -3311,31 +3254,32 @@ adjust_wm_latency(struct drm_i915_private *i915,
wm[0] += 1;
}
-static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void mtl_read_wm_latency(struct intel_display *display, u16 wm[])
{
- int num_levels = i915->display.wm.num_levels;
+ int num_levels = display->wm.num_levels;
u32 val;
- val = intel_de_read(i915, MTL_LATENCY_LP0_LP1);
+ val = intel_de_read(display, MTL_LATENCY_LP0_LP1);
wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- val = intel_de_read(i915, MTL_LATENCY_LP2_LP3);
+ val = intel_de_read(display, MTL_LATENCY_LP2_LP3);
wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- val = intel_de_read(i915, MTL_LATENCY_LP4_LP5);
+ val = intel_de_read(display, MTL_LATENCY_LP4_LP5);
wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- adjust_wm_latency(i915, wm, num_levels, 6);
+ adjust_wm_latency(display, wm, num_levels, 6);
}
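Each MTL_LATENCY_* register packs two latency levels; a decode sketch with a hypothetical readback (field placement assumed: even level in the low bits, odd level in the high bits):

/* Hypothetical MTL_LATENCY_LP0_LP1 readback of 0x00140004:
 *
 *   wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val) =  4us
 *   wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK,  val) = 20us
 */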
-static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
{
- int num_levels = i915->display.wm.num_levels;
- int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
- int mult = IS_DG2(i915) ? 2 : 1;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ int num_levels = display->wm.num_levels;
+ int read_latency = DISPLAY_VER(display) >= 12 ? 3 : 2;
+ int mult = display->platform.dg2 ? 2 : 1;
u32 val;
int ret;
@@ -3343,7 +3287,7 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
val = 0; /* data0 to be programmed to 0 for first set */
ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
if (ret) {
- drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
+ drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
return;
}
@@ -3356,7 +3300,7 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
val = 1; /* data0 to be programmed to 1 for second set */
ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
if (ret) {
- drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
+ drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
return;
}
@@ -3365,24 +3309,22 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
- adjust_wm_latency(i915, wm, num_levels, read_latency);
+ adjust_wm_latency(display, wm, num_levels, read_latency);
}
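The mailbox protocol above returns four 8-bit latencies per read, with data0 selecting the set; a sketch of the first unpack with a hypothetical readback (the level 0/1 mask names are assumed by symmetry with the level 2/3 masks in the hunk):

/* First read, data0 = 0, hypothetical val = 0x0c0a0802, mult = 1:
 *
 *   wm[0] = 0x02 * mult =  2us   (GEN9_MEM_LATENCY_LEVEL_0_4_MASK)
 *   wm[1] = 0x08 * mult =  8us   (GEN9_MEM_LATENCY_LEVEL_1_5_MASK)
 *   wm[2] = 0x0a * mult = 10us   (GEN9_MEM_LATENCY_LEVEL_2_6_MASK)
 *   wm[3] = 0x0c * mult = 12us   (GEN9_MEM_LATENCY_LEVEL_3_7_MASK)
 *
 * The second read (data0 = 1) fills wm[4..7] the same way.
 */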
-static void skl_setup_wm_latency(struct drm_i915_private *i915)
+static void skl_setup_wm_latency(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
if (HAS_HW_SAGV_WM(display))
display->wm.num_levels = 6;
else
display->wm.num_levels = 8;
if (DISPLAY_VER(display) >= 14)
- mtl_read_wm_latency(i915, display->wm.skl_latency);
+ mtl_read_wm_latency(display, display->wm.skl_latency);
else
- skl_read_wm_latency(i915, display->wm.skl_latency);
+ skl_read_wm_latency(display, display->wm.skl_latency);
- intel_print_wm_latency(i915, "Gen9 Plane", display->wm.skl_latency);
+ intel_print_wm_latency(display, "Gen9 Plane", display->wm.skl_latency);
}
static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
@@ -3410,19 +3352,18 @@ static const struct intel_global_state_funcs intel_dbuf_funcs = {
struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *dbuf_state;
- dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj);
+ dbuf_state = intel_atomic_get_global_obj_state(state, &display->dbuf.obj);
if (IS_ERR(dbuf_state))
return ERR_CAST(dbuf_state);
return to_intel_dbuf_state(dbuf_state);
}
-int intel_dbuf_init(struct drm_i915_private *i915)
+int intel_dbuf_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state;
dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
@@ -3457,34 +3398,34 @@ static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
const struct intel_dbuf_state *dbuf_state)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
u32 val = 0;
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
val |= MBUS_DBOX_I_CREDIT(2);
- if (DISPLAY_VER(i915) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
}
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
val |= dbuf_state->joined_mbus ?
MBUS_DBOX_A_CREDIT(12) : MBUS_DBOX_A_CREDIT(8);
- else if (IS_ALDERLAKE_P(i915))
+ else if (display->platform.alderlake_p)
/* Wa_22010947358:adl-p */
val |= dbuf_state->joined_mbus ?
MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
else
val |= MBUS_DBOX_A_CREDIT(2);
- if (DISPLAY_VER(i915) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
val |= MBUS_DBOX_B_CREDIT(0xA);
- } else if (IS_ALDERLAKE_P(i915)) {
+ } else if (display->platform.alderlake_p) {
val |= MBUS_DBOX_BW_CREDIT(2);
val |= MBUS_DBOX_B_CREDIT(8);
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
val |= MBUS_DBOX_BW_CREDIT(2);
val |= MBUS_DBOX_B_CREDIT(12);
} else {
@@ -3492,7 +3433,7 @@ static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
val |= MBUS_DBOX_B_CREDIT(8);
}
- if (DISPLAY_VERx100(i915) == 1400) {
+ if (DISPLAY_VERx100(display) == 1400) {
if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, dbuf_state->active_pipes))
val |= MBUS_DBOX_BW_8CREDITS_MTL;
else
@@ -3502,22 +3443,22 @@ static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
return val;
}
-static void pipe_mbus_dbox_ctl_update(struct drm_i915_private *i915,
+static void pipe_mbus_dbox_ctl_update(struct intel_display *display,
const struct intel_dbuf_state *dbuf_state)
{
struct intel_crtc *crtc;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, dbuf_state->active_pipes)
- intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe),
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, dbuf_state->active_pipes)
+ intel_de_write(display, PIPE_MBUS_DBOX_CTL(crtc->pipe),
pipe_mbus_dbox_ctl(crtc, dbuf_state));
}
static void intel_mbus_dbox_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
return;
new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
@@ -3527,7 +3468,7 @@ static void intel_mbus_dbox_update(struct intel_atomic_state *state)
new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
return;
- pipe_mbus_dbox_ctl_update(i915, new_dbuf_state);
+ pipe_mbus_dbox_ctl_update(display, new_dbuf_state);
}
int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
@@ -3544,10 +3485,9 @@ int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
return intel_atomic_lock_global_state(&dbuf_state->base);
}
-void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display,
int ratio, bool joined_mbus)
{
- struct intel_display *display = &i915->display;
enum dbuf_slice slice;
if (!HAS_MBUS_JOINING(display))
@@ -3571,7 +3511,7 @@ void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_dbuf_state *old_dbuf_state =
intel_atomic_get_old_dbuf_state(state);
const struct intel_dbuf_state *new_dbuf_state =
@@ -3586,7 +3526,7 @@ static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state
mdclk_cdclk_ratio = new_dbuf_state->mdclk_cdclk_ratio;
}
- intel_dbuf_mdclk_cdclk_ratio_update(i915, mdclk_cdclk_ratio,
+ intel_dbuf_mdclk_cdclk_ratio_update(display, mdclk_cdclk_ratio,
new_dbuf_state->joined_mbus);
}
@@ -3594,13 +3534,12 @@ static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
const struct intel_dbuf_state *dbuf_state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
enum pipe pipe = ffs(dbuf_state->active_pipes) - 1;
const struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
- drm_WARN_ON(&i915->drm, !dbuf_state->joined_mbus);
- drm_WARN_ON(&i915->drm, !is_power_of_2(dbuf_state->active_pipes));
+ drm_WARN_ON(display->drm, !dbuf_state->joined_mbus);
+ drm_WARN_ON(display->drm, !is_power_of_2(dbuf_state->active_pipes));
crtc = intel_crtc_for_pipe(display, pipe);
new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
@@ -3611,7 +3550,7 @@ static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
return INVALID_PIPE;
}
-static void mbus_ctl_join_update(struct drm_i915_private *i915,
+static void mbus_ctl_join_update(struct intel_display *display,
const struct intel_dbuf_state *dbuf_state,
enum pipe pipe)
{
@@ -3627,7 +3566,7 @@ static void mbus_ctl_join_update(struct drm_i915_private *i915,
else
mbus_ctl |= MBUS_JOIN_PIPE_SELECT_NONE;
- intel_de_rmw(i915, MBUS_CTL,
+ intel_de_rmw(display, MBUS_CTL,
MBUS_HASHING_MODE_MASK | MBUS_JOIN |
MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
}
@@ -3635,18 +3574,18 @@ static void mbus_ctl_join_update(struct drm_i915_private *i915,
static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_dbuf_state *old_dbuf_state =
intel_atomic_get_old_dbuf_state(state);
const struct intel_dbuf_state *new_dbuf_state =
intel_atomic_get_new_dbuf_state(state);
- drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
+ drm_dbg_kms(display->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
str_yes_no(old_dbuf_state->joined_mbus),
str_yes_no(new_dbuf_state->joined_mbus),
pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
- mbus_ctl_join_update(i915, new_dbuf_state, pipe);
+ mbus_ctl_join_update(display, new_dbuf_state, pipe);
}
void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state)
@@ -3751,9 +3690,8 @@ void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
gen9_dbuf_slices_update(display, new_slices);
}
-static void skl_mbus_sanitize(struct drm_i915_private *i915)
+static void skl_mbus_sanitize(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(display->dbuf.obj.state);
@@ -3768,28 +3706,28 @@ static void skl_mbus_sanitize(struct drm_i915_private *i915)
dbuf_state->active_pipes);
dbuf_state->joined_mbus = false;
- intel_dbuf_mdclk_cdclk_ratio_update(i915,
+ intel_dbuf_mdclk_cdclk_ratio_update(display,
dbuf_state->mdclk_cdclk_ratio,
dbuf_state->joined_mbus);
- pipe_mbus_dbox_ctl_update(i915, dbuf_state);
- mbus_ctl_join_update(i915, dbuf_state, INVALID_PIPE);
+ pipe_mbus_dbox_ctl_update(display, dbuf_state);
+ mbus_ctl_join_update(display, dbuf_state, INVALID_PIPE);
}
-static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+static bool skl_dbuf_is_misconfigured(struct intel_display *display)
{
const struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ to_intel_dbuf_state(display->dbuf.obj.state);
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
entries[crtc->pipe] = crtc_state->wm.skl.ddb;
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
u8 slices;
@@ -3807,7 +3745,7 @@ static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
return false;
}
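The misconfiguration test boils down to checking that no pipe's BIOS-programmed DDB range overlaps another pipe's; a reduced standalone sketch (the helper name here is illustrative):

/* Two [start, end) ddb ranges collide iff they intersect;
 * empty entries (start == end == 0) never collide:
 */
static bool ddb_entries_collide(const struct skl_ddb_entry *a,
				const struct skl_ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}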
-static void skl_dbuf_sanitize(struct drm_i915_private *i915)
+static void skl_dbuf_sanitize(struct intel_display *display)
{
struct intel_crtc *crtc;
@@ -3822,12 +3760,12 @@ static void skl_dbuf_sanitize(struct drm_i915_private *i915)
* all the planes so that skl_commit_modeset_enables() can
* simply ignore them.
*/
- if (!skl_dbuf_is_misconfigured(i915))
+ if (!skl_dbuf_is_misconfigured(display))
return;
- drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+ drm_dbg_kms(display->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -3837,16 +3775,16 @@ static void skl_dbuf_sanitize(struct drm_i915_private *i915)
if (plane_state->uapi.visible)
intel_plane_disable_noatomic(crtc, plane);
- drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+ drm_WARN_ON(display->drm, crtc_state->active_planes != 0);
memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
}
}
-static void skl_wm_sanitize(struct drm_i915_private *i915)
+static void skl_wm_sanitize(struct intel_display *display)
{
- skl_mbus_sanitize(i915);
- skl_dbuf_sanitize(i915);
+ skl_mbus_sanitize(display);
+ skl_dbuf_sanitize(display);
}
void skl_wm_crtc_disable_noatomic(struct intel_crtc *crtc)
@@ -3897,7 +3835,6 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct skl_hw_state {
@@ -3912,7 +3849,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
u8 hw_enabled_slices;
int level;
- if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
+ if (DISPLAY_VER(display) < 9 || !new_crtc_state->hw.active)
return;
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
@@ -3925,26 +3862,26 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
hw_enabled_slices = intel_enabled_dbuf_slices_mask(display);
- if (DISPLAY_VER(i915) >= 11 &&
- hw_enabled_slices != i915->display.dbuf.enabled_slices)
- drm_err(&i915->drm,
+ if (DISPLAY_VER(display) >= 11 &&
+ hw_enabled_slices != display->dbuf.enabled_slices)
+ drm_err(display->drm,
"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
- i915->display.dbuf.enabled_slices,
+ display->dbuf.enabled_slices,
hw_enabled_slices);
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
const struct skl_wm_level *hw_wm_level, *sw_wm_level;
/* Watermarks */
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
hw_wm_level = &hw->wm.planes[plane->id].wm[level];
sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
continue;
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name, level,
sw_wm_level->enable,
@@ -3959,7 +3896,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name,
sw_wm_level->enable,
@@ -3975,7 +3912,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
if (HAS_HW_SAGV_WM(display) &&
!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name,
sw_wm_level->enable,
@@ -3991,7 +3928,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
if (HAS_HW_SAGV_WM(display) &&
!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name,
sw_wm_level->enable,
@@ -4007,7 +3944,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
plane->base.base.id, plane->base.name,
sw_ddb_entry->start, sw_ddb_entry->end,
@@ -4024,29 +3961,29 @@ static const struct intel_wm_funcs skl_wm_funcs = {
.sanitize = skl_wm_sanitize,
};
-void skl_wm_init(struct drm_i915_private *i915)
+void skl_wm_init(struct intel_display *display)
{
- intel_sagv_init(i915);
+ intel_sagv_init(display);
- skl_setup_wm_latency(i915);
+ skl_setup_wm_latency(display);
- i915->display.funcs.wm = &skl_wm_funcs;
+ display->funcs.wm = &skl_wm_funcs;
}
static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *i915 = m->private;
+ struct intel_display *display = m->private;
seq_printf(m, "Isochronous Priority Control: %s\n",
- str_yes_no(skl_watermark_ipc_enabled(i915)));
+ str_yes_no(skl_watermark_ipc_enabled(display)));
return 0;
}
static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *i915 = inode->i_private;
+ struct intel_display *display = inode->i_private;
- return single_open(file, skl_watermark_ipc_status_show, i915);
+ return single_open(file, skl_watermark_ipc_status_show, display);
}
static ssize_t skl_watermark_ipc_status_write(struct file *file,
@@ -4054,8 +3991,7 @@ static ssize_t skl_watermark_ipc_status_write(struct file *file,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *i915 = m->private;
- intel_wakeref_t wakeref;
+ struct intel_display *display = m->private;
bool enable;
int ret;
@@ -4063,12 +3999,12 @@ static ssize_t skl_watermark_ipc_status_write(struct file *file,
if (ret < 0)
return ret;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- if (!skl_watermark_ipc_enabled(i915) && enable)
- drm_info(&i915->drm,
+ with_intel_display_rpm(display) {
+ if (!skl_watermark_ipc_enabled(display) && enable)
+ drm_info(display->drm,
"Enabling IPC: WM will be proper only after next commit\n");
- i915->display.wm.ipc_enabled = enable;
- skl_watermark_ipc_update(i915);
+ display->wm.ipc_enabled = enable;
+ skl_watermark_ipc_update(display);
}
return len;
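
The debugfs write path above also swaps the explicit-wakeref with_intel_runtime_pm(&i915->runtime_pm, wakeref) form for the scoped display helper, which hides the wakeref entirely. A minimal sketch of the resulting shape, taken from the hunk above:

	/* device is held awake for the duration of the block */
	with_intel_display_rpm(display) {
		display->wm.ipc_enabled = enable;
		skl_watermark_ipc_update(display);
	}
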
@@ -4085,7 +4021,7 @@ static const struct file_operations skl_watermark_ipc_status_fops = {
static int intel_sagv_status_show(struct seq_file *m, void *unused)
{
- struct drm_i915_private *i915 = m->private;
+ struct intel_display *display = m->private;
static const char * const sagv_status[] = {
[I915_SAGV_UNKNOWN] = "unknown",
[I915_SAGV_DISABLED] = "disabled",
@@ -4093,37 +4029,36 @@ static int intel_sagv_status_show(struct seq_file *m, void *unused)
[I915_SAGV_NOT_CONTROLLED] = "not controlled",
};
- seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915)));
+ seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(display)));
seq_printf(m, "SAGV modparam: %s\n",
- str_enabled_disabled(i915->display.params.enable_sagv));
- seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]);
- seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us);
+ str_enabled_disabled(display->params.enable_sagv));
+ seq_printf(m, "SAGV status: %s\n", sagv_status[display->sagv.status]);
+ seq_printf(m, "SAGV block time: %d usec\n", display->sagv.block_time_us);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
-void skl_watermark_debugfs_register(struct drm_i915_private *i915)
+void skl_watermark_debugfs_register(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct drm_minor *minor = display->drm->primary;
if (HAS_IPC(display))
- debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
- &skl_watermark_ipc_status_fops);
+ debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root,
+ display, &skl_watermark_ipc_status_fops);
if (HAS_SAGV(display))
- debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915,
- &intel_sagv_status_fops);
+ debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root,
+ display, &intel_sagv_status_fops);
}
-unsigned int skl_watermark_max_latency(struct drm_i915_private *i915, int initial_wm_level)
+unsigned int skl_watermark_max_latency(struct intel_display *display, int initial_wm_level)
{
int level;
- for (level = i915->display.wm.num_levels - 1; level >= initial_wm_level; level--) {
- unsigned int latency = skl_wm_latency(i915, level, NULL);
+ for (level = display->wm.num_levels - 1; level >= initial_wm_level; level--) {
+ unsigned int latency = skl_wm_latency(display, level, NULL);
if (latency)
return latency;
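
The skl_watermark.c conversions above all follow one mechanical pattern: functions stop taking struct drm_i915_private and instead take (or derive) struct intel_display, logging switches from &i915->drm to display->drm, and field accesses move from i915->display.* to display->*. A minimal sketch of the pattern, with hypothetical example_update() and do_update() names:

/* Before: i915-centric entry point */
static void example_update(struct drm_i915_private *i915)
{
	struct intel_display *display = &i915->display;

	drm_dbg_kms(&i915->drm, "updating\n");
	do_update(display);
}

/* After: display-centric; callers pass struct intel_display directly */
static void example_update(struct intel_display *display)
{
	drm_dbg_kms(display->drm, "updating\n");
	do_update(display);
}
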
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index d9cff6c54310..95b0b599d5c3 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -12,7 +12,6 @@
#include "intel_global_state.h"
#include "intel_wm_types.h"
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_bw_state;
struct intel_crtc;
@@ -27,11 +26,12 @@ u8 intel_enabled_dbuf_slices_mask(struct intel_display *display);
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
void intel_sagv_post_plane_update(struct intel_atomic_state *state);
-bool intel_can_enable_sagv(struct drm_i915_private *i915,
+bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state);
+bool intel_can_enable_sagv(struct intel_display *display,
const struct intel_bw_state *bw_state);
-bool intel_has_sagv(struct drm_i915_private *i915);
+bool intel_has_sagv(struct intel_display *display);
-u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+u32 skl_ddb_dbuf_slice_mask(struct intel_display *display,
const struct skl_ddb_entry *entry);
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
@@ -45,14 +45,14 @@ void skl_wm_crtc_disable_noatomic(struct intel_crtc *crtc);
void skl_wm_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane);
-void skl_watermark_ipc_init(struct drm_i915_private *i915);
-void skl_watermark_ipc_update(struct drm_i915_private *i915);
-bool skl_watermark_ipc_enabled(struct drm_i915_private *i915);
-void skl_watermark_debugfs_register(struct drm_i915_private *i915);
+void skl_watermark_ipc_init(struct intel_display *display);
+void skl_watermark_ipc_update(struct intel_display *display);
+bool skl_watermark_ipc_enabled(struct intel_display *display);
+void skl_watermark_debugfs_register(struct intel_display *display);
-unsigned int skl_watermark_max_latency(struct drm_i915_private *i915,
+unsigned int skl_watermark_max_latency(struct intel_display *display,
int initial_wm_level);
-void skl_wm_init(struct drm_i915_private *i915);
+void skl_wm_init(struct intel_display *display);
const struct skl_wm_level *skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
enum plane_id plane_id,
@@ -86,13 +86,13 @@ intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
#define intel_atomic_get_new_dbuf_state(state) \
to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
-int intel_dbuf_init(struct drm_i915_private *i915);
+int intel_dbuf_init(struct intel_display *display);
int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
int ratio);
void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
-void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display,
int ratio, bool joined_mbus);
void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state);
void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state);
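
Where the converted display code below still has to call APIs that genuinely take struct drm_i915_private (for example the vlv_flisdsi sideband helpers), it derives the private locally with to_i915(display->drm) instead of keeping i915 in the signature, as band_gap_reset() below shows. A minimal sketch of that bridge, with a hypothetical example_sideband_op():

static void example_sideband_op(struct intel_display *display)
{
	/* derive the i915 private only at the boundary that needs it */
	struct drm_i915_private *i915 = to_i915(display->drm);

	vlv_flisdsi_get(i915);
	/* ... sideband register accesses ... */
	vlv_flisdsi_put(i915);
}
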
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index af717df83197..346737f15fa9 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -251,8 +251,10 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
return 0;
}
-static void band_gap_reset(struct drm_i915_private *dev_priv)
+static void band_gap_reset(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
vlv_flisdsi_get(dev_priv);
vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
@@ -269,13 +271,13 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -298,7 +300,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
else
pipe_config->pipe_bpp = 18;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
/* Enable Frame time stamp based scanline reporting */
pipe_config->mode_flags |=
I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
@@ -468,7 +470,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
vlv_flisdsi_put(dev_priv);
/* bandgap reset is needed after every time we do power gate */
- band_gap_reset(dev_priv);
+ band_gap_reset(display);
for_each_dsi_port(port, intel_dsi->ports) {
@@ -495,11 +497,11 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
glk_dsi_device_ready(encoder);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
bxt_dsi_device_ready(encoder);
else
vlv_dsi_device_ready(encoder);
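
Note the platform-check conversion running through the vlv_dsi.c hunks: the IS_GEMINILAKE()/IS_BROXTON()/IS_VALLEYVIEW()/IS_CHERRYVIEW() macros on dev_priv become boolean flags on struct intel_display, e.g.:

	/* Before */
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		bxt_dsi_device_ready(encoder);

	/* After: per-platform bitfield flags on struct intel_display */
	if (display->platform.geminilake || display->platform.broxton)
		bxt_dsi_device_ready(encoder);
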
@@ -559,23 +561,22 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
glk_dsi_disable_mipi_io(encoder);
}
-static i915_reg_t port_ctrl_reg(struct drm_i915_private *i915, enum port port)
+static i915_reg_t port_ctrl_reg(struct intel_display *display, enum port port)
{
- return IS_GEMINILAKE(i915) || IS_BROXTON(i915) ?
+ return display->platform.geminilake || display->platform.broxton ?
BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(port);
}
static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
drm_dbg_kms(display->drm, "\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
- i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
+ i915_reg_t port_ctrl = display->platform.broxton ?
BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(PORT_A);
intel_de_write(display, MIPI_DEVICE_READY(display, port),
@@ -594,7 +595,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* On VLV/CHV, wait till Clock lanes are in LP-00 state for MIPI
* Port A only. MIPI Port C has no similar bit for checking.
*/
- if ((IS_BROXTON(dev_priv) || port == PORT_A) &&
+ if ((display->platform.broxton || port == PORT_A) &&
intel_de_wait_for_clear(display, port_ctrl,
AFE_LATCHOUT, 30))
drm_err(display->drm, "DSI LP not going Low\n");
@@ -612,7 +613,6 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
@@ -620,7 +620,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
u32 temp = intel_dsi->pixel_overlap;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
for_each_dsi_port(port, intel_dsi->ports)
intel_de_rmw(display, MIPI_CTRL(display, port),
BXT_PIXEL_OVERLAP_CNT_MASK,
@@ -633,7 +633,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
+ i915_reg_t port_ctrl = port_ctrl_reg(display, port);
u32 temp;
temp = intel_de_read(display, port_ctrl);
@@ -644,7 +644,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) {
temp |= (intel_dsi->dual_link - 1)
<< DUAL_LINK_MODE_SHIFT;
- if (IS_BROXTON(dev_priv))
+ if (display->platform.broxton)
temp |= LANE_CONFIGURATION_DUAL_LINK_A;
else
temp |= crtc->pipe ?
@@ -664,12 +664,11 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
static void intel_dsi_port_disable(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
+ i915_reg_t port_ctrl = port_ctrl_reg(display, port);
/* de-assert ip_tg_enable signal */
intel_de_rmw(display, port_ctrl, DPI_ENABLE, 0);
@@ -730,7 +729,6 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum port port;
bool glk_cold_boot = false;
@@ -745,7 +743,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
* The BIOS may leave the PLL in a wonky state where it doesn't
* lock. It needs to be fully powered down to fix it.
*/
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
bxt_dsi_pll_disable(encoder);
bxt_dsi_pll_enable(encoder, pipe_config);
} else {
@@ -753,7 +751,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
vlv_dsi_pll_enable(encoder, pipe_config);
}
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
/* Add MIPI IO reset programming for modeset */
intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL);
@@ -762,13 +760,13 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL, 0);
}
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
/* Disable DPOunit clock gating, can stall pipe */
- intel_de_rmw(display, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(display),
0, DPOUNIT_CLOCK_GATE_DISABLE);
}
- if (!IS_GEMINILAKE(dev_priv))
+ if (!display->platform.geminilake)
intel_dsi_prepare(encoder, pipe_config);
/* Give the panel time to power-on and then deassert its reset */
@@ -776,7 +774,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
msleep(intel_dsi->panel_on_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
- if (IS_GEMINILAKE(dev_priv)) {
+ if (display->platform.geminilake) {
glk_cold_boot = glk_dsi_enable_io(encoder);
/* Prepare port in cold boot(s3/s4) scenario */
@@ -788,7 +786,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_dsi_device_ready(encoder);
/* Prepare port in normal boot scenario */
- if (IS_GEMINILAKE(dev_priv) && !glk_cold_boot)
+ if (display->platform.geminilake && !glk_cold_boot)
intel_dsi_prepare(encoder, pipe_config);
/* Send initialization commands in LP mode */
@@ -836,11 +834,11 @@ static void intel_dsi_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&i915->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
intel_backlight_disable(old_conn_state);
@@ -860,9 +858,9 @@ static void intel_dsi_disable(struct intel_atomic_state *state,
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
glk_dsi_clear_device_ready(encoder);
else
vlv_dsi_clear_device_ready(encoder);
@@ -874,13 +872,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
drm_dbg_kms(display->drm, "\n");
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
intel_crtc_vblank_off(old_crtc_state);
skl_scaler_disable(old_crtc_state);
@@ -907,7 +904,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
/* Transition to LP-00 */
intel_dsi_clear_device_ready(encoder);
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
/* Power down DSI regulator to save power */
intel_de_write(display, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL,
@@ -917,12 +914,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0);
}
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
bxt_dsi_pll_disable(encoder);
} else {
vlv_dsi_pll_disable(encoder);
- intel_de_rmw(display, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(display),
DPOUNIT_CLOCK_GATE_DISABLE, 0);
}
@@ -939,7 +936,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_wakeref_t wakeref;
enum port port;
@@ -957,13 +953,13 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* configuration, otherwise accessing DSI registers will hang the
* machine. See BSpec North Display Engine registers/MIPI[BXT].
*/
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
- !bxt_dsi_pll_is_enabled(dev_priv))
+ if ((display->platform.geminilake || display->platform.broxton) &&
+ !bxt_dsi_pll_is_enabled(display))
goto out_put_power;
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
+ i915_reg_t port_ctrl = port_ctrl_reg(display, port);
bool enabled = intel_de_read(display, port_ctrl) & DPI_ENABLE;
/*
@@ -971,10 +967,10 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* bit in port C control register does not get set. As a
* workaround, check pipe B conf instead.
*/
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
port == PORT_C)
enabled = intel_de_read(display,
- TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE;
+ TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE;
/* Try command mode if video mode not enabled */
if (!enabled) {
@@ -989,7 +985,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
if (!(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY))
continue;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
u32 tmp = intel_de_read(display, MIPI_CTRL(display, port));
tmp &= BXT_PIPE_SELECT_MASK;
tmp >>= BXT_PIPE_SELECT_SHIFT;
@@ -1177,15 +1173,15 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 pclk;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
bxt_dsi_get_pipe_config(encoder, pipe_config);
pclk = bxt_dsi_get_pclk(encoder, pipe_config);
} else {
@@ -1218,7 +1214,6 @@ static void set_dsi_timings(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -1253,7 +1248,7 @@ static void set_dsi_timings(struct intel_encoder *encoder,
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
/*
* Program hdisplay and vdisplay on MIPI transcoder.
* This is different from calculated hactive and
@@ -1307,7 +1302,6 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
@@ -1327,7 +1321,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
/*
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
@@ -1342,7 +1336,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
tmp &= ~READ_REQUEST_PRIORITY_MASK;
intel_de_write(display, MIPI_CTRL(display, port),
tmp | READ_REQUEST_PRIORITY_HIGH);
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
enum pipe pipe = crtc->pipe;
intel_de_rmw(display, MIPI_CTRL(display, port),
@@ -1377,7 +1371,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
if (intel_dsi->clock_stop)
tmp |= CLOCKSTOP;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
tmp |= BXT_DPHY_DEFEATURE_EN;
if (!is_cmd_mode(intel_dsi))
tmp |= BXT_DEFEATURE_DPI_FIFO_CTR;
@@ -1424,7 +1418,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
intel_de_write(display, MIPI_INIT_COUNT(display, port),
txclkesc(intel_dsi->escape_clk_div, 100));
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
!intel_dsi->dual_link) {
/*
* BXT spec says write MIPI_INIT_COUNT for
@@ -1461,7 +1455,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
intel_de_write(display, MIPI_LP_BYTECLK(display, port),
intel_dsi->lp_byte_clk);
- if (IS_GEMINILAKE(dev_priv)) {
+ if (display->platform.geminilake) {
intel_de_write(display, MIPI_TLPX_TIME_COUNT(display, port),
intel_dsi->lp_byte_clk);
/* Shadow of DPHY reg */
@@ -1513,18 +1507,17 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
static void intel_dsi_unprepare(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
return;
for_each_dsi_port(port, intel_dsi->ports) {
/* Panel commands can be sent when clock is in LP11 */
intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x0);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
bxt_dsi_reset_clocks(encoder, port);
else
vlv_dsi_reset_clocks(encoder, port);
@@ -1596,8 +1589,8 @@ static void vlv_dsi_add_properties(struct intel_connector *connector)
static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
struct intel_connector *connector = intel_dsi->attached_connector;
+ struct intel_display *display = to_intel_display(connector);
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns, extra_byte_count, tlpx_ui;
u32 ui_num, ui_den;
@@ -1645,7 +1638,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
* For GEMINILAKE, dphy_param_reg will be programmed in terms of
* HS byte clock count; for other platforms, in HS DDR clock count
*/
- mul = IS_GEMINILAKE(dev_priv) ? 8 : 2;
+ mul = display->platform.geminilake ? 8 : 2;
ths_prepare_ns = max(mipi_config->ths_prepare,
mipi_config->tclk_prepare);
@@ -1653,7 +1646,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);
if (prepare_cnt > PREPARE_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "prepare count too high %u\n",
+ drm_dbg_kms(display->drm, "prepare count too high %u\n",
prepare_cnt);
prepare_cnt = PREPARE_CNT_MAX;
}
@@ -1674,7 +1667,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
exit_zero_cnt += 1;
if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "exit zero count too high %u\n",
+ drm_dbg_kms(display->drm, "exit zero count too high %u\n",
exit_zero_cnt);
exit_zero_cnt = EXIT_ZERO_CNT_MAX;
}
@@ -1685,7 +1678,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
* ui_den, ui_num * mul);
if (clk_zero_cnt > CLK_ZERO_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "clock zero count too high %u\n",
+ drm_dbg_kms(display->drm, "clock zero count too high %u\n",
clk_zero_cnt);
clk_zero_cnt = CLK_ZERO_CNT_MAX;
}
@@ -1695,7 +1688,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul);
if (trail_cnt > TRAIL_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "trail count too high %u\n",
+ drm_dbg_kms(display->drm, "trail count too high %u\n",
trail_cnt);
trail_cnt = TRAIL_CNT_MAX;
}
@@ -1761,7 +1754,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
@@ -1770,7 +1763,7 @@ int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
* On Valleyview some DSI panels lose (v|h)sync when the clock is lower
* than 320000KHz.
*/
- if (IS_VALLEYVIEW(dev_priv))
+ if (display->platform.valleyview)
return 320000;
/*
@@ -1778,7 +1771,7 @@ int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
* picture gets unstable, despite that values are
* correct for DSI PLL and DE PLL.
*/
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
return 158400;
return 0;
@@ -1903,9 +1896,8 @@ static const struct dmi_system_id vlv_dsi_dmi_quirk_table[] = {
{ }
};
-void vlv_dsi_init(struct drm_i915_private *dev_priv)
+void vlv_dsi_init(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_dsi *intel_dsi;
struct intel_encoder *encoder;
struct intel_connector *connector;
@@ -1914,16 +1906,16 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
enum port port;
enum pipe pipe;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
/* There is no detection method for MIPI so rely on VBT */
if (!intel_bios_is_dsi_present(display, &port))
return;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- dev_priv->display.dsi.mmio_base = BXT_MIPI_BASE;
+ if (display->platform.geminilake || display->platform.broxton)
+ display->dsi.mmio_base = BXT_MIPI_BASE;
else
- dev_priv->display.dsi.mmio_base = VLV_MIPI_BASE;
+ display->dsi.mmio_base = VLV_MIPI_BASE;
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
@@ -1938,12 +1930,12 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
encoder = &intel_dsi->base;
intel_dsi->attached_connector = connector;
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_dsi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_dsi_funcs,
DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
encoder->compute_config = intel_dsi_compute_config;
encoder->pre_enable = intel_dsi_pre_enable;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
encoder->enable = bxt_dsi_enable;
encoder->disable = intel_dsi_disable;
encoder->post_disable = intel_dsi_post_disable;
@@ -1963,7 +1955,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.
*/
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
encoder->pipe_mask = ~0;
else if (port == PORT_A)
encoder->pipe_mask = BIT(PIPE_A);
@@ -1979,10 +1971,10 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
else
intel_dsi->ports = BIT(port);
- if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ if (drm_WARN_ON(display->drm, connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
- if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ if (drm_WARN_ON(display->drm, connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
/* Create a DSI host (and a device) for each port. */
@@ -1998,18 +1990,18 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
}
if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
- drm_dbg_kms(&dev_priv->drm, "no device found\n");
+ drm_dbg_kms(display->drm, "no device found\n");
goto err;
}
/* Use clock read-back from current hw-state for fastboot */
current_mode = intel_encoder_current_mode(encoder);
if (current_mode) {
- drm_dbg_kms(&dev_priv->drm, "Calculated pclk %d GOP %d\n",
+ drm_dbg_kms(display->drm, "Calculated pclk %d GOP %d\n",
intel_dsi->pclk, current_mode->clock);
if (intel_fuzzy_clock_check(intel_dsi->pclk,
current_mode->clock)) {
- drm_dbg_kms(&dev_priv->drm, "Using GOP pclk\n");
+ drm_dbg_kms(display->drm, "Using GOP pclk\n");
intel_dsi->pclk = current_mode->clock;
}
@@ -2021,7 +2013,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_dsi_vbt_gpio_init(intel_dsi,
intel_dsi_get_hw_state(encoder, &pipe));
- drm_connector_init(&dev_priv->drm, &connector->base, &intel_dsi_connector_funcs,
+ drm_connector_init(display->drm, &connector->base, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
drm_connector_helper_add(&connector->base, &intel_dsi_connector_helper_funcs);
@@ -2030,12 +2022,12 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(connector, encoder);
- mutex_lock(&dev_priv->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
intel_panel_add_vbt_lfp_fixed_mode(connector);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
if (!intel_panel_preferred_fixed_mode(connector)) {
- drm_dbg_kms(&dev_priv->drm, "no fixed mode\n");
+ drm_dbg_kms(display->drm, "no fixed mode\n");
goto err_cleanup_connector;
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.h b/drivers/gpu/drm/i915/display/vlv_dsi.h
index 277bacfbc551..ff349b5876c2 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.h
@@ -7,14 +7,14 @@
#define __VLV_DSI_H__
enum port;
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
struct intel_dsi;
#ifdef I915
void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state);
-void vlv_dsi_init(struct drm_i915_private *dev_priv);
+void vlv_dsi_init(struct intel_display *display);
#else
static inline void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
@@ -23,7 +23,7 @@ static inline int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
{
return 0;
}
-static inline void vlv_dsi_init(struct drm_i915_private *dev_priv)
+static inline void vlv_dsi_init(struct intel_display *display)
{
}
#endif
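
The #ifdef I915 split in this header is the usual pattern for display entry points shared with the xe driver: the real prototype is visible only when building as part of i915, and other builds get an empty inline stub so call sites need no conditionals. A sketch with a hypothetical example_dsi_init():

#ifdef I915
void example_dsi_init(struct intel_display *display);
#else
static inline void example_dsi_init(struct intel_display *display)
{
	/* no-op when the display core is built without the i915 backend */
}
#endif
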
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 2ed47e7d1051..7ce924a5ef90 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -57,7 +57,7 @@ static u32 dsi_clk_from_pclk(u32 pclk, enum mipi_dsi_pixel_format fmt,
return dsi_clk_khz;
}
-static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
+static int dsi_calc_mnp(struct intel_display *display,
struct intel_crtc_state *config,
int target_dsi_clk)
{
@@ -68,11 +68,11 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
/* target_dsi_clk is expected in kHz */
if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
- drm_err(&dev_priv->drm, "DSI CLK Out of Range\n");
+ drm_err(display->drm, "DSI CLK Out of Range\n");
return -ECHRNG;
}
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
ref_clk = 100000;
n = 4;
m_min = 70;
@@ -116,13 +116,13 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
static int vlv_dsi_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 dsi_clock;
u32 pll_ctl, pll_div;
u32 m = 0, p = 0, n;
- int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
+ int refclk = display->platform.cherryview ? 100000 : 25000;
int i;
pll_ctl = config->dsi_pll.ctrl;
@@ -147,7 +147,7 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder,
p--;
if (!p) {
- drm_err(&dev_priv->drm, "wrong P1 divisor\n");
+ drm_err(display->drm, "wrong P1 divisor\n");
return 0;
}
@@ -157,7 +157,7 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder,
}
if (i == ARRAY_SIZE(lfsr_converts)) {
- drm_err(&dev_priv->drm, "wrong m_seed programmed\n");
+ drm_err(display->drm, "wrong m_seed programmed\n");
return 0;
}
@@ -175,16 +175,16 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder,
int vlv_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int pclk, dsi_clk, ret;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
- ret = dsi_calc_mnp(dev_priv, config, dsi_clk);
+ ret = dsi_calc_mnp(display, config, dsi_clk);
if (ret) {
- drm_dbg_kms(&dev_priv->drm, "dsi_calc_mnp failed\n");
+ drm_dbg_kms(display->drm, "dsi_calc_mnp failed\n");
return ret;
}
@@ -196,7 +196,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
config->dsi_pll.ctrl |= DSI_PLL_VCO_EN;
- drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n",
+ drm_dbg_kms(display->drm, "dsi pll div %08x, ctrl %08x\n",
config->dsi_pll.div, config->dsi_pll.ctrl);
pclk = vlv_dsi_pclk(encoder, config);
@@ -213,9 +213,10 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
void vlv_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
vlv_cck_get(dev_priv);
@@ -235,20 +236,21 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
DSI_PLL_LOCK, 20)) {
vlv_cck_put(dev_priv);
- drm_err(&dev_priv->drm, "DSI PLL lock failed\n");
+ drm_err(display->drm, "DSI PLL lock failed\n");
return;
}
vlv_cck_put(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
+ drm_dbg_kms(display->drm, "DSI PLL locked\n");
}
void vlv_dsi_pll_disable(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
vlv_cck_get(dev_priv);
@@ -260,14 +262,14 @@ void vlv_dsi_pll_disable(struct intel_encoder *encoder)
vlv_cck_put(dev_priv);
}
-bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+bool bxt_dsi_pll_is_enabled(struct intel_display *display)
{
bool enabled;
u32 val;
u32 mask;
mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED;
- val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
+ val = intel_de_read(display, BXT_DSI_PLL_ENABLE);
enabled = (val & mask) == mask;
if (!enabled)
@@ -281,17 +283,17 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
* times, and since accessing DSI registers with invalid dividers
* causes a system hang.
*/
- val = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
- if (IS_GEMINILAKE(dev_priv)) {
+ val = intel_de_read(display, BXT_DSI_PLL_CTL);
+ if (display->platform.geminilake) {
if (!(val & BXT_DSIA_16X_MASK)) {
- drm_dbg(&dev_priv->drm,
- "Invalid PLL divider (%08x)\n", val);
+ drm_dbg_kms(display->drm,
+ "Invalid PLL divider (%08x)\n", val);
enabled = false;
}
} else {
if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
- drm_dbg(&dev_priv->drm,
- "Invalid PLL divider (%08x)\n", val);
+ drm_dbg_kms(display->drm,
+ "Invalid PLL divider (%08x)\n", val);
enabled = false;
}
}
@@ -301,29 +303,30 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
void bxt_dsi_pll_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
- intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0);
+ intel_de_rmw(display, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0);
/*
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
- if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE,
+ if (intel_de_wait_for_clear(display, BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED, 1))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timeout waiting for PLL lock deassertion\n");
}
u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 pll_ctl, pll_div;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
vlv_cck_get(dev_priv);
pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
@@ -352,14 +355,14 @@ static int bxt_dsi_pclk(struct intel_encoder *encoder,
u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 pclk;
- config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
+ config->dsi_pll.ctrl = intel_de_read(display, BXT_DSI_PLL_CTL);
pclk = bxt_dsi_pclk(encoder, config);
- drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk);
+ drm_dbg_kms(display->drm, "Calculated pclk=%u\n", pclk);
return pclk;
}
@@ -375,10 +378,9 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
temp | intel_dsi->escape_clk_div << ESCAPE_CLOCK_DIVIDER_SHIFT);
}
-static void glk_dsi_program_esc_clock(struct drm_device *dev,
- const struct intel_crtc_state *config)
+static void glk_dsi_program_esc_clock(struct intel_display *display,
+ const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 dsi_rate = 0;
u32 pll_ratio = 0;
u32 ddr_clk = 0;
@@ -415,17 +417,16 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
txesc2_div = min_t(u32, div2_value, 10);
- intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1,
+ intel_de_write(display, MIPIO_TXESC_CLK_DIV1,
(1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
- intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2,
+ intel_de_write(display, MIPIO_TXESC_CLK_DIV2,
(1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
}
/* Program BXT Mipi clocks and dividers */
-static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
+static void bxt_dsi_program_clocks(struct intel_display *display, enum port port,
const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
u32 dsi_rate = 0;
u32 pll_ratio = 0;
@@ -436,7 +437,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
u32 mipi_8by3_divider;
/* Clear old configurations */
- tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL);
+ tmp = intel_de_read(display, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
@@ -472,13 +473,13 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower);
tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper);
- intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
+ intel_de_write(display, BXT_MIPI_CLOCK_CTL, tmp);
}
int bxt_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max;
u32 dsi_clk;
@@ -494,7 +495,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
*/
dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
dsi_ratio_min = BXT_DSI_PLL_RATIO_MIN;
dsi_ratio_max = BXT_DSI_PLL_RATIO_MAX;
} else {
@@ -503,11 +504,11 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
}
if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Can't get a suitable ratio from DSI PLL ratios\n");
return -ECHRNG;
} else
- drm_dbg_kms(&dev_priv->drm, "DSI PLL calculation is Done!!\n");
+ drm_dbg_kms(display->drm, "DSI PLL calculation is Done!!\n");
/*
* Program DSI ratio and Select MIPIC and MIPIA PLL output as 8x
@@ -519,7 +520,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
/* As per recommendation from hardware team,
* Prog PVD ratio =1 if dsi ratio <= 50
*/
- if (IS_BROXTON(dev_priv) && dsi_ratio <= 50)
+ if (display->platform.broxton && dsi_ratio <= 50)
config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1;
pclk = bxt_dsi_pclk(encoder, config);
@@ -536,46 +537,45 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
void bxt_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
/* Configure PLL values */
- intel_de_write(dev_priv, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
- intel_de_posting_read(dev_priv, BXT_DSI_PLL_CTL);
+ intel_de_write(display, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
+ intel_de_posting_read(display, BXT_DSI_PLL_CTL);
/* Program TX, RX, Dphy clocks */
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
for_each_dsi_port(port, intel_dsi->ports)
- bxt_dsi_program_clocks(encoder->base.dev, port, config);
+ bxt_dsi_program_clocks(display, port, config);
} else {
- glk_dsi_program_esc_clock(encoder->base.dev, config);
+ glk_dsi_program_esc_clock(display, config);
}
/* Enable DSI PLL */
- intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE);
+ intel_de_rmw(display, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE);
/* Timeout and fail if PLL not locked */
- if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
+ if (intel_de_wait_for_set(display, BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED, 1)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timed out waiting for DSI PLL to lock\n");
return;
}
- drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
+ drm_dbg_kms(display->drm, "DSI PLL locked\n");
}
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
/* Clear old configurations */
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
tmp = intel_de_read(display, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
index f975660fa609..f26e31a7dd69 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
@@ -9,7 +9,6 @@
#include <linux/types.h>
enum port;
-struct drm_i915_private;
struct intel_crtc_state;
struct intel_display;
struct intel_encoder;
@@ -33,11 +32,11 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
#ifdef I915
-bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
+bool bxt_dsi_pll_is_enabled(struct intel_display *display);
void assert_dsi_pll_enabled(struct intel_display *display);
void assert_dsi_pll_disabled(struct intel_display *display);
#else
-static inline bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+static inline bool bxt_dsi_pll_is_enabled(struct intel_display *display)
{
return false;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index ddda468241ef..6e4d0ce3952f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
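
The SPDX churn in this and the following i915/gem files is purely positional: per the kernel's license-rules documentation, the SPDX tag belongs on the first line of the file, written as a // comment in .c files and as a /* */ comment in headers, rather than being buried inside the copyright block:

	// SPDX-License-Identifier: MIT		(first line of a .c file)
	/* SPDX-License-Identifier: MIT */	(first line of a .h file)
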
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 7d97ea2a653e..c4854c5b4e0f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.h b/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
index e6c382973129..9d7ee1579900 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index ab1af978911b..15835952352e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2011-2012 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index e5b0f66ea1fe..6e682a6a0574 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 67ac2586a0f3..0267c924634b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 9473050ac842..05e440643aa2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright 2012 Red Hat Inc
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 75a143d996e0..7a0cc51923b3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 7796c4119ef5..ca7e9216934a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008,2010 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index ea7561ae6e13..232b984f60b6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
index 28d6526e32ab..8044d34707b6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 388f90784d8a..f566191d843b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -48,8 +48,7 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
i915_gem_object_evictable(obj))
assert_object_held(obj);
#endif
- return mr && (mr->type == INTEL_MEMORY_LOCAL ||
- mr->type == INTEL_MEMORY_STOLEN_LOCAL);
+ return mr && intel_memory_type_is_local(mr->type);
}
/**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c3dabb857960..f6d37dff320d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
index 196417fd0f5c..946fb9825eb3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 356530b599ce..1f38e367c60b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2017 Intel Corporation
*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index a5f34542135c..c34f41605b46 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 68413c05c812..64600aa8227f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 8780aa243105..7f83f8bdc8fb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index ef85c6dc9fd5..f9e7cab140f8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 900c08337942..f0857c5c96df 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
index bedf1e95941a..bd5bd2c5e7f9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index ae3343c81a64..19a3eb82dc6a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
@@ -305,36 +304,20 @@ void __shmem_writeback(size_t size, struct address_space *mapping)
.range_end = LLONG_MAX,
.for_reclaim = 1,
};
- unsigned long i;
+ struct folio *folio = NULL;
+ int error = 0;
/*
 * Leave mmappings intact (GTT will have been revoked on unbinding,
- * leaving only CPU mmappings around) and add those pages to the LRU
+ * leaving only CPU mmappings around) and add those folios to the LRU
* instead of invoking writeback so they are aged and paged out
* as normal.
*/
-
- /* Begin writeback on each dirty page */
- for (i = 0; i < size >> PAGE_SHIFT; i++) {
- struct page *page;
-
- page = find_lock_page(mapping, i);
- if (!page)
- continue;
-
- if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
- int ret;
-
- SetPageReclaim(page);
- ret = mapping->a_ops->writepage(page, &wbc);
- if (!PageWriteback(page))
- ClearPageReclaim(page);
- if (!ret)
- goto put;
- }
- unlock_page(page);
-put:
- put_page(page);
+ while ((folio = writeback_iter(mapping, &wbc, folio, &error))) {
+ if (folio_mapped(folio))
+ folio_redirty_for_writepage(&wbc, folio);
+ else
+ error = shmem_writeout(folio, &wbc);
}
}
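
For context, the loop above follows the generic writeback_iter() contract: the iterator hands back each dirty folio in the range locked and with the dirty bit already cleared for I/O, and the body must either start writeback (writepage-style helpers such as shmem_writeout() consume the folio lock themselves) or redirty and unlock. A minimal sketch of that shape, assuming a hypothetical writepage-style my_writeout() helper; the explicit folio_unlock() on the skip branch reflects the documented contract:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Hypothetical writepage-style helper: starts I/O and unlocks the folio. */
static int my_writeout(struct folio *folio, struct writeback_control *wbc);

static int my_writeback_range(struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	struct folio *folio = NULL;
	int error = 0;

	while ((folio = writeback_iter(mapping, &wbc, folio, &error))) {
		if (folio_mapped(folio)) {
			/* Skip mapped folios: put the dirty bit back, unlock. */
			folio_redirty_for_writepage(&wbc, folio);
			folio_unlock(folio);
		} else {
			error = my_writeout(folio, &wbc);
		}
	}
	return error;
}
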
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index aec41f0f098f..b81e67504bbe 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008-2015 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 9d958a6f377e..3380151edfc1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008-2012 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
index af85d0c28168..8814cbcde5b5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 5ac23ff3feff..5a296ba3758a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 09b68713ab32..307a18eede72 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2012-2014 Intel Corporation
*
- * Based on amdgpu_mn, which bears the following notice:
+ * Based on amdgpu_mn, which bears the following notice:
*
* Copyright 2014 Advanced Micro Devices, Inc.
* All Rights Reserved.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 1f55e62044a4..7127e90c1a8f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index 46b9a17d6abc..65d84a93c525 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2017 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.h b/drivers/gpu/drm/i915/gem/i915_gemfs.h
index 5d835e44c4f6..16d4333c9a4e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.h
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2017 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 804f74084bd4..9c3f17e51885 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1837,6 +1837,8 @@ static int igt_mmap_revoke(void *arg)
int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
+ int ret;
+ bool unuse_mm = false;
static const struct i915_subtest tests[] = {
SUBTEST(igt_partial_tiling),
SUBTEST(igt_smoke_tiling),
@@ -1848,5 +1850,15 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_mmap_gpu),
};
- return i915_live_subtests(tests, i915);
+ if (!current->mm) {
+ kthread_use_mm(current->active_mm);
+ unuse_mm = true;
+ }
+
+ ret = i915_live_subtests(tests, i915);
+
+ if (unuse_mm)
+ kthread_unuse_mm(current->active_mm);
+
+ return ret;
}
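
The wrapper above borrows the active mm so the mmap-based subtests can run from a kernel thread, which otherwise has no user address space. A minimal sketch of the kthread_use_mm()/kthread_unuse_mm() pairing, assuming a hypothetical do_user_access() that needs current->mm and a caller that keeps mm alive for the duration:

#include <linux/kthread.h>
#include <linux/mm_types.h>

/* Hypothetical work that requires a user address space on 'current'. */
static int do_user_access(void);

static int run_with_borrowed_mm(struct mm_struct *mm)
{
	int ret;

	kthread_use_mm(mm);	/* adopt mm as current->mm */
	ret = do_user_access();
	kthread_unuse_mm(mm);	/* drop it again before returning */

	return ret;
}
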
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index f6c59f20832f..46a5aa4ab9c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -289,6 +289,14 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr,
return pte;
}
+static dma_addr_t gen8_ggtt_pte_decode(u64 pte, bool *is_present, bool *is_local)
+{
+ *is_present = pte & GEN8_PAGE_PRESENT;
+ *is_local = pte & GEN12_GGTT_PTE_LM;
+
+ return pte & GEN12_GGTT_PTE_ADDR_MASK;
+}
+
static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt)
{
struct intel_gt *gt = ggtt->vm.gt;
@@ -435,6 +443,11 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
writeq(pte, addr);
}
+static gen8_pte_t gen8_get_pte(void __iomem *addr)
+{
+ return readq(addr);
+}
+
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
@@ -450,6 +463,16 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
ggtt->invalidate(ggtt);
}
+static dma_addr_t gen8_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *pte =
+ (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ return ggtt->vm.pte_decode(gen8_get_pte(pte), is_present, is_local);
+}
+
static void gen8_ggtt_insert_page_bind(struct i915_address_space *vm,
dma_addr_t addr, u64 offset,
unsigned int pat_index, u32 flags)
@@ -605,6 +628,17 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
ggtt->invalidate(ggtt);
}
+static dma_addr_t gen6_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset,
+ bool *is_present, bool *is_local)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *pte =
+ (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ return vm->pte_decode(ioread32(pte), is_present, is_local);
+}
+
/*
* Binds an object into the global gtt with the specified cache level.
* The object will be accessible to the GPU via commands whose operands
@@ -769,6 +803,14 @@ void intel_ggtt_unbind_vma(struct i915_address_space *vm,
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
+dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+ return ggtt->vm.read_entry(vm, offset, is_present, is_local);
+}
+
/*
* Reserve the top of the GuC address space for firmware images. Addresses
* beyond GUC_GGTT_TOP in the GuC address space are inaccessible by GuC,
@@ -1245,6 +1287,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.scratch_range = gen8_ggtt_clear_range;
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
+ ggtt->vm.read_entry = gen8_ggtt_read_entry;
/*
* Serialize GTT updates with aperture access on BXT if VT-d is on,
@@ -1291,6 +1334,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
else
ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
+ ggtt->vm.pte_decode = gen8_ggtt_pte_decode;
+
return ggtt_probe_common(ggtt, size);
}
@@ -1390,6 +1435,14 @@ static u64 iris_pte_encode(dma_addr_t addr,
return pte;
}
+static dma_addr_t gen6_pte_decode(u64 pte, bool *is_present, bool *is_local)
+{
+ *is_present = pte & GEN6_PTE_VALID;
+ *is_local = false;
+
+ return ((pte & 0xff0) << 28) | (pte & ~0xfff);
+}
+
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
@@ -1428,6 +1481,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.scratch_range = gen6_ggtt_clear_range;
ggtt->vm.insert_page = gen6_ggtt_insert_page;
ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.read_entry = gen6_ggtt_read_entry;
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->invalidate = gen6_ggtt_invalidate;
@@ -1443,6 +1497,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
else
ggtt->vm.pte_encode = snb_pte_encode;
+ ggtt->vm.pte_decode = gen6_pte_decode;
+
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
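
The new read_entry()/pte_decode() hooks give the driver a uniform way to read a GGTT slot back and decode it, mirroring the existing insert_page()/pte_encode() pair. A minimal usage sketch (not from the patch), with a hypothetical dump_ggtt_entry() helper:

static void dump_ggtt_entry(struct i915_ggtt *ggtt, u64 offset)
{
	bool is_present, is_local;
	dma_addr_t addr;

	addr = intel_ggtt_read_entry(&ggtt->vm, offset,
				     &is_present, &is_local);
	if (!is_present)
		return;

	drm_dbg(&ggtt->vm.i915->drm, "GGTT entry %#llx -> %pad%s\n",
		offset, &addr, is_local ? " (lmem)" : "");
}
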
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
index 59eed0a0ce90..c5f5f0bdfb2c 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
@@ -27,6 +27,13 @@ static void gmch_ggtt_insert_page(struct i915_address_space *vm,
intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}
+static dma_addr_t gmch_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local)
+{
+ return intel_gmch_gtt_read_entry(offset >> PAGE_SHIFT,
+ is_present, is_local);
+}
+
static void gmch_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
unsigned int pat_index,
@@ -103,6 +110,7 @@ int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.insert_entries = gmch_ggtt_insert_entries;
ggtt->vm.clear_range = gmch_ggtt_clear_range;
ggtt->vm.scratch_range = gmch_ggtt_clear_range;
+ ggtt->vm.read_entry = gmch_ggtt_read_entry;
ggtt->vm.cleanup = gmch_ggtt_remove;
ggtt->invalidate = gmch_ggtt_invalidate;
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 30b128b1fde7..afbc5c769308 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -176,7 +176,6 @@ static void clear_vm_list(struct list_head *list)
i915_vma_destroy_locked(vma);
i915_gem_object_put(obj);
}
-
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 0a36ea751b63..9d3a3ad567a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -312,6 +312,7 @@ struct i915_address_space {
u64 (*pte_encode)(dma_addr_t addr,
unsigned int pat_index,
u32 flags); /* Create a valid PTE */
+ dma_addr_t (*pte_decode)(u64 pte, bool *is_present, bool *is_local);
#define PTE_READ_ONLY BIT(0)
#define PTE_LM BIT(1)
@@ -340,6 +341,8 @@ struct i915_address_space {
struct i915_vma_resource *vma_res,
unsigned int pat_index,
u32 flags);
+ dma_addr_t (*read_entry)(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local);
void (*cleanup)(struct i915_address_space *vm);
void (*foreach)(struct i915_address_space *vm,
@@ -590,6 +593,9 @@ void intel_ggtt_bind_vma(struct i915_address_space *vm,
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma_resource *vma_res);
+dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local);
+
int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 51847a846002..c481b56fa67d 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -751,7 +751,6 @@ static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
{
-
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
/*
* Note that the CSFE context has a dummy slot for CMD_BUF_CCTL
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index cf41d325712e..5dd8121f4b15 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -314,7 +314,6 @@ static const struct drm_i915_mocs_entry icl_mocs_table[] = {
};
static const struct drm_i915_mocs_entry dg1_mocs_table[] = {
-
/* UC */
MOCS_ENTRY(1, 0, L3_1_UC),
/* WB - L3 */
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 6e9977b2d180..a876a34455f1 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -365,7 +365,13 @@ static void reset_prepare(struct intel_engine_cs *engine)
ENGINE_READ_FW(engine, RING_HEAD),
ENGINE_READ_FW(engine, RING_TAIL),
ENGINE_READ_FW(engine, RING_START));
- if (!stop_ring(engine)) {
+	/*
+	 * Sometimes the engine head fails to be set to zero even after
+	 * writing to it. Retry via wait_for_atomic() with a 20ms timeout so
+	 * the engine resumes from the correct RING_HEAD; of the values we
+	 * experimented with, 20ms worked best in testing.
+	 */
+	if (wait_for_atomic(stop_ring(engine), 20)) {
drm_err(&engine->i915->drm,
"failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 71ee01d9ef64..5abc5fcc2514 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -8,7 +8,7 @@
#include <drm/intel/i915_drm.h>
#include "display/intel_display.h"
-#include "display/intel_display_irq.h"
+#include "display/intel_display_rps.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -550,6 +550,7 @@ static unsigned int init_emon(struct intel_uncore *uncore)
static bool gen5_rps_enable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_display *display = &i915->display;
struct intel_uncore *uncore = rps_to_uncore(rps);
u8 fstart, vstart;
u32 rgvmodectl;
@@ -607,9 +608,7 @@ static bool gen5_rps_enable(struct intel_rps *rps)
rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
rps->ips.last_time2 = ktime_get_raw_ns();
- spin_lock(&i915->irq_lock);
- ilk_enable_display_irq(i915, DE_PCU_EVENT);
- spin_unlock(&i915->irq_lock);
+ ilk_display_rps_enable(display);
spin_unlock_irq(&mchdev_lock);
@@ -621,14 +620,13 @@ static bool gen5_rps_enable(struct intel_rps *rps)
static void gen5_rps_disable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_display *display = &i915->display;
struct intel_uncore *uncore = rps_to_uncore(rps);
u16 rgvswctl;
spin_lock_irq(&mchdev_lock);
- spin_lock(&i915->irq_lock);
- ilk_disable_display_irq(i915, DE_PCU_EVENT);
- spin_unlock(&i915->irq_lock);
+ ilk_display_rps_disable(display);
rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 5135b90a2a40..ece445109305 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -57,7 +57,7 @@ struct intel_rps {
/*
* work, interrupts_enabled and pm_iir are protected by
- * i915->irq_lock
+ * gt->irq_lock
*/
struct timer_list timer;
struct work_struct work;
diff --git a/drivers/gpu/drm/i915/gt/intel_wopcm.h b/drivers/gpu/drm/i915/gt/intel_wopcm.h
index 17d6aa86008a..d2038b6de5e7 100644
--- a/drivers/gpu/drm/i915/gt/intel_wopcm.h
+++ b/drivers/gpu/drm/i915/gt/intel_wopcm.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2017-2018 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 116683ebe074..b37e400f74e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -156,7 +156,7 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
struct i915_wa *list;
- list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+ list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*list),
GFP_KERNEL);
if (!list) {
drm_err(&i915->drm, "No space for workaround init!\n");
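
sizeof(*wa) and sizeof(*list) are the same size here, so this is not a functional change; it just applies the preferred idiom of sizing an allocation from the pointer it is assigned to, so the size expression cannot go stale if the destination type ever diverges. A self-contained sketch of the idiom:

#include <linux/slab.h>

/*
 * Allocation idiom sketch: take the element size from the pointer the
 * result is assigned to, not from a related variable.
 */
static int *alloc_ints(size_t n)
{
	int *buf;

	buf = kmalloc_array(n, sizeof(*buf), GFP_KERNEL);
	return buf;	/* NULL on allocation failure */
}
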
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 22e750108c5f..23f04f6f8fba 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -7,6 +7,7 @@
#include "gem/i915_gem_internal.h"
+#include "i915_drv.h"
#include "i915_selftest.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
@@ -859,6 +860,14 @@ static int live_lrc_timestamp(void *arg)
};
/*
+	 * This test was designed to isolate a hardware bug.
+	 * The bug was found and fixed in later generations, but the
+	 * test now pollutes our CI on earlier generations.
+ */
+ if (GRAPHICS_VER(gt->i915) == 12)
+ return 0;
+
+ /*
	 * We want to verify that the timestamp is saved and restored across
* context switches and is monotonic.
*
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index 401bee030dbc..32c762eb79ed 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -661,7 +661,7 @@ static int live_emit_pte_full_ring(void *arg)
out_rq:
i915_request_add(rq); /* GEM_BUG_ON(rq->reserved_space > ring->space)? */
timer_delete_sync(&st.timer);
- destroy_timer_on_stack(&st.timer);
+ timer_destroy_on_stack(&st.timer);
out_unpin:
intel_context_unpin(ce);
out_put:
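
timer_destroy_on_stack() is the new name for destroy_timer_on_stack(); the lifecycle it belongs to is unchanged. A minimal sketch of an on-stack timer under the renamed helpers:

#include <linux/jiffies.h>
#include <linux/timer.h>

static void my_timer_fn(struct timer_list *t)
{
	/* runs in timer (softirq) context */
}

static void run_with_stack_timer(void)
{
	struct timer_list timer;

	timer_setup_on_stack(&timer, my_timer_fn, 0);
	mod_timer(&timer, jiffies + msecs_to_jiffies(10));

	/* ... work that the timer may interrupt ... */

	timer_delete_sync(&timer);	/* ensure it can no longer fire */
	timer_destroy_on_stack(&timer);	/* tell debugobjects it is gone */
}
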
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 908483ab0bc8..41716ed454b7 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -33,15 +33,22 @@ int live_rc6_manual(void *arg)
{
struct intel_gt *gt = arg;
struct intel_rc6 *rc6 = &gt->rc6;
- u64 rc0_power, rc6_power;
+ struct intel_rps *rps = &gt->rps;
intel_wakeref_t wakeref;
+ u64 rc0_sample_energy[2];
+ u64 rc6_sample_energy[2];
+ u64 sleep_time = 1000;
+ u32 rc0_freq = 0;
+ u32 rc6_freq = 0;
+ u64 rc0_power;
+ u64 rc6_power;
bool has_power;
+ u64 threshold;
ktime_t dt;
u64 res[2];
int err = 0;
- u32 rc0_freq = 0;
- u32 rc6_freq = 0;
- struct intel_rps *rps = &gt->rps;
+ u64 diff;
+
/*
* Our claim is that we can "encourage" the GPU to enter rc6 at will.
@@ -60,14 +67,15 @@ int live_rc6_manual(void *arg)
/* Force RC6 off for starters */
__intel_rc6_disable(rc6);
- msleep(1); /* wakeup is not immediate, takes about 100us on icl */
+ /* wakeup is not immediate, takes about 100us on icl */
+ usleep_range(1000, 2000);
res[0] = rc6_residency(rc6);
dt = ktime_get();
- rc0_power = librapl_energy_uJ();
- msleep(1000);
- rc0_power = librapl_energy_uJ() - rc0_power;
+ rc0_sample_energy[0] = librapl_energy_uJ();
+ msleep(sleep_time);
+ rc0_sample_energy[1] = librapl_energy_uJ() - rc0_sample_energy[0];
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
rc0_freq = intel_rps_read_actual_frequency_fw(rps);
@@ -79,11 +87,12 @@ int live_rc6_manual(void *arg)
}
if (has_power) {
- rc0_power = div64_u64(NSEC_PER_SEC * rc0_power,
+ rc0_power = div64_u64(NSEC_PER_SEC * rc0_sample_energy[1],
ktime_to_ns(dt));
+
if (!rc0_power) {
if (rc0_freq)
- pr_debug("No power measured while in RC0! GPU Freq: %u in RC0\n",
+ pr_debug("No power measured while in RC0! GPU Freq: %uMHz in RC0\n",
rc0_freq);
else
pr_err("No power and freq measured while in RC0\n");
@@ -98,10 +107,10 @@ int live_rc6_manual(void *arg)
res[0] = rc6_residency(rc6);
intel_uncore_forcewake_flush(rc6_to_uncore(rc6), FORCEWAKE_ALL);
dt = ktime_get();
- rc6_power = librapl_energy_uJ();
- msleep(1000);
+ rc6_sample_energy[0] = librapl_energy_uJ();
+ msleep(sleep_time);
rc6_freq = intel_rps_read_actual_frequency_fw(rps);
- rc6_power = librapl_energy_uJ() - rc6_power;
+ rc6_sample_energy[1] = librapl_energy_uJ() - rc6_sample_energy[0];
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
if (res[1] == res[0]) {
@@ -113,13 +122,24 @@ int live_rc6_manual(void *arg)
}
if (has_power) {
- rc6_power = div64_u64(NSEC_PER_SEC * rc6_power,
+ rc6_power = div64_u64(NSEC_PER_SEC * rc6_sample_energy[1],
ktime_to_ns(dt));
- pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
+ pr_info("GPU consumed %lluuW in RC0 and %lluuW in RC6\n",
rc0_power, rc6_power);
+
if (2 * rc6_power > rc0_power) {
- pr_err("GPU leaked energy while in RC6! GPU Freq: %u in RC6 and %u in RC0\n",
- rc6_freq, rc0_freq);
+ pr_err("GPU leaked energy while in RC6!\n"
+ "GPU Freq: %uMHz in RC6 and %uMHz in RC0\n"
+ "RC0 energy before & after sleep respectively: %lluuJ %lluuJ\n"
+ "RC6 energy before & after sleep respectively: %lluuJ %lluuJ\n",
+ rc6_freq, rc0_freq, rc0_sample_energy[0], rc0_sample_energy[1],
+ rc6_sample_energy[0], rc6_sample_energy[1]);
+
+ diff = res[1] - res[0];
+ threshold = (9 * NSEC_PER_MSEC * sleep_time) / 10;
+ if (diff < threshold)
+ pr_err("Did not enter RC6 properly, RC6 start residency=%lluns, RC6 end residency=%lluns\n",
+ res[0], res[1]);
err = -EINVAL;
goto out_unlock;
}
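
The unit conversions the test performs above: librapl_energy_uJ() samples are in microjoules and the elapsed time is in nanoseconds, so scaling by NSEC_PER_SEC yields average power in microwatts, and the residency threshold is 90% of the sleep interval converted to nanoseconds. A minimal sketch of both, assuming the same units:

#include <linux/math64.h>
#include <linux/time64.h>

/* uJ consumed over dt nanoseconds -> average power in uW. */
static u64 average_power_uW(u64 energy_uJ, u64 dt_ns)
{
	return div64_u64(NSEC_PER_SEC * energy_uJ, dt_ns);
}

/* RC6 residency must cover at least 90% of a sleep of sleep_ms. */
static bool rc6_residency_ok(u64 residency_delta_ns, u64 sleep_ms)
{
	return residency_delta_ns >= (9 * NSEC_PER_MSEC * sleep_ms) / 10;
}
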
diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c
index 3941f2d6fa47..69ed946a39e5 100644
--- a/drivers/gpu/drm/i915/gt/selftest_tlb.c
+++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c
@@ -143,7 +143,7 @@ pte_tlbinv(struct intel_context *ce,
if (ce->engine->class == OTHER_CLASS)
msleep(200);
else
- msleep(10);
+ usleep_range(10000, 20000);
if (va == vb) {
if (!i915_request_completed(rq)) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index 5dc0ccd07636..d550eb6edfb8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -230,7 +230,7 @@ int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, s
gt_info(gt, "Invalid GSC firmware for MTL/ARL, got %d.%d.%d.%d but need 102.x.x.x",
gsc->release.major, gsc->release.minor,
gsc->release.patch, gsc->release.build);
- return -EINVAL;
+ return -EINVAL;
}
if (min_ver.major) {
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index eedd1865bb98..62d14f82256f 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -46,6 +46,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
unsigned int flags;
u64 start, end, size;
struct drm_mm_node *node;
+ intel_wakeref_t wakeref;
int ret;
if (high_gm) {
@@ -63,12 +64,12 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
}
mutex_lock(&gt->ggtt->vm.mutex);
- mmio_hw_access_pre(gt);
+ wakeref = mmio_hw_access_pre(gt);
ret = i915_gem_gtt_insert(&gt->ggtt->vm, NULL, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
- mmio_hw_access_post(gt);
+ mmio_hw_access_post(gt, wakeref);
mutex_unlock(&gt->ggtt->vm.mutex);
if (ret)
gvt_err("fail to alloc %s gm space from host\n",
@@ -226,7 +227,7 @@ out_free_fence:
vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&gvt->gt->ggtt->vm.mutex);
- intel_runtime_pm_put_unchecked(uncore->rpm);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return -ENOSPC;
}
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index baccbf1761b7..673534f061ef 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -91,16 +91,17 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
.diff = 0,
};
struct diff_mmio *node, *next;
+ intel_wakeref_t wakeref;
INIT_LIST_HEAD(&param.diff_mmio_list);
mutex_lock(&gvt->lock);
spin_lock_bh(&gvt->scheduler.mmio_context_lock);
- mmio_hw_access_pre(gvt->gt);
+ wakeref = mmio_hw_access_pre(gvt->gt);
/* Recognize all the diff mmios to list. */
intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
mutex_unlock(&gvt->lock);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2fa7ca19ba5d..ae9b0ded3651 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -220,9 +220,11 @@ static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
static void ggtt_invalidate(struct intel_gt *gt)
{
- mmio_hw_access_pre(gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gt);
intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- mmio_hw_access_post(gt);
+ mmio_hw_access_post(gt, wakeref);
}
static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 01d890999f25..1d10c16e6465 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -570,14 +570,15 @@ enum {
GVT_FAILSAFE_GUEST_ERR,
};
-static inline void mmio_hw_access_pre(struct intel_gt *gt)
+static inline intel_wakeref_t mmio_hw_access_pre(struct intel_gt *gt)
{
- intel_runtime_pm_get(gt->uncore->rpm);
+ return intel_runtime_pm_get(gt->uncore->rpm);
}
-static inline void mmio_hw_access_post(struct intel_gt *gt)
+static inline void mmio_hw_access_post(struct intel_gt *gt,
+ intel_wakeref_t wakeref)
{
- intel_runtime_pm_put_unchecked(gt->uncore->rpm);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 4efee6797873..1344e6d20a34 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -56,6 +56,7 @@
#include "display/intel_pps_regs.h"
#include "display/intel_psr_regs.h"
#include "display/intel_sprite_regs.h"
+#include "display/intel_vga_regs.h"
#include "display/skl_universal_plane_regs.h"
#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"
@@ -264,6 +265,7 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
{
struct intel_gvt *gvt = vgpu->gvt;
unsigned int fence_num = offset_to_fence_num(off);
+ intel_wakeref_t wakeref;
int ret;
ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
@@ -271,10 +273,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
return ret;
write_vreg(vgpu, off, p_data, bytes);
- mmio_hw_access_pre(gvt->gt);
+ wakeref = mmio_hw_access_pre(gvt->gt);
intel_vgpu_write_fence(vgpu, fence_num,
vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
return 0;
}
@@ -513,7 +515,7 @@ static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
switch (wrpll_ctl & WRPLL_REF_MASK) {
case WRPLL_REF_PCH_SSC:
- refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc;
+ refclk = 135000;
break;
case WRPLL_REF_LCPLL:
refclk = 2700000;
@@ -544,7 +546,7 @@ out:
static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
u32 dp_br = 0;
- int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc;
+ int refclk = 100000;
enum dpio_phy phy = DPIO_PHY0;
enum dpio_channel ch = DPIO_CH0;
struct dpll clock = {};
@@ -1975,10 +1977,12 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
vgpu == gvt->scheduler.engine_owner[engine->id] ||
offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
- mmio_hw_access_pre(gvt->gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gvt->gt);
vgpu_vreg(vgpu, offset) =
intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
}
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
@@ -3209,10 +3213,12 @@ void intel_gvt_restore_fence(struct intel_gvt *gvt)
int i, id;
idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
- mmio_hw_access_pre(gvt->gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gvt->gt);
for (i = 0; i < vgpu_fence_sz(vgpu); i++)
intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i)));
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
}
}
@@ -3233,8 +3239,10 @@ void intel_gvt_restore_mmio(struct intel_gvt *gvt)
int id;
idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
- mmio_hw_access_pre(gvt->gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gvt->gt);
intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 9f97f743aa71..6c2d68e88266 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -447,6 +447,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
if (!vgpu_data->active)
return;
@@ -465,7 +466,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
scheduler->current_vgpu = NULL;
}
- intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_bh(&scheduler->mmio_context_lock);
for_each_engine(engine, vgpu->gvt->gt, id) {
if (scheduler->engine_owner[engine->id] == vgpu) {
@@ -474,6 +475,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&vgpu->gvt->sched_lock);
}
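
All of the GVT changes above follow one pattern: intel_runtime_pm_get() returns a wakeref cookie, and handing that cookie back to intel_runtime_pm_put(), instead of calling the _unchecked variant, lets the wakeref-tracking debug machinery pair each get with its put. The shape of the conversion, sketched:

static void do_hw_access(struct intel_runtime_pm *rpm)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(rpm);	/* device guaranteed awake */
	/* ... MMIO or other access that needs the hardware powered on ... */
	intel_runtime_pm_put(rpm, wakeref);	/* release the tracked ref */
}

Where the whole body fits in one statement block, the with_intel_runtime_pm() helper expresses the same pairing more compactly.
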
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0d9e263913ff..967c0501e91e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -66,8 +66,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
struct drm_i915_private *i915 = node_to_i915(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
-
intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p);
i915_print_iommu_status(i915, &p);
intel_gt_info_print(&to_gt(i915)->info, &p);
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index ce3cc93ea211..273bc43468a0 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -62,7 +62,6 @@
#include "display/intel_pch_refclk.h"
#include "display/intel_pps.h"
#include "display/intel_sprite_uapi.h"
-#include "display/intel_vga.h"
#include "display/skl_watermark.h"
#include "gem/i915_gem_context.h"
@@ -235,7 +234,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_uncore_mmio_debug_init_early(dev_priv);
- spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
intel_sbi_init(dev_priv);
@@ -263,9 +261,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
i915_gem_init_early(dev_priv);
- /* This must be called before any calls to HAS_PCH_* */
- intel_detect_pch(dev_priv);
-
intel_irq_init(dev_priv);
intel_display_driver_early_probe(display);
intel_clock_gating_hooks_init(dev_priv);
@@ -578,7 +573,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
*/
intel_dram_detect(dev_priv);
- intel_bw_init_hw(dev_priv);
+ intel_bw_init_hw(display);
return 0;
@@ -622,11 +617,12 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
* Perform any steps necessary to make the driver available via kernel
* internal or userspace interfaces.
*/
-static void i915_driver_register(struct drm_i915_private *dev_priv)
+static int i915_driver_register(struct drm_i915_private *dev_priv)
{
struct intel_display *display = &dev_priv->display;
struct intel_gt *gt;
unsigned int i;
+ int ret;
i915_gem_driver_register(dev_priv);
i915_pmu_register(dev_priv);
@@ -634,10 +630,14 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
intel_vgpu_register(dev_priv);
/* Reveal our presence to userspace */
- if (drm_dev_register(&dev_priv->drm, 0)) {
- drm_err(&dev_priv->drm,
- "Failed to register driver for userspace access!\n");
- return;
+ ret = drm_dev_register(&dev_priv->drm, 0);
+ if (ret) {
+ i915_probe_error(dev_priv,
+ "Failed to register driver for userspace access!\n");
+ drm_dev_unregister(&dev_priv->drm);
+ i915_pmu_unregister(dev_priv);
+ i915_gem_driver_unregister(dev_priv);
+ return ret;
}
i915_debugfs_register(dev_priv);
@@ -660,6 +660,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
if (i915_switcheroo_register(dev_priv))
drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
+
+ return 0;
}
/**
@@ -834,7 +836,9 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_cleanup_gem;
- i915_driver_register(i915);
+ ret = i915_driver_register(i915);
+ if (ret)
+ goto out_cleanup_gem;
enable_rpm_wakeref_asserts(&i915->runtime_pm);
@@ -845,6 +849,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
out_cleanup_gem:
+ intel_pxp_fini(i915);
i915_gem_suspend(i915);
i915_gem_driver_remove(i915);
i915_gem_driver_release(i915);
@@ -981,7 +986,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_dp_mst_suspend(display);
intel_irq_suspend(i915);
- intel_hpd_cancel_work(i915);
+ intel_hpd_cancel_work(display);
if (HAS_DISPLAY(i915))
intel_display_driver_suspend_access(display);
@@ -1064,7 +1069,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_display_driver_suspend(display);
intel_irq_suspend(dev_priv);
- intel_hpd_cancel_work(dev_priv);
+ intel_hpd_cancel_work(display);
if (HAS_DISPLAY(dev_priv))
intel_display_driver_suspend_access(display);
@@ -1195,13 +1200,11 @@ static int i915_drm_resume(struct drm_device *dev)
i9xx_display_sr_restore(display);
- intel_vga_redisable(display);
-
intel_gmbus_reset(display);
intel_pps_unlock_regs_wa(display);
- intel_init_pch_refclk(dev_priv);
+ intel_init_pch_refclk(display);
/*
* Interrupts have to be enabled before any batches are run. If not the
@@ -1227,7 +1230,7 @@ static int i915_drm_resume(struct drm_device *dev)
if (HAS_DISPLAY(dev_priv))
intel_display_driver_resume_access(display);
- intel_hpd_init(dev_priv);
+ intel_hpd_init(display);
intel_display_driver_resume(display);
@@ -1235,7 +1238,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_display_driver_enable_user_access(display);
drm_kms_helper_poll_enable(dev);
}
- intel_hpd_poll_disable(dev_priv);
+ intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -1575,7 +1578,7 @@ static int intel_runtime_suspend(struct device *kdev)
assert_forcewakes_inactive(&dev_priv->uncore);
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
- intel_hpd_poll_enable(dev_priv);
+ intel_hpd_poll_enable(display);
drm_dbg(&dev_priv->drm, "Device suspended\n");
return 0;
@@ -1633,11 +1636,11 @@ static int intel_runtime_resume(struct device *kdev)
* everyone else do it here.
*/
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- intel_hpd_init(dev_priv);
- intel_hpd_poll_disable(dev_priv);
+ intel_hpd_init(display);
+ intel_hpd_poll_disable(display);
}
- skl_watermark_ipc_update(dev_priv);
+ skl_watermark_ipc_update(display);
enable_rpm_wakeref_asserts(rpm);
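
i915_driver_register() previously swallowed drm_dev_register() failures; it now unwinds and propagates the error so i915_driver_probe() can bail out through its existing cleanup labels. A generic sketch of that unwind-on-failure shape, with hypothetical setup/teardown helpers:

/* Hypothetical setup steps and their inverses. */
static int setup_a(void);
static void teardown_a(void);
static int setup_b(void);

static int probe_sketch(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;

	ret = setup_b();
	if (ret)
		goto err_teardown_a;

	return 0;

err_teardown_a:
	/* Unwind only what already succeeded, in reverse order. */
	teardown_a();
	return ret;
}
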
diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h
index 4b67ad9a61cd..1e95ecb2a163 100644
--- a/drivers/gpu/drm/i915/i915_driver.h
+++ b/drivers/gpu/drm/i915/i915_driver.h
@@ -15,7 +15,6 @@ struct drm_printer;
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_TIMESTAMP 1695980603
extern const struct dev_pm_ops i915_pm_ops;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 54538b6f85df..d0e1980dcba2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -49,8 +49,6 @@
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"
-#include "soc/intel_pch.h"
-
#include "i915_drm_client.h"
#include "i915_gem.h"
#include "i915_gpu_error.h"
@@ -224,8 +222,6 @@ struct drm_i915_private {
};
unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1];
- /* protects the irq masks */
- spinlock_t irq_lock;
bool irqs_enabled;
/* LPT/WPT IOSF sideband protection */
@@ -272,9 +268,6 @@ struct drm_i915_private {
/* pm private clock gating functions */
const struct drm_i915_clock_gating_funcs *clock_gating_funcs;
- /* PCH chipset type */
- enum intel_pch pch_type;
-
unsigned long gem_quirks;
struct i915_gem_mm mm;
@@ -306,6 +299,7 @@ struct drm_i915_private {
INTEL_DRAM_LPDDR5,
INTEL_DRAM_GDDR,
INTEL_DRAM_GDDR_ECC,
+ __INTEL_DRAM_TYPE_MAX,
} type;
u8 num_qgv_points;
u8 num_psf_gv_points;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index a6613eed3398..4f785cdbd155 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -726,13 +726,6 @@ static void err_print_gt_info(struct drm_i915_error_state_buf *m,
intel_sseu_print_topology(gt->_gt->i915, &gt->info.sseu, &p);
}
-static void err_print_gt_display(struct drm_i915_error_state_buf *m,
- struct intel_gt_coredump *gt)
-{
- err_printf(m, "IER: 0x%08x\n", gt->ier);
- err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
-}
-
static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
struct intel_gt_coredump *gt)
{
@@ -878,7 +871,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (error->gt->uc && error->gt->uc->guc.is_guc_capture)
print_guc_capture = true;
- err_print_gt_display(m, error->gt);
err_print_gt_global_nonguc(m, error->gt);
err_print_gt_fences(m, error->gt);
@@ -1767,27 +1759,6 @@ gt_record_uc(struct intel_gt_coredump *gt,
return error_uc;
}
-/* Capture display registers. */
-static void gt_record_display_regs(struct intel_gt_coredump *gt)
-{
- struct intel_uncore *uncore = gt->_gt->uncore;
- struct drm_i915_private *i915 = uncore->i915;
-
- if (DISPLAY_VER(i915) >= 6 && DISPLAY_VER(i915) < 20)
- gt->derrmr = intel_uncore_read(uncore, DERRMR);
-
- if (GRAPHICS_VER(i915) >= 8)
- gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
- else if (IS_VALLEYVIEW(i915))
- gt->ier = intel_uncore_read(uncore, VLV_IER);
- else if (HAS_PCH_SPLIT(i915))
- gt->ier = intel_uncore_read(uncore, DEIER);
- else if (GRAPHICS_VER(i915) == 2)
- gt->ier = intel_uncore_read16(uncore, GEN2_IER);
- else
- gt->ier = intel_uncore_read(uncore, GEN2_IER);
-}
-
/* Capture all other registers that GuC doesn't capture. */
static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
{
@@ -1821,9 +1792,12 @@ static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
gt->gtier[i] =
intel_uncore_read(uncore, GEN8_GT_IER(i));
gt->ngtier = 4;
- } else if (HAS_PCH_SPLIT(i915)) {
+ } else if (GRAPHICS_VER(i915) >= 5) {
gt->gtier[0] = intel_uncore_read(uncore, GTIER);
gt->ngtier = 1;
+ } else {
+ gt->gtier[0] = intel_uncore_read(uncore, GEN2_IER);
+ gt->ngtier = 1;
}
gt->eir = intel_uncore_read(uncore, EIR);
@@ -2043,7 +2017,6 @@ intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
gc->_gt = gt;
gc->awake = intel_gt_pm_is_awake(gt);
- gt_record_display_regs(gc);
gt_record_global_nonguc_regs(gc);
/*
@@ -2160,7 +2133,6 @@ i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump
void i915_error_state_store(struct i915_gpu_coredump *error)
{
struct drm_i915_private *i915;
- static bool warned;
if (IS_ERR_OR_NULL(error))
return;
@@ -2174,16 +2146,8 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
i915_gpu_coredump_get(error);
- if (!xchg(&warned, true) &&
- ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
- pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
- pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
- pr_info("Please see https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html for details.\n");
- pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
- pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
- pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
- i915->drm.primary->index);
- }
+ drm_info(&i915->drm, "GPU error state saved to /sys/class/drm/card%d/error\n",
+ i915->drm.primary->index);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 749e1c55613e..182324979278 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -146,7 +146,6 @@ struct intel_gt_coredump {
/* Generic register state */
u32 eir;
u32 pgtbl_er;
- u32 ier;
u32 gtier[6], ngtier;
u32 forcewake;
u32 error; /* gen6+ */
@@ -164,8 +163,6 @@ struct intel_gt_coredump {
u32 clock_frequency;
u32 clock_period_ns;
- /* Display related */
- u32 derrmr;
u32 sfc_done[I915_MAX_SFC]; /* gen12 */
u32 nfence;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 37ca4a35daf2..95042879bec4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -277,14 +277,14 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_ack(display, &eir, &dpinvgtt);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT))
@@ -306,12 +306,12 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_handler(display, eir, dpinvgtt);
- valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
+ valleyview_pipestat_irq_handler(display, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, ret);
@@ -367,14 +367,14 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_ack(display, &eir, &dpinvgtt);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT |
@@ -392,12 +392,12 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_handler(display, eir, dpinvgtt);
- valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
+ valleyview_pipestat_irq_handler(display, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, ret);
@@ -418,6 +418,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
struct drm_i915_private *i915 = arg;
+ struct intel_display *display = &i915->display;
void __iomem * const regs = intel_uncore_regs(&i915->uncore);
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
@@ -458,9 +459,9 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
if (de_iir) {
raw_reg_write(regs, DEIIR, de_iir);
if (DISPLAY_VER(i915) >= 7)
- ivb_display_irq_handler(i915, de_iir);
+ ivb_display_irq_handler(display, de_iir);
else
- ilk_display_irq_handler(i915, de_iir);
+ ilk_display_irq_handler(display, de_iir);
ret = IRQ_HANDLED;
}
@@ -506,6 +507,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = &dev_priv->display;
void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
u32 master_ctl;
@@ -524,7 +526,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & ~GEN8_GT_IRQS) {
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- gen8_de_irq_handler(dev_priv, master_ctl);
+ gen8_de_irq_handler(display, master_ctl);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
}
@@ -556,6 +558,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private *i915 = arg;
+ struct intel_display *display = &i915->display;
void __iomem * const regs = intel_uncore_regs(&i915->uncore);
struct intel_gt *gt = to_gt(i915);
u32 master_ctl;
@@ -575,13 +578,13 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & GEN11_DISPLAY_IRQ)
- gen11_display_irq_handler(i915);
+ gen11_display_irq_handler(display);
- gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);
gen11_master_intr_enable(regs);
- gen11_gu_misc_irq_handler(i915, gu_misc_iir);
+ gen11_gu_misc_irq_handler(display, gu_misc_iir);
pmu_irq_stats(i915, IRQ_HANDLED);
@@ -613,6 +616,7 @@ static inline void dg1_master_intr_enable(void __iomem * const regs)
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = arg;
+ struct intel_display *display = &i915->display;
struct intel_gt *gt = to_gt(i915);
void __iomem * const regs = intel_uncore_regs(gt->uncore);
u32 master_tile_ctl, master_ctl;
@@ -641,36 +645,22 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
gen11_gt_irq_handler(gt, master_ctl);
if (master_ctl & GEN11_DISPLAY_IRQ)
- gen11_display_irq_handler(i915);
+ gen11_display_irq_handler(display);
- gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);
dg1_master_intr_enable(regs);
- gen11_gu_misc_irq_handler(i915, gu_misc_iir);
+ gen11_gu_misc_irq_handler(display, gu_misc_iir);
pmu_irq_stats(i915, IRQ_HANDLED);
return IRQ_HANDLED;
}
-static void ibx_irq_reset(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- if (HAS_PCH_NOP(dev_priv))
- return;
-
- gen2_irq_reset(uncore, SDE_IRQ_REGS);
-
- if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
- intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
-}
-
-/* drm_dma.h hooks
-*/
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
gen2_irq_reset(uncore, DE_IRQ_REGS);
@@ -686,45 +676,43 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv)
gen5_gt_irq_reset(to_gt(dev_priv));
- ibx_irq_reset(dev_priv);
+ ibx_display_irq_reset(display);
}
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
gen5_gt_irq_reset(to_gt(dev_priv));
- spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_reset(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ vlv_display_irq_reset(display);
}
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
gen8_master_intr_disable(intel_uncore_regs(uncore));
gen8_gt_irq_reset(to_gt(dev_priv));
- gen8_display_irq_reset(dev_priv);
+ gen8_display_irq_reset(display);
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
-
- if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_reset(dev_priv);
-
}
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));
gen11_gt_irq_reset(gt);
- gen11_display_irq_reset(dev_priv);
+ gen11_display_irq_reset(display);
gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
@@ -732,6 +720,7 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_gt *gt;
unsigned int i;
@@ -741,7 +730,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv)
for_each_gt(gt, dev_priv, i)
gen11_gt_irq_reset(gt);
- gen11_display_irq_reset(dev_priv);
+ gen11_display_irq_reset(display);
gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
@@ -751,6 +740,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv)
static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
@@ -760,25 +750,25 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
- spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_reset(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ vlv_display_irq_reset(display);
}
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
gen5_gt_irq_postinstall(to_gt(dev_priv));
- ilk_de_irq_postinstall(dev_priv);
+ ilk_de_irq_postinstall(display);
}
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
gen5_gt_irq_postinstall(to_gt(dev_priv));
- spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_postinstall(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ vlv_display_irq_postinstall(display);
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
@@ -786,20 +776,23 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
gen8_gt_irq_postinstall(to_gt(dev_priv));
- gen8_de_irq_postinstall(dev_priv);
+ gen8_de_irq_postinstall(display);
gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore));
}
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
gen11_gt_irq_postinstall(gt);
- gen11_de_irq_postinstall(dev_priv);
+ gen11_de_irq_postinstall(display);
gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);
@@ -809,6 +802,7 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
struct intel_gt *gt;
@@ -819,7 +813,7 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);
- dg1_de_irq_postinstall(dev_priv);
+ dg1_de_irq_postinstall(display);
dg1_master_intr_enable(intel_uncore_regs(uncore));
intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
@@ -827,11 +821,11 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
gen8_gt_irq_postinstall(to_gt(dev_priv));
- spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_postinstall(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ vlv_display_irq_postinstall(display);
intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
@@ -900,9 +894,10 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
- i9xx_display_irq_reset(dev_priv);
+ i9xx_display_irq_reset(display);
gen2_error_reset(uncore, GEN2_ERROR_REGS);
gen2_irq_reset(uncore, GEN2_IRQ_REGS);
@@ -911,6 +906,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv)
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
@@ -932,26 +928,20 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
enable_mask |= I915_ASLE_INTERRUPT;
}
- if (I915_HAS_HOTPLUG(dev_priv)) {
+ if (HAS_HOTPLUG(dev_priv)) {
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
}
gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);
- /* Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked check happy. */
- spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- i915_enable_asle_pipestat(dev_priv);
+ i915_display_irq_postinstall(display);
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = &dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -972,13 +962,13 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
- if (I915_HAS_HOTPLUG(dev_priv) &&
+ if (HAS_HOTPLUG(dev_priv) &&
iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
@@ -992,9 +982,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
- i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+ i915_pipestat_irq_handler(display, iir, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, ret);
@@ -1006,9 +996,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
- i9xx_display_irq_reset(dev_priv);
+ i9xx_display_irq_reset(display);
gen2_error_reset(uncore, GEN2_ERROR_REGS);
gen2_irq_reset(uncore, GEN2_IRQ_REGS);
@@ -1036,6 +1027,7 @@ static u32 i965_error_mask(struct drm_i915_private *i915)
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
@@ -1061,20 +1053,13 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);
- /* Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked check happy. */
- spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- i915_enable_asle_pipestat(dev_priv);
+ i965_display_irq_postinstall(display);
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = &dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -1096,11 +1081,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
if (iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
@@ -1119,9 +1104,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
- i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+ i965_pipestat_irq_handler(display, iir, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, IRQ_HANDLED);
@@ -1280,6 +1265,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
int irq = to_pci_dev(dev_priv->drm.dev)->irq;
if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled))
@@ -1289,7 +1275,7 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
free_irq(irq, dev_priv);
- intel_hpd_cancel_work(dev_priv);
+ intel_hpd_cancel_work(display);
dev_priv->irqs_enabled = false;
}
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 76e2801619f0..c33bd3d83069 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -100,7 +100,7 @@ int remap_io_mapping(struct vm_area_struct *vma,
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
- /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+ /* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */
r.mm = vma->vm_mm;
r.pfn = pfn;
r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
@@ -140,7 +140,7 @@ int remap_io_sg(struct vm_area_struct *vma,
};
int err;
- /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+ /* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
while (offset >= r.sgt.max >> PAGE_SHIFT) {
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index de0b413600a1..1658f1246c6f 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1666,6 +1666,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
struct i915_perf *perf = stream->perf;
struct intel_gt *gt = stream->engine->gt;
struct i915_perf_group *g = stream->engine->oa_group;
+ int m;
if (WARN_ON(stream != g->exclusive_stream))
return;
@@ -1690,10 +1691,9 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
free_oa_configs(stream);
free_noa_wait(stream);
- if (perf->spurious_report_rs.missed) {
- gt_notice(gt, "%d spurious OA report notices suppressed due to ratelimiting\n",
- perf->spurious_report_rs.missed);
- }
+ m = ratelimit_state_get_miss(&perf->spurious_report_rs);
+ if (m)
+ gt_notice(gt, "%d spurious OA report notices suppressed due to ratelimiting\n", m);
}
static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
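The hunk above swaps the open-coded read of spurious_report_rs.missed for the ratelimit_state_get_miss() accessor, so the caller no longer pokes at the struct's internals. A rough userspace analogue of the read-and-report shape; that the helper also resets the counter is an assumption here, flagged in the comment:

    #include <stdio.h>

    struct ratelimit_state { int missed; };

    /* Userspace stand-in for ratelimit_state_get_miss(): return the number
     * of suppressed events. The reset-to-zero below is assumed behaviour of
     * the kernel helper, which does its accounting atomically. */
    static int ratelimit_state_get_miss(struct ratelimit_state *rs)
    {
            int m = rs->missed;

            rs->missed = 0;
            return m;
    }

    int main(void)
    {
            struct ratelimit_state rs = { .missed = 3 };
            int m = ratelimit_state_get_miss(&rs);

            if (m)
                    printf("%d spurious OA report notices suppressed due to ratelimiting\n", m);
            return 0;
    }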
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c5064eebe063..2e4190da3e0d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -866,6 +866,7 @@
#define FP_M2_DIV_MASK 0x0000003f
#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
#define FP_M2_DIV_SHIFT 0
+
#define DPLL_TEST _MMIO(0x606c)
#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
@@ -877,11 +878,13 @@
#define DPLLA_TEST_N_BYPASS (1 << 3)
#define DPLLA_TEST_M_BYPASS (1 << 2)
#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
+
#define D_STATE _MMIO(0x6104)
#define DSTATE_GFX_RESET_I830 (1 << 6)
#define DSTATE_PLL_D3_OFF (1 << 3)
#define DSTATE_GFX_CLOCK_GATING (1 << 1)
#define DSTATE_DOT_CLOCK_GATING (1 << 0)
+
#define DSPCLK_GATE_D(__i915) _MMIO(DISPLAY_MMIO_BASE(__i915) + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
@@ -1050,7 +1053,6 @@
/*
* Overlay regs
*/
-
#define OVADD _MMIO(0x30000)
#define DOVSTA _MMIO(0x30008)
#define OC_BUF (0x3 << 20)
@@ -1077,6 +1079,7 @@
#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
#define BXT_GMBUS_GATING_DIS (1 << 14)
+#define DG2_DPFC_GATING_DIS REG_BIT(31)
#define GEN9_CLKGATE_DIS_5 _MMIO(0x46540)
#define DPCE_GATING_DIS REG_BIT(17)
@@ -1105,7 +1108,6 @@
/*
* Display engine regs
*/
-
/* Pipe/transcoder A timing regs */
#define _TRANS_HTOTAL_A 0x60000
#define _TRANS_HTOTAL_B 0x61000
@@ -1396,88 +1398,50 @@
#define VLV_DP_B _MMIO(VLV_DISPLAY_BASE + 0x64100)
#define VLV_DP_C _MMIO(VLV_DISPLAY_BASE + 0x64200)
#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)
-#define DP_PORT_EN (1 << 31)
-#define DP_PIPE_SEL_SHIFT 30
-#define DP_PIPE_SEL_MASK (1 << 30)
-#define DP_PIPE_SEL(pipe) ((pipe) << 30)
-#define DP_PIPE_SEL_SHIFT_IVB 29
-#define DP_PIPE_SEL_MASK_IVB (3 << 29)
-#define DP_PIPE_SEL_IVB(pipe) ((pipe) << 29)
+#define DP_PORT_EN REG_BIT(31)
+#define DP_PIPE_SEL_MASK REG_GENMASK(30, 30)
+#define DP_PIPE_SEL(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK, (pipe))
+#define DP_PIPE_SEL_MASK_IVB REG_GENMASK(30, 29)
+#define DP_PIPE_SEL_IVB(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK_IVB, (pipe))
#define DP_PIPE_SEL_SHIFT_CHV 16
-#define DP_PIPE_SEL_MASK_CHV (3 << 16)
-#define DP_PIPE_SEL_CHV(pipe) ((pipe) << 16)
-
-/* Link training mode - select a suitable mode for each stage */
-#define DP_LINK_TRAIN_PAT_1 (0 << 28)
-#define DP_LINK_TRAIN_PAT_2 (1 << 28)
-#define DP_LINK_TRAIN_PAT_IDLE (2 << 28)
-#define DP_LINK_TRAIN_OFF (3 << 28)
-#define DP_LINK_TRAIN_MASK (3 << 28)
-#define DP_LINK_TRAIN_SHIFT 28
-
-/* CPT Link training mode */
-#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
-#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8)
-#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8)
-#define DP_LINK_TRAIN_OFF_CPT (3 << 8)
-#define DP_LINK_TRAIN_MASK_CPT (7 << 8)
-#define DP_LINK_TRAIN_SHIFT_CPT 8
-
-/* Signal voltages. These are mostly controlled by the other end */
-#define DP_VOLTAGE_0_4 (0 << 25)
-#define DP_VOLTAGE_0_6 (1 << 25)
-#define DP_VOLTAGE_0_8 (2 << 25)
-#define DP_VOLTAGE_1_2 (3 << 25)
-#define DP_VOLTAGE_MASK (7 << 25)
-#define DP_VOLTAGE_SHIFT 25
-
-/* Signal pre-emphasis levels, like voltages, the other end tells us what
- * they want
- */
-#define DP_PRE_EMPHASIS_0 (0 << 22)
-#define DP_PRE_EMPHASIS_3_5 (1 << 22)
-#define DP_PRE_EMPHASIS_6 (2 << 22)
-#define DP_PRE_EMPHASIS_9_5 (3 << 22)
-#define DP_PRE_EMPHASIS_MASK (7 << 22)
-#define DP_PRE_EMPHASIS_SHIFT 22
-
-/* How many wires to use. I guess 3 was too hard */
-#define DP_PORT_WIDTH(width) (((width) - 1) << 19)
-#define DP_PORT_WIDTH_MASK (7 << 19)
-#define DP_PORT_WIDTH_SHIFT 19
-
-/* Mystic DPCD version 1.1 special mode */
-#define DP_ENHANCED_FRAMING (1 << 18)
-
-/* eDP */
-#define DP_PLL_FREQ_270MHZ (0 << 16)
-#define DP_PLL_FREQ_162MHZ (1 << 16)
-#define DP_PLL_FREQ_MASK (3 << 16)
-
-/* locked once port is enabled */
-#define DP_PORT_REVERSAL (1 << 15)
-
-/* eDP */
-#define DP_PLL_ENABLE (1 << 14)
-
-/* sends the clock on lane 15 of the PEG for debug */
-#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
-
-#define DP_SCRAMBLING_DISABLE (1 << 12)
-#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
-
-/* limit RGB values to avoid confusing TVs */
-#define DP_COLOR_RANGE_16_235 (1 << 8)
-
-/* Turn on the audio link */
-#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
-
-/* vs and hs sync polarity */
-#define DP_SYNC_VS_HIGH (1 << 4)
-#define DP_SYNC_HS_HIGH (1 << 3)
-
-/* A fantasy */
-#define DP_DETECTED (1 << 2)
+#define DP_PIPE_SEL_MASK_CHV REG_GENMASK(17, 16)
+#define DP_PIPE_SEL_CHV(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK_CHV, (pipe))
+#define DP_LINK_TRAIN_MASK REG_GENMASK(29, 28)
+#define DP_LINK_TRAIN_PAT_1 REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 0)
+#define DP_LINK_TRAIN_PAT_2 REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 1)
+#define DP_LINK_TRAIN_PAT_IDLE REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 2)
+#define DP_LINK_TRAIN_OFF REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 3)
+#define DP_LINK_TRAIN_MASK_CPT REG_GENMASK(10, 8)
+#define DP_LINK_TRAIN_PAT_1_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 0)
+#define DP_LINK_TRAIN_PAT_2_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 1)
+#define DP_LINK_TRAIN_PAT_IDLE_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 2)
+#define DP_LINK_TRAIN_OFF_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 3)
+#define DP_VOLTAGE_MASK REG_GENMASK(27, 25)
+#define DP_VOLTAGE_0_4 REG_FIELD_PREP(DP_VOLTAGE_MASK, 0)
+#define DP_VOLTAGE_0_6 REG_FIELD_PREP(DP_VOLTAGE_MASK, 1)
+#define DP_VOLTAGE_0_8 REG_FIELD_PREP(DP_VOLTAGE_MASK, 2)
+#define DP_VOLTAGE_1_2 REG_FIELD_PREP(DP_VOLTAGE_MASK, 3)
+#define DP_PRE_EMPHASIS_MASK REG_GENMASK(24, 22)
+#define DP_PRE_EMPHASIS_0 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 0)
+#define DP_PRE_EMPHASIS_3_5 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 1)
+#define DP_PRE_EMPHASIS_6 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 2)
+#define DP_PRE_EMPHASIS_9_5 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 3)
+#define DP_PORT_WIDTH_MASK REG_GENMASK(21, 19)
+#define DP_PORT_WIDTH(width) REG_FIELD_PREP(DP_PORT_WIDTH_MASK, (width) - 1)
+#define DP_ENHANCED_FRAMING REG_BIT(18)
+#define EDP_PLL_FREQ_MASK REG_GENMASK(17, 16)
+#define EDP_PLL_FREQ_270MHZ REG_FIELD_PREP(EDP_PLL_FREQ_MASK, 0)
+#define EDP_PLL_FREQ_162MHZ REG_FIELD_PREP(EDP_PLL_FREQ_MASK, 1)
+#define DP_PORT_REVERSAL REG_BIT(15)
+#define EDP_PLL_ENABLE REG_BIT(14)
+#define DP_CLOCK_OUTPUT_ENABLE REG_BIT(13)
+#define DP_SCRAMBLING_DISABLE REG_BIT(12)
+#define DP_SCRAMBLING_DISABLE_ILK REG_BIT(7)
+#define DP_COLOR_RANGE_16_235 REG_BIT(8)
+#define DP_AUDIO_OUTPUT_ENABLE REG_BIT(6)
+#define DP_SYNC_VS_HIGH REG_BIT(4)
+#define DP_SYNC_HS_HIGH REG_BIT(3)
+#define DP_DETECTED REG_BIT(2)
/*
* Computing GMCH M and N values for the Display Port link
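The large block above rewrites the DP port defines mask-first: REG_GENMASK(h, l) states where a field lives, and REG_FIELD_PREP(mask, value) shifts a value into that field, so each bit position is written down exactly once. A self-contained sketch with simplified stand-ins for the kernel macros (the real ones live in the i915 register-definition headers and do more compile-time validation):

    #include <assert.h>
    #include <stdio.h>

    /* Minimal stand-ins for REG_BIT()/REG_GENMASK()/REG_FIELD_PREP(). */
    #define REG_BIT(n)              (1u << (n))
    #define REG_GENMASK(h, l)       ((0xffffffffu >> (31 - (h))) & (0xffffffffu << (l)))
    #define REG_FIELD_PREP(mask, v) (((unsigned int)(v) << __builtin_ctz(mask)) & (mask))

    #define DP_LINK_TRAIN_MASK      REG_GENMASK(29, 28)
    #define DP_LINK_TRAIN_PAT_2     REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 1)

    int main(void)
    {
            /* Pattern 2 lands in bits 29:28 exactly as the old (1 << 28) did. */
            assert(DP_LINK_TRAIN_PAT_2 == (1u << 28));
            printf("DP_LINK_TRAIN_PAT_2 = 0x%08x\n", DP_LINK_TRAIN_PAT_2);
            return 0;
    }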
@@ -1811,18 +1775,6 @@
#define SWF3(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4)
-/* VBIOS regs */
-#define VGACNTRL _MMIO(0x71400)
-# define VGA_DISP_DISABLE (1 << 31)
-# define VGA_2X_MODE (1 << 30)
-# define VGA_PIPE_B_SELECT (1 << 29)
-
-#define VLV_VGACNTRL _MMIO(VLV_DISPLAY_BASE + 0x71400)
-
-/* Ironlake */
-
-#define CPU_VGACNTRL _MMIO(0x41000)
-
#define DIGITAL_PORT_HOTPLUG_CNTRL _MMIO(0x44030)
#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
#define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */
@@ -2783,7 +2735,6 @@
* functionality covered in PCH_PORT_HOTPLUG is split into
* SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC.
*/
-
#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
#define SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin) (0x8 << (_HPD_PIN_DDI(hpd_pin) * 4))
#define SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(hpd_pin) (0x4 << (_HPD_PIN_DDI(hpd_pin) * 4))
@@ -2863,7 +2814,6 @@
#define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3))
/* transcoder */
-
#define _PCH_TRANS_HTOTAL_A 0xe0000
#define _PCH_TRANS_HTOTAL_B 0xe1000
#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
@@ -3794,7 +3744,6 @@ enum skl_power_gate {
/*
* SKL Clocks
*/
-
/* CDCLK_CTL */
#define CDCLK_CTL _MMIO(0x46000)
#define CDCLK_FREQ_SEL_MASK REG_GENMASK(27, 26)
@@ -4242,6 +4191,11 @@ enum skl_power_gate {
#define MTL_CLKGATE_DIS_TRANS(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _MTL_CLKGATE_DIS_TRANS_A)
#define MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS REG_BIT(7)
+#define _MTL_PIPE_CLKGATE_DIS2_A 0x60114
+#define _MTL_PIPE_CLKGATE_DIS2_B 0x61114
+#define MTL_PIPE_CLKGATE_DIS2(pipe) _MMIO_PIPE(pipe, _MTL_PIPE_CLKGATE_DIS2_A, _MTL_PIPE_CLKGATE_DIS2_B)
+#define MTL_DPFC_GATING_DIS REG_BIT(6)
+
#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8)
#define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4)
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 609214231ffc..f7fb40cfdb70 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -40,8 +40,6 @@
struct drm_i915_private;
struct timer_list;
-#define FDO_BUG_URL "https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html"
-
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 76d84cbb8361..d581a9d2c063 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -21,6 +21,7 @@
#include "display/intel_pfit_regs.h"
#include "display/intel_psr_regs.h"
#include "display/intel_sprite_regs.h"
+#include "display/intel_vga_regs.h"
#include "display/skl_universal_plane_regs.h"
#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index d40ee1b42110..59bd603e6deb 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -171,6 +171,17 @@ intel_memory_region_by_type(struct drm_i915_private *i915,
return NULL;
}
+bool intel_memory_type_is_local(enum intel_memory_type mem_type)
+{
+ switch (mem_type) {
+ case INTEL_MEMORY_LOCAL:
+ case INTEL_MEMORY_STOLEN_LOCAL:
+ return true;
+ default:
+ return false;
+ }
+}
+
/**
* intel_memory_region_reserve - Reserve a memory range
* @mem: The region for which we want to reserve a range.
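The intel_memory_type_is_local() helper added above centralizes the "is this region device-local?" test so callers stop open-coding the two enum cases. A standalone analogue (the enum values below are illustrative, not the driver's actual definitions):

    #include <stdbool.h>
    #include <stdio.h>

    enum intel_memory_type {
            INTEL_MEMORY_SYSTEM,
            INTEL_MEMORY_LOCAL,
            INTEL_MEMORY_STOLEN_SYSTEM,
            INTEL_MEMORY_STOLEN_LOCAL,
    };

    /* Mirrors the new helper: device-local VRAM and stolen-local both count. */
    static bool intel_memory_type_is_local(enum intel_memory_type t)
    {
            switch (t) {
            case INTEL_MEMORY_LOCAL:
            case INTEL_MEMORY_STOLEN_LOCAL:
                    return true;
            default:
                    return false;
            }
    }

    int main(void)
    {
            printf("system local? %d\n", intel_memory_type_is_local(INTEL_MEMORY_SYSTEM));
            printf("stolen-local local? %d\n", intel_memory_type_is_local(INTEL_MEMORY_STOLEN_LOCAL));
            return 0;
    }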
@@ -216,7 +227,7 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem,
return err;
}
-static const char *region_type_str(u16 type)
+const char *intel_memory_type_str(enum intel_memory_type type)
{
switch (type) {
case INTEL_MEMORY_SYSTEM:
@@ -260,7 +271,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->instance = instance;
snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
- region_type_str(type), instance);
+ intel_memory_type_str(type), instance);
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 5973b6fe13cf..b3b75be9ced5 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -85,6 +85,8 @@ struct intel_memory_region {
void *region_private;
};
+bool intel_memory_type_is_local(enum intel_memory_type mem_type);
+
struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
u16 class, u16 instance);
@@ -107,6 +109,7 @@ void intel_memory_regions_driver_release(struct drm_i915_private *i915);
struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
enum intel_memory_type mem_type);
+const char *intel_memory_type_str(enum intel_memory_type type);
__printf(2, 3) void
intel_memory_region_set_name(struct intel_memory_region *mem,
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 48836ef52d40..a2894a56e18f 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -7,8 +7,6 @@
#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H
-#include <drm/drm_print.h>
-
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
@@ -16,11 +14,13 @@
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/ref_tracker.h>
-#include <linux/slab.h>
-#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+struct drm_printer;
+struct intel_runtime_pm;
+struct intel_wakeref;
+
typedef struct ref_tracker *intel_wakeref_t;
#define INTEL_REFTRACK_DEAD_COUNT 16
@@ -32,9 +32,6 @@ typedef struct ref_tracker *intel_wakeref_t;
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
-struct intel_runtime_pm;
-struct intel_wakeref;
-
struct intel_wakeref_ops {
int (*get)(struct intel_wakeref *wf);
int (*put)(struct intel_wakeref *wf);
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
index d5ecc68155da..522ad49406ce 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -77,7 +77,7 @@ void timed_fence_fini(struct timed_fence *tf)
if (timer_delete_sync(&tf->timer))
i915_sw_fence_commit(&tf->fence);
- destroy_timer_on_stack(&tf->timer);
+ timer_destroy_on_stack(&tf->timer);
i915_sw_fence_fini(&tf->fence);
}
diff --git a/drivers/gpu/drm/i915/selftests/librapl.c b/drivers/gpu/drm/i915/selftests/librapl.c
index eb03b5b28bad..25b8726b9dff 100644
--- a/drivers/gpu/drm/i915/selftests/librapl.c
+++ b/drivers/gpu/drm/i915/selftests/librapl.c
@@ -22,12 +22,12 @@ u64 librapl_energy_uJ(void)
unsigned long long power;
u32 units;
- if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
+ if (rdmsrq_safe(MSR_RAPL_POWER_UNIT, &power))
return 0;
units = (power & 0x1f00) >> 8;
- if (rdmsrl_safe(MSR_PP1_ENERGY_STATUS, &power))
+ if (rdmsrq_safe(MSR_PP1_ENERGY_STATUS, &power))
return 0;
return (1000000 * power) >> units; /* convert to uJ */
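For reference, the conversion in librapl_energy_uJ() works because bits 12:8 of MSR_RAPL_POWER_UNIT hold the Energy Status Unit (ESU) and one raw energy count equals 1/2^ESU joules, so uJ = raw * 10^6 / 2^ESU. A tiny standalone check with made-up register values:

    #include <stdio.h>

    int main(void)
    {
            /* Illustrative values: ESU = 14 means 1 count = 2^-14 J ~= 61 uJ. */
            unsigned long long unit_msr = 0x0e00;   /* bits 12:8 = 14 */
            unsigned long long raw = 1000;          /* energy status counts */
            unsigned int units = (unit_msr & 0x1f00) >> 8;

            /* uJ = raw * 10^6 / 2^ESU, matching librapl_energy_uJ() above. */
            printf("%llu uJ\n", (1000000 * raw) >> units);
            return 0;
    }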
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index f60eedb0e92c..eee5c4f45a43 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -33,8 +33,14 @@ static const char *intel_dram_type_str(enum intel_dram_type type)
DRAM_TYPE_STR(DDR4),
DRAM_TYPE_STR(LPDDR3),
DRAM_TYPE_STR(LPDDR4),
+ DRAM_TYPE_STR(DDR5),
+ DRAM_TYPE_STR(LPDDR5),
+ DRAM_TYPE_STR(GDDR),
+ DRAM_TYPE_STR(GDDR_ECC),
};
+ BUILD_BUG_ON(ARRAY_SIZE(str) != __INTEL_DRAM_TYPE_MAX);
+
if (type >= ARRAY_SIZE(str))
type = INTEL_DRAM_UNKNOWN;
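The added BUILD_BUG_ON ties the string table's length to the enum terminator, so a new DRAM type cannot be introduced without also naming it. The same guard in portable C11, using static_assert in place of BUILD_BUG_ON (enum values shortened for the sketch):

    #include <assert.h>
    #include <stdio.h>

    enum dram_type { DRAM_UNKNOWN, DRAM_DDR4, DRAM_DDR5, __DRAM_TYPE_MAX };

    static const char * const str[] = {
            [DRAM_UNKNOWN] = "unknown",
            [DRAM_DDR4]    = "DDR4",
            [DRAM_DDR5]    = "DDR5",
    };

    /* Compile-time guard: adding an enum value without a string breaks the build. */
    static_assert(sizeof(str) / sizeof(str[0]) == __DRAM_TYPE_MAX, "str[] out of sync");

    int main(void)
    {
            enum dram_type t = DRAM_DDR5;

            if (t >= __DRAM_TYPE_MAX)
                    t = DRAM_UNKNOWN;
            printf("DRAM type: %s\n", str[t]);
            return 0;
    }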
@@ -444,8 +450,6 @@ skl_get_dram_info(struct drm_i915_private *i915)
int ret;
dram_info->type = skl_get_dram_type(i915);
- drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
- intel_dram_type_str(dram_info->type));
ret = skl_dram_get_channels_info(i915);
if (ret)
@@ -560,10 +564,9 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
dram_info->type != type);
drm_dbg_kms(&i915->drm,
- "CH%u DIMM size: %u Gb, width: X%u, ranks: %u, type: %s\n",
+ "CH%u DIMM size: %u Gb, width: X%u, ranks: %u\n",
i - BXT_D_CR_DRP0_DUNIT_START,
- dimm.size, dimm.width, dimm.ranks,
- intel_dram_type_str(type));
+ dimm.size, dimm.width, dimm.ranks);
if (valid_ranks == 0)
valid_ranks = dimm.ranks;
@@ -730,6 +733,10 @@ void intel_dram_detect(struct drm_i915_private *i915)
ret = bxt_get_dram_info(i915);
else
ret = skl_get_dram_info(i915);
+
+ drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
+ intel_dram_type_str(dram_info->type));
+
if (ret)
return;
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.c b/drivers/gpu/drm/i915/soc/intel_pch.c
deleted file mode 100644
index 82dc7fbd1a3e..000000000000
--- a/drivers/gpu/drm/i915/soc/intel_pch.c
+++ /dev/null
@@ -1,316 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright 2019 Intel Corporation.
- */
-
-#include "i915_drv.h"
-#include "i915_utils.h"
-#include "intel_pch.h"
-
-#define INTEL_PCH_DEVICE_ID_MASK 0xff80
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
-#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
-#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
-#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
-#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
-#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
-#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
-#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
-#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
-#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
-#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
-#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380
-#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
-#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880
-#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
-#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
-#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
-#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
-#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
-#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
-#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
-#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480
-#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
-#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
-#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
-
-/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
-static enum intel_pch
-intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
-{
- switch (id) {
- case INTEL_PCH_IBX_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n");
- drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) != 5);
- return PCH_IBX;
- case INTEL_PCH_CPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- GRAPHICS_VER(dev_priv) != 6 && !IS_IVYBRIDGE(dev_priv));
- return PCH_CPT;
- case INTEL_PCH_PPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- GRAPHICS_VER(dev_priv) != 6 && !IS_IVYBRIDGE(dev_priv));
- /* PPT is CPT compatible */
- return PCH_CPT;
- case INTEL_PCH_LPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- drm_WARN_ON(&dev_priv->drm,
- IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv));
- return PCH_LPT_H;
- case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv));
- return PCH_LPT_LP;
- case INTEL_PCH_WPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- drm_WARN_ON(&dev_priv->drm,
- IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv));
- /* WPT is LPT compatible */
- return PCH_LPT_H;
- case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv));
- /* WPT is LPT compatible */
- return PCH_LPT_LP;
- case INTEL_PCH_SPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
- return PCH_SPT;
- case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- return PCH_SPT;
- case INTEL_PCH_KBP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- /* KBP is SPT compatible */
- return PCH_SPT;
- case INTEL_PCH_CNP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- return PCH_CNP;
- case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm,
- "Found Cannon Lake LP PCH (CNP-LP)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- return PCH_CNP;
- case INTEL_PCH_CMP_DEVICE_ID_TYPE:
- case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv) &&
- !IS_ROCKETLAKE(dev_priv));
- /* CMP is CNP compatible */
- return PCH_CNP;
- case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- /* CMP-V is based on KBP, which is SPT compatible */
- return PCH_SPT;
- case INTEL_PCH_ICP_DEVICE_ID_TYPE:
- case INTEL_PCH_ICP2_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- return PCH_ICP;
- case INTEL_PCH_MCC_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
- drm_WARN_ON(&dev_priv->drm, !(IS_JASPERLAKE(dev_priv) ||
- IS_ELKHARTLAKE(dev_priv)));
- /* MCC is TGP compatible */
- return PCH_TGP;
- case INTEL_PCH_TGP_DEVICE_ID_TYPE:
- case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv) &&
- !IS_ROCKETLAKE(dev_priv) &&
- !IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- return PCH_TGP;
- case INTEL_PCH_JSP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
- drm_WARN_ON(&dev_priv->drm, !(IS_JASPERLAKE(dev_priv) ||
- IS_ELKHARTLAKE(dev_priv)));
- /* JSP is ICP compatible */
- return PCH_ICP;
- case INTEL_PCH_ADP_DEVICE_ID_TYPE:
- case INTEL_PCH_ADP2_DEVICE_ID_TYPE:
- case INTEL_PCH_ADP3_DEVICE_ID_TYPE:
- case INTEL_PCH_ADP4_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Alder Lake PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) &&
- !IS_ALDERLAKE_P(dev_priv));
- return PCH_ADP;
- default:
- return PCH_NONE;
- }
-}
-
-static bool intel_is_virt_pch(unsigned short id,
- unsigned short svendor, unsigned short sdevice)
-{
- return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
- id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
- (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
- svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
- sdevice == PCI_SUBDEVICE_ID_QEMU));
-}
-
-static void
-intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
- unsigned short *pch_id, enum intel_pch *pch_type)
-{
- unsigned short id = 0;
-
- /*
- * In a virtualized passthrough environment we can be in a
- * setup where the ISA bridge is not able to be passed through.
- * In this case, a south bridge can be emulated and we have to
- * make an educated guess as to which PCH is really there.
- */
-
- if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
- id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
- else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
- id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
- id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
- else if (IS_ICELAKE(dev_priv))
- id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
- else if (IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv))
- id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
- else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
- id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
- else if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
- id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
- else if (GRAPHICS_VER(dev_priv) == 6 || IS_IVYBRIDGE(dev_priv))
- id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
- else if (GRAPHICS_VER(dev_priv) == 5)
- id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
-
- if (id)
- drm_dbg_kms(&dev_priv->drm, "Assuming PCH ID %04x\n", id);
- else
- drm_dbg_kms(&dev_priv->drm, "Assuming no PCH\n");
-
- *pch_type = intel_pch_type(dev_priv, id);
-
- /* Sanity check virtual PCH id */
- if (drm_WARN_ON(&dev_priv->drm,
- id && *pch_type == PCH_NONE))
- id = 0;
-
- *pch_id = id;
-}
-
-void intel_detect_pch(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pch = NULL;
- unsigned short id;
- enum intel_pch pch_type;
-
- /*
- * South display engine on the same PCI device: just assign the fake
- * PCH.
- */
- if (DISPLAY_VER(dev_priv) >= 20) {
- dev_priv->pch_type = PCH_LNL;
- return;
- } else if (IS_BATTLEMAGE(dev_priv) || IS_METEORLAKE(dev_priv)) {
- /*
- * Both north display and south display are on the SoC die.
- * The real PCH (if it even exists) is uninvolved in display.
- */
- dev_priv->pch_type = PCH_MTL;
- return;
- } else if (IS_DG2(dev_priv)) {
- dev_priv->pch_type = PCH_DG2;
- return;
- } else if (IS_DG1(dev_priv)) {
- dev_priv->pch_type = PCH_DG1;
- return;
- }
-
- /*
- * The reason to probe ISA bridge instead of Dev31:Fun0 is to
- * make graphics device passthrough work easy for VMM, that only
- * need to expose ISA bridge to let driver know the real hardware
- * underneath. This is a requirement from virtualization team.
- *
- * In some virtualized environments (e.g. XEN), there is irrelevant
- * ISA bridge in the system. To work reliably, we should scan through
- * all the ISA bridge devices and check for the first match, instead
- * of only checking the first one.
- */
- while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
- if (pch->vendor != PCI_VENDOR_ID_INTEL)
- continue;
-
- id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
-
- pch_type = intel_pch_type(dev_priv, id);
- if (pch_type != PCH_NONE) {
- dev_priv->pch_type = pch_type;
- break;
- } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
- pch->subsystem_device)) {
- intel_virt_detect_pch(dev_priv, &id, &pch_type);
- dev_priv->pch_type = pch_type;
- break;
- }
- }
-
- /*
- * Use PCH_NOP (PCH but no South Display) for PCH platforms without
- * display.
- */
- if (pch && !HAS_DISPLAY(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
- "Display disabled, reverting to NOP PCH\n");
- dev_priv->pch_type = PCH_NOP;
- } else if (!pch) {
- if (i915_run_as_guest() && HAS_DISPLAY(dev_priv)) {
- intel_virt_detect_pch(dev_priv, &id, &pch_type);
- dev_priv->pch_type = pch_type;
- } else {
- drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
- }
- }
-
- pci_dev_put(pch);
-}
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.h b/drivers/gpu/drm/i915/soc/intel_pch.h
deleted file mode 100644
index 635aea7a5539..000000000000
--- a/drivers/gpu/drm/i915/soc/intel_pch.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright 2019 Intel Corporation.
- */
-
-#ifndef __INTEL_PCH__
-#define __INTEL_PCH__
-
-struct drm_i915_private;
-
-/*
- * Sorted by south display engine compatibility.
- * If the new PCH comes with a south display engine that is not
- * inherited from the latest item, please do not add it to the
- * end. Instead, add it right after its "parent" PCH.
- */
-enum intel_pch {
- PCH_NOP = -1, /* PCH without south display */
- PCH_NONE = 0, /* No PCH present */
- PCH_IBX, /* Ibexpeak PCH */
- PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
- PCH_LPT_H, /* Lynxpoint/Wildcatpoint H PCH */
- PCH_LPT_LP, /* Lynxpoint/Wildcatpoint LP PCH */
- PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
- PCH_CNP, /* Cannon/Comet Lake PCH */
- PCH_ICP, /* Ice Lake/Jasper Lake PCH */
- PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */
- PCH_ADP, /* Alder Lake PCH */
-
- /* Fake PCHs, functionality handled on the same PCI dev */
- PCH_DG1 = 1024,
- PCH_DG2,
- PCH_MTL,
- PCH_LNL,
-};
-
-#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
-#define HAS_PCH_DG2(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG2)
-#define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP)
-#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
-#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
-#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
-#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
-#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
-#define HAS_PCH_LPT_H(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT_H)
-#define HAS_PCH_LPT_LP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT_LP)
-#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT_H || \
- INTEL_PCH_TYPE(dev_priv) == PCH_LPT_LP)
-#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
-#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
-#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
-#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
-
-void intel_detect_pch(struct drm_i915_private *dev_priv);
-
-#endif /* __INTEL_PCH__ */
diff --git a/drivers/gpu/drm/imagination/Makefile b/drivers/gpu/drm/imagination/Makefile
index 3d9d4d40fb80..7cca66f00a38 100644
--- a/drivers/gpu/drm/imagination/Makefile
+++ b/drivers/gpu/drm/imagination/Makefile
@@ -12,8 +12,10 @@ powervr-y := \
pvr_fw.o \
pvr_fw_meta.o \
pvr_fw_mips.o \
+ pvr_fw_riscv.o \
pvr_fw_startstop.o \
pvr_fw_trace.o \
+ pvr_fw_util.o \
pvr_gem.o \
pvr_hwrt.o \
pvr_job.o \
diff --git a/drivers/gpu/drm/imagination/pvr_debugfs.c b/drivers/gpu/drm/imagination/pvr_debugfs.c
index 6b77c9b4bde8..c7ce7daaa87a 100644
--- a/drivers/gpu/drm/imagination/pvr_debugfs.c
+++ b/drivers/gpu/drm/imagination/pvr_debugfs.c
@@ -28,9 +28,8 @@ pvr_debugfs_init(struct drm_minor *minor)
struct drm_device *drm_dev = minor->dev;
struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
struct dentry *root = minor->debugfs_root;
- size_t i;
- for (i = 0; i < ARRAY_SIZE(pvr_debugfs_entries); ++i) {
+ for (size_t i = 0; i < ARRAY_SIZE(pvr_debugfs_entries); ++i) {
const struct pvr_debugfs_entry *entry = &pvr_debugfs_entries[i];
struct dentry *dir;
diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c
index 1704c0268589..8b9ba4983c4c 100644
--- a/drivers/gpu/drm/imagination/pvr_device.c
+++ b/drivers/gpu/drm/imagination/pvr_device.c
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/types.h>
@@ -120,6 +121,21 @@ static int pvr_device_clk_init(struct pvr_device *pvr_dev)
return 0;
}
+static int pvr_device_reset_init(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct reset_control *reset;
+
+ reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL);
+ if (IS_ERR(reset))
+ return dev_err_probe(drm_dev->dev, PTR_ERR(reset),
+ "failed to get gpu reset line\n");
+
+ pvr_dev->reset = reset;
+
+ return 0;
+}
+
/**
* pvr_device_process_active_queues() - Process all queue related events.
* @pvr_dev: PowerVR device to check
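pvr_device_reset_init() above uses devm_reset_control_get_optional_exclusive(), which returns NULL rather than an error when the devicetree describes no reset line, and the reset_control_*() calls all accept a NULL handle as a no-op, so the driver needs no "has reset?" branches. A userspace sketch of that NULL-tolerant optional-handle contract (the gpu_power_up() wrapper is hypothetical, not the driver's actual power path):

    #include <stdio.h>

    struct reset_control { const char *name; };

    /* NULL-tolerant op, mirroring the kernel's reset_control_*() contract:
     * an optional reset absent from DT is a NULL handle and every operation
     * on it is a successful no-op. */
    static int reset_control_deassert(struct reset_control *rst)
    {
            if (!rst)
                    return 0;
            printf("deassert %s\n", rst->name);
            return 0;
    }

    static int gpu_power_up(struct reset_control *rst)
    {
            /* ... enable clocks/regulators, then release the optional reset ... */
            return reset_control_deassert(rst);
    }

    int main(void)
    {
            struct reset_control gpu_rst = { .name = "gpu" };

            gpu_power_up(NULL);      /* platform without a reset line */
            gpu_power_up(&gpu_rst);  /* platform with one */
            return 0;
    }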
@@ -146,9 +162,61 @@ static void pvr_device_process_active_queues(struct pvr_device *pvr_dev)
mutex_unlock(&pvr_dev->queues.lock);
}
+static bool pvr_device_safety_irq_pending(struct pvr_device *pvr_dev)
+{
+ u32 events;
+
+ WARN_ON_ONCE(!pvr_dev->has_safety_events);
+
+ events = pvr_cr_read32(pvr_dev, ROGUE_CR_EVENT_STATUS);
+
+ return (events & ROGUE_CR_EVENT_STATUS_SAFETY_EN) != 0;
+}
+
+static void pvr_device_safety_irq_clear(struct pvr_device *pvr_dev)
+{
+ WARN_ON_ONCE(!pvr_dev->has_safety_events);
+
+ pvr_cr_write32(pvr_dev, ROGUE_CR_EVENT_CLEAR,
+ ROGUE_CR_EVENT_CLEAR_SAFETY_EN);
+}
+
+static void pvr_device_handle_safety_events(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ u32 events;
+
+ WARN_ON_ONCE(!pvr_dev->has_safety_events);
+
+ events = pvr_cr_read32(pvr_dev, ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE);
+
+ /* Handle only these events on the host and leave the rest to the FW. */
+ events &= ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN |
+ ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN;
+
+ pvr_cr_write32(pvr_dev, ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE, events);
+
+ if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN) {
+ u32 fault_fw = pvr_cr_read32(pvr_dev, ROGUE_CR_FAULT_FW_STATUS);
+
+ pvr_cr_write32(pvr_dev, ROGUE_CR_FAULT_FW_CLEAR, fault_fw);
+
+ drm_info(drm_dev, "Safety event: FW fault (mask=0x%08x)\n", fault_fw);
+ }
+
+ if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN) {
+ /*
+		 * The watchdog timer is disabled by the driver, so this event
+		 * should never fire.
+ */
+ drm_info(drm_dev, "Safety event: Watchdog timeout\n");
+ }
+}
+
static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
{
struct pvr_device *pvr_dev = data;
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
irqreturn_t ret = IRQ_NONE;
/* We are in the threaded handler, we can keep dequeuing events until we
@@ -164,30 +232,76 @@ static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
pvr_device_process_active_queues(pvr_dev);
}
- pm_runtime_mark_last_busy(from_pvr_device(pvr_dev)->dev);
+ pm_runtime_mark_last_busy(drm_dev->dev);
ret = IRQ_HANDLED;
}
- /* Unmask FW irqs before returning, so new interrupts can be received. */
- pvr_fw_irq_enable(pvr_dev);
+ if (pvr_dev->has_safety_events) {
+ int err;
+
+ /*
+ * Ensure the GPU is powered on since some safety events (such
+ * as ECC faults) can happen outside of job submissions, which
+ * are otherwise the only time a power reference is held.
+ */
+ err = pvr_power_get(pvr_dev);
+ if (err) {
+ drm_err_ratelimited(drm_dev,
+ "%s: could not take power reference (%d)\n",
+ __func__, err);
+ return ret;
+ }
+
+ while (pvr_device_safety_irq_pending(pvr_dev)) {
+ pvr_device_safety_irq_clear(pvr_dev);
+ pvr_device_handle_safety_events(pvr_dev);
+
+ ret = IRQ_HANDLED;
+ }
+
+ pvr_power_put(pvr_dev);
+ }
+
return ret;
}
static irqreturn_t pvr_device_irq_handler(int irq, void *data)
{
struct pvr_device *pvr_dev = data;
+ bool safety_irq_pending = false;
+
+ if (pvr_dev->has_safety_events)
+ safety_irq_pending = pvr_device_safety_irq_pending(pvr_dev);
- if (!pvr_fw_irq_pending(pvr_dev))
+ if (!pvr_fw_irq_pending(pvr_dev) && !safety_irq_pending)
return IRQ_NONE; /* Spurious IRQ - ignore. */
- /* Mask the FW interrupts before waking up the thread. Will be unmasked
- * when the thread handler is done processing events.
- */
- pvr_fw_irq_disable(pvr_dev);
return IRQ_WAKE_THREAD;
}
+static void pvr_device_safety_irq_init(struct pvr_device *pvr_dev)
+{
+ u32 num_ecc_rams = 0;
+
+ /*
+ * Safety events are an optional feature of the RogueXE platform. They
+ * are only enabled if at least one of ECC memory or the watchdog timer
+	 * is present in HW. While safety events can be generated by other
+	 * systems, that will never happen if the above-mentioned hardware is
+ * not present.
+ */
+ if (!PVR_HAS_FEATURE(pvr_dev, roguexe)) {
+ pvr_dev->has_safety_events = false;
+ return;
+ }
+
+ PVR_FEATURE_VALUE(pvr_dev, ecc_rams, &num_ecc_rams);
+
+ pvr_dev->has_safety_events =
+ num_ecc_rams > 0 || PVR_HAS_FEATURE(pvr_dev, watchdog_timer);
+}
+
/**
* pvr_device_irq_init() - Initialise IRQ required by a PowerVR device
* @pvr_dev: Target PowerVR device.
@@ -205,17 +319,25 @@ pvr_device_irq_init(struct pvr_device *pvr_dev)
init_waitqueue_head(&pvr_dev->kccb.rtn_q);
+ pvr_device_safety_irq_init(pvr_dev);
+
pvr_dev->irq = platform_get_irq(plat_dev, 0);
if (pvr_dev->irq < 0)
return pvr_dev->irq;
/* Clear any pending events before requesting the IRQ line. */
pvr_fw_irq_clear(pvr_dev);
- pvr_fw_irq_enable(pvr_dev);
+ if (pvr_dev->has_safety_events)
+ pvr_device_safety_irq_clear(pvr_dev);
+
+ /*
+ * The ONESHOT flag ensures IRQs are masked while the thread handler is
+ * running.
+ */
return request_threaded_irq(pvr_dev->irq, pvr_device_irq_handler,
pvr_device_irq_thread_handler,
- IRQF_SHARED, "gpu", pvr_dev);
+ IRQF_SHARED | IRQF_ONESHOT, "gpu", pvr_dev);
}
/**
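The IRQ rework above drops the driver's own mask/unmask dance: with IRQF_ONESHOT the interrupt core keeps the line masked from the hard handler's return until the threaded handler finishes, which also covers the newly added safety events. A toy model of the hard/threaded split under that contract (the masking here is simulated; in the kernel the genirq core performs it):

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified model of a hard/threaded IRQ pair under IRQF_ONESHOT: the
     * "core" keeps the line masked until the thread handler returns, so
     * neither handler touches the mask itself. */
    static bool line_masked;

    static int hard_handler(bool fw_pending, bool safety_pending)
    {
            if (!fw_pending && !safety_pending)
                    return 0;               /* IRQ_NONE: spurious */
            return 1;                       /* IRQ_WAKE_THREAD */
    }

    static void thread_handler(void)
    {
            printf("processing events with line masked=%d\n", line_masked);
    }

    int main(void)
    {
            if (hard_handler(true, false)) {
                    line_masked = true;     /* done by the IRQ core, not the driver */
                    thread_handler();
                    line_masked = false;    /* unmasked when the thread returns */
            }
            return 0;
    }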
@@ -509,6 +631,11 @@ pvr_device_init(struct pvr_device *pvr_dev)
if (err)
return err;
+ /* Get the reset line for the GPU */
+ err = pvr_device_reset_init(pvr_dev);
+ if (err)
+ return err;
+
/* Explicitly power the GPU so we can access control registers before the FW is booted. */
err = pm_runtime_resume_and_get(dev);
if (err)
diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h
index 6d0dfacb677b..7cb01c38d2a9 100644
--- a/drivers/gpu/drm/imagination/pvr_device.h
+++ b/drivers/gpu/drm/imagination/pvr_device.h
@@ -18,6 +18,7 @@
#include <linux/bits.h>
#include <linux/compiler_attributes.h>
#include <linux/compiler_types.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
@@ -131,6 +132,22 @@ struct pvr_device {
*/
struct clk *mem_clk;
+ struct pvr_device_power {
+ struct device **domain_devs;
+ struct device_link **domain_links;
+
+ u32 domain_count;
+ } power;
+
+ /**
+ * @reset: Optional reset line.
+ *
+	 * Some platforms provide a reset line that must be de-asserted after the
+	 * power-up procedure. It must likewise be asserted again after the
+	 * power-down procedure.
+ */
+ struct reset_control *reset;
+
/** @irq: IRQ number. */
int irq;
@@ -300,6 +317,9 @@ struct pvr_device {
* struct pvr_file.
*/
spinlock_t ctx_list_lock;
+
+ /** @has_safety_events: Whether this device can raise safety events. */
+ bool has_safety_events;
};
/**
@@ -728,8 +748,22 @@ pvr_ioctl_union_padding_check(void *instance, size_t union_offset,
__union_size, __member_size); \
})
-#define PVR_FW_PROCESSOR_TYPE_META 0
-#define PVR_FW_PROCESSOR_TYPE_MIPS 1
-#define PVR_FW_PROCESSOR_TYPE_RISCV 2
+/*
+ * These utility functions properly belong in pvr_fw.h, but placing them there
+ * would cause a dependency cycle between that header and this one. Since
+ * they're primarily used in pvr_device.c, keep them here for now.
+ */
+
+static __always_inline bool
+pvr_fw_irq_pending(struct pvr_device *pvr_dev)
+{
+ return pvr_dev->fw_dev.defs->irq_pending(pvr_dev);
+}
+
+static __always_inline void
+pvr_fw_irq_clear(struct pvr_device *pvr_dev)
+{
+ pvr_dev->fw_dev.defs->irq_clear(pvr_dev);
+}
#endif /* PVR_DEVICE_H */
diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c
index 0639502137b4..b058ec183bb3 100644
--- a/drivers/gpu/drm/imagination/pvr_drv.c
+++ b/drivers/gpu/drm/imagination/pvr_drv.c
@@ -44,6 +44,7 @@
* This driver supports the following PowerVR/IMG graphics cores from Imagination Technologies:
*
* * AXE-1-16M (found in Texas Instruments AM62)
+ * * BXS-4-64 MC1 (found in Texas Instruments J721S2/AM68)
*/
/**
@@ -1411,6 +1412,10 @@ pvr_probe(struct platform_device *plat_dev)
platform_set_drvdata(plat_dev, drm_dev);
+ err = pvr_power_domains_init(pvr_dev);
+ if (err)
+ return err;
+
init_rwsem(&pvr_dev->reset_sem);
pvr_context_device_init(pvr_dev);
@@ -1450,6 +1455,8 @@ err_watchdog_fini:
err_context_fini:
pvr_context_device_fini(pvr_dev);
+ pvr_power_domains_fini(pvr_dev);
+
return err;
}
@@ -1470,9 +1477,17 @@ static void pvr_remove(struct platform_device *plat_dev)
pvr_watchdog_fini(pvr_dev);
pvr_queue_device_fini(pvr_dev);
pvr_context_device_fini(pvr_dev);
+ pvr_power_domains_fini(pvr_dev);
}
static const struct of_device_id dt_match[] = {
+ { .compatible = "img,img-rogue", .data = NULL },
+
+ /*
+ * This legacy compatible string was introduced early on before the more generic
+ * "img,img-rogue" was added. Keep it around here for compatibility, but never use
+ * "img,img-axe" in new devicetrees.
+ */
{ .compatible = "img,img-axe", .data = NULL },
{}
};
@@ -1498,3 +1513,4 @@ MODULE_DESCRIPTION(PVR_DRIVER_DESC);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw");
+MODULE_FIRMWARE("powervr/rogue_36.53.104.796_v1.fw");
diff --git a/drivers/gpu/drm/imagination/pvr_free_list.c b/drivers/gpu/drm/imagination/pvr_free_list.c
index 5e51bc980751..5228e214491c 100644
--- a/drivers/gpu/drm/imagination/pvr_free_list.c
+++ b/drivers/gpu/drm/imagination/pvr_free_list.c
@@ -237,11 +237,10 @@ pvr_free_list_insert_pages_locked(struct pvr_free_list *free_list,
dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
u64 dma_pfn = dma_addr >>
ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
- u32 dma_addr_offset;
BUILD_BUG_ON(ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE > PAGE_SIZE);
- for (dma_addr_offset = 0; dma_addr_offset < PAGE_SIZE;
+ for (u32 dma_addr_offset = 0; dma_addr_offset < PAGE_SIZE;
dma_addr_offset += ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE) {
WARN_ON_ONCE(dma_pfn >> 32);
diff --git a/drivers/gpu/drm/imagination/pvr_fw.c b/drivers/gpu/drm/imagination/pvr_fw.c
index d09c4c684116..b2f8cba77346 100644
--- a/drivers/gpu/drm/imagination/pvr_fw.c
+++ b/drivers/gpu/drm/imagination/pvr_fw.c
@@ -50,9 +50,8 @@ pvr_fw_find_layout_entry(struct pvr_device *pvr_dev, enum pvr_fw_section_id id)
{
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
- u32 entry;
- for (entry = 0; entry < num_layout_entries; entry++) {
+ for (u32 entry = 0; entry < num_layout_entries; entry++) {
if (layout_entries[entry].id == id)
return &layout_entries[entry];
}
@@ -65,9 +64,8 @@ pvr_fw_find_private_data(struct pvr_device *pvr_dev)
{
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
- u32 entry;
- for (entry = 0; entry < num_layout_entries; entry++) {
+ for (u32 entry = 0; entry < num_layout_entries; entry++) {
if (layout_entries[entry].id == META_PRIVATE_DATA ||
layout_entries[entry].id == MIPS_PRIVATE_DATA ||
layout_entries[entry].id == RISCV_PRIVATE_DATA)
@@ -97,7 +95,6 @@ pvr_fw_validate(struct pvr_device *pvr_dev)
const u8 *fw = firmware->data;
u32 fw_offset = firmware->size - SZ_4K;
u32 layout_table_size;
- u32 entry;
if (firmware->size < SZ_4K || (firmware->size % FW_BLOCK_SIZE))
return -EINVAL;
@@ -144,7 +141,7 @@ pvr_fw_validate(struct pvr_device *pvr_dev)
return -EINVAL;
layout_entries = (const struct pvr_fw_layout_entry *)&fw[fw_offset];
- for (entry = 0; entry < header->layout_entry_num; entry++) {
+ for (u32 entry = 0; entry < header->layout_entry_num; entry++) {
u32 start_addr = layout_entries[entry].base_addr;
u32 end_addr = start_addr + layout_entries[entry].alloc_size;
@@ -233,13 +230,12 @@ pvr_fw_find_mmu_segment(struct pvr_device *pvr_dev, u32 addr, u32 size, void *fw
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
u32 end_addr = addr + size;
- int entry = 0;
/* Ensure requested range is not zero, and size is not causing addr to overflow. */
if (end_addr <= addr)
return -EINVAL;
- for (entry = 0; entry < num_layout_entries; entry++) {
+ for (int entry = 0; entry < num_layout_entries; entry++) {
u32 entry_start_addr = layout_entries[entry].base_addr;
u32 entry_end_addr = entry_start_addr + layout_entries[entry].alloc_size;
@@ -441,6 +437,9 @@ fw_runtime_cfg_init(void *cpu_ptr, void *priv)
runtime_cfg->active_pm_latency_persistant = true;
WARN_ON(PVR_FEATURE_VALUE(pvr_dev, num_clusters,
&runtime_cfg->default_dusts_num_init) != 0);
+
+ /* Keep watchdog timer disabled. */
+ runtime_cfg->wdg_period_us = 0;
}
static void
@@ -663,7 +662,7 @@ pvr_fw_process(struct pvr_device *pvr_dev)
return PTR_ERR(fw_code_ptr);
}
- if (pvr_dev->fw_dev.defs->has_fixed_data_addr()) {
+ if (pvr_dev->fw_dev.defs->has_fixed_data_addr) {
u32 base_addr = private_data->base_addr & pvr_dev->fw_dev.fw_heap_info.offset_mask;
fw_data_ptr =
@@ -939,18 +938,22 @@ pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev)
int
pvr_fw_init(struct pvr_device *pvr_dev)
{
+ static const struct pvr_fw_defs *fw_defs[PVR_FW_PROCESSOR_TYPE_COUNT] = {
+ [PVR_FW_PROCESSOR_TYPE_META] = &pvr_fw_defs_meta,
+ [PVR_FW_PROCESSOR_TYPE_MIPS] = &pvr_fw_defs_mips,
+ [PVR_FW_PROCESSOR_TYPE_RISCV] = &pvr_fw_defs_riscv,
+ };
+
u32 kccb_size_log2 = ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
u32 kccb_rtn_size = (1 << kccb_size_log2) * sizeof(*pvr_dev->kccb.rtn);
struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
int err;
- if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_META)
- fw_dev->defs = &pvr_fw_defs_meta;
- else if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_MIPS)
- fw_dev->defs = &pvr_fw_defs_mips;
- else
+ if (fw_dev->processor_type >= PVR_FW_PROCESSOR_TYPE_COUNT)
return -EINVAL;
+ fw_dev->defs = fw_defs[fw_dev->processor_type];
+
err = fw_dev->defs->init(pvr_dev);
if (err)
return err;
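pvr_fw_init() above trades the if/else ladder for a designated-initializer table indexed by processor type, with a bounds check rejecting unknown values. The same table-driven dispatch in a standalone sketch (names are illustrative):

    #include <stdio.h>

    enum fw_processor { FW_META, FW_MIPS, FW_RISCV, FW_COUNT };

    struct fw_defs { const char *name; };

    static const struct fw_defs defs_meta  = { "meta" };
    static const struct fw_defs defs_mips  = { "mips" };
    static const struct fw_defs defs_riscv = { "riscv" };

    int main(void)
    {
            /* Designated initializers keep the table in sync with the enum. */
            static const struct fw_defs *fw_defs[FW_COUNT] = {
                    [FW_META]  = &defs_meta,
                    [FW_MIPS]  = &defs_mips,
                    [FW_RISCV] = &defs_riscv,
            };
            enum fw_processor type = FW_RISCV;

            if (type >= FW_COUNT)
                    return 1;       /* unknown processor: reject, as -EINVAL above */
            printf("selected %s backend\n", fw_defs[type]->name);
            return 0;
    }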
@@ -1456,6 +1459,15 @@ void pvr_fw_object_get_fw_addr_offset(struct pvr_fw_object *fw_obj, u32 offset,
*fw_addr_out = pvr_dev->fw_dev.defs->get_fw_addr_with_offset(fw_obj, offset);
}
+u64
+pvr_fw_obj_get_gpu_addr(struct pvr_fw_object *fw_obj)
+{
+ struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(fw_obj->gem)->dev);
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ return fw_dev->fw_heap_info.gpu_addr + fw_obj->fw_addr_offset;
+}
+
/*
* pvr_fw_hard_reset() - Re-initialise the FW code and data segments, and reset all global FW
* structures
diff --git a/drivers/gpu/drm/imagination/pvr_fw.h b/drivers/gpu/drm/imagination/pvr_fw.h
index b7966bd574a9..1404dd492d7c 100644
--- a/drivers/gpu/drm/imagination/pvr_fw.h
+++ b/drivers/gpu/drm/imagination/pvr_fw.h
@@ -167,47 +167,30 @@ struct pvr_fw_defs {
int (*wrapper_init)(struct pvr_device *pvr_dev);
/**
- * @has_fixed_data_addr:
+ * @irq_pending: Check interrupt status register for pending interrupts.
*
- * Called to check if firmware fixed data must be loaded at the address given by the
- * firmware layout table.
+ * @pvr_dev: Target PowerVR device.
*
* This function is mandatory.
+ */
+ bool (*irq_pending)(struct pvr_device *pvr_dev);
+
+ /**
+ * @irq_clear: Clear pending interrupts.
*
- * Returns:
- * * %true if firmware fixed data must be loaded at the address given by the firmware
- * layout table.
- * * %false otherwise.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function is mandatory.
*/
- bool (*has_fixed_data_addr)(void);
+ void (*irq_clear)(struct pvr_device *pvr_dev);
/**
- * @irq: FW Interrupt information.
+	 * @has_fixed_data_addr: Specifies whether the firmware fixed data must be loaded at the
+ * address given by the firmware layout table.
*
- * Those are processor dependent, and should be initialized by the
- * processor backend in pvr_fw_funcs::init().
+ * This value is mandatory.
*/
- struct {
- /** @enable_reg: FW interrupt enable register. */
- u32 enable_reg;
-
- /** @status_reg: FW interrupt status register. */
- u32 status_reg;
-
- /**
- * @clear_reg: FW interrupt clear register.
- *
- * If @status_reg == @clear_reg, we clear by write a bit to zero,
- * otherwise we clear by writing a bit to one.
- */
- u32 clear_reg;
-
- /** @event_mask: Bitmask of events to listen for. */
- u32 event_mask;
-
- /** @clear_mask: Value to write to the clear_reg in order to clear FW IRQs. */
- u32 clear_mask;
- } irq;
+ bool has_fixed_data_addr;
};
/**
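The header change above replaces the per-register irq description struct with two callbacks in pvr_fw_defs, so each firmware-processor backend supplies its own IRQ accessors instead of feeding register offsets to generic macros, and has_fixed_data_addr becomes plain data. A compact sketch of that ops-struct pattern (types stubbed out so the example runs on its own):

    #include <stdbool.h>
    #include <stdio.h>

    struct device;

    /* Per-backend ops: each FW processor provides its own IRQ accessors
     * rather than describing registers for a generic macro to poke. */
    struct fw_defs {
            bool (*irq_pending)(struct device *dev);
            void (*irq_clear)(struct device *dev);
            bool has_fixed_data_addr;
    };

    static bool meta_irq_pending(struct device *dev) { (void)dev; return true; }
    static void meta_irq_clear(struct device *dev)   { (void)dev; puts("meta: cleared"); }

    static const struct fw_defs fw_defs_meta = {
            .irq_pending = meta_irq_pending,
            .irq_clear = meta_irq_clear,
            .has_fixed_data_addr = false,
    };

    int main(void)
    {
            const struct fw_defs *defs = &fw_defs_meta;

            if (defs->irq_pending(NULL))
                    defs->irq_clear(NULL);
            return 0;
    }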
@@ -400,26 +383,16 @@ struct pvr_fw_device {
} fw_objs;
};
-#define pvr_fw_irq_read_reg(pvr_dev, name) \
- pvr_cr_read32((pvr_dev), (pvr_dev)->fw_dev.defs->irq.name ## _reg)
-
-#define pvr_fw_irq_write_reg(pvr_dev, name, value) \
- pvr_cr_write32((pvr_dev), (pvr_dev)->fw_dev.defs->irq.name ## _reg, value)
-
-#define pvr_fw_irq_pending(pvr_dev) \
- (pvr_fw_irq_read_reg(pvr_dev, status) & (pvr_dev)->fw_dev.defs->irq.event_mask)
-
-#define pvr_fw_irq_clear(pvr_dev) \
- pvr_fw_irq_write_reg(pvr_dev, clear, (pvr_dev)->fw_dev.defs->irq.clear_mask)
-
-#define pvr_fw_irq_enable(pvr_dev) \
- pvr_fw_irq_write_reg(pvr_dev, enable, (pvr_dev)->fw_dev.defs->irq.event_mask)
-
-#define pvr_fw_irq_disable(pvr_dev) \
- pvr_fw_irq_write_reg(pvr_dev, enable, 0)
+enum pvr_fw_processor_type {
+ PVR_FW_PROCESSOR_TYPE_META = 0,
+ PVR_FW_PROCESSOR_TYPE_MIPS,
+ PVR_FW_PROCESSOR_TYPE_RISCV,
+ PVR_FW_PROCESSOR_TYPE_COUNT,
+};
extern const struct pvr_fw_defs pvr_fw_defs_meta;
extern const struct pvr_fw_defs pvr_fw_defs_mips;
+extern const struct pvr_fw_defs pvr_fw_defs_riscv;
int pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev);
int pvr_fw_init(struct pvr_device *pvr_dev);
@@ -506,4 +479,18 @@ pvr_fw_object_get_fw_addr(struct pvr_fw_object *fw_obj, u32 *fw_addr_out)
pvr_fw_object_get_fw_addr_offset(fw_obj, 0, fw_addr_out);
}
+u64
+pvr_fw_obj_get_gpu_addr(struct pvr_fw_object *fw_obj);
+
+static __always_inline size_t
+pvr_fw_obj_get_object_size(struct pvr_fw_object *fw_obj)
+{
+ return pvr_gem_object_size(fw_obj->gem);
+}
+
+/* Util functions defined in pvr_fw_util.c. These are intended for use in pvr_fw_<arch>.c files. */
+int
+pvr_fw_process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr,
+ u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr);
+
#endif /* PVR_FW_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c
index 6d13864851fc..60db3668ad3c 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_meta.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
@@ -370,13 +370,12 @@ configure_seg_mmu(struct pvr_device *pvr_dev, u32 **boot_conf_ptr)
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
u64 seg_out_addr_top;
- u32 i;
seg_out_addr_top =
ROGUE_FW_SEGMMU_OUTADDR_TOP_SLC(MMU_CONTEXT_MAPPING_FWPRIV,
ROGUE_FW_SEGMMU_META_BIFDM_ID);
- for (i = 0; i < num_layout_entries; i++) {
+ for (u32 i = 0; i < num_layout_entries; i++) {
/*
* FW code is using the bootloader segment which is already
* configured on boot. FW coremem code and data don't use the
@@ -534,9 +533,17 @@ pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
}
static bool
-pvr_meta_has_fixed_data_addr(void)
+pvr_meta_irq_pending(struct pvr_device *pvr_dev)
{
- return false;
+ return pvr_cr_read32(pvr_dev, ROGUE_CR_META_SP_MSLVIRQSTATUS) &
+ ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN;
+}
+
+static void
+pvr_meta_irq_clear(struct pvr_device *pvr_dev)
+{
+ pvr_cr_write32(pvr_dev, ROGUE_CR_META_SP_MSLVIRQSTATUS,
+ ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK);
}
const struct pvr_fw_defs pvr_fw_defs_meta = {
@@ -546,12 +553,7 @@ const struct pvr_fw_defs pvr_fw_defs_meta = {
.vm_unmap = pvr_meta_vm_unmap,
.get_fw_addr_with_offset = pvr_meta_get_fw_addr_with_offset,
.wrapper_init = pvr_meta_wrapper_init,
- .has_fixed_data_addr = pvr_meta_has_fixed_data_addr,
- .irq = {
- .enable_reg = ROGUE_CR_META_SP_MSLVIRQENABLE,
- .status_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS,
- .clear_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS,
- .event_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN,
- .clear_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK,
- },
+ .irq_pending = pvr_meta_irq_pending,
+ .irq_clear = pvr_meta_irq_clear,
+ .has_fixed_data_addr = false,
};
diff --git a/drivers/gpu/drm/imagination/pvr_fw_mips.c b/drivers/gpu/drm/imagination/pvr_fw_mips.c
index 0bed0257e2ab..6914fc46db50 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_mips.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_mips.c
@@ -8,7 +8,6 @@
#include "pvr_rogue_mips.h"
#include "pvr_vm_mips.h"
-#include <linux/elf.h>
#include <linux/err.h>
#include <linux/types.h>
@@ -16,60 +15,6 @@
#define ROGUE_FW_HEAP_MIPS_SHIFT 24 /* 16 MB */
#define ROGUE_FW_HEAP_MIPS_RESERVED_SIZE SZ_1M
-/**
- * process_elf_command_stream() - Process ELF firmware image and populate
- * firmware sections
- * @pvr_dev: Device pointer.
- * @fw: Pointer to firmware image.
- * @fw_code_ptr: Pointer to FW code section.
- * @fw_data_ptr: Pointer to FW data section.
- * @fw_core_code_ptr: Pointer to FW coremem code section.
- * @fw_core_data_ptr: Pointer to FW coremem data section.
- *
- * Returns :
- * * 0 on success, or
- * * -EINVAL on any error in ELF command stream.
- */
-static int
-process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr,
- u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr)
-{
- struct elf32_hdr *header = (struct elf32_hdr *)fw;
- struct elf32_phdr *program_header = (struct elf32_phdr *)(fw + header->e_phoff);
- struct drm_device *drm_dev = from_pvr_device(pvr_dev);
- u32 entry;
- int err;
-
- for (entry = 0; entry < header->e_phnum; entry++, program_header++) {
- void *write_addr;
-
- /* Only consider loadable entries in the ELF segment table */
- if (program_header->p_type != PT_LOAD)
- continue;
-
- err = pvr_fw_find_mmu_segment(pvr_dev, program_header->p_vaddr,
- program_header->p_memsz, fw_code_ptr, fw_data_ptr,
- fw_core_code_ptr, fw_core_data_ptr, &write_addr);
- if (err) {
- drm_err(drm_dev,
- "Addr 0x%x (size: %d) not found in any firmware segment",
- program_header->p_vaddr, program_header->p_memsz);
- return err;
- }
-
- /* Write to FW allocation only if available */
- if (write_addr) {
- memcpy(write_addr, fw + program_header->p_offset,
- program_header->p_filesz);
-
- memset((u8 *)write_addr + program_header->p_filesz, 0,
- program_header->p_memsz - program_header->p_filesz);
- }
- }
-
- return 0;
-}
-
static int
pvr_mips_init(struct pvr_device *pvr_dev)
{
@@ -97,11 +42,10 @@ pvr_mips_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
const struct pvr_fw_layout_entry *stack_entry;
struct rogue_mipsfw_boot_data *boot_data;
dma_addr_t dma_addr;
- u32 page_nr;
int err;
- err = process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr, fw_core_code_ptr,
- fw_core_data_ptr);
+ err = pvr_fw_process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr, fw_core_data_ptr);
if (err)
return err;
@@ -132,7 +76,7 @@ pvr_mips_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
boot_data->reg_base = pvr_dev->regs_resource->start;
- for (page_nr = 0; page_nr < ARRAY_SIZE(boot_data->pt_phys_addr); page_nr++) {
+ for (u32 page_nr = 0; page_nr < ARRAY_SIZE(boot_data->pt_phys_addr); page_nr++) {
/* Firmware expects 4k pages, but host page size might be different. */
u32 src_page_nr = (page_nr * ROGUE_MIPSFW_PAGE_SIZE_4K) >> PAGE_SHIFT;
u32 page_offset = (page_nr * ROGUE_MIPSFW_PAGE_SIZE_4K) & ~PAGE_MASK;
@@ -228,9 +172,17 @@ pvr_mips_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
}
static bool
-pvr_mips_has_fixed_data_addr(void)
+pvr_mips_irq_pending(struct pvr_device *pvr_dev)
+{
+ return pvr_cr_read32(pvr_dev, ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS) &
+ ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN;
+}
+
+static void
+pvr_mips_irq_clear(struct pvr_device *pvr_dev)
{
- return true;
+ pvr_cr_write32(pvr_dev, ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR,
+ ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN);
}
const struct pvr_fw_defs pvr_fw_defs_mips = {
@@ -241,12 +193,7 @@ const struct pvr_fw_defs pvr_fw_defs_mips = {
.vm_unmap = pvr_vm_mips_unmap,
.get_fw_addr_with_offset = pvr_mips_get_fw_addr_with_offset,
.wrapper_init = pvr_mips_wrapper_init,
- .has_fixed_data_addr = pvr_mips_has_fixed_data_addr,
- .irq = {
- .enable_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE,
- .status_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS,
- .clear_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR,
- .event_mask = ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN,
- .clear_mask = ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN,
- },
+ .irq_pending = pvr_mips_irq_pending,
+ .irq_clear = pvr_mips_irq_clear,
+ .has_fixed_data_addr = true,
};
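
The pt_phys_addr loop above maps firmware-side 4 KiB page numbers onto host pages, which may be larger than 4 KiB. A stand-alone illustration of the index/offset arithmetic, assuming a hypothetical 16 KiB host page (HOST_PAGE_SHIFT = 14):

#include <stdint.h>
#include <stdio.h>

#define FW_PAGE_SIZE_4K 4096u
#define HOST_PAGE_SHIFT 14
#define HOST_PAGE_SIZE (1u << HOST_PAGE_SHIFT)
#define HOST_PAGE_MASK (~(HOST_PAGE_SIZE - 1)) /* like the kernel's PAGE_MASK */

int main(void)
{
	for (uint32_t page_nr = 0; page_nr < 8; page_nr++) {
		uint32_t src_page_nr = (page_nr * FW_PAGE_SIZE_4K) >> HOST_PAGE_SHIFT;
		uint32_t page_offset = (page_nr * FW_PAGE_SIZE_4K) & ~HOST_PAGE_MASK;

		/* Four 4 KiB firmware pages fit in each 16 KiB host page. */
		printf("fw page %u -> host page %u + 0x%x\n",
		       (unsigned)page_nr, (unsigned)src_page_nr,
		       (unsigned)page_offset);
	}
	return 0;
}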
diff --git a/drivers/gpu/drm/imagination/pvr_fw_riscv.c b/drivers/gpu/drm/imagination/pvr_fw_riscv.c
new file mode 100644
index 000000000000..fc13d483be9a
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_riscv.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2024 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_info.h"
+#include "pvr_fw_mips.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_cr_defs.h"
+#include "pvr_rogue_riscv.h"
+#include "pvr_vm.h"
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#define ROGUE_FW_HEAP_RISCV_SHIFT 25 /* 32 MB */
+#define ROGUE_FW_HEAP_RISCV_SIZE (1u << ROGUE_FW_HEAP_RISCV_SHIFT)
+
+static int
+pvr_riscv_wrapper_init(struct pvr_device *pvr_dev)
+{
+ const u64 common_opts =
+ ((u64)(ROGUE_FW_HEAP_RISCV_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT)
+ << ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT) |
+ ((u64)MMU_CONTEXT_MAPPING_FWPRIV
+ << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT);
+
+ u64 code_addr = pvr_fw_obj_get_gpu_addr(pvr_dev->fw_dev.mem.code_obj);
+ u64 data_addr = pvr_fw_obj_get_gpu_addr(pvr_dev->fw_dev.mem.data_obj);
+
+ /* This condition allows us to OR the addresses into the register directly. */
+ static_assert(ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT ==
+ ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSHIFT);
+
+ WARN_ON(code_addr & ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK);
+ WARN_ON(data_addr & ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK);
+
+ pvr_cr_write64(pvr_dev, ROGUE_RISCVFW_REGION_REMAP_CR(BOOTLDR_CODE),
+ code_addr | common_opts | ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN);
+
+ pvr_cr_write64(pvr_dev, ROGUE_RISCVFW_REGION_REMAP_CR(BOOTLDR_DATA),
+ data_addr | common_opts |
+ ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN);
+
+ /* Garten IDLE bit controlled by RISC-V. */
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG,
+ ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META);
+
+ return 0;
+}
+
+struct rogue_riscv_fw_boot_data {
+ u64 coremem_code_dev_vaddr;
+ u64 coremem_data_dev_vaddr;
+ u32 coremem_code_fw_addr;
+ u32 coremem_data_fw_addr;
+ u32 coremem_code_size;
+ u32 coremem_data_size;
+ u32 flags;
+ u32 reserved;
+};
+
+static int
+pvr_riscv_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
+ u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr,
+ u32 core_code_alloc_size)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mem *fw_mem = &fw_dev->mem;
+ struct rogue_riscv_fw_boot_data *boot_data;
+ int err;
+
+ err = pvr_fw_process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr, fw_core_data_ptr);
+ if (err)
+ return err;
+
+ boot_data = (struct rogue_riscv_fw_boot_data *)fw_data_ptr;
+
+ if (fw_mem->core_code_obj) {
+ boot_data->coremem_code_dev_vaddr = pvr_fw_obj_get_gpu_addr(fw_mem->core_code_obj);
+ pvr_fw_object_get_fw_addr(fw_mem->core_code_obj, &boot_data->coremem_code_fw_addr);
+ boot_data->coremem_code_size = pvr_fw_obj_get_object_size(fw_mem->core_code_obj);
+ }
+
+ if (fw_mem->core_data_obj) {
+ boot_data->coremem_data_dev_vaddr = pvr_fw_obj_get_gpu_addr(fw_mem->core_data_obj);
+ pvr_fw_object_get_fw_addr(fw_mem->core_data_obj, &boot_data->coremem_data_fw_addr);
+ boot_data->coremem_data_size = pvr_fw_obj_get_object_size(fw_mem->core_data_obj);
+ }
+
+ return 0;
+}
+
+static int
+pvr_riscv_init(struct pvr_device *pvr_dev)
+{
+ pvr_fw_heap_info_init(pvr_dev, ROGUE_FW_HEAP_RISCV_SHIFT, 0);
+
+ return 0;
+}
+
+static u32
+pvr_riscv_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
+{
+ u32 fw_addr = fw_obj->fw_addr_offset + offset;
+
+ /* RISC-V cacheability is determined by address. */
+ if (fw_obj->gem->flags & PVR_BO_FW_FLAGS_DEVICE_UNCACHED)
+ fw_addr |= ROGUE_RISCVFW_REGION_BASE(SHARED_UNCACHED_DATA);
+ else
+ fw_addr |= ROGUE_RISCVFW_REGION_BASE(SHARED_CACHED_DATA);
+
+ return fw_addr;
+}
+
+static int
+pvr_riscv_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+
+ return pvr_vm_map(pvr_dev->kernel_vm_ctx, pvr_obj, 0, fw_obj->fw_mm_node.start,
+ pvr_gem_object_size(pvr_obj));
+}
+
+static void
+pvr_riscv_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+
+ pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj,
+ fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size);
+}
+
+static bool
+pvr_riscv_irq_pending(struct pvr_device *pvr_dev)
+{
+ return pvr_cr_read32(pvr_dev, ROGUE_CR_IRQ_OS0_EVENT_STATUS) &
+ ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN;
+}
+
+static void
+pvr_riscv_irq_clear(struct pvr_device *pvr_dev)
+{
+ pvr_cr_write32(pvr_dev, ROGUE_CR_IRQ_OS0_EVENT_CLEAR,
+ ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN);
+}
+
+const struct pvr_fw_defs pvr_fw_defs_riscv = {
+ .init = pvr_riscv_init,
+ .fw_process = pvr_riscv_fw_process,
+ .vm_map = pvr_riscv_vm_map,
+ .vm_unmap = pvr_riscv_vm_unmap,
+ .get_fw_addr_with_offset = pvr_riscv_get_fw_addr_with_offset,
+ .wrapper_init = pvr_riscv_wrapper_init,
+ .irq_pending = pvr_riscv_irq_pending,
+ .irq_clear = pvr_riscv_irq_clear,
+ .has_fixed_data_addr = false,
+};
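
pvr_riscv_wrapper_init() above depends on the remap register's DEVVADDR field shift being equal to the address alignment shift, which is what lets a suitably aligned GPU address be OR-ed straight into the register value. A stand-alone demonstration with made-up field constants:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical field constants; the real values come from
 * ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_*.
 */
#define FIELD_SHIFT      12u
#define FIELD_ALIGNSHIFT 12u

static uint64_t pack_addr(uint64_t addr)
{
	/* Generic packing: drop the alignment bits, move into the field. */
	return (addr >> FIELD_ALIGNSHIFT) << FIELD_SHIFT;
}

int main(void)
{
	uint64_t addr = 0x123456000ull; /* 4 KiB aligned */

	/* When SHIFT == ALIGNSHIFT the two shifts cancel, so an aligned
	 * address can simply be OR-ed into the register value.
	 */
	assert(pack_addr(addr) == addr);
	printf("packed: 0x%llx\n", (unsigned long long)pack_addr(addr));
	return 0;
}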
diff --git a/drivers/gpu/drm/imagination/pvr_fw_startstop.c b/drivers/gpu/drm/imagination/pvr_fw_startstop.c
index 36cec227cfe3..dcbb9903e791 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_startstop.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_startstop.c
@@ -49,6 +49,14 @@ rogue_bif_init(struct pvr_device *pvr_dev)
pvr_cr_write64(pvr_dev, BIF_CAT_BASEX(MMU_CONTEXT_MAPPING_FWPRIV),
pc_addr);
+
+ if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_RISCV) {
+ pc_addr = (((u64)pc_dma_addr >> ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT)
+ << ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT) &
+ ~ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK;
+
+ pvr_cr_write64(pvr_dev, FWCORE_MEM_CAT_BASEX(MMU_CONTEXT_MAPPING_FWPRIV), pc_addr);
+ }
}
static int
@@ -114,6 +122,9 @@ pvr_fw_start(struct pvr_device *pvr_dev)
(void)pvr_cr_read32(pvr_dev, ROGUE_CR_SYS_BUS_SECURE); /* Fence write */
}
+ if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_RISCV)
+ pvr_cr_write32(pvr_dev, ROGUE_CR_FWCORE_BOOT, 0);
+
/* Set Rogue in soft-reset. */
pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, soft_reset_mask);
if (has_reset2)
@@ -167,6 +178,12 @@ pvr_fw_start(struct pvr_device *pvr_dev)
/* ... and afterwards. */
udelay(3);
+ if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_RISCV) {
+ /* Boot the FW. */
+ pvr_cr_write32(pvr_dev, ROGUE_CR_FWCORE_BOOT, 1);
+ udelay(3);
+ }
+
return 0;
err_reset:
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
index 5dbb636d7d4f..a1098b521485 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -21,7 +21,6 @@ tracebuf_ctrl_init(void *cpu_ptr, void *priv)
{
struct rogue_fwif_tracebuf *tracebuf_ctrl = cpu_ptr;
struct pvr_fw_trace *fw_trace = priv;
- u32 thread_nr;
tracebuf_ctrl->tracebuf_size_in_dwords = ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
tracebuf_ctrl->tracebuf_flags = 0;
@@ -31,7 +30,7 @@ tracebuf_ctrl_init(void *cpu_ptr, void *priv)
else
tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_NONE;
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct rogue_fwif_tracebuf_space *tracebuf_space =
&tracebuf_ctrl->tracebuf[thread_nr];
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
@@ -48,10 +47,9 @@ int pvr_fw_trace_init(struct pvr_device *pvr_dev)
{
struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
struct drm_device *drm_dev = from_pvr_device(pvr_dev);
- u32 thread_nr;
int err;
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
trace_buffer->buf =
@@ -88,7 +86,7 @@ int pvr_fw_trace_init(struct pvr_device *pvr_dev)
BUILD_BUG_ON(ARRAY_SIZE(fw_trace->tracebuf_ctrl->tracebuf) !=
ARRAY_SIZE(fw_trace->buffers));
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct rogue_fwif_tracebuf_space *tracebuf_space =
&fw_trace->tracebuf_ctrl->tracebuf[thread_nr];
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
@@ -99,7 +97,7 @@ int pvr_fw_trace_init(struct pvr_device *pvr_dev)
return 0;
err_free_buf:
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
if (trace_buffer->buf)
@@ -112,9 +110,8 @@ err_free_buf:
void pvr_fw_trace_fini(struct pvr_device *pvr_dev)
{
struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
- u32 thread_nr;
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
pvr_fw_object_unmap_and_destroy(trace_buffer->buf_obj);
@@ -122,8 +119,6 @@ void pvr_fw_trace_fini(struct pvr_device *pvr_dev)
pvr_fw_object_unmap_and_destroy(fw_trace->tracebuf_ctrl_obj);
}
-#if defined(CONFIG_DEBUG_FS)
-
/**
* update_logtype() - Send KCCB command to trigger FW to update logtype
* @pvr_dev: Target PowerVR device
@@ -184,9 +179,7 @@ struct pvr_fw_trace_seq_data {
static u32 find_sfid(u32 id)
{
- u32 i;
-
- for (i = 0; i < ARRAY_SIZE(stid_fmts); i++) {
+ for (u32 i = 0; i < ARRAY_SIZE(stid_fmts); i++) {
if (stid_fmts[i].id == id)
return i;
}
@@ -285,12 +278,11 @@ static void fw_trace_get_first(struct pvr_fw_trace_seq_data *trace_seq_data)
static void *fw_trace_seq_start(struct seq_file *s, loff_t *pos)
{
struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
- u32 i;
/* Reset trace index, then advance to *pos. */
fw_trace_get_first(trace_seq_data);
- for (i = 0; i < *pos; i++) {
+ for (u32 i = 0; i < *pos; i++) {
if (!fw_trace_get_next(trace_seq_data))
return NULL;
}
@@ -447,7 +439,7 @@ static const struct file_operations pvr_fw_trace_fops = {
void
pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask, u32 new_mask)
{
- if (old_mask != new_mask)
+ if (IS_ENABLED(CONFIG_DEBUG_FS) && old_mask != new_mask)
update_logtype(pvr_dev, new_mask);
}
@@ -455,12 +447,14 @@ void
pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
{
struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
- u32 thread_nr;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return;
static_assert(ARRAY_SIZE(fw_trace->buffers) <= 10,
"The filename buffer is only large enough for a single-digit thread count");
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); ++thread_nr) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); ++thread_nr) {
char filename[8];
snprintf(filename, ARRAY_SIZE(filename), "trace_%u", thread_nr);
@@ -469,4 +463,3 @@ pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
&pvr_fw_trace_fops);
}
}
-#endif
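
Dropping the #if defined(CONFIG_DEBUG_FS) guards in favour of IS_ENABLED() keeps the debugfs paths compiled and type-checked in every configuration, while the compiler discards them as dead code when the option is off. A stand-alone sketch of the pattern using a stand-in macro:

#include <stdio.h>

/* Stand-in for the kernel's IS_ENABLED(CONFIG_*) macro. */
#define IS_ENABLED(option) (option)
#define CONFIG_DEBUG_FS_DEMO 0 /* pretend the option is disabled */

static void update_logtype(unsigned int mask)
{
	printf("new mask: %#x\n", mask);
}

int main(void)
{
	unsigned int old_mask = 0x1, new_mask = 0x3;

	/* The condition folds to 0 at compile time, yet update_logtype()
	 * and its arguments are still parsed and type-checked.
	 */
	if (IS_ENABLED(CONFIG_DEBUG_FS_DEMO) && old_mask != new_mask)
		update_logtype(new_mask);

	return 0;
}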
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.h b/drivers/gpu/drm/imagination/pvr_fw_trace.h
index 0074d2b18da0..1d0ef937427a 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.h
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.h
@@ -65,7 +65,6 @@ struct pvr_fw_trace {
int pvr_fw_trace_init(struct pvr_device *pvr_dev);
void pvr_fw_trace_fini(struct pvr_device *pvr_dev);
-#if defined(CONFIG_DEBUG_FS)
/* Forward declaration from <linux/dcache.h>. */
struct dentry;
@@ -73,6 +72,5 @@ void pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask,
u32 new_mask);
void pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir);
-#endif /* defined(CONFIG_DEBUG_FS) */
#endif /* PVR_FW_TRACE_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_util.c b/drivers/gpu/drm/imagination/pvr_fw_util.c
new file mode 100644
index 000000000000..377fe72d86b8
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_util.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2024 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+
+#include <linux/elf.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+/**
+ * pvr_fw_process_elf_command_stream() - Process ELF firmware image and populate
+ * firmware sections
+ * @pvr_dev: Device pointer.
+ * @fw: Pointer to firmware image.
+ * @fw_code_ptr: Pointer to FW code section.
+ * @fw_data_ptr: Pointer to FW data section.
+ * @fw_core_code_ptr: Pointer to FW coremem code section.
+ * @fw_core_data_ptr: Pointer to FW coremem data section.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -EINVAL on any error in the ELF command stream.
+ */
+int
+pvr_fw_process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw,
+ u8 *fw_code_ptr, u8 *fw_data_ptr,
+ u8 *fw_core_code_ptr, u8 *fw_core_data_ptr)
+{
+ struct elf32_hdr *header = (struct elf32_hdr *)fw;
+ struct elf32_phdr *program_header = (struct elf32_phdr *)(fw + header->e_phoff);
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ int err;
+
+ for (u32 entry = 0; entry < header->e_phnum; entry++, program_header++) {
+ void *write_addr;
+
+ /* Only consider loadable entries in the ELF segment table */
+ if (program_header->p_type != PT_LOAD)
+ continue;
+
+ err = pvr_fw_find_mmu_segment(pvr_dev, program_header->p_vaddr,
+ program_header->p_memsz, fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr, fw_core_data_ptr, &write_addr);
+ if (err) {
+ drm_err(drm_dev,
+ "Addr 0x%x (size: %d) not found in any firmware segment",
+ program_header->p_vaddr, program_header->p_memsz);
+ return err;
+ }
+
+ /* Write to FW allocation only if available */
+ if (write_addr) {
+ memcpy(write_addr, fw + program_header->p_offset,
+ program_header->p_filesz);
+
+ memset((u8 *)write_addr + program_header->p_filesz, 0,
+ program_header->p_memsz - program_header->p_filesz);
+ }
+ }
+
+ return 0;
+}
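
For reference, the same PT_LOAD walk can be reproduced in userspace with the standard <elf.h> types: p_filesz bytes come from the image and the remainder of p_memsz is zero-filled (.bss), which is exactly what the memcpy()/memset() pair above implements. A hypothetical stand-alone reader:

#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct stat st;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <elf32-image>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || fstat(fd, &st) < 0)
		return 1;

	const unsigned char *fw = mmap(NULL, st.st_size, PROT_READ,
				       MAP_PRIVATE, fd, 0);
	if (fw == MAP_FAILED)
		return 1;

	const Elf32_Ehdr *ehdr = (const Elf32_Ehdr *)fw;
	const Elf32_Phdr *phdr = (const Elf32_Phdr *)(fw + ehdr->e_phoff);

	for (Elf32_Half i = 0; i < ehdr->e_phnum; i++, phdr++) {
		if (phdr->p_type != PT_LOAD)
			continue;

		/* p_filesz bytes are copied from the image; the remaining
		 * p_memsz - p_filesz bytes are zero-filled at load time.
		 */
		printf("PT_LOAD vaddr 0x%x filesz %u memsz %u\n",
		       (unsigned)phdr->p_vaddr, (unsigned)phdr->p_filesz,
		       (unsigned)phdr->p_memsz);
	}

	munmap((void *)fw, st.st_size);
	close(fd);
	return 0;
}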
diff --git a/drivers/gpu/drm/imagination/pvr_gem.c b/drivers/gpu/drm/imagination/pvr_gem.c
index 6a8c81fe8c1e..a66cf082af24 100644
--- a/drivers/gpu/drm/imagination/pvr_gem.c
+++ b/drivers/gpu/drm/imagination/pvr_gem.c
@@ -19,6 +19,7 @@
#include <linux/log2.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
+#include <linux/property.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
@@ -76,8 +77,6 @@ pvr_gem_object_flags_validate(u64 flags)
DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS),
};
- int i;
-
/*
* Check for bits set in undefined regions. Reserved regions refer to
* options that can only be set by the kernel. These are explicitly
@@ -91,7 +90,7 @@ pvr_gem_object_flags_validate(u64 flags)
* Check for all combinations of flags marked as invalid in the array
* above.
*/
- for (i = 0; i < ARRAY_SIZE(invalid_combinations); ++i) {
+ for (int i = 0; i < ARRAY_SIZE(invalid_combinations); ++i) {
u64 combo = invalid_combinations[i];
if ((flags & combo) == combo)
@@ -203,7 +202,7 @@ pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj)
dma_resv_lock(obj->resv, NULL);
- err = drm_gem_shmem_vmap(shmem_obj, &map);
+ err = drm_gem_shmem_vmap_locked(shmem_obj, &map);
if (err)
goto err_unlock;
@@ -257,7 +256,7 @@ pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj)
dma_sync_sgtable_for_device(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
}
- drm_gem_shmem_vunmap(shmem_obj, &map);
+ drm_gem_shmem_vunmap_locked(shmem_obj, &map);
dma_resv_unlock(obj->resv);
}
@@ -336,6 +335,7 @@ struct drm_gem_object *pvr_gem_create_object(struct drm_device *drm_dev, size_t
struct pvr_gem_object *
pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
struct drm_gem_shmem_object *shmem_obj;
struct pvr_gem_object *pvr_obj;
struct sg_table *sgt;
@@ -345,7 +345,10 @@ pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
if (size == 0 || !pvr_gem_object_flags_validate(flags))
return ERR_PTR(-EINVAL);
- shmem_obj = drm_gem_shmem_create(from_pvr_device(pvr_dev), size);
+ if (device_get_dma_attr(drm_dev->dev) == DEV_DMA_COHERENT)
+ flags |= PVR_BO_CPU_CACHED;
+
+ shmem_obj = drm_gem_shmem_create(drm_dev, size);
if (IS_ERR(shmem_obj))
return ERR_CAST(shmem_obj);
@@ -360,8 +363,7 @@ pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
goto err_shmem_object_free;
}
- dma_sync_sgtable_for_device(shmem_obj->base.dev->dev, sgt,
- DMA_BIDIRECTIONAL);
+ dma_sync_sgtable_for_device(drm_dev->dev, sgt, DMA_BIDIRECTIONAL);
/*
* Do this last because pvr_gem_object_zero() requires a fully
diff --git a/drivers/gpu/drm/imagination/pvr_gem.h b/drivers/gpu/drm/imagination/pvr_gem.h
index e0e5ea509a2e..c99f30cc6208 100644
--- a/drivers/gpu/drm/imagination/pvr_gem.h
+++ b/drivers/gpu/drm/imagination/pvr_gem.h
@@ -44,8 +44,10 @@ struct pvr_file;
* Bits not defined anywhere are "undefined".
*
* CPU mapping options
- * :PVR_BO_CPU_CACHED: By default, all GEM objects are mapped write-combined on the CPU. Set this
- * flag to override this behaviour and map the object cached.
+ * :PVR_BO_CPU_CACHED: By default, all GEM objects are mapped write-combined on the CPU. Set
+ * this flag to override this behaviour and map the object cached. If the dma_coherent
+ * property is present in the devicetree, all allocations are mapped as if this flag were
+ * set; callers do not need to account for this at allocation time.
*
* Firmware options
* :PVR_BO_FW_NO_CLEAR_ON_RESET: By default, all FW objects are cleared and reinitialised on hard
diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.c b/drivers/gpu/drm/imagination/pvr_hwrt.c
index 54f88d6c01e5..dc0c25fa1847 100644
--- a/drivers/gpu/drm/imagination/pvr_hwrt.c
+++ b/drivers/gpu/drm/imagination/pvr_hwrt.c
@@ -44,13 +44,12 @@ hwrt_init_kernel_structure(struct pvr_file *pvr_file,
{
struct pvr_device *pvr_dev = pvr_file->pvr_dev;
int err;
- int i;
hwrt->pvr_dev = pvr_dev;
hwrt->max_rts = args->layers;
/* Get pointers to the free lists */
- for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ for (int i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
hwrt->free_lists[i] = pvr_free_list_lookup(pvr_file, args->free_list_handles[i]);
if (!hwrt->free_lists[i]) {
err = -EINVAL;
@@ -67,7 +66,7 @@ hwrt_init_kernel_structure(struct pvr_file *pvr_file,
return 0;
err_put_free_lists:
- for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ for (int i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
pvr_free_list_put(hwrt->free_lists[i]);
hwrt->free_lists[i] = NULL;
}
@@ -78,9 +77,7 @@ err_put_free_lists:
static void
hwrt_fini_kernel_structure(struct pvr_hwrt_dataset *hwrt)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ for (int i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
pvr_free_list_put(hwrt->free_lists[i]);
hwrt->free_lists[i] = NULL;
}
@@ -363,13 +360,12 @@ hwrt_data_init_fw_structure(struct pvr_file *pvr_file,
struct drm_pvr_create_hwrt_geom_data_args *geom_data_args = &args->geom_data_args;
struct pvr_device *pvr_dev = pvr_file->pvr_dev;
struct rogue_fwif_rta_ctl *rta_ctl;
- int free_list_i;
int err;
pvr_fw_object_get_fw_addr(hwrt->common_fw_obj,
&hwrt_data->data.hwrt_data_common_fw_addr);
- for (free_list_i = 0; free_list_i < ARRAY_SIZE(hwrt->free_lists); free_list_i++) {
+ for (int free_list_i = 0; free_list_i < ARRAY_SIZE(hwrt->free_lists); free_list_i++) {
pvr_fw_object_get_fw_addr(hwrt->free_lists[free_list_i]->fw_obj,
&hwrt_data->data.freelists_fw_addr[free_list_i]);
}
diff --git a/drivers/gpu/drm/imagination/pvr_mmu.c b/drivers/gpu/drm/imagination/pvr_mmu.c
index 4fe70610ed94..450d476d183f 100644
--- a/drivers/gpu/drm/imagination/pvr_mmu.c
+++ b/drivers/gpu/drm/imagination/pvr_mmu.c
@@ -17,6 +17,7 @@
#include <linux/dma-mapping.h>
#include <linux/kmemleak.h>
#include <linux/minmax.h>
+#include <linux/property.h>
#include <linux/sizes.h>
#define PVR_SHIFT_FROM_SIZE(size_) (__builtin_ctzll(size_))
@@ -259,6 +260,7 @@ pvr_mmu_backing_page_init(struct pvr_mmu_backing_page *page,
struct device *dev = from_pvr_device(pvr_dev)->dev;
struct page *raw_page;
+ pgprot_t prot;
int err;
dma_addr_t dma_addr;
@@ -268,7 +270,11 @@ pvr_mmu_backing_page_init(struct pvr_mmu_backing_page *page,
if (!raw_page)
return -ENOMEM;
- host_ptr = vmap(&raw_page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ prot = PAGE_KERNEL;
+ if (device_get_dma_attr(dev) != DEV_DMA_COHERENT)
+ prot = pgprot_writecombine(prot);
+
+ host_ptr = vmap(&raw_page, 1, VM_MAP, prot);
if (!host_ptr) {
err = -ENOMEM;
goto err_free_page;
diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c
index ba7816fd28ec..41f5d89e78b8 100644
--- a/drivers/gpu/drm/imagination/pvr_power.c
+++ b/drivers/gpu/drm/imagination/pvr_power.c
@@ -10,11 +10,15 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -252,6 +256,8 @@ pvr_power_device_suspend(struct device *dev)
clk_disable_unprepare(pvr_dev->sys_clk);
clk_disable_unprepare(pvr_dev->core_clk);
+ err = reset_control_assert(pvr_dev->reset);
+
err_drm_dev_exit:
drm_dev_exit(idx);
@@ -282,16 +288,33 @@ pvr_power_device_resume(struct device *dev)
if (err)
goto err_sys_clk_disable;
+ /*
+ * According to the hardware manual, a delay of at least 32 clock
+ * cycles is required between de-asserting the clkgen reset and
+ * de-asserting the GPU reset. The worst case is a low GPU clock
+ * frequency: at 32 MHz, 32 cycles take a full microsecond, so a
+ * 1 microsecond delay satisfies the requirement across all
+ * feasible GPU clock speeds.
+ */
+ udelay(1);
+
+ err = reset_control_deassert(pvr_dev->reset);
+ if (err)
+ goto err_mem_clk_disable;
+
if (pvr_dev->fw_dev.booted) {
err = pvr_power_fw_enable(pvr_dev);
if (err)
- goto err_mem_clk_disable;
+ goto err_reset_assert;
}
drm_dev_exit(idx);
return 0;
+err_reset_assert:
+ reset_control_assert(pvr_dev->reset);
+
err_mem_clk_disable:
clk_disable_unprepare(pvr_dev->mem_clk);
@@ -431,3 +454,114 @@ pvr_watchdog_fini(struct pvr_device *pvr_dev)
{
cancel_delayed_work_sync(&pvr_dev->watchdog.work);
}
+
+int pvr_power_domains_init(struct pvr_device *pvr_dev)
+{
+ struct device *dev = from_pvr_device(pvr_dev)->dev;
+
+ struct device_link **domain_links __free(kfree) = NULL;
+ struct device **domain_devs __free(kfree) = NULL;
+ int domain_count;
+ int link_count;
+
+ char dev_name[2] = "a";
+ int err;
+ int i;
+
+ domain_count = of_count_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells");
+ if (domain_count < 0)
+ return domain_count;
+
+ if (domain_count <= 1)
+ return 0;
+
+ link_count = domain_count + (domain_count - 1);
+
+ domain_devs = kcalloc(domain_count, sizeof(*domain_devs), GFP_KERNEL);
+ if (!domain_devs)
+ return -ENOMEM;
+
+ domain_links = kcalloc(link_count, sizeof(*domain_links), GFP_KERNEL);
+ if (!domain_links)
+ return -ENOMEM;
+
+ for (i = 0; i < domain_count; i++) {
+ struct device *domain_dev;
+
+ dev_name[0] = 'a' + i;
+ domain_dev = dev_pm_domain_attach_by_name(dev, dev_name);
+ if (IS_ERR_OR_NULL(domain_dev)) {
+ err = domain_dev ? PTR_ERR(domain_dev) : -ENODEV;
+ goto err_detach;
+ }
+
+ domain_devs[i] = domain_dev;
+ }
+
+ for (i = 0; i < domain_count; i++) {
+ struct device_link *link;
+
+ link = device_link_add(dev, domain_devs[i], DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+ if (!link) {
+ err = -ENODEV;
+ goto err_unlink;
+ }
+
+ domain_links[i] = link;
+ }
+
+ for (i = domain_count; i < link_count; i++) {
+ struct device_link *link;
+
+ link = device_link_add(domain_devs[i - domain_count + 1],
+ domain_devs[i - domain_count],
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+ if (!link) {
+ err = -ENODEV;
+ goto err_unlink;
+ }
+
+ domain_links[i] = link;
+ }
+
+ pvr_dev->power = (struct pvr_device_power){
+ .domain_devs = no_free_ptr(domain_devs),
+ .domain_links = no_free_ptr(domain_links),
+ .domain_count = domain_count,
+ };
+
+ return 0;
+
+err_unlink:
+ while (--i >= 0)
+ device_link_del(domain_links[i]);
+
+ i = domain_count;
+
+err_detach:
+ while (--i >= 0)
+ dev_pm_domain_detach(domain_devs[i], true);
+
+ return err;
+}
+
+void pvr_power_domains_fini(struct pvr_device *pvr_dev)
+{
+ const int domain_count = pvr_dev->power.domain_count;
+
+ int i = domain_count + (domain_count - 1);
+
+ while (--i >= 0)
+ device_link_del(pvr_dev->power.domain_links[i]);
+
+ i = domain_count;
+
+ while (--i >= 0)
+ dev_pm_domain_detach(pvr_dev->power.domain_devs[i], true);
+
+ kfree(pvr_dev->power.domain_links);
+ kfree(pvr_dev->power.domain_devs);
+
+ pvr_dev->power = (struct pvr_device_power){ 0 };
+}
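
pvr_power_domains_init() creates domain_count links from the GPU to each power domain, plus domain_count - 1 links chaining consecutive domains (device_link_add() takes the consumer first and the supplier second). A stand-alone illustration of the resulting topology for three hypothetical domains "a", "b" and "c":

#include <stdio.h>

int main(void)
{
	int domain_count = 3;
	int link_count = domain_count + (domain_count - 1);

	/* GPU -> domain links: the GPU consumes every domain. */
	for (int i = 0; i < domain_count; i++)
		printf("link %d: gpu -> domain '%c'\n", i, 'a' + i);

	/* Chaining links: each domain consumes the previous one. */
	for (int i = domain_count; i < link_count; i++)
		printf("link %d: domain '%c' -> domain '%c'\n", i,
		       'a' + (i - domain_count + 1), 'a' + (i - domain_count));

	return 0;
}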
diff --git a/drivers/gpu/drm/imagination/pvr_power.h b/drivers/gpu/drm/imagination/pvr_power.h
index 9a9312dcb2da..ada85674a7ca 100644
--- a/drivers/gpu/drm/imagination/pvr_power.h
+++ b/drivers/gpu/drm/imagination/pvr_power.h
@@ -38,4 +38,7 @@ pvr_power_put(struct pvr_device *pvr_dev)
return pm_runtime_put(drm_dev->dev);
}
+int pvr_power_domains_init(struct pvr_device *pvr_dev);
+void pvr_power_domains_fini(struct pvr_device *pvr_dev);
+
#endif /* PVR_POWER_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
index 2a90d02796d3..790c97f80a2a 100644
--- a/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
+++ b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
@@ -827,6 +827,120 @@
#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK 0xFFFFFFFEU
#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_EN 0x00000001U
+/* Register ROGUE_CR_EVENT_CLEAR */
+#define ROGUE_CR_EVENT_CLEAR 0x0138U
+#define ROGUE_CR_EVENT_CLEAR__ROGUEXE__MASKFULL 0x00000000E01DFFFFULL
+#define ROGUE_CR_EVENT_CLEAR__SIGNALS__MASKFULL 0x00000000E007FFFFULL
+#define ROGUE_CR_EVENT_CLEAR_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_SHIFT 31U
+#define ROGUE_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_CLRMSK 0x7FFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_EN 0x80000000U
+#define ROGUE_CR_EVENT_CLEAR_TDM_BUFFER_STALL_SHIFT 30U
+#define ROGUE_CR_EVENT_CLEAR_TDM_BUFFER_STALL_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_TDM_BUFFER_STALL_EN 0x40000000U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_SHIFT 29U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_EN 0x20000000U
+#define ROGUE_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_SHIFT 28U
+#define ROGUE_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_EN 0x10000000U
+#define ROGUE_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_SHIFT 27U
+#define ROGUE_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_EN 0x08000000U
+#define ROGUE_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_SHIFT 26U
+#define ROGUE_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_CLRMSK 0xFBFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_EN 0x04000000U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC3_FINISHED_SHIFT 25U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC3_FINISHED_CLRMSK 0xFDFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC3_FINISHED_EN 0x02000000U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC2_FINISHED_SHIFT 24U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC2_FINISHED_CLRMSK 0xFEFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC2_FINISHED_EN 0x01000000U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC1_FINISHED_SHIFT 23U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC1_FINISHED_CLRMSK 0xFF7FFFFFU
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC1_FINISHED_EN 0x00800000U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC0_FINISHED_SHIFT 22U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC0_FINISHED_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC0_FINISHED_EN 0x00400000U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC3_FINISHED_SHIFT 21U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC3_FINISHED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC3_FINISHED_EN 0x00200000U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC2_FINISHED_SHIFT 20U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC2_FINISHED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC2_FINISHED_EN 0x00100000U
+#define ROGUE_CR_EVENT_CLEAR_SAFETY_SHIFT 20U
+#define ROGUE_CR_EVENT_CLEAR_SAFETY_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_SAFETY_EN 0x00100000U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC1_FINISHED_SHIFT 19U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC1_FINISHED_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC1_FINISHED_EN 0x00080000U
+#define ROGUE_CR_EVENT_CLEAR_SLAVE_REQ_SHIFT 19U
+#define ROGUE_CR_EVENT_CLEAR_SLAVE_REQ_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_EVENT_CLEAR_SLAVE_REQ_EN 0x00080000U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC0_FINISHED_SHIFT 18U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC0_FINISHED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC0_FINISHED_EN 0x00040000U
+#define ROGUE_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_SHIFT 18U
+#define ROGUE_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_EN 0x00040000U
+#define ROGUE_CR_EVENT_CLEAR_SHG_FINISHED_SHIFT 17U
+#define ROGUE_CR_EVENT_CLEAR_SHG_FINISHED_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_EVENT_CLEAR_SHG_FINISHED_EN 0x00020000U
+#define ROGUE_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_SHIFT 17U
+#define ROGUE_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_EN 0x00020000U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_SHIFT 16U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_EN 0x00010000U
+#define ROGUE_CR_EVENT_CLEAR_USC_TRIGGER_SHIFT 15U
+#define ROGUE_CR_EVENT_CLEAR_USC_TRIGGER_CLRMSK 0xFFFF7FFFU
+#define ROGUE_CR_EVENT_CLEAR_USC_TRIGGER_EN 0x00008000U
+#define ROGUE_CR_EVENT_CLEAR_ZLS_FINISHED_SHIFT 14U
+#define ROGUE_CR_EVENT_CLEAR_ZLS_FINISHED_CLRMSK 0xFFFFBFFFU
+#define ROGUE_CR_EVENT_CLEAR_ZLS_FINISHED_EN 0x00004000U
+#define ROGUE_CR_EVENT_CLEAR_GPIO_ACK_SHIFT 13U
+#define ROGUE_CR_EVENT_CLEAR_GPIO_ACK_CLRMSK 0xFFFFDFFFU
+#define ROGUE_CR_EVENT_CLEAR_GPIO_ACK_EN 0x00002000U
+#define ROGUE_CR_EVENT_CLEAR_GPIO_REQ_SHIFT 12U
+#define ROGUE_CR_EVENT_CLEAR_GPIO_REQ_CLRMSK 0xFFFFEFFFU
+#define ROGUE_CR_EVENT_CLEAR_GPIO_REQ_EN 0x00001000U
+#define ROGUE_CR_EVENT_CLEAR_POWER_ABORT_SHIFT 11U
+#define ROGUE_CR_EVENT_CLEAR_POWER_ABORT_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_EVENT_CLEAR_POWER_ABORT_EN 0x00000800U
+#define ROGUE_CR_EVENT_CLEAR_POWER_COMPLETE_SHIFT 10U
+#define ROGUE_CR_EVENT_CLEAR_POWER_COMPLETE_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_EVENT_CLEAR_POWER_COMPLETE_EN 0x00000400U
+#define ROGUE_CR_EVENT_CLEAR_MMU_PAGE_FAULT_SHIFT 9U
+#define ROGUE_CR_EVENT_CLEAR_MMU_PAGE_FAULT_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_EVENT_CLEAR_MMU_PAGE_FAULT_EN 0x00000200U
+#define ROGUE_CR_EVENT_CLEAR_PM_3D_MEM_FREE_SHIFT 8U
+#define ROGUE_CR_EVENT_CLEAR_PM_3D_MEM_FREE_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_EVENT_CLEAR_PM_3D_MEM_FREE_EN 0x00000100U
+#define ROGUE_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_SHIFT 7U
+#define ROGUE_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_EN 0x00000080U
+#define ROGUE_CR_EVENT_CLEAR_TA_TERMINATE_SHIFT 6U
+#define ROGUE_CR_EVENT_CLEAR_TA_TERMINATE_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_EVENT_CLEAR_TA_TERMINATE_EN 0x00000040U
+#define ROGUE_CR_EVENT_CLEAR_TA_FINISHED_SHIFT 5U
+#define ROGUE_CR_EVENT_CLEAR_TA_FINISHED_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_EVENT_CLEAR_TA_FINISHED_EN 0x00000020U
+#define ROGUE_CR_EVENT_CLEAR_ISP_END_MACROTILE_SHIFT 4U
+#define ROGUE_CR_EVENT_CLEAR_ISP_END_MACROTILE_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_EVENT_CLEAR_ISP_END_MACROTILE_EN 0x00000010U
+#define ROGUE_CR_EVENT_CLEAR_PIXELBE_END_RENDER_SHIFT 3U
+#define ROGUE_CR_EVENT_CLEAR_PIXELBE_END_RENDER_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_EVENT_CLEAR_PIXELBE_END_RENDER_EN 0x00000008U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_FINISHED_SHIFT 2U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_FINISHED_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_FINISHED_EN 0x00000004U
+#define ROGUE_CR_EVENT_CLEAR_KERNEL_FINISHED_SHIFT 1U
+#define ROGUE_CR_EVENT_CLEAR_KERNEL_FINISHED_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_EVENT_CLEAR_KERNEL_FINISHED_EN 0x00000002U
+#define ROGUE_CR_EVENT_CLEAR_TLA_COMPLETE_SHIFT 0U
+#define ROGUE_CR_EVENT_CLEAR_TLA_COMPLETE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_EVENT_CLEAR_TLA_COMPLETE_EN 0x00000001U
+
/* Register ROGUE_CR_TIMER */
#define ROGUE_CR_TIMER 0x0160U
#define ROGUE_CR_TIMER_MASKFULL 0x8000FFFFFFFFFFFFULL
@@ -6031,25 +6145,6 @@
#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT 0U
#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK 0xFFFFFF00U
-/* Register ROGUE_CR_ECC_RAM_ERR_INJ */
-#define ROGUE_CR_ECC_RAM_ERR_INJ 0xF340U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_MASKFULL 0x000000000000001FULL
-#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_SHIFT 4U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_CLRMSK 0xFFFFFFEFU
-#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN 0x00000010U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_SHIFT 3U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_CLRMSK 0xFFFFFFF7U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_EN 0x00000008U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_SHIFT 2U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_CLRMSK 0xFFFFFFFBU
-#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN 0x00000004U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_SHIFT 1U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_CLRMSK 0xFFFFFFFDU
-#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_EN 0x00000002U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_SHIFT 0U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_CLRMSK 0xFFFFFFFEU
-#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_EN 0x00000001U
-
/* Register ROGUE_CR_ECC_RAM_INIT_KICK */
#define ROGUE_CR_ECC_RAM_INIT_KICK 0xF348U
#define ROGUE_CR_ECC_RAM_INIT_KICK_MASKFULL 0x000000000000001FULL
@@ -6163,6 +6258,26 @@
#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU
#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U
+/* Register ROGUE_CR_FAULT_FW_STATUS */
+#define ROGUE_CR_FAULT_FW_STATUS 0xF3B0U
+#define ROGUE_CR_FAULT_FW_STATUS_MASKFULL 0x0000000000010001ULL
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_CORRECT_SHIFT 16U
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_CORRECT_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_CORRECT_EN 0x00010000U
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_DETECT_SHIFT 0U
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_DETECT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_DETECT_EN 0x00000001U
+
+/* Register ROGUE_CR_FAULT_FW_CLEAR */
+#define ROGUE_CR_FAULT_FW_CLEAR 0xF3B8U
+#define ROGUE_CR_FAULT_FW_CLEAR_MASKFULL 0x0000000000010001ULL
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_CORRECT_SHIFT 16U
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_CORRECT_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_CORRECT_EN 0x00010000U
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_DETECT_SHIFT 0U
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_DETECT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_DETECT_EN 0x00000001U
+
/* Register ROGUE_CR_MTS_SAFETY_EVENT_ENABLE */
#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE 0xF3D8U
#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL 0x000000000000007FULL
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_riscv.h b/drivers/gpu/drm/imagination/pvr_rogue_riscv.h
new file mode 100644
index 000000000000..9a070e24fa6a
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_riscv.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2024 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_RISCV_H
+#define PVR_ROGUE_RISCV_H
+
+#include "pvr_rogue_cr_defs.h"
+
+#include <linux/bitops.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#define ROGUE_RISCVFW_REGION_SIZE SZ_256M
+#define ROGUE_RISCVFW_REGION_SHIFT __ffs(ROGUE_RISCVFW_REGION_SIZE)
+
+enum rogue_riscvfw_region {
+ ROGUE_RISCV_REGION__RESERVED_0 = 0,
+ ROGUE_RISCV_REGION__RESERVED_1,
+ ROGUE_RISCV_REGION_SOCIF,
+ ROGUE_RISCV_REGION__RESERVED_3,
+ ROGUE_RISCV_REGION__RESERVED_4,
+ ROGUE_RISCV_REGION_BOOTLDR_DATA,
+ ROGUE_RISCV_REGION_SHARED_CACHED_DATA,
+ ROGUE_RISCV_REGION__RESERVED_7,
+ ROGUE_RISCV_REGION_COREMEM,
+ ROGUE_RISCV_REGION__RESERVED_9,
+ ROGUE_RISCV_REGION__RESERVED_A,
+ ROGUE_RISCV_REGION__RESERVED_B,
+ ROGUE_RISCV_REGION_BOOTLDR_CODE,
+ ROGUE_RISCV_REGION_SHARED_UNCACHED_DATA,
+ ROGUE_RISCV_REGION__RESERVED_E,
+ ROGUE_RISCV_REGION__RESERVED_F,
+
+ ROGUE_RISCV_REGION__COUNT,
+};
+
+#define ROGUE_RISCVFW_REGION_BASE(r) ((u32)(ROGUE_RISCV_REGION_##r) << ROGUE_RISCVFW_REGION_SHIFT)
+#define ROGUE_RISCVFW_REGION_REMAP_CR(r) \
+ (ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0 + (u32)(ROGUE_RISCV_REGION_##r) * 8U)
+
+#endif /* PVR_ROGUE_RISCV_H */
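
Since the region size is 256 MiB, ROGUE_RISCVFW_REGION_SHIFT evaluates to 28 and each enum index selects a 256 MiB window of the RISC-V address map. A stand-alone check of the arithmetic for the BOOTLDR_CODE region (index 0xC):

#include <stdio.h>

#define REGION_SHIFT 28u /* __ffs(SZ_256M) == log2(256 MiB) */

int main(void)
{
	unsigned int bootldr_code = 0xC; /* ROGUE_RISCV_REGION_BOOTLDR_CODE */

	/* Prints 0xc0000000, matching ROGUE_RISCVFW_REGION_BASE(BOOTLDR_CODE). */
	printf("BOOTLDR_CODE region base: 0x%x\n", bootldr_code << REGION_SHIFT);
	return 0;
}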
diff --git a/drivers/gpu/drm/imagination/pvr_stream.c b/drivers/gpu/drm/imagination/pvr_stream.c
index 975336a4facf..679aa618b7a9 100644
--- a/drivers/gpu/drm/imagination/pvr_stream.c
+++ b/drivers/gpu/drm/imagination/pvr_stream.c
@@ -67,9 +67,8 @@ pvr_stream_process_1(struct pvr_device *pvr_dev, const struct pvr_stream_def *st
u8 *dest, u32 dest_size, u32 *stream_offset_out)
{
int err = 0;
- u32 i;
- for (i = 0; i < nr_entries; i++) {
+ for (u32 i = 0; i < nr_entries; i++) {
if (stream_def[i].offset >= dest_size) {
err = -EINVAL;
break;
@@ -131,7 +130,6 @@ pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
u32 musthave_masks[PVR_STREAM_EXTHDR_TYPE_MAX];
u32 ext_header;
int err = 0;
- u32 i;
/* Copy "must have" mask from device. We clear this as we process the stream. */
memcpy(musthave_masks, pvr_dev->stream_musthave_quirks[cmd_defs->type],
@@ -159,7 +157,7 @@ pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
musthave_masks[type] &= ~data;
- for (i = 0; i < header->ext_streams_num; i++) {
+ for (u32 i = 0; i < header->ext_streams_num; i++) {
const struct pvr_stream_ext_def *ext_def = &header->ext_streams[i];
if (!(ext_header & ext_def->header_mask))
@@ -181,7 +179,7 @@ pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
* Verify that "must have" mask is now zero. If it isn't then one of the "must have" quirks
* for this command was not present.
*/
- for (i = 0; i < cmd_defs->ext_nr_headers; i++) {
+ for (u32 i = 0; i < cmd_defs->ext_nr_headers; i++) {
if (musthave_masks[i])
return -EINVAL;
}
@@ -245,13 +243,11 @@ pvr_stream_process(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs
if (err)
return err;
} else {
- u32 i;
-
/*
* If we don't have an extension stream then there must not be any "must have"
* quirks for this command.
*/
- for (i = 0; i < cmd_defs->ext_nr_headers; i++) {
+ for (u32 i = 0; i < cmd_defs->ext_nr_headers; i++) {
if (pvr_dev->stream_musthave_quirks[cmd_defs->type][i])
return -EINVAL;
}
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c
index 94af854547d6..5847a1c92bea 100644
--- a/drivers/gpu/drm/imagination/pvr_vm_mips.c
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c
@@ -100,10 +100,9 @@ pvr_vm_mips_fini(struct pvr_device *pvr_dev)
{
struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
- int page_nr;
vunmap(mips_data->pt);
- for (page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) {
+ for (int page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) {
dma_unmap_page(from_pvr_device(pvr_dev)->dev,
mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 9e66eb77b1eb..6d8325c76697 100644
--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
@@ -162,11 +162,12 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
}
static int imx_pd_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
- return drm_bridge_attach(bridge->encoder, imxpd->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, imxpd->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 20b93fff0239..f851e9ffdb28 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -791,11 +791,12 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
}
static int ingenic_drm_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
- struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(bridge->encoder);
+ struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(encoder);
- return drm_bridge_attach(bridge->encoder, ib->next_bridge,
+ return drm_bridge_attach(encoder, ib->next_bridge,
&ib->bridge, flags);
}
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 9bb997dbb4b9..5deec673c11e 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -47,7 +47,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
}
bo->base.pages = pages;
- bo->base.pages_use_count = 1;
+ refcount_set(&bo->base.pages_use_count, 1);
mapping_set_unevictable(mapping);
}
@@ -195,7 +195,7 @@ static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
if (bo->heap_size)
return -EINVAL;
- return drm_gem_shmem_vmap(&bo->base, map);
+ return drm_gem_shmem_vmap_locked(&bo->base, map);
}
static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 825135a26aa4..7934098e651b 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -371,7 +371,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
} else {
buffer_chunk->size = lima_bo_size(bo);
- ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
+ ret = drm_gem_vmap(&bo->base.base, &map);
if (ret) {
kvfree(et);
goto out;
@@ -379,7 +379,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);
- drm_gem_vunmap_unlocked(&bo->base.base, &map);
+ drm_gem_vunmap(&bo->base.base, &map);
}
buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index 395449a72f0a..a3423459dd7a 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -1048,6 +1048,7 @@ void mcde_dsi_disable(struct drm_bridge *bridge)
}
static int mcde_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
@@ -1059,7 +1060,7 @@ static int mcde_dsi_bridge_attach(struct drm_bridge *bridge,
}
/* Attach the DSI bridge to the output (panel etc) bridge */
- return drm_bridge_attach(bridge->encoder, d->bridge_out, bridge, flags);
+ return drm_bridge_attach(encoder, d->bridge_out, bridge, flags);
}
static const struct drm_bridge_funcs mcde_dsi_bridge_funcs = {
@@ -1137,7 +1138,6 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
d->bridge_out = bridge;
/* Create a bridge for this DSI channel */
- d->bridge.funcs = &mcde_dsi_bridge_funcs;
d->bridge.of_node = dev->of_node;
drm_bridge_add(&d->bridge);
@@ -1173,9 +1173,9 @@ static int mcde_dsi_probe(struct platform_device *pdev)
u32 dsi_id;
int ret;
- d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
- if (!d)
- return -ENOMEM;
+ d = devm_drm_bridge_alloc(dev, struct mcde_dsi, bridge, &mcde_dsi_bridge_funcs);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
d->dev = dev;
platform_set_drvdata(pdev, d);
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 32a2ed6c0cfe..43afd0a26d14 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -21,10 +21,8 @@ mediatek-drm-y := mtk_crtc.o \
obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
-mediatek-drm-hdmi-objs := mtk_cec.o \
- mtk_hdmi.o \
- mtk_hdmi_ddc.o
-
-obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_cec.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_hdmi.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_hdmi_ddc.o
obj-$(CONFIG_DRM_MEDIATEK_DP) += mtk_dp.o
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
index b42c0d87eba3..c7be530ca041 100644
--- a/drivers/gpu/drm/mediatek/mtk_cec.c
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -12,7 +12,6 @@
#include <linux/platform_device.h>
#include "mtk_cec.h"
-#include "mtk_hdmi.h"
#include "mtk_drm_drv.h"
#define TR_CONFIG 0x00
@@ -102,6 +101,7 @@ void mtk_cec_set_hpd_event(struct device *dev,
cec->hpd_event = hpd_event;
spin_unlock_irqrestore(&cec->lock, flags);
}
+EXPORT_SYMBOL_NS_GPL(mtk_cec_set_hpd_event, "DRM_MTK_HDMI_V1");
bool mtk_cec_hpd_high(struct device *dev)
{
@@ -112,6 +112,7 @@ bool mtk_cec_hpd_high(struct device *dev)
return (status & (HDMI_PORD | HDMI_HTPLG)) == (HDMI_PORD | HDMI_HTPLG);
}
+EXPORT_SYMBOL_NS_GPL(mtk_cec_hpd_high, "DRM_MTK_HDMI_V1");
static void mtk_cec_htplg_irq_init(struct mtk_cec *cec)
{
@@ -247,3 +248,7 @@ struct platform_driver mtk_cec_driver = {
.of_match_table = mtk_cec_of_ids,
},
};
+module_platform_driver(mtk_cec_driver);
+
+MODULE_DESCRIPTION("MediaTek HDMI CEC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index fa0e95dd29a0..fe97bb97e004 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -492,11 +492,6 @@ static const struct of_device_id mtk_ovl_adaptor_comp_dt_ids[] = {
{ /* sentinel */ }
};
-static int compare_of(struct device *dev, void *data)
-{
- return dev->of_node == data;
-}
-
static int ovl_adaptor_of_get_ddp_comp_type(struct device_node *node,
enum mtk_ovl_adaptor_comp_type *ctype)
{
@@ -567,7 +562,7 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma
priv->ovl_adaptor_comp[id] = &comp_pdev->dev;
- drm_of_component_match_add(dev, match, compare_of, node);
+ drm_of_component_match_add(dev, match, component_compare_of, node);
dev_dbg(dev, "Adding component match for %pOF\n", node);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index fed3307d3374..b2408abb9d49 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -2287,6 +2287,7 @@ static void mtk_dp_poweroff(struct mtk_dp *mtk_dp)
}
static int mtk_dp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
@@ -2310,7 +2311,7 @@ static int mtk_dp_bridge_attach(struct drm_bridge *bridge,
goto err_aux_register;
if (mtk_dp->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, mtk_dp->next_bridge,
+ ret = drm_bridge_attach(encoder, mtk_dp->next_bridge,
&mtk_dp->bridge, flags);
if (ret) {
drm_warn(mtk_dp->drm_dev,
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 0fd13e6dd3f1..6fb85bc6487a 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -59,7 +59,8 @@ enum mtk_dpi_out_channel_swap {
enum mtk_dpi_out_color_format {
MTK_DPI_COLOR_FORMAT_RGB,
- MTK_DPI_COLOR_FORMAT_YCBCR_422
+ MTK_DPI_COLOR_FORMAT_YCBCR_422,
+ MTK_DPI_COLOR_FORMAT_YCBCR_444
};
struct mtk_dpi {
@@ -450,9 +451,16 @@ static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi)
static void mtk_dpi_config_color_format(struct mtk_dpi *dpi,
enum mtk_dpi_out_color_format format)
{
- mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB);
+ mtk_dpi_config_channel_swap(dpi, dpi->channel_swap);
- if (format == MTK_DPI_COLOR_FORMAT_YCBCR_422) {
+ switch (format) {
+ case MTK_DPI_COLOR_FORMAT_YCBCR_444:
+ mtk_dpi_config_yuv422_enable(dpi, false);
+ mtk_dpi_config_csc_enable(dpi, true);
+ if (dpi->conf->swap_input_support)
+ mtk_dpi_config_swap_input(dpi, false);
+ break;
+ case MTK_DPI_COLOR_FORMAT_YCBCR_422:
mtk_dpi_config_yuv422_enable(dpi, true);
mtk_dpi_config_csc_enable(dpi, true);
@@ -463,11 +471,14 @@ static void mtk_dpi_config_color_format(struct mtk_dpi *dpi,
mtk_dpi_mask(dpi, DPI_MATRIX_SET, dpi->mode.hdisplay <= 720 ?
MATRIX_SEL_RGB_TO_BT601 : MATRIX_SEL_RGB_TO_JPEG,
INT_MATRIX_SEL_MASK);
- } else {
+ break;
+ default:
+ case MTK_DPI_COLOR_FORMAT_RGB:
mtk_dpi_config_yuv422_enable(dpi, false);
mtk_dpi_config_csc_enable(dpi, false);
if (dpi->conf->swap_input_support)
mtk_dpi_config_swap_input(dpi, false);
+ break;
}
}
@@ -734,6 +745,65 @@ static u32 *mtk_dpi_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
return input_fmts;
}
+static unsigned int mtk_dpi_bus_fmt_bit_num(unsigned int out_bus_format)
+{
+ switch (out_bus_format) {
+ default:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ case MEDIA_BUS_FMT_RGB888_2X12_LE:
+ case MEDIA_BUS_FMT_RGB888_2X12_BE:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ return MTK_DPI_OUT_BIT_NUM_8BITS;
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ return MTK_DPI_OUT_BIT_NUM_10BITS;
+ case MEDIA_BUS_FMT_YUYV12_1X24:
+ return MTK_DPI_OUT_BIT_NUM_12BITS;
+ }
+}
+
+static unsigned int mtk_dpi_bus_fmt_channel_swap(unsigned int out_bus_format)
+{
+ switch (out_bus_format) {
+ default:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB888_2X12_LE:
+ case MEDIA_BUS_FMT_RGB888_2X12_BE:
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUYV12_1X24:
+ return MTK_DPI_OUT_CHANNEL_SWAP_RGB;
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ return MTK_DPI_OUT_CHANNEL_SWAP_BGR;
+ }
+}
+
+static unsigned int mtk_dpi_bus_fmt_color_format(unsigned int out_bus_format)
+{
+ switch (out_bus_format) {
+ default:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ case MEDIA_BUS_FMT_RGB888_2X12_LE:
+ case MEDIA_BUS_FMT_RGB888_2X12_BE:
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ return MTK_DPI_COLOR_FORMAT_RGB;
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUYV12_1X24:
+ return MTK_DPI_COLOR_FORMAT_YCBCR_422;
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ return MTK_DPI_COLOR_FORMAT_YCBCR_444;
+ }
+}
+
static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
@@ -753,18 +823,16 @@ static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge,
bridge_state->output_bus_cfg.format);
dpi->output_fmt = out_bus_format;
- dpi->bit_num = MTK_DPI_OUT_BIT_NUM_8BITS;
- dpi->channel_swap = MTK_DPI_OUT_CHANNEL_SWAP_RGB;
+ dpi->bit_num = mtk_dpi_bus_fmt_bit_num(out_bus_format);
+ dpi->channel_swap = mtk_dpi_bus_fmt_channel_swap(out_bus_format);
dpi->yc_map = MTK_DPI_OUT_YC_MAP_RGB;
- if (out_bus_format == MEDIA_BUS_FMT_YUYV8_1X16)
- dpi->color_format = MTK_DPI_COLOR_FORMAT_YCBCR_422;
- else
- dpi->color_format = MTK_DPI_COLOR_FORMAT_RGB;
+ dpi->color_format = mtk_dpi_bus_fmt_color_format(out_bus_format);
return 0;
}
static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_dpi *dpi = bridge_to_dpi(bridge);
@@ -783,7 +851,7 @@ static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
"Failed to get bridge\n");
}
- return drm_bridge_attach(bridge->encoder, dpi->next_bridge,
+ return drm_bridge_attach(encoder, dpi->next_bridge,
&dpi->bridge, flags);
}
@@ -1026,9 +1094,29 @@ static const u32 mt8183_output_fmts[] = {
MEDIA_BUS_FMT_RGB888_2X12_BE,
};
-static const u32 mt8195_output_fmts[] = {
+static const u32 mt8195_dpi_output_fmts[] = {
+ MEDIA_BUS_FMT_BGR888_1X24,
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_RGB888_2X12_LE,
+ MEDIA_BUS_FMT_RGB888_2X12_BE,
+ MEDIA_BUS_FMT_RGB101010_1X30,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YUYV10_1X20,
+ MEDIA_BUS_FMT_YUYV12_1X24,
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_YUV10_1X30,
+};
+
+static const u32 mt8195_dp_intf_output_fmts[] = {
+ MEDIA_BUS_FMT_BGR888_1X24,
MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_RGB888_2X12_LE,
+ MEDIA_BUS_FMT_RGB888_2X12_BE,
+ MEDIA_BUS_FMT_RGB101010_1X30,
MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YUYV10_1X20,
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_YUV10_1X30,
};
static const struct mtk_dpi_factor dpi_factor_mt2701[] = {
@@ -1141,8 +1229,8 @@ static const struct mtk_dpi_conf mt8192_conf = {
static const struct mtk_dpi_conf mt8195_conf = {
.max_clock_khz = 594000,
- .output_fmts = mt8183_output_fmts,
- .num_output_fmts = ARRAY_SIZE(mt8183_output_fmts),
+ .output_fmts = mt8195_dpi_output_fmts,
+ .num_output_fmts = ARRAY_SIZE(mt8195_dpi_output_fmts),
.pixels_per_iter = 1,
.is_ck_de_pol = true,
.swap_input_support = true,
@@ -1161,8 +1249,8 @@ static const struct mtk_dpi_conf mt8195_dpintf_conf = {
.dpi_factor = dpi_factor_mt8195_dp_intf,
.num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8195_dp_intf),
.max_clock_khz = 600000,
- .output_fmts = mt8195_output_fmts,
- .num_output_fmts = ARRAY_SIZE(mt8195_output_fmts),
+ .output_fmts = mt8195_dp_intf_output_fmts,
+ .num_output_fmts = ARRAY_SIZE(mt8195_dp_intf_output_fmts),
.pixels_per_iter = 4,
.dimension_mask = DPINTF_HPW_MASK,
.hvsize_mask = DPINTF_HSIZE_MASK,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 74158b9d6503..7c0c12dde488 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -470,7 +470,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
ret = drmm_mode_config_init(drm);
if (ret)
- goto put_mutex_dev;
+ return ret;
drm->mode_config.min_width = 64;
drm->mode_config.min_height = 64;
@@ -488,8 +488,11 @@ static int mtk_drm_kms_init(struct drm_device *drm)
for (i = 0; i < private->data->mmsys_dev_num; i++) {
drm->dev_private = private->all_drm_private[i];
ret = component_bind_all(private->all_drm_private[i]->dev, drm);
- if (ret)
- goto put_mutex_dev;
+ if (ret) {
+ while (--i >= 0)
+ component_unbind_all(private->all_drm_private[i]->dev, drm);
+ return ret;
+ }
}
/*
@@ -582,9 +585,6 @@ static int mtk_drm_kms_init(struct drm_device *drm)
err_component_unbind:
for (i = 0; i < private->data->mmsys_dev_num; i++)
component_unbind_all(private->all_drm_private[i]->dev, drm);
-put_mutex_dev:
- for (i = 0; i < private->data->mmsys_dev_num; i++)
- put_device(private->all_drm_private[i]->mutex_dev);
return ret;
}
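
The `while (--i >= 0)` loop above is the standard partial-unwind idiom: on failure at iteration i, only components 0..i-1 were bound, so only those are unbound, in reverse order. A runnable sketch of the idiom with placeholder init/undo functions (not the driver's API):

/* Standalone sketch of the partial-unwind idiom: on a mid-loop
 * failure, only the iterations that already succeeded are undone,
 * in reverse order. All names here are illustrative.
 */
#include <stdio.h>

#define N 4

static int init_one(int i)
{
        if (i == 2)
                return -1;              /* simulate failure at index 2 */
        printf("init %d\n", i);
        return 0;
}

static void undo_one(int i)
{
        printf("undo %d\n", i);
}

static int init_all(void)
{
        int ret, i;

        for (i = 0; i < N; i++) {
                ret = init_one(i);
                if (ret) {
                        while (--i >= 0)        /* unwind 0..i-1 only */
                                undo_one(i);
                        return ret;
                }
        }
        return 0;
}

int main(void)
{
        return init_all() ? 1 : 0;      /* prints init 0, init 1, undo 1, undo 0 */
}
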
@@ -655,8 +655,10 @@ static int mtk_drm_bind(struct device *dev)
return 0;
drm = drm_dev_alloc(&mtk_drm_driver, dev);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
+ if (IS_ERR(drm)) {
+ ret = PTR_ERR(drm);
+ goto err_put_dev;
+ }
private->drm_master = true;
drm->dev_private = private;
@@ -682,18 +684,31 @@ err_free:
drm_dev_put(drm);
for (i = 0; i < private->data->mmsys_dev_num; i++)
private->all_drm_private[i]->drm = NULL;
+err_put_dev:
+ for (i = 0; i < private->data->mmsys_dev_num; i++) {
+ /* For device_find_child in mtk_drm_get_all_priv() */
+ put_device(private->all_drm_private[i]->dev);
+ }
+ put_device(private->mutex_dev);
return ret;
}
static void mtk_drm_unbind(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
+ int i;
/* for multi mmsys dev, unregister drm dev in mmsys master */
if (private->drm_master) {
drm_dev_unregister(private->drm);
mtk_drm_kms_deinit(private->drm);
drm_dev_put(private->drm);
+
+ for (i = 0; i < private->data->mmsys_dev_num; i++) {
+ /* For device_find_child in mtk_drm_get_all_priv() */
+ put_device(private->all_drm_private[i]->dev);
+ }
+ put_device(private->mutex_dev);
}
private->mtk_drm_bound = false;
private->drm_master = false;
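
The added put_device() calls pair with the reference that device_find_child() takes in mtk_drm_get_all_priv(): each reference must be dropped exactly once, whether bind fails early or the device is later unbound. A minimal counter-based sketch of that invariant (illustrative names only, simulated with a plain counter rather than a kobject refcount):

#include <assert.h>

static int refcount;

static void get_ref(void) { refcount++; }       /* device_find_child() side effect */
static void put_ref(void) { refcount--; }       /* put_device() */

static int bind(int fail)
{
        get_ref();                      /* lookup during bind */
        if (fail) {
                put_ref();              /* err_put_dev path */
                return -1;
        }
        return 0;
}

static void unbind(void)
{
        put_ref();                      /* teardown path */
}

int main(void)
{
        bind(1);                        /* failed bind */
        assert(refcount == 0);
        bind(0);                        /* successful bind ... */
        unbind();                       /* ... then unbind */
        assert(refcount == 0);
        return 0;
}
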
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index d1f407fb7eb1..4fe1f38a3c4b 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -807,12 +807,13 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
}
static int mtk_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
/* Attach the panel or bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->next_bridge,
+ return drm_bridge_attach(encoder, dsi->next_bridge,
&dsi->bridge, flags);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 06e4fac152b7..8803cd4a8bc9 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -31,7 +31,6 @@
#include <drm/drm_probe_helper.h>
#include "mtk_cec.h"
-#include "mtk_hdmi.h"
#include "mtk_hdmi_regs.h"
#define NCTS_BYTES 7
@@ -165,7 +164,7 @@ struct mtk_hdmi {
bool dvi_mode;
struct regmap *sys_regmap;
unsigned int sys_offset;
- void __iomem *regs;
+ struct regmap *regs;
struct platform_device *audio_pdev;
struct hdmi_audio_param aud_param;
bool audio_enable;
@@ -181,50 +180,10 @@ static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
return container_of(b, struct mtk_hdmi, bridge);
}
-static u32 mtk_hdmi_read(struct mtk_hdmi *hdmi, u32 offset)
-{
- return readl(hdmi->regs + offset);
-}
-
-static void mtk_hdmi_write(struct mtk_hdmi *hdmi, u32 offset, u32 val)
-{
- writel(val, hdmi->regs + offset);
-}
-
-static void mtk_hdmi_clear_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
-{
- void __iomem *reg = hdmi->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp &= ~bits;
- writel(tmp, reg);
-}
-
-static void mtk_hdmi_set_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
-{
- void __iomem *reg = hdmi->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp |= bits;
- writel(tmp, reg);
-}
-
-static void mtk_hdmi_mask(struct mtk_hdmi *hdmi, u32 offset, u32 val, u32 mask)
-{
- void __iomem *reg = hdmi->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp = (tmp & ~mask) | (val & mask);
- writel(tmp, reg);
-}
-
static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
{
- mtk_hdmi_mask(hdmi, VIDEO_CFG_4, black ? GEN_RGB : NORMAL_PATH,
- VIDEO_SOURCE_SEL);
+ regmap_update_bits(hdmi->regs, VIDEO_CFG_4,
+ VIDEO_SOURCE_SEL, black ? GEN_RGB : NORMAL_PATH);
}
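
One pitfall in this conversion: the deleted mtk_hdmi_mask() took (reg, val, mask) while regmap_update_bits() takes (map, reg, mask, val), so val and mask trade places and the register must stay in the second slot. Both compute the same read-modify-write; a small userspace check of that shared semantics:

/* Both the old helper and regmap_update_bits() compute
 *   new = (old & ~mask) | (val & mask);
 * only the argument order differs (helper: reg, val, mask --
 * regmap: reg, mask, val), the easiest mistake in a mechanical
 * conversion like this one.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t update_bits(uint32_t old, uint32_t mask, uint32_t val)
{
        return (old & ~mask) | (val & mask);
}

int main(void)
{
        /* set bit 2 inside the two-bit field at bits [2:1] of 0xf0 */
        assert(update_bits(0xf0, 0x06, 0x04) == 0xf4);
        return 0;
}
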
static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
@@ -259,12 +218,12 @@ static void mtk_hdmi_hw_1p4_version_enable(struct mtk_hdmi *hdmi, bool enable)
static void mtk_hdmi_hw_aud_mute(struct mtk_hdmi *hdmi)
{
- mtk_hdmi_set_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
+ regmap_set_bits(hdmi->regs, GRL_AUDIO_CFG, AUDIO_ZERO);
}
static void mtk_hdmi_hw_aud_unmute(struct mtk_hdmi *hdmi)
{
- mtk_hdmi_clear_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
+ regmap_clear_bits(hdmi->regs, GRL_AUDIO_CFG, AUDIO_ZERO);
}
static void mtk_hdmi_hw_reset(struct mtk_hdmi *hdmi)
@@ -273,25 +232,25 @@ static void mtk_hdmi_hw_reset(struct mtk_hdmi *hdmi)
HDMI_RST, HDMI_RST);
regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
HDMI_RST, 0);
- mtk_hdmi_clear_bits(hdmi, GRL_CFG3, CFG3_CONTROL_PACKET_DELAY);
+ regmap_clear_bits(hdmi->regs, GRL_CFG3, CFG3_CONTROL_PACKET_DELAY);
regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
ANLG_ON, ANLG_ON);
}
static void mtk_hdmi_hw_enable_notice(struct mtk_hdmi *hdmi, bool enable_notice)
{
- mtk_hdmi_mask(hdmi, GRL_CFG2, enable_notice ? CFG2_NOTICE_EN : 0,
- CFG2_NOTICE_EN);
+ regmap_update_bits(hdmi->regs, GRL_CFG2, CFG2_NOTICE_EN,
+ enable_notice ? CFG2_NOTICE_EN : 0);
}
static void mtk_hdmi_hw_write_int_mask(struct mtk_hdmi *hdmi, u32 int_mask)
{
- mtk_hdmi_write(hdmi, GRL_INT_MASK, int_mask);
+ regmap_write(hdmi->regs, GRL_INT_MASK, int_mask);
}
static void mtk_hdmi_hw_enable_dvi_mode(struct mtk_hdmi *hdmi, bool enable)
{
- mtk_hdmi_mask(hdmi, GRL_CFG1, enable ? CFG1_DVI : 0, CFG1_DVI);
+ regmap_update_bits(hdmi->regs, GRL_CFG1, CFG1_DVI, enable ? CFG1_DVI : 0);
}
static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
@@ -337,22 +296,22 @@ static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
dev_err(hdmi->dev, "Unknown infoframe type %d\n", frame_type);
return;
}
- mtk_hdmi_clear_bits(hdmi, ctrl_reg, ctrl_frame_en);
- mtk_hdmi_write(hdmi, GRL_INFOFRM_TYPE, frame_type);
- mtk_hdmi_write(hdmi, GRL_INFOFRM_VER, frame_ver);
- mtk_hdmi_write(hdmi, GRL_INFOFRM_LNG, frame_len);
+ regmap_clear_bits(hdmi->regs, ctrl_reg, ctrl_frame_en);
+ regmap_write(hdmi->regs, GRL_INFOFRM_TYPE, frame_type);
+ regmap_write(hdmi->regs, GRL_INFOFRM_VER, frame_ver);
+ regmap_write(hdmi->regs, GRL_INFOFRM_LNG, frame_len);
- mtk_hdmi_write(hdmi, GRL_IFM_PORT, checksum);
+ regmap_write(hdmi->regs, GRL_IFM_PORT, checksum);
for (i = 0; i < frame_len; i++)
- mtk_hdmi_write(hdmi, GRL_IFM_PORT, frame_data[i]);
+ regmap_write(hdmi->regs, GRL_IFM_PORT, frame_data[i]);
- mtk_hdmi_set_bits(hdmi, ctrl_reg, ctrl_frame_en);
+ regmap_set_bits(hdmi->regs, ctrl_reg, ctrl_frame_en);
}
static void mtk_hdmi_hw_send_aud_packet(struct mtk_hdmi *hdmi, bool enable)
{
- mtk_hdmi_mask(hdmi, GRL_SHIFT_R2, enable ? 0 : AUDIO_PACKET_OFF,
- AUDIO_PACKET_OFF);
+ regmap_update_bits(hdmi->regs, GRL_SHIFT_R2,
+ AUDIO_PACKET_OFF, enable ? 0 : AUDIO_PACKET_OFF);
}
static void mtk_hdmi_hw_config_sys(struct mtk_hdmi *hdmi)
@@ -373,44 +332,44 @@ static void mtk_hdmi_hw_set_deep_color_mode(struct mtk_hdmi *hdmi)
static void mtk_hdmi_hw_send_av_mute(struct mtk_hdmi *hdmi)
{
- mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
+ regmap_clear_bits(hdmi->regs, GRL_CFG4, CTRL_AVMUTE);
usleep_range(2000, 4000);
- mtk_hdmi_set_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
+ regmap_set_bits(hdmi->regs, GRL_CFG4, CTRL_AVMUTE);
}
static void mtk_hdmi_hw_send_av_unmute(struct mtk_hdmi *hdmi)
{
- mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_EN,
- CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
+ regmap_update_bits(hdmi->regs, GRL_CFG4, CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET,
+ CFG4_AV_UNMUTE_EN);
usleep_range(2000, 4000);
- mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_SET,
- CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
+ regmap_update_bits(hdmi->regs, GRL_CFG4, CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET,
+ CFG4_AV_UNMUTE_SET);
}
static void mtk_hdmi_hw_ncts_enable(struct mtk_hdmi *hdmi, bool on)
{
- mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, on ? 0 : CTS_CTRL_SOFT,
- CTS_CTRL_SOFT);
+ regmap_update_bits(hdmi->regs, GRL_CTS_CTRL, CTS_CTRL_SOFT,
+ on ? 0 : CTS_CTRL_SOFT);
}
static void mtk_hdmi_hw_ncts_auto_write_enable(struct mtk_hdmi *hdmi,
bool enable)
{
- mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, enable ? NCTS_WRI_ANYTIME : 0,
- NCTS_WRI_ANYTIME);
+ regmap_update_bits(hdmi->regs, GRL_CTS_CTRL, NCTS_WRI_ANYTIME,
+ enable ? NCTS_WRI_ANYTIME : 0);
}
static void mtk_hdmi_hw_msic_setting(struct mtk_hdmi *hdmi,
struct drm_display_mode *mode)
{
- mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CFG4_MHL_MODE);
+ regmap_clear_bits(hdmi->regs, GRL_CFG4, CFG4_MHL_MODE);
if (mode->flags & DRM_MODE_FLAG_INTERLACE &&
mode->clock == 74250 &&
mode->vdisplay == 1080)
- mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
+ regmap_clear_bits(hdmi->regs, GRL_CFG2, CFG2_MHL_DE_SEL);
else
- mtk_hdmi_set_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
+ regmap_set_bits(hdmi->regs, GRL_CFG2, CFG2_MHL_DE_SEL);
}
static void mtk_hdmi_hw_aud_set_channel_swap(struct mtk_hdmi *hdmi,
@@ -438,7 +397,7 @@ static void mtk_hdmi_hw_aud_set_channel_swap(struct mtk_hdmi *hdmi,
swap_bit = LFE_CC_SWAP;
break;
}
- mtk_hdmi_mask(hdmi, GRL_CH_SWAP, swap_bit, 0xff);
+ regmap_update_bits(hdmi->regs, GRL_CH_SWAP, 0xff, swap_bit);
}
static void mtk_hdmi_hw_aud_set_bit_num(struct mtk_hdmi *hdmi,
@@ -459,7 +418,7 @@ static void mtk_hdmi_hw_aud_set_bit_num(struct mtk_hdmi *hdmi,
break;
}
- mtk_hdmi_mask(hdmi, GRL_AOUT_CFG, val, AOUT_BNUM_SEL_MASK);
+ regmap_update_bits(hdmi->regs, GRL_AOUT_CFG, AOUT_BNUM_SEL_MASK, val);
}
static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi,
@@ -467,7 +426,7 @@ static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi,
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_CFG0);
+ regmap_read(hdmi->regs, GRL_CFG0, &val);
val &= ~(CFG0_W_LENGTH_MASK | CFG0_I2S_MODE_MASK);
switch (i2s_fmt) {
@@ -491,7 +450,7 @@ static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi,
val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_16BIT;
break;
}
- mtk_hdmi_write(hdmi, GRL_CFG0, val);
+ regmap_write(hdmi->regs, GRL_CFG0, val);
}
static void mtk_hdmi_hw_audio_config(struct mtk_hdmi *hdmi, bool dst)
@@ -500,14 +459,14 @@ static void mtk_hdmi_hw_audio_config(struct mtk_hdmi *hdmi, bool dst)
u8 val;
/* Disable high bitrate, set DST packet normal/double */
- mtk_hdmi_clear_bits(hdmi, GRL_AOUT_CFG, HIGH_BIT_RATE_PACKET_ALIGN);
+ regmap_clear_bits(hdmi->regs, GRL_AOUT_CFG, HIGH_BIT_RATE_PACKET_ALIGN);
if (dst)
val = DST_NORMAL_DOUBLE | SACD_DST;
else
val = 0;
- mtk_hdmi_mask(hdmi, GRL_AUDIO_CFG, val, mask);
+ regmap_update_bits(hdmi->regs, GRL_AUDIO_CFG, mask, val);
}
static void mtk_hdmi_hw_aud_set_i2s_chan_num(struct mtk_hdmi *hdmi,
@@ -548,10 +507,10 @@ static void mtk_hdmi_hw_aud_set_i2s_chan_num(struct mtk_hdmi *hdmi,
i2s_uv = I2S_UV_CH_EN(0);
}
- mtk_hdmi_write(hdmi, GRL_CH_SW0, ch_switch & 0xff);
- mtk_hdmi_write(hdmi, GRL_CH_SW1, (ch_switch >> 8) & 0xff);
- mtk_hdmi_write(hdmi, GRL_CH_SW2, (ch_switch >> 16) & 0xff);
- mtk_hdmi_write(hdmi, GRL_I2S_UV, i2s_uv);
+ regmap_write(hdmi->regs, GRL_CH_SW0, ch_switch & 0xff);
+ regmap_write(hdmi->regs, GRL_CH_SW1, (ch_switch >> 8) & 0xff);
+ regmap_write(hdmi->regs, GRL_CH_SW2, (ch_switch >> 16) & 0xff);
+ regmap_write(hdmi->regs, GRL_I2S_UV, i2s_uv);
}
static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi,
@@ -559,7 +518,7 @@ static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi,
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_CFG1);
+ regmap_read(hdmi->regs, GRL_CFG1, &val);
if (input_type == HDMI_AUD_INPUT_I2S &&
(val & CFG1_SPDIF) == CFG1_SPDIF) {
val &= ~CFG1_SPDIF;
@@ -567,7 +526,7 @@ static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi,
(val & CFG1_SPDIF) == 0) {
val |= CFG1_SPDIF;
}
- mtk_hdmi_write(hdmi, GRL_CFG1, val);
+ regmap_write(hdmi->regs, GRL_CFG1, val);
}
static void mtk_hdmi_hw_aud_set_channel_status(struct mtk_hdmi *hdmi,
@@ -576,13 +535,13 @@ static void mtk_hdmi_hw_aud_set_channel_status(struct mtk_hdmi *hdmi,
int i;
for (i = 0; i < 5; i++) {
- mtk_hdmi_write(hdmi, GRL_I2S_C_STA0 + i * 4, channel_status[i]);
- mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, channel_status[i]);
- mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, channel_status[i]);
+ regmap_write(hdmi->regs, GRL_I2S_C_STA0 + i * 4, channel_status[i]);
+ regmap_write(hdmi->regs, GRL_L_STATUS_0 + i * 4, channel_status[i]);
+ regmap_write(hdmi->regs, GRL_R_STATUS_0 + i * 4, channel_status[i]);
}
for (; i < 24; i++) {
- mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, 0);
- mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, 0);
+ regmap_write(hdmi->regs, GRL_L_STATUS_0 + i * 4, 0);
+ regmap_write(hdmi->regs, GRL_R_STATUS_0 + i * 4, 0);
}
}
@@ -590,13 +549,13 @@ static void mtk_hdmi_hw_aud_src_reenable(struct mtk_hdmi *hdmi)
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
+ regmap_read(hdmi->regs, GRL_MIX_CTRL, &val);
if (val & MIX_CTRL_SRC_EN) {
val &= ~MIX_CTRL_SRC_EN;
- mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
+ regmap_write(hdmi->regs, GRL_MIX_CTRL, val);
usleep_range(255, 512);
val |= MIX_CTRL_SRC_EN;
- mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
+ regmap_write(hdmi->regs, GRL_MIX_CTRL, val);
}
}
@@ -604,10 +563,10 @@ static void mtk_hdmi_hw_aud_src_disable(struct mtk_hdmi *hdmi)
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
+ regmap_read(hdmi->regs, GRL_MIX_CTRL, &val);
val &= ~MIX_CTRL_SRC_EN;
- mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
- mtk_hdmi_write(hdmi, GRL_SHIFT_L1, 0x00);
+ regmap_write(hdmi->regs, GRL_MIX_CTRL, val);
+ regmap_write(hdmi->regs, GRL_SHIFT_L1, 0x00);
}
static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
@@ -615,7 +574,7 @@ static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_CFG5);
+ regmap_read(hdmi->regs, GRL_CFG5, &val);
val &= CFG5_CD_RATIO_MASK;
switch (mclk) {
@@ -638,7 +597,7 @@ static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
val |= CFG5_FS256;
break;
}
- mtk_hdmi_write(hdmi, GRL_CFG5, val);
+ regmap_write(hdmi->regs, GRL_CFG5, val);
}
struct hdmi_acr_n {
@@ -716,15 +675,22 @@ static unsigned int hdmi_expected_cts(unsigned int audio_sample_rate,
128 * audio_sample_rate);
}
+static void mtk_hdmi_get_ncts(unsigned int sample_rate, unsigned int clock,
+ unsigned int *n, unsigned int *cts)
+{
+ *n = hdmi_recommended_n(sample_rate, clock);
+ *cts = hdmi_expected_cts(sample_rate, clock, *n);
+}
+
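
mtk_hdmi_get_ncts() packages the HDMI audio clock-regeneration pair: the spec fixes 128 × fs = f_TMDS × N / CTS, so the expected CTS is f_TMDS × N / (128 × fs). A quick standalone check with the common 48 kHz / 148.5 MHz / N = 6144 case (plain truncating division here; the in-kernel helper rounds to nearest on a u64):

/* CTS sketch for mtk_hdmi_get_ncts():
 *   CTS = 148500000 * 6144 / (128 * 48000) = 148500
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int expected_cts(unsigned int fs_hz, unsigned int tmds_hz,
                                 unsigned int n)
{
        return (unsigned int)(((uint64_t)tmds_hz * n) / (128ULL * fs_hz));
}

int main(void)
{
        printf("CTS = %u\n", expected_cts(48000, 148500000, 6144)); /* 148500 */
        return 0;
}
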
static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n,
unsigned int cts)
{
unsigned char val[NCTS_BYTES];
int i;
- mtk_hdmi_write(hdmi, GRL_NCTS, 0);
- mtk_hdmi_write(hdmi, GRL_NCTS, 0);
- mtk_hdmi_write(hdmi, GRL_NCTS, 0);
+ regmap_write(hdmi->regs, GRL_NCTS, 0);
+ regmap_write(hdmi->regs, GRL_NCTS, 0);
+ regmap_write(hdmi->regs, GRL_NCTS, 0);
memset(val, 0, sizeof(val));
val[0] = (cts >> 24) & 0xff;
@@ -737,7 +703,7 @@ static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n,
val[6] = n & 0xff;
for (i = 0; i < NCTS_BYTES; i++)
- mtk_hdmi_write(hdmi, GRL_NCTS, val[i]);
+ regmap_write(hdmi->regs, GRL_NCTS, val[i]);
}
static void mtk_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi,
@@ -746,14 +712,12 @@ static void mtk_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi,
{
unsigned int n, cts;
- n = hdmi_recommended_n(sample_rate, clock);
- cts = hdmi_expected_cts(sample_rate, clock, n);
+ mtk_hdmi_get_ncts(sample_rate, clock, &n, &cts);
dev_dbg(hdmi->dev, "%s: sample_rate=%u, clock=%d, cts=%u, n=%u\n",
__func__, sample_rate, clock, n, cts);
- mtk_hdmi_mask(hdmi, DUMMY_304, AUDIO_I2S_NCTS_SEL_64,
- AUDIO_I2S_NCTS_SEL);
+ regmap_update_bits(hdmi->regs, DUMMY_304, AUDIO_I2S_NCTS_SEL, AUDIO_I2S_NCTS_SEL_64);
do_hdmi_hw_aud_set_ncts(hdmi, n, cts);
}
@@ -873,7 +837,7 @@ static void mtk_hdmi_aud_set_input(struct mtk_hdmi *hdmi)
bool dst;
mtk_hdmi_hw_aud_set_channel_swap(hdmi, HDMI_AUD_SWAP_LFE_CC);
- mtk_hdmi_set_bits(hdmi, GRL_MIX_CTRL, MIX_CTRL_FLAT);
+ regmap_set_bits(hdmi->regs, GRL_MIX_CTRL, MIX_CTRL_FLAT);
if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF &&
hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST) {
@@ -905,7 +869,7 @@ static int mtk_hdmi_aud_set_src(struct mtk_hdmi *hdmi,
mtk_hdmi_hw_ncts_enable(hdmi, false);
mtk_hdmi_hw_aud_src_disable(hdmi);
- mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_ACLK_INV);
+ regmap_clear_bits(hdmi->regs, GRL_CFG2, CFG2_ACLK_INV);
if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_I2S) {
switch (sample_rate) {
@@ -1061,20 +1025,6 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
return 0;
}
-static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi)
-{
- struct hdmi_audio_param *aud_param = &hdmi->aud_param;
-
- aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
- aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
- aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
- aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
- aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
-
- return 0;
-}
-
static void mtk_hdmi_audio_enable(struct mtk_hdmi *hdmi)
{
mtk_hdmi_hw_send_aud_packet(hdmi, true);
@@ -1087,20 +1037,6 @@ static void mtk_hdmi_audio_disable(struct mtk_hdmi *hdmi)
hdmi->audio_enable = false;
}
-static int mtk_hdmi_audio_set_param(struct mtk_hdmi *hdmi,
- struct hdmi_audio_param *param)
-{
- if (!hdmi->audio_enable) {
- dev_err(hdmi->dev, "hdmi audio is in disable state!\n");
- return -EINVAL;
- }
- dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n",
- param->aud_codec, param->aud_input_type,
- param->aud_input_chan_type, param->codec_params.sample_rate);
- memcpy(&hdmi->aud_param, param, sizeof(*param));
- return mtk_hdmi_aud_output_config(hdmi, &hdmi->mode);
-}
-
static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi,
struct drm_display_mode *mode)
{
@@ -1269,6 +1205,7 @@ static const struct drm_edid *mtk_hdmi_bridge_edid_read(struct drm_bridge *bridg
}
static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1281,7 +1218,7 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
}
if (hdmi->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
+ ret = drm_bridge_attach(encoder, hdmi->next_bridge,
bridge, flags);
if (ret)
return ret;
@@ -1407,30 +1344,20 @@ static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = {
.edid_read = mtk_hdmi_bridge_edid_read,
};
-static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
- struct platform_device *pdev)
+static int mtk_hdmi_get_cec_dev(struct mtk_hdmi *hdmi, struct device *dev, struct device_node *np)
{
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- struct device_node *cec_np, *remote, *i2c_np;
struct platform_device *cec_pdev;
- struct regmap *regmap;
+ struct device_node *cec_np;
int ret;
ret = mtk_hdmi_get_all_clk(hdmi, np);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get clocks: %d\n", ret);
-
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
/* The CEC module handles HDMI hotplug detection */
cec_np = of_get_compatible_child(np->parent, "mediatek,mt8173-cec");
- if (!cec_np) {
- dev_err(dev, "Failed to find CEC node\n");
- return -EINVAL;
- }
+ if (!cec_np)
+ return dev_err_probe(dev, -EINVAL, "Failed to find CEC node\n");
cec_pdev = of_find_device_by_node(cec_np);
if (!cec_pdev) {
@@ -1440,82 +1367,77 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
return -EPROBE_DEFER;
}
of_node_put(cec_np);
- hdmi->cec_dev = &cec_pdev->dev;
/*
* The mediatek,syscon-hdmi property contains a phandle link to the
* MMSYS_CONFIG device and the register offset of the HDMI_SYS_CFG
* registers it contains.
*/
- regmap = syscon_regmap_lookup_by_phandle(np, "mediatek,syscon-hdmi");
- ret = of_property_read_u32_index(np, "mediatek,syscon-hdmi", 1,
- &hdmi->sys_offset);
- if (IS_ERR(regmap))
- ret = PTR_ERR(regmap);
- if (ret) {
- dev_err(dev,
- "Failed to get system configuration registers: %d\n",
- ret);
- goto put_device;
- }
- hdmi->sys_regmap = regmap;
+ hdmi->sys_regmap = syscon_regmap_lookup_by_phandle_args(np, "mediatek,syscon-hdmi",
+ 1, &hdmi->sys_offset);
+ if (IS_ERR(hdmi->sys_regmap))
+ return dev_err_probe(dev, PTR_ERR(hdmi->sys_regmap),
+ "Failed to get system configuration registers\n");
- hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(hdmi->regs)) {
- ret = PTR_ERR(hdmi->regs);
- goto put_device;
- }
+ hdmi->cec_dev = &cec_pdev->dev;
+ return 0;
+}
+
+static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *remote, *i2c_np;
+ int ret;
+
+ ret = mtk_hdmi_get_all_clk(hdmi, np);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
+
+ hdmi->regs = device_node_to_regmap(dev->of_node);
+ if (IS_ERR(hdmi->regs))
+ return PTR_ERR(hdmi->regs);
remote = of_graph_get_remote_node(np, 1, 0);
- if (!remote) {
- ret = -EINVAL;
- goto put_device;
- }
+ if (!remote)
+ return -EINVAL;
if (!of_device_is_compatible(remote, "hdmi-connector")) {
hdmi->next_bridge = of_drm_find_bridge(remote);
if (!hdmi->next_bridge) {
dev_err(dev, "Waiting for external bridge\n");
of_node_put(remote);
- ret = -EPROBE_DEFER;
- goto put_device;
+ return -EPROBE_DEFER;
}
}
i2c_np = of_parse_phandle(remote, "ddc-i2c-bus", 0);
- if (!i2c_np) {
- dev_err(dev, "Failed to find ddc-i2c-bus node in %pOF\n",
- remote);
- of_node_put(remote);
- ret = -EINVAL;
- goto put_device;
- }
of_node_put(remote);
+ if (!i2c_np)
+ return dev_err_probe(dev, -EINVAL, "No ddc-i2c-bus in connector\n");
hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
of_node_put(i2c_np);
- if (!hdmi->ddc_adpt) {
- dev_err(dev, "Failed to get ddc i2c adapter by node\n");
- ret = -EINVAL;
- goto put_device;
- }
+ if (!hdmi->ddc_adpt)
+ return dev_err_probe(dev, -EINVAL, "Failed to get ddc i2c adapter by node\n");
+
+ ret = mtk_hdmi_get_cec_dev(hdmi, dev, np);
+ if (ret)
+ return ret;
return 0;
-put_device:
- put_device(hdmi->cec_dev);
- return ret;
}
/*
* HDMI audio codec callbacks
*/
-static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
+static int mtk_hdmi_audio_params(struct mtk_hdmi *hdmi,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
{
- struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- struct hdmi_audio_param hdmi_params;
+ struct hdmi_audio_param aud_params = { 0 };
unsigned int chan = params->cea.channels;
dev_dbg(hdmi->dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
@@ -1526,16 +1448,16 @@ static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
switch (chan) {
case 2:
- hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
break;
case 4:
- hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0;
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0;
break;
case 6:
- hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1;
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1;
break;
case 8:
- hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1;
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1;
break;
default:
dev_err(hdmi->dev, "channel[%d] not supported!\n", chan);
@@ -1559,27 +1481,45 @@ static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
switch (daifmt->fmt) {
case HDMI_I2S:
- hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
- hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S;
- hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
- hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS;
+ aud_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ aud_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_params.aud_input_type = HDMI_AUD_INPUT_I2S;
+ aud_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
+ aud_params.aud_mclk = HDMI_AUD_MCLK_128FS;
break;
case HDMI_SPDIF:
- hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
- hdmi_params.aud_input_type = HDMI_AUD_INPUT_SPDIF;
+ aud_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ aud_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_params.aud_input_type = HDMI_AUD_INPUT_SPDIF;
break;
default:
dev_err(hdmi->dev, "%s: Invalid DAI format %d\n", __func__,
daifmt->fmt);
return -EINVAL;
}
+ memcpy(&aud_params.codec_params, params, sizeof(aud_params.codec_params));
+ memcpy(&hdmi->aud_param, &aud_params, sizeof(aud_params));
+
+ dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n",
+ aud_params.aud_codec, aud_params.aud_input_type,
+ aud_params.aud_input_chan_type, aud_params.codec_params.sample_rate);
- memcpy(&hdmi_params.codec_params, params,
- sizeof(hdmi_params.codec_params));
+ return 0;
+}
+
+static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+ int ret;
- mtk_hdmi_audio_set_param(hdmi, &hdmi_params);
+ if (!hdmi->audio_enable) {
+ dev_err(hdmi->dev, "hdmi audio is disabled!\n");
+ return -EINVAL;
+ }
+
+ ret = mtk_hdmi_audio_params(hdmi, daifmt, params);
+ if (ret)
+ return ret;
+
+ mtk_hdmi_aud_output_config(hdmi, &hdmi->mode);
return 0;
}
@@ -1625,17 +1565,22 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
return 0;
}
-static int mtk_hdmi_audio_hook_plugged_cb(struct device *dev, void *data,
- hdmi_codec_plugged_cb fn,
+static void mtk_hdmi_audio_set_plugged_cb(struct mtk_hdmi *hdmi, hdmi_codec_plugged_cb fn,
struct device *codec_dev)
{
- struct mtk_hdmi *hdmi = data;
-
mutex_lock(&hdmi->update_plugged_status_lock);
hdmi->plugged_cb = fn;
hdmi->codec_dev = codec_dev;
mutex_unlock(&hdmi->update_plugged_status_lock);
+}
+
+static int mtk_hdmi_audio_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct mtk_hdmi *hdmi = data;
+
+ mtk_hdmi_audio_set_plugged_cb(hdmi, fn, codec_dev);
mtk_hdmi_update_plugged_status(hdmi);
return 0;
@@ -1658,6 +1603,7 @@ static void mtk_hdmi_unregister_audio_driver(void *data)
static int mtk_hdmi_register_audio_driver(struct device *dev)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+ struct hdmi_audio_param *aud_param = &hdmi->aud_param;
struct hdmi_codec_pdata codec_data = {
.ops = &mtk_hdmi_audio_codec_ops,
.max_i2s_channels = 2,
@@ -1667,6 +1613,13 @@ static int mtk_hdmi_register_audio_driver(struct device *dev)
};
int ret;
+ aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
+ aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
+ aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
+ aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+
hdmi->audio_pdev = platform_device_register_data(dev,
HDMI_CODEC_DRV_NAME,
PLATFORM_DEVID_AUTO,
@@ -1708,11 +1661,6 @@ static int mtk_hdmi_probe(struct platform_device *pdev)
mutex_init(&hdmi->update_plugged_status_lock);
platform_set_drvdata(pdev, hdmi);
- ret = mtk_hdmi_output_init(hdmi);
- if (ret)
- return dev_err_probe(dev, ret,
- "Failed to initialize hdmi output\n");
-
ret = mtk_hdmi_register_audio_driver(dev);
if (ret)
return dev_err_probe(dev, ret,
@@ -1789,28 +1737,9 @@ static struct platform_driver mtk_hdmi_driver = {
.pm = &mtk_hdmi_pm_ops,
},
};
-
-static struct platform_driver * const mtk_hdmi_drivers[] = {
- &mtk_hdmi_ddc_driver,
- &mtk_cec_driver,
- &mtk_hdmi_driver,
-};
-
-static int __init mtk_hdmitx_init(void)
-{
- return platform_register_drivers(mtk_hdmi_drivers,
- ARRAY_SIZE(mtk_hdmi_drivers));
-}
-
-static void __exit mtk_hdmitx_exit(void)
-{
- platform_unregister_drivers(mtk_hdmi_drivers,
- ARRAY_SIZE(mtk_hdmi_drivers));
-}
-
-module_init(mtk_hdmitx_init);
-module_exit(mtk_hdmitx_exit);
+module_platform_driver(mtk_hdmi_driver);
MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
MODULE_DESCRIPTION("MediaTek HDMI Driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("DRM_MTK_HDMI_V1");
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.h b/drivers/gpu/drm/mediatek/mtk_hdmi.h
deleted file mode 100644
index 472bf141c92b..000000000000
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014 MediaTek Inc.
- * Author: Jie Qiu <jie.qiu@mediatek.com>
- */
-#ifndef _MTK_HDMI_CTRL_H
-#define _MTK_HDMI_CTRL_H
-
-struct platform_driver;
-
-extern struct platform_driver mtk_cec_driver;
-extern struct platform_driver mtk_hdmi_ddc_driver;
-
-#endif /* _MTK_HDMI_CTRL_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
index 07db68067844..6358e1af69b4 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
@@ -20,7 +20,6 @@
#include <linux/of_platform.h>
#include "mtk_drm_drv.h"
-#include "mtk_hdmi.h"
#define SIF1_CLOK (288)
#define DDC_DDCMCTL0 (0x0)
@@ -337,6 +336,7 @@ struct platform_driver mtk_hdmi_ddc_driver = {
.of_match_table = mtk_hdmi_ddc_match,
},
};
+module_platform_driver(mtk_hdmi_ddc_driver);
MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
MODULE_DESCRIPTION("MediaTek HDMI DDC Driver");
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index e79f7c3ce32e..c9678dc68fa1 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -83,12 +83,13 @@ meson_cvbs_get_mode(const struct drm_display_mode *req_mode)
}
static int meson_encoder_cvbs_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct meson_encoder_cvbs *meson_encoder_cvbs =
bridge_to_meson_encoder_cvbs(bridge);
- return drm_bridge_attach(bridge->encoder, meson_encoder_cvbs->next_bridge,
+ return drm_bridge_attach(encoder, meson_encoder_cvbs->next_bridge,
&meson_encoder_cvbs->bridge, flags);
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
index fe204437bd65..3db518e5f95d 100644
--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c
@@ -33,11 +33,12 @@ struct meson_encoder_dsi {
container_of(x, struct meson_encoder_dsi, bridge)
static int meson_encoder_dsi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct meson_encoder_dsi *encoder_dsi = bridge_to_meson_encoder_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, encoder_dsi->next_bridge,
+ return drm_bridge_attach(encoder, encoder_dsi->next_bridge,
&encoder_dsi->bridge, flags);
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index c08fa93e50a3..47136bbbe8c6 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -49,11 +49,12 @@ struct meson_encoder_hdmi {
container_of(x, struct meson_encoder_hdmi, bridge)
static int meson_encoder_hdmi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
- return drm_bridge_attach(bridge->encoder, encoder_hdmi->next_bridge,
+ return drm_bridge_attach(encoder, encoder_hdmi->next_bridge,
&encoder_hdmi->bridge, flags);
}
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 974bc7c0ea76..7f127e2ae442 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -104,6 +104,7 @@ config DRM_MSM_DPU
config DRM_MSM_DP
bool "Enable DisplayPort support in MSM DRM driver"
depends on DRM_MSM
+ select DRM_DISPLAY_HDMI_AUDIO_HELPER
select RATIONAL
default y
help
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 5df20cbeafb8..7a2ada6e2d74 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -48,7 +48,6 @@ msm-display-$(CONFIG_DRM_MSM_MDP4) += \
disp/mdp4/mdp4_dsi_encoder.o \
disp/mdp4/mdp4_dtv_encoder.o \
disp/mdp4/mdp4_lcdc_encoder.o \
- disp/mdp4/mdp4_lvds_connector.o \
disp/mdp4/mdp4_lvds_pll.o \
disp/mdp4/mdp4_irq.o \
disp/mdp4/mdp4_kms.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_catalog.c b/drivers/gpu/drm/msm/adreno/a2xx_catalog.c
index 9ddb7b31fd98..5ddd015f930d 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_catalog.c
@@ -45,8 +45,3 @@ static const struct adreno_info a2xx_gpus[] = {
}
};
DECLARE_ADRENO_GPULIST(a2xx);
-
-MODULE_FIRMWARE("qcom/leia_pfp_470.fw");
-MODULE_FIRMWARE("qcom/leia_pm4_470.fw");
-MODULE_FIRMWARE("qcom/yamato_pfp.fw");
-MODULE_FIRMWARE("qcom/yamato_pm4.fw");
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
index 2eb6c3e93748..1498e6532f62 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
@@ -85,8 +85,3 @@ static const struct adreno_info a3xx_gpus[] = {
}
};
DECLARE_ADRENO_GPULIST(a3xx);
-
-MODULE_FIRMWARE("qcom/a300_pm4.fw");
-MODULE_FIRMWARE("qcom/a300_pfp.fw");
-MODULE_FIRMWARE("qcom/a330_pm4.fw");
-MODULE_FIRMWARE("qcom/a330_pfp.fw");
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_catalog.c b/drivers/gpu/drm/msm/adreno/a4xx_catalog.c
index 93519f807f87..09f9f228b75e 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_catalog.c
@@ -45,6 +45,3 @@ static const struct adreno_info a4xx_gpus[] = {
}
};
DECLARE_ADRENO_GPULIST(a4xx);
-
-MODULE_FIRMWARE("qcom/a420_pm4.fw");
-MODULE_FIRMWARE("qcom/a420_pfp.fw");
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_catalog.c b/drivers/gpu/drm/msm/adreno/a5xx_catalog.c
index 633f31539162..b48a636d8237 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_catalog.c
@@ -150,12 +150,3 @@ static const struct adreno_info a5xx_gpus[] = {
}
};
DECLARE_ADRENO_GPULIST(a5xx);
-
-MODULE_FIRMWARE("qcom/a530_pm4.fw");
-MODULE_FIRMWARE("qcom/a530_pfp.fw");
-MODULE_FIRMWARE("qcom/a530v3_gpmu.fw2");
-MODULE_FIRMWARE("qcom/a530_zap.mdt");
-MODULE_FIRMWARE("qcom/a530_zap.b00");
-MODULE_FIRMWARE("qcom/a530_zap.b01");
-MODULE_FIRMWARE("qcom/a530_zap.b02");
-MODULE_FIRMWARE("qcom/a540_gpmu.fw2");
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 53e2ff4406d8..70f7ad806c34 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -681,6 +681,7 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
},
.gmem = (SZ_128K + SZ_4K),
+ .quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a610_zap.mdt",
@@ -713,6 +714,7 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_512K,
+ .quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
@@ -743,7 +745,8 @@ static const struct adreno_info a6xx_gpus[] = {
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mbn",
.a6xx = &(const struct a6xx_info) {
@@ -769,7 +772,8 @@ static const struct adreno_info a6xx_gpus[] = {
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.init = a6xx_gpu_init,
.a6xx = &(const struct a6xx_info) {
.protect = &a630_protect,
@@ -791,6 +795,7 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a619_gmu.bin",
},
.gmem = SZ_512K,
+ .quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
@@ -815,6 +820,7 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a619_gmu.bin",
},
.gmem = SZ_512K,
+ .quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
@@ -838,8 +844,9 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a619_gmu.bin",
},
.gmem = SZ_512K,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
.a6xx = &(const struct a6xx_info) {
@@ -874,7 +881,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00010000,
},
- .address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 137, 1 },
@@ -907,7 +913,6 @@ static const struct adreno_info a6xx_gpus[] = {
{ /* sentinel */ },
},
},
- .address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x06030001,
@@ -920,8 +925,9 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_1M,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a630_zap.mdt",
.a6xx = &(const struct a6xx_info) {
@@ -939,8 +945,9 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a640_gmu.bin",
},
.gmem = SZ_1M,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a640_zap.mdt",
.a6xx = &(const struct a6xx_info) {
@@ -973,7 +980,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00300200,
},
- .address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 1, 1 },
@@ -1000,7 +1006,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020000,
.prim_fifo_threshold = 0x00300200,
},
- .address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06060300),
.family = ADRENO_6XX_GEN4,
@@ -1019,7 +1024,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00300200,
},
- .address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06030500),
.family = ADRENO_6XX_GEN4,
@@ -1039,7 +1043,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00200200,
},
- .address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 117, 0 },
@@ -1056,8 +1059,9 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a640_gmu.bin",
},
.gmem = SZ_2M,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a640_zap.mdt",
.a6xx = &(const struct a6xx_info) {
@@ -1085,22 +1089,10 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00800200,
},
- .address_space_size = SZ_16G,
}
};
DECLARE_ADRENO_GPULIST(a6xx);
-MODULE_FIRMWARE("qcom/a615_zap.mbn");
-MODULE_FIRMWARE("qcom/a619_gmu.bin");
-MODULE_FIRMWARE("qcom/a630_sqe.fw");
-MODULE_FIRMWARE("qcom/a630_gmu.bin");
-MODULE_FIRMWARE("qcom/a630_zap.mbn");
-MODULE_FIRMWARE("qcom/a640_gmu.bin");
-MODULE_FIRMWARE("qcom/a650_gmu.bin");
-MODULE_FIRMWARE("qcom/a650_sqe.fw");
-MODULE_FIRMWARE("qcom/a660_gmu.bin");
-MODULE_FIRMWARE("qcom/a660_sqe.fw");
-
static const struct adreno_reglist a702_hwcg[] = {
{ REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222 },
{ REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220 },
@@ -1395,7 +1387,6 @@ static const struct adreno_info a7xx_gpus[] = {
.pwrup_reglist = &a7xx_pwrup_reglist,
.gmu_cgc_mode = 0x00020000,
},
- .address_space_size = SZ_16G,
.preempt_record_size = 2860 * SZ_1K,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43050a01), /* "C510v2" */
@@ -1429,7 +1420,6 @@ static const struct adreno_info a7xx_gpus[] = {
{ /* sentinel */ },
},
},
- .address_space_size = SZ_16G,
.preempt_record_size = 4192 * SZ_1K,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43050c01), /* "C512v2" */
@@ -1451,7 +1441,6 @@ static const struct adreno_info a7xx_gpus[] = {
.gmu_chipid = 0x7050001,
.gmu_cgc_mode = 0x00020202,
},
- .address_space_size = SZ_256G,
.preempt_record_size = 4192 * SZ_1K,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43051401), /* "C520v2" */
@@ -1484,7 +1473,6 @@ static const struct adreno_info a7xx_gpus[] = {
{ /* sentinel */ },
},
},
- .address_space_size = SZ_16G,
.preempt_record_size = 3572 * SZ_1K,
}
};
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index c8711938a5f4..38c0f8ef85c3 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1064,14 +1064,6 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
gmu->hung = false;
- /* Notify AOSS about the ACD state (unimplemented for now => disable it) */
- if (!IS_ERR(gmu->qmp)) {
- ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}",
- 0 /* Hardcode ACD to be disabled for now */);
- if (ret)
- dev_err(gmu->dev, "failed to send GPU ACD state\n");
- }
-
/* Turn on the resources */
pm_runtime_get_sync(gmu->dev);
@@ -1671,6 +1663,75 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
return a6xx_gmu_rpmh_votes_init(gmu);
}
+static int a6xx_gmu_acd_probe(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct a6xx_hfi_acd_table *cmd = &gmu->acd_table;
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ int ret, i, cmd_idx = 0;
+ extern bool disable_acd;
+
+ /* Skip ACD probe if requested via module param */
+ if (disable_acd) {
+ DRM_DEV_ERROR(gmu->dev, "Skipping GPU ACD probe\n");
+ return 0;
+ }
+
+ cmd->version = 1;
+ cmd->stride = 1;
+ cmd->enable_by_level = 0;
+
+ /* Skip freq = 0 and parse acd-level for rest of the OPPs */
+ for (i = 1; i < gmu->nr_gpu_freqs; i++) {
+ struct dev_pm_opp *opp;
+ struct device_node *np;
+ unsigned long freq;
+ u32 val;
+
+ freq = gmu->gpu_freqs[i];
+ opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, freq, true);
+ np = dev_pm_opp_get_of_node(opp);
+
+ ret = of_property_read_u32(np, "qcom,opp-acd-level", &val);
+ of_node_put(np);
+ dev_pm_opp_put(opp);
+ if (ret == -EINVAL)
+ continue;
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to read acd level for freq %lu\n", freq);
+ return ret;
+ }
+
+ cmd->enable_by_level |= BIT(i);
+ cmd->data[cmd_idx++] = val;
+ }
+
+ cmd->num_levels = cmd_idx;
+
+ /* It is a problem if qmp node is unavailable when ACD is required */
+ if (cmd->enable_by_level && IS_ERR_OR_NULL(gmu->qmp)) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to send ACD state to AOSS\n");
+ return -EINVAL;
+ }
+
+ /* Otherwise, nothing to do if qmp is unavailable */
+ if (IS_ERR_OR_NULL(gmu->qmp))
+ return 0;
+
+ /*
+ * Notify AOSS about the ACD state. AOSS is supposed to assume that ACD is disabled on
+ * system reset, so it is harmless if we fail to notify the 'OFF' state.
+ */
+ ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", !!cmd->enable_by_level);
+ if (ret && cmd->enable_by_level) {
+ DRM_DEV_ERROR(gmu->dev, "Failed to send ACD state to AOSS\n");
+ return ret;
+ }
+
+ return 0;
+}
+
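
The loop above turns a sparse per-OPP `qcom,opp-acd-level` property into a bitmask plus a densely packed array: BIT(i) marks OPP index i as ACD-capable and its level value is appended to data[], so num_levels counts only the OPPs that declared a level. A runnable sketch of that packing, with made-up level values:

/* Packing sketch for the ACD table: a sparse per-OPP property becomes
 * a bitmask plus a densely packed array. Level values are invented.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* ACD level per OPP index; 0 stands in for "property absent" */
        uint32_t opp_acd_level[] = { 0, 0, 0xa02, 0xa02, 0xa02 };
        uint32_t enable_by_level = 0, data[8];
        int i, cmd_idx = 0;

        for (i = 1; i < 5; i++) {       /* index 0 (freq = 0) is skipped */
                if (!opp_acd_level[i])
                        continue;       /* like the -EINVAL case above */
                enable_by_level |= 1u << i;
                data[cmd_idx++] = opp_acd_level[i];
        }
        /* prints enable_by_level=0x1c num_levels=3 */
        printf("enable_by_level=%#x num_levels=%d\n", enable_by_level, cmd_idx);
        return 0;
}
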
static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
@@ -1989,10 +2050,11 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto detach_cxpd;
}
+ /* Other errors are handled during GPU ACD probe */
gmu->qmp = qmp_get(gmu->dev);
- if (IS_ERR(gmu->qmp) && adreno_is_a7xx(adreno_gpu)) {
- ret = PTR_ERR(gmu->qmp);
- goto remove_device_link;
+ if (PTR_ERR_OR_ZERO(gmu->qmp) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto detach_gxpd;
}
init_completion(&gmu->pd_gate);
@@ -2008,6 +2070,10 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
/* Get the power levels for the GMU and GPU */
a6xx_gmu_pwrlevels_probe(gmu);
+ ret = a6xx_gmu_acd_probe(gmu);
+ if (ret)
+ goto detach_gxpd;
+
/* Set up the HFI queues */
a6xx_hfi_init(gmu);
@@ -2018,7 +2084,13 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
return 0;
-remove_device_link:
+detach_gxpd:
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
+ dev_pm_domain_detach(gmu->gxpd, false);
+
+ if (!IS_ERR_OR_NULL(gmu->qmp))
+ qmp_put(gmu->qmp);
+
device_link_del(link);
detach_cxpd:
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 0c888b326cfb..b2d4489b4024 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -93,6 +93,7 @@ struct a6xx_gmu {
int nr_gpu_freqs;
unsigned long gpu_freqs[GMU_MAX_GX_FREQS];
u32 gx_arc_votes[GMU_MAX_GX_FREQS];
+ struct a6xx_hfi_acd_table acd_table;
int nr_gpu_bws;
unsigned long gpu_bw_table[GMU_MAX_GX_FREQS];
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 242d02d48c0c..bf3758f010f4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -655,7 +655,6 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
if (adreno_is_7c3(gpu)) {
gpu->ubwc_config.highest_bank_bit = 14;
gpu->ubwc_config.amsbc = 1;
- gpu->ubwc_config.rgb565_predicator = 1;
gpu->ubwc_config.uavflagprd_inv = 2;
gpu->ubwc_config.macrotile_mode = 1;
}
@@ -2268,7 +2267,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu)
return ERR_CAST(mmu);
return msm_gem_address_space_create(mmu,
- "gpu", 0x100000000ULL,
+ "gpu", ADRENO_VM_START,
adreno_private_address_space_size(gpu));
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index 0989aee3dd2c..8e69b1e84657 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -100,16 +100,14 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
return 0;
}
-static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
- u32 *payload, u32 payload_size)
+static int a6xx_hfi_wait_for_msg_interrupt(struct a6xx_gmu *gmu, u32 id, u32 seqnum)
{
- struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
- u32 val;
int ret;
+ u32 val;
/* Wait for a response */
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
- val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
+ val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 1000000);
if (ret) {
DRM_DEV_ERROR(gmu->dev,
@@ -122,6 +120,19 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);
+ return 0;
+}
+
+static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
+ u32 *payload, u32 payload_size)
+{
+ struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
+ int ret;
+
+ ret = a6xx_hfi_wait_for_msg_interrupt(gmu, id, seqnum);
+ if (ret)
+ return ret;
+
for (;;) {
struct a6xx_hfi_msg_response resp;
@@ -129,12 +140,18 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
sizeof(resp) >> 2);
- /* If the queue is empty our response never made it */
+ /* If the queue is empty, there may have been previous missed
+ * responses that preceded the response to our packet. Wait
+ * further before we give up.
+ */
if (!ret) {
- DRM_DEV_ERROR(gmu->dev,
- "The HFI response queue is unexpectedly empty\n");
-
- return -ENOENT;
+ ret = a6xx_hfi_wait_for_msg_interrupt(gmu, id, seqnum);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev,
+ "The HFI response queue is unexpectedly empty\n");
+ return ret;
+ }
+ continue;
}
if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
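
The rationale for the retry: a single GMU2HOST doorbell can cover several queued responses, so once earlier responses have been drained, an empty queue no longer proves our ack was dropped; the code waits for another interrupt before giving up. A standalone simulation of that consume loop (all names illustrative, queue faked as an array):

#include <stdio.h>

static unsigned int queue[] = { 7, 8, 9 };      /* pending response seqnums */
static unsigned int head;

static int queue_read(unsigned int *seqnum)
{
        if (head >= sizeof(queue) / sizeof(queue[0]))
                return 0;                       /* empty */
        *seqnum = queue[head++];
        return 1;
}

static int wait_for_ack(unsigned int seqnum)
{
        unsigned int got;
        int retries = 1;                        /* one extra doorbell wait */

        for (;;) {
                if (!queue_read(&got)) {
                        if (retries-- <= 0)
                                return -1;      /* now it is really lost */
                        continue;               /* wait for another doorbell */
                }
                if (got == seqnum)
                        return 0;               /* our ack */
                /* stale response for an earlier message: keep draining */
        }
}

int main(void)
{
        printf("ack for 9: %d\n", wait_for_ack(9));     /* drains 7, 8 first */
        return 0;
}
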
@@ -748,6 +765,38 @@ send:
NULL, 0);
}
+#define HFI_FEATURE_ACD 12
+
+static int a6xx_hfi_enable_acd(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_acd_table *acd_table = &gmu->acd_table;
+ struct a6xx_hfi_msg_feature_ctrl msg = {
+ .feature = HFI_FEATURE_ACD,
+ .enable = 1,
+ .data = 0,
+ };
+ int ret;
+
+ if (!acd_table->enable_by_level)
+ return 0;
+
+ /* Enable ACD feature at GMU */
+ ret = a6xx_hfi_send_msg(gmu, HFI_H2F_FEATURE_CTRL, &msg, sizeof(msg), NULL, 0);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to enable ACD (%d)\n", ret);
+ return ret;
+ }
+
+ /* Send ACD table to GMU */
+ ret = a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_ACD, acd_table, sizeof(*acd_table), NULL, 0);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to ACD table (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
{
struct a6xx_hfi_msg_test msg = { 0 };
@@ -845,6 +894,10 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
if (ret)
return ret;
+ ret = a6xx_hfi_enable_acd(gmu);
+ if (ret)
+ return ret;
+
ret = a6xx_hfi_send_core_fw_start(gmu);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
index 52ba4a07d7b9..653ef720e2da 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -151,12 +151,33 @@ struct a6xx_hfi_msg_test {
u32 header;
};
+#define HFI_H2F_MSG_ACD 7
+#define MAX_ACD_STRIDE 2
+
+struct a6xx_hfi_acd_table {
+ u32 header;
+ u32 version;
+ u32 enable_by_level;
+ u32 stride;
+ u32 num_levels;
+ u32 data[16 * MAX_ACD_STRIDE];
+};
+
#define HFI_H2F_MSG_START 10
struct a6xx_hfi_msg_start {
u32 header;
};
+#define HFI_H2F_FEATURE_CTRL 11
+
+struct a6xx_hfi_msg_feature_ctrl {
+ u32 header;
+ u32 feature;
+ u32 enable;
+ u32 data;
+};
+
#define HFI_H2F_MSG_CORE_FW_START 14
struct a6xx_hfi_msg_core_fw_start {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 236b25c094cd..f5e1490d07c1 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -24,6 +24,10 @@ int enable_preemption = -1;
MODULE_PARM_DESC(enable_preemption, "Enable preemption (A7xx only) (1=on , 0=disable, -1=auto (default))");
module_param(enable_preemption, int, 0600);
+bool disable_acd;
+MODULE_PARM_DESC(disable_acd, "Forcefully disable GPU ACD");
+module_param_unsafe(disable_acd, bool, 0400);
+
extern const struct adreno_gpulist a2xx_gpulist;
extern const struct adreno_gpulist a3xx_gpulist;
extern const struct adreno_gpulist a4xx_gpulist;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 26db1f4b5fb9..2348ffb35f7e 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -236,14 +236,27 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
u64 adreno_private_address_space_size(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
+ const struct io_pgtable_cfg *ttbr1_cfg;
if (address_space_size)
return address_space_size;
- if (adreno_gpu->info->address_space_size)
- return adreno_gpu->info->address_space_size;
+ if (adreno_gpu->info->quirks & ADRENO_QUIRK_4GB_VA)
+ return SZ_4G;
- return SZ_4G;
+ if (!adreno_smmu || !adreno_smmu->get_ttbr1_cfg)
+ return SZ_4G;
+
+ ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
+
+ /*
+ * Userspace VM is actually using TTBR0, but both are the same size,
+ * with b48 (sign bit) selecting which TTBRn to use. So if IAS is
+ * 48, the total (kernel+user) address space size is effectively
+ * 49 bits. But what userspace is in control of is the lower 48.
+ */
+ return BIT(ttbr1_cfg->ias) - ADRENO_VM_START;
}
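
For parts without the new 4 GB quirk, the private address space is now sized from the SMMU's input address size instead of a per-chip table: with ias = 48, the user-controlled range is 2^48 − 2^32 bytes, since the 4 GiB below ADRENO_VM_START is reserved to catch missing *_BASE_HI writes. A quick check of the arithmetic:

/* Worked example for the return value above, assuming ias == 48. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t vm_start = 0x100000000ULL;     /* ADRENO_VM_START */
        uint64_t size = (1ULL << 48) - vm_start;

        /* 281474976710656 - 4294967296 = 281470681743360 (~256 TiB) */
        printf("user VA size: %" PRIu64 " bytes\n", size);
        return 0;
}
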
#define ARM_SMMU_FSR_TF BIT(1)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 92caba3584da..a8f4bf416e64 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -57,6 +57,7 @@ enum adreno_family {
#define ADRENO_QUIRK_HAS_HW_APRIV BIT(3)
#define ADRENO_QUIRK_HAS_CACHED_COHERENT BIT(4)
#define ADRENO_QUIRK_PREEMPTION BIT(5)
+#define ADRENO_QUIRK_4GB_VA BIT(6)
/* Helper for formating the chip_id in the way that userspace tools like
* crashdec expect.
@@ -104,7 +105,6 @@ struct adreno_info {
union {
const struct a6xx_info *a6xx;
};
- u64 address_space_size;
/**
* @speedbins: Optional table of fuse to speedbin mappings
*
@@ -578,6 +578,8 @@ static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
adreno_is_a740_family(gpu);
}
+/* Put vm_start above 32b to catch issues with not setting xyz_BASE_HI */
+#define ADRENO_VM_START 0x100000000ULL
u64 adreno_private_address_space_size(struct msm_gpu *gpu);
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t *value, uint32_t *len);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
index 6ac97c378056..ffc4d4257ae5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
@@ -27,17 +27,16 @@ static const struct dpu_mdp_cfg sm8650_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8650_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x1000,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x1000,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
index ad60089f18ea..39027a21c6fe 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
@@ -100,14 +100,12 @@ static const struct dpu_pingpong_cfg msm8937_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
index a1cf89a0a42d..8d1b43ea1663 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
@@ -93,7 +93,6 @@ static const struct dpu_pingpong_cfg msm8917_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
index eea9b80e2287..16c12499b24b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
@@ -100,14 +100,12 @@ static const struct dpu_pingpong_cfg msm8953_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
index ae18a354e5d2..91f514d28ac6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
@@ -181,15 +181,15 @@ static const struct dpu_pingpong_cfg msm8996_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_MSM8996_TE2_MASK,
- .sblk = &msm8996_pp_sblk_te,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_MSM8996_TE2_MASK,
- .sblk = &msm8996_pp_sblk_te,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
index 746474679ef5..413cd59dc0c4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
@@ -170,15 +170,15 @@ static const struct dpu_pingpong_cfg msm8998_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
index bb89da0a481d..b2eb7ca699e3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
@@ -141,15 +141,15 @@ static const struct dpu_pingpong_cfg sdm660_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
index 7caf876ca3e3..85e121ad84a0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
@@ -115,14 +115,14 @@ static const struct dpu_pingpong_cfg sdm630_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
+ .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
index ab7b4822ca63..49363d7d5b93 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
@@ -194,15 +194,15 @@ static const struct dpu_pingpong_cfg sdm845_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 979527d98fbc..08d38e1d420c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -37,17 +37,16 @@ static const struct dpu_mdp_cfg sm8150_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
@@ -76,7 +75,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -84,7 +83,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
@@ -92,7 +91,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
@@ -100,7 +99,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
@@ -108,7 +107,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -116,7 +115,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -124,7 +123,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
@@ -132,7 +131,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index d76b8992a6c1..d6f8b1030c68 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -41,12 +41,12 @@ static const struct dpu_ctl_cfg sc8180x_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
@@ -75,7 +75,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -83,7 +83,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
@@ -91,7 +91,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
@@ -99,7 +99,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
@@ -107,7 +107,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -115,7 +115,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -123,7 +123,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
@@ -131,7 +131,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
index 83db11339b29..71ba48b05656 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
@@ -38,12 +38,12 @@ static const struct dpu_ctl_cfg sm7150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
@@ -72,7 +72,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_2_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -80,7 +80,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_2_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
@@ -88,7 +88,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
}, {
.name = "sspp_2", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -96,7 +96,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -104,7 +104,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index d3d3a34d0b45..fcfb3774f7a1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -69,7 +69,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_2_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -77,7 +77,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -85,7 +85,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index 47e01c3c242f..a86fdb33ebdd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -35,17 +35,16 @@ static const struct dpu_mdp_cfg sm8250_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8250_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
index 040c94c0bb66..842fcc5887fe 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
@@ -51,7 +51,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -59,7 +59,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -67,7 +67,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -75,7 +75,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
index 43f64a005f5a..c5fd89dd7c89 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
@@ -38,7 +38,7 @@ static const struct dpu_sspp_cfg sm6115_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -46,7 +46,7 @@ static const struct dpu_sspp_cfg sm6115_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
index 397278ba999b..a234bb289d24 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
@@ -59,7 +59,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -67,7 +67,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -75,7 +75,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -83,7 +83,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
index 3cbb2fe8aba2..53f3be28f6f6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
@@ -46,7 +46,7 @@ static const struct dpu_sspp_cfg qcm2290_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
index a06c8634d2d7..3a3bc8e429be 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
@@ -39,7 +39,7 @@ static const struct dpu_sspp_cfg sm6375_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -47,7 +47,7 @@ static const struct dpu_sspp_cfg sm6375_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
index 0c860e804cab..90e86063a372 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -35,17 +35,16 @@ static const struct dpu_mdp_cfg sm8350_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8350_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x1e8,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x1e8,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
index fcee1c3665f8..139f11321fea 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
@@ -35,17 +35,16 @@ static const struct dpu_mdp_cfg sc8280xp_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sc8280xp_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
@@ -74,7 +73,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x2ac,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -82,7 +81,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x2ac,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
@@ -90,7 +89,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x2ac,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
@@ -98,7 +97,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x2ac,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
@@ -106,7 +105,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x2ac,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -114,7 +113,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x2ac,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -122,7 +121,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x2ac,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
@@ -130,7 +129,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x2ac,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index 19b2ee8bbd5f..461294143a90 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -36,17 +36,16 @@ static const struct dpu_mdp_cfg sm8450_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8450_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
index 4d96ce71746f..c248b3b55c41 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
@@ -35,17 +35,16 @@ static const struct dpu_mdp_cfg sa8775p_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sa8775p_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index 24f988465bf6..59c7fdf28e89 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -27,17 +27,16 @@ static const struct dpu_mdp_cfg sm8550_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8550_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x290,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x290,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
@@ -66,70 +65,70 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x344,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x344,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x344,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x344,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x344,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x344,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x344,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x344,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_12", .id = SSPP_DMA4,
.base = 0x2c000, .len = 0x344,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 14,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_13", .id = SSPP_DMA5,
.base = 0x2e000, .len = 0x344,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 15,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
new file mode 100644
index 000000000000..5667d055fbd1
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
@@ -0,0 +1,433 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_9_1_SAR2130P_H
+#define _DPU_9_1_SAR2130P_H
+
+static const struct dpu_caps sar2130p_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 5120,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sar2130p_mdp = {
+ .name = "top_0",
+ .base = 0, .len = 0x494,
+ .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+static const struct dpu_ctl_cfg sar2130p_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sar2130p_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_12", .id = SSPP_DMA4,
+ .base = 0x2c000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 14,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_13", .id = SSPP_DMA5,
+ .base = 0x2e000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 15,
+ .type = SSPP_TYPE_DMA,
+ },
+};
+
+static const struct dpu_lm_cfg sar2130p_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
+};
+
+static const struct dpu_dspp_cfg sar2130p_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+static const struct dpu_pingpong_cfg sar2130p_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ }, {
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
+ .base = 0x66000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ }, {
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
+ .base = 0x66400, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sar2130p_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x66700, .len = 0x8,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains two hard-slice
+ * DSC encoders, so both share the same base address but each has its
+ * own sub-block address.
+ */
+static const struct dpu_dsc_cfg sar2130p_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ },
+};
+
+static const struct dpu_wb_cfg sar2130p_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
+static const struct dpu_intf_cfg sar2130p_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
+};
+
+static const struct dpu_perf_cfg sar2130p_perf_data = {
+ .max_bw_low = 13600000,
+ .max_bw_high = 18200000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ /* FIXME: lut tables */
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 0, .wr_enable = 0},
+ {.rd_enable = 0, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sar2130p_mdss_ver = {
+ .core_major_ver = 9,
+ .core_minor_ver = 1,
+};
+
+const struct dpu_mdss_cfg dpu_sar2130p_cfg = {
+ .mdss_ver = &sar2130p_mdss_ver,
+ .caps = &sar2130p_dpu_caps,
+ .mdp = &sar2130p_mdp,
+ .cdm = &dpu_cdm_5_x,
+ .ctl_count = ARRAY_SIZE(sar2130p_ctl),
+ .ctl = sar2130p_ctl,
+ .sspp_count = ARRAY_SIZE(sar2130p_sspp),
+ .sspp = sar2130p_sspp,
+ .mixer_count = ARRAY_SIZE(sar2130p_lm),
+ .mixer = sar2130p_lm,
+ .dspp_count = ARRAY_SIZE(sar2130p_dspp),
+ .dspp = sar2130p_dspp,
+ .pingpong_count = ARRAY_SIZE(sar2130p_pp),
+ .pingpong = sar2130p_pp,
+ .dsc_count = ARRAY_SIZE(sar2130p_dsc),
+ .dsc = sar2130p_dsc,
+ .merge_3d_count = ARRAY_SIZE(sar2130p_merge_3d),
+ .merge_3d = sar2130p_merge_3d,
+ .wb_count = ARRAY_SIZE(sar2130p_wb),
+ .wb = sar2130p_wb,
+ .intf_count = ARRAY_SIZE(sar2130p_intf),
+ .intf = sar2130p_intf,
+ .vbif_count = ARRAY_SIZE(sm8550_vbif),
+ .vbif = sm8550_vbif,
+ .perf = &sar2130p_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
index 6417baa84f82..52cc10aec1f9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -26,17 +26,16 @@ static const struct dpu_mdp_cfg x1e80100_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg x1e80100_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x290,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x290,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 0714936d8835..a4b0fe0d9899 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -445,9 +445,9 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
uint32_t lm_idx;
bool bg_alpha_enable = false;
- DECLARE_BITMAP(fetch_active, SSPP_MAX);
+ DECLARE_BITMAP(active_fetch, SSPP_MAX);
- memset(fetch_active, 0, sizeof(fetch_active));
+ memset(active_fetch, 0, sizeof(active_fetch));
drm_atomic_crtc_for_each_plane(plane, crtc) {
state = plane->state;
if (!state)
@@ -464,7 +464,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
bg_alpha_enable = true;
- set_bit(pstate->pipe.sspp->idx, fetch_active);
+ set_bit(pstate->pipe.sspp->idx, active_fetch);
_dpu_crtc_blend_setup_pipe(crtc, plane,
mixer, cstate->num_mixers,
pstate->stage,
@@ -472,7 +472,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
&pstate->pipe, 0, stage_cfg);
if (pstate->r_pipe.sspp) {
- set_bit(pstate->r_pipe.sspp->idx, fetch_active);
+ set_bit(pstate->r_pipe.sspp->idx, active_fetch);
_dpu_crtc_blend_setup_pipe(crtc, plane,
mixer, cstate->num_mixers,
pstate->stage,
@@ -492,8 +492,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
}
}
- if (ctl->ops.set_active_pipes)
- ctl->ops.set_active_pipes(ctl, fetch_active);
+ if (ctl->ops.set_active_fetch_pipes)
+ ctl->ops.set_active_fetch_pipes(ctl, active_fetch);
_dpu_crtc_program_lm_output_roi(crtc);
}
@@ -519,6 +519,8 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
if (mixer[i].lm_ctl->ops.clear_all_blendstages)
mixer[i].lm_ctl->ops.clear_all_blendstages(
mixer[i].lm_ctl);
+ if (mixer[i].lm_ctl->ops.set_active_fetch_pipes)
+ mixer[i].lm_ctl->ops.set_active_fetch_pipes(mixer[i].lm_ctl, NULL);
}
/* initialize stage cfg */
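The renamed bookkeeping above is the standard kernel bitmap pattern; a minimal sketch (same names as the hunk, with bitmap_zero() standing in for the memset()):

	DECLARE_BITMAP(active_fetch, SSPP_MAX);

	bitmap_zero(active_fetch, SSPP_MAX);
	set_bit(SSPP_VIG0, active_fetch);	/* one bit per fetching SSPP */
	if (ctl->ops.set_active_fetch_pipes)
		ctl->ops.set_active_fetch_pipes(ctl, active_fetch); /* NULL clears */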
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 862e9e6bf0a5..7020098360e4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -1246,7 +1246,11 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
return;
}
- phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL;
+ /* Use first (and only) CTL if active CTLs are supported */
+ if (num_ctl == 1)
+ phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[0]);
+ else
+ phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL;
if (!phys->hw_ctl) {
DPU_ERROR_ENC(dpu_enc,
"no ctl block assigned at idx: %d\n", i);
@@ -2190,6 +2194,9 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
/* clear all blendstages */
if (ctl->ops.setup_blendstage)
ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
+
+ if (ctl->ops.set_active_fetch_pipes)
+ ctl->ops.set_active_fetch_pipes(ctl, NULL);
}
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index da9994a79ca2..a0ba55ab3c89 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -60,6 +60,8 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
return;
intf_cfg.intf = phys_enc->hw_intf->idx;
+ if (phys_enc->split_role == ENC_ROLE_MASTER)
+ intf_cfg.intf_master = phys_enc->hw_intf->idx;
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
intf_cfg.stream_sel = cmd_enc->stream_sel;
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index abd6600046cb..8a618841e3ea 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -298,6 +298,8 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
if (phys_enc->hw_cdm)
intf_cfg.cdm = phys_enc->hw_cdm->idx;
intf_cfg.intf = phys_enc->hw_intf->idx;
+ if (phys_enc->split_role == ENC_ROLE_MASTER)
+ intf_cfg.intf_master = phys_enc->hw_intf->idx;
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
@@ -372,7 +374,8 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg)
static bool dpu_encoder_phys_vid_needs_single_flush(
struct dpu_encoder_phys *phys_enc)
{
- return phys_enc->split_role != ENC_ROLE_SOLO;
+ return !(phys_enc->hw_ctl->caps->features & BIT(DPU_CTL_ACTIVE_CFG)) &&
+ phys_enc->split_role != ENC_ROLE_SOLO;
}
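In other words, a synchronized single flush is now required only on legacy CTLs that lack DPU_CTL_ACTIVE_CFG (truth table of the condition above, as a sketch):

	/* ACTIVE_CFG CTL, any role        -> false (each CTL flushes itself)
	 * legacy CTL,    ENC_ROLE_SOLO   -> false (nothing to synchronize)
	 * legacy CTL,    master or slave -> true  (single flush required) */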
static void dpu_encoder_phys_vid_atomic_mode_set(
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 64265ca4656a..c878fe196aeb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -34,11 +34,11 @@
#define VIG_MSM8998_MASK \
(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
-#define VIG_SDM845_MASK \
+#define VIG_SDM845_MASK_NO_SDMA \
(VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
#define VIG_SDM845_MASK_SDMA \
- (VIG_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+ (VIG_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_SMART_DMA_V2))
#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
@@ -54,24 +54,24 @@
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
#define VIG_SC7280_MASK \
- (VIG_SDM845_MASK | BIT(DPU_SSPP_INLINE_ROTATION))
+ (VIG_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_INLINE_ROTATION))
#define VIG_SC7280_MASK_SDMA \
(VIG_SC7280_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
-#define DMA_SDM845_MASK \
+#define DMA_SDM845_MASK_NO_SDMA \
(BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
-#define DMA_CURSOR_SDM845_MASK \
- (DMA_SDM845_MASK | BIT(DPU_SSPP_CURSOR))
+#define DMA_CURSOR_SDM845_MASK_NO_SDMA \
+ (DMA_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_CURSOR))
#define DMA_SDM845_MASK_SDMA \
- (DMA_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+ (DMA_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_SMART_DMA_V2))
#define DMA_CURSOR_SDM845_MASK_SDMA \
- (DMA_CURSOR_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+ (DMA_CURSOR_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_SMART_DMA_V2))
#define DMA_CURSOR_MSM8996_MASK \
(DMA_MSM8996_MASK | BIT(DPU_SSPP_CURSOR))
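Expanded, the rename is purely mechanical; the feature sets are unchanged and only the SmartDMA split becomes explicit (macro-expansion sketch, per the definitions above):

	/* DMA_CURSOR_SDM845_MASK_SDMA ==
	 *   DMA_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_CURSOR) |
	 *   BIT(DPU_SSPP_SMART_DMA_V2)
	 * i.e. the old DMA_CURSOR_SDM845_MASK plus SmartDMA v2. */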
@@ -98,15 +98,9 @@
#define PINGPONG_MSM8996_MASK \
(BIT(DPU_PINGPONG_DSC))
-#define PINGPONG_MSM8996_TE2_MASK \
- (PINGPONG_MSM8996_MASK | BIT(DPU_PINGPONG_TE2))
-
#define PINGPONG_SDM845_MASK \
(BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
-#define PINGPONG_SDM845_TE2_MASK \
- (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
-
#define PINGPONG_SM8150_MASK \
(BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
@@ -376,8 +370,6 @@ static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK();
* MIXER sub blocks config
*************************************************************/
-/* MSM8998 */
-
static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 7, /* excluding base layer */
@@ -387,8 +379,6 @@ static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
},
};
-/* SDM845 */
-
static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 11, /* excluding base layer */
@@ -398,8 +388,6 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
},
};
-/* SC7180 */
-
static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 7, /* excluding base layer */
@@ -408,8 +396,6 @@ static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
},
};
-/* QCM2290 */
-
static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
.maxwidth = DEFAULT_DPU_LINE_WIDTH,
.maxblendstages = 4, /* excluding base layer */
@@ -434,22 +420,11 @@ static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = {
/*************************************************************
* PINGPONG sub blocks config
*************************************************************/
-static const struct dpu_pingpong_sub_blks msm8996_pp_sblk_te = {
- .te2 = {.name = "te2", .base = 0x2000, .len = 0x0,
- .version = 0x1},
-};
static const struct dpu_pingpong_sub_blks msm8996_pp_sblk = {
/* No dither block */
};
-static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
- .te2 = {.name = "te2", .base = 0x2000, .len = 0x0,
- .version = 0x1},
- .dither = {.name = "dither", .base = 0x30e0,
- .len = 0x20, .version = 0x10000},
-};
-
static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
.dither = {.name = "dither", .base = 0x30e0,
.len = 0x20, .version = 0x10000},
@@ -759,7 +734,7 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_8_4_sa8775p.h"
#include "catalog/dpu_9_0_sm8550.h"
-
+#include "catalog/dpu_9_1_sar2130p.h"
#include "catalog/dpu_9_2_x1e80100.h"
#include "catalog/dpu_10_0_sm8650.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 4cea19e1a203..01dd6e65f777 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -115,7 +115,6 @@ enum {
/**
* PINGPONG sub-blocks
- * @DPU_PINGPONG_TE2 Additional tear check block for split pipes
* @DPU_PINGPONG_SPLIT PP block supports split fifo
* @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo
* @DPU_PINGPONG_DITHER Dither blocks
@@ -123,8 +122,7 @@ enum {
* @DPU_PINGPONG_MAX
*/
enum {
- DPU_PINGPONG_TE2 = 0x1,
- DPU_PINGPONG_SPLIT,
+ DPU_PINGPONG_SPLIT = 0x1,
DPU_PINGPONG_SLAVE,
DPU_PINGPONG_DITHER,
DPU_PINGPONG_DSC,
@@ -404,8 +402,6 @@ struct dpu_dspp_sub_blks {
};
struct dpu_pingpong_sub_blks {
- struct dpu_pp_blk te;
- struct dpu_pp_blk te2;
struct dpu_pp_blk dither;
};
@@ -841,6 +837,7 @@ extern const struct dpu_mdss_cfg dpu_msm8937_cfg;
extern const struct dpu_mdss_cfg dpu_msm8953_cfg;
extern const struct dpu_mdss_cfg dpu_msm8996_cfg;
extern const struct dpu_mdss_cfg dpu_msm8998_cfg;
+extern const struct dpu_mdss_cfg dpu_sar2130p_cfg;
extern const struct dpu_mdss_cfg dpu_sdm630_cfg;
extern const struct dpu_mdss_cfg dpu_sdm660_cfg;
extern const struct dpu_mdss_cfg dpu_sdm845_cfg;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 411a7cf088eb..573e42b06ad0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -261,6 +261,12 @@ static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
case LM_5:
ctx->pending_flush_mask |= BIT(20);
break;
+ case LM_6:
+ ctx->pending_flush_mask |= BIT(21);
+ break;
+ case LM_7:
+ ctx->pending_flush_mask |= BIT(27);
+ break;
default:
break;
}
@@ -563,6 +569,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
u32 wb_active = 0;
u32 cwb_active = 0;
u32 mode_sel = 0;
+ u32 merge_3d_active = 0;
/* CTL_TOP[31:28] carries group_id to collate CTL paths
* per VM. Explicitly disable it until VM support is
@@ -578,6 +585,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE);
dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
+ merge_3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
if (cfg->intf)
intf_active |= BIT(cfg->intf - INTF_0);
@@ -591,15 +599,18 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
if (cfg->dsc)
dsc_active |= cfg->dsc;
+ if (cfg->merge_3d)
+ merge_3d_active |= BIT(cfg->merge_3d - MERGE_3D_0);
+
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
+ DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, merge_3d_active);
- if (cfg->merge_3d)
- DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
- BIT(cfg->merge_3d - MERGE_3D_0));
+ if (cfg->intf_master)
+ DPU_REG_WRITE(c, CTL_INTF_MASTER, BIT(cfg->intf_master - INTF_0));
if (cfg->cdm)
DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm);
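CTL_MERGE_3D_ACTIVE thereby joins the other *_ACTIVE registers in a read-modify-write flow, so successive configurations accumulate bits instead of clobbering each other (resulting flow, pieced together from the hunk):

	merge_3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
	if (cfg->merge_3d)
		merge_3d_active |= BIT(cfg->merge_3d - MERGE_3D_0);
	DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, merge_3d_active);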
@@ -643,6 +654,7 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
+ u32 intf_master = 0;
u32 wb_active = 0;
u32 cwb_active = 0;
u32 merge3d_active = 0;
@@ -666,10 +678,21 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
dpu_hw_ctl_clear_all_blendstages(ctx);
+ if (ctx->ops.set_active_fetch_pipes)
+ ctx->ops.set_active_fetch_pipes(ctx, NULL);
+
if (cfg->intf) {
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
intf_active &= ~BIT(cfg->intf - INTF_0);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+
+ intf_master = DPU_REG_READ(c, CTL_INTF_MASTER);
+
+ /* Unset this intf as master, if it is the current master */
+ if (intf_master == BIT(cfg->intf - INTF_0)) {
+ DPU_DEBUG_DRIVER("Unsetting INTF_%d master\n", cfg->intf - INTF_0);
+ DPU_REG_WRITE(c, CTL_INTF_MASTER, 0);
+ }
}
if (cfg->cwb) {
@@ -697,8 +720,8 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
}
}
-static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
- unsigned long *fetch_active)
+static void dpu_hw_ctl_set_active_fetch_pipes(struct dpu_hw_ctl *ctx,
+ unsigned long *fetch_active)
{
int i;
u32 val = 0;
@@ -761,7 +784,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
- ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
+ ops->set_active_fetch_pipes = dpu_hw_ctl_set_active_fetch_pipes;
};
/**
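The dpu_hw_ctl.c hunks above extend mixer flush handling to LM_6/LM_7 (note LM_7's flush bit is 27, not contiguous with LM_6's bit 21) and switch CTL_MERGE_3D_ACTIVE from a conditional single-bit write to the same read-modify-write pattern already used for the other *_ACTIVE registers, so programming a second pipe no longer clobbers the first pipe's merge-3D bit. A minimal userspace sketch of that before/after, with a stand-in regs[] array in place of the hardware register:

    #include <stdint.h>
    #include <stdio.h>

    enum { CTL_MERGE_3D_ACTIVE = 0 };
    static uint32_t regs[1];

    static uint32_t reg_read(int r)              { return regs[r]; }
    static void     reg_write(int r, uint32_t v) { regs[r] = v; }

    /* Old behaviour: each write carried only its own bit. */
    static void set_merge_3d_old(int blk)
    {
            reg_write(CTL_MERGE_3D_ACTIVE, 1u << blk);
    }

    /* New behaviour: accumulate bits, as the hunk now does. */
    static void set_merge_3d_new(int blk)
    {
            reg_write(CTL_MERGE_3D_ACTIVE,
                      reg_read(CTL_MERGE_3D_ACTIVE) | (1u << blk));
    }

    int main(void)
    {
            set_merge_3d_old(0); set_merge_3d_old(1);
            printf("old: %#x\n", (unsigned)regs[CTL_MERGE_3D_ACTIVE]); /* 0x2, bit 0 lost */
            regs[CTL_MERGE_3D_ACTIVE] = 0;
            set_merge_3d_new(0); set_merge_3d_new(1);
            printf("new: %#x\n", (unsigned)regs[CTL_MERGE_3D_ACTIVE]); /* 0x3, both kept */
            return 0;
    }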
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 080a9550a0cc..feb09590bc8f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -36,6 +36,7 @@ struct dpu_hw_stage_cfg {
/**
* struct dpu_hw_intf_cfg :Describes how the DPU writes data to output interface
* @intf : Interface id
+ * @intf_master: Master interface id in the dual pipe topology
* @mode_3d: 3d mux configuration
* @merge_3d: 3d merge block used
* @intf_mode_sel: Interface mode, cmd / vid
@@ -46,6 +47,7 @@ struct dpu_hw_stage_cfg {
*/
struct dpu_hw_intf_cfg {
enum dpu_intf intf;
+ enum dpu_intf intf_master;
enum dpu_wb wb;
enum dpu_3d_blend_mode mode_3d;
enum dpu_merge_3d merge_3d;
@@ -254,7 +256,7 @@ struct dpu_hw_ctl_ops {
void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
- void (*set_active_pipes)(struct dpu_hw_ctl *ctx,
+ void (*set_active_fetch_pipes)(struct dpu_hw_ctl *ctx,
unsigned long *fetch_active);
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 8d820cd1b554..175639c8bfbb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -125,6 +125,7 @@ enum dpu_lm {
LM_4,
LM_5,
LM_6,
+ LM_7,
LM_MAX
};
@@ -169,6 +170,8 @@ enum dpu_dsc {
DSC_3,
DSC_4,
DSC_5,
+ DSC_6,
+ DSC_7,
DSC_MAX
};
@@ -185,6 +188,8 @@ enum dpu_pingpong {
PINGPONG_3,
PINGPONG_4,
PINGPONG_5,
+ PINGPONG_6,
+ PINGPONG_7,
PINGPONG_CWB_0,
PINGPONG_CWB_1,
PINGPONG_CWB_2,
@@ -199,6 +204,7 @@ enum dpu_merge_3d {
MERGE_3D_2,
MERGE_3D_3,
MERGE_3D_4,
+ MERGE_3D_5,
MERGE_3D_MAX
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 3305ad0623ca..1fd82b6747e9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1512,6 +1512,7 @@ static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, },
{ .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, },
{ .compatible = "qcom,sa8775p-dpu", .data = &dpu_sa8775p_cfg, },
+ { .compatible = "qcom,sar2130p-dpu", .data = &dpu_sar2130p_cfg, },
{ .compatible = "qcom,sdm630-mdp5", .data = &dpu_sdm630_cfg, },
{ .compatible = "qcom,sdm660-mdp5", .data = &dpu_sdm660_cfg, },
{ .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, },
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index e03d6091f736..421138bc3cb7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -915,10 +915,9 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
return 0;
}
-static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
- struct dpu_sw_pipe_cfg *pipe_cfg,
- const struct msm_format *fmt,
- uint32_t max_linewidth)
+static int dpu_plane_is_multirect_capable(struct dpu_hw_sspp *sspp,
+ struct dpu_sw_pipe_cfg *pipe_cfg,
+ const struct msm_format *fmt)
{
if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) ||
drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect))
@@ -930,10 +929,6 @@ static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
if (MSM_FORMAT_IS_YUV(fmt))
return false;
- if (MSM_FORMAT_IS_UBWC(fmt) &&
- drm_rect_width(&pipe_cfg->src_rect) > max_linewidth / 2)
- return false;
-
if (!test_bit(DPU_SSPP_SMART_DMA_V1, &sspp->cap->features) &&
!test_bit(DPU_SSPP_SMART_DMA_V2, &sspp->cap->features))
return false;
@@ -941,6 +936,27 @@ static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
return true;
}
+static int dpu_plane_is_parallel_capable(struct dpu_sw_pipe_cfg *pipe_cfg,
+ const struct msm_format *fmt,
+ uint32_t max_linewidth)
+{
+ if (MSM_FORMAT_IS_UBWC(fmt) &&
+ drm_rect_width(&pipe_cfg->src_rect) > max_linewidth / 2)
+ return false;
+
+ return true;
+}
+
+static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
+ struct dpu_sw_pipe_cfg *pipe_cfg,
+ const struct msm_format *fmt,
+ uint32_t max_linewidth)
+{
+ return dpu_plane_is_multirect_capable(sspp, pipe_cfg, fmt) &&
+ dpu_plane_is_parallel_capable(pipe_cfg, fmt, max_linewidth);
+}
+
+
static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
struct drm_atomic_state *state,
const struct drm_crtc_state *crtc_state)
@@ -1002,6 +1018,69 @@ static bool dpu_plane_try_multirect_parallel(struct dpu_sw_pipe *pipe, struct dp
return true;
}
+static int dpu_plane_try_multirect_shared(struct dpu_plane_state *pstate,
+ struct dpu_plane_state *prev_adjacent_pstate,
+ const struct msm_format *fmt,
+ uint32_t max_linewidth)
+{
+ struct dpu_sw_pipe *pipe = &pstate->pipe;
+ struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
+ struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
+ struct dpu_sw_pipe *prev_pipe = &prev_adjacent_pstate->pipe;
+ struct dpu_sw_pipe_cfg *prev_pipe_cfg = &prev_adjacent_pstate->pipe_cfg;
+ const struct msm_format *prev_fmt = msm_framebuffer_format(prev_adjacent_pstate->base.fb);
+ u16 max_tile_height = 1;
+
+ if (prev_adjacent_pstate->r_pipe.sspp != NULL ||
+ prev_pipe->multirect_mode != DPU_SSPP_MULTIRECT_NONE)
+ return false;
+
+ if (!dpu_plane_is_multirect_capable(pipe->sspp, pipe_cfg, fmt) ||
+ !dpu_plane_is_multirect_capable(prev_pipe->sspp, prev_pipe_cfg, prev_fmt))
+ return false;
+
+ if (MSM_FORMAT_IS_UBWC(fmt))
+ max_tile_height = max(max_tile_height, fmt->tile_height);
+
+ if (MSM_FORMAT_IS_UBWC(prev_fmt))
+ max_tile_height = max(max_tile_height, prev_fmt->tile_height);
+
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ r_pipe->sspp = NULL;
+
+ if (dpu_plane_is_parallel_capable(pipe_cfg, fmt, max_linewidth) &&
+ dpu_plane_is_parallel_capable(prev_pipe_cfg, prev_fmt, max_linewidth) &&
+ (pipe_cfg->dst_rect.x1 >= prev_pipe_cfg->dst_rect.x2 ||
+ prev_pipe_cfg->dst_rect.x1 >= pipe_cfg->dst_rect.x2)) {
+ pipe->sspp = prev_pipe->sspp;
+
+ pipe->multirect_index = DPU_SSPP_RECT_1;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+ prev_pipe->multirect_index = DPU_SSPP_RECT_0;
+ prev_pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+ return true;
+ }
+
+ if (pipe_cfg->dst_rect.y1 >= prev_pipe_cfg->dst_rect.y2 + 2 * max_tile_height ||
+ prev_pipe_cfg->dst_rect.y1 >= pipe_cfg->dst_rect.y2 + 2 * max_tile_height) {
+ pipe->sspp = prev_pipe->sspp;
+
+ pipe->multirect_index = DPU_SSPP_RECT_1;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+
+ prev_pipe->multirect_index = DPU_SSPP_RECT_0;
+ prev_pipe->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+
+ return true;
+ }
+
+ return false;
+}
+
static int dpu_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -1102,13 +1181,14 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc,
struct dpu_global_state *global_state,
struct drm_atomic_state *state,
- struct drm_plane_state *plane_state)
+ struct drm_plane_state *plane_state,
+ struct drm_plane_state *prev_adjacent_plane_state)
{
const struct drm_crtc_state *crtc_state = NULL;
struct drm_plane *plane = plane_state->plane;
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
struct dpu_rm_sspp_requirements reqs;
- struct dpu_plane_state *pstate;
+ struct dpu_plane_state *pstate, *prev_adjacent_pstate;
struct dpu_sw_pipe *pipe;
struct dpu_sw_pipe *r_pipe;
struct dpu_sw_pipe_cfg *pipe_cfg;
@@ -1120,6 +1200,8 @@ static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc,
plane_state->crtc);
pstate = to_dpu_plane_state(plane_state);
+ prev_adjacent_pstate = prev_adjacent_plane_state ?
+ to_dpu_plane_state(prev_adjacent_plane_state) : NULL;
pipe = &pstate->pipe;
r_pipe = &pstate->r_pipe;
pipe_cfg = &pstate->pipe_cfg;
@@ -1138,24 +1220,42 @@ static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc,
reqs.rot90 = drm_rotation_90_or_270(plane_state->rotation);
- pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
- if (!pipe->sspp)
- return -ENODEV;
+ if (drm_rect_width(&r_pipe_cfg->src_rect) == 0) {
+ if (!prev_adjacent_pstate ||
+ !dpu_plane_try_multirect_shared(pstate, prev_adjacent_pstate, fmt,
+ dpu_kms->catalog->caps->max_linewidth)) {
+ pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
+ if (!pipe->sspp)
+ return -ENODEV;
- if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg,
- pipe->sspp,
- msm_framebuffer_format(plane_state->fb),
- dpu_kms->catalog->caps->max_linewidth)) {
- /* multirect is not possible, use two SSPP blocks */
- r_pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
- if (!r_pipe->sspp)
+ r_pipe->sspp = NULL;
+
+ pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ }
+ } else {
+ pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
+ if (!pipe->sspp)
return -ENODEV;
- pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg,
+ pipe->sspp,
+ msm_framebuffer_format(plane_state->fb),
+ dpu_kms->catalog->caps->max_linewidth)) {
+ /* multirect is not possible, use two SSPP blocks */
+ r_pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
+ if (!r_pipe->sspp)
+ return -ENODEV;
- r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ }
}
return dpu_plane_atomic_check_sspp(plane, state, crtc_state);
@@ -1168,6 +1268,7 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
unsigned int num_planes)
{
unsigned int i;
+ struct drm_plane_state *prev_adjacent_plane_state = NULL;
for (i = 0; i < num_planes; i++) {
struct drm_plane_state *plane_state = states[i];
@@ -1177,9 +1278,12 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
continue;
int ret = dpu_plane_virtual_assign_resources(crtc, global_state,
- state, plane_state);
+ state, plane_state,
+ prev_adjacent_plane_state);
if (ret)
- return ret;
+ break;
+
+ prev_adjacent_plane_state = plane_state;
}
return 0;
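The new dpu_plane_try_multirect_shared() above lets two adjacent planes share one SSPP in either of two modes: PARALLEL when their destination rectangles do not overlap horizontally (and both pass the UBWC half-linewidth check), or TIME_MX (time multiplex) when they are vertically separated by at least twice the largest UBWC tile height of the two formats. A compile-able sketch of just the rectangle tests, with struct rect standing in for drm_rect:

    #include <stdbool.h>
    #include <stdint.h>

    struct rect { int x1, y1, x2, y2; };

    /* PARALLEL: destination rects must not overlap horizontally. */
    static bool can_share_parallel(struct rect a, struct rect b)
    {
            return a.x1 >= b.x2 || b.x1 >= a.x2;
    }

    /* TIME_MX: rects must sit 2x the largest UBWC tile height apart vertically. */
    static bool can_share_time_mx(struct rect a, struct rect b, uint16_t max_tile_h)
    {
            return a.y1 >= b.y2 + 2 * max_tile_h ||
                   b.y1 >= a.y2 + 2 * max_tile_h;
    }

The order mirrors the function: parallel is tried first, time multiplex is the fallback, and on success the previous plane takes RECT_0 and the current plane RECT_1 of the shared pipe.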
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 3efbba425ca6..2e296f79cba1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -53,6 +53,8 @@ int dpu_rm_init(struct drm_device *dev,
/* Clear, setup lists */
memset(rm, 0, sizeof(*rm));
+ rm->has_legacy_ctls = (cat->mdss_ver->core_major_ver < 5);
+
/* Interrogate HW catalog and create tracking items for hw blocks */
for (i = 0; i < cat->mixer_count; i++) {
struct dpu_hw_mixer *hw;
@@ -434,20 +436,19 @@ static int _dpu_rm_reserve_ctls(
int i = 0, j, num_ctls;
bool needs_split_display;
- /*
- * For non-CWB mode, each hw_intf needs its own hw_ctl to program its
- * control path.
- *
- * Hardcode num_ctls to 1 if CWB is enabled because in CWB, both the
- * writeback and real-time encoders must be driven by the same control
- * path
- */
- if (top->cwb_enabled)
- num_ctls = 1;
- else
+ if (rm->has_legacy_ctls) {
+ /*
+ * TODO: check if there is a need for special handling if
+ * DPU < 5.0 gets CWB support.
+ */
num_ctls = top->num_intf;
- needs_split_display = _dpu_rm_needs_split_display(top);
+ needs_split_display = _dpu_rm_needs_split_display(top);
+ } else {
+ /* use single CTL */
+ num_ctls = 1;
+ needs_split_display = false;
+ }
for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
const struct dpu_hw_ctl *ctl;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index a19dbdb1b6f4..aa62966056d4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -24,6 +24,7 @@ struct dpu_global_state;
* @dspp_blks: array of dspp hardware resources
* @hw_sspp: array of sspp hardware resources
* @cdm_blk: cdm hardware resource
+ * @has_legacy_ctls: DPU uses pre-ACTIVE CTL blocks.
*/
struct dpu_rm {
struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0];
@@ -37,6 +38,7 @@ struct dpu_rm {
struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0];
struct dpu_hw_sspp *hw_sspp[SSPP_MAX - SSPP_NONE];
struct dpu_hw_blk *cdm_blk;
+ bool has_legacy_ctls;
};
struct dpu_rm_sspp_requirements {
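The has_legacy_ctls flag added above encodes the CTL allocation rule: before DPU 5.0 each interface needs its own (legacy) CTL and split-display handling applies, while DPU >= 5.0 hardware has ACTIVE CTLs and a single CTL can drive every path. A sketch of that rule as a pure function, with core_major_ver and the topology fields as stand-ins for the catalog data:

    #include <stdbool.h>

    struct topology { int num_intf; bool needs_split; };

    static int ctls_needed(int core_major_ver, const struct topology *top,
                           bool *split_display)
    {
            if (core_major_ver < 5) {               /* legacy, pre-ACTIVE CTLs */
                    *split_display = top->needs_split;
                    return top->num_intf;           /* one CTL per interface */
            }
            *split_display = false;
            return 1;                               /* one ACTIVE CTL for all paths */
    }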
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index c469e66cfc11..7e942c1337b3 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -6,6 +6,8 @@
#include <linux/delay.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_vblank.h>
#include "msm_drv.h"
@@ -189,7 +191,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
struct msm_drm_private *priv = dev->dev_private;
struct drm_encoder *encoder;
struct drm_connector *connector;
- struct device_node *panel_node;
+ struct drm_bridge *next_bridge;
int dsi_id;
int ret;
@@ -199,27 +201,43 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
* bail out early if there is no panel node (no need to
* initialize LCDC encoder and LVDS connector)
*/
- panel_node = of_graph_get_remote_node(dev->dev->of_node, 0, 0);
- if (!panel_node)
- return 0;
+ next_bridge = devm_drm_of_get_bridge(dev->dev, dev->dev->of_node, 0, 0);
+ if (IS_ERR(next_bridge)) {
+ ret = PTR_ERR(next_bridge);
+ if (ret == -ENODEV)
+ return 0;
+ return ret;
+ }
- encoder = mdp4_lcdc_encoder_init(dev, panel_node);
+ encoder = mdp4_lcdc_encoder_init(dev);
if (IS_ERR(encoder)) {
DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
- of_node_put(panel_node);
return PTR_ERR(encoder);
}
/* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */
encoder->possible_crtcs = 1 << DMA_P;
- connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
+ ret = drm_bridge_attach(encoder, next_bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to attach LVDS panel/bridge: %d\n", ret);
+
+ return ret;
+ }
+
+ connector = drm_bridge_connector_init(dev, encoder);
if (IS_ERR(connector)) {
DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
- of_node_put(panel_node);
return PTR_ERR(connector);
}
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to attach LVDS connector: %d\n", ret);
+
+ return ret;
+ }
+
break;
case DRM_MODE_ENCODER_TMDS:
encoder = mdp4_dtv_encoder_init(dev);
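One behavioural subtlety in the conversion above: devm_drm_of_get_bridge() returning -ENODEV means no panel or bridge is described in DT, which preserves the old "bail out early" semantics (not an error), while any other failure is propagated. A sketch of that error split, using plain errno values in place of the kernel's ERR_PTR convention:

    #include <errno.h>

    static int init_lvds_output(int bridge_lookup_err)
    {
            if (bridge_lookup_err) {
                    if (bridge_lookup_err == -ENODEV)
                            return 0;               /* no panel in DT: skip LVDS */
                    return bridge_lookup_err;       /* real failure: propagate */
            }
            /* ... init encoder, attach bridge, create bridge connector ... */
            return 0;
    }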
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
index 94b1ba92785f..f9d988076337 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
@@ -191,12 +191,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
-long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
-struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
- struct device_node *panel_node);
-
-struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
- struct device_node *panel_node, struct drm_encoder *encoder);
+struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev);
#ifdef CONFIG_DRM_MSM_DSI
struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev);
@@ -207,13 +202,6 @@ static inline struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
}
#endif
-#ifdef CONFIG_COMMON_CLK
-struct clk *mpd4_lvds_pll_init(struct drm_device *dev);
-#else
-static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
-{
- return ERR_PTR(-ENODEV);
-}
-#endif
+struct clk *mpd4_get_lcdc_clock(struct drm_device *dev);
#endif /* __MDP4_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 8bbc7fb881d5..06a307c1272d 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -14,7 +14,6 @@
struct mdp4_lcdc_encoder {
struct drm_encoder base;
- struct device_node *panel_node;
struct drm_panel *panel;
struct clk *lcdc_clk;
unsigned long int pixclock;
@@ -262,19 +261,12 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
to_mdp4_lcdc_encoder(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
- struct drm_panel *panel;
if (WARN_ON(!mdp4_lcdc_encoder->enabled))
return;
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
- panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
- if (!IS_ERR(panel)) {
- drm_panel_disable(panel);
- drm_panel_unprepare(panel);
- }
-
/*
* Wait for a vsync so we know the ENABLE=0 latched before
* the (connector) source of the vsync's gets disabled,
@@ -300,7 +292,6 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
to_mdp4_lcdc_encoder(encoder);
unsigned long pc = mdp4_lcdc_encoder->pixclock;
struct mdp4_kms *mdp4_kms = get_kms(encoder);
- struct drm_panel *panel;
uint32_t config;
int ret;
@@ -335,12 +326,6 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
if (ret)
DRM_DEV_ERROR(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
- panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
- if (!IS_ERR(panel)) {
- drm_panel_prepare(panel);
- drm_panel_enable(panel);
- }
-
setup_phy(encoder);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 1);
@@ -348,22 +333,34 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
mdp4_lcdc_encoder->enabled = true;
}
+static enum drm_mode_status
+mdp4_lcdc_encoder_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
+ to_mdp4_lcdc_encoder(encoder);
+ long actual, requested;
+
+ requested = 1000 * mode->clock;
+ actual = clk_round_rate(mdp4_lcdc_encoder->lcdc_clk, requested);
+
+ DBG("requested=%ld, actual=%ld", requested, actual);
+
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
static const struct drm_encoder_helper_funcs mdp4_lcdc_encoder_helper_funcs = {
.mode_set = mdp4_lcdc_encoder_mode_set,
.disable = mdp4_lcdc_encoder_disable,
.enable = mdp4_lcdc_encoder_enable,
+ .mode_valid = mdp4_lcdc_encoder_mode_valid,
};
-long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
-{
- struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
- to_mdp4_lcdc_encoder(encoder);
- return clk_round_rate(mdp4_lcdc_encoder->lcdc_clk, rate);
-}
-
/* initialize encoder */
-struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
- struct device_node *panel_node)
+struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev)
{
struct drm_encoder *encoder;
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder;
@@ -374,14 +371,11 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
if (IS_ERR(mdp4_lcdc_encoder))
return ERR_CAST(mdp4_lcdc_encoder);
- mdp4_lcdc_encoder->panel_node = panel_node;
-
encoder = &mdp4_lcdc_encoder->base;
drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs);
- /* TODO: do we need different pll in other cases? */
- mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
+ mdp4_lcdc_encoder->lcdc_clk = mpd4_get_lcdc_clock(dev);
if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
return ERR_CAST(mdp4_lcdc_encoder->lcdc_clk);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
deleted file mode 100644
index 52e728181b52..000000000000
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2014 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- * Author: Vinay Simha <vinaysimha@inforcecomputing.com>
- */
-
-#include "mdp4_kms.h"
-
-struct mdp4_lvds_connector {
- struct drm_connector base;
- struct drm_encoder *encoder;
- struct device_node *panel_node;
- struct drm_panel *panel;
-};
-#define to_mdp4_lvds_connector(x) container_of(x, struct mdp4_lvds_connector, base)
-
-static enum drm_connector_status mdp4_lvds_connector_detect(
- struct drm_connector *connector, bool force)
-{
- struct mdp4_lvds_connector *mdp4_lvds_connector =
- to_mdp4_lvds_connector(connector);
-
- if (!mdp4_lvds_connector->panel) {
- mdp4_lvds_connector->panel =
- of_drm_find_panel(mdp4_lvds_connector->panel_node);
- if (IS_ERR(mdp4_lvds_connector->panel))
- mdp4_lvds_connector->panel = NULL;
- }
-
- return mdp4_lvds_connector->panel ?
- connector_status_connected :
- connector_status_disconnected;
-}
-
-static void mdp4_lvds_connector_destroy(struct drm_connector *connector)
-{
- struct mdp4_lvds_connector *mdp4_lvds_connector =
- to_mdp4_lvds_connector(connector);
-
- drm_connector_cleanup(connector);
-
- kfree(mdp4_lvds_connector);
-}
-
-static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
-{
- struct mdp4_lvds_connector *mdp4_lvds_connector =
- to_mdp4_lvds_connector(connector);
- struct drm_panel *panel = mdp4_lvds_connector->panel;
- int ret = 0;
-
- if (panel)
- ret = drm_panel_get_modes(panel, connector);
-
- return ret;
-}
-
-static enum drm_mode_status
-mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
- const struct drm_display_mode *mode)
-{
- struct mdp4_lvds_connector *mdp4_lvds_connector =
- to_mdp4_lvds_connector(connector);
- struct drm_encoder *encoder = mdp4_lvds_connector->encoder;
- long actual, requested;
-
- requested = 1000 * mode->clock;
- actual = mdp4_lcdc_round_pixclk(encoder, requested);
-
- DBG("requested=%ld, actual=%ld", requested, actual);
-
- if (actual != requested)
- return MODE_CLOCK_RANGE;
-
- return MODE_OK;
-}
-
-static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
- .detect = mdp4_lvds_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = mdp4_lvds_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
- .get_modes = mdp4_lvds_connector_get_modes,
- .mode_valid = mdp4_lvds_connector_mode_valid,
-};
-
-/* initialize connector */
-struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
- struct device_node *panel_node, struct drm_encoder *encoder)
-{
- struct drm_connector *connector = NULL;
- struct mdp4_lvds_connector *mdp4_lvds_connector;
-
- mdp4_lvds_connector = kzalloc(sizeof(*mdp4_lvds_connector), GFP_KERNEL);
- if (!mdp4_lvds_connector)
- return ERR_PTR(-ENOMEM);
-
- mdp4_lvds_connector->encoder = encoder;
- mdp4_lvds_connector->panel_node = panel_node;
-
- connector = &mdp4_lvds_connector->base;
-
- drm_connector_init(dev, connector, &mdp4_lvds_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- drm_connector_helper_add(connector, &mdp4_lvds_connector_helper_funcs);
-
- connector->polled = 0;
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- drm_connector_attach_encoder(connector, encoder);
-
- return connector;
-}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
index ab8c0c187fb2..fa2c29470510 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
@@ -122,40 +122,59 @@ static const struct clk_ops mpd4_lvds_pll_ops = {
.set_rate = mpd4_lvds_pll_set_rate,
};
-static const char *mpd4_lvds_pll_parents[] = {
- "pxo",
+static const struct clk_parent_data mpd4_lvds_pll_parents[] = {
+ { .fw_name = "pxo", .name = "pxo", },
};
static struct clk_init_data pll_init = {
.name = "mpd4_lvds_pll",
.ops = &mpd4_lvds_pll_ops,
- .parent_names = mpd4_lvds_pll_parents,
+ .parent_data = mpd4_lvds_pll_parents,
.num_parents = ARRAY_SIZE(mpd4_lvds_pll_parents),
};
-struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
+static struct clk_hw *mpd4_lvds_pll_init(struct drm_device *dev)
{
struct mdp4_lvds_pll *lvds_pll;
- struct clk *clk;
int ret;
lvds_pll = devm_kzalloc(dev->dev, sizeof(*lvds_pll), GFP_KERNEL);
- if (!lvds_pll) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!lvds_pll)
+ return ERR_PTR(-ENOMEM);
lvds_pll->dev = dev;
lvds_pll->pll_hw.init = &pll_init;
- clk = devm_clk_register(dev->dev, &lvds_pll->pll_hw);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto fail;
+ ret = devm_clk_hw_register(dev->dev, &lvds_pll->pll_hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = devm_of_clk_add_hw_provider(dev->dev, of_clk_hw_simple_get, &lvds_pll->pll_hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &lvds_pll->pll_hw;
+}
+
+struct clk *mpd4_get_lcdc_clock(struct drm_device *dev)
+{
+ struct clk_hw *hw;
+ struct clk *clk;
+
+
+ /* TODO: do we need different pll in other cases? */
+ hw = mpd4_lvds_pll_init(dev);
+ if (IS_ERR(hw)) {
+ DRM_DEV_ERROR(dev->dev, "failed to register LVDS PLL\n");
+ return ERR_CAST(hw);
}
- return clk;
+ clk = devm_clk_get(dev->dev, "lcdc_clk");
+ if (clk == ERR_PTR(-ENOENT)) {
+ drm_warn(dev, "can't get LCDC clock, using PLL directly\n");
-fail:
- return ERR_PTR(ret);
+ return devm_clk_hw_get_clk(dev->dev, hw, "lcdc_clk");
+ }
+
+ return clk;
}
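mpd4_get_lcdc_clock() above prefers an "lcdc_clk" wired up in the device tree and falls back to the locally registered LVDS PLL only when the lookup reports -ENOENT ("not found"); any other error, including probe deferral, is returned as-is. A userspace model of that lookup-with-fallback, with clk_get_stub() standing in for devm_clk_get():

    #include <errno.h>
    #include <stdio.h>

    struct clk { const char *name; };
    static struct clk pll = { "mpd4_lvds_pll" };

    /* Pretend DT has no "lcdc_clk" entry, as on existing mdp4 boards. */
    static struct clk *clk_get_stub(const char *id, int *err)
    {
            (void)id;
            *err = -ENOENT;
            return NULL;
    }

    static struct clk *get_lcdc_clock(void)
    {
            int err;
            struct clk *clk = clk_get_stub("lcdc_clk", &err);

            if (!clk && err == -ENOENT) {
                    fprintf(stderr, "can't get LCDC clock, using PLL directly\n");
                    return &pll;    /* devm_clk_hw_get_clk() path in the driver */
            }
            return clk;
    }

    int main(void)
    {
            printf("using %s\n", get_lcdc_clock()->name);
            return 0;
    }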
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index 70fdc9fe228a..f8bfb908f9b4 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -13,13 +13,13 @@
#include "dp_catalog.h"
#include "dp_audio.h"
+#include "dp_drm.h"
#include "dp_panel.h"
#include "dp_reg.h"
#include "dp_display.h"
#include "dp_utils.h"
struct msm_dp_audio_private {
- struct platform_device *audio_pdev;
struct platform_device *pdev;
struct drm_device *drm_dev;
struct msm_dp_catalog *catalog;
@@ -160,24 +160,11 @@ static void msm_dp_audio_enable(struct msm_dp_audio_private *audio, bool enable)
msm_dp_catalog_audio_enable(catalog, enable);
}
-static struct msm_dp_audio_private *msm_dp_audio_get_data(struct platform_device *pdev)
+static struct msm_dp_audio_private *msm_dp_audio_get_data(struct msm_dp *msm_dp_display)
{
struct msm_dp_audio *msm_dp_audio;
- struct msm_dp *msm_dp_display;
-
- if (!pdev) {
- DRM_ERROR("invalid input\n");
- return ERR_PTR(-ENODEV);
- }
-
- msm_dp_display = platform_get_drvdata(pdev);
- if (!msm_dp_display) {
- DRM_ERROR("invalid input\n");
- return ERR_PTR(-ENODEV);
- }
msm_dp_audio = msm_dp_display->msm_dp_audio;
-
if (!msm_dp_audio) {
DRM_ERROR("invalid msm_dp_audio data\n");
return ERR_PTR(-EINVAL);
@@ -186,68 +173,16 @@ static struct msm_dp_audio_private *msm_dp_audio_get_data(struct platform_device
return container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio);
}
-static int msm_dp_audio_hook_plugged_cb(struct device *dev, void *data,
- hdmi_codec_plugged_cb fn,
- struct device *codec_dev)
-{
-
- struct platform_device *pdev;
- struct msm_dp *msm_dp_display;
-
- pdev = to_platform_device(dev);
- if (!pdev) {
- pr_err("invalid input\n");
- return -ENODEV;
- }
-
- msm_dp_display = platform_get_drvdata(pdev);
- if (!msm_dp_display) {
- pr_err("invalid input\n");
- return -ENODEV;
- }
-
- return msm_dp_display_set_plugged_cb(msm_dp_display, fn, codec_dev);
-}
-
-static int msm_dp_audio_get_eld(struct device *dev,
- void *data, uint8_t *buf, size_t len)
-{
- struct platform_device *pdev;
- struct msm_dp *msm_dp_display;
-
- pdev = to_platform_device(dev);
-
- if (!pdev) {
- DRM_ERROR("invalid input\n");
- return -ENODEV;
- }
-
- msm_dp_display = platform_get_drvdata(pdev);
- if (!msm_dp_display) {
- DRM_ERROR("invalid input\n");
- return -ENODEV;
- }
-
- mutex_lock(&msm_dp_display->connector->eld_mutex);
- memcpy(buf, msm_dp_display->connector->eld,
- min(sizeof(msm_dp_display->connector->eld), len));
- mutex_unlock(&msm_dp_display->connector->eld_mutex);
-
- return 0;
-}
-
-int msm_dp_audio_hw_params(struct device *dev,
- void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
+int msm_dp_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
{
int rc = 0;
struct msm_dp_audio_private *audio;
- struct platform_device *pdev;
struct msm_dp *msm_dp_display;
- pdev = to_platform_device(dev);
- msm_dp_display = platform_get_drvdata(pdev);
+ msm_dp_display = to_dp_bridge(bridge)->msm_dp_display;
/*
* there could be cases where sound card can be opened even
@@ -262,7 +197,7 @@ int msm_dp_audio_hw_params(struct device *dev,
goto end;
}
- audio = msm_dp_audio_get_data(pdev);
+ audio = msm_dp_audio_get_data(msm_dp_display);
if (IS_ERR(audio)) {
rc = PTR_ERR(audio);
goto end;
@@ -281,15 +216,14 @@ end:
return rc;
}
-static void msm_dp_audio_shutdown(struct device *dev, void *data)
+void msm_dp_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge)
{
struct msm_dp_audio_private *audio;
- struct platform_device *pdev;
struct msm_dp *msm_dp_display;
- pdev = to_platform_device(dev);
- msm_dp_display = platform_get_drvdata(pdev);
- audio = msm_dp_audio_get_data(pdev);
+ msm_dp_display = to_dp_bridge(bridge)->msm_dp_display;
+ audio = msm_dp_audio_get_data(msm_dp_display);
if (IS_ERR(audio)) {
DRM_ERROR("failed to get audio data\n");
return;
@@ -311,47 +245,6 @@ static void msm_dp_audio_shutdown(struct device *dev, void *data)
msm_dp_display_signal_audio_complete(msm_dp_display);
}
-static const struct hdmi_codec_ops msm_dp_audio_codec_ops = {
- .hw_params = msm_dp_audio_hw_params,
- .audio_shutdown = msm_dp_audio_shutdown,
- .get_eld = msm_dp_audio_get_eld,
- .hook_plugged_cb = msm_dp_audio_hook_plugged_cb,
-};
-
-static struct hdmi_codec_pdata codec_data = {
- .ops = &msm_dp_audio_codec_ops,
- .max_i2s_channels = 8,
- .i2s = 1,
-};
-
-void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio)
-{
- struct msm_dp_audio_private *audio_priv;
-
- audio_priv = container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio);
-
- if (audio_priv->audio_pdev) {
- platform_device_unregister(audio_priv->audio_pdev);
- audio_priv->audio_pdev = NULL;
- }
-}
-
-int msm_dp_register_audio_driver(struct device *dev,
- struct msm_dp_audio *msm_dp_audio)
-{
- struct msm_dp_audio_private *audio_priv;
-
- audio_priv = container_of(msm_dp_audio,
- struct msm_dp_audio_private, msm_dp_audio);
-
- audio_priv->audio_pdev = platform_device_register_data(dev,
- HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &codec_data,
- sizeof(codec_data));
- return PTR_ERR_OR_ZERO(audio_priv->audio_pdev);
-}
-
struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
struct msm_dp_catalog *catalog)
{
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
index beea34cbab77..58fc14693e48 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.h
+++ b/drivers/gpu/drm/msm/dp/dp_audio.h
@@ -36,23 +36,6 @@ struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
struct msm_dp_catalog *catalog);
/**
- * msm_dp_register_audio_driver()
- *
- * Registers DP device with hdmi_codec interface.
- *
- * @dev: DP device instance.
- * @msm_dp_audio: an instance of msm_dp_audio module.
- *
- *
- * Returns the error code in case of failure, otherwise
- * zero on success.
- */
-int msm_dp_register_audio_driver(struct device *dev,
- struct msm_dp_audio *msm_dp_audio);
-
-void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio);
-
-/**
* msm_dp_audio_put()
*
* Cleans the msm_dp_audio instance.
@@ -61,10 +44,12 @@ void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm
*/
void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio);
-int msm_dp_audio_hw_params(struct device *dev,
- void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params);
+int msm_dp_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params);
+void msm_dp_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge);
#endif /* _DP_AUDIO_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index d8633a596f8d..a50bfafbb4ea 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1034,10 +1034,12 @@ static int msm_dp_ctrl_set_vx_px(struct msm_dp_ctrl_private *ctrl,
return 0;
}
-static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_update_phy_vx_px(struct msm_dp_ctrl_private *ctrl,
+ enum drm_dp_phy dp_phy)
{
struct msm_dp_link *link = ctrl->link;
- int ret = 0, lane, lane_cnt;
+ int lane, lane_cnt, reg;
+ int ret = 0;
u8 buf[4];
u32 max_level_reached = 0;
u32 voltage_swing_level = link->phy_params.v_level;
@@ -1075,8 +1077,13 @@ static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl)
drm_dbg_dp(ctrl->drm_dev, "sink: p|v=0x%x\n",
voltage_swing_level | pre_emphasis_level);
- ret = drm_dp_dpcd_write(ctrl->aux, DP_TRAINING_LANE0_SET,
- buf, lane_cnt);
+
+ if (dp_phy == DP_PHY_DPRX)
+ reg = DP_TRAINING_LANE0_SET;
+ else
+ reg = DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
+
+ ret = drm_dp_dpcd_write(ctrl->aux, reg, buf, lane_cnt);
if (ret == lane_cnt)
ret = 0;
@@ -1084,9 +1091,10 @@ static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl)
}
static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl,
- u8 pattern)
+ u8 pattern, enum drm_dp_phy dp_phy)
{
u8 buf;
+ int reg;
int ret = 0;
drm_dbg_dp(ctrl->drm_dev, "sink: pattern=%x\n", pattern);
@@ -1096,31 +1104,26 @@ static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl,
if (pattern && pattern != DP_TRAINING_PATTERN_4)
buf |= DP_LINK_SCRAMBLING_DISABLE;
- ret = drm_dp_dpcd_writeb(ctrl->aux, DP_TRAINING_PATTERN_SET, buf);
- return ret == 1;
-}
-
-static int msm_dp_ctrl_read_link_status(struct msm_dp_ctrl_private *ctrl,
- u8 *link_status)
-{
- int ret = 0, len;
-
- len = drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
- if (len != DP_LINK_STATUS_SIZE) {
- DRM_ERROR("DP link status read failed, err: %d\n", len);
- ret = -EINVAL;
- }
+ if (dp_phy == DP_PHY_DPRX)
+ reg = DP_TRAINING_PATTERN_SET;
+ else
+ reg = DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
- return ret;
+ ret = drm_dp_dpcd_writeb(ctrl->aux, reg, buf);
+ return ret == 1;
}
static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
- int *training_step)
+ int *training_step, enum drm_dp_phy dp_phy)
{
+ int delay_us;
int tries, old_v_level, ret = 0;
u8 link_status[DP_LINK_STATUS_SIZE];
int const maximum_retries = 4;
+ delay_us = drm_dp_read_clock_recovery_delay(ctrl->aux,
+ ctrl->panel->dpcd, dp_phy, false);
+
msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
*training_step = DP_TRAINING_1;
@@ -1129,18 +1132,19 @@ static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
if (ret)
return ret;
msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
- DP_LINK_SCRAMBLING_DISABLE);
+ DP_LINK_SCRAMBLING_DISABLE, dp_phy);
- ret = msm_dp_ctrl_update_vx_px(ctrl);
+ msm_dp_link_reset_phy_params_vx_px(ctrl->link);
+ ret = msm_dp_ctrl_update_phy_vx_px(ctrl, dp_phy);
if (ret)
return ret;
tries = 0;
old_v_level = ctrl->link->phy_params.v_level;
for (tries = 0; tries < maximum_retries; tries++) {
- drm_dp_link_train_clock_recovery_delay(ctrl->aux, ctrl->panel->dpcd);
+ fsleep(delay_us);
- ret = msm_dp_ctrl_read_link_status(ctrl, link_status);
+ ret = drm_dp_dpcd_read_phy_link_status(ctrl->aux, dp_phy, link_status);
if (ret)
return ret;
@@ -1161,7 +1165,7 @@ static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
}
msm_dp_link_adjust_levels(ctrl->link, link_status);
- ret = msm_dp_ctrl_update_vx_px(ctrl);
+ ret = msm_dp_ctrl_update_phy_vx_px(ctrl, dp_phy);
if (ret)
return ret;
}
@@ -1213,21 +1217,31 @@ static int msm_dp_ctrl_link_lane_down_shift(struct msm_dp_ctrl_private *ctrl)
return 0;
}
-static void msm_dp_ctrl_clear_training_pattern(struct msm_dp_ctrl_private *ctrl)
+static void msm_dp_ctrl_clear_training_pattern(struct msm_dp_ctrl_private *ctrl,
+ enum drm_dp_phy dp_phy)
{
- msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE);
- drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
+ int delay_us;
+
+ msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE, dp_phy);
+
+ delay_us = drm_dp_read_channel_eq_delay(ctrl->aux,
+ ctrl->panel->dpcd, dp_phy, false);
+ fsleep(delay_us);
}
static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
- int *training_step)
+ int *training_step, enum drm_dp_phy dp_phy)
{
+ int delay_us;
int tries = 0, ret = 0;
u8 pattern;
u32 state_ctrl_bit;
int const maximum_retries = 5;
u8 link_status[DP_LINK_STATUS_SIZE];
+ delay_us = drm_dp_read_channel_eq_delay(ctrl->aux,
+ ctrl->panel->dpcd, dp_phy, false);
+
msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
*training_step = DP_TRAINING_2;
@@ -1247,12 +1261,12 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
if (ret)
return ret;
- msm_dp_ctrl_train_pattern_set(ctrl, pattern);
+ msm_dp_ctrl_train_pattern_set(ctrl, pattern, dp_phy);
for (tries = 0; tries <= maximum_retries; tries++) {
- drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
+ fsleep(delay_us);
- ret = msm_dp_ctrl_read_link_status(ctrl, link_status);
+ ret = drm_dp_dpcd_read_phy_link_status(ctrl->aux, dp_phy, link_status);
if (ret)
return ret;
@@ -1262,7 +1276,7 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
}
msm_dp_link_adjust_levels(ctrl->link, link_status);
- ret = msm_dp_ctrl_update_vx_px(ctrl);
+ ret = msm_dp_ctrl_update_phy_vx_px(ctrl, dp_phy);
if (ret)
return ret;
@@ -1271,9 +1285,32 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
return -ETIMEDOUT;
}
+static int msm_dp_ctrl_link_train_1_2(struct msm_dp_ctrl_private *ctrl,
+ int *training_step, enum drm_dp_phy dp_phy)
+{
+ int ret;
+
+ ret = msm_dp_ctrl_link_train_1(ctrl, training_step, dp_phy);
+ if (ret) {
+ DRM_ERROR("link training #1 on phy %d failed. ret=%d\n", dp_phy, ret);
+ return ret;
+ }
+ drm_dbg_dp(ctrl->drm_dev, "link training #1 on phy %d successful\n", dp_phy);
+
+ ret = msm_dp_ctrl_link_train_2(ctrl, training_step, dp_phy);
+ if (ret) {
+ DRM_ERROR("link training #2 on phy %d failed. ret=%d\n", dp_phy, ret);
+ return ret;
+ }
+ drm_dbg_dp(ctrl->drm_dev, "link training #2 on phy %d successful\n", dp_phy);
+
+ return 0;
+}
+
static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl,
int *training_step)
{
+ int i;
int ret = 0;
const u8 *dpcd = ctrl->panel->dpcd;
u8 encoding[] = { 0, DP_SET_ANSI_8B10B };
@@ -1286,8 +1323,6 @@ static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl,
link_info.rate = ctrl->link->link_params.rate;
link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING;
- msm_dp_link_reset_phy_params_vx_px(ctrl->link);
-
msm_dp_aux_link_configure(ctrl->aux, &link_info);
if (drm_dp_max_downspread(dpcd))
@@ -1302,24 +1337,27 @@ static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl,
&assr, 1);
}
- ret = msm_dp_ctrl_link_train_1(ctrl, training_step);
+ for (i = ctrl->link->lttpr_count - 1; i >= 0; i--) {
+ enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);
+
+ ret = msm_dp_ctrl_link_train_1_2(ctrl, training_step, dp_phy);
+ msm_dp_ctrl_clear_training_pattern(ctrl, dp_phy);
+
+ if (ret)
+ break;
+ }
+
if (ret) {
- DRM_ERROR("link training #1 failed. ret=%d\n", ret);
+ DRM_ERROR("link training of LTTPR(s) failed. ret=%d\n", ret);
goto end;
}
- /* print success info as this is a result of user initiated action */
- drm_dbg_dp(ctrl->drm_dev, "link training #1 successful\n");
-
- ret = msm_dp_ctrl_link_train_2(ctrl, training_step);
+ ret = msm_dp_ctrl_link_train_1_2(ctrl, training_step, DP_PHY_DPRX);
if (ret) {
- DRM_ERROR("link training #2 failed. ret=%d\n", ret);
+ DRM_ERROR("link training on sink failed. ret=%d\n", ret);
goto end;
}
- /* print success info as this is a result of user initiated action */
- drm_dbg_dp(ctrl->drm_dev, "link training #2 successful\n");
-
end:
msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
@@ -1636,7 +1674,7 @@ static int msm_dp_ctrl_link_maintenance(struct msm_dp_ctrl_private *ctrl)
if (ret)
goto end;
- msm_dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX);
msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
@@ -1660,7 +1698,7 @@ static bool msm_dp_ctrl_send_phy_test_pattern(struct msm_dp_ctrl_private *ctrl)
return false;
}
msm_dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested);
- msm_dp_ctrl_update_vx_px(ctrl);
+ msm_dp_ctrl_update_phy_vx_px(ctrl, DP_PHY_DPRX);
msm_dp_link_send_test_response(ctrl->link);
pattern_sent = msm_dp_catalog_ctrl_read_phy_pattern(ctrl->catalog);
@@ -1805,7 +1843,7 @@ static bool msm_dp_ctrl_channel_eq_ok(struct msm_dp_ctrl_private *ctrl)
u8 link_status[DP_LINK_STATUS_SIZE];
int num_lanes = ctrl->link->link_params.num_lanes;
- msm_dp_ctrl_read_link_status(ctrl, link_status);
+ drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
return drm_dp_channel_eq_ok(link_status, num_lanes);
}
@@ -1863,7 +1901,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
if (!msm_dp_catalog_link_is_connected(ctrl->catalog))
break;
- msm_dp_ctrl_read_link_status(ctrl, link_status);
+ drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
rc = msm_dp_ctrl_link_rate_down_shift(ctrl);
if (rc < 0) { /* already in RBR = 1.6G */
@@ -1888,7 +1926,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
if (!msm_dp_catalog_link_is_connected(ctrl->catalog))
break;
- msm_dp_ctrl_read_link_status(ctrl, link_status);
+ drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
if (!drm_dp_clock_recovery_ok(link_status,
ctrl->link->link_params.num_lanes))
@@ -1902,7 +1940,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
}
/* stop link training before start re training */
- msm_dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX);
}
rc = msm_dp_ctrl_reinitialize_mainlink(ctrl);
@@ -1926,7 +1964,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
* link training failed
* end txing train pattern here
*/
- msm_dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX);
msm_dp_ctrl_deinitialize_mainlink(ctrl);
rc = -ECONNRESET;
@@ -1997,7 +2035,7 @@ int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train
msm_dp_ctrl_link_retrain(ctrl);
/* stop txing train pattern to end link training */
- msm_dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX);
/*
* Set up transfer unit values and set controller state to send
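The dp_ctrl.c rework above makes link training repeater-aware: clock recovery and channel equalization (train_1 and train_2) now take an explicit drm_dp_phy target, DPCD writes go to the per-repeater TRAINING_LANE0_SET / TRAINING_PATTERN_SET registers, and training delays come from the per-phy DPCD fields instead of the DPRX-only helpers. The overall sequence trains each LTTPR segment in descending index order, clearing the training pattern after each, and finishes with the sink (DP_PHY_DPRX). A sketch of that loop with stand-ins for the driver helpers:

    /* Stand-ins; drm_dp_helper numbers repeaters DP_PHY_LTTPR1 upward. */
    enum { DP_PHY_DPRX = 0, DP_PHY_LTTPR1 = 1 };
    #define DP_PHY_LTTPR(i) (DP_PHY_LTTPR1 + (i))

    static int train_1_2(int phy)      { (void)phy; return 0; }
    static void clear_pattern(int phy) { (void)phy; }

    static int link_train(int lttpr_count)
    {
            int i, ret;

            for (i = lttpr_count - 1; i >= 0; i--) {
                    ret = train_1_2(DP_PHY_LTTPR(i));
                    clear_pattern(DP_PHY_LTTPR(i));  /* always, as in the hunk */
                    if (ret)
                            return ret;
            }
            return train_1_2(DP_PHY_DPRX);  /* sink last; its pattern is cleared later */
    }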
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index bbc47d86ae9e..386c4669c831 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/string_choices.h>
#include <drm/display/drm_dp_aux_bus.h>
+#include <drm/display/drm_hdmi_audio_helper.h>
#include <drm/drm_edid.h>
#include "msm_drv.h"
@@ -288,13 +289,6 @@ static int msm_dp_display_bind(struct device *dev, struct device *master,
goto end;
}
-
- rc = msm_dp_register_audio_driver(dev, dp->audio);
- if (rc) {
- DRM_ERROR("Audio registration Dp failed\n");
- goto end;
- }
-
rc = msm_dp_hpd_event_thread_start(dp);
if (rc) {
DRM_ERROR("Event thread create failed\n");
@@ -316,7 +310,6 @@ static void msm_dp_display_unbind(struct device *dev, struct device *master,
of_dp_aux_depopulate_bus(dp->aux);
- msm_dp_unregister_audio_driver(dev, dp->audio);
msm_dp_aux_unregister(dp->aux);
dp->drm_dev = NULL;
dp->aux->drm_dev = NULL;
@@ -367,17 +360,21 @@ static int msm_dp_display_send_hpd_notification(struct msm_dp_display_private *d
return 0;
}
-static void msm_dp_display_lttpr_init(struct msm_dp_display_private *dp)
+static int msm_dp_display_lttpr_init(struct msm_dp_display_private *dp, u8 *dpcd)
{
- u8 lttpr_caps[DP_LTTPR_COMMON_CAP_SIZE];
- int rc;
+ int rc, lttpr_count;
- if (drm_dp_read_lttpr_common_caps(dp->aux, dp->panel->dpcd, lttpr_caps))
- return;
+ if (drm_dp_read_lttpr_common_caps(dp->aux, dpcd, dp->link->lttpr_common_caps))
+ return 0;
- rc = drm_dp_lttpr_init(dp->aux, drm_dp_lttpr_count(lttpr_caps));
- if (rc)
+ lttpr_count = drm_dp_lttpr_count(dp->link->lttpr_common_caps);
+ rc = drm_dp_lttpr_init(dp->aux, lttpr_count);
+ if (rc) {
DRM_ERROR("failed to set LTTPRs transparency mode, rc=%d\n", rc);
+ return 0;
+ }
+
+ return lttpr_count;
}
static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp)
@@ -385,12 +382,17 @@ static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp)
struct drm_connector *connector = dp->msm_dp_display.connector;
const struct drm_display_info *info = &connector->display_info;
int rc = 0;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
- rc = msm_dp_panel_read_sink_caps(dp->panel, connector);
+ rc = drm_dp_read_dpcd_caps(dp->aux, dpcd);
if (rc)
goto end;
- msm_dp_display_lttpr_init(dp);
+ dp->link->lttpr_count = msm_dp_display_lttpr_init(dp, dpcd);
+
+ rc = msm_dp_panel_read_sink_caps(dp->panel, connector);
+ if (rc)
+ goto end;
msm_dp_link_process_request(dp->link);
@@ -626,9 +628,9 @@ static void msm_dp_display_handle_plugged_change(struct msm_dp *msm_dp_display,
struct msm_dp_display_private, msm_dp_display);
/* notify audio subsystem only if sink supports audio */
- if (msm_dp_display->plugged_cb && msm_dp_display->codec_dev &&
- dp->audio_supported)
- msm_dp_display->plugged_cb(msm_dp_display->codec_dev, plugged);
+ if (dp->audio_supported)
+ drm_connector_hdmi_audio_plugged_notify(msm_dp_display->connector,
+ plugged);
}
static int msm_dp_hpd_unplug_handle(struct msm_dp_display_private *dp, u32 data)
@@ -907,19 +909,6 @@ static int msm_dp_display_disable(struct msm_dp_display_private *dp)
return 0;
}
-int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display,
- hdmi_codec_plugged_cb fn, struct device *codec_dev)
-{
- bool plugged;
-
- msm_dp_display->plugged_cb = fn;
- msm_dp_display->codec_dev = codec_dev;
- plugged = msm_dp_display->link_ready;
- msm_dp_display_handle_plugged_change(msm_dp_display, plugged);
-
- return 0;
-}
-
/**
* msm_dp_bridge_mode_valid - callback to determine if specified mode is valid
* @bridge: Pointer to drm bridge structure
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index ecbc2d92f546..cc6e2cab36e9 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -7,7 +7,6 @@
#define _DP_DISPLAY_H_
#include "dp_panel.h"
-#include <sound/hdmi-codec.h>
#include "disp/msm_disp_snapshot.h"
#define DP_MAX_PIXEL_CLK_KHZ 675000
@@ -15,7 +14,6 @@
struct msm_dp {
struct drm_device *drm_dev;
struct platform_device *pdev;
- struct device *codec_dev;
struct drm_connector *connector;
struct drm_bridge *next_bridge;
bool link_ready;
@@ -25,14 +23,10 @@ struct msm_dp {
bool is_edp;
bool internal_hpd;
- hdmi_codec_plugged_cb plugged_cb;
-
struct msm_dp_audio *msm_dp_audio;
bool psr_supported;
};
-int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display,
- hdmi_codec_plugged_cb fn, struct device *codec_dev);
int msm_dp_display_get_modes(struct msm_dp *msm_dp_display);
bool msm_dp_display_check_video_test(struct msm_dp *msm_dp_display);
int msm_dp_display_get_test_bpp(struct msm_dp *msm_dp_display);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index cca57e56c906..f222d7ccaa88 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -12,6 +12,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
+#include "dp_audio.h"
#include "dp_drm.h"
/**
@@ -114,6 +115,9 @@ static const struct drm_bridge_funcs msm_dp_bridge_ops = {
.hpd_disable = msm_dp_bridge_hpd_disable,
.hpd_notify = msm_dp_bridge_hpd_notify,
.debugfs_init = msm_dp_bridge_debugfs_init,
+
+ .dp_audio_prepare = msm_dp_audio_prepare,
+ .dp_audio_shutdown = msm_dp_audio_shutdown,
};
static int msm_edp_bridge_atomic_check(struct drm_bridge *drm_bridge,
@@ -296,14 +300,15 @@ int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
struct msm_dp_bridge *msm_dp_bridge;
struct drm_bridge *bridge;
- msm_dp_bridge = devm_kzalloc(dev->dev, sizeof(*msm_dp_bridge), GFP_KERNEL);
- if (!msm_dp_bridge)
- return -ENOMEM;
+ msm_dp_bridge = devm_drm_bridge_alloc(dev->dev, struct msm_dp_bridge, bridge,
+ msm_dp_display->is_edp ? &msm_edp_bridge_ops :
+ &msm_dp_bridge_ops);
+ if (IS_ERR(msm_dp_bridge))
+ return PTR_ERR(msm_dp_bridge);
msm_dp_bridge->msm_dp_display = msm_dp_display;
bridge = &msm_dp_bridge->bridge;
- bridge->funcs = msm_dp_display->is_edp ? &msm_edp_bridge_ops : &msm_dp_bridge_ops;
bridge->type = msm_dp_display->connector_type;
bridge->ycbcr_420_allowed = yuv_supported;
@@ -320,9 +325,13 @@ int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
*/
if (!msm_dp_display->is_edp) {
bridge->ops =
+ DRM_BRIDGE_OP_DP_AUDIO |
DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_HPD |
DRM_BRIDGE_OP_MODES;
+ bridge->hdmi_audio_dev = &msm_dp_display->pdev->dev;
+ bridge->hdmi_audio_max_i2s_playback_channels = 8;
+ bridge->hdmi_audio_dai_port = -1;
}
rc = devm_drm_bridge_add(dev->dev, bridge);
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 1a1fbb2d7d4f..92a9077959b3 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -714,21 +714,21 @@ end:
static int msm_dp_link_parse_sink_status_field(struct msm_dp_link_private *link)
{
- int len;
+ int ret;
link->prev_sink_count = link->msm_dp_link.sink_count;
- len = drm_dp_read_sink_count(link->aux);
- if (len < 0) {
+ ret = drm_dp_read_sink_count(link->aux);
+ if (ret < 0) {
DRM_ERROR("DP parse sink count failed\n");
- return len;
+ return ret;
}
- link->msm_dp_link.sink_count = len;
+ link->msm_dp_link.sink_count = ret;
- len = drm_dp_dpcd_read_link_status(link->aux,
- link->link_status);
- if (len < DP_LINK_STATUS_SIZE) {
+ ret = drm_dp_dpcd_read_link_status(link->aux,
+ link->link_status);
+ if (ret < 0) {
DRM_ERROR("DP link status read failed\n");
- return len;
+ return ret;
}
return msm_dp_link_parse_request(link);
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index 8db5d5698a97..ba47c6d19fbf 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -7,6 +7,7 @@
#define _DP_LINK_H_
#include "dp_aux.h"
+#include <drm/display/drm_dp_helper.h>
#define DS_PORT_STATUS_CHANGED 0x200
#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF
@@ -60,6 +61,9 @@ struct msm_dp_link_phy_params {
};
struct msm_dp_link {
+ u8 lttpr_common_caps[DP_LTTPR_COMMON_CAP_SIZE];
+ int lttpr_count;
+
u32 sink_request;
u32 test_response;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 92415bf8aa16..4e8ab75c771b 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -47,7 +47,7 @@ static void msm_dp_panel_read_psr_cap(struct msm_dp_panel_private *panel)
static int msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel)
{
- int rc;
+ int rc, max_lttpr_lanes, max_lttpr_rate;
struct msm_dp_panel_private *panel;
struct msm_dp_link_info *link_info;
u8 *dpcd, major, minor;
@@ -75,6 +75,16 @@ static int msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel)
if (link_info->rate > msm_dp_panel->max_dp_link_rate)
link_info->rate = msm_dp_panel->max_dp_link_rate;
+ /* Limit data lanes from LTTPR capabilities, if any */
+ max_lttpr_lanes = drm_dp_lttpr_max_lane_count(panel->link->lttpr_common_caps);
+ if (max_lttpr_lanes && max_lttpr_lanes < link_info->num_lanes)
+ link_info->num_lanes = max_lttpr_lanes;
+
+ /* Limit link rate from LTTPR capabilities, if any */
+ max_lttpr_rate = drm_dp_lttpr_max_link_rate(panel->link->lttpr_common_caps);
+ if (max_lttpr_rate && max_lttpr_rate < link_info->rate)
+ link_info->rate = max_lttpr_rate;
+
drm_dbg_dp(panel->drm_dev, "version: %d.%d\n", major, minor);
drm_dbg_dp(panel->drm_dev, "link_rate=%d\n", link_info->rate);
drm_dbg_dp(panel->drm_dev, "lane_count=%d\n", link_info->num_lanes);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 7754dcec33d0..7675558ae2e5 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -221,6 +221,22 @@ static const struct msm_dsi_config sc7280_dsi_cfg = {
},
};
+static const struct regulator_bulk_data sa8775p_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 8300 }, /* 1.2 V */
+ { .supply = "refgen" },
+};
+
+static const struct msm_dsi_config sa8775p_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = sa8775p_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sa8775p_dsi_regulators),
+ .bus_clk_names = dsi_v2_4_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_v2_4_clk_names),
+ .io_start = {
+ { 0xae94000, 0xae96000 },
+ },
+};
+
static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
.link_clk_set_rate = dsi_link_clk_set_rate_v2,
.link_clk_enable = dsi_link_clk_enable_v2,
@@ -294,6 +310,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_0,
&sc7280_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_1,
+ &sa8775p_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_6_0,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_7_0,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 120cb65164c1..65b0705fac0e 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -27,6 +27,7 @@
#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000
#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000
+#define MSM_DSI_6G_VER_MINOR_V2_5_1 0x20050001
#define MSM_DSI_6G_VER_MINOR_V2_6_0 0x20060000
#define MSM_DSI_6G_VER_MINOR_V2_7_0 0x20070000
#define MSM_DSI_6G_VER_MINOR_V2_8_0 0x20080000
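The new MSM_DSI_6G_VER_MINOR_V2_5_1 value fits the pattern of its neighbours: judging purely from the listed constants, the word appears to pack the version with the major number in the top nibble pair and minor/patch in the lower fields (0x20050001 -> v2.5.1). A decoding sketch of that inferred layout, an observation from the values above rather than a documented rule:

    #include <stdint.h>
    #include <stdio.h>

    static void decode(uint32_t v)
    {
            printf("v%u.%u.%u\n", (unsigned)(v >> 28),
                   (unsigned)((v >> 16) & 0xfff), (unsigned)(v & 0xffff));
    }

    int main(void)
    {
            decode(0x20050001);     /* prints v2.5.1 */
            decode(0x20080000);     /* prints v2.8.0 */
            return 0;
    }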
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 4fabb01345aa..ca400924d4ee 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -434,12 +434,13 @@ static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge,
}
static int dsi_mgr_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- return drm_bridge_attach(bridge->encoder, msm_dsi->next_bridge,
+ return drm_bridge_attach(encoder, msm_dsi->next_bridge,
bridge, flags);
}
@@ -461,15 +462,14 @@ int msm_dsi_manager_connector_init(struct msm_dsi *msm_dsi,
struct drm_connector *connector;
int ret;
- dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
- sizeof(*dsi_bridge), GFP_KERNEL);
- if (!dsi_bridge)
- return -ENOMEM;
+ dsi_bridge = devm_drm_bridge_alloc(msm_dsi->dev->dev, struct dsi_bridge, base,
+ &dsi_mgr_bridge_funcs);
+ if (IS_ERR(dsi_bridge))
+ return PTR_ERR(dsi_bridge);
dsi_bridge->id = msm_dsi->id;
bridge = &dsi_bridge->base;
- bridge->funcs = &dsi_mgr_bridge_funcs;
ret = devm_drm_bridge_add(msm_dsi->dev->dev, bridge);
if (ret)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index c0bcc6828963..5973d7325699 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -581,6 +581,10 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_7nm_cfgs },
{ .compatible = "qcom,dsi-phy-7nm-8150",
.data = &dsi_phy_7nm_8150_cfgs },
+ { .compatible = "qcom,sa8775p-dsi-phy-5nm",
+ .data = &dsi_phy_5nm_8775p_cfgs },
+ { .compatible = "qcom,sar2130p-dsi-phy-5nm",
+ .data = &dsi_phy_5nm_sar2130p_cfgs },
{ .compatible = "qcom,sc7280-dsi-phy-7nm",
.data = &dsi_phy_7nm_7280_cfgs },
{ .compatible = "qcom,sm6375-dsi-phy-7nm",
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 1925418d9999..7ea608f620fe 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -59,6 +59,8 @@ extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8350_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8775p_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_5nm_sar2130p_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index a92decbee5b5..c19890358b74 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -1147,6 +1147,10 @@ static const struct regulator_bulk_data dsi_phy_7nm_37750uA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 37550 },
};
+static const struct regulator_bulk_data dsi_phy_7nm_48000uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 48000 },
+};
+
static const struct regulator_bulk_data dsi_phy_7nm_98000uA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 98000 },
};
@@ -1289,6 +1293,52 @@ const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs = {
.quirks = DSI_PHY_7NM_QUIRK_V4_3,
};
+const struct msm_dsi_phy_cfg dsi_phy_5nm_8775p_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_48000uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_48000uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_5nm_sar2130p_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_97800uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_97800uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae95000, 0xae97000 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V5_2,
+};
+
const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_7nm_98400uA_regulators,
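The CONFIG_64BIT guard in the two new PHY configs is an overflow workaround, not a policy choice: max_pll_rate is an unsigned long, and 5000000000 (5 GHz) does not fit in the 32-bit unsigned long maximum of 4294967295, so 32-bit builds fall back to ULONG_MAX and effectively skip the upper-bound check.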
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 248541ff4492..2fd388b892dc 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -8,6 +8,7 @@
#include <linux/gpio/consumer.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <drm/drm_bridge_connector.h>
@@ -199,12 +200,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- ret = msm_hdmi_hpd_enable(hdmi->bridge);
- if (ret < 0) {
- DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
- goto fail;
- }
-
return 0;
fail:
@@ -220,28 +215,24 @@ fail:
* The hdmi device:
*/
-#define HDMI_CFG(item, entry) \
- .item ## _names = item ##_names_ ## entry, \
- .item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry)
-
-static const char *hpd_reg_names_8960[] = {"core-vdda"};
-static const char *hpd_clk_names_8960[] = {"core", "master_iface", "slave_iface"};
+static const char * const pwr_reg_names_8960[] = {"core-vdda"};
+static const char * const pwr_clk_names_8960[] = {"core", "master_iface", "slave_iface"};
static const struct hdmi_platform_config hdmi_tx_8960_config = {
- HDMI_CFG(hpd_reg, 8960),
- HDMI_CFG(hpd_clk, 8960),
+ .pwr_reg_names = pwr_reg_names_8960,
+ .pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names_8960),
+ .pwr_clk_names = pwr_clk_names_8960,
+ .pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names_8960),
};
-static const char *pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"};
-static const char *pwr_clk_names_8x74[] = {"extp", "alt_iface"};
-static const char *hpd_clk_names_8x74[] = {"iface", "core", "mdp_core"};
-static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
+static const char * const pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"};
+static const char * const pwr_clk_names_8x74[] = {"iface", "core", "mdp_core", "alt_iface"};
static const struct hdmi_platform_config hdmi_tx_8974_config = {
- HDMI_CFG(pwr_reg, 8x74),
- HDMI_CFG(pwr_clk, 8x74),
- HDMI_CFG(hpd_clk, 8x74),
- .hpd_freq = hpd_clk_freq_8x74,
+ .pwr_reg_names = pwr_reg_names_8x74,
+ .pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names_8x74),
+ .pwr_clk_names = pwr_clk_names_8x74,
+ .pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names_8x74),
};
static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
@@ -264,9 +255,6 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
struct msm_drm_private *priv = dev_get_drvdata(master);
if (priv->hdmi) {
- if (priv->hdmi->bridge)
- msm_hdmi_hpd_disable(priv->hdmi);
-
msm_hdmi_destroy(priv->hdmi);
priv->hdmi = NULL;
}
@@ -296,6 +284,7 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
hdmi->pdev = pdev;
hdmi->config = config;
spin_lock_init(&hdmi->reg_lock);
+ mutex_init(&hdmi->state_mutex);
ret = drm_of_find_panel_or_bridge(pdev->dev.of_node, 1, 0, NULL, &hdmi->next_bridge);
if (ret && ret != -ENODEV)
@@ -322,20 +311,6 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
if (hdmi->irq < 0)
return hdmi->irq;
- hdmi->hpd_regs = devm_kcalloc(&pdev->dev,
- config->hpd_reg_cnt,
- sizeof(hdmi->hpd_regs[0]),
- GFP_KERNEL);
- if (!hdmi->hpd_regs)
- return -ENOMEM;
-
- for (i = 0; i < config->hpd_reg_cnt; i++)
- hdmi->hpd_regs[i].supply = config->hpd_reg_names[i];
-
- ret = devm_regulator_bulk_get(&pdev->dev, config->hpd_reg_cnt, hdmi->hpd_regs);
- if (ret)
- return dev_err_probe(dev, ret, "failed to get hpd regulators\n");
-
hdmi->pwr_regs = devm_kcalloc(&pdev->dev,
config->pwr_reg_cnt,
sizeof(hdmi->pwr_regs[0]),
@@ -350,25 +325,6 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "failed to get pwr regulators\n");
- hdmi->hpd_clks = devm_kcalloc(&pdev->dev,
- config->hpd_clk_cnt,
- sizeof(hdmi->hpd_clks[0]),
- GFP_KERNEL);
- if (!hdmi->hpd_clks)
- return -ENOMEM;
-
- for (i = 0; i < config->hpd_clk_cnt; i++) {
- struct clk *clk;
-
- clk = msm_clk_get(pdev, config->hpd_clk_names[i]);
- if (IS_ERR(clk))
- return dev_err_probe(dev, PTR_ERR(clk),
- "failed to get hpd clk: %s\n",
- config->hpd_clk_names[i]);
-
- hdmi->hpd_clks[i] = clk;
- }
-
hdmi->pwr_clks = devm_kcalloc(&pdev->dev,
config->pwr_clk_cnt,
sizeof(hdmi->pwr_clks[0]),
@@ -376,17 +332,17 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
if (!hdmi->pwr_clks)
return -ENOMEM;
- for (i = 0; i < config->pwr_clk_cnt; i++) {
- struct clk *clk;
+ for (i = 0; i < config->pwr_clk_cnt; i++)
+ hdmi->pwr_clks[i].id = config->pwr_clk_names[i];
- clk = msm_clk_get(pdev, config->pwr_clk_names[i]);
- if (IS_ERR(clk))
- return dev_err_probe(dev, PTR_ERR(clk),
- "failed to get pwr clk: %s\n",
- config->pwr_clk_names[i]);
+ ret = devm_clk_bulk_get(&pdev->dev, config->pwr_clk_cnt, hdmi->pwr_clks);
+ if (ret)
+ return ret;
- hdmi->pwr_clks[i] = clk;
- }
+ hdmi->extp_clk = devm_clk_get_optional(&pdev->dev, "extp");
+ if (IS_ERR(hdmi->extp_clk))
+ return dev_err_probe(dev, PTR_ERR(hdmi->extp_clk),
+ "failed to get extp clock\n");
hdmi->hpd_gpiod = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
/* This will catch e.g. -EPROBE_DEFER */
@@ -432,6 +388,48 @@ static void msm_hdmi_dev_remove(struct platform_device *pdev)
msm_hdmi_put_phy(hdmi);
}
+static int msm_hdmi_runtime_suspend(struct device *dev)
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+ const struct hdmi_platform_config *config = hdmi->config;
+
+ clk_bulk_disable_unprepare(config->pwr_clk_cnt, hdmi->pwr_clks);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ regulator_bulk_disable(config->pwr_reg_cnt, hdmi->pwr_regs);
+
+ return 0;
+}
+
+static int msm_hdmi_runtime_resume(struct device *dev)
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+ const struct hdmi_platform_config *config = hdmi->config;
+ int ret;
+
+ ret = regulator_bulk_enable(config->pwr_reg_cnt, hdmi->pwr_regs);
+ if (ret)
+ return ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ goto fail;
+
+ ret = clk_bulk_prepare_enable(config->pwr_clk_cnt, hdmi->pwr_clks);
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
+ pinctrl_pm_select_sleep_state(dev);
+
+ return ret;
+}
+
+DEFINE_RUNTIME_DEV_PM_OPS(msm_hdmi_pm_ops, msm_hdmi_runtime_suspend, msm_hdmi_runtime_resume, NULL);
+
static const struct of_device_id msm_hdmi_dt_match[] = {
{ .compatible = "qcom,hdmi-tx-8998", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8974_config },
@@ -449,6 +447,7 @@ static struct platform_driver msm_hdmi_driver = {
.driver = {
.name = "hdmi_msm",
.of_match_table = msm_hdmi_dt_match,
+ .pm = &msm_hdmi_pm_ops,
},
};
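With the pm_ops wired up, the regulators, pinctrl state, and bulk clocks are now toggled by the runtime-PM core rather than by the HPD and power-on paths directly; the rest of the driver only brackets register access with get/put, as the later hunks in hdmi_hpd.c and hdmi_i2c.c do. A minimal sketch of the consumer side (touch_hw is an illustrative name):

	static int touch_hw(struct hdmi *hdmi)
	{
		struct device *dev = &hdmi->pdev->dev;
		int ret;

		/* Runs msm_hdmi_runtime_resume() if the device was suspended. */
		ret = pm_runtime_resume_and_get(dev);
		if (ret)
			return ret;

		hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);

		pm_runtime_put(dev);	/* may trigger runtime_suspend later */
		return 0;
	}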
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index a5f481c39277..d5e572d10d6a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -41,16 +41,17 @@ struct hdmi {
/* video state: */
bool power_on;
+ bool hpd_enabled;
+	struct mutex state_mutex; /* protects power_on and hpd_enabled */
unsigned long int pixclock;
void __iomem *mmio;
void __iomem *qfprom_mmio;
phys_addr_t mmio_phy_addr;
- struct regulator_bulk_data *hpd_regs;
struct regulator_bulk_data *pwr_regs;
- struct clk **hpd_clks;
- struct clk **pwr_clks;
+ struct clk_bulk_data *pwr_clks;
+ struct clk *extp_clk;
struct gpio_desc *hpd_gpiod;
@@ -83,21 +84,12 @@ struct hdmi {
/* platform config data (ie. from DT, or pdata) */
struct hdmi_platform_config {
- /* regulators that need to be on for hpd: */
- const char **hpd_reg_names;
- int hpd_reg_cnt;
-
/* regulators that need to be on for screen pwr: */
- const char **pwr_reg_names;
+ const char * const *pwr_reg_names;
int pwr_reg_cnt;
- /* clks that need to be on for hpd: */
- const char **hpd_clk_names;
- const long unsigned *hpd_freq;
- int hpd_clk_cnt;
-
- /* clks that need to be on for screen pwr (ie pixel clk): */
- const char **pwr_clk_names;
+ /* clks that need to be on: */
+ const char * const *pwr_clk_names;
int pwr_clk_cnt;
};
@@ -224,8 +216,8 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi);
void msm_hdmi_hpd_irq(struct drm_bridge *bridge);
enum drm_connector_status msm_hdmi_bridge_detect(
struct drm_bridge *bridge);
-int msm_hdmi_hpd_enable(struct drm_bridge *bridge);
-void msm_hdmi_hpd_disable(struct hdmi *hdmi);
+void msm_hdmi_hpd_enable(struct drm_bridge *bridge);
+void msm_hdmi_hpd_disable(struct drm_bridge *bridge);
/*
* i2c adapter for ddc:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index 8bb975e82c17..b9ec14ef2c20 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -4,6 +4,7 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
#include <linux/hdmi.h>
@@ -12,71 +13,9 @@
#include "hdmi.h"
-/* Supported HDMI Audio sample rates */
-#define MSM_HDMI_SAMPLE_RATE_32KHZ 0
-#define MSM_HDMI_SAMPLE_RATE_44_1KHZ 1
-#define MSM_HDMI_SAMPLE_RATE_48KHZ 2
-#define MSM_HDMI_SAMPLE_RATE_88_2KHZ 3
-#define MSM_HDMI_SAMPLE_RATE_96KHZ 4
-#define MSM_HDMI_SAMPLE_RATE_176_4KHZ 5
-#define MSM_HDMI_SAMPLE_RATE_192KHZ 6
-#define MSM_HDMI_SAMPLE_RATE_MAX 7
-
-
-struct hdmi_msm_audio_acr {
- uint32_t n; /* N parameter for clock regeneration */
- uint32_t cts; /* CTS parameter for clock regeneration */
-};
-
-struct hdmi_msm_audio_arcs {
- unsigned long int pixclock;
- struct hdmi_msm_audio_acr lut[MSM_HDMI_SAMPLE_RATE_MAX];
-};
-
-#define HDMI_MSM_AUDIO_ARCS(pclk, ...) { (1000 * (pclk)), __VA_ARGS__ }
-
-/* Audio constants lookup table for hdmi_msm_audio_acr_setup */
-/* Valid Pixel-Clock rates: 25.2MHz, 27MHz, 27.03MHz, 74.25MHz, 148.5MHz */
-static const struct hdmi_msm_audio_arcs acr_lut[] = {
- /* 25.200MHz */
- HDMI_MSM_AUDIO_ARCS(25200, {
- {4096, 25200}, {6272, 28000}, {6144, 25200}, {12544, 28000},
- {12288, 25200}, {25088, 28000}, {24576, 25200} }),
- /* 27.000MHz */
- HDMI_MSM_AUDIO_ARCS(27000, {
- {4096, 27000}, {6272, 30000}, {6144, 27000}, {12544, 30000},
- {12288, 27000}, {25088, 30000}, {24576, 27000} }),
- /* 27.027MHz */
- HDMI_MSM_AUDIO_ARCS(27030, {
- {4096, 27027}, {6272, 30030}, {6144, 27027}, {12544, 30030},
- {12288, 27027}, {25088, 30030}, {24576, 27027} }),
- /* 74.250MHz */
- HDMI_MSM_AUDIO_ARCS(74250, {
- {4096, 74250}, {6272, 82500}, {6144, 74250}, {12544, 82500},
- {12288, 74250}, {25088, 82500}, {24576, 74250} }),
- /* 148.500MHz */
- HDMI_MSM_AUDIO_ARCS(148500, {
- {4096, 148500}, {6272, 165000}, {6144, 148500}, {12544, 165000},
- {12288, 148500}, {25088, 165000}, {24576, 148500} }),
-};
-
-static const struct hdmi_msm_audio_arcs *get_arcs(unsigned long int pixclock)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(acr_lut); i++) {
- const struct hdmi_msm_audio_arcs *arcs = &acr_lut[i];
- if (arcs->pixclock == pixclock)
- return arcs;
- }
-
- return NULL;
-}
-
int msm_hdmi_audio_update(struct hdmi *hdmi)
{
struct hdmi_audio *audio = &hdmi->audio;
- const struct hdmi_msm_audio_arcs *arcs = NULL;
bool enabled = audio->enabled;
uint32_t acr_pkt_ctrl, vbi_pkt_ctrl, aud_pkt_ctrl;
uint32_t audio_config;
@@ -94,15 +33,6 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
enabled = false;
}
- if (enabled) {
- arcs = get_arcs(hdmi->pixclock);
- if (!arcs) {
- DBG("disabling audio: unsupported pixclock: %lu",
- hdmi->pixclock);
- enabled = false;
- }
- }
-
/* Read first before writing */
acr_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_ACR_PKT_CTRL);
vbi_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
@@ -116,15 +46,12 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
uint32_t n, cts, multiplier;
enum hdmi_acr_cts select;
- n = arcs->lut[audio->rate].n;
- cts = arcs->lut[audio->rate].cts;
+ drm_hdmi_acr_get_n_cts(hdmi->pixclock, audio->rate, &n, &cts);
- if ((MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate)) {
+ if (audio->rate == 192000 || audio->rate == 176400) {
multiplier = 4;
n >>= 2; /* divide N by 4 and use multiplier */
- } else if ((MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate)) {
+ } else if (audio->rate == 96000 || audio->rate == 88200) {
multiplier = 2;
n >>= 1; /* divide N by 2 and use multiplier */
} else {
@@ -137,13 +64,11 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY;
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_N_MULTIPLIER(multiplier);
- if ((MSM_HDMI_SAMPLE_RATE_48KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate))
+ if (audio->rate == 48000 || audio->rate == 96000 ||
+ audio->rate == 192000)
select = ACR_48;
- else if ((MSM_HDMI_SAMPLE_RATE_44_1KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate))
+ else if (audio->rate == 44100 || audio->rate == 88200 ||
+ audio->rate == 176400)
select = ACR_44;
else /* default to 32k */
select = ACR_32;
@@ -204,7 +129,6 @@ int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- unsigned int rate;
int ret;
drm_dbg_driver(bridge->dev, "%u Hz, %d bit, %d channels\n",
@@ -214,25 +138,12 @@ int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
switch (params->sample_rate) {
case 32000:
- rate = MSM_HDMI_SAMPLE_RATE_32KHZ;
- break;
case 44100:
- rate = MSM_HDMI_SAMPLE_RATE_44_1KHZ;
- break;
case 48000:
- rate = MSM_HDMI_SAMPLE_RATE_48KHZ;
- break;
case 88200:
- rate = MSM_HDMI_SAMPLE_RATE_88_2KHZ;
- break;
case 96000:
- rate = MSM_HDMI_SAMPLE_RATE_96KHZ;
- break;
case 176400:
- rate = MSM_HDMI_SAMPLE_RATE_176_4KHZ;
- break;
case 192000:
- rate = MSM_HDMI_SAMPLE_RATE_192KHZ;
break;
default:
drm_err(bridge->dev, "rate[%d] not supported!\n",
@@ -245,7 +156,7 @@ int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
if (ret)
return ret;
- hdmi->audio.rate = rate;
+ hdmi->audio.rate = params->sample_rate;
hdmi->audio.channels = params->cea.channels;
hdmi->audio.enabled = true;
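The deleted lookup table is replaced by drm_hdmi_acr_get_n_cts(), which derives the Audio Clock Regeneration pair from the HDMI relation 128 × fs = f_TMDS × N / CTS. Worked example against the removed table: at a 25.2 MHz TMDS rate and 32 kHz audio, N = 4096 gives CTS = 25200000 × 4096 / (128 × 32000) = 25200, matching the first LUT row. A sketch of that arithmetic only (the real helper also special-cases the spec-recommended N values):

	/* Sketch of the spec relation, not the helper's exact code. */
	static u32 acr_cts(unsigned long tmds_rate, u32 n, u32 sample_rate)
	{
		return div_u64((u64)tmds_rate * n, 128 * sample_rate);
	}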
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 1456354c8af4..53a7ce8cc7bc 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -18,52 +18,34 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
struct drm_device *dev = bridge->dev;
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
- int i, ret;
-
- pm_runtime_get_sync(&hdmi->pdev->dev);
+ int ret;
- ret = regulator_bulk_enable(config->pwr_reg_cnt, hdmi->pwr_regs);
- if (ret)
- DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %d\n", ret);
+ pm_runtime_resume_and_get(&hdmi->pdev->dev);
- if (config->pwr_clk_cnt > 0) {
+ if (hdmi->extp_clk) {
DBG("pixclock: %lu", hdmi->pixclock);
- ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to set pixel clk: %s (%d)\n",
- config->pwr_clk_names[0], ret);
- }
- }
+ ret = clk_set_rate(hdmi->extp_clk, hdmi->pixclock);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to set extp clk rate: %d\n", ret);
- for (i = 0; i < config->pwr_clk_cnt; i++) {
- ret = clk_prepare_enable(hdmi->pwr_clks[i]);
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to enable pwr clk: %s (%d)\n",
- config->pwr_clk_names[i], ret);
- }
+ ret = clk_prepare_enable(hdmi->extp_clk);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to enable extp clk: %d\n", ret);
}
}
static void power_off(struct drm_bridge *bridge)
{
- struct drm_device *dev = bridge->dev;
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
- int i, ret;
/* TODO do we need to wait for final vblank somewhere before
* cutting the clocks?
*/
mdelay(16 + 4);
- for (i = 0; i < config->pwr_clk_cnt; i++)
- clk_disable_unprepare(hdmi->pwr_clks[i]);
-
- ret = regulator_bulk_disable(config->pwr_reg_cnt, hdmi->pwr_regs);
- if (ret)
- DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %d\n", ret);
+ if (hdmi->extp_clk)
+ clk_disable_unprepare(hdmi->extp_clk);
pm_runtime_put(&hdmi->pdev->dev);
}
@@ -320,13 +302,16 @@ static void msm_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
msm_hdmi_set_timings(hdmi, &crtc_state->adjusted_mode);
+ mutex_lock(&hdmi->state_mutex);
if (!hdmi->power_on) {
msm_hdmi_phy_resource_enable(phy);
msm_hdmi_power_on(bridge);
hdmi->power_on = true;
- if (connector->display_info.is_hdmi)
- msm_hdmi_audio_update(hdmi);
}
+ mutex_unlock(&hdmi->state_mutex);
+
+ if (connector->display_info.is_hdmi)
+ msm_hdmi_audio_update(hdmi);
drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
@@ -349,7 +334,10 @@ static void msm_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
msm_hdmi_hdcp_off(hdmi->hdcp_ctrl);
DBG("power down");
- msm_hdmi_set_mode(hdmi, false);
+
+ /* Keep the HDMI enabled if the HPD is enabled */
+ mutex_lock(&hdmi->state_mutex);
+ msm_hdmi_set_mode(hdmi, hdmi->hpd_enabled);
msm_hdmi_phy_powerdown(phy);
@@ -360,6 +348,7 @@ static void msm_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
msm_hdmi_audio_update(hdmi);
msm_hdmi_phy_resource_disable(phy);
}
+ mutex_unlock(&hdmi->state_mutex);
}
static void msm_hdmi_set_timings(struct hdmi *hdmi,
@@ -411,9 +400,6 @@ static void msm_hdmi_set_timings(struct hdmi *hdmi,
frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
DBG("frame_ctrl=%08x", frame_ctrl);
hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
-
- if (hdmi->connector->display_info.is_hdmi)
- msm_hdmi_audio_update(hdmi);
}
static const struct drm_edid *msm_hdmi_bridge_edid_read(struct drm_bridge *bridge,
@@ -440,7 +426,6 @@ static enum drm_mode_status msm_hdmi_bridge_tmds_char_rate_valid(const struct dr
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
struct msm_drm_private *priv = bridge->dev->dev_private;
struct msm_kms *kms = priv->kms;
long actual;
@@ -453,8 +438,8 @@ static enum drm_mode_status msm_hdmi_bridge_tmds_char_rate_valid(const struct dr
actual = kms->funcs->round_pixclk(kms,
tmds_rate,
hdmi_bridge->hdmi->encoder);
- else if (config->pwr_clk_cnt > 0)
- actual = clk_round_rate(hdmi->pwr_clks[0], tmds_rate);
+ else if (hdmi->extp_clk)
+ actual = clk_round_rate(hdmi->extp_clk, tmds_rate);
else
actual = tmds_rate;
@@ -474,6 +459,8 @@ static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
.atomic_post_disable = msm_hdmi_bridge_atomic_post_disable,
.edid_read = msm_hdmi_bridge_edid_read,
.detect = msm_hdmi_bridge_detect,
+ .hpd_enable = msm_hdmi_hpd_enable,
+ .hpd_disable = msm_hdmi_hpd_disable,
.hdmi_tmds_char_rate_valid = msm_hdmi_bridge_tmds_char_rate_valid,
.hdmi_clear_infoframe = msm_hdmi_bridge_clear_infoframe,
.hdmi_write_infoframe = msm_hdmi_bridge_write_infoframe,
@@ -498,16 +485,15 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi)
struct hdmi_bridge *hdmi_bridge;
int ret;
- hdmi_bridge = devm_kzalloc(hdmi->dev->dev,
- sizeof(*hdmi_bridge), GFP_KERNEL);
- if (!hdmi_bridge)
- return -ENOMEM;
+ hdmi_bridge = devm_drm_bridge_alloc(hdmi->dev->dev, struct hdmi_bridge, base,
+ &msm_hdmi_bridge_funcs);
+ if (IS_ERR(hdmi_bridge))
+ return PTR_ERR(hdmi_bridge);
hdmi_bridge->hdmi = hdmi;
INIT_WORK(&hdmi_bridge->hpd_work, msm_hdmi_hotplug_work);
bridge = &hdmi_bridge->base;
- bridge->funcs = &msm_hdmi_bridge_funcs;
bridge->ddc = hdmi->i2c;
bridge->type = DRM_MODE_CONNECTOR_HDMIA;
bridge->vendor = "Qualcomm";
@@ -515,6 +501,7 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi)
bridge->ops = DRM_BRIDGE_OP_HPD |
DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_HDMI |
+ DRM_BRIDGE_OP_HDMI_AUDIO |
DRM_BRIDGE_OP_EDID;
bridge->hdmi_audio_max_i2s_playback_channels = 8;
bridge->hdmi_audio_dev = &hdmi->pdev->dev;
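Exposing .hpd_enable/.hpd_disable through drm_bridge_funcs (together with DRM_BRIDGE_OP_HPD) hands hot-plug lifetime to the DRM core: the bridge-connector layer enables HPD when the connector is registered and disables it on teardown, which is why the explicit msm_hdmi_hpd_enable() call was removed from modeset init earlier in this patch.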
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
index 9ce0ffa35417..407e6c449ee0 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
@@ -60,68 +60,30 @@ static void msm_hdmi_phy_reset(struct hdmi *hdmi)
}
}
-static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
-{
- const struct hdmi_platform_config *config = hdmi->config;
- struct device *dev = &hdmi->pdev->dev;
- int i, ret;
-
- if (enable) {
- for (i = 0; i < config->hpd_clk_cnt; i++) {
- if (config->hpd_freq && config->hpd_freq[i]) {
- ret = clk_set_rate(hdmi->hpd_clks[i],
- config->hpd_freq[i]);
- if (ret)
- dev_warn(dev,
- "failed to set clk %s (%d)\n",
- config->hpd_clk_names[i], ret);
- }
-
- ret = clk_prepare_enable(hdmi->hpd_clks[i]);
- if (ret) {
- DRM_DEV_ERROR(dev,
- "failed to enable hpd clk: %s (%d)\n",
- config->hpd_clk_names[i], ret);
- }
- }
- } else {
- for (i = config->hpd_clk_cnt - 1; i >= 0; i--)
- clk_disable_unprepare(hdmi->hpd_clks[i]);
- }
-}
-
-int msm_hdmi_hpd_enable(struct drm_bridge *bridge)
+void msm_hdmi_hpd_enable(struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
uint32_t hpd_ctrl;
int ret;
unsigned long flags;
- ret = regulator_bulk_enable(config->hpd_reg_cnt, hdmi->hpd_regs);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to enable hpd regulators: %d\n", ret);
- goto fail;
- }
-
- ret = pinctrl_pm_select_default_state(dev);
- if (ret) {
- DRM_DEV_ERROR(dev, "pinctrl state chg failed: %d\n", ret);
- goto fail;
- }
-
if (hdmi->hpd_gpiod)
gpiod_set_value_cansleep(hdmi->hpd_gpiod, 1);
- pm_runtime_get_sync(dev);
- enable_hpd_clocks(hdmi, true);
+ ret = pm_runtime_resume_and_get(dev);
+ if (WARN_ON(ret))
+ return;
+ mutex_lock(&hdmi->state_mutex);
msm_hdmi_set_mode(hdmi, false);
msm_hdmi_phy_reset(hdmi);
msm_hdmi_set_mode(hdmi, true);
+ hdmi->hpd_enabled = true;
+ mutex_unlock(&hdmi->state_mutex);
+
hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
/* enable HPD events: */
@@ -140,34 +102,23 @@ int msm_hdmi_hpd_enable(struct drm_bridge *bridge)
hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
-
- return 0;
-
-fail:
- return ret;
}
-void msm_hdmi_hpd_disable(struct hdmi *hdmi)
+void msm_hdmi_hpd_disable(struct drm_bridge *bridge)
{
- const struct hdmi_platform_config *config = hdmi->config;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
struct device *dev = &hdmi->pdev->dev;
- int ret;
/* Disable HPD interrupt */
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
- msm_hdmi_set_mode(hdmi, false);
+ mutex_lock(&hdmi->state_mutex);
+ hdmi->hpd_enabled = false;
+ msm_hdmi_set_mode(hdmi, hdmi->power_on);
+ mutex_unlock(&hdmi->state_mutex);
- enable_hpd_clocks(hdmi, false);
pm_runtime_put(dev);
-
- ret = pinctrl_pm_select_sleep_state(dev);
- if (ret)
- dev_warn(dev, "pinctrl state chg failed: %d\n", ret);
-
- ret = regulator_bulk_disable(config->hpd_reg_cnt, hdmi->hpd_regs);
- if (ret)
- dev_warn(dev, "failed to disable hpd regulator: %d\n", ret);
}
void msm_hdmi_hpd_irq(struct drm_bridge *bridge)
@@ -202,14 +153,16 @@ void msm_hdmi_hpd_irq(struct drm_bridge *bridge)
static enum drm_connector_status detect_reg(struct hdmi *hdmi)
{
- uint32_t hpd_int_status;
+ u32 hpd_int_status = 0;
+ int ret;
- pm_runtime_get_sync(&hdmi->pdev->dev);
- enable_hpd_clocks(hdmi, true);
+ ret = pm_runtime_resume_and_get(&hdmi->pdev->dev);
+ if (ret)
+ goto out;
hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
- enable_hpd_clocks(hdmi, false);
+out:
pm_runtime_put(&hdmi->pdev->dev);
return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
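Taken together, the two msm_hdmi_set_mode() call sites implement "controller enabled iff power_on || hpd_enabled": msm_hdmi_hpd_disable() above keeps the controller on while a modeset still holds power_on, and msm_hdmi_bridge_atomic_post_disable() in hdmi_bridge.c keeps it on while HPD is still armed, with state_mutex serializing both flags.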
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
index 7aa500d24240..ebefea4fb408 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -107,11 +107,15 @@ static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c,
if (num == 0)
return num;
+ ret = pm_runtime_resume_and_get(&hdmi->pdev->dev);
+ if (ret)
+ return ret;
+
init_ddc(hdmi_i2c);
ret = ddc_clear_irq(hdmi_i2c);
if (ret)
- return ret;
+ goto fail;
for (i = 0; i < num; i++) {
struct i2c_msg *p = &msgs[i];
@@ -169,7 +173,7 @@ static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c,
hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS),
hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS),
hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL));
- return ret;
+ goto fail;
}
ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS);
@@ -202,7 +206,13 @@ static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c,
}
}
+ pm_runtime_put(&hdmi->pdev->dev);
+
return i;
+
+fail:
+ pm_runtime_put(&hdmi->pdev->dev);
+ return ret;
}
static u32 msm_hdmi_i2c_func(struct i2c_adapter *adapter)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 03120c54ced6..667573f1db7c 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -58,7 +58,11 @@ int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy)
struct device *dev = &phy->pdev->dev;
int i, ret = 0;
- pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "runtime resume failed: %d\n", ret);
+ return ret;
+ }
ret = regulator_bulk_enable(cfg->num_regs, phy->regs);
if (ret) {
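The pm_runtime_get_sync() → pm_runtime_resume_and_get() conversions in this series share one motivation: get_sync() raises the usage count even when resume fails, so a correct caller had to drop it again on error, whereas resume_and_get() drops it internally on failure and lets the caller simply return. Side by side:

	/* Old pattern; forgetting the put_noidle() leaked a reference: */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* New pattern; count already dropped on failure: */
	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;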
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c3588dc9e537..f316e6776f67 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -671,7 +671,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
break;
case MSM_INFO_GET_FLAGS:
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
ret = -EINVAL;
break;
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index ebc9ba66efb8..2995e80fec3b 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -735,7 +735,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
msm_gem_assert_locked(obj);
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
return ERR_PTR(-ENODEV);
pages = msm_gem_get_pages_locked(obj, madv);
@@ -1074,7 +1074,7 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
put_iova_spaces(obj, true);
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
GEM_WARN_ON(msm_obj->vaddr);
/* Don't drop the pages for imported dmabuf, as they are not
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 85f0257e83da..ba5c4ff76292 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -224,7 +224,7 @@ msm_gem_assert_locked(struct drm_gem_object *obj)
/* imported/exported objects are not purgeable: */
static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
{
- return msm_obj->base.import_attach || msm_obj->pin_count;
+ return drm_gem_is_imported(&msm_obj->base) || msm_obj->pin_count;
}
static inline bool is_purgeable(struct msm_gem_object *msm_obj)
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index ee267490c935..2e37913d5a6a 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -50,7 +50,7 @@ int msm_gem_prime_pin(struct drm_gem_object *obj)
struct page **pages;
int ret = 0;
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
return 0;
pages = msm_gem_pin_pages_locked(obj);
@@ -62,7 +62,7 @@ int msm_gem_prime_pin(struct drm_gem_object *obj)
void msm_gem_prime_unpin(struct drm_gem_object *obj)
{
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
return;
msm_gem_unpin_pages_locked(obj);
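The obj->import_attach tests here and in msm_drv.c, msm_gem.c, and msm_gem.h all become drm_gem_is_imported(). At the time of this series the helper amounts to a null check on the import attachment; the point of the conversion is that the test now lives behind one inline in include/drm/drm_gem.h, so the underlying representation can change without touching drivers. Roughly:

	/* Sketch; see include/drm/drm_gem.h for the exact test. */
	static inline bool drm_gem_is_imported(const struct drm_gem_object *obj)
	{
		return !!obj->import_attach;
	}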
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index 6970b0f7f457..2e1d5c343272 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -156,6 +156,7 @@ void msm_devfreq_init(struct msm_gpu *gpu)
priv->gpu_devfreq_config.downdifferential = 10;
mutex_init(&df->lock);
+ df->suspended = true;
ret = dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
DEV_PM_QOS_MIN_FREQUENCY, 0);
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index dcb49fd30402..709979fcfab6 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -150,7 +150,7 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
dev = msm_mdss->dev;
- domain = irq_domain_add_linear(dev->of_node, 32,
+ domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), 32,
&msm_mdss_irqdomain_ops, msm_mdss);
if (!domain) {
dev_err(dev, "failed to add irq_domain\n");
@@ -592,6 +592,16 @@ static const struct msm_mdss_data sa8775p_data = {
.reg_bus_bw = 74000,
};
+static const struct msm_mdss_data sar2130p_data = {
+ .ubwc_enc_version = UBWC_3_0, /* 4.0.2 in hw */
+ .ubwc_dec_version = UBWC_4_3,
+ .ubwc_swizzle = 6,
+ .ubwc_bank_spread = true,
+ .highest_bank_bit = 0,
+ .macrotile_mode = 1,
+ .reg_bus_bw = 74000,
+};
+
static const struct msm_mdss_data sc7180_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
@@ -738,6 +748,7 @@ static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,msm8998-mdss", .data = &msm8998_data },
{ .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data },
{ .compatible = "qcom,sa8775p-mdss", .data = &sa8775p_data },
+ { .compatible = "qcom,sar2130p-mdss", .data = &sar2130p_data },
{ .compatible = "qcom,sdm670-mdss", .data = &sdm670_data },
{ .compatible = "qcom,sdm845-mdss", .data = &sdm845_data },
{ .compatible = "qcom,sc7180-mdss", .data = &sc7180_data },
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index c5651c39ac2a..89dce15eed3b 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -93,7 +93,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
}
msm_gem_object_set_name(ring->bo, "ring%d", id);
- args.name = to_msm_bo(ring->bo)->name,
+ args.name = to_msm_bo(ring->bo)->name;
ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring->next = ring->start;
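The one-character msm_ringbuffer.c fix deserves a note: the trailing comma made the assignment and the following statement a single comma-operator expression, which compiles without warning and happened to behave identically here, so the bug was invisible at runtime. A minimal illustration:

	static void comma_demo(void)
	{
		int a, b;

		a = 1,	/* comma operator: this and the next line form ONE statement */
		b = 2;	/* still executes, purely by accident of context */

		(void)a;
		(void)b;
	}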
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
index 8ee00f59ca82..fcb2a7517377 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -134,7 +134,6 @@ static int lcdif_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct lcdif_drm_private *lcdif;
- struct resource *res;
int ret;
lcdif = devm_kzalloc(&pdev->dev, sizeof(*lcdif), GFP_KERNEL);
@@ -144,8 +143,7 @@ static int lcdif_load(struct drm_device *drm)
lcdif->drm = drm;
drm->dev_private = lcdif;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lcdif->base = devm_ioremap_resource(drm->dev, res);
+ lcdif->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lcdif->base))
return PTR_ERR(lcdif->base);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 59020862cf65..c183b1112bc4 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -8,6 +8,7 @@
* Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved.
*/
+#include <linux/aperture.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -215,7 +216,6 @@ static int mxsfb_load(struct drm_device *drm,
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct mxsfb_drm_private *mxsfb;
- struct resource *res;
int ret;
mxsfb = devm_kzalloc(&pdev->dev, sizeof(*mxsfb), GFP_KERNEL);
@@ -226,8 +226,7 @@ static int mxsfb_load(struct drm_device *drm,
drm->dev_private = mxsfb;
mxsfb->devdata = devdata;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mxsfb->base = devm_ioremap_resource(drm->dev, res);
+ mxsfb->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mxsfb->base))
return PTR_ERR(mxsfb->base);
@@ -361,6 +360,15 @@ static int mxsfb_probe(struct platform_device *pdev)
if (ret)
goto err_free;
+ /*
+	 * Remove early framebuffers (i.e. simplefb). The framebuffer can be
+	 * located anywhere in RAM.
+ */
+ ret = aperture_remove_all_conflicting_devices(mxsfb_driver.name);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "can't kick out existing framebuffers\n");
+
ret = drm_dev_register(drm, 0);
if (ret)
goto err_unload;
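devm_platform_ioremap_resource(pdev, 0) is an exact shorthand for the removed two-step lookup, as sketched below. The aperture call is the substantive change: LCDIF/MXSFB scan out of ordinary system RAM, so a firmware framebuffer (e.g. simplefb) can live anywhere, which is why the driver uses the remove-all variant rather than a range-based handover.

	/* Equivalent to the removed sequence (sketch): */
	static void __iomem *ioremap_res0(struct platform_device *pdev)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		return devm_ioremap_resource(&pdev->dev, res);
	}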
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 7b863355c5c6..385d24530d1e 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -2,6 +2,7 @@
ccflags-y += -I $(src)/include
ccflags-y += -I $(src)/include/nvkm
ccflags-y += -I $(src)/nvkm
+ccflags-y += -I $(src)/nvkm/subdev/gsp
ccflags-y += -I $(src)
# NVKM - HW resource manager
@@ -68,5 +69,6 @@ nouveau-y += nv17_fence.o
nouveau-y += nv50_fence.o
nouveau-y += nv84_fence.o
nouveau-y += nvc0_fence.o
+nouveau-y += gv100_fence.o
obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 67146f1e8482..c063756eaea3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -768,9 +768,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
disp->image[nv_crtc->index] = NULL;
}
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- nouveau_bo_fini(nv_crtc->cursor.nvbo);
+ nouveau_bo_unpin_del(&nv_crtc->cursor.nvbo);
nvif_event_dtor(&nv_crtc->vblank);
nvif_head_dtor(&nv_crtc->head);
kfree(nv_crtc);
@@ -1303,6 +1301,7 @@ nv04_crtc_vblank_handler(struct nvif_event *event, void *repv, u32 repc)
int
nv04_crtc_create(struct drm_device *dev, int crtc_num)
{
+ struct nouveau_cli *cli = &nouveau_drm(dev)->client;
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_crtc *nv_crtc;
struct drm_plane *primary;
@@ -1336,20 +1335,9 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
- ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64*64*4, 0x100,
- NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL,
- &nv_crtc->cursor.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo,
- NOUVEAU_GEM_DOMAIN_VRAM, false);
- if (!ret) {
- ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
- if (ret)
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- }
- if (ret)
- nouveau_bo_fini(nv_crtc->cursor.nvbo);
- }
+ ret = nouveau_bo_new_map(cli, NOUVEAU_GEM_DOMAIN_VRAM, 64 * 64 * 4, &nv_crtc->cursor.nvbo);
+ if (ret)
+ return ret;
nv04_cursor_init(nv_crtc);
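nouveau_bo_unpin_del() and nouveau_bo_new_map() (introduced elsewhere in this series) fold the unmap→unpin→fini teardown and the new→pin→map construction ladder into single calls, so callers stop open-coding the partial-failure unwind. A sketch of what the constructor presumably does, inferred from the call sites here and in dispnv50/disp.c rather than from its actual body:

	int nouveau_bo_new_map(struct nouveau_cli *cli, u32 domain, u32 size,
			       struct nouveau_bo **pnvbo)
	{
		struct nouveau_bo *nvbo;
		int ret;

		ret = nouveau_bo_new(cli, size, 0, domain, 0, 0, NULL, NULL, &nvbo);
		if (ret)
			return ret;

		ret = nouveau_bo_pin(nvbo, domain, false);	/* contig flag: assumption */
		if (ret == 0) {
			ret = nouveau_bo_map(nvbo);
			if (ret)
				nouveau_bo_unpin(nvbo);
		}
		if (ret) {
			nouveau_bo_fini(nvbo);
			return ret;
		}

		*pnvbo = nvbo;
		return 0;
	}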
diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index 28be2912ff74..d5049dee4b8c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -9,11 +9,13 @@ nouveau-y += dispnv50/core907d.o
nouveau-y += dispnv50/core917d.o
nouveau-y += dispnv50/corec37d.o
nouveau-y += dispnv50/corec57d.o
+nouveau-y += dispnv50/coreca7d.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc907d.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc37d.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc57d.o
+nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcca7d.o
nouveau-y += dispnv50/dac507d.o
nouveau-y += dispnv50/dac907d.o
@@ -31,6 +33,7 @@ nouveau-y += dispnv50/head907d.o
nouveau-y += dispnv50/head917d.o
nouveau-y += dispnv50/headc37d.o
nouveau-y += dispnv50/headc57d.o
+nouveau-y += dispnv50/headca7d.o
nouveau-y += dispnv50/wimm.o
nouveau-y += dispnv50/wimmc37b.o
@@ -39,6 +42,7 @@ nouveau-y += dispnv50/wndw.o
nouveau-y += dispnv50/wndwc37e.o
nouveau-y += dispnv50/wndwc57e.o
nouveau-y += dispnv50/wndwc67e.o
+nouveau-y += dispnv50/wndwca7e.o
nouveau-y += dispnv50/base.o
nouveau-y += dispnv50/base507c.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c
index f045515696cb..c6331bf97582 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.c
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
int version;
int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
} cores[] = {
+ { GB202_DISP_CORE_CHANNEL_DMA, 0, coreca7d_new },
{ AD102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ GA102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index f75088186fba..aa07a3ad5dfd 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -7,7 +7,10 @@
struct nv50_core {
const struct nv50_core_func *func;
+ struct nv50_disp *disp;
+
struct nv50_dmac chan;
+
bool assign_windows;
};
@@ -18,6 +21,7 @@ struct nv50_core_func {
int (*init)(struct nv50_core *);
void (*ntfy_init)(struct nouveau_bo *, u32 offset);
int (*caps_init)(struct nouveau_drm *, struct nv50_disp *);
+ u32 caps_class;
int (*ntfy_wait_done)(struct nouveau_bo *, u32 offset,
struct nvif_device *);
int (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
@@ -70,4 +74,6 @@ int corec37d_wndw_owner(struct nv50_core *);
extern const struct nv50_outp_func sorc37d;
int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **);
+
+int coreca7d_new(struct nouveau_drm *, s32, struct nv50_core **);
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index ce2cb78bbdd3..4b947b67a844 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -165,6 +165,7 @@ core507d_new_(const struct nv50_core_func *func, struct nouveau_drm *drm,
if (!(core = *pcore = kzalloc(sizeof(*core), GFP_KERNEL)))
return -ENOMEM;
core->func = func;
+ core->disp = disp;
ret = nv50_dmac_create(drm,
&oclass, 0, &args, sizeof(args),
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 7f637b8830be..83eec2f091f0 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -105,7 +105,7 @@ int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
int ret;
ret = nvif_object_ctor(&disp->disp->object, "dispCaps", 0,
- GV100_DISP_CAPS, NULL, 0, &disp->caps);
+ disp->core->func->caps_class, NULL, 0, &disp->caps);
if (ret) {
NV_ERROR(drm,
"Failed to init notifier caps region: %d\n",
@@ -162,6 +162,7 @@ corec37d = {
.init = corec37d_init,
.ntfy_init = corec37d_ntfy_init,
.caps_init = corec37d_caps_init,
+ .caps_class = GV100_DISP_CAPS,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
.wndw.owner = corec37d_wndw_owner,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
index 421d0d57e1d8..39be576eadcb 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -22,6 +22,7 @@
#include "core.h"
#include "head.h"
+#include <nvif/class.h>
#include <nvif/pushc37b.h>
#include <nvhw/class/clc57d.h>
@@ -63,6 +64,7 @@ corec57d = {
.init = corec57d_init,
.ntfy_init = corec37d_ntfy_init,
.caps_init = corec37d_caps_init,
+ .caps_class = GV100_DISP_CAPS,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
.wndw.owner = corec37d_wndw_owner,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/coreca7d.c b/drivers/gpu/drm/nouveau/dispnv50/coreca7d.c
new file mode 100644
index 000000000000..171727be400e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/coreca7d.c
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "core.h"
+#include "head.h"
+
+#include <nvif/class.h>
+#include <nvif/pushc97b.h>
+
+#include <nvhw/class/clca7d.h>
+
+#include <nouveau_bo.h>
+
+static int
+coreca7d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
+{
+ const u64 ntfy_addr = core->disp->sync->offset + NV50_DISP_CORE_NTFY;
+ const u32 ntfy_hi = upper_32_bits(ntfy_addr);
+ const u32 ntfy_lo = lower_32_bits(ntfy_addr);
+ struct nvif_push *push = &core->chan.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 5 + (ntfy ? 5 + 2 : 0));
+ if (ret)
+ return ret;
+
+ if (ntfy) {
+ PUSH_MTHD(push, NVCA7D, SET_SURFACE_ADDRESS_HI_NOTIFIER, ntfy_hi,
+
+ SET_SURFACE_ADDRESS_LO_NOTIFIER,
+ NVVAL(NVCA7D, SET_SURFACE_ADDRESS_LO_NOTIFIER, ADDRESS_LO, ntfy_lo >> 4) |
+ NVDEF(NVCA7D, SET_SURFACE_ADDRESS_LO_NOTIFIER, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7D, SET_SURFACE_ADDRESS_LO_NOTIFIER, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7D, SET_NOTIFIER_CONTROL,
+ NVDEF(NVCA7D, SET_NOTIFIER_CONTROL, MODE, WRITE) |
+ NVDEF(NVCA7D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE));
+ }
+
+ PUSH_MTHD(push, NVCA7D, SET_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_CURS],
+ SET_WINDOW_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_WNDW]);
+
+ PUSH_MTHD(push, NVCA7D, UPDATE,
+ NVDEF(NVCA7D, UPDATE, RELEASE_ELV, TRUE) |
+ NVDEF(NVCA7D, UPDATE, SPECIAL_HANDLING, NONE) |
+ NVDEF(NVCA7D, UPDATE, INHIBIT_INTERRUPTS, FALSE));
+
+ if (ntfy) {
+ PUSH_MTHD(push, NVCA7D, SET_NOTIFIER_CONTROL,
+ NVDEF(NVCA7D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE));
+ }
+
+ return PUSH_KICK(push);
+}
+
+static int
+coreca7d_init(struct nv50_core *core)
+{
+ struct nvif_push *push = &core->chan.push;
+ const u32 windows = 8, heads = 4;
+ int ret, i;
+
+ ret = PUSH_WAIT(push, windows * 6 + heads * 6);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < windows; i++) {
+ PUSH_MTHD(push, NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(i),
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED1BPP, TRUE) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED2BPP, TRUE) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED4BPP, TRUE) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED8BPP, TRUE),
+
+ WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(i), 0x00000000);
+
+ PUSH_MTHD(push, NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS(i),
+ NVVAL(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, MAX_PIXELS_FETCHED_PER_LINE, 0x7fff) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, ILUT_ALLOWED, TRUE) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, INPUT_SCALER_TAPS, TAPS_2) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, UPSCALING_ALLOWED, FALSE),
+
+ WINDOW_SET_PHYSICAL(i), BIT(i));
+ }
+
+ for (i = 0; i < heads; i++) {
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS(i),
+ NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, CURSOR, USAGE_W256_H256) |
+ NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, OLUT_ALLOWED, TRUE) |
+ NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, OUTPUT_SCALER_TAPS, TAPS_2) |
+ NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, UPSCALING_ALLOWED, TRUE));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_TILE_MASK(i), BIT(i));
+
+ PUSH_MTHD(push, NVCA7D, TILE_SET_TILE_SIZE(i), 0);
+ }
+
+ core->assign_windows = true;
+ return PUSH_KICK(push);
+}
+
+static const struct nv50_core_func
+coreca7d = {
+ .init = coreca7d_init,
+ .ntfy_init = corec37d_ntfy_init,
+ .caps_init = corec37d_caps_init,
+ .caps_class = GB202_DISP_CAPS,
+ .ntfy_wait_done = corec37d_ntfy_wait_done,
+ .update = coreca7d_update,
+ .wndw.owner = corec37d_wndw_owner,
+ .head = &headca7d,
+ .sor = &sorc37d,
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ .crc = &crcca7d,
+#endif
+};
+
+int
+coreca7d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+ return core507d_new_(&coreca7d, drm, oclass, pcore);
+}
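Note the addressing model in this new Blackwell core: instead of pointing methods at ctxdma handles, the NVCA7D methods take 64-bit physical addresses split across HI/LO words, with the LO word carrying address bits [31:4] plus target and enable fields (hence ntfy_lo >> 4). That is also why the crc.c and disp.c hunks below skip ctxdma object creation on GB202 and newer.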
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c
index 5936b6b3b15d..deb6af40ef32 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c
@@ -509,6 +509,10 @@ nv50_crc_ctx_init(struct nv50_head *head, struct nvif_mmu *mmu,
if (ret)
return ret;
+ /* No CTXDMAs on Blackwell. */
+ if (core->chan.base.user.oclass >= GB202_DISP_CORE_CHANNEL_DMA)
+ return 0;
+
ret = nvif_object_ctor(&core->chan.base.user, "kmsCrcNtfyCtxDma",
NV50_DISP_HANDLE_CRC_CTX(head, idx),
NV_DMA_IN_MEMORY,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.h b/drivers/gpu/drm/nouveau/dispnv50/crc.h
index 4823f1fde2dd..75a2009e8193 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.h
@@ -94,6 +94,7 @@ void nv50_crc_atomic_clr(struct nv50_head *);
extern const struct nv50_crc_func crc907d;
extern const struct nv50_crc_func crcc37d;
extern const struct nv50_crc_func crcc57d;
+extern const struct nv50_crc_func crcca7d;
#else /* IS_ENABLED(CONFIG_DEBUG_FS) */
struct nv50_crc {};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcca7d.c b/drivers/gpu/drm/nouveau/dispnv50/crcca7d.c
new file mode 100644
index 000000000000..912f59aebe87
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/crcca7d.c
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "crcc37d.h"
+#include "core.h"
+#include "head.h"
+
+#include <nvif/pushc97b.h>
+
+#include <nvhw/class/clca7d.h>
+
+static int
+crcca7d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, ctx ? 3 : 2);
+ if (ret)
+ return ret;
+
+ if (ctx) {
+ const u32 crc_hi = upper_32_bits(ctx->mem.addr);
+ const u32 crc_lo = lower_32_bits(ctx->mem.addr);
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_HI_CRC(i), crc_hi,
+
+ HEAD_SET_SURFACE_ADDRESS_LO_CRC(i),
+ NVVAL(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, ADDRESS_LO, crc_lo >> 4) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, ENABLE, ENABLE));
+ } else {
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC(i),
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, ENABLE, DISABLE));
+ }
+
+ return 0;
+}
+
+static int
+crcca7d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
+ struct nv50_crc_notifier_ctx *ctx)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int primary_crc, ret;
+
+ if (!source) {
+ ret = PUSH_WAIT(push, 1);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CRC_CONTROL(i), 0);
+
+ return crcca7d_set_ctx(head, NULL);
+ }
+
+ switch (source) {
+ case NV50_CRC_SOURCE_TYPE_SOR:
+ primary_crc = NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(or);
+ break;
+ case NV50_CRC_SOURCE_TYPE_SF:
+ primary_crc = NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF;
+ break;
+ default:
+ break;
+ }
+
+ ret = crcca7d_set_ctx(head, ctx);
+ if (ret)
+ return ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CRC_CONTROL(i),
+ NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
+ NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
+ NVVAL(NVCA7D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, primary_crc) |
+ NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, SECONDARY_CRC, NONE) |
+ NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE));
+
+ return 0;
+}
+
+const struct nv50_crc_func
+crcca7d = {
+ .set_src = crcca7d_set_src,
+ .set_ctx = crcca7d_set_ctx,
+ .get_entry = crcc37d_get_entry,
+ .ctx_finished = crcc37d_ctx_finished,
+ .flip_threshold = CRCC37D_FLIP_THRESHOLD,
+ .num_entries = CRCC37D_MAX_ENTRIES,
+ .notifier_len = sizeof(struct crcc37d_notifier),
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c
index 31d8b2e4791d..557bd05240fa 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c
@@ -31,6 +31,7 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
int version;
int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
} curses[] = {
+ { GB202_DISP_CURSOR, 0, cursc37a_new },
{ GA102_DISP_CURSOR, 0, cursc37a_new },
{ TU102_DISP_CURSOR, 0, cursc37a_new },
{ GV100_DISP_CURSOR, 0, cursc37a_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 504cb3f2054b..e5d37eee4301 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -279,6 +279,16 @@ nv50_dmac_create(struct nouveau_drm *drm,
if (syncbuf < 0)
return 0;
+ /* No CTXDMAs on Blackwell. */
+ if (disp->oclass >= GB202_DISP) {
+ /* "handle != NULL_HANDLE" is used to determine enable status
+ * in a number of places, so fill in some fake object handles.
+ */
+ dmac->sync.handle = NV50_DISP_HANDLE_SYNCBUF;
+ dmac->vram.handle = NV50_DISP_HANDLE_VRAM;
+ return 0;
+ }
+
ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
@@ -775,10 +785,8 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
union hdmi_infoframe infoframe = { 0 };
const u8 rekey = 56; /* binary driver, and tegra, constant */
u32 max_ac_packet;
- struct {
- struct nvif_outp_infoframe_v0 infoframe;
- u8 data[17];
- } args = { 0 };
+ DEFINE_RAW_FLEX(struct nvif_outp_infoframe_v0, args, data, 17);
+ const u8 data_len = __member_size(args->data);
int ret, size;
max_ac_packet = mode->htotal - mode->hdisplay;
@@ -815,29 +823,29 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
return;
/* AVI InfoFrame. */
- args.infoframe.version = 0;
- args.infoframe.head = nv_crtc->index;
+ args->version = 0;
+ args->head = nv_crtc->index;
if (!drm_hdmi_avi_infoframe_from_display_mode(&infoframe.avi, &nv_connector->base, mode)) {
drm_hdmi_avi_infoframe_quant_range(&infoframe.avi, &nv_connector->base, mode,
HDMI_QUANTIZATION_RANGE_FULL);
- size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data));
+ size = hdmi_infoframe_pack(&infoframe, args->data, data_len);
} else {
size = 0;
}
- nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_AVI, &args.infoframe, size);
+ nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_AVI, args, size);
/* Vendor InfoFrame. */
- memset(&args.data, 0, sizeof(args.data));
+ memset(args->data, 0, data_len);
if (!drm_hdmi_vendor_infoframe_from_display_mode(&infoframe.vendor.hdmi,
&nv_connector->base, mode))
- size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data));
+ size = hdmi_infoframe_pack(&infoframe, args->data, data_len);
else
size = 0;
- nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_VSI, &args.infoframe, size);
+ nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_VSI, args, size);
nv_encoder->hdmi.enabled = true;
}
@@ -2810,10 +2818,7 @@ nv50_display_destroy(struct drm_device *dev)
nvif_object_dtor(&disp->caps);
nv50_core_del(&disp->core);
- nouveau_bo_unmap(disp->sync);
- if (disp->sync)
- nouveau_bo_unpin(disp->sync);
- nouveau_bo_fini(disp->sync);
+ nouveau_bo_unpin_del(&disp->sync);
nouveau_display(dev)->priv = NULL;
kfree(disp);
@@ -2845,20 +2850,7 @@ nv50_display_create(struct drm_device *dev)
dev->mode_config.normalize_zpos = true;
/* small shared memory area we use for notifiers and semaphores */
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
- NOUVEAU_GEM_DOMAIN_VRAM,
- 0, 0x0000, NULL, NULL, &disp->sync);
- if (!ret) {
- ret = nouveau_bo_pin(disp->sync, NOUVEAU_GEM_DOMAIN_VRAM, true);
- if (!ret) {
- ret = nouveau_bo_map(disp->sync);
- if (ret)
- nouveau_bo_unpin(disp->sync);
- }
- if (ret)
- nouveau_bo_fini(disp->sync);
- }
-
+ ret = nouveau_bo_new_map(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, PAGE_SIZE, &disp->sync);
if (ret)
goto out;
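DEFINE_RAW_FLEX(type, name, member, count), from <linux/overflow.h>, declares correctly sized on-stack storage for a structure ending in a flexible-array member plus a type *name pointing at it, and __member_size() recovers the flex member's byte count at compile time; together they replace the hand-rolled wrapper struct and ARRAY_SIZE in the hunk above. A condensed sketch of the same shape (struct msg and demo() are illustrative only):

	#include <linux/overflow.h>
	#include <linux/string.h>

	struct msg {
		u8 version;
		u8 data[];	/* flexible-array member */
	};

	static void demo(void)
	{
		DEFINE_RAW_FLEX(struct msg, m, data, 17);	/* header + 17 bytes */

		m->version = 0;
		memset(m->data, 0, __member_size(m->data));	/* 17, as above */
	}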
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index d7c74cc43ba5..3dd742b4f823 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -577,6 +577,7 @@ nv50_head_create(struct drm_device *dev, int index)
return ERR_PTR(-ENOMEM);
head->func = disp->core->func->head;
+ head->disp = disp;
head->base.index = index;
if (disp->disp->object.oclass < GF110_DISP)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index e9d17037ffcf..8bd2fcb1eff5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -13,6 +13,8 @@
struct nv50_head {
const struct nv50_head_func *func;
+ struct nv50_disp *disp;
+
struct nouveau_crtc base;
struct nv50_crc crc;
struct nv50_lut olut;
@@ -98,4 +100,7 @@ int headc37d_dither(struct nv50_head *, struct nv50_head_atom *);
void headc37d_static_wndw_map(struct nv50_head *, struct nv50_head_atom *);
extern const struct nv50_head_func headc57d;
+bool headc57d_olut(struct nv50_head *, struct nv50_head_atom *, int size);
+
+extern const struct nv50_head_func headca7d;
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
index fde4087e7691..3f8ba495de8f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -182,7 +182,7 @@ headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-static bool
+bool
headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
if (size != 0 && size != 256 && size != 1024)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headca7d.c b/drivers/gpu/drm/nouveau/dispnv50/headca7d.c
new file mode 100644
index 000000000000..eeaeb15aa664
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/headca7d.c
@@ -0,0 +1,297 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "head.h"
+#include "atom.h"
+#include "core.h"
+
+#include <nvif/pushc97b.h>
+
+#include <nvhw/class/clca7d.h>
+
+static int
+headca7d_display_id(struct nv50_head *head, u32 display_id)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_DISPLAY_ID(i, 0), display_id);
+
+ return 0;
+}
+
+static int
+headca7d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ u8 depth;
+ int ret;
+
+ switch (asyh->or.depth) {
+ case 6:
+ depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444;
+ break;
+ case 5:
+ depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444;
+ break;
+ case 2:
+ depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444;
+ break;
+ case 0:
+ depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE(i),
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, CRC_MODE, asyh->or.crc_raster) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, HSYNC_POLARITY, asyh->or.nhsync) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, VSYNC_POLARITY, asyh->or.nvsync) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, PIXEL_DEPTH, depth) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, COLOR_SPACE_OVERRIDE, DISABLE) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, EXT_PACKET_WIN, NONE));
+
+ return 0;
+}
+
+static int
+headca7d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_PROCAMP(i),
+ NVDEF(NVCA7D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
+ NVDEF(NVCA7D, HEAD_SET_PROCAMP, CHROMA_LPF, DISABLE) |
+ NVDEF(NVCA7D, HEAD_SET_PROCAMP, DYNAMIC_RANGE, VESA));
+
+ return 0;
+}
+
+static int
+headca7d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_DITHER_CONTROL(i),
+ NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
+ NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
+ NVDEF(NVCA7D, HEAD_SET_DITHER_CONTROL, OFFSET_ENABLE, DISABLE) |
+ NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
+ NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
+
+ return 0;
+}
+
+static int
+headca7d_curs_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 4);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR(i, 0),
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+headca7d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const u32 curs_hi = upper_32_bits(asyh->curs.offset);
+ const u32 curs_lo = lower_32_bits(asyh->curs.offset);
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 7);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_HI_CURSOR(i, 0), curs_hi);
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR(i, 0),
+ NVVAL(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, ADDRESS_LO, curs_lo >> 4) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0),
+
+ HEAD_SET_CONTROL_CURSOR_COMPOSITION(i),
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, K1, 0xff) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, CURSOR_COLOR_FACTOR_SELECT,
+ K1) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, VIEWPORT_COLOR_FACTOR_SELECT,
+ NEG_K1_TIMES_SRC) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, MODE, BLEND));
+
+ return 0;
+}
+
+static int
+headca7d_olut_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT(i),
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+headca7d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const u32 olut_hi = upper_32_bits(asyh->olut.offset);
+ const u32 olut_lo = lower_32_bits(asyh->olut.offset);
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 6);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_HI_OLUT(i), olut_hi,
+
+ HEAD_SET_SURFACE_ADDRESS_LO_OLUT(i),
+ NVVAL(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, ADDRESS_LO, olut_lo >> 4) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_OLUT_CONTROL(i),
+ NVVAL(NVCA7D, HEAD_SET_OLUT_CONTROL, INTERPOLATE, asyh->olut.output_mode) |
+ NVDEF(NVCA7D, HEAD_SET_OLUT_CONTROL, MIRROR, DISABLE) |
+ NVVAL(NVCA7D, HEAD_SET_OLUT_CONTROL, MODE, asyh->olut.mode) |
+ NVVAL(NVCA7D, HEAD_SET_OLUT_CONTROL, SIZE, asyh->olut.size),
+
+ HEAD_SET_OLUT_FP_NORM_SCALE(i), 0xffffffff);
+
+ return 0;
+}
+
+static int
+headca7d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ struct nv50_head_mode *m = &asyh->mode;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 11);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_RASTER_SIZE(i),
+ NVVAL(NVCA7D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
+ NVVAL(NVCA7D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
+
+ HEAD_SET_RASTER_SYNC_END(i),
+ NVVAL(NVCA7D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
+ NVVAL(NVCA7D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
+
+ HEAD_SET_RASTER_BLANK_END(i),
+ NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
+ NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
+
+ HEAD_SET_RASTER_BLANK_START(i),
+ NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
+ NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL(i),
+ NVDEF(NVCA7D, HEAD_SET_CONTROL, STRUCTURE, PROGRESSIVE));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
+ NVVAL(NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(i),
+ NVVAL(NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, HERTZ, m->clock * 1000));
+
+ return 0;
+}
+
+static int
+headca7d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 4);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_VIEWPORT_SIZE_IN(i),
+ NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_IN, WIDTH, asyh->view.iW) |
+ NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_IN, HEIGHT, asyh->view.iH));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_VIEWPORT_SIZE_OUT(i),
+ NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_OUT, WIDTH, asyh->view.oW) |
+ NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_OUT, HEIGHT, asyh->view.oH));
+ return 0;
+}
+
+const struct nv50_head_func
+headca7d = {
+ .view = headca7d_view,
+ .mode = headca7d_mode,
+ .olut = headc57d_olut,
+ .ilut_check = head907d_ilut_check,
+ .olut_identity = true,
+ .olut_size = 1024,
+ .olut_set = headca7d_olut_set,
+ .olut_clr = headca7d_olut_clr,
+ .curs_layout = head917d_curs_layout,
+ .curs_format = headc37d_curs_format,
+ .curs_set = headca7d_curs_set,
+ .curs_clr = headca7d_curs_clr,
+ .dither = headca7d_dither,
+ .procamp = headca7d_procamp,
+ .or = headca7d_or,
+ .static_wndw_map = headc37d_static_wndw_map,
+ .display_id = headca7d_display_id,
+};
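A note on the PUSH_WAIT() counts used throughout this new file: the argument is the number of push-buffer dwords about to be emitted, one header per PUSH_MTHD() group plus one dword per data word, where consecutive methods listed in a single PUSH_MTHD() call share one header. Worked out for headca7d_curs_set(), which reserves 7:

	/*
	 * PUSH_WAIT(push, 7) in headca7d_curs_set() accounts for:
	 *
	 *   1 header + 1 data   SET_SURFACE_ADDRESS_HI_CURSOR
	 *   1 header + 1 data   SET_SURFACE_ADDRESS_LO_CURSOR
	 *   1 header + 2 data   SET_CONTROL_CURSOR +
	 *                       SET_CONTROL_CURSOR_COMPOSITION (one group)
	 *   ------------------
	 *   7 dwords total
	 *
	 * The same arithmetic gives 11 for headca7d_mode() (a four-method
	 * raster group plus three single-method writes) and 2 for the
	 * simple one-method helpers.
	 */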
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.c b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
index 566fbddfc8d7..53c9ab6c138b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimm.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
@@ -31,6 +31,7 @@ nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
int version;
int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
} wimms[] = {
+ { GB202_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ GA102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ TU102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index f6be426dd525..11d5b923d6e7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -556,14 +556,24 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
return ret;
if (wndw->ctxdma.parent) {
- ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
- if (IS_ERR(ctxdma)) {
- nouveau_bo_unpin(nvbo);
- return PTR_ERR(ctxdma);
+ if (wndw->wndw.base.user.oclass < GB202_DISP_WINDOW_CHANNEL_DMA) {
+ ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
+ if (IS_ERR(ctxdma)) {
+ nouveau_bo_unpin(nvbo);
+ return PTR_ERR(ctxdma);
+ }
+
+ if (asyw->visible)
+ asyw->image.handle[0] = ctxdma->object.handle;
+ } else {
+ /* No CTXDMAs on Blackwell. */
+ if (asyw->visible) {
+ /* "handle != NULL_HANDLE" is used to determine enable status
+ * in a number of places, so fill in a fake object handle.
+ */
+ asyw->image.handle[0] = NV50_DISP_HANDLE_WNDW_CTX(0);
+ }
}
-
- if (asyw->visible)
- asyw->image.handle[0] = ctxdma->object.handle;
}
ret = drm_gem_plane_helper_prepare_fb(plane, state);
@@ -901,6 +911,7 @@ nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
int (*new)(struct nouveau_drm *, enum drm_plane_type,
int, s32, struct nv50_wndw **);
} wndws[] = {
+ { GB202_DISP_WINDOW_CHANNEL_DMA, 0, wndwca7e_new },
{ GA102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc67e_new },
{ TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index 76a6ae5d5652..90d100514bef 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -134,6 +134,9 @@ int wndwc57e_csc_clr(struct nv50_wndw *);
int wndwc67e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
struct nv50_wndw **);
+int wndwca7e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
+ struct nv50_wndw **);
+
int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index,
struct nv50_wndw **);
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index 50a7b97d37a2..554c4f91f8be 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -25,6 +25,7 @@
#include <drm/drm_atomic_helper.h>
#include <nouveau_bo.h>
+#include <nvif/class.h>
#include <nvif/if0014.h>
#include <nvif/pushc37b.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c
new file mode 100644
index 000000000000..0d8e9a9d1a57
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "wndw.h"
+#include "atom.h"
+
+#include <nvif/pushc97b.h>
+
+#include <nvhw/class/clca7e.h>
+
+#include <nouveau_bo.h>
+
+static int
+wndwca7e_image_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 4);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_PRESENT_CONTROL,
+ NVVAL(NVCA7E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, 0) |
+ NVDEF(NVCA7E, SET_PRESENT_CONTROL, BEGIN_MODE, NON_TEARING));
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_ISO(0),
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+wndwca7e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ const u32 iso0_hi = upper_32_bits(asyw->image.offset[0]);
+ const u32 iso0_lo = lower_32_bits(asyw->image.offset[0]);
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret, kind;
+
+ if (asyw->image.kind)
+ kind = NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_BLOCKLINEAR;
+ else
+ kind = NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_PITCH;
+
+ ret = PUSH_WAIT(push, 17);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_HI_ISO(0), iso0_hi);
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_ISO(0),
+ NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, ADDRESS_LO, iso0_lo >> 4) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, TARGET, PHYSICAL_NVM) |
+ NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, KIND, kind) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7E, SET_PRESENT_CONTROL,
+ NVVAL(NVCA7E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
+ NVVAL(NVCA7E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVDEF(NVCA7E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
+
+ PUSH_MTHD(push, NVCA7E, SET_SIZE,
+ NVVAL(NVCA7E, SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NVCA7E, SET_SIZE, HEIGHT, asyw->image.h),
+
+ SET_STORAGE,
+ NVVAL(NVCA7E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh),
+
+ SET_PARAMS,
+ NVVAL(NVCA7E, SET_PARAMS, FORMAT, asyw->image.format) |
+ NVDEF(NVCA7E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
+ NVDEF(NVCA7E, SET_PARAMS, SWAP_UV, DISABLE) |
+ NVDEF(NVCA7E, SET_PARAMS, FMT_ROUNDING_MODE, ROUND_TO_NEAREST),
+
+ SET_PLANAR_STORAGE(0),
+ NVVAL(NVCA7E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NVCA7E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
+
+ PUSH_MTHD(push, NVCA7E, SET_POINT_IN(0),
+ NVVAL(NVCA7E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
+ NVVAL(NVCA7E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
+
+ PUSH_MTHD(push, NVCA7E, SET_SIZE_IN,
+ NVVAL(NVCA7E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
+ NVVAL(NVCA7E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
+
+ PUSH_MTHD(push, NVCA7E, SET_SIZE_OUT,
+ NVVAL(NVCA7E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
+ NVVAL(NVCA7E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
+
+ return 0;
+}
+
+static int
+wndwca7e_ilut_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT,
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+wndwca7e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ const u32 ilut_hi = upper_32_bits(asyw->xlut.i.offset);
+ const u32 ilut_lo = lower_32_bits(asyw->xlut.i.offset);
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 5);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_HI_ILUT, ilut_hi,
+
+ SET_SURFACE_ADDRESS_LO_ILUT,
+ NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, ADDRESS_LO, ilut_lo >> 4) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7E, SET_ILUT_CONTROL,
+ NVVAL(NVCA7E, SET_ILUT_CONTROL, SIZE, asyw->xlut.i.size) |
+ NVVAL(NVCA7E, SET_ILUT_CONTROL, MODE, asyw->xlut.i.mode) |
+ NVVAL(NVCA7E, SET_ILUT_CONTROL, INTERPOLATE, asyw->xlut.i.output_mode));
+
+ return 0;
+}
+
+static int
+wndwca7e_ntfy_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER,
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+wndwca7e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
+ const u64 ntfy_addr = disp->sync->offset + asyw->ntfy.offset;
+ const u32 ntfy_hi = upper_32_bits(ntfy_addr);
+ const u32 ntfy_lo = lower_32_bits(ntfy_addr);
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 5);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_HI_NOTIFIER, ntfy_hi,
+
+ SET_SURFACE_ADDRESS_LO_NOTIFIER,
+ NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, ADDRESS_LO, ntfy_lo >> 4) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7E, SET_NOTIFIER_CONTROL,
+ NVVAL(NVCA7E, SET_NOTIFIER_CONTROL, MODE, asyw->ntfy.awaken));
+
+ return 0;
+}
+
+static const struct nv50_wndw_func
+wndwca7e = {
+ .acquire = wndwc37e_acquire,
+ .release = wndwc37e_release,
+ .ntfy_set = wndwca7e_ntfy_set,
+ .ntfy_clr = wndwca7e_ntfy_clr,
+ .ntfy_reset = corec37d_ntfy_init,
+ .ntfy_wait_begun = base507c_ntfy_wait_begun,
+ .ilut = wndwc57e_ilut,
+ .ilut_identity = true,
+ .ilut_size = 1024,
+ .xlut_set = wndwca7e_ilut_set,
+ .xlut_clr = wndwca7e_ilut_clr,
+ .csc = base907c_csc,
+ .csc_set = wndwc57e_csc_set,
+ .csc_clr = wndwc57e_csc_clr,
+ .image_set = wndwca7e_image_set,
+ .image_clr = wndwca7e_image_clr,
+ .blend_set = wndwc37e_blend_set,
+ .update = wndwc37e_update,
+};
+
+int
+wndwca7e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+ s32 oclass, struct nv50_wndw **pwndw)
+{
+ return wndwc37e_new_(&wndwca7e, drm, type, index, oclass, BIT(index >> 1), pwndw);
+}
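Unlike earlier window classes, which reference framebuffers through ctxdma handles, this file programs raw addresses as HI/LO register pairs: upper_32_bits() goes in the HI method, and the LO method packs address bits 31:4 (hence the >> 4, shifted back into the 31:4 field by NVVAL) together with TARGET and ENABLE in the low bits, implying 16-byte alignment. A stand-alone sketch of the packing, with hypothetical names; the field positions are taken from the SET_SURFACE_ADDRESS_LO_* definitions in the class headers added by this patch:

	#include <stdint.h>

	/*
	 * Hypothetical helper, for illustration only: pack a 64-bit,
	 * 16-byte-aligned surface address into the HI/LO pair used by the
	 * NVCA7E/NVCA7D SET_SURFACE_ADDRESS_* methods (ADDRESS_LO 31:4,
	 * TARGET 3:2, ENABLE 0:0). These names are not part of the driver.
	 */
	struct surf_addr_regs {
		uint32_t hi;	/* address bits 63:32 */
		uint32_t lo;	/* address bits 31:4, plus TARGET and ENABLE */
	};

	static struct surf_addr_regs
	pack_surf_addr(uint64_t addr, uint32_t target, int enable)
	{
		struct surf_addr_regs r;

		r.hi = (uint32_t)(addr >> 32);
		r.lo = ((uint32_t)addr & 0xfffffff0u) |	/* ADDRESS_LO, 31:4 */
		       ((target & 0x3u) << 2) |		/* TARGET, 3:2 */
		       (enable ? 0x1u : 0x0u);		/* ENABLE, 0:0 */
		return r;
	}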
diff --git a/drivers/gpu/drm/nouveau/gv100_fence.c b/drivers/gpu/drm/nouveau/gv100_fence.c
new file mode 100644
index 000000000000..cccdeca72002
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/gv100_fence.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_fence.h"
+
+#include "nv50_display.h"
+
+#include <nvif/push906f.h>
+
+#include <nvhw/class/clc36f.h>
+
+static int
+gv100_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
+{
+ struct nvif_push *push = &chan->chan.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 8);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVC36F, SEM_ADDR_LO, lower_32_bits(virtual),
+ SEM_ADDR_HI, upper_32_bits(virtual),
+ SEM_PAYLOAD_LO, sequence);
+
+ PUSH_MTHD(push, NVC36F, SEM_EXECUTE,
+ NVDEF(NVC36F, SEM_EXECUTE, OPERATION, RELEASE) |
+ NVDEF(NVC36F, SEM_EXECUTE, RELEASE_WFI, EN) |
+ NVDEF(NVC36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT) |
+ NVDEF(NVC36F, SEM_EXECUTE, RELEASE_TIMESTAMP, DIS));
+
+ PUSH_MTHD(push, NVC36F, NON_STALL_INTERRUPT, 0);
+
+ PUSH_KICK(push);
+ return 0;
+}
+
+static int
+gv100_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
+{
+ struct nvif_push *push = &chan->chan.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 6);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVC36F, SEM_ADDR_LO, lower_32_bits(virtual),
+ SEM_ADDR_HI, upper_32_bits(virtual),
+ SEM_PAYLOAD_LO, sequence);
+
+ PUSH_MTHD(push, NVC36F, SEM_EXECUTE,
+ NVDEF(NVC36F, SEM_EXECUTE, OPERATION, ACQ_CIRC_GEQ) |
+ NVDEF(NVC36F, SEM_EXECUTE, ACQUIRE_SWITCH_TSG, EN) |
+ NVDEF(NVC36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT));
+
+ PUSH_KICK(push);
+ return 0;
+}
+
+static int
+gv100_fence_context_new(struct nouveau_channel *chan)
+{
+ struct nv84_fence_chan *fctx;
+ int ret;
+
+ ret = nv84_fence_context_new(chan);
+ if (ret)
+ return ret;
+
+ fctx = chan->fence;
+ fctx->base.emit32 = gv100_fence_emit32;
+ fctx->base.sync32 = gv100_fence_sync32;
+ return 0;
+}
+
+int
+gv100_fence_create(struct nouveau_drm *drm)
+{
+ struct nv84_fence_priv *priv;
+ int ret;
+
+ ret = nv84_fence_create(drm);
+ if (ret)
+ return ret;
+
+ priv = drm->fence;
+ priv->base.context_new = gv100_fence_context_new;
+ return 0;
+}
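gv100_fence_emit32() releases a 64-bit-addressed, 32-bit-payload semaphore (with wait-for-idle and a non-stall interrupt to wake waiters), while gv100_fence_sync32() acquires with OPERATION_ACQ_CIRC_GEQ, a wrap-tolerant "payload has reached sequence" test. The class header below only names the operation; assuming the conventional signed-difference definition of a circular >= compare, the host-side equivalent would be:

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * Assumed semantics of ACQ_CIRC_GEQ (not spelled out in clc36f.h):
	 * treat the 32-bit difference as signed, so the comparison keeps
	 * working across sequence-number wraparound.
	 */
	static bool circ_geq(uint32_t payload, uint32_t sequence)
	{
		return (int32_t)(payload - sequence) >= 0;
	}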
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h
new file mode 100644
index 000000000000..8735dda4c8a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef _clc36f_h_
+#define _clc36f_h_
+
+#define NVC36F_NON_STALL_INTERRUPT (0x00000020)
+#define NVC36F_NON_STALL_INTERRUPT_HANDLE 31:0
+#define NVC36F_SEM_ADDR_LO (0x0000005c)
+#define NVC36F_SEM_ADDR_LO_OFFSET 31:2
+#define NVC36F_SEM_ADDR_HI (0x00000060)
+#define NVC36F_SEM_ADDR_HI_OFFSET 7:0
+#define NVC36F_SEM_PAYLOAD_LO (0x00000064)
+#define NVC36F_SEM_PAYLOAD_LO_PAYLOAD 31:0
+#define NVC36F_SEM_PAYLOAD_HI (0x00000068)
+#define NVC36F_SEM_PAYLOAD_HI_PAYLOAD 31:0
+#define NVC36F_SEM_EXECUTE (0x0000006c)
+#define NVC36F_SEM_EXECUTE_OPERATION 2:0
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000
+#define NVC36F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_STRICT_GEQ 0x00000002
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_CIRC_GEQ 0x00000003
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_AND 0x00000004
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_NOR 0x00000005
+#define NVC36F_SEM_EXECUTE_OPERATION_REDUCTION 0x00000006
+#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG 12:12
+#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_DIS 0x00000000
+#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_EN 0x00000001
+#define NVC36F_SEM_EXECUTE_RELEASE_WFI 20:20
+#define NVC36F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000
+#define NVC36F_SEM_EXECUTE_RELEASE_WFI_EN 0x00000001
+#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE 24:24
+#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000
+#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE_64BIT 0x00000001
+#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP 25:25
+#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP_DIS 0x00000000
+#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP_EN 0x00000001
+#define NVC36F_SEM_EXECUTE_REDUCTION 30:27
+#define NVC36F_SEM_EXECUTE_REDUCTION_IMIN 0x00000000
+#define NVC36F_SEM_EXECUTE_REDUCTION_IMAX 0x00000001
+#define NVC36F_SEM_EXECUTE_REDUCTION_IXOR 0x00000002
+#define NVC36F_SEM_EXECUTE_REDUCTION_IAND 0x00000003
+#define NVC36F_SEM_EXECUTE_REDUCTION_IOR 0x00000004
+#define NVC36F_SEM_EXECUTE_REDUCTION_IADD 0x00000005
+#define NVC36F_SEM_EXECUTE_REDUCTION_INC 0x00000006
+#define NVC36F_SEM_EXECUTE_REDUCTION_DEC 0x00000007
+#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT 31:31
+#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000000
+#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000001
+
+#endif
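The "HI:LO" numbers in this header are bit ranges, consumed by the driver's NVVAL()/NVDEF() field macros (nvhw/drf.h). As a simplified stand-alone illustration of the shift-and-mask arithmetic they imply, and of how the SEM_EXECUTE word in gv100_fence_emit32() above is assembled; the helper names here are invented for the example:

	#include <stdint.h>

	/*
	 * A field definition such as NVC36F_SEM_EXECUTE_OPERATION 2:0 means
	 * "bits 2 down to 0": mask the value to the field width, then shift
	 * it to the field's low bit.
	 */
	static uint32_t field_val(unsigned hi, unsigned lo, uint32_t v)
	{
		uint32_t width = hi - lo + 1;
		uint32_t mask = (width == 32) ? ~0u : ((1u << width) - 1u);

		return (v & mask) << lo;
	}

	/* e.g. SEM_EXECUTE for a 32-bit RELEASE with wait-for-idle: */
	static uint32_t sem_execute_release_wfi(void)
	{
		return field_val(2, 0, 0x1)	/* OPERATION = RELEASE */
		     | field_val(20, 20, 0x1)	/* RELEASE_WFI = EN */
		     | field_val(24, 24, 0x0);	/* PAYLOAD_SIZE = 32BIT */
	}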
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h
new file mode 100644
index 000000000000..092aebe9551c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef _clc97b_h_
+#define _clc97b_h_
+
+// dma opcode instructions
+#define NVC97B_DMA
+#define NVC97B_DMA_OPCODE 31:29
+#define NVC97B_DMA_OPCODE_METHOD 0x00000000
+#define NVC97B_DMA_OPCODE_JUMP 0x00000001
+#define NVC97B_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVC97B_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVC97B_DMA_METHOD_COUNT 27:18
+#define NVC97B_DMA_METHOD_OFFSET 15:2
+#define NVC97B_DMA_DATA 31:0
+#define NVC97B_DMA_DATA_NOP 0x00000000
+#define NVC97B_DMA_JUMP_OFFSET 15:2
+#define NVC97B_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+#endif // _clc97b_h_
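This small header describes the push-buffer (DMA) word format for the new window-immediate class rather than any methods: bits 31:29 select the opcode, and for METHOD words bits 27:18 carry a count while bits 15:2 hold the method offset at dword granularity. A hypothetical composer for a METHOD header word, using only the field positions defined above:

	#include <stdint.h>

	/*
	 * Illustration only, not driver code: build a METHOD header word
	 * from the NVC97B_DMA_* field positions. The method byte offset is
	 * stored dword-granular, which is why the field stops at bit 2.
	 */
	static uint32_t dma_method_header(uint32_t mthd_byte_offset, uint32_t count)
	{
		const uint32_t opcode_method = 0x0;	/* NVC97B_DMA_OPCODE_METHOD */

		return (opcode_method & 0x7u) << 29 |		/* OPCODE, 31:29 */
		       (count & 0x3ffu) << 18 |			/* METHOD_COUNT, 27:18 */
		       ((mthd_byte_offset >> 2) & 0x3fffu) << 2; /* METHOD_OFFSET, 15:2 */
	}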
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h
new file mode 100644
index 000000000000..0fec6fc21d44
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h
@@ -0,0 +1,868 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef _clca7d_h_
+#define _clca7d_h_
+
+// class methods
+#define NVCA7D_UPDATE (0x00000200)
+#define NVCA7D_UPDATE_SPECIAL_HANDLING 21:20
+#define NVCA7D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000)
+#define NVCA7D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001)
+#define NVCA7D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002)
+#define NVCA7D_UPDATE_SPECIAL_HANDLING_REASON 19:12
+#define NVCA7D_UPDATE_INHIBIT_INTERRUPTS 24:24
+#define NVCA7D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000)
+#define NVCA7D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001)
+#define NVCA7D_UPDATE_RELEASE_ELV 0:0
+#define NVCA7D_UPDATE_RELEASE_ELV_FALSE (0x00000000)
+#define NVCA7D_UPDATE_RELEASE_ELV_TRUE (0x00000001)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN 8:4
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVCA7D_UPDATE_FORCE_FULLSCREEN 28:28
+#define NVCA7D_UPDATE_FORCE_FULLSCREEN_FALSE (0x00000000)
+#define NVCA7D_UPDATE_FORCE_FULLSCREEN_TRUE (0x00000001)
+#define NVCA7D_SET_NOTIFIER_CONTROL (0x0000020C)
+#define NVCA7D_SET_NOTIFIER_CONTROL_MODE 0:0
+#define NVCA7D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NVCA7D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NVCA7D_SET_NOTIFIER_CONTROL_NOTIFY 12:12
+#define NVCA7D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000)
+#define NVCA7D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS (0x00000218)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001)
+#define NVCA7D_SET_SURFACE_ADDRESS_HI_NOTIFIER (0x00000260)
+#define NVCA7D_SET_SURFACE_ADDRESS_HI_NOTIFIER_ADDRESS_HI 31:0
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER (0x00000264)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ADDRESS_LO 31:4
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET 3:2
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_IOVA (0x00000000)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE 0:0
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_ENABLE (0x00000001)
+
+#define NVCA7D_SOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK 7:0
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_HDMI_FRL (0x0000000C)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NVCA7D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16
+#define NVCA7D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVCA7D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20
+#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000)
+#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001)
+#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002)
+
+#define NVCA7D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER 3:0
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i))
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F)
+#define NVCA7D_WINDOW_SET_CONTROL_HIDE 8:8
+#define NVCA7D_WINDOW_SET_CONTROL_HIDE_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_CONTROL_HIDE_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_CONTROL_DISABLE_PHYSICAL_FLIPS 9:9
+#define NVCA7D_WINDOW_SET_CONTROL_DISABLE_PHYSICAL_FLIPS_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_CONTROL_DISABLE_PHYSICAL_FLIPS_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_CONTROL_ALLOW_SUPERFRAME 10:10
+#define NVCA7D_WINDOW_SET_CONTROL_ALLOW_SUPERFRAME_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_CONTROL_ALLOW_SUPERFRAME_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED 16:16
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED 28:28
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED 30:30
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT 26:25
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT_PITCH_BLOCKLINEAR (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT_PITCH (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT_BLOCKLINEAR (0x00000002)
+#define NVCA7D_WINDOW_SET_PHYSICAL(a) (0x00001014 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW 31:0
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_NONE (0x00000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW0 (0x00000001)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW1 (0x00000002)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW2 (0x00000004)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW3 (0x00000008)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW4 (0x00000010)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW5 (0x00000020)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW6 (0x00000040)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW7 (0x00000080)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW8 (0x00000100)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW9 (0x00000200)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW10 (0x00000400)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW11 (0x00000800)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW12 (0x00001000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW13 (0x00002000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW14 (0x00004000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW15 (0x00008000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW16 (0x00010000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW17 (0x00020000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW18 (0x00040000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW19 (0x00080000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW20 (0x00100000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW21 (0x00200000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW22 (0x00400000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW23 (0x00800000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW24 (0x01000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW25 (0x02000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW26 (0x04000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW27 (0x08000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW28 (0x10000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW29 (0x20000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW30 (0x40000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW31 (0x80000000)
+
+#define NVCA7D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000)
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001)
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003)
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_DOWN_V 4:4
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28
+#define NVCA7D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000)
+#define NVCA7D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_444 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444NP (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN 31:26
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN0 (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN1 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN2 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN3 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN4 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN5 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN6 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN7 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN8 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN9 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN10 (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN11 (0x0000000B)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN12 (0x0000000C)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN13 (0x0000000D)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN14 (0x0000000E)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN15 (0x0000000F)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN16 (0x00000010)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN17 (0x00000011)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN18 (0x00000012)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN19 (0x00000013)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN20 (0x00000014)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN21 (0x00000015)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN22 (0x00000016)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN23 (0x00000017)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN24 (0x00000018)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN25 (0x00000019)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN26 (0x0000001A)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN27 (0x0000001B)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN28 (0x0000001C)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN29 (0x0000001D)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN30 (0x0000001E)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN31 (0x0000001F)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_NONE (0x0000003F)
+#define NVCA7D_HEAD_SET_CONTROL(a) (0x00002008 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CONTROL_STRUCTURE 1:0
+#define NVCA7D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE 2:2
+#define NVCA7D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_NORMAL (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_YUV420PACKER 3:3
+#define NVCA7D_HEAD_SET_CONTROL_YUV420PACKER_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_YUV420PACKER_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE 11:10
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE_NO_LOCK (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN 8:4
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCKOUT_WINDOW 15:12
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE 23:22
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE_NO_LOCK (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN 20:16
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN 28:24
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000B)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000C)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000D)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000E)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000F)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x00000010)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_STEREO_LOCK_MODE 30:30
+#define NVCA7D_HEAD_SET_CONTROL_SINK_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_STEREO_LOCK_MODE 31:31
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS 5:4
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE 10:8
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_ROUND (0x00000005)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_PHASE 13:12
+#define NVCA7D_HEAD_SET_DISPLAY_ID(a,b) (0x00002020 + (a)*0x00000800 + (b)*0x00000004)
+#define NVCA7D_HEAD_SET_DISPLAY_ID_CODE 31:0
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED 4:4
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_LTM_ALLOWED 5:5
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_LTM_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_LTM_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS 14:12
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED 16:16
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_ELV_START 31:17
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NVCA7D_HEAD_SET_TILE_MASK(a) (0x00002060 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE 7:0
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE0 (0x00000001)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE1 (0x00000002)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE2 (0x00000004)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE3 (0x00000008)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE4 (0x00000010)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE5 (0x00000020)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE6 (0x00000040)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE7 (0x00000080)
+#define NVCA7D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_RASTER_SIZE_WIDTH 15:0
+#define NVCA7D_HEAD_SET_RASTER_SIZE_HEIGHT 31:16
+#define NVCA7D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NVCA7D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NVCA7D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NVCA7D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NVCA7D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NVCA7D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS 20:20
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CRC(a) (0x00002150 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CRC_ADDRESS_HI 31:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC(a) (0x00002154 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ADDRESS_LO 31:4
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET 3:2
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_IOVA (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE 0:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_OLUT(a) (0x00002158 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_OLUT_ADDRESS_HI 31:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT(a) (0x0000215C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ADDRESS_LO 31:4
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET 3:2
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_IOVA (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE 0:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CURSOR(a,b) (0x00002170 + (a)*0x00000800 + (b)*0x00000004)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CURSOR_ADDRESS_HI 31:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR(a,b) (0x00002178 + (a)*0x00000800 + (b)*0x00000004)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ADDRESS_LO 31:4
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET 3:2
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_IOVA (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND 1:1
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND_PITCH (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND_BLOCKLINEAR (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE 0:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 5:0
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_0 (0x00000000)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_1 (0x00000001)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_2 (0x00000002)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_3 (0x00000003)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_4 (0x00000004)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_5 (0x00000005)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_6 (0x00000006)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_7 (0x00000007)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_8 (0x00000008)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_9 (0x00000009)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_10 (0x0000000A)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_11 (0x0000000B)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_12 (0x0000000C)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_13 (0x0000000D)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_14 (0x0000000E)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_15 (0x0000000F)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_16 (0x00000010)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_17 (0x00000011)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_18 (0x00000012)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_19 (0x00000013)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_20 (0x00000014)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_21 (0x00000015)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_22 (0x00000016)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_23 (0x00000017)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_24 (0x00000018)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_25 (0x00000019)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_26 (0x0000001A)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_27 (0x0000001B)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_28 (0x0000001C)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_29 (0x0000001D)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_30 (0x0000001E)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_31 (0x0000001F)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000020)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8
+#define NVCA7D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MIRROR 1:1
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MIRROR_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MIRROR_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE 3:2
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE_SEGMENTED (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT8 (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10 (0x00000002)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_SIZE 18:8
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_DIRECT_ROUND 4:4
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_DIRECT_ROUND_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_DIRECT_ROUND_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_LEVEL 25:20
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_SEGMENT_SIZE_BITS 5:5
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_SEGMENT_SIZE_BITS_SIZE_3BITS (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_SEGMENT_SIZE_BITS_SIZE_4BITS (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_FP_NORM_SCALE(a) (0x00002284 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_OLUT_FP_NORM_SCALE_VALUE 31:0
+
+#define NVCA7D_TILE_SET_TILE_SIZE(a) (0x00006000 + (a)*0x00000200)
+#define NVCA7D_TILE_SET_TILE_SIZE_START 14:0
+#define NVCA7D_TILE_SET_TILE_SIZE_WIDTH 30:16
+
+#endif // _clca7d_h
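A note on the notation used throughout these class headers: each field is named with a HIGH:LOW bit range, and each legal value gets its own define. In-tree nouveau consumes such defines through the DRF-style helpers in nvhw/drf.h (NVVAL()/NVDEF()); the following stand-alone C sketch uses a hand-written field_prep helper instead, purely to illustrate the idea, taking HEAD_SET_RASTER_SIZE (WIDTH in bits 15:0, HEIGHT in bits 31:16) as the example.

        /* Minimal sketch of how HI:LO field defines are consumed.
         * Hand-written helper for illustration; in-tree code uses the
         * NVVAL()/NVDEF() macros from nvhw/drf.h instead. */
        #include <stdint.h>
        #include <stdio.h>

        /* Build a mask for bits hi..lo and shift a value into place. */
        static uint32_t field_prep(int hi, int lo, uint32_t val)
        {
                uint32_t mask = (hi == 31 ? 0xffffffffu : ((1u << (hi + 1)) - 1))
                              & ~((1u << lo) - 1);
                return (val << lo) & mask;
        }

        int main(void)
        {
                /* Compose the HEAD_SET_RASTER_SIZE payload for 3840x2160:
                 * WIDTH is bits 15:0, HEIGHT is bits 31:16. */
                uint32_t v = field_prep(15, 0, 3840) | field_prep(31, 16, 2160);
                printf("HEAD_SET_RASTER_SIZE payload: 0x%08x\n", v); /* 0x08700f00 */
                return 0;
        }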
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h
new file mode 100644
index 000000000000..ebfb2e48a4f4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef _clca7e_h_
+#define _clca7e_h_
+
+// class methods
+#define NVCA7E_SET_NOTIFIER_CONTROL (0x00000220)
+#define NVCA7E_SET_NOTIFIER_CONTROL_MODE 0:0
+#define NVCA7E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NVCA7E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NVCA7E_SET_SIZE (0x00000224)
+#define NVCA7E_SET_SIZE_WIDTH 15:0
+#define NVCA7E_SET_SIZE_HEIGHT 31:16
+#define NVCA7E_SET_STORAGE (0x00000228)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NVCA7E_SET_PARAMS (0x0000022C)
+#define NVCA7E_SET_PARAMS_FORMAT 7:0
+#define NVCA7E_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NVCA7E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F)
+#define NVCA7E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NVCA7E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NVCA7E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E)
+#define NVCA7E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NVCA7E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6)
+#define NVCA7E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NVCA7E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9)
+#define NVCA7E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF)
+#define NVCA7E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NVCA7E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NVCA7E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NVCA7E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028)
+#define NVCA7E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8___V8_N444 (0x0000003A)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8___V8_N420 (0x0000003B)
+#define NVCA7E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055)
+#define NVCA7E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056)
+#define NVCA7E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058)
+#define NVCA7E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075)
+#define NVCA7E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076)
+#define NVCA7E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078)
+#define NVCA7E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18
+#define NVCA7E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000)
+#define NVCA7E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001)
+#define NVCA7E_SET_PARAMS_SWAP_UV 19:19
+#define NVCA7E_SET_PARAMS_SWAP_UV_DISABLE (0x00000000)
+#define NVCA7E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001)
+#define NVCA7E_SET_PARAMS_FMT_ROUNDING_MODE 22:22
+#define NVCA7E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_TO_NEAREST (0x00000000)
+#define NVCA7E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_DOWN (0x00000001)
+#define NVCA7E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004)
+#define NVCA7E_SET_PLANAR_STORAGE_PITCH 12:0
+#define NVCA7E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004)
+#define NVCA7E_SET_POINT_IN_X 15:0
+#define NVCA7E_SET_POINT_IN_Y 31:16
+#define NVCA7E_SET_SIZE_IN (0x00000298)
+#define NVCA7E_SET_SIZE_IN_WIDTH 15:0
+#define NVCA7E_SET_SIZE_IN_HEIGHT 31:16
+#define NVCA7E_SET_SIZE_OUT (0x000002A4)
+#define NVCA7E_SET_SIZE_OUT_WIDTH 15:0
+#define NVCA7E_SET_SIZE_OUT_HEIGHT 31:16
+#define NVCA7E_SET_PRESENT_CONTROL (0x00000308)
+#define NVCA7E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0
+#define NVCA7E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4
+#define NVCA7E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000)
+#define NVCA7E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001)
+#define NVCA7E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8
+#define NVCA7E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000)
+#define NVCA7E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001)
+#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE 13:12
+#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE_MONO (0x00000000)
+#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE_PAIR_FLIP (0x00000001)
+#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE_AT_ANY_FRAME (0x00000002)
+#define NVCA7E_SET_ILUT_CONTROL (0x00000440)
+#define NVCA7E_SET_ILUT_CONTROL_INTERPOLATE 0:0
+#define NVCA7E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE (0x00000000)
+#define NVCA7E_SET_ILUT_CONTROL_INTERPOLATE_ENABLE (0x00000001)
+#define NVCA7E_SET_ILUT_CONTROL_MIRROR 1:1
+#define NVCA7E_SET_ILUT_CONTROL_MIRROR_DISABLE (0x00000000)
+#define NVCA7E_SET_ILUT_CONTROL_MIRROR_ENABLE (0x00000001)
+#define NVCA7E_SET_ILUT_CONTROL_MODE 3:2
+#define NVCA7E_SET_ILUT_CONTROL_MODE_SEGMENTED (0x00000000)
+#define NVCA7E_SET_ILUT_CONTROL_MODE_DIRECT8 (0x00000001)
+#define NVCA7E_SET_ILUT_CONTROL_MODE_DIRECT10 (0x00000002)
+#define NVCA7E_SET_ILUT_CONTROL_SIZE 18:8
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_NOTIFIER (0x00000650)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_NOTIFIER_ADDRESS_HI 31:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER (0x00000654)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ADDRESS_LO 31:4
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET 3:2
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_IOVA (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE 0:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_DISABLE (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_ENABLE (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_ISO(b) (0x00000658 + (b)*0x00000004)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_ISO_ADDRESS_HI 31:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO(b) (0x00000670 + (b)*0x00000004)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ADDRESS_LO 31:4
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET 3:2
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_IOVA (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND 1:1
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_PITCH (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_BLOCKLINEAR (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE 0:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE_DISABLE (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE_ENABLE (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_ILUT (0x00000688)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_ILUT_ADDRESS_HI 31:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT (0x0000068C)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ADDRESS_LO 31:4
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET 3:2
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_IOVA (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE 0:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE_DISABLE (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE_ENABLE (0x00000001)
+
+#endif // _clca7e_h_
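Every SET_SURFACE_ADDRESS_* pair in this class follows one convention: the HI method carries the upper 32 address bits, while the LO method packs ADDRESS_LO into bits 31:4 and reuses the low nibble for TARGET (3:2), KIND (1:1 where present) and ENABLE (0:0), which forces 16-byte address alignment. A hedged sketch of the split, assuming (as the 31:4 placement suggests) that the LO word carries address bits 31:4 in place:

        #include <stdint.h>
        #include <assert.h>

        #define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_IOVA   0x0
        #define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE_ENABLE 0x1

        /* Split a 64-bit IOVA into the HI/LO method payloads.  The LO
         * word is the low 32 address bits with TARGET/KIND/ENABLE
         * folded into bits 3:0 (illustrative, not the driver's code). */
        static void iso_addr_words(uint64_t iova, uint32_t *hi, uint32_t *lo)
        {
                assert((iova & 0xf) == 0);   /* 16-byte alignment required */
                *hi = (uint32_t)(iova >> 32);
                *lo = (uint32_t)(iova & 0xfffffff0) |
                      (NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_IOVA << 2) |
                       NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE_ENABLE;
        }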
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h
new file mode 100644
index 000000000000..c9d74bd95e0b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gb100_dev_hshub_base_h__
+#define __gb100_dev_hshub_base_h__
+
+#define NV_PFB_HSHUB0 0x00870fff:0x00870000
+
+#define NV_PFB_HSHUB 0x00000FFF:0x00000000 /* RW--D */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO 0x00000E50 /* RW-4R */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_MASK 0xFFFFFF00 /* ----V */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI 0x00000E54 /* RW-4R */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000FFFFF /* ----V */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO 0x000006C0 /* RW-4R */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_MASK 0xFFFFFF00 /* ----V */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI 0x000006C4 /* RW-4R */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000FFFFF /* ----V */
+
+#endif // __gb100_dev_hshub_base_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h
new file mode 100644
index 000000000000..4d0bb8e14298
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gb10b_dev_fb_h__
+#define __gb10b_dev_fb_h__
+
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO 0x008a1d58 /* RW-4R */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_MASK 0xffffff00 /* RW--V */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI 0x008a1d5c /* RW-4R */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000fffff /* RW--V */
+
+#endif // __gb10b_dev_fb_h__
+
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h
new file mode 100644
index 000000000000..b09f04b31738
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gb202_dev_ce_h__
+#define __gb202_dev_ce_h__
+
+#define NV_CE_GRCE_MASK 0x001040d8 /* C--4R */
+#define NV_CE_GRCE_MASK_VALUE 9:0 /* C--VF */
+#define NV_CE_GRCE_MASK_VALUE_INIT 0x00f /* C---V */
+
+#endif // __gb202_dev_ce_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h
new file mode 100644
index 000000000000..ed359cb528fb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gb202_dev_therm_h__
+#define __gb202_dev_therm_h__
+
+#define NV_THERM_I2CS_SCRATCH 0x00ad00bc /* RW-4R */
+#define NV_THERM_I2CS_SCRATCH_DATA 31:0 /* RWIVF */
+#define NV_THERM_I2CS_SCRATCH_DATA_INIT 0x00000000 /* RWI-V */
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE NV_THERM_I2CS_SCRATCH
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS 31:0
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS 0x000000FF
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_FAILED 0x00000000
+
+#endif // __gb202_dev_therm_h__
+
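The FSP_BOOT_COMPLETE defines alias the generic I2CS scratch register, so firmware boot status amounts to comparing the raw 32-bit scratch value against STATUS_SUCCESS. A hedged polling sketch; rd32/sleep_ms and the 2-second timeout are stand-ins for the caller's MMIO accessor and delay policy, not the driver's actual code:

        #include <stdint.h>

        #define NV_THERM_I2CS_SCRATCH                                   0x00ad00bc
        #define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS  0x000000FF

        /* Poll until the FSP reports boot complete, or give up. */
        static int fsp_wait_boot_complete(uint32_t (*rd32)(uint32_t addr),
                                          void (*sleep_ms)(int))
        {
                for (int ms = 0; ms < 2000; ms++) {
                        if (rd32(NV_THERM_I2CS_SCRATCH) ==
                            NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS)
                                return 0;
                        sleep_ms(1);
                }
                return -1; /* timed out */
        }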
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h
new file mode 100644
index 000000000000..52171b412aa1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_falcon_v4_h__
+#define __gh100_dev_falcon_v4_h__
+
+#define NV_PFALCON_FALCON_MAILBOX0 0x00000040 /* RW-4R */
+#define NV_PFALCON_FALCON_MAILBOX0_DATA 31:0 /* RWIVF */
+#define NV_PFALCON_FALCON_MAILBOX0_DATA_INIT 0x00000000 /* RWI-V */
+#define NV_PFALCON_FALCON_MAILBOX1 0x00000044 /* RW-4R */
+#define NV_PFALCON_FALCON_MAILBOX1_DATA 31:0 /* RWIVF */
+#define NV_PFALCON_FALCON_MAILBOX1_DATA_INIT 0x00000000 /* RWI-V */
+
+#define NV_PFALCON_FALCON_HWCFG2 0x000000f4 /* R--4R */
+#define NV_PFALCON_FALCON_HWCFG2_RISCV_BR_PRIV_LOCKDOWN 13:13 /* R--VF */
+#define NV_PFALCON_FALCON_HWCFG2_RISCV_BR_PRIV_LOCKDOWN_LOCK 0x00000001 /* R---V */
+#define NV_PFALCON_FALCON_HWCFG2_RISCV_BR_PRIV_LOCKDOWN_UNLOCK 0x00000000 /* R---V */
+
+#endif // __gh100_dev_falcon_v4_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h
new file mode 100644
index 000000000000..819f09465952
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_fb_h_
+#define __gh100_dev_fb_h_
+
+#define NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT 8 /* */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO 0x00100A34 /* RW-4R */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI 0x00100A38 /* RW-4R */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000FFFFF /* ----V */
+
+#endif // __gh100_dev_fb_h_
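The _SHIFT define records that the flush page address is programmed pre-shifted right by 8 bits, which is also why the HI register only implements 20 address bits (its ADR_MASK of 0x000FFFFF covers bits 59:40 of the unshifted address). A sketch of the write sequence under that assumption; wr32 is a stand-in MMIO accessor:

        #include <stdint.h>

        #define NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT    8
        #define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO 0x00100A34
        #define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI 0x00100A38

        /* Program the PCIe flush sysmem page: the register pair holds
         * the address right-shifted by 8, split across LO and HI. */
        static void fbhub_set_flush_page(void (*wr32)(uint32_t addr, uint32_t val),
                                         uint64_t sysmem_addr)
        {
                uint64_t v = sysmem_addr >> NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT;
                wr32(NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO, (uint32_t)v);
                wr32(NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI, (uint32_t)(v >> 32));
        }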
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h
new file mode 100644
index 000000000000..e9507242cae5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_fsp_pri_h__
+#define __gh100_dev_fsp_pri_h__
+
+#define NV_PFSP 0x8F3FFF:0x8F0000 /* RW--D */
+
+#define NV_PFSP_MSGQ_HEAD(i) (0x008F2c80+(i)*8) /* RW-4A */
+#define NV_PFSP_MSGQ_HEAD__SIZE_1 8 /* */
+#define NV_PFSP_MSGQ_HEAD_VAL 31:0 /* RWIUF */
+#define NV_PFSP_MSGQ_HEAD_VAL_INIT 0x00000000 /* RWI-V */
+#define NV_PFSP_MSGQ_TAIL(i) (0x008F2c84+(i)*8) /* RW-4A */
+#define NV_PFSP_MSGQ_TAIL__SIZE_1 8 /* */
+#define NV_PFSP_MSGQ_TAIL_VAL 31:0 /* RWIUF */
+#define NV_PFSP_MSGQ_TAIL_VAL_INIT 0x00000000 /* RWI-V */
+
+#define NV_PFSP_QUEUE_HEAD(i) (0x008F2c00+(i)*8) /* RW-4A */
+#define NV_PFSP_QUEUE_HEAD__SIZE_1 8 /* */
+#define NV_PFSP_QUEUE_HEAD_ADDRESS 31:0 /* RWIVF */
+#define NV_PFSP_QUEUE_HEAD_ADDRESS_INIT 0x00000000 /* RWI-V */
+#define NV_PFSP_QUEUE_TAIL(i) (0x008F2c04+(i)*8) /* RW-4A */
+#define NV_PFSP_QUEUE_TAIL__SIZE_1 8 /* */
+#define NV_PFSP_QUEUE_TAIL_ADDRESS 31:0 /* RWIVF */
+#define NV_PFSP_QUEUE_TAIL_ADDRESS_INIT 0x00000000 /* RWI-V */
+
+#endif // __gh100_dev_fsp_pri_h__
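The MSGQ and QUEUE registers are laid out as 8 head/tail pairs (__SIZE_1) with an 8-byte stride, which the (i) macros encode directly. A small sketch, assuming the usual ring convention that head == tail means the queue is empty; rd32 is a stand-in MMIO accessor:

        #include <stdint.h>

        #define NV_PFSP_MSGQ_HEAD(i)      (0x008F2c80+(i)*8)
        #define NV_PFSP_MSGQ_TAIL(i)      (0x008F2c84+(i)*8)
        #define NV_PFSP_MSGQ_HEAD__SIZE_1 8

        /* Head/tail for message queue i sit in adjacent 32-bit words. */
        static int fsp_msgq_empty(uint32_t (*rd32)(uint32_t addr), int i)
        {
                return rd32(NV_PFSP_MSGQ_HEAD(i)) == rd32(NV_PFSP_MSGQ_TAIL(i));
        }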
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h
new file mode 100644
index 000000000000..6707e0e3b96b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_mmu_h__
+#define __gh100_dev_mmu_h__
+
+#define NV_MMU_PTE /* ----G */
+#define NV_MMU_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */
+#define NV_MMU_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */
+#define NV_MMU_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_PTE_KIND (1*32+7):(1*32+4) /* RWXVF */
+#define NV_MMU_PTE_KIND_INVALID 0x07 /* R---V */
+#define NV_MMU_PTE_KIND_PITCH 0x00 /* R---V */
+#define NV_MMU_PTE_KIND_GENERIC_MEMORY 0x6 /* R---V */
+#define NV_MMU_PTE_KIND_Z16 0x1 /* R---V */
+#define NV_MMU_PTE_KIND_S8 0x2 /* R---V */
+#define NV_MMU_PTE_KIND_S8Z24 0x3 /* R---V */
+#define NV_MMU_PTE_KIND_ZF32_X24S8 0x4 /* R---V */
+#define NV_MMU_PTE_KIND_Z24S8 0x5 /* R---V */
+#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE 0x8 /* R---V */
+#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE_DISABLE_PLC 0x9 /* R---V */
+#define NV_MMU_PTE_KIND_S8_COMPRESSIBLE_DISABLE_PLC 0xA /* R---V */
+#define NV_MMU_PTE_KIND_Z16_COMPRESSIBLE_DISABLE_PLC 0xB /* R---V */
+#define NV_MMU_PTE_KIND_S8Z24_COMPRESSIBLE_DISABLE_PLC 0xC /* R---V */
+#define NV_MMU_PTE_KIND_ZF32_X24S8_COMPRESSIBLE_DISABLE_PLC 0xD /* R---V */
+#define NV_MMU_PTE_KIND_Z24S8_COMPRESSIBLE_DISABLE_PLC 0xE /* R---V */
+#define NV_MMU_PTE_KIND_SMSKED_MESSAGE 0xF /* R---V */
+
+#define NV_MMU_VER3_PDE /* ----G */
+#define NV_MMU_VER3_PDE_IS_PTE 0:0 /* RWXVF */
+#define NV_MMU_VER3_PDE_IS_PTE_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_PDE_IS_PTE_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_PDE_VALID 0:0 /* RWXVF */
+#define NV_MMU_VER3_PDE_VALID_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_PDE_VALID_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_PDE_APERTURE 2:1 /* RWXVF */
+#define NV_MMU_VER3_PDE_APERTURE_INVALID 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PDE_APERTURE_VIDEO_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PDE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PDE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF 5:3 /* RWXVF */
+#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_ALLOWED__OR__INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_ALLOWED__OR__SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_NOT_ALLOWED__OR__INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_NOT_ALLOWED__OR__SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PDE_ADDRESS 51:12 /* RWXVF */
+#define NV_MMU_VER3_PDE_ADDRESS_SHIFT 0x0000000c /* */
+#define NV_MMU_VER3_PDE__SIZE 8
+
+#define NV_MMU_VER3_DUAL_PDE /* ----G */
+#define NV_MMU_VER3_DUAL_PDE_IS_PTE 0:0 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_IS_PTE_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_IS_PTE_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_VALID 0:0 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_VALID_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_VALID_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG 2:1 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG 5:3 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_ALLOWED__OR__INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_ALLOWED__OR__SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_NOT_ALLOWED__OR__INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_NOT_ALLOWED__OR__SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_ADDRESS_BIG 51:8 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL 66:65 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL 69:67 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_ALLOWED__OR__INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_ALLOWED__OR__SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_NOT_ALLOWED__OR__INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_NOT_ALLOWED__OR__SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_ADDRESS_SMALL 115:76 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_ADDRESS_SHIFT 0x0000000c /* */
+#define NV_MMU_VER3_DUAL_PDE_ADDRESS_BIG_SHIFT 8 /* */
+#define NV_MMU_VER3_DUAL_PDE__SIZE 16
+
+#define NV_MMU_VER3_PTE /* ----G */
+#define NV_MMU_VER3_PTE_VALID 0:0 /* RWXVF */
+#define NV_MMU_VER3_PTE_VALID_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_PTE_VALID_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_PTE_APERTURE 2:1 /* RWXVF */
+#define NV_MMU_VER3_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF 7:3 /* RWXVF */
+#define NV_MMU_VER3_PTE_PCF_INVALID 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_SPARSE 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_MAPPING_NOWHERE 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_NO_VALID_4KB_PAGE 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACE 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACE 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACE 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACE 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACE 0x00000004 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACE 0x00000005 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_CACHED_ACE 0x00000006 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACE 0x00000007 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_CACHED_ACE 0x00000008 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_UNCACHED_ACE 0x00000009 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_CACHED_ACE 0x0000000A /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_UNCACHED_ACE 0x0000000B /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_CACHED_ACE 0x0000000C /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_UNCACHED_ACE 0x0000000D /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_CACHED_ACE 0x0000000E /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_UNCACHED_ACE 0x0000000F /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACD 0x00000010 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACD 0x00000011 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACD 0x00000012 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACD 0x00000013 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACD 0x00000014 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACD 0x00000015 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_CACHED_ACD 0x00000016 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACD 0x00000017 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_CACHED_ACD 0x00000018 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_UNCACHED_ACD 0x00000019 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_CACHED_ACD 0x0000001A /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_UNCACHED_ACD 0x0000001B /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_CACHED_ACD 0x0000001C /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_UNCACHED_ACD 0x0000001D /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_CACHED_ACD 0x0000001E /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_UNCACHED_ACD 0x0000001F /* RW--V */
+#define NV_MMU_VER3_PTE_KIND 11:8 /* RWXVF */
+#define NV_MMU_VER3_PTE_ADDRESS 51:12 /* RWXVF */
+#define NV_MMU_VER3_PTE_ADDRESS_SYS 51:12 /* RWXVF */
+#define NV_MMU_VER3_PTE_ADDRESS_PEER 51:12 /* RWXVF */
+#define NV_MMU_VER3_PTE_ADDRESS_VID 39:12 /* RWXVF */
+#define NV_MMU_VER3_PTE_PEER_ID 63:(64-3) /* RWXVF */
+#define NV_MMU_VER3_PTE_PEER_ID_0 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_1 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_2 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_3 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_4 0x00000004 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_5 0x00000005 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_6 0x00000006 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_7 0x00000007 /* RW--V */
+#define NV_MMU_VER3_PTE_ADDRESS_SHIFT 0x0000000c /* */
+#define NV_MMU_VER3_PTE__SIZE 8
+
+#endif // __gh100_dev_mmu_h__
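
Editor's note: the field macros above use NVIDIA's "high:low" bit-range notation (e.g. NV_MMU_VER3_PTE_PCF is bits 7:3). Below is a minimal sketch of how such ranges are typically consumed; nouveau's real accessors live in <nvhw/drf.h>, and the FLD_* helpers plus ver3_pte_vram() are hypothetical names for illustration. The ternary trick does the work: (0 ? 7:3) == 3 and (1 ? 7:3) == 7 extract the endpoints.

#define FLD_LO(f)    (0 ? f)
#define FLD_HI(f)    (1 ? f)
#define FLD_MASK(f)  ((~0ULL >> (63 - FLD_HI(f))) & (~0ULL << FLD_LO(f)))
#define FLD_SET(f,v) (((u64)(v) << FLD_LO(f)) & FLD_MASK(f))

/* Build a valid, cached, read-write VER3 PTE pointing at VRAM page "pa". */
static inline u64
ver3_pte_vram(u64 pa)
{
	return FLD_SET(NV_MMU_VER3_PTE_VALID, NV_MMU_VER3_PTE_VALID_TRUE) |
	       FLD_SET(NV_MMU_VER3_PTE_APERTURE, NV_MMU_VER3_PTE_APERTURE_VIDEO_MEMORY) |
	       FLD_SET(NV_MMU_VER3_PTE_PCF, NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACE) |
	       FLD_SET(NV_MMU_VER3_PTE_ADDRESS_VID, pa >> NV_MMU_VER3_PTE_ADDRESS_SHIFT);
}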
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h
new file mode 100644
index 000000000000..8ff4663168d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_riscv_pri_h__
+#define __gh100_dev_riscv_pri_h__
+
+#define NV_PRISCV_RISCV_CPUCTL 0x00000388 /* RW-4R */
+#define NV_PRISCV_RISCV_CPUCTL_HALTED 4:4 /* R-IVF */
+#define NV_PRISCV_RISCV_CPUCTL_HALTED_INIT 0x00000001 /* R-I-V */
+#define NV_PRISCV_RISCV_CPUCTL_HALTED_TRUE 0x00000001 /* R---V */
+#define NV_PRISCV_RISCV_CPUCTL_HALTED_FALSE 0x00000000 /* R---V */
+
+#endif // __gh100_dev_riscv_pri_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h
new file mode 100644
index 000000000000..49b4816cb00b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_therm_h__
+#define __gh100_dev_therm_h__
+
+#define NV_THERM_I2CS_SCRATCH 0x000200bc /* RW-4R */
+#define NV_THERM_I2CS_SCRATCH_DATA 31:0 /* RWIVF */
+#define NV_THERM_I2CS_SCRATCH_DATA_INIT 0x00000000 /* RWI-V */
+
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE NV_THERM_I2CS_SCRATCH
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS 31:0
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS 0x000000FF
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_FAILED 0x00000000
+
+#endif // __gh100_dev_therm_h__
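
Editor's note: the FSP_BOOT_COMPLETE aliases above repurpose the therm I2CS scratch register as a boot-status word. A hedged sketch of how a driver might poll it follows; fsp_wait_boot_complete() and the retry budget are illustrative, while nvkm_rd32() and usleep_range() are existing kernel accessors.

static int
fsp_wait_boot_complete(struct nvkm_device *device)
{
	int retries = 2000; /* ~2-4s at 1-2ms per poll; placeholder budget */

	do {
		/* STATUS occupies the full 31:0 scratch field. */
		if (nvkm_rd32(device, NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE) ==
		    NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS)
			return 0;
		usleep_range(1000, 2000);
	} while (--retries);

	return -ETIMEDOUT;
}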
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h
new file mode 100644
index 000000000000..12b49e9894a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_xtl_ep_pri_h__
+#define __gh100_dev_xtl_ep_pri_h__
+
+#define NV_EP_PCFGM 0x92FFF:0x92000 /* RW--D */
+
+#endif // __gh100_dev_xtl_ep_pri_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h
new file mode 100644
index 000000000000..1a891bd33fa3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_pri_nv_xal_ep_h__
+#define __gh100_pri_nv_xal_ep_h__
+
+#define NV_XAL_EP_BAR0_WINDOW_BASE_SHIFT 0x000010
+#define NV_XAL_EP_BAR0_WINDOW_BASE 21:0
+#define NV_XAL_EP_BAR0_WINDOW 0x0010fd40
+
+#endif // __gh100_pri_nv_xal_ep_h__
+
diff --git a/drivers/gpu/drm/nouveau/include/nvif/chan.h b/drivers/gpu/drm/nouveau/include/nvif/chan.h
new file mode 100644
index 000000000000..c329a29068d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/chan.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVIF_CHAN_H__
+#define __NVIF_CHAN_H__
+#include "push.h"
+
+struct nvif_chan {
+ const struct nvif_chan_func {
+ struct {
+ u32 (*read_get)(struct nvif_chan *);
+ } push;
+
+ struct {
+ u32 (*read_get)(struct nvif_chan *);
+ void (*push)(struct nvif_chan *, bool main, u64 addr, u32 size,
+ bool no_prefetch);
+ void (*kick)(struct nvif_chan *);
+ int (*post)(struct nvif_chan *, u32 gpptr, u32 pbptr);
+ u32 post_size;
+ } gpfifo;
+
+ struct {
+ int (*release)(struct nvif_chan *, u64 addr, u32 data);
+ } sem;
+ } *func;
+
+ struct {
+ struct nvif_map map;
+ } userd;
+
+ struct {
+ struct nvif_map map;
+ u32 cur;
+ u32 max;
+ int free;
+ } gpfifo;
+
+ struct {
+ struct nvif_map map;
+ u64 addr;
+ } sema;
+
+ struct nvif_push push;
+
+ struct nvif_user *usermode;
+ u32 doorbell_token;
+};
+
+int nvif_chan_dma_wait(struct nvif_chan *, u32 push_nr);
+
+void nvif_chan_gpfifo_ctor(const struct nvif_chan_func *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, struct nvif_chan *);
+int nvif_chan_gpfifo_wait(struct nvif_chan *, u32 gpfifo_nr, u32 push_nr);
+void nvif_chan_gpfifo_push(struct nvif_chan *, u64 addr, u32 size, bool no_prefetch);
+int nvif_chan_gpfifo_post(struct nvif_chan *);
+
+void nvif_chan506f_gpfifo_push(struct nvif_chan *, bool main, u64 addr, u32 size, bool no_prefetch);
+void nvif_chan506f_gpfifo_kick(struct nvif_chan *);
+
+int nvif_chan906f_ctor_(const struct nvif_chan_func *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr,
+ struct nvif_chan *);
+u32 nvif_chan906f_read_get(struct nvif_chan *);
+u32 nvif_chan906f_gpfifo_read_get(struct nvif_chan *);
+int nvif_chan906f_gpfifo_post(struct nvif_chan *, u32 gpptr, u32 pbptr);
+
+int nvif_chan506f_ctor(struct nvif_chan *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size);
+int nvif_chan906f_ctor(struct nvif_chan *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr);
+int nvif_chanc36f_ctor(struct nvif_chan *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr,
+ struct nvif_user *usermode, u32 doorbell_token);
+#endif
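
Editor's note: for orientation, a hedged sketch of how a caller might drive this new interface to queue an indirect buffer. chan_submit() is a hypothetical helper; reserving func->gpfifo.post_size pushbuf words assumes the implementation appends a semaphore-release post, as the function table hints.

static int
chan_submit(struct nvif_chan *chan, u64 ib_addr, u32 ib_size)
{
	int ret;

	/* Reserve one GPFIFO slot plus pushbuf room for any post-amble. */
	ret = nvif_chan_gpfifo_wait(chan, 1, chan->func->gpfifo.post_size);
	if (ret)
		return ret;

	/* Queue the indirect buffer, then post/ring the doorbell. */
	nvif_chan_gpfifo_push(chan, ib_addr, ib_size, false);
	return nvif_chan_gpfifo_post(chan);
}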
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index ea937fa7bc55..ea8267e0d8da 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -29,6 +29,8 @@ struct nv_device_info_v0 {
#define NV_DEVICE_INFO_V0_TURING 0x0c
#define NV_DEVICE_INFO_V0_AMPERE 0x0d
#define NV_DEVICE_INFO_V0_ADA 0x0e
+#define NV_DEVICE_INFO_V0_HOPPER 0x0f
+#define NV_DEVICE_INFO_V0_BLACKWELL 0x10
__u8 family;
__u8 pad06[2];
__u64 ram_size;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 824e052dcc25..ff6823cb2cd8 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -57,12 +57,15 @@
#define KEPLER_INLINE_TO_MEMORY_A 0x0000a040
#define KEPLER_INLINE_TO_MEMORY_B 0x0000a140
+#define BLACKWELL_INLINE_TO_MEMORY_A 0x0000cd40
#define NV04_DISP /* cl0046.h */ 0x00000046
#define VOLTA_USERMODE_A 0x0000c361
#define TURING_USERMODE_A 0x0000c461
#define AMPERE_USERMODE_A 0x0000c561
+#define HOPPER_USERMODE_A 0x0000c661
+#define BLACKWELL_USERMODE_A 0x0000c761
#define MAXWELL_FAULT_BUFFER_A /* clb069.h */ 0x0000b069
#define VOLTA_FAULT_BUFFER_A /* clb069.h */ 0x0000c369
@@ -85,6 +88,9 @@
#define TURING_CHANNEL_GPFIFO_A /* if0020.h */ 0x0000c46f
#define AMPERE_CHANNEL_GPFIFO_A /* if0020.h */ 0x0000c56f
#define AMPERE_CHANNEL_GPFIFO_B /* if0020.h */ 0x0000c76f
+#define HOPPER_CHANNEL_GPFIFO_A 0x0000c86f
+#define BLACKWELL_CHANNEL_GPFIFO_A 0x0000c96f
+#define BLACKWELL_CHANNEL_GPFIFO_B 0x0000ca6f
#define NV50_DISP /* if0010.h */ 0x00005070
#define G82_DISP /* if0010.h */ 0x00008270
@@ -102,8 +108,10 @@
#define TU102_DISP /* if0010.h */ 0x0000c570
#define GA102_DISP /* if0010.h */ 0x0000c670
#define AD102_DISP /* if0010.h */ 0x0000c770
+#define GB202_DISP 0x0000ca70
#define GV100_DISP_CAPS 0x0000c373
+#define GB202_DISP_CAPS 0x0000ca73
#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
@@ -118,6 +126,7 @@
#define GV100_DISP_CURSOR /* if0014.h */ 0x0000c37a
#define TU102_DISP_CURSOR /* if0014.h */ 0x0000c57a
#define GA102_DISP_CURSOR /* if0014.h */ 0x0000c67a
+#define GB202_DISP_CURSOR 0x0000ca7a
#define NV50_DISP_OVERLAY /* if0014.h */ 0x0000507b
#define G82_DISP_OVERLAY /* if0014.h */ 0x0000827b
@@ -128,6 +137,7 @@
#define GV100_DISP_WINDOW_IMM_CHANNEL_DMA /* if0014.h */ 0x0000c37b
#define TU102_DISP_WINDOW_IMM_CHANNEL_DMA /* if0014.h */ 0x0000c57b
#define GA102_DISP_WINDOW_IMM_CHANNEL_DMA /* if0014.h */ 0x0000c67b
+#define GB202_DISP_WINDOW_IMM_CHANNEL_DMA 0x0000ca7b
#define NV50_DISP_BASE_CHANNEL_DMA /* if0014.h */ 0x0000507c
#define G82_DISP_BASE_CHANNEL_DMA /* if0014.h */ 0x0000827c
@@ -153,6 +163,7 @@
#define TU102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c57d
#define GA102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c67d
#define AD102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c77d
+#define GB202_DISP_CORE_CHANNEL_DMA 0x0000ca7d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* if0014.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* if0014.h */ 0x0000827e
@@ -164,6 +175,7 @@
#define GV100_DISP_WINDOW_CHANNEL_DMA /* if0014.h */ 0x0000c37e
#define TU102_DISP_WINDOW_CHANNEL_DMA /* if0014.h */ 0x0000c57e
#define GA102_DISP_WINDOW_CHANNEL_DMA /* if0014.h */ 0x0000c67e
+#define GB202_DISP_WINDOW_CHANNEL_DMA 0x0000ca7e
#define NV50_TESLA 0x00005097
#define G82_TESLA 0x00008297
@@ -189,16 +201,25 @@
#define TURING_A /* cl9097.h */ 0x0000c597
+#define AMPERE_A 0x0000c697
#define AMPERE_B /* cl9097.h */ 0x0000c797
#define ADA_A /* cl9097.h */ 0x0000c997
+#define HOPPER_A 0x0000cb97
+
+#define BLACKWELL_A 0x0000cd97
+#define BLACKWELL_B 0x0000ce97
+
#define NV74_BSP 0x000074b0
+#define NVB8B0_VIDEO_DECODER 0x0000b8b0
#define NVC4B0_VIDEO_DECODER 0x0000c4b0
#define NVC6B0_VIDEO_DECODER 0x0000c6b0
#define NVC7B0_VIDEO_DECODER 0x0000c7b0
#define NVC9B0_VIDEO_DECODER 0x0000c9b0
+#define NVCDB0_VIDEO_DECODER 0x0000cdb0
+#define NVCFB0_VIDEO_DECODER 0x0000cfb0
#define GT212_MSVLD 0x000085b1
#define IGT21A_MSVLD 0x000086b1
@@ -227,10 +248,14 @@
#define TURING_DMA_COPY_A 0x0000c5b5
#define AMPERE_DMA_COPY_A 0x0000c6b5
#define AMPERE_DMA_COPY_B 0x0000c7b5
+#define HOPPER_DMA_COPY_A 0x0000c8b5
+#define BLACKWELL_DMA_COPY_A 0x0000c9b5
+#define BLACKWELL_DMA_COPY_B 0x0000cab5
#define NVC4B7_VIDEO_ENCODER 0x0000c4b7
#define NVC7B7_VIDEO_ENCODER 0x0000c7b7
#define NVC9B7_VIDEO_ENCODER 0x0000c9b7
+#define NVCFB7_VIDEO_ENCODER 0x0000cfb7
#define FERMI_DECOMPRESS 0x000090b8
@@ -246,15 +271,25 @@
#define PASCAL_COMPUTE_B 0x0000c1c0
#define VOLTA_COMPUTE_A 0x0000c3c0
#define TURING_COMPUTE_A 0x0000c5c0
+#define AMPERE_COMPUTE_A 0x0000c6c0
#define AMPERE_COMPUTE_B 0x0000c7c0
#define ADA_COMPUTE_A 0x0000c9c0
+#define HOPPER_COMPUTE_A 0x0000cbc0
+#define BLACKWELL_COMPUTE_A 0x0000cdc0
+#define BLACKWELL_COMPUTE_B 0x0000cec0
#define NV74_CIPHER 0x000074c1
+#define NVB8D1_VIDEO_NVJPG 0x0000b8d1
#define NVC4D1_VIDEO_NVJPG 0x0000c4d1
#define NVC9D1_VIDEO_NVJPG 0x0000c9d1
+#define NVCDD1_VIDEO_NVJPG 0x0000cdd1
+#define NVCFD1_VIDEO_NVJPG 0x0000cfd1
+#define NVB8FA_VIDEO_OFA 0x0000b8fa
#define NVC6FA_VIDEO_OFA 0x0000c6fa
#define NVC7FA_VIDEO_OFA 0x0000c7fa
#define NVC9FA_VIDEO_OFA 0x0000c9fa
+#define NVCDFA_VIDEO_OFA 0x0000cdfa
+#define NVCFFA_VIDEO_OFA 0x0000cffa
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 8d205b6af46a..1b32dc701f61 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -16,7 +16,7 @@ struct nvif_object {
u32 handle;
s32 oclass;
void *priv; /*XXX: hack */
- struct {
+ struct nvif_map {
void __iomem *ptr;
u64 size;
} map;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push.h b/drivers/gpu/drm/nouveau/include/nvif/push.h
index 6d3a8a3d2087..a493fababe3c 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/push.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/push.h
@@ -31,6 +31,12 @@ struct nvif_push {
void (*kick)(struct nvif_push *push);
struct nvif_mem mem;
+ u64 addr;
+
+ struct {
+ u32 get;
+ u32 max;
+ } hw;
u32 *bgn;
u32 *cur;
@@ -41,7 +47,7 @@ struct nvif_push {
static inline __must_check int
PUSH_WAIT(struct nvif_push *push, u32 size)
{
- if (push->cur + size >= push->end) {
+ if (push->cur + size > push->end) {
int ret = push->wait(push, size);
if (ret)
return ret;
@@ -55,7 +61,11 @@ PUSH_WAIT(struct nvif_push *push, u32 size)
static inline int
PUSH_KICK(struct nvif_push *push)
{
- push->kick(push);
+ if (push->cur != push->bgn) {
+ push->kick(push);
+ push->bgn = push->cur;
+ }
+
return 0;
}
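
Editor's note: the two changes above are behavioural. PUSH_WAIT() no longer wraps early when a request exactly fills the remaining space (> instead of >=), and PUSH_KICK() becomes a no-op when nothing was emitted since the last kick. The usual emission pattern, shown as a sketch using the existing PUSH_DATA() helper (emit_two_dwords() is an illustrative name):

static int
emit_two_dwords(struct nvif_push *push, u32 a, u32 b)
{
	int ret = PUSH_WAIT(push, 2); /* ensure room for two dwords */
	if (ret)
		return ret;

	PUSH_DATA(push, a);
	PUSH_DATA(push, b);
	return PUSH_KICK(push); /* skipped internally if nothing queued */
}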
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push906f.h b/drivers/gpu/drm/nouveau/include/nvif/push906f.h
index cc2866bc8b0a..79df71de98d2 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/push906f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/push906f.h
@@ -7,6 +7,7 @@
#ifndef PUSH906F_SUBC
// Host methods
#define PUSH906F_SUBC_NV906F 0
+#define PUSH906F_SUBC_NVC36F 0
// Twod
#define PUSH906F_SUBC_NV902D 3
diff --git a/drivers/gpu/drm/nouveau/include/nvif/pushc97b.h b/drivers/gpu/drm/nouveau/include/nvif/pushc97b.h
new file mode 100644
index 000000000000..c8d6b6319134
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/pushc97b.h
@@ -0,0 +1,18 @@
+#ifndef __NVIF_PUSHC97B_H__
+#define __NVIF_PUSHC97B_H__
+#include <nvif/push.h>
+
+#include <nvhw/class/clc97b.h>
+
+#define PUSH_HDR(p,m,c) do { \
+ PUSH_ASSERT(!((m) & ~DRF_SMASK(NVC97B_DMA_METHOD_OFFSET)), "mthd"); \
+ PUSH_ASSERT(!((c) & ~DRF_MASK(NVC97B_DMA_METHOD_COUNT)), "size"); \
+ PUSH_DATA__((p), NVDEF(NVC97B, DMA, OPCODE, METHOD) | \
+ NVVAL(NVC97B, DMA, METHOD_COUNT, (c)) | \
+ NVVAL(NVC97B, DMA, METHOD_OFFSET, (m) >> 2), \
+ " mthd 0x%04x size %d - %s", (u32)(m), (u32)(c), __func__); \
+} while(0)
+
+#define PUSH_MTHD_HDR(p,s,m,c) PUSH_HDR(p,m,c)
+#define PUSH_MTHD_INC 4:4
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 46afb877a296..99579e7b9376 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -46,7 +46,10 @@ struct nvkm_device {
GV100 = 0x140,
TU100 = 0x160,
GA100 = 0x170,
+ GH100 = 0x180,
AD100 = 0x190,
+ GB10x = 0x1a0,
+ GB20x = 0x1b0,
} card_type;
u32 chipset;
u8 chiprev;
@@ -77,6 +80,13 @@ struct nvkm_device {
struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int type, int inst);
struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int type, int inst);
+enum nvkm_bar_id {
+ NVKM_BAR_INVALID = 0,
+ NVKM_BAR0_PRI,
+ NVKM_BAR1_FB,
+ NVKM_BAR2_INST,
+};
+
struct nvkm_device_func {
struct nvkm_device_pci *(*pci)(struct nvkm_device *);
struct nvkm_device_tegra *(*tegra)(struct nvkm_device *);
@@ -85,8 +95,8 @@ struct nvkm_device_func {
int (*init)(struct nvkm_device *);
void (*fini)(struct nvkm_device *, bool suspend);
int (*irq)(struct nvkm_device *);
- resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
- resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
+ resource_size_t (*resource_addr)(struct nvkm_device *, enum nvkm_bar_id);
+ resource_size_t (*resource_size)(struct nvkm_device *, enum nvkm_bar_id);
bool cpu_coherent;
};
@@ -124,6 +134,9 @@ struct nvkm_device *nvkm_device_find(u64 name);
_temp; \
})
+#define NVKM_RD32_(p,o,dr) nvkm_rd32((p), (o) + (dr))
+#define NVKM_RD32(p,A...) DRF_RV(NVKM_RD32_, (p), 0, ##A)
+
void nvkm_device_del(struct nvkm_device **);
struct nvkm_device_oclass {
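
Editor's note: with resource_addr()/resource_size() now taking the symbolic enum nvkm_bar_id instead of a raw BAR index, each backend maps ids onto its own layout. A hypothetical PCI-side mapping, for illustration only; real chips differ (64-bit BARs consume two PCI slots, hence BAR2_INST at index 3 here).

static resource_size_t
example_resource_addr(struct pci_dev *pdev, enum nvkm_bar_id bar)
{
	switch (bar) {
	case NVKM_BAR0_PRI:  return pci_resource_start(pdev, 0);
	case NVKM_BAR1_FB:   return pci_resource_start(pdev, 1);
	case NVKM_BAR2_INST: return pci_resource_start(pdev, 3);
	default:             return 0; /* NVKM_BAR_INVALID */
	}
}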
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
index 9d2a1abf64f9..d92ffd17b729 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: MIT */
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FSP , struct nvkm_fsp , fsp)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_GSP , struct nvkm_gsp , gsp)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TOP , struct nvkm_top , top)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VFN , struct nvkm_vfn , vfn)
@@ -29,7 +30,7 @@ NVKM_LAYOUT_INST(NVKM_SUBDEV_IOCTRL , struct nvkm_subdev , ioctrl, 3)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FLA , struct nvkm_subdev , fla)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_BSP , struct nvkm_engine , bsp)
-NVKM_LAYOUT_INST(NVKM_ENGINE_CE , struct nvkm_engine , ce, 10)
+NVKM_LAYOUT_INST(NVKM_ENGINE_CE , struct nvkm_engine , ce, 20)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_CIPHER , struct nvkm_engine , cipher)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_DISP , struct nvkm_disp , disp)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_DMAOBJ , struct nvkm_dma , dma)
@@ -43,9 +44,9 @@ NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPDEC , struct nvkm_engine , mspdec)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPPP , struct nvkm_engine , msppp)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSVLD , struct nvkm_engine , msvld)
NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC , struct nvkm_nvdec , nvdec, 8)
-NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC , struct nvkm_nvenc , nvenc, 3)
+NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC , struct nvkm_nvenc , nvenc, 4)
NVKM_LAYOUT_INST(NVKM_ENGINE_NVJPG , struct nvkm_engine , nvjpg, 8)
-NVKM_LAYOUT_ONCE(NVKM_ENGINE_OFA , struct nvkm_engine , ofa)
+NVKM_LAYOUT_INST(NVKM_ENGINE_OFA , struct nvkm_engine , ofa, 2)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC , struct nvkm_engine , sec)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC2 , struct nvkm_sec2 , sec2)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SW , struct nvkm_sw , sw)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index 3e8db8280e2a..7903d7470d19 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -87,5 +87,4 @@ int gp102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gv100_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
int tu102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
int ga102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
-int ad102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index be508f65b280..96c16cfccf16 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -78,9 +78,6 @@ struct nvkm_fifo {
struct {
struct nvkm_memory *mem;
struct nvkm_vma *bar1;
-
- struct mutex mutex;
- struct list_head list;
} userd;
struct {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 8145796ffc61..a2333cfe6955 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -55,5 +55,4 @@ int gp10b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n
int gv100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
int tu102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
int ga102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
-int ad102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
index 8d2e170883e1..ca83caa55157 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -13,7 +13,5 @@ struct nvkm_nvdec {
int gm107_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
int tu102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
-int ga100_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
int ga102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
-int ad102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
index 018c58fc32ba..1f6eef13f872 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
@@ -13,6 +13,4 @@ struct nvkm_nvenc {
int gm107_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
int tu102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
-int ga102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
-int ad102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h
deleted file mode 100644
index 80b7933a789e..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_NVJPG_H__
-#define __NVKM_NVJPG_H__
-#include <core/engine.h>
-
-int ga100_nvjpg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-int ad102_nvjpg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h
deleted file mode 100644
index e72e2115333b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_OFA_H__
-#define __NVKM_OFA_H__
-#include <core/engine.h>
-
-int ga100_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-int ga102_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-int ad102_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 5b798a1a313d..e0d777a933e1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -102,6 +102,9 @@ int gv100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n
int tu102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int ga100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int ga102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gh100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gb100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gb202_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h
new file mode 100644
index 000000000000..8a3dbb1cbb46
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_FSP_H__
+#define __NVKM_FSP_H__
+#include <core/subdev.h>
+#include <core/falcon.h>
+
+struct nvkm_fsp {
+ const struct nvkm_fsp_func *func;
+ struct nvkm_subdev subdev;
+
+ struct nvkm_falcon falcon;
+};
+
+bool nvkm_fsp_verify_gsp_fmc(struct nvkm_fsp *, u32 hash_size, u32 pkey_size, u32 sig_size);
+int nvkm_fsp_boot_gsp_fmc(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig);
+
+int gh100_fsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **);
+int gb100_fsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **);
+int gb202_fsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **);
+#endif
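
Editor's note: a sketch of the expected call sequence for this new subdev — verify the GSP-FMC signature sizes against what the FSP build accepts, then hand it the firmware. boot_fmc() and its parameters are placeholders, and resume=false assumes a cold boot.

static int
boot_fmc(struct nvkm_fsp *fsp, u64 args_addr, u32 rsvd_size, u64 img_addr,
	 const u8 *hash, u32 hash_size, const u8 *pkey, u32 pkey_size,
	 const u8 *sig, u32 sig_size)
{
	if (!nvkm_fsp_verify_gsp_fmc(fsp, hash_size, pkey_size, sig_size))
		return -EINVAL;

	return nvkm_fsp_boot_gsp_fmc(fsp, args_addr, rsvd_size, false /* cold boot */,
				     img_addr, hash, pkey, sig);
}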
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 746e126c3ecf..226c7ec56b8e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -17,6 +17,9 @@ struct nvkm_gsp_mem {
dma_addr_t addr;
};
+int nvkm_gsp_mem_ctor(struct nvkm_gsp *, size_t size, struct nvkm_gsp_mem *);
+void nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *);
+
struct nvkm_gsp_radix3 {
struct nvkm_gsp_mem lvl0;
struct nvkm_gsp_mem lvl1;
@@ -31,6 +34,29 @@ typedef int (*nvkm_gsp_msg_ntfy_func)(void *priv, u32 fn, void *repv, u32 repc);
struct nvkm_gsp_event;
typedef void (*nvkm_gsp_event_func)(struct nvkm_gsp_event *, void *repv, u32 repc);
+/**
+ * DOC: GSP message handling policy
+ *
+ * When sending a GSP RPC command, its reply message can be handled in
+ * several ways, depending on the caller's requirements and the nature
+ * of the command:
+ *
+ * NVKM_GSP_RPC_REPLY_NOWAIT - If specified, immediately return to the
+ * caller after the GSP RPC command is issued.
+ *
+ * NVKM_GSP_RPC_REPLY_RECV - If specified, wait for and receive the entire
+ * GSP RPC message after the GSP RPC command is issued.
+ *
+ * NVKM_GSP_RPC_REPLY_POLL - If specified, wait for the specific reply and
+ * discard it before returning to the caller.
+ */
+enum nvkm_gsp_rpc_reply_policy {
+ NVKM_GSP_RPC_REPLY_NOWAIT = 0,
+ NVKM_GSP_RPC_REPLY_RECV,
+ NVKM_GSP_RPC_REPLY_POLL,
+};
+
struct nvkm_gsp {
const struct nvkm_gsp_func *func;
struct nvkm_subdev subdev;
@@ -42,6 +68,9 @@ struct nvkm_gsp {
const struct firmware *load;
const struct firmware *unload;
} booter;
+
+ const struct firmware *fmc;
+
const struct firmware *bl;
const struct firmware *rm;
} fws;
@@ -89,6 +118,15 @@ struct nvkm_gsp {
struct {
struct nvkm_gsp_mem fw;
+ u8 *hash;
+ u8 *pkey;
+ u8 *sig;
+
+ struct nvkm_gsp_mem args;
+ } fmc;
+
+ struct {
+ struct nvkm_gsp_mem fw;
u32 code_offset;
u32 data_offset;
u32 manifest_offset;
@@ -107,6 +145,7 @@ struct nvkm_gsp {
struct sg_table sgt;
struct nvkm_gsp_radix3 radix3;
struct nvkm_gsp_mem meta;
+ struct sg_table fbsr;
} sr;
struct {
@@ -186,31 +225,7 @@ struct nvkm_gsp {
u8 tpcs;
} gr;
- const struct nvkm_gsp_rm {
- void *(*rpc_get)(struct nvkm_gsp *, u32 fn, u32 argc);
- void *(*rpc_push)(struct nvkm_gsp *, void *argv, bool wait, u32 repc);
- void (*rpc_done)(struct nvkm_gsp *gsp, void *repv);
-
- void *(*rm_ctrl_get)(struct nvkm_gsp_object *, u32 cmd, u32 argc);
- int (*rm_ctrl_push)(struct nvkm_gsp_object *, void **argv, u32 repc);
- void (*rm_ctrl_done)(struct nvkm_gsp_object *, void *repv);
-
- void *(*rm_alloc_get)(struct nvkm_gsp_object *, u32 oclass, u32 argc);
- void *(*rm_alloc_push)(struct nvkm_gsp_object *, void *argv);
- void (*rm_alloc_done)(struct nvkm_gsp_object *, void *repv);
-
- int (*rm_free)(struct nvkm_gsp_object *);
-
- int (*client_ctor)(struct nvkm_gsp *, struct nvkm_gsp_client *);
- void (*client_dtor)(struct nvkm_gsp_client *);
-
- int (*device_ctor)(struct nvkm_gsp_client *, struct nvkm_gsp_device *);
- void (*device_dtor)(struct nvkm_gsp_device *);
-
- int (*event_ctor)(struct nvkm_gsp_device *, u32 handle, u32 id,
- nvkm_gsp_event_func, struct nvkm_gsp_event *);
- void (*event_dtor)(struct nvkm_gsp_event *);
- } *rm;
+ struct nvkm_rm *rm;
struct {
struct mutex mutex;
@@ -248,16 +263,19 @@ nvkm_gsp_rm(struct nvkm_gsp *gsp)
return gsp && (gsp->fws.rm || gsp->fw.img);
}
+#include <rm/rm.h>
+
static inline void *
nvkm_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
{
- return gsp->rm->rpc_get(gsp, fn, argc);
+ return gsp->rm->api->rpc->get(gsp, fn, argc);
}
static inline void *
-nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 repc)
{
- return gsp->rm->rpc_push(gsp, argv, wait, repc);
+ return gsp->rm->api->rpc->push(gsp, argv, policy, repc);
}
static inline void *
@@ -268,13 +286,14 @@ nvkm_gsp_rpc_rd(struct nvkm_gsp *gsp, u32 fn, u32 argc)
if (IS_ERR_OR_NULL(argv))
return argv;
- return nvkm_gsp_rpc_push(gsp, argv, true, argc);
+ return nvkm_gsp_rpc_push(gsp, argv, NVKM_GSP_RPC_REPLY_RECV, argc);
}
static inline int
-nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, bool wait)
+nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv,
+ enum nvkm_gsp_rpc_reply_policy policy)
{
- void *repv = nvkm_gsp_rpc_push(gsp, argv, wait, 0);
+ void *repv = nvkm_gsp_rpc_push(gsp, argv, policy, 0);
if (IS_ERR(repv))
return PTR_ERR(repv);
@@ -285,19 +304,19 @@ nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, bool wait)
static inline void
nvkm_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
{
- gsp->rm->rpc_done(gsp, repv);
+ gsp->rm->api->rpc->done(gsp, repv);
}
static inline void *
nvkm_gsp_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
{
- return object->client->gsp->rm->rm_ctrl_get(object, cmd, argc);
+ return object->client->gsp->rm->api->ctrl->get(object, cmd, argc);
}
static inline int
nvkm_gsp_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
{
- return object->client->gsp->rm->rm_ctrl_push(object, argv, repc);
+ return object->client->gsp->rm->api->ctrl->push(object, argv, repc);
}
static inline void *
@@ -328,7 +347,7 @@ nvkm_gsp_rm_ctrl_wr(struct nvkm_gsp_object *object, void *argv)
static inline void
nvkm_gsp_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv)
{
- object->client->gsp->rm->rm_ctrl_done(object, repv);
+ object->client->gsp->rm->api->ctrl->done(object, repv);
}
static inline void *
@@ -343,7 +362,7 @@ nvkm_gsp_rm_alloc_get(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u3
object->parent = parent;
object->handle = handle;
- argv = gsp->rm->rm_alloc_get(object, oclass, argc);
+ argv = gsp->rm->api->alloc->get(object, oclass, argc);
if (IS_ERR_OR_NULL(argv)) {
object->client = NULL;
return argv;
@@ -355,7 +374,7 @@ nvkm_gsp_rm_alloc_get(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u3
static inline void *
nvkm_gsp_rm_alloc_push(struct nvkm_gsp_object *object, void *argv)
{
- void *repv = object->client->gsp->rm->rm_alloc_push(object, argv);
+ void *repv = object->client->gsp->rm->api->alloc->push(object, argv);
if (IS_ERR(repv))
object->client = NULL;
@@ -377,7 +396,7 @@ nvkm_gsp_rm_alloc_wr(struct nvkm_gsp_object *object, void *argv)
static inline void
nvkm_gsp_rm_alloc_done(struct nvkm_gsp_object *object, void *repv)
{
- object->client->gsp->rm->rm_alloc_done(object, repv);
+ object->client->gsp->rm->api->alloc->done(object, repv);
}
static inline int
@@ -395,39 +414,29 @@ nvkm_gsp_rm_alloc(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u32 ar
static inline int
nvkm_gsp_rm_free(struct nvkm_gsp_object *object)
{
- if (object->client)
- return object->client->gsp->rm->rm_free(object);
+ if (object->client) {
+ int ret = object->client->gsp->rm->api->alloc->free(object);
+ object->client = NULL;
+ return ret;
+ }
return 0;
}
-static inline int
-nvkm_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
-{
- if (WARN_ON(!gsp->rm))
- return -ENOSYS;
-
- return gsp->rm->client_ctor(gsp, client);
-}
-
-static inline void
-nvkm_gsp_client_dtor(struct nvkm_gsp_client *client)
-{
- if (client->gsp)
- client->gsp->rm->client_dtor(client);
-}
+int nvkm_gsp_client_ctor(struct nvkm_gsp *, struct nvkm_gsp_client *);
+void nvkm_gsp_client_dtor(struct nvkm_gsp_client *);
static inline int
nvkm_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
{
- return client->gsp->rm->device_ctor(client, device);
+ return client->gsp->rm->api->device->ctor(client, device);
}
static inline void
nvkm_gsp_device_dtor(struct nvkm_gsp_device *device)
{
if (device->object.client)
- device->object.client->gsp->rm->device_dtor(device);
+ device->object.client->gsp->rm->api->device->dtor(device);
}
static inline int
@@ -459,7 +468,9 @@ static inline int
nvkm_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
{
- return device->object.client->gsp->rm->event_ctor(device, handle, id, func, event);
+ struct nvkm_rm *rm = device->object.client->gsp->rm;
+
+ return rm->api->device->event.ctor(device, handle, id, func, event);
}
static inline void
@@ -468,7 +479,7 @@ nvkm_gsp_event_dtor(struct nvkm_gsp_event *event)
struct nvkm_gsp_device *device = event->device;
if (device)
- device->object.client->gsp->rm->event_dtor(event);
+ device->object.client->gsp->rm->api->device->event.dtor(event);
}
int nvkm_gsp_intr_stall(struct nvkm_gsp *, enum nvkm_subdev_type, int);
@@ -479,5 +490,8 @@ int tu102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_
int tu116_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int ga100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int ga102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int gh100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int ad102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int gb100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int gb202_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
#endif
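
Editor's note: the bool-to-enum migration above is mechanical for callers. A minimal sketch of the resulting idiom — rpc_example() is hypothetical, mirroring how nvkm_gsp_rpc_rd() now passes NVKM_GSP_RPC_REPLY_RECV where it previously passed true:

static int
rpc_example(struct nvkm_gsp *gsp, void *argv, bool wait)
{
	/* Fire-and-forget vs. wait-for-the-full-reply, via the new policy. */
	return nvkm_gsp_rpc_wr(gsp, argv,
			       wait ? NVKM_GSP_RPC_REPLY_RECV
				    : NVKM_GSP_RPC_REPLY_NOWAIT);
}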
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index e10cbd9203ec..db835cf7b8ac 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -24,11 +24,6 @@ struct nvkm_instmem {
struct nvkm_ramht *ramht;
struct nvkm_memory *ramro;
struct nvkm_memory *ramfc;
-
- struct {
- struct sg_table fbsr;
- bool fbsr_valid;
- } rm;
};
u32 nvkm_instmem_rd32(struct nvkm_instmem *, u32 addr);
@@ -41,4 +36,5 @@ int nv04_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nv
int nv40_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
int nv50_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
int gk20a_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
+int gh100_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 935b1cacd528..abcb0dbcde70 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -8,7 +8,7 @@ struct nvkm_vma {
struct list_head head;
struct rb_node tree;
u64 addr;
- u64 size:50;
+ u64 size;
bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
#define NVKM_VMA_PAGE_NONE 7
@@ -73,6 +73,7 @@ struct nvkm_vmm {
struct nvkm_gsp_object object;
struct nvkm_vma *rsvd;
+ bool external;
} rm;
};
@@ -165,4 +166,5 @@ int gp100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gp10b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gv100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int tu102_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gh100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
index 3c103101d5fc..112b674ed9c8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -50,6 +50,7 @@ int gf100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gf106_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
int gk104_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
int gp100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int gh100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
/* pcie functions */
int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width);
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h
deleted file mode 100644
index 7a3fc023072d..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl0000_h__
-#define __src_common_sdk_nvidia_inc_class_cl0000_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */
-
-typedef struct NV0000_ALLOC_PARAMETERS {
- NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
- NvU32 processID;
- char processName[NV_PROC_NAME_MAX_LENGTH];
-} NV0000_ALLOC_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h
deleted file mode 100644
index e4de36d63666..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl0005_h__
-#define __src_common_sdk_nvidia_inc_class_cl0005_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV0005_ALLOC_PARAMETERS {
- NvHandle hParentClient;
- NvHandle hSrcResource;
-
- NvV32 hClass;
- NvV32 notifyIndex;
- NV_DECLARE_ALIGNED(NvP64 data, 8);
-} NV0005_ALLOC_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h
deleted file mode 100644
index 8868118e47d6..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl0080_h__
-#define __src_common_sdk_nvidia_inc_class_cl0080_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV01_DEVICE_0 (0x80U) /* finn: Evaluated from "NV0080_ALLOC_PARAMETERS_MESSAGE_ID" */
-
-typedef struct NV0080_ALLOC_PARAMETERS {
- NvU32 deviceId;
- NvHandle hClientShare;
- NvHandle hTargetClient;
- NvHandle hTargetDevice;
- NvV32 flags;
- NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8);
- NV_DECLARE_ALIGNED(NvU64 vaStartInternal, 8);
- NV_DECLARE_ALIGNED(NvU64 vaLimitInternal, 8);
- NvV32 vaMode;
-} NV0080_ALLOC_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h
deleted file mode 100644
index 9040ea5608a0..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl2080_h__
-#define __src_common_sdk_nvidia_inc_class_cl2080_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2002-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV20_SUBDEVICE_0 (0x2080U) /* finn: Evaluated from "NV2080_ALLOC_PARAMETERS_MESSAGE_ID" */
-
-typedef struct NV2080_ALLOC_PARAMETERS {
- NvU32 subDeviceId;
-} NV2080_ALLOC_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h
deleted file mode 100644
index ba659d6477d3..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl2080_notification_h__
-#define __src_common_sdk_nvidia_inc_class_cl2080_notification_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_NOTIFIERS_HOTPLUG (1)
-
-#define NV2080_NOTIFIERS_DP_IRQ (7)
-
-#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001)
-#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS
-
-#define NV2080_ENGINE_TYPE_COPY0 (0x00000009)
-
-#define NV2080_ENGINE_TYPE_BSP (0x00000013)
-#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP
-
-#define NV2080_ENGINE_TYPE_MSENC (0x0000001b)
-#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */
-
-#define NV2080_ENGINE_TYPE_SW (0x00000022)
-
-#define NV2080_ENGINE_TYPE_SEC2 (0x00000026)
-
-#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b)
-#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG
-
-#define NV2080_ENGINE_TYPE_OFA (0x00000033)
-
-typedef struct {
- NvU32 plugDisplayMask;
- NvU32 unplugDisplayMask;
-} Nv2080HotplugNotification;
-
-typedef struct Nv2080DpIrqNotificationRec {
- NvU32 displayId;
-} Nv2080DpIrqNotification;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h
deleted file mode 100644
index 9eb780a1ac72..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl84a0_h__
-#define __src_common_sdk_nvidia_inc_class_cl84a0_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV01_MEMORY_LIST_SYSTEM (0x00000081)
-
-#define NV01_MEMORY_LIST_FBMEM (0x00000082)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h
deleted file mode 100644
index f1d21776e395..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl90f1_h__
-#define __src_common_sdk_nvidia_inc_class_cl90f1_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define FERMI_VASPACE_A (0x000090f1)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h
deleted file mode 100644
index b8f32576cfaa..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_clc0b5sw_h__
-#define __src_common_sdk_nvidia_inc_class_clc0b5sw_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NVC0B5_ALLOCATION_PARAMETERS {
- NvU32 version;
- NvU32 engineType;
-} NVC0B5_ALLOCATION_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h
deleted file mode 100644
index 58b3ba7badf1..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073common_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073common_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
- NvBool bDscSupported;
- NvU32 encoderColorFormatMask;
- NvU32 lineBufferSizeKB;
- NvU32 rateBufferSizeKB;
- NvU32 bitsPerPixelPrecision;
- NvU32 maxNumHztSlices;
- NvU32 lineBufferBitDepth;
-} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
deleted file mode 100644
index 596f2ea8344e..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
+++ /dev/null
@@ -1,166 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dfp_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dfp_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 flags;
- NvU32 flags2;
-} NV0073_CTRL_DFP_GET_INFO_PARAMS;
-
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U)
-#define NV0073_CTRL_DFP_FLAGS_LANE 5:3
-#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U)
-#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U)
-#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6
-#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7
-#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8
-#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9
-#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10
-#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14
-#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15
-#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U)
-#define NV0073_CTRL_DFP_FLAGS_LINK 21:20
-#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22
-#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U)
-#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25
-#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26
-#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30
-#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U)
-
-#define NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS (0x731144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER 96U
-
-typedef struct NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 numELDSize;
- NvU8 bufferELD[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER];
- NvU32 maxFreqSupported;
- NvU32 ctrl;
- NvU32 deviceEntry;
-} NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS;
-
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD 0:0
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV 1:1
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_TRUE (0x00000001U)
-
-#define NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE (0x731150U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvBool enable;
-} NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS;
-
-typedef NvU32 NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG;
-
-typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_INFO {
- NvU32 displayMask;
- NvU32 sorType;
-} NV0073_CTRL_DFP_ASSIGN_SOR_INFO;
-
-#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR (0x731152U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS 4U
-
-typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU8 sorExcludeMask;
- NvU32 slaveDisplayId;
- NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG forceSublinkConfig;
- NvBool bIs2Head1Or;
- NvU32 sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
- NV0073_CTRL_DFP_ASSIGN_SOR_INFO sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
- NvU8 reservedSorMask;
- NvU32 flags;
-} NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS;
-
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO 0:0
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_OPTIMAL (0x00000001U)
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_DEFAULT (0x00000000U)
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE 1:1
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO (0x00000000U)
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES (0x00000001U)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
deleted file mode 100644
index bae4b1997736..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
+++ /dev/null
@@ -1,335 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dp_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dp_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0073_CTRL_CMD_DP_AUXCH_CTRL (0x731341U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U
-
-typedef struct NV0073_CTRL_DP_AUXCH_CTRL_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvBool bAddrOnly;
- NvU32 cmd;
- NvU32 addr;
- NvU8 data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE];
- NvU32 size;
- NvU32 replyType;
- NvU32 retryTimeMs;
-} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS;
-
-#define NV0073_CTRL_DP_AUXCH_CMD_TYPE 3:3
-#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C (0x00000000U)
-#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX (0x00000001U)
-#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT 2:2
-#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE 1:0
-#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE (0x00000000U)
-#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ (0x00000001U)
-#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS (0x00000002U)
-
-#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 cmd;
- NvU32 data;
- NvU32 err;
- NvU32 retryTimeMs;
- NvU32 eightLaneDpcdBaseAddr;
-} NV0073_CTRL_DP_CTRL_PARAMS;
-
-#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT 0:0
-#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_SET_LINK_BW 1:1
-#define NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD 2:2
-#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_UNUSED 3:3
-#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE 4:4
-#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM (0x00000001U)
-#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING 5:5
-#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING 6:6
-#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING 7:7
-#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING 8:8
-#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT (0x00000000U)
-#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING 9:9
-#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED 10:10
-#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING 12:11
-#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION (0x00000001U)
-#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON (0x00000002U)
-#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER 13:13
-#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG 14:14
-#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_ENABLE_FEC 15:15
-#define NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE (0x00000001U)
-
-#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST 29:29
-#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE 30:30
-#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG 31:31
-#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE (0x00000001U)
-
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT 4:0
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0 (0x00000000U)
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1 (0x00000001U)
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2 (0x00000002U)
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4 (0x00000004U)
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8 (0x00000008U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW 15:8
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS (0x00000006U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_16GBPS (0x00000008U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_43GBPS (0x00000009U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS (0x0000000AU)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_3_24GBPS (0x0000000CU)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_4_32GBPS (0x00000010U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS (0x00000014U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS (0x0000001EU)
-#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING 18:18
-#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_NO (0x00000000U)
-#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_YES (0x00000001U)
-#define NV0073_CTRL_DP_DATA_TARGET 22:19
-#define NV0073_CTRL_DP_DATA_TARGET_SINK (0x00000000U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_0 (0x00000001U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_1 (0x00000002U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_2 (0x00000003U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_3 (0x00000004U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_4 (0x00000005U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_5 (0x00000006U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_6 (0x00000007U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_7 (0x00000008U)
-
-#define NV0073_CTRL_MAX_LANES 8U
-
-typedef struct NV0073_CTRL_DP_LANE_DATA_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 numLanes;
- NvU32 data[NV0073_CTRL_MAX_LANES];
-} NV0073_CTRL_DP_LANE_DATA_PARAMS;
-
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS 1:0
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE (0x00000000U)
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1 (0x00000001U)
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2 (0x00000002U)
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3 (0x00000003U)
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT 3:2
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 (0x00000000U)
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 (0x00000001U)
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 (0x00000002U)
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 (0x00000003U)
-
-#define NV0073_CTRL_CMD_DP_SET_LANE_DATA (0x731346U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_LANE_DATA_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 mute;
-} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID (0x73135bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 preferredDisplayId;
-
- NvBool force;
- NvBool useBFM;
-
- NvU32 displayIdAssigned;
- NvU32 allDisplayMask;
-} NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID (0x73135cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
-} NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_CONFIG_STREAM (0x731362U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 head;
- NvU32 sorIndex;
- NvU32 dpLink;
-
- NvBool bEnableOverride;
- NvBool bMST;
- NvU32 singleHeadMultistreamMode;
- NvU32 hBlankSym;
- NvU32 vBlankSym;
- NvU32 colorFormat;
- NvBool bEnableTwoHeadOneOr;
-
- struct {
- NvU32 slotStart;
- NvU32 slotEnd;
- NvU32 PBN;
- NvU32 Timeslice;
- NvBool sendACT; // deprecated -Use NV0073_CTRL_CMD_DP_SEND_ACT
- NvU32 singleHeadMSTPipeline;
- NvBool bEnableAudioOverRightPanel;
- } MST;
-
- struct {
- NvBool bEnhancedFraming;
- NvU32 tuSize;
- NvU32 waterMark;
- NvU32 actualPclkHz; // deprecated -Use MvidWarParams
- NvU32 linkClkFreqHz; // deprecated -Use MvidWarParams
- NvBool bEnableAudioOverRightPanel;
- struct {
- NvU32 activeCnt;
- NvU32 activeFrac;
- NvU32 activePolarity;
- NvBool mvidWarEnabled;
- struct {
- NvU32 actualPclkHz;
- NvU32 linkClkFreqHz;
- } MvidWarParams;
- } Legacy;
- } SST;
-} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS {
- NvU32 subDeviceInstance;
-} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
-
-typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 sorIndex;
- NvU32 maxLinkRate;
- NvU32 dpVersionsSupported;
- NvU32 UHBRSupported;
- NvBool bIsMultistreamSupported;
- NvBool bIsSCEnabled;
- NvBool bHasIncreasedWatermarkLimits;
- NvBool bIsPC2Disabled;
- NvBool isSingleHeadMSTSupported;
- NvBool bFECSupported;
- NvBool bIsTrainPhyRepeater;
- NvBool bOverrideLinkBw;
- NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC;
-} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U)
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U)
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U)
-
-#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U
-
-typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS {
- // In
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
-
- // Out
- NvU8 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
- NvU8 linkBwCount;
-} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS;
-
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE 3:0
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN (0x00000000U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK (0x00000002U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN (0x00000003U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE (0x00000004U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK (0x00000005U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR (0x00000006U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_INIT_PUBLIC_INFO (0x00000007U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_GET_PUBLIC_INFO (0x00000008U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_STATUS_CHECK (0x00000009U)
-
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK (0x00000000U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING (0x80000001U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR (0x80000002U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_WRITE_ERROR (0x80000003U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_DEVICE_ERROR (0x80000004U)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
deleted file mode 100644
index 954958dcf834..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
+++ /dev/null
@@ -1,216 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073specific_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073specific_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 (0x730245U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES 2048U
-
-typedef struct NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 bufferSize;
- NvU32 flags;
- NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES];
-} NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_MAX_CONNECTORS 4U
-
-typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 flags;
- NvU32 DDCPartners;
- NvU32 count;
- struct {
- NvU32 index;
- NvU32 type;
- NvU32 location;
- } data[NV0073_CTRL_MAX_CONNECTORS];
- NvU32 platform;
-} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE (0x730273U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS {
- NvU8 subDeviceInstance;
- NvU32 displayId;
- NvU8 enable;
-} NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM (0x730275U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS {
- NvU8 subDeviceInstance;
- NvU32 displayId;
- NvU8 mute;
-} NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK (0x730287U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 headMask;
-} NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET (0x730288U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_SET_OD_MAX_PACKET_SIZE 36U
-
-typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 transmitControl;
- NvU32 packetSize;
- NvU32 targetHead;
- NvBool bUsePsrHeadforSdp;
- NvU8 aPacket[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE];
-} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS;
-
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE 0:0
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME 1:1
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME 2:2
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK 3:3
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE 4:4
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_DISABLE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_ENABLE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT 5:5
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY 6:6
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING 7:7
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE 9:8
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME0 (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME1 (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE 31:31
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES (0x0000001U)
-
-#define NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (0x73028bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 index;
- NvU32 type;
- NvU32 protocol;
- NvU32 ditherType;
- NvU32 ditherAlgo;
- NvU32 location;
- NvU32 rootPortId;
- NvU32 dcbIndex;
- NV_DECLARE_ALIGNED(NvU64 vbiosAddress, 8);
- NvBool bIsLitByVbios;
- NvBool bIsDispDynamic;
-} NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS;
-
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_NONE (0x00000000U)
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_DAC (0x00000001U)
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_SOR (0x00000002U)
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR (0x00000003U)
-
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_DSI (0x00000005U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT (0x00000000U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS (0x00000005U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A (0x00000008U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B (0x00000009U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DSI (0x00000010U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI (0x00000011U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC (0x00000000U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN (0xFFFFFFFFU)
-
-#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 brightness;
- NvBool bUncalibrated;
-} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 caps;
-} NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED 0:0
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_FALSE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_TRUE (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED 1:1
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_FALSE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_TRUE (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED 2:2
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_FALSE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED 5:3
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED 6:6
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_FALSE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_TRUE (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED 9:7
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
deleted file mode 100644
index d69cef3c01fd..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073system_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073system_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 flags;
- NvU32 numHeads;
-} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS;
-
-#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayMask;
- NvU32 displayMaskDDC;
-} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
-
-#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730122U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 flags;
- NvU32 displayMask;
- NvU32 retryTimeMs;
-} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
-
-#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x730126U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 head;
- NvU32 flags;
- NvU32 displayId;
-} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
-
-#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
deleted file mode 100644
index 3db099e62364..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gpu_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gpu_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
- NvU32 totalVFs;
- NvU32 firstVfOffset;
- NvU32 vfFeatureMask;
- NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
- NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
- NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
- NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
- NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
- NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
- NvBool b64bitBar0;
- NvBool b64bitBar1;
- NvBool b64bitBar2;
- NvBool bSriovEnabled;
- NvBool bSriovHeavyEnabled;
- NvBool bEmulateVFBar0TlbInvalidationRegister;
- NvBool bClientRmAllocatedCtxBuffer;
-} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
deleted file mode 100644
index ed01df925573..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gr_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gr_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
deleted file mode 100644
index b5b7631de99b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080bios_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080bios_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
- NvU32 BoardID;
- char chipSKU[4];
- char chipSKUMod[2];
- char project[5];
- char projectSKU[5];
- char CDP[6];
- char projectSKUMod[2];
- NvU32 businessCycle;
-} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
deleted file mode 100644
index fe912d2bd183..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080ce_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080ce_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS {
- NvU32 size;
-} NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS;
-
-#define NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE (0x20802a08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
deleted file mode 100644
index 87bc4ff92ce1..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080event_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080event_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
- NvU32 event;
- NvU32 action;
- NvBool bNotifyState;
- NvU32 info32;
- NvU16 info16;
-} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS;
-
-#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
deleted file mode 100644
index 68c81f9f803c..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fb_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fb_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U
-
-typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
-
-typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
- NV_DECLARE_ALIGNED(NvU64 base, 8);
- NV_DECLARE_ALIGNED(NvU64 limit, 8);
- NV_DECLARE_ALIGNED(NvU64 reserved, 8);
- NvU32 performance;
- NvBool supportCompressed;
- NvBool supportISO;
- NvBool bProtected;
- NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList;
-} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO;
-
-#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U
-
-typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
- NvU32 numFBRegions;
- NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8);
-} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
deleted file mode 100644
index bc0f63699b06..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fifo_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fifo_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE (0x20801112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */
-
-#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES 32
-#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES 16
-#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA 2
-#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN 16
-
-typedef struct NV2080_CTRL_FIFO_DEVICE_ENTRY {
- NvU32 engineData[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES];
- NvU32 pbdmaIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
- NvU32 pbdmaFaultIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
- NvU32 numPbdmas;
- char engineName[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN];
-} NV2080_CTRL_FIFO_DEVICE_ENTRY;
-
-typedef struct NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS {
- NvU32 baseIndex;
- NvU32 numEntries;
- NvBool bMore;
- // C form: NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
- NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
-} NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS;
-
-#endif
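
The baseIndex/bMore pair implies a paging protocol: repeat the control, advancing baseIndex by the number of entries returned, until bMore reads false. A hedged sketch of that loop, where issue_ctrl() is a hypothetical stand-in for the real RM control dispatch:

    #include <stdint.h>

    typedef uint32_t NvU32;   /* stand-ins for the nvtypes.h typedefs */
    typedef uint8_t  NvBool;

    struct table_params {     /* leading fields of ..._DEVICE_INFO_TABLE_PARAMS */
        NvU32  baseIndex;
        NvU32  numEntries;
        NvBool bMore;
        /* NV2080_CTRL_FIFO_DEVICE_ENTRY entries[32] elided in this sketch */
    };

    /* issue_ctrl() is hypothetical: it fills *p for the given p->baseIndex
     * and returns 0 on success. */
    static void fetch_all(int (*issue_ctrl)(struct table_params *p))
    {
        struct table_params p = { .baseIndex = 0 };

        do {
            if (issue_ctrl(&p) != 0)
                break;                    /* control failed; stop paging */
            /* ... consume p.numEntries entries here ... */
            p.baseIndex += p.numEntries;  /* next page starts after this one */
        } while (p.bMore);
    }
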
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
deleted file mode 100644
index 29d7a1052142..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
+++ /dev/null
@@ -1,100 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gpu_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gpu_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U)
-
-#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0 (0x00000000U)
-
-#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U)
-
-typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY {
- NV_DECLARE_ALIGNED(NvU64 gpuPhysAddr, 8);
- NV_DECLARE_ALIGNED(NvU64 gpuVirtAddr, 8);
- NV_DECLARE_ALIGNED(NvU64 size, 8);
- NvU32 physAttr;
- NvU16 bufferId;
- NvU8 bInitialize;
- NvU8 bNonmapped;
-} NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY;
-
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN 0U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM 1U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH 2U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB 3U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL 4U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB 5U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL 6U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL 7U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK 8U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT 9U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP 10U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP 11U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP 12U
-
-#define NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES 16U
-
-#define NV2080_CTRL_CMD_GPU_PROMOTE_CTX (0x2080012bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS {
- NvU32 engineType;
- NvHandle hClient;
- NvU32 ChID;
- NvHandle hChanClient;
- NvHandle hObject;
- NvHandle hVirtMemory;
- NV_DECLARE_ALIGNED(NvU64 virtAddress, 8);
- NV_DECLARE_ALIGNED(NvU64 size, 8);
- NvU32 entryCount;
- // C form: NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES];
- NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES], 8);
-} NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS;
-
-typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
- NvU32 gpcMask;
-} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
-
-typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
- NvU32 gpcId;
- NvU32 tpcMask;
-} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
-
-typedef struct NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS {
- NvU32 gpcId;
- NvU32 zcullMask;
-} NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS;
-
-#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL)
-
-typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
- NvU32 index;
- NvU32 flags;
- NvU32 length;
- NvU8 data[NV2080_GPU_MAX_GID_LENGTH];
-} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
-
-#endif
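
A sketch of staging one context buffer for promotion via the entry structure above. The reading of bInitialize and bNonmapped here (byte-sized booleans: initialize the contents, no GPU VA supplied) is a plausible interpretation, not confirmed by the header:

    #include <stdint.h>

    typedef uint64_t NvU64;   /* stand-ins for the nvtypes.h typedefs */
    typedef uint32_t NvU32;
    typedef uint16_t NvU16;
    typedef uint8_t  NvU8;

    struct promote_entry {    /* mirrors NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY */
        NvU64 gpuPhysAddr;
        NvU64 gpuVirtAddr;
        NvU64 size;
        NvU32 physAttr;
        NvU16 bufferId;
        NvU8  bInitialize;
        NvU8  bNonmapped;
    };

    /* Describe the PM buffer by physical address only (no GPU mapping). */
    static void stage_pm_buffer(struct promote_entry *e, NvU64 pa, NvU64 size)
    {
        e->gpuPhysAddr = pa;
        e->gpuVirtAddr = 0;
        e->size        = size;
        e->physAttr    = 0;   /* aperture/cacheability bits, left default here */
        e->bufferId    = 1;   /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM */
        e->bInitialize = 1;   /* ask RM to initialize the buffer contents */
        e->bNonmapped  = 1;   /* no GPU virtual address supplied */
    }
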
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
deleted file mode 100644
index 59f8895bc5d7..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gr_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gr_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS {
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_MAIN = 0,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SPILL = 1,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_PAGEPOOL = 2,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_BETACB = 3,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_RTV = 4,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL = 5,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL = 6,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL_CPU = 7,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END = 8,
-} NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
deleted file mode 100644
index e11b2dbe5288..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
+++ /dev/null
@@ -1,162 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080internal_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080internal_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
- NvU32 feHwSysCap;
- NvU32 windowPresentMask;
- NvBool bFbRemapperEnabled;
- NvU32 numHeads;
- NvBool bPrimaryVga;
- NvU32 i2cPort;
- NvU32 internalDispActiveMask;
-} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
-
-#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8
-
-#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x19
-
-typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO {
- NvU32 size;
- NvU32 alignment;
-} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
-
-typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO {
- NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT];
-} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO;
-
-typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS {
- NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
-} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO {
- NvU32 engDesc;
- NvU32 ctxAttr;
- NvU32 ctxBufferSize;
- NvU32 addrSpaceList;
- NvU32 registerBase;
-} NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO;
-#define NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS 0x40
-
-#define NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO (0x20800a42) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
- NvU32 numConstructedFalcons;
- NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS];
-} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS {
- NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8);
- NV_DECLARE_ALIGNED(NvU64 instMemSize, 8);
- NvU32 instMemAddrSpace;
- NvU32 instMemCpuCacheAttr;
-} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
- NvU32 addressSpace;
- NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
- NV_DECLARE_ALIGNED(NvU64 limit, 8);
- NvU32 cacheSnoop;
- NvU32 hclass;
- NvU32 channelInstance;
- NvBool valid;
-} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */
-
-#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE 128
-
-typedef enum NV2080_INTR_CATEGORY {
- NV2080_INTR_CATEGORY_DEFAULT = 0,
- NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE = 1,
- NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE_NOTIFICATION = 2,
- NV2080_INTR_CATEGORY_RUNLIST = 3,
- NV2080_INTR_CATEGORY_RUNLIST_NOTIFICATION = 4,
- NV2080_INTR_CATEGORY_UVM_OWNED = 5,
- NV2080_INTR_CATEGORY_UVM_SHARED = 6,
- NV2080_INTR_CATEGORY_ENUM_COUNT = 7,
-} NV2080_INTR_CATEGORY;
-
-typedef struct NV2080_INTR_CATEGORY_SUBTREE_MAP {
- NvU8 subtreeStart;
- NvU8 subtreeEnd;
-} NV2080_INTR_CATEGORY_SUBTREE_MAP;
-
-typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY {
- NvU16 engineIdx;
- NvU32 pmcIntrMask;
- NvU32 vectorStall;
- NvU32 vectorNonStall;
-} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY;
-
-typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS {
- NvU32 tableLen;
- NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE];
- NV2080_INTR_CATEGORY_SUBTREE_MAP subtreeMap[NV2080_INTR_CATEGORY_ENUM_COUNT];
-} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS {
- NvU32 fbsrType;
- NvU32 numRegions;
- NvHandle hClient;
- NvHandle hSysMem;
- NV_DECLARE_ALIGNED(NvU64 gspFbAllocsSysOffset, 8);
- NvBool bEnteringGcoffState;
-} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS {
- NvU32 fbsrType;
- NvHandle hClient;
- NvHandle hVidMem;
- NV_DECLARE_ALIGNED(NvU64 vidOffset, 8);
- NV_DECLARE_ALIGNED(NvU64 sysOffset, 8);
- NV_DECLARE_ALIGNED(NvU64 size, 8);
-} NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD (0x20800ac6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID" */
-
-#define NV2080_CTRL_ACPI_DSM_READ_SIZE (0x1000) /* finn: Evaluated from "(4 * 1024)" */
-
-typedef struct NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS {
- NvU32 status;
- NvU16 backLightDataSize;
- NvU8 backLightData[NV2080_CTRL_ACPI_DSM_READ_SIZE];
-} NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS;
-
-#endif
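
A sketch of walking the interrupt table returned by control 0x20800a5c; tableLen bounds the valid prefix and is clamped to the fixed array size (typedefs approximated):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t NvU32;   /* stand-ins for the nvtypes.h typedefs */
    typedef uint16_t NvU16;

    #define MAX_TABLE 128     /* NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE */

    struct intr_entry {       /* mirrors ..._INTR_GET_KERNEL_TABLE_ENTRY */
        NvU16 engineIdx;
        NvU32 pmcIntrMask;
        NvU32 vectorStall;
        NvU32 vectorNonStall;
    };

    static void dump_vectors(const struct intr_entry *t, NvU32 len)
    {
        if (len > MAX_TABLE)
            len = MAX_TABLE;  /* clamp: never walk past the fixed array */
        for (NvU32 i = 0; i < len; i++)
            printf("engine %u: stall vector %u, nonstall vector %u\n",
                   (unsigned)t[i].engineIdx, (unsigned)t[i].vectorStall,
                   (unsigned)t[i].vectorNonStall);
    }
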
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
deleted file mode 100644
index 977e59818533..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
+++ /dev/null
@@ -1,95 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl90f1_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl90f1_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define GMMU_FMT_MAX_LEVELS 6U
-
-#define NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES (0x90f10106U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID" */
-
-typedef struct NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS {
- /*!
- * [in] GPU sub-device handle - this API only supports unicast.
- * Pass 0 to use subDeviceId instead.
- */
- NvHandle hSubDevice;
-
- /*!
- * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero.
- */
- NvU32 subDeviceId;
-
- /*!
- * [in] Page size (VA coverage) of the level to reserve.
- * This need not be a leaf (page table) page size - it can be
- * the coverage of an arbitrary level (including root page directory).
- */
- NV_DECLARE_ALIGNED(NvU64 pageSize, 8);
-
- /*!
- * [in] First GPU virtual address of the range to reserve.
- * This must be aligned to pageSize.
- */
- NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8);
-
- /*!
- * [in] Last GPU virtual address of the range to reserve.
- * This (+1) must be aligned to pageSize.
- */
- NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8);
-
- /*!
- * [in] Number of PDE levels to copy.
- */
- NvU32 numLevelsToCopy;
-
- /*!
- * [in] Per-level information.
- */
- struct {
- /*!
- * Physical address of this page level instance.
- */
- NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
-
- /*!
- * Size in bytes allocated for this level instance.
- */
- NV_DECLARE_ALIGNED(NvU64 size, 8);
-
- /*!
- * Aperture in which this page level instance resides.
- */
- NvU32 aperture;
-
- /*!
- * Page shift corresponding to the level
- */
- NvU8 pageShift;
- } levels[GMMU_FMT_MAX_LEVELS];
-} NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS;
-
-#endif
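
The alignment rules documented above are easy to get wrong: virtAddrLo must be pageSize-aligned, while it is virtAddrHi + 1 (the exclusive end of the inclusive range) that must be aligned. A small validation sketch, assuming pageSize is a power of two:

    #include <stdint.h>
    #include <stdbool.h>

    typedef uint64_t NvU64;   /* stand-in for the nvtypes.h typedef */

    /* pageSize is the VA coverage of the level being reserved. */
    static bool pdes_range_ok(NvU64 virtAddrLo, NvU64 virtAddrHi, NvU64 pageSize)
    {
        if (pageSize == 0 || (pageSize & (pageSize - 1)))
            return false;                   /* must be a power of two */
        if (virtAddrLo & (pageSize - 1))
            return false;                   /* start must be aligned */
        if ((virtAddrHi + 1) & (pageSize - 1))
            return false;                   /* end is inclusive: align hi+1 */
        return virtAddrHi > virtAddrLo;
    }
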
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h
deleted file mode 100644
index 684045796232..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrla06f_ctrla06fgpfifo_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrla06f_ctrla06fgpfifo_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NVA06F_CTRL_CMD_GPFIFO_SCHEDULE (0xa06f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID" */
-
-typedef struct NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS {
- NvBool bEnable;
- NvBool bSkipSubmit;
-} NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS;
-
-#define NVA06F_CTRL_CMD_BIND (0xa06f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID" */
-
-typedef struct NVA06F_CTRL_BIND_PARAMS {
- NvU32 engineType;
-} NVA06F_CTRL_BIND_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h
deleted file mode 100644
index 5c5a004a8031..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_nvlimits_h__
-#define __src_common_sdk_nvidia_inc_nvlimits_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV_MAX_SUBDEVICES 8
-
-#define NV_PROC_NAME_MAX_LENGTH 100U
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h
deleted file mode 100644
index 51b5591c603e..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h
+++ /dev/null
@@ -1,148 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_nvos_h__
-#define __src_common_sdk_nvidia_inc_nvos_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NVOS02_FLAGS_PHYSICALITY 7:4
-#define NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS (0x00000000)
-#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS (0x00000001)
-#define NVOS02_FLAGS_LOCATION 11:8
-#define NVOS02_FLAGS_LOCATION_PCI (0x00000000)
-#define NVOS02_FLAGS_LOCATION_AGP (0x00000001)
-#define NVOS02_FLAGS_LOCATION_VIDMEM (0x00000002)
-#define NVOS02_FLAGS_COHERENCY 15:12
-#define NVOS02_FLAGS_COHERENCY_UNCACHED (0x00000000)
-#define NVOS02_FLAGS_COHERENCY_CACHED (0x00000001)
-#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE (0x00000002)
-#define NVOS02_FLAGS_COHERENCY_WRITE_THROUGH (0x00000003)
-#define NVOS02_FLAGS_COHERENCY_WRITE_PROTECT (0x00000004)
-#define NVOS02_FLAGS_COHERENCY_WRITE_BACK (0x00000005)
-#define NVOS02_FLAGS_ALLOC 17:16
-#define NVOS02_FLAGS_ALLOC_NONE (0x00000001)
-#define NVOS02_FLAGS_GPU_CACHEABLE 18:18
-#define NVOS02_FLAGS_GPU_CACHEABLE_NO (0x00000000)
-#define NVOS02_FLAGS_GPU_CACHEABLE_YES (0x00000001)
-
-#define NVOS02_FLAGS_KERNEL_MAPPING 19:19
-#define NVOS02_FLAGS_KERNEL_MAPPING_NO_MAP (0x00000000)
-#define NVOS02_FLAGS_KERNEL_MAPPING_MAP (0x00000001)
-#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY 20:20
-#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_NO (0x00000000)
-#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_YES (0x00000001)
-
-#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY 21:21
-#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO (0x00000000)
-#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES (0x00000001)
-
-#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY 22:22
-#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_NO (0x00000000)
-#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_YES (0x00000001)
-
-#define NVOS02_FLAGS_PEER_MAP_OVERRIDE 23:23
-#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_DEFAULT (0x00000000)
-#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_REQUIRED (0x00000001)
-
-#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT 24:24
-#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT_APERTURE (0x00000001)
-
-#define NVOS02_FLAGS_MEMORY_PROTECTION 26:25
-#define NVOS02_FLAGS_MEMORY_PROTECTION_DEFAULT (0x00000000)
-#define NVOS02_FLAGS_MEMORY_PROTECTION_PROTECTED (0x00000001)
-#define NVOS02_FLAGS_MEMORY_PROTECTION_UNPROTECTED (0x00000002)
-
-#define NVOS02_FLAGS_MAPPING 31:30
-#define NVOS02_FLAGS_MAPPING_DEFAULT (0x00000000)
-#define NVOS02_FLAGS_MAPPING_NO_MAP (0x00000001)
-#define NVOS02_FLAGS_MAPPING_NEVER_MAP (0x00000002)
-
-#define NV01_EVENT_CLIENT_RM (0x04000000)
-
-typedef struct
-{
- NvV32 channelInstance; // One of the n channel instances of a given channel type.
- // Note that core channel has only one instance
- // while all others have two (one per head).
- NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer
- NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
- NvU32 offset; // Initial offset for put/get, usually zero.
- NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
-
- NvU32 flags;
-#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1
-#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000
-#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001
-
-} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvV32 channelInstance; // One of the n channel instances of a given channel type.
- // All PIO channels have two instances (one per head).
- NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors.
- NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of control region for PIO channel
-} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 size;
- NvU32 prohibitMultipleInstances;
- NvU32 engineInstance; // Select NVDEC0 or NVDEC1 or NVDEC2
-} NV_BSP_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 size;
- NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of MSENC?
- NvU32 engineInstance; // Select MSENC/NVENC0 or NVENC1 or NVENC2
-} NV_MSENC_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 size;
- NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of NVJPG?
- NvU32 engineInstance;
-} NV_NVJPG_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 size;
- NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA?
-} NV_OFA_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 index;
- NvV32 flags;
- NvU64 vaSize NV_ALIGN_BYTES(8);
- NvU64 vaStartInternal NV_ALIGN_BYTES(8);
- NvU64 vaLimitInternal NV_ALIGN_BYTES(8);
- NvU32 bigPageSize;
- NvU64 vaBase NV_ALIGN_BYTES(8);
-} NV_VASPACE_ALLOCATION_PARAMETERS;
-
-#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW 0x00 //!< Create new VASpace, by default
-
-#endif
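
The high:low pairs above (e.g. NVOS02_FLAGS_COHERENCY 15:12) are DRF-style bit ranges: the chosen value is shifted into bits low..high of a 32-bit flags word. A sketch of packing such fields by hand, without the SDK's DRF macros; the particular combination shown is only an example:

    #include <stdint.h>

    typedef uint32_t NvU32;   /* stand-in for the nvtypes.h typedef */

    /* Place 'value' into bits [low, high] of *flags (DRF-style encoding). */
    static void set_field(NvU32 *flags, unsigned high, unsigned low, NvU32 value)
    {
        NvU32 mask = (high - low == 31) ? ~0u
                                        : ((1u << (high - low + 1)) - 1) << low;
        *flags = (*flags & ~mask) | ((value << low) & mask);
    }

    /* Example: contiguous video memory with write-combined CPU coherency. */
    static NvU32 example_flags(void)
    {
        NvU32 f = 0;
        set_field(&f,  7,  4, 0x0);  /* PHYSICALITY_CONTIGUOUS  (7:4)   */
        set_field(&f, 11,  8, 0x2);  /* LOCATION_VIDMEM         (11:8)  */
        set_field(&f, 15, 12, 0x2);  /* COHERENCY_WRITE_COMBINE (15:12) */
        return f;
    }
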
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
deleted file mode 100644
index 0e32e71e123f..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
+++ /dev/null
@@ -1,97 +0,0 @@
-#ifndef __src_common_shared_msgq_inc_msgq_msgq_priv_h__
-#define __src_common_shared_msgq_inc_msgq_msgq_priv_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-/**
- * msgqTxHeader -- TX queue data structure
- * @version: the version of this structure, must be 0
- * @size: the size of the entire queue, including this header
- * @msgSize: the padded size of queue element, 16 is minimum
- * @msgCount: the number of elements in this queue
- * @writePtr: head index of this queue
- * @flags: 1 = swap the RX pointers
- * @rxHdrOff: offset of readPtr in this structure
- * @entryOff: offset of beginning of queue (msgqRxHeader), relative to
- * beginning of this structure
- *
- * The command queue is a queue of RPCs that are sent from the driver to the
- * GSP. The status queue is a queue of messages/responses from GSP-RM to the
- * driver. Although the driver allocates memory for both queues, the command
- * queue is owned by the driver and the status queue is owned by GSP-RM. In
- * addition, the headers of the two queues must not share the same 4K page.
- *
- * Each queue is prefixed with this data structure. The idea is that a queue
- * and its header are written to only by their owner. That is, only the
- * driver writes to the command queue and command queue header, and only the
- * GSP writes to the status (receive) queue and its header.
- *
- * This is enforced by the concept of "swapping" the RX pointers. This is
- * why the 'flags' field must be set to 1. 'rxHdrOff' is how the GSP knows
- * where the tail pointer of its status queue is located.
- *
- * When the driver writes a new RPC to the command queue, it updates writePtr.
- * When it reads a new message from the status queue, it updates readPtr. In
- * this way, the GSP knows when a new command is in the queue (it polls
- * writePtr) and it knows how much free space is in the status queue (it
- * checks readPtr). The driver never cares about how much free space is in
- * the status queue.
- *
- * As usual, producers write to the head pointer, and consumers read from the
- * tail pointer. When head == tail, the queue is empty.
- *
- * So to summarize:
- * command.writePtr = head of command queue
- * command.readPtr = tail of status queue
- * status.writePtr = head of status queue
- * status.readPtr = tail of command queue
- */
-typedef struct
-{
- NvU32 version; // queue version
- NvU32 size; // bytes, page aligned
- NvU32 msgSize; // entry size, bytes, must be power-of-2, 16 is minimum
- NvU32 msgCount; // number of entries in queue
- NvU32 writePtr; // message id of next slot
- NvU32 flags; // if set it means "i want to swap RX"
- NvU32 rxHdrOff; // Offset of msgqRxHeader from start of backing store.
- NvU32 entryOff; // Offset of entries from start of backing store.
-} msgqTxHeader;
-
-/**
- * msgqRxHeader - RX queue data structure
- * @readPtr: tail index of the other queue
- *
- * Although this is a separate struct, it could easily be merged into
- * msgqTxHeader. msgqTxHeader.rxHdrOff is simply the offset of readPtr
- * from the beginning of msgqTxHeader.
- */
-typedef struct
-{
- NvU32 readPtr; // message id of last message read
-} msgqRxHeader;
-
-#endif
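
Given the head == tail convention described above, the element counts follow the standard circular-buffer form; the usual convention, assumed here, is that the producer leaves one slot empty so a full queue stays distinguishable from an empty one. A sketch of the two derived quantities, with writePtr and readPtr taken to be in [0, msgCount):

    #include <stdint.h>

    typedef uint32_t NvU32;   /* stand-in for the nvtypes.h typedef */

    /* Entries ready to read: distance from tail to head, modulo msgCount. */
    static NvU32 msgq_used(NvU32 writePtr, NvU32 readPtr, NvU32 msgCount)
    {
        return (writePtr + msgCount - readPtr) % msgCount;
    }

    /* Entries the producer may still write, keeping one slot as a sentinel
     * so that head == tail always means "empty", never "full". */
    static NvU32 msgq_free(NvU32 writePtr, NvU32 readPtr, NvU32 msgCount)
    {
        return msgCount - 1 - msgq_used(writePtr, readPtr, msgCount);
    }
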
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h
deleted file mode 100644
index 83cf1b2c15a3..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef __src_common_uproc_os_common_include_libos_init_args_h__
-#define __src_common_uproc_os_common_include_libos_init_args_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef NvU64 LibosAddress;
-
-typedef enum {
- LIBOS_MEMORY_REGION_NONE,
- LIBOS_MEMORY_REGION_CONTIGUOUS,
- LIBOS_MEMORY_REGION_RADIX3
-} LibosMemoryRegionKind;
-
-typedef enum {
- LIBOS_MEMORY_REGION_LOC_NONE,
- LIBOS_MEMORY_REGION_LOC_SYSMEM,
- LIBOS_MEMORY_REGION_LOC_FB
-} LibosMemoryRegionLoc;
-
-typedef struct
-{
- LibosAddress id8; // Id tag.
- LibosAddress pa; // Physical address.
- LibosAddress size; // Size of memory area.
- NvU8 kind; // See LibosMemoryRegionKind above.
- NvU8 loc; // See LibosMemoryRegionLoc above.
-} LibosMemoryRegionInitArgument;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h
deleted file mode 100644
index 73213bdfcbda..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#ifndef __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_sr_meta_h__
-#define __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_sr_meta_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define GSP_FW_SR_META_MAGIC 0x8a3bb9e6c6c39d93ULL
-#define GSP_FW_SR_META_REVISION 2
-
-typedef struct
-{
- //
- // Magic
- // Use for verification by Booter
- //
- NvU64 magic; // = GSP_FW_SR_META_MAGIC;
-
- //
- // Revision number
- // Bumped up when we change this interface so it is not backward compatible.
- // Bumped up when we revoke GSP-RM ucode
- //
- NvU64 revision; // = GSP_FW_SR_META_REVISION;
-
- //
- // ---- Members regarding data in SYSMEM ----------------------------
- // Consumed by Booter for DMA
- //
- NvU64 sysmemAddrOfSuspendResumeData;
- NvU64 sizeOfSuspendResumeData;
-
- // ---- Members for crypto ops across S/R ---------------------------
-
- //
- // HMAC over the entire GspFwSRMeta structure (including padding)
- // with the hmac field itself zeroed.
- //
- NvU8 hmac[32];
-
- // Hash over GspFwWprMeta structure
- NvU8 wprMetaHash[32];
-
- // Hash over GspFwHeapFreeList structure. All zeros signifies no free list.
- NvU8 heapFreeListHash[32];
-
- // Hash over data in WPR2 (skipping over free heap chunks; see Booter for details)
- NvU8 dataHash[32];
-
- //
- // Pad structure to exactly 256 bytes (1 DMA chunk).
- // Padding initialized to zero.
- //
- NvU32 padding[24];
-
-} GspFwSRMeta;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h
deleted file mode 100644
index a2e141e4b459..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h
+++ /dev/null
@@ -1,170 +0,0 @@
-#ifndef __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_wpr_meta_h__
-#define __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_wpr_meta_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct
-{
- // Magic
- // BL to use for verification (i.e. Booter locked it in WPR2)
- NvU64 magic; // = 0xdc3aae21371a60b3;
-
- // Revision number of Booter-BL-Sequencer handoff interface
- // Bumped up when we change this interface so it is not backward compatible.
- // Bumped up when we revoke GSP-RM ucode
- NvU64 revision; // = 1;
-
- // ---- Members regarding data in SYSMEM ----------------------------
- // Consumed by Booter for DMA
-
- NvU64 sysmemAddrOfRadix3Elf;
- NvU64 sizeOfRadix3Elf;
-
- NvU64 sysmemAddrOfBootloader;
- NvU64 sizeOfBootloader;
-
- // Offsets inside bootloader image needed by Booter
- NvU64 bootloaderCodeOffset;
- NvU64 bootloaderDataOffset;
- NvU64 bootloaderManifestOffset;
-
- union
- {
- // Used only at initial boot
- struct
- {
- NvU64 sysmemAddrOfSignature;
- NvU64 sizeOfSignature;
- };
-
- //
- // Used at suspend/resume to read GspFwHeapFreeList
- // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
- //
- struct
- {
- NvU32 gspFwHeapFreeListWprOffset;
- NvU32 unused0;
- NvU64 unused1;
- };
- };
-
- // ---- Members describing FB layout --------------------------------
- NvU64 gspFwRsvdStart;
-
- NvU64 nonWprHeapOffset;
- NvU64 nonWprHeapSize;
-
- NvU64 gspFwWprStart;
-
- // GSP-RM to use to setup heap.
- NvU64 gspFwHeapOffset;
- NvU64 gspFwHeapSize;
-
- // BL to use to find ELF for jump
- NvU64 gspFwOffset;
- // Size is sizeOfRadix3Elf above.
-
- NvU64 bootBinOffset;
- // Size is sizeOfBootloader above.
-
- NvU64 frtsOffset;
- NvU64 frtsSize;
-
- NvU64 gspFwWprEnd;
-
- // GSP-RM to use for fbRegionInfo?
- NvU64 fbSize;
-
- // ---- Other members -----------------------------------------------
-
- // GSP-RM to use for fbRegionInfo?
- NvU64 vgaWorkspaceOffset;
- NvU64 vgaWorkspaceSize;
-
- // Boot count. Used to determine whether to load the firmware image.
- NvU64 bootCount;
-
- // TODO: the partitionRpc* fields below do not really belong in this
- // structure. The values are patched in by the partition bootstrapper
- // when GSP-RM is booted in a partition, and this structure was a
- // convenient place for the bootstrapper to access them. These should
- // be moved to a different comm. mechanism between the bootstrapper
- // and the GSP-RM tasks.
-
- union
- {
- struct
- {
- // Shared partition RPC memory (physical address)
- NvU64 partitionRpcAddr;
-
- // Offsets relative to partitionRpcAddr
- NvU16 partitionRpcRequestOffset;
- NvU16 partitionRpcReplyOffset;
-
- // Code section and dataSection offset and size.
- NvU32 elfCodeOffset;
- NvU32 elfDataOffset;
- NvU32 elfCodeSize;
- NvU32 elfDataSize;
-
- // Used during GSP-RM resume to check for revocation
- NvU32 lsUcodeVersion;
- };
-
- struct
- {
- // Pad for the partitionRpc* fields, plus 4 bytes
- NvU32 partitionRpcPadding[4];
-
- // CrashCat (contiguous) buffer size/location - occupies same bytes as the
- // elf(Code|Data)(Offset|Size) fields above.
- // TODO: move to GSP_FMC_INIT_PARAMS
- NvU64 sysmemAddrOfCrashReportQueue;
- NvU32 sizeOfCrashReportQueue;
-
- // Pad for the lsUcodeVersion field
- NvU32 lsUcodeVersionPadding[1];
- };
- };
-
- // Number of VF partitions allocating sub-heaps from the WPR heap
- // Used during boot to ensure the heap is adequately sized
- NvU8 gspFwHeapVfPartitionCount;
-
- // Pad structure to exactly 256 bytes. Can replace padding with additional
- // fields without incrementing revision. Padding initialized to 0.
- NvU8 padding[7];
-
- // BL to use for verification (i.e. Booter says OK to boot)
- NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
-} GspFwWprMeta;
-
-#define GSP_FW_WPR_META_REVISION 1
-#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
-
-#endif
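
A sketch of the consistency check a consumer of this structure would perform before trusting any layout field; the constants are copied from the definitions above, and only the two leading fields are modeled:

    #include <stdint.h>
    #include <stdbool.h>

    typedef uint64_t NvU64;   /* stand-in for the nvtypes.h typedef */

    #define GSP_FW_WPR_META_MAGIC    0xdc3aae21371a60b3ULL
    #define GSP_FW_WPR_META_REVISION 1

    struct wpr_meta_hdr { NvU64 magic; NvU64 revision; };  /* leading fields */

    static bool wpr_meta_ok(const struct wpr_meta_hdr *m)
    {
        /* Both fields must match exactly; the revision is bumped on any
         * backward-incompatible change, so newer values are rejected too. */
        return m->magic == GSP_FW_WPR_META_MAGIC &&
               m->revision == GSP_FW_WPR_META_REVISION;
    }
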
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h
deleted file mode 100644
index 4eff473e8990..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h
+++ /dev/null
@@ -1,82 +0,0 @@
-#ifndef __src_nvidia_arch_nvalloc_common_inc_rmRiscvUcode_h__
-#define __src_nvidia_arch_nvalloc_common_inc_rmRiscvUcode_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct {
- //
- // Version 1
- // Version 2
- // Version 3 = for Partition boot
- // Version 4 = for eb riscv boot
- // Version 5 = Support signing entire RISC-V image as "code" in code section for hopper and later.
- //
- NvU32 version; // structure version
- NvU32 bootloaderOffset;
- NvU32 bootloaderSize;
- NvU32 bootloaderParamOffset;
- NvU32 bootloaderParamSize;
- NvU32 riscvElfOffset;
- NvU32 riscvElfSize;
- NvU32 appVersion; // Changelist number associated with the image
- //
- // Manifest contains information about Monitor and it is
- // input to BR
- //
- NvU32 manifestOffset;
- NvU32 manifestSize;
- //
- // Monitor Data offset within RISCV image and size
- //
- NvU32 monitorDataOffset;
- NvU32 monitorDataSize;
- //
- // Monitor Code offset within RISCV image and size
- //
- NvU32 monitorCodeOffset;
- NvU32 monitorCodeSize;
- NvU32 bIsMonitorEnabled;
- //
- // Swbrom Code offset within RISCV image and size
- //
- NvU32 swbromCodeOffset;
- NvU32 swbromCodeSize;
- //
- // Swbrom Data offset within RISCV image and size
- //
- NvU32 swbromDataOffset;
- NvU32 swbromDataSize;
- //
- // Total size of FB carveout (image and reserved space).
- //
- NvU32 fbReservedSize;
- //
- // Indicates whether the entire RISC-V image is signed as "code" in code section.
- //
- NvU32 bSignedAsCode;
-} RM_RISCV_UCODE_DESC;
-
-#endif
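
Every section in this descriptor is an offset/size pair relative to the start of the firmware image, so resolving one reduces to an overflow-safe bounds check. A sketch (the image pointer and its length come from the caller):

    #include <stdint.h>
    #include <stddef.h>

    typedef uint32_t NvU32;   /* stand-in for the nvtypes.h typedef */

    /* Return a pointer to [offset, offset + size) inside the image, or NULL
     * if the section does not lie wholly within it. Written to avoid
     * integer overflow in the bounds test. */
    static const void *ucode_section(const uint8_t *image, size_t image_len,
                                     NvU32 offset, NvU32 size)
    {
        if (offset > image_len || size > image_len - offset)
            return NULL;
        return image + offset;
    }

    /* Usage sketch:
     *   ucode_section(img, len, desc->bootloaderOffset, desc->bootloaderSize);
     *   ucode_section(img, len, desc->riscvElfOffset,   desc->riscvElfSize);
     */
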
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h
deleted file mode 100644
index 341ab0dbeaf2..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h
+++ /dev/null
@@ -1,100 +0,0 @@
-#ifndef __src_nvidia_arch_nvalloc_common_inc_rmgspseq_h__
-#define __src_nvidia_arch_nvalloc_common_inc_rmgspseq_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum GSP_SEQ_BUF_OPCODE
-{
- GSP_SEQ_BUF_OPCODE_REG_WRITE = 0,
- GSP_SEQ_BUF_OPCODE_REG_MODIFY,
- GSP_SEQ_BUF_OPCODE_REG_POLL,
- GSP_SEQ_BUF_OPCODE_DELAY_US,
- GSP_SEQ_BUF_OPCODE_REG_STORE,
- GSP_SEQ_BUF_OPCODE_CORE_RESET,
- GSP_SEQ_BUF_OPCODE_CORE_START,
- GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
- GSP_SEQ_BUF_OPCODE_CORE_RESUME,
-} GSP_SEQ_BUF_OPCODE;
-
-#define GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode) \
- ((opcode == GSP_SEQ_BUF_OPCODE_REG_WRITE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE) / sizeof(NvU32)) : \
- (opcode == GSP_SEQ_BUF_OPCODE_REG_MODIFY) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY) / sizeof(NvU32)) : \
- (opcode == GSP_SEQ_BUF_OPCODE_REG_POLL) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL) / sizeof(NvU32)) : \
- (opcode == GSP_SEQ_BUF_OPCODE_DELAY_US) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US) / sizeof(NvU32)) : \
- (opcode == GSP_SEQ_BUF_OPCODE_REG_STORE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE) / sizeof(NvU32)) : \
- /* GSP_SEQ_BUF_OPCODE_CORE_RESET */ \
- /* GSP_SEQ_BUF_OPCODE_CORE_START */ \
- /* GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT */ \
- /* GSP_SEQ_BUF_OPCODE_CORE_RESUME */ \
- 0)
-
-typedef struct
-{
- NvU32 addr;
- NvU32 val;
-} GSP_SEQ_BUF_PAYLOAD_REG_WRITE;
-
-typedef struct
-{
- NvU32 addr;
- NvU32 mask;
- NvU32 val;
-} GSP_SEQ_BUF_PAYLOAD_REG_MODIFY;
-
-typedef struct
-{
- NvU32 addr;
- NvU32 mask;
- NvU32 val;
- NvU32 timeout;
- NvU32 error;
-} GSP_SEQ_BUF_PAYLOAD_REG_POLL;
-
-typedef struct
-{
- NvU32 val;
-} GSP_SEQ_BUF_PAYLOAD_DELAY_US;
-
-typedef struct
-{
- NvU32 addr;
- NvU32 index;
-} GSP_SEQ_BUF_PAYLOAD_REG_STORE;
-
-typedef struct GSP_SEQUENCER_BUFFER_CMD
-{
- GSP_SEQ_BUF_OPCODE opCode;
- union
- {
- GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite;
- GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify;
- GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll;
- GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs;
- GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore;
- } payload;
-} GSP_SEQUENCER_BUFFER_CMD;
-
-#endif
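
Assuming the sequencer buffer is a packed stream of one opcode dword followed by its payload, which is what the payload-size macro suggests, a stream walker looks like the sketch below; the payload sizes are hard-coded to match the payload structs above, and unknown opcodes abort the walk:

    #include <stdint.h>
    #include <stddef.h>

    typedef uint32_t NvU32;   /* stand-in for the nvtypes.h typedef */

    enum { OP_REG_WRITE, OP_REG_MODIFY, OP_REG_POLL, OP_DELAY_US, OP_REG_STORE,
           OP_CORE_RESET, OP_CORE_START, OP_CORE_WAIT_FOR_HALT, OP_CORE_RESUME };

    /* Payload sizes in dwords, mirroring GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS:
     * the CORE_* opcodes carry no payload at all. */
    static const size_t payload_dw[] = { 2, 3, 5, 1, 2, 0, 0, 0, 0 };

    /* Visit each command; returns 0 on success, -1 on a malformed stream. */
    static int walk_cmds(const NvU32 *buf, size_t len_dw,
                         void (*visit)(NvU32 op, const NvU32 *payload))
    {
        size_t i = 0;

        while (i < len_dw) {
            NvU32 op = buf[i];
            if (op > OP_CORE_RESUME || i + 1 + payload_dw[op] > len_dw)
                return -1;            /* unknown opcode or truncated payload */
            visit(op, &buf[i + 1]);
            i += 1 + payload_dw[op];  /* opcode dword plus its payload */
        }
        return 0;
    }
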
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h
deleted file mode 100644
index 3144e9beac61..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_nvidia_generated_g_allclasses_h__
-#define __src_nvidia_generated_g_allclasses_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007e)
-
-#define NV04_DISPLAY_COMMON (0x00000073)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h
deleted file mode 100644
index 6b8921138c7d..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef __src_nvidia_generated_g_chipset_nvoc_h__
-#define __src_nvidia_generated_g_chipset_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct
-{
- NvU16 deviceID; // deviceID
- NvU16 vendorID; // vendorID
- NvU16 subdeviceID; // subsystem deviceID
- NvU16 subvendorID; // subsystem vendorID
- NvU8 revisionID; // revision ID
-} BUSINFO;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h
deleted file mode 100644
index a5128f00225b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __src_nvidia_generated_g_fbsr_nvoc_h__
-#define __src_nvidia_generated_g_fbsr_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define FBSR_TYPE_DMA 4 // Copy using DMA. Fastest.
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h
deleted file mode 100644
index 5641a21cacca..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __src_nvidia_generated_g_gpu_nvoc_h__
-#define __src_nvidia_generated_g_gpu_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum
-{
- COMPUTE_BRANDING_TYPE_NONE,
- COMPUTE_BRANDING_TYPE_TESLA,
-} COMPUTE_BRANDING_TYPE;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h
deleted file mode 100644
index b5ad55f854dc..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef __src_nvidia_generated_g_kernel_channel_nvoc_h__
-#define __src_nvidia_generated_g_kernel_channel_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum {
- /*!
- * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by
- * kernel CPU-RM clients.
- */
- ERROR_NOTIFIER_TYPE_UNKNOWN = 0,
- /*! @brief Error notifier is explicitly not set.
- *
- * The corresponding hErrorContext or hEccErrorContext must be
- * NV01_NULL_OBJECT.
- */
- ERROR_NOTIFIER_TYPE_NONE,
- /*! @brief Error notifier is a ContextDma */
- ERROR_NOTIFIER_TYPE_CTXDMA,
- /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */
- ERROR_NOTIFIER_TYPE_MEMORY
-} ErrorNotifierType;
-
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
-
-#endif
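
A note on the field defines above: values such as 1:0 or 3:2 follow the RM
convention of writing a flag field as high:low bit positions. Such a define is
consumed by DRF-style macros that pick each bound out with a ternary trick. A
hedged sketch of that mechanism (the DRF_* names here are illustrative
stand-ins, not RM's actual nvmisc.h macros):

    /* Illustrative only: pack/unpack a `hi:lo` range define. */
    #define DRF_HI(range)     (1 ? range)  /* `1 ? 1:0` yields 1 (hi) */
    #define DRF_LO(range)     (0 ? range)  /* `0 ? 1:0` yields 0 (lo) */
    #define DRF_MASK(range)   ((0xffffffffU >> (31 - DRF_HI(range) + DRF_LO(range))) << DRF_LO(range))
    #define DRF_NUM(range, n) (((n) << DRF_LO(range)) & DRF_MASK(range))
    #define DRF_VAL(range, v) (((v) & DRF_MASK(range)) >> DRF_LO(range))

    /* e.g. pack ADMIN privilege into the low field of `flags`:
     * flags |= DRF_NUM(NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE,
     *                  NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN);
     */
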
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h
deleted file mode 100644
index 946954ac5b3d..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h
+++ /dev/null
@@ -1,119 +0,0 @@
-#ifndef __src_nvidia_generated_g_kernel_fifo_nvoc_h__
-#define __src_nvidia_generated_g_kernel_fifo_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum
-{
- /* *************************************************************************
- * Bug 3820969
- * THINK BEFORE CHANGING ENUM ORDER HERE.
- * VGPU-guest uses this same ordering. Because this enum is not versioned,
- * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
- * ************************************************************************/
-
- // *ENG_XYZ, e.g.: ENG_GR, ENG_CE etc.,
- ENGINE_INFO_TYPE_ENG_DESC = 0,
-
- // HW engine ID
- ENGINE_INFO_TYPE_FIFO_TAG,
-
- // RM_ENGINE_TYPE_*
- ENGINE_INFO_TYPE_RM_ENGINE_TYPE,
-
- //
- // runlist id (meaning varies by GPU)
- // Valid only for Esched-driven engines
- //
- ENGINE_INFO_TYPE_RUNLIST,
-
- // NV_PFIFO_INTR_MMU_FAULT_ENG_ID_*
- ENGINE_INFO_TYPE_MMU_FAULT_ID,
-
- // ROBUST_CHANNEL_*
- ENGINE_INFO_TYPE_RC_MASK,
-
- // Reset Bit Position. On Ampere, only valid if not _INVALID
- ENGINE_INFO_TYPE_RESET,
-
- // Interrupt Bit Position
- ENGINE_INFO_TYPE_INTR,
-
- // log2(MC_ENGINE_*)
- ENGINE_INFO_TYPE_MC,
-
- // The DEV_TYPE_ENUM for this engine
- ENGINE_INFO_TYPE_DEV_TYPE_ENUM,
-
- // The particular instance of this engine type
- ENGINE_INFO_TYPE_INSTANCE_ID,
-
- //
- // The base address for this engine's NV_RUNLIST. Valid only on Ampere+
- // Valid only for Esched-driven engines
- //
- ENGINE_INFO_TYPE_RUNLIST_PRI_BASE,
-
- //
- // If this entry is a host-driven engine.
- // Update _isEngineInfoTypeValidForOnlyHostDriven when adding any new entry.
- //
- ENGINE_INFO_TYPE_IS_HOST_DRIVEN_ENGINE,
-
- //
- // The index into the per-engine NV_RUNLIST registers. Valid only on Ampere+
- // Valid only for Esched-driven engines
- //
- ENGINE_INFO_TYPE_RUNLIST_ENGINE_ID,
-
- //
- // The base address for this engine's NV_CHRAM registers. Valid only on
- // Ampere+
- //
- // Valid only for Esched-driven engines
- //
- ENGINE_INFO_TYPE_CHRAM_PRI_BASE,
-
- // This entry added to copy data at RMCTRL_EXPORT() call for Kernel RM
- ENGINE_INFO_TYPE_KERNEL_RM_MAX,
- // Used for iterating the engine info table by the index passed.
- ENGINE_INFO_TYPE_INVALID = ENGINE_INFO_TYPE_KERNEL_RM_MAX,
-
- // Size of FIFO_ENGINE_LIST.engineData
- ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE = ENGINE_INFO_TYPE_INVALID,
-
- // Input-only parameter for kfifoEngineInfoXlate.
- ENGINE_INFO_TYPE_PBDMA_ID
-
- /* *************************************************************************
- * Bug 3820969
- * THINK BEFORE CHANGING ENUM ORDER HERE.
- * VGPU-guest uses this same ordering. Because this enum is not versioned,
- * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
- * ************************************************************************/
-} ENGINE_INFO_TYPE;
-
-#endif
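
The enum above names the columns of RM's per-engine info table, and the
kfifoEngineInfoXlate() mentioned in its comments translates a value in one
column to the matching value in another column of the same engine's row. A
rough sketch of that idea (the row type and lookup below are illustrative
only; RM's real FIFO_ENGINE_LIST layout is not shown in this header):

    /* Illustrative: find the row whose `in_type` column equals `in_val`
     * and return its `out_type` column. Both types must be below
     * ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE (PBDMA_ID is input-only).
     */
    struct eng_row { NvU32 data[ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE]; };

    static int eng_info_xlate(const struct eng_row *rows, int nr_rows,
                              ENGINE_INFO_TYPE in_type, NvU32 in_val,
                              ENGINE_INFO_TYPE out_type, NvU32 *out_val)
    {
            for (int i = 0; i < nr_rows; i++) {
                    if (rows[i].data[in_type] == in_val) {
                            *out_val = rows[i].data[out_type];
                            return 0;
                    }
            }
            return -1; /* no such engine */
    }
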
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h
deleted file mode 100644
index daabaee41c87..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __src_nvidia_generated_g_mem_desc_nvoc_h__
-#define __src_nvidia_generated_g_mem_desc_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define ADDR_SYSMEM 1 // System memory (PCI)
-#define ADDR_FBMEM 2 // Frame buffer memory space
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
deleted file mode 100644
index 10121218f4d3..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef __src_nvidia_generated_g_os_nvoc_h__
-#define __src_nvidia_generated_g_os_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct PACKED_REGISTRY_ENTRY
-{
- NvU32 nameOffset;
- NvU8 type;
- NvU32 data;
- NvU32 length;
-} PACKED_REGISTRY_ENTRY;
-
-typedef struct PACKED_REGISTRY_TABLE
-{
- NvU32 size;
- NvU32 numEntries;
- PACKED_REGISTRY_ENTRY entries[] __counted_by(numEntries);
-} PACKED_REGISTRY_TABLE;
-
-#endif
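
The packed registry above is a size/count header followed by numEntries
fixed-size entries; each entry's nameOffset is a byte offset from the start
of the table to its key name, so the names live in a string pool after the
entry array. A sizing sketch under that assumption (the helper itself is
hypothetical):

    /* Sketch: bytes for a table of `n` entries whose key names occupy
     * `names_len` bytes in total (including NUL terminators), assuming
     * the layout header -> entry array -> string pool.
     */
    static NvU32 registry_table_bytes(NvU32 n, NvU32 names_len)
    {
            return sizeof(PACKED_REGISTRY_TABLE) +
                   n * sizeof(PACKED_REGISTRY_ENTRY) +
                   names_len;
    }
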
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h
deleted file mode 100644
index 8d925e24faea..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h
+++ /dev/null
@@ -1,124 +0,0 @@
-#ifndef __src_nvidia_generated_g_rpc_structures_h__
-#define __src_nvidia_generated_g_rpc_structures_h__
-#include <nvrm/535.113.01/nvidia/generated/g_sdk-structures.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct rpc_alloc_memory_v13_01
-{
- NvHandle hClient;
- NvHandle hDevice;
- NvHandle hMemory;
- NvU32 hClass;
- NvU32 flags;
- NvU32 pteAdjust;
- NvU32 format;
- NvU64 length NV_ALIGN_BYTES(8);
- NvU32 pageCount;
- struct pte_desc pteDesc;
-} rpc_alloc_memory_v13_01;
-
-typedef struct rpc_free_v03_00
-{
- NVOS00_PARAMETERS_v03_00 params;
-} rpc_free_v03_00;
-
-typedef struct rpc_unloading_guest_driver_v1F_07
-{
- NvBool bInPMTransition;
- NvBool bGc6Entering;
- NvU32 newLevel;
-} rpc_unloading_guest_driver_v1F_07;
-
-typedef struct rpc_update_bar_pde_v15_00
-{
- UpdateBarPde_v15_00 info;
-} rpc_update_bar_pde_v15_00;
-
-typedef struct rpc_gsp_rm_alloc_v03_00
-{
- NvHandle hClient;
- NvHandle hParent;
- NvHandle hObject;
- NvU32 hClass;
- NvU32 status;
- NvU32 paramsSize;
- NvU32 flags;
- NvU8 reserved[4];
- NvU8 params[];
-} rpc_gsp_rm_alloc_v03_00;
-
-typedef struct rpc_gsp_rm_control_v03_00
-{
- NvHandle hClient;
- NvHandle hObject;
- NvU32 cmd;
- NvU32 status;
- NvU32 paramsSize;
- NvU32 flags;
- NvU8 params[];
-} rpc_gsp_rm_control_v03_00;
-
-typedef struct rpc_run_cpu_sequencer_v17_00
-{
- NvU32 bufferSizeDWord;
- NvU32 cmdIndex;
- NvU32 regSaveArea[8];
- NvU32 commandBuffer[];
-} rpc_run_cpu_sequencer_v17_00;
-
-typedef struct rpc_post_event_v17_00
-{
- NvHandle hClient;
- NvHandle hEvent;
- NvU32 notifyIndex;
- NvU32 data;
- NvU16 info16;
- NvU32 status;
- NvU32 eventDataSize;
- NvBool bNotifyList;
- NvU8 eventData[];
-} rpc_post_event_v17_00;
-
-typedef struct rpc_rc_triggered_v17_02
-{
- NvU32 nv2080EngineType;
- NvU32 chid;
- NvU32 exceptType;
- NvU32 scope;
- NvU16 partitionAttributionId;
-} rpc_rc_triggered_v17_02;
-
-typedef struct rpc_os_error_log_v17_00
-{
- NvU32 exceptType;
- NvU32 runlistId;
- NvU32 chid;
- char errString[0x100];
-} rpc_os_error_log_v17_00;
-
-#endif
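
Several of the RPC structs above end in a flexible array member (params[],
eventData[], commandBuffer[]), so a message's wire size is the fixed struct
size plus the trailing payload described by its size field. For example, for
a GSP RM control RPC (a trivial sketch; the helper name is hypothetical):

    /* Sketch: total bytes occupied by one gsp_rm_control message. */
    static NvU32 rpc_gsp_rm_control_bytes(const rpc_gsp_rm_control_v03_00 *rpc)
    {
            return sizeof(*rpc) + rpc->paramsSize;
    }
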
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h
deleted file mode 100644
index e9fed4140468..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef __src_nvidia_generated_g_sdk_structures_h__
-#define __src_nvidia_generated_g_sdk_structures_h__
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NVOS00_PARAMETERS_v03_00
-{
- NvHandle hRoot;
- NvHandle hObjectParent;
- NvHandle hObjectOld;
- NvV32 status;
-} NVOS00_PARAMETERS_v03_00;
-
-typedef struct UpdateBarPde_v15_00
-{
- NV_RPC_UPDATE_PDE_BAR_TYPE barType;
- NvU64 entryValue NV_ALIGN_BYTES(8);
- NvU64 entryLevelShift NV_ALIGN_BYTES(8);
-} UpdateBarPde_v15_00;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h
deleted file mode 100644
index af50b11ec3b4..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h
+++ /dev/null
@@ -1,74 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gpu_acpi_data_h__
-#define __src_nvidia_inc_kernel_gpu_gpu_acpi_data_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct DOD_METHOD_DATA
-{
- NV_STATUS status;
- NvU32 acpiIdListLen;
- NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
-} DOD_METHOD_DATA;
-
-typedef struct JT_METHOD_DATA
-{
- NV_STATUS status;
- NvU32 jtCaps;
- NvU16 jtRevId;
- NvBool bSBIOSCaps;
-} JT_METHOD_DATA;
-
-typedef struct MUX_METHOD_DATA_ELEMENT
-{
- NvU32 acpiId;
- NvU32 mode;
- NV_STATUS status;
-} MUX_METHOD_DATA_ELEMENT;
-
-typedef struct MUX_METHOD_DATA
-{
- NvU32 tableLen;
- MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
- MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
-} MUX_METHOD_DATA;
-
-typedef struct CAPS_METHOD_DATA
-{
- NV_STATUS status;
- NvU32 optimusCaps;
-} CAPS_METHOD_DATA;
-
-typedef struct ACPI_METHOD_DATA
-{
- NvBool bValid;
- DOD_METHOD_DATA dodMethodData;
- JT_METHOD_DATA jtMethodData;
- MUX_METHOD_DATA muxMethodData;
- CAPS_METHOD_DATA capsMethodData;
-} ACPI_METHOD_DATA;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h
deleted file mode 100644
index e3160c60036d..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h
+++ /dev/null
@@ -1,86 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gpu_engine_type_h__
-#define __src_nvidia_inc_kernel_gpu_gpu_engine_type_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum
-{
- RM_ENGINE_TYPE_NULL = (0x00000000),
- RM_ENGINE_TYPE_GR0 = (0x00000001),
- RM_ENGINE_TYPE_GR1 = (0x00000002),
- RM_ENGINE_TYPE_GR2 = (0x00000003),
- RM_ENGINE_TYPE_GR3 = (0x00000004),
- RM_ENGINE_TYPE_GR4 = (0x00000005),
- RM_ENGINE_TYPE_GR5 = (0x00000006),
- RM_ENGINE_TYPE_GR6 = (0x00000007),
- RM_ENGINE_TYPE_GR7 = (0x00000008),
- RM_ENGINE_TYPE_COPY0 = (0x00000009),
- RM_ENGINE_TYPE_COPY1 = (0x0000000a),
- RM_ENGINE_TYPE_COPY2 = (0x0000000b),
- RM_ENGINE_TYPE_COPY3 = (0x0000000c),
- RM_ENGINE_TYPE_COPY4 = (0x0000000d),
- RM_ENGINE_TYPE_COPY5 = (0x0000000e),
- RM_ENGINE_TYPE_COPY6 = (0x0000000f),
- RM_ENGINE_TYPE_COPY7 = (0x00000010),
- RM_ENGINE_TYPE_COPY8 = (0x00000011),
- RM_ENGINE_TYPE_COPY9 = (0x00000012),
- RM_ENGINE_TYPE_NVDEC0 = (0x0000001d),
- RM_ENGINE_TYPE_NVDEC1 = (0x0000001e),
- RM_ENGINE_TYPE_NVDEC2 = (0x0000001f),
- RM_ENGINE_TYPE_NVDEC3 = (0x00000020),
- RM_ENGINE_TYPE_NVDEC4 = (0x00000021),
- RM_ENGINE_TYPE_NVDEC5 = (0x00000022),
- RM_ENGINE_TYPE_NVDEC6 = (0x00000023),
- RM_ENGINE_TYPE_NVDEC7 = (0x00000024),
- RM_ENGINE_TYPE_NVENC0 = (0x00000025),
- RM_ENGINE_TYPE_NVENC1 = (0x00000026),
- RM_ENGINE_TYPE_NVENC2 = (0x00000027),
- RM_ENGINE_TYPE_VP = (0x00000028),
- RM_ENGINE_TYPE_ME = (0x00000029),
- RM_ENGINE_TYPE_PPP = (0x0000002a),
- RM_ENGINE_TYPE_MPEG = (0x0000002b),
- RM_ENGINE_TYPE_SW = (0x0000002c),
- RM_ENGINE_TYPE_TSEC = (0x0000002d),
- RM_ENGINE_TYPE_VIC = (0x0000002e),
- RM_ENGINE_TYPE_MP = (0x0000002f),
- RM_ENGINE_TYPE_SEC2 = (0x00000030),
- RM_ENGINE_TYPE_HOST = (0x00000031),
- RM_ENGINE_TYPE_DPU = (0x00000032),
- RM_ENGINE_TYPE_PMU = (0x00000033),
- RM_ENGINE_TYPE_FBFLCN = (0x00000034),
- RM_ENGINE_TYPE_NVJPEG0 = (0x00000035),
- RM_ENGINE_TYPE_NVJPEG1 = (0x00000036),
- RM_ENGINE_TYPE_NVJPEG2 = (0x00000037),
- RM_ENGINE_TYPE_NVJPEG3 = (0x00000038),
- RM_ENGINE_TYPE_NVJPEG4 = (0x00000039),
- RM_ENGINE_TYPE_NVJPEG5 = (0x0000003a),
- RM_ENGINE_TYPE_NVJPEG6 = (0x0000003b),
- RM_ENGINE_TYPE_NVJPEG7 = (0x0000003c),
- RM_ENGINE_TYPE_OFA = (0x0000003d),
- RM_ENGINE_TYPE_LAST = (0x0000003e),
-} RM_ENGINE_TYPE;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h
deleted file mode 100644
index 3abec59f0cc4..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_fw_heap_h__
-#define __src_nvidia_inc_kernel_gpu_gsp_gsp_fw_heap_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB (96 << 10) // All architectures
-
-#define GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE ((48 << 10) * 2048) // Support 2048 channels
-
-#endif
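
The two parameters above are straightforward scalings: the GSP heap grows by
96 KiB (96 << 10 bytes) per GiB of framebuffer, and the client-allocation
budget reserves 48 KiB for each of 2048 channels, i.e. 96 MiB in total. A
sketch of the kind of sizing computation a caller might perform (the function
and its `base_bytes` term are hypothetical):

    /* Sketch: scale the GSP-RM heap with framebuffer size using the
     * parameters above plus a hypothetical fixed base cost.
     */
    static NvU64 gsp_fw_heap_sketch(NvU64 fb_size_bytes, NvU64 base_bytes)
    {
            NvU64 fb_gb = (fb_size_bytes + (1ULL << 30) - 1) >> 30; /* round up */

            return base_bytes +
                   fb_gb * GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB +
                   GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE;
    }
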
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h
deleted file mode 100644
index 4033a6f85a76..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_init_args_h__
-#define __src_nvidia_inc_kernel_gpu_gsp_gsp_init_args_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct {
- RmPhysAddr sharedMemPhysAddr;
- NvU32 pageTableEntryCount;
- NvLength cmdQueueOffset;
- NvLength statQueueOffset;
- NvLength locklessCmdQueueOffset;
- NvLength locklessStatQueueOffset;
-} MESSAGE_QUEUE_INIT_ARGUMENTS;
-
-typedef struct {
- NvU32 oldLevel;
- NvU32 flags;
- NvBool bInPMTransition;
-} GSP_SR_INIT_ARGUMENTS;
-
-typedef struct
-{
- MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments;
- GSP_SR_INIT_ARGUMENTS srInitArguments;
- NvU32 gpuInstance;
-
- struct
- {
- NvU64 pa;
- NvU64 size;
- } profilerArgs;
-} GSP_ARGUMENTS_CACHED;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h
deleted file mode 100644
index eeab25a5e290..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h
+++ /dev/null
@@ -1,174 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_static_config_h__
-#define __src_nvidia_inc_kernel_gpu_gsp_gsp_static_config_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h>
-#include <nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct GSP_VF_INFO
-{
- NvU32 totalVFs;
- NvU32 firstVFOffset;
- NvU64 FirstVFBar0Address;
- NvU64 FirstVFBar1Address;
- NvU64 FirstVFBar2Address;
- NvBool b64bitBar0;
- NvBool b64bitBar1;
- NvBool b64bitBar2;
-} GSP_VF_INFO;
-
-typedef struct GspSMInfo_t
-{
- NvU32 version;
- NvU32 regBankCount;
- NvU32 regBankRegCount;
- NvU32 maxWarpsPerSM;
- NvU32 maxThreadsPerWarp;
- NvU32 geomGsObufEntries;
- NvU32 geomXbufEntries;
- NvU32 maxSPPerSM;
- NvU32 rtCoreCount;
-} GspSMInfo;
-
-typedef struct GspStaticConfigInfo_t
-{
- NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE];
- NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo;
- NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS gpcInfo;
- NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpcInfo[MAX_GPC_COUNT];
- NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS zcullInfo[MAX_GPC_COUNT];
- NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo;
- NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams;
- COMPUTE_BRANDING_TYPE computeBranding;
-
- NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps;
- NvU32 sriovMaxGfid;
-
- NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX];
-
- GspSMInfo SM_info;
-
- NvBool poisonFuseEnabled;
-
- NvU64 fb_length;
- NvU32 fbio_mask;
- NvU32 fb_bus_width;
- NvU32 fb_ram_type;
- NvU32 fbp_mask;
- NvU32 l2_cache_size;
-
- NvU32 gfxpBufferSize[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
- NvU32 gfxpBufferAlignment[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
-
- NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
- NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
- NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH];
- NvBool bGpuInternalSku;
- NvBool bIsQuadroGeneric;
- NvBool bIsQuadroAd;
- NvBool bIsNvidiaNvs;
- NvBool bIsVgx;
- NvBool bGeforceSmb;
- NvBool bIsTitan;
- NvBool bIsTesla;
- NvBool bIsMobile;
- NvBool bIsGc6Rtd3Allowed;
- NvBool bIsGcOffRtd3Allowed;
- NvBool bIsGcoffLegacyAllowed;
-
- NvU64 bar1PdeBase;
- NvU64 bar2PdeBase;
-
- NvBool bVbiosValid;
- NvU32 vbiosSubVendor;
- NvU32 vbiosSubDevice;
-
- NvBool bPageRetirementSupported;
-
- NvBool bSplitVasBetweenServerClientRm;
-
- NvBool bClRootportNeedsNosnoopWAR;
-
- VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads;
- VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution;
- NvU64 displaylessMaxPixels;
-
- // Client handle for internal RMAPI control.
- NvHandle hInternalClient;
-
- // Device handle for internal RMAPI control.
- NvHandle hInternalDevice;
-
- // Subdevice handle for internal RMAPI control.
- NvHandle hInternalSubdevice;
-
- NvBool bSelfHostedMode;
- NvBool bAtsSupported;
-
- NvBool bIsGpuUefi;
-} GspStaticConfigInfo;
-
-typedef struct GspSystemInfo
-{
- NvU64 gpuPhysAddr;
- NvU64 gpuPhysFbAddr;
- NvU64 gpuPhysInstAddr;
- NvU64 nvDomainBusDeviceFunc;
- NvU64 simAccessBufPhysAddr;
- NvU64 pcieAtomicsOpMask;
- NvU64 consoleMemSize;
- NvU64 maxUserVa;
- NvU32 pciConfigMirrorBase;
- NvU32 pciConfigMirrorSize;
- NvU8 oorArch;
- NvU64 clPdbProperties;
- NvU32 Chipset;
- NvBool bGpuBehindBridge;
- NvBool bMnocAvailable;
- NvBool bUpstreamL0sUnsupported;
- NvBool bUpstreamL1Unsupported;
- NvBool bUpstreamL1PorSupported;
- NvBool bUpstreamL1PorMobileOnly;
- NvU8 upstreamAddressValid;
- BUSINFO FHBBusInfo;
- BUSINFO chipsetIDInfo;
- ACPI_METHOD_DATA acpiMethodData;
- NvU32 hypervisorType;
- NvBool bIsPassthru;
- NvU64 sysTimerOffsetNs;
- GSP_VF_INFO gspVFInfo;
-} GspSystemInfo;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h
deleted file mode 100644
index bd5e01f9814b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_intr_engine_idx_h__
-#define __src_nvidia_inc_kernel_gpu_intr_engine_idx_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define MC_ENGINE_IDX_DISP 2
-
-#define MC_ENGINE_IDX_CE0 15
-
-#define MC_ENGINE_IDX_CE9 24
-
-#define MC_ENGINE_IDX_MSENC 38
-
-#define MC_ENGINE_IDX_MSENC2 40
-
-#define MC_ENGINE_IDX_GSP 49
-#define MC_ENGINE_IDX_NVJPG 50
-#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG
-#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG
-
-#define MC_ENGINE_IDX_NVJPEG7 57
-
-#define MC_ENGINE_IDX_BSP 64
-#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP
-#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC
-
-#define MC_ENGINE_IDX_NVDEC7 71
-
-#define MC_ENGINE_IDX_OFA0 80
-
-#define MC_ENGINE_IDX_GR 82
-#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h
deleted file mode 100644
index 366447a368bf..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_nvbitmask_h__
-#define __src_nvidia_inc_kernel_gpu_nvbitmask_h__
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NVGPU_ENGINE_CAPS_MASK_BITS 32
-#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)
-
-#endif
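
With RM_ENGINE_TYPE_LAST = 0x3e (62), the array-size expression above works
out to (62 - 1)/32 + 1 = 2, matching the two-word engineCaps[] array in
GspStaticConfigInfo earlier in this patch. A one-line sanity check of that
arithmetic (C11 static assert; sketch only):

    _Static_assert(NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX == 2,
                   "engineCaps[] holds two 32-bit mask words");
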
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h
deleted file mode 100644
index 4a850dad4776..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_os_nv_memory_type_h__
-#define __src_nvidia_inc_kernel_os_nv_memory_type_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV_MEMORY_WRITECOMBINED 2
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h
deleted file mode 100644
index f14b23852456..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __src_nvidia_kernel_inc_vgpu_rpc_headers_h__
-#define __src_nvidia_kernel_inc_vgpu_rpc_headers_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define MAX_GPC_COUNT 32
-
-typedef enum
-{
- NV_RPC_UPDATE_PDE_BAR_1,
- NV_RPC_UPDATE_PDE_BAR_2,
- NV_RPC_UPDATE_PDE_BAR_INVALID,
-} NV_RPC_UPDATE_PDE_BAR_TYPE;
-
-typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
-{
- NvU32 headIndex;
- NvU32 maxHResolution;
- NvU32 maxVResolution;
-} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
-
-typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
-{
- NvU32 numHeads;
- NvU32 maxNumHeads;
-} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h
deleted file mode 100644
index 7801af232dff..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef __src_nvidia_kernel_inc_vgpu_sdk_structures_h__
-#define __src_nvidia_kernel_inc_vgpu_sdk_structures_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-struct pte_desc
-{
- NvU32 idr:2;
- NvU32 reserved1:14;
- NvU32 length:16;
- union {
- NvU64 pte; // PTE when IDR==0; PDE when IDR > 0
- NvU64 pde; // PTE when IDR==0; PDE when IDR > 0
- } pte_pde[] NV_ALIGN_BYTES(8); // PTE when IDR==0; PDE when IDR > 0
-};
-
-#endif
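
pte_desc packs a 2-bit level indicator (idr), a 16-bit entry count (length,
assuming that is what the field counts; the header does not spell it out) and
a flexible array of 8-byte PTE/PDE values, so the descriptor's size follows
directly from the entry count. A sizing sketch under that assumption:

    /* Sketch: bytes occupied by a pte_desc carrying `n` PTE/PDE entries. */
    static NvU32 pte_desc_bytes(NvU32 n)
    {
            return sizeof(struct pte_desc) + n * sizeof(NvU64);
    }
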
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h b/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
index e6833df1ccc7..af11648ad9c8 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
+++ b/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
@@ -21,4 +21,6 @@ typedef NvU64 NvLength;
typedef NvU64 RmPhysAddr;
typedef NvU32 NV_STATUS;
+
+typedef union {} rpc_generic_union;
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 2a0617e5fe2a..a3ba07fc48a0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -315,7 +315,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
break;
}
case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
- getparam->value = nvkm_device->func->resource_size(nvkm_device, 1);
+ getparam->value = nvkm_device->func->resource_size(nvkm_device, NVKM_BAR1_FB);
break;
case NOUVEAU_GETPARAM_VRAM_USED: {
struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
@@ -416,7 +416,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
*/
if (nouveau_cli_uvmm(cli)) {
ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq,
- chan->chan->dma.ib_max);
+ chan->chan->chan.gpfifo.max);
if (ret)
goto done;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2016c1e7242f..b96f0555ca14 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -401,6 +401,83 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
return 0;
}
+void
+nouveau_bo_unpin_del(struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo = *pnvbo;
+
+ if (!nvbo)
+ return;
+
+ nouveau_bo_unmap(nvbo);
+ nouveau_bo_unpin(nvbo);
+ nouveau_bo_fini(nvbo);
+
+ *pnvbo = NULL;
+}
+
+int
+nouveau_bo_new_pin(struct nouveau_cli *cli, u32 domain, u32 size, struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ ret = nouveau_bo_new(cli, size, 0, domain, 0, 0, NULL, NULL, &nvbo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_bo_pin(nvbo, domain, false);
+ if (ret) {
+ nouveau_bo_fini(nvbo);
+ return ret;
+ }
+
+ *pnvbo = nvbo;
+ return 0;
+}
+
+int
+nouveau_bo_new_map(struct nouveau_cli *cli, u32 domain, u32 size, struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ ret = nouveau_bo_new_pin(cli, domain, size, &nvbo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_bo_map(nvbo);
+ if (ret) {
+ nouveau_bo_unpin_del(&nvbo);
+ return ret;
+ }
+
+ *pnvbo = nvbo;
+ return 0;
+}
+
+int
+nouveau_bo_new_map_gpu(struct nouveau_cli *cli, u32 domain, u32 size,
+ struct nouveau_bo **pnvbo, struct nouveau_vma **pvma)
+{
+ struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ ret = nouveau_bo_new_map(cli, domain, size, &nvbo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_vma_new(nvbo, vmm, pvma);
+ if (ret) {
+ nouveau_bo_unpin_del(&nvbo);
+ return ret;
+ }
+
+ *pnvbo = nvbo;
+ return 0;
+}
+
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
@@ -923,6 +1000,9 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct ttm_resource *, struct ttm_resource *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
+ { "COPY", 4, 0xcab5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "COPY", 4, 0xc9b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "COPY", 4, 0xc8b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xc7b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xc6b5, nve0_bo_move_copy, nve0_bo_move_init },
@@ -1204,7 +1284,7 @@ retry:
fallthrough; /* tiled memory */
case TTM_PL_VRAM:
reg->bus.offset = (reg->start << PAGE_SHIFT) +
- device->func->resource_addr(device, 1);
+ device->func->resource_addr(device, NVKM_BAR1_FB);
reg->bus.is_iomem = true;
/* Some BARs do not support being ioremapped WC */
@@ -1295,7 +1375,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nvkm_device *device = nvxx_device(drm);
- u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
+ u32 mappable = device->func->resource_size(device, NVKM_BAR1_FB) >> PAGE_SHIFT;
int i, ret;
/* as long as the bo isn't in vram, and isn't tiled, we've got
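
The four helpers added above fold the allocate/pin/map/unmap/unpin/fini boilerplate, until now open-coded at every call site, into single calls; the fence and channel conversions later in this patch are the consumers. A minimal sketch of the intended call pattern follows; the caller name is hypothetical, not part of the patch:

static int example_alloc(struct nouveau_cli *cli, struct nouveau_bo **pbo)
{
	int ret;

	/* allocate + pin + CPU-map in one call; *pbo is untouched on error */
	ret = nouveau_bo_new_map(cli, NOUVEAU_GEM_DOMAIN_GART, PAGE_SIZE, pbo);
	if (ret)
		return ret;

	/* ... access the buffer through (*pbo)->kmap.virtual ... */

	/* unmap + unpin + fini in reverse order, then NULL the pointer;
	 * a NULL *pbo is tolerated, which is what lets the teardown paths
	 * converted below call this unconditionally */
	nouveau_bo_unpin_del(pbo);
	return 0;
}
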
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 596a63a50a20..d59fd12268b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -9,6 +9,7 @@ struct nouveau_channel;
struct nouveau_cli;
struct nouveau_drm;
struct nouveau_fence;
+struct nouveau_vma;
struct nouveau_bo {
struct ttm_buffer_object bo;
@@ -89,6 +90,12 @@ void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo);
void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo);
+int nouveau_bo_new_pin(struct nouveau_cli *, u32 domain, u32 size, struct nouveau_bo **);
+int nouveau_bo_new_map(struct nouveau_cli *, u32 domain, u32 size, struct nouveau_bo **);
+int nouveau_bo_new_map_gpu(struct nouveau_cli *, u32 domain, u32 size,
+ struct nouveau_bo **, struct nouveau_vma **);
+void nouveau_bo_unpin_del(struct nouveau_bo **);
+
/* TODO: submit equivalent to TTM generic API upstream? */
static inline void __iomem *
nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index cd659b9fd1d9..b1e92b1f7a26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -103,12 +103,11 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nvif_event_dtor(&chan->kill);
nvif_object_dtor(&chan->user);
nvif_mem_dtor(&chan->mem_userd);
+ nouveau_vma_del(&chan->sema.vma);
+ nouveau_bo_unpin_del(&chan->sema.bo);
nvif_object_dtor(&chan->push.ctxdma);
nouveau_vma_del(&chan->push.vma);
- nouveau_bo_unmap(chan->push.buffer);
- if (chan->push.buffer && chan->push.buffer->bo.pin_count)
- nouveau_bo_unpin(chan->push.buffer);
- nouveau_bo_fini(chan->push.buffer);
+ nouveau_bo_unpin_del(&chan->push.buffer);
kfree(chan);
}
*pchan = NULL;
@@ -163,14 +162,7 @@ nouveau_channel_prep(struct nouveau_cli *cli,
if (nouveau_vram_pushbuf)
target = NOUVEAU_GEM_DOMAIN_VRAM;
- ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL,
- &chan->push.buffer);
- if (ret == 0) {
- ret = nouveau_bo_pin(chan->push.buffer, target, false);
- if (ret == 0)
- ret = nouveau_bo_map(chan->push.buffer);
- }
-
+ ret = nouveau_bo_new_map(cli, target, size, &chan->push.buffer);
if (ret) {
nouveau_channel_del(pchan);
return ret;
@@ -199,8 +191,10 @@ nouveau_channel_prep(struct nouveau_cli *cli,
chan->push.addr = chan->push.vma->addr;
- if (device->info.family >= NV_DEVICE_INFO_V0_FERMI)
- return 0;
+ if (device->info.family >= NV_DEVICE_INFO_V0_FERMI) {
+ return nouveau_bo_new_map_gpu(cli, NOUVEAU_GEM_DOMAIN_GART, PAGE_SIZE,
+ &chan->sema.bo, &chan->sema.vma);
+ }
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
@@ -209,13 +203,15 @@ nouveau_channel_prep(struct nouveau_cli *cli,
} else
if (chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) {
if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
+ struct nvkm_device *nvkm_device = nvxx_device(drm);
+
/* nv04 vram pushbuf hack, retarget to its location in
* the framebuffer bar rather than direct vram access..
* nfi why this exists, it came from the -nv ddx.
*/
args.target = NV_DMA_V0_TARGET_PCI;
args.access = NV_DMA_V0_ACCESS_RDWR;
- args.start = nvxx_device(drm)->func->resource_addr(nvxx_device(drm), 1);
+ args.start = nvkm_device->func->resource_addr(nvkm_device, NVKM_BAR1_FB);
args.limit = args.start + device->info.ram_user - 1;
} else {
args.target = NV_DMA_V0_TARGET_VRAM;
@@ -253,27 +249,27 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
struct nouveau_channel **pchan)
{
const struct nvif_mclass hosts[] = {
- { AMPERE_CHANNEL_GPFIFO_B, 0 },
- { AMPERE_CHANNEL_GPFIFO_A, 0 },
- { TURING_CHANNEL_GPFIFO_A, 0 },
- { VOLTA_CHANNEL_GPFIFO_A, 0 },
- { PASCAL_CHANNEL_GPFIFO_A, 0 },
- { MAXWELL_CHANNEL_GPFIFO_A, 0 },
- { KEPLER_CHANNEL_GPFIFO_B, 0 },
- { KEPLER_CHANNEL_GPFIFO_A, 0 },
- { FERMI_CHANNEL_GPFIFO , 0 },
- { G82_CHANNEL_GPFIFO , 0 },
- { NV50_CHANNEL_GPFIFO , 0 },
- { NV40_CHANNEL_DMA , 0 },
- { NV17_CHANNEL_DMA , 0 },
- { NV10_CHANNEL_DMA , 0 },
- { NV03_CHANNEL_DMA , 0 },
+ { BLACKWELL_CHANNEL_GPFIFO_B, 0 },
+ { BLACKWELL_CHANNEL_GPFIFO_A, 0 },
+ { HOPPER_CHANNEL_GPFIFO_A, 0 },
+ { AMPERE_CHANNEL_GPFIFO_B, 0 },
+ { AMPERE_CHANNEL_GPFIFO_A, 0 },
+ { TURING_CHANNEL_GPFIFO_A, 0 },
+ { VOLTA_CHANNEL_GPFIFO_A, 0 },
+ { PASCAL_CHANNEL_GPFIFO_A, 0 },
+ { MAXWELL_CHANNEL_GPFIFO_A, 0 },
+ { KEPLER_CHANNEL_GPFIFO_B, 0 },
+ { KEPLER_CHANNEL_GPFIFO_A, 0 },
+ { FERMI_CHANNEL_GPFIFO , 0 },
+ { G82_CHANNEL_GPFIFO , 0 },
+ { NV50_CHANNEL_GPFIFO , 0 },
+ { NV40_CHANNEL_DMA , 0 },
+ { NV17_CHANNEL_DMA , 0 },
+ { NV10_CHANNEL_DMA , 0 },
+ { NV03_CHANNEL_DMA , 0 },
{}
};
- struct {
- struct nvif_chan_v0 chan;
- char name[TASK_COMM_LEN+16];
- } args;
+ DEFINE_RAW_FLEX(struct nvif_chan_v0, args, name, TASK_COMM_LEN + 16);
struct nvif_device *device = &cli->device;
struct nouveau_channel *chan;
const u64 plength = 0x10000;
@@ -298,28 +294,28 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
return ret;
/* create channel object */
- args.chan.version = 0;
- args.chan.namelen = sizeof(args.name);
- args.chan.runlist = __ffs64(runm);
- args.chan.runq = 0;
- args.chan.priv = priv;
- args.chan.devm = BIT(0);
+ args->version = 0;
+ args->namelen = __member_size(args->name);
+ args->runlist = __ffs64(runm);
+ args->runq = 0;
+ args->priv = priv;
+ args->devm = BIT(0);
if (hosts[cid].oclass < NV50_CHANNEL_GPFIFO) {
- args.chan.vmm = 0;
- args.chan.ctxdma = nvif_handle(&chan->push.ctxdma);
- args.chan.offset = chan->push.addr;
- args.chan.length = 0;
+ args->vmm = 0;
+ args->ctxdma = nvif_handle(&chan->push.ctxdma);
+ args->offset = chan->push.addr;
+ args->length = 0;
} else {
- args.chan.vmm = nvif_handle(&chan->vmm->vmm.object);
+ args->vmm = nvif_handle(&chan->vmm->vmm.object);
if (hosts[cid].oclass < FERMI_CHANNEL_GPFIFO)
- args.chan.ctxdma = nvif_handle(&chan->push.ctxdma);
+ args->ctxdma = nvif_handle(&chan->push.ctxdma);
else
- args.chan.ctxdma = 0;
- args.chan.offset = ioffset + chan->push.addr;
- args.chan.length = ilength;
+ args->ctxdma = 0;
+ args->offset = ioffset + chan->push.addr;
+ args->length = ilength;
}
- args.chan.huserd = 0;
- args.chan.ouserd = 0;
+ args->huserd = 0;
+ args->ouserd = 0;
/* allocate userd */
if (hosts[cid].oclass >= VOLTA_CHANNEL_GPFIFO_A) {
@@ -329,27 +325,28 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
if (ret)
return ret;
- args.chan.huserd = nvif_handle(&chan->mem_userd.object);
- args.chan.ouserd = 0;
+ args->huserd = nvif_handle(&chan->mem_userd.object);
+ args->ouserd = 0;
chan->userd = &chan->mem_userd.object;
} else {
chan->userd = &chan->user;
}
- snprintf(args.name, sizeof(args.name), "%s[%d]", current->comm, task_pid_nr(current));
+ snprintf(args->name, __member_size(args->name), "%s[%d]",
+ current->comm, task_pid_nr(current));
ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0, hosts[cid].oclass,
- &args, sizeof(args), &chan->user);
+ args, __struct_size(args), &chan->user);
if (ret) {
nouveau_channel_del(pchan);
return ret;
}
- chan->runlist = args.chan.runlist;
- chan->chid = args.chan.chid;
- chan->inst = args.chan.inst;
- chan->token = args.chan.token;
+ chan->runlist = args->runlist;
+ chan->chid = args->chid;
+ chan->inst = args->inst;
+ chan->token = args->token;
return 0;
}
@@ -367,17 +364,17 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
return ret;
if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
- struct {
- struct nvif_event_v0 base;
- struct nvif_chan_event_v0 host;
- } args;
+ DEFINE_RAW_FLEX(struct nvif_event_v0, args, data,
+ sizeof(struct nvif_chan_event_v0));
+ struct nvif_chan_event_v0 *host =
+ (struct nvif_chan_event_v0 *)args->data;
- args.host.version = 0;
- args.host.type = NVIF_CHAN_EVENT_V0_KILLED;
+ host->version = 0;
+ host->type = NVIF_CHAN_EVENT_V0_KILLED;
ret = nvif_event_ctor(&chan->user, "abi16ChanKilled", chan->chid,
nouveau_channel_killed, false,
- &args.base, sizeof(args), &chan->kill);
+ args, __struct_size(args), &chan->kill);
if (ret == 0)
ret = nvif_event_allow(&chan->kill);
if (ret) {
@@ -433,25 +430,33 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
}
/* initialise dma tracking parameters */
- switch (chan->user.oclass) {
- case NV03_CHANNEL_DMA:
- case NV10_CHANNEL_DMA:
- case NV17_CHANNEL_DMA:
- case NV40_CHANNEL_DMA:
+ if (chan->user.oclass < NV50_CHANNEL_GPFIFO) {
chan->user_put = 0x40;
chan->user_get = 0x44;
chan->dma.max = (0x10000 / 4) - 2;
- break;
- default:
- chan->user_put = 0x40;
- chan->user_get = 0x44;
- chan->user_get_hi = 0x60;
- chan->dma.ib_base = 0x10000 / 4;
- chan->dma.ib_max = NV50_DMA_IB_MAX;
- chan->dma.ib_put = 0;
- chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
- chan->dma.max = chan->dma.ib_base;
- break;
+ } else
+ if (chan->user.oclass < FERMI_CHANNEL_GPFIFO) {
+ ret = nvif_chan506f_ctor(&chan->chan, chan->userd->map.ptr,
+ (u8*)chan->push.buffer->kmap.virtual + 0x10000, 0x2000,
+ chan->push.buffer->kmap.virtual, chan->push.addr, 0x10000);
+ if (ret)
+ return ret;
+ } else
+ if (chan->user.oclass < VOLTA_CHANNEL_GPFIFO_A) {
+ ret = nvif_chan906f_ctor(&chan->chan, chan->userd->map.ptr,
+ (u8*)chan->push.buffer->kmap.virtual + 0x10000, 0x2000,
+ chan->push.buffer->kmap.virtual, chan->push.addr, 0x10000,
+ chan->sema.bo->kmap.virtual, chan->sema.vma->addr);
+ if (ret)
+ return ret;
+ } else {
+ ret = nvif_chanc36f_ctor(&chan->chan, chan->userd->map.ptr,
+ (u8*)chan->push.buffer->kmap.virtual + 0x10000, 0x2000,
+ chan->push.buffer->kmap.virtual, chan->push.addr, 0x10000,
+ chan->sema.bo->kmap.virtual, chan->sema.vma->addr,
+ &drm->client.device.user, chan->token);
+ if (ret)
+ return ret;
}
chan->dma.put = 0;
@@ -520,46 +525,44 @@ nouveau_channels_fini(struct nouveau_drm *drm)
int
nouveau_channels_init(struct nouveau_drm *drm)
{
- struct {
- struct nv_device_info_v1 m;
- struct {
- struct nv_device_info_v1_data channels;
- struct nv_device_info_v1_data runlists;
- } v;
- } args = {
- .m.version = 1,
- .m.count = sizeof(args.v) / sizeof(args.v.channels),
- .v.channels.mthd = NV_DEVICE_HOST_CHANNELS,
- .v.runlists.mthd = NV_DEVICE_HOST_RUNLISTS,
- };
+ DEFINE_RAW_FLEX(struct nv_device_info_v1, args, data, 2);
+ struct nv_device_info_v1_data *channels = &args->data[0];
+ struct nv_device_info_v1_data *runlists = &args->data[1];
struct nvif_object *device = &drm->client.device.object;
int ret, i;
- ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
+ args->version = 1;
+ args->count = __member_size(args->data) / sizeof(*args->data);
+ channels->mthd = NV_DEVICE_HOST_CHANNELS;
+ runlists->mthd = NV_DEVICE_HOST_RUNLISTS;
+
+ ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, args,
+ __struct_size(args));
if (ret ||
- args.v.runlists.mthd == NV_DEVICE_INFO_INVALID || !args.v.runlists.data ||
- args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
+ runlists->mthd == NV_DEVICE_INFO_INVALID || !runlists->data ||
+ channels->mthd == NV_DEVICE_INFO_INVALID)
return -ENODEV;
- drm->chan_nr = drm->chan_total = args.v.channels.data;
- drm->runl_nr = fls64(args.v.runlists.data);
+ drm->chan_nr = drm->chan_total = channels->data;
+ drm->runl_nr = fls64(runlists->data);
drm->runl = kcalloc(drm->runl_nr, sizeof(*drm->runl), GFP_KERNEL);
if (!drm->runl)
return -ENOMEM;
if (drm->chan_nr == 0) {
for (i = 0; i < drm->runl_nr; i++) {
- if (!(args.v.runlists.data & BIT(i)))
+ if (!(runlists->data & BIT(i)))
continue;
- args.v.channels.mthd = NV_DEVICE_HOST_RUNLIST_CHANNELS;
- args.v.channels.data = i;
+ channels->mthd = NV_DEVICE_HOST_RUNLIST_CHANNELS;
+ channels->data = i;
- ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
- if (ret || args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
+ ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, args,
+ __struct_size(args));
+ if (ret || channels->mthd == NV_DEVICE_INFO_INVALID)
return -ENODEV;
- drm->runl[i].chan_nr = args.v.channels.data;
+ drm->runl[i].chan_nr = channels->data;
drm->runl[i].chan_id_base = drm->chan_total;
drm->runl[i].context_base = dma_fence_context_alloc(drm->runl[i].chan_nr);
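
nouveau_chan.c is the first of several files in this patch (nouveau_fence.c, nouveau_svm.c, nvif/conn.c and nvif/outp.c follow) converted from on-stack wrapper structs to the kernel's DEFINE_RAW_FLEX() helper from include/linux/overflow.h, which reserves on-stack storage for a structure that ends in a flexible-array member; __member_size() and __struct_size() then stand in for the old sizeof(args.name) and sizeof(args) expressions. A stripped-down sketch of the pattern, using a hypothetical message struct:

struct msg {
	u8   version;
	u8   namelen;
	char name[];		/* flexible-array member */
};

void example(void)
{
	/* on-stack storage for one struct msg plus 32 bytes of name[] */
	DEFINE_RAW_FLEX(struct msg, args, name, 32);

	args->version = 0;
	args->namelen = __member_size(args->name);	/* == 32 */

	/* pass (args, __struct_size(args)) where (&wrapper, sizeof(wrapper))
	 * was passed before; __struct_size(args) is sizeof(struct msg) + 32,
	 * modulo padding */
}
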
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 016f668c0bc1..561877725aac 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -3,13 +3,11 @@
#define __NOUVEAU_CHAN_H__
#include <nvif/object.h>
#include <nvif/event.h>
-#include <nvif/push.h>
+#include <nvif/chan.h>
struct nvif_device;
struct nouveau_channel {
- struct {
- struct nvif_push push;
- } chan;
+ struct nvif_chan chan;
struct nouveau_cli *cli;
struct nouveau_vmm *vmm;
@@ -41,15 +39,15 @@ struct nouveau_channel {
int free;
int cur;
int put;
- int ib_base;
- int ib_max;
- int ib_free;
- int ib_put;
} dma;
- u32 user_get_hi;
u32 user_get;
u32 user_put;
+ struct {
+ struct nouveau_bo *bo;
+ struct nouveau_vma *vma;
+ } sema;
+
struct nvif_object user;
struct nvif_object blit;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 1b10c6c12f46..63621b1510f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1401,6 +1401,8 @@ nouveau_connector_create(struct drm_device *dev, int index)
nv_connector->aux.drm_dev = dev;
nv_connector->aux.transfer = nouveau_connector_aux_xfer;
nv_connector->aux.name = connector->name;
+ if (disp->disp.object.oclass >= GB202_DISP)
+ nv_connector->aux.no_zero_sized = true;
drm_dp_aux_init(&nv_connector->aux);
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index a1f329ef0641..017a803121d4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -43,8 +43,6 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
uint64_t val;
val = nvif_rd32(chan->userd, chan->user_get);
- if (chan->user_get_hi)
- val |= (uint64_t)nvif_rd32(chan->userd, chan->user_get_hi) << 32;
/* reset counter as long as GET is still advancing, this is
* to avoid misdetecting a GPU lockup if the GPU happens to
@@ -68,111 +66,12 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
return (val - chan->push.addr) >> 2;
}
-void
-nv50_dma_push(struct nouveau_channel *chan, u64 offset, u32 length,
- bool no_prefetch)
-{
- struct nvif_user *user = &chan->cli->drm->client.device.user;
- struct nouveau_bo *pb = chan->push.buffer;
- int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
-
- BUG_ON(chan->dma.ib_free < 1);
- WARN_ON(length > NV50_DMA_PUSH_MAX_LENGTH);
-
- nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
- nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8 |
- (no_prefetch ? (1 << 31) : 0));
-
- chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
-
- mb();
- /* Flush writes. */
- nouveau_bo_rd32(pb, 0);
-
- nvif_wr32(chan->userd, 0x8c, chan->dma.ib_put);
- if (user->func && user->func->doorbell)
- user->func->doorbell(user, chan->token);
- chan->dma.ib_free--;
-}
-
-static int
-nv50_dma_push_wait(struct nouveau_channel *chan, int count)
-{
- uint32_t cnt = 0, prev_get = 0;
-
- while (chan->dma.ib_free < count) {
- uint32_t get = nvif_rd32(chan->userd, 0x88);
- if (get != prev_get) {
- prev_get = get;
- cnt = 0;
- }
-
- if ((++cnt & 0xff) == 0) {
- udelay(1);
- if (cnt > 100000)
- return -EBUSY;
- }
-
- chan->dma.ib_free = get - chan->dma.ib_put;
- if (chan->dma.ib_free <= 0)
- chan->dma.ib_free += chan->dma.ib_max;
- }
-
- return 0;
-}
-
-static int
-nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
-{
- uint64_t prev_get = 0;
- int ret, cnt = 0;
-
- ret = nv50_dma_push_wait(chan, slots + 1);
- if (unlikely(ret))
- return ret;
-
- while (chan->dma.free < count) {
- int get = READ_GET(chan, &prev_get, &cnt);
- if (unlikely(get < 0)) {
- if (get == -EINVAL)
- continue;
-
- return get;
- }
-
- if (get <= chan->dma.cur) {
- chan->dma.free = chan->dma.max - chan->dma.cur;
- if (chan->dma.free >= count)
- break;
-
- FIRE_RING(chan);
- do {
- get = READ_GET(chan, &prev_get, &cnt);
- if (unlikely(get < 0)) {
- if (get == -EINVAL)
- continue;
- return get;
- }
- } while (get == 0);
- chan->dma.cur = 0;
- chan->dma.put = 0;
- }
-
- chan->dma.free = get - chan->dma.cur - 1;
- }
-
- return 0;
-}
-
int
-nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
+nouveau_dma_wait(struct nouveau_channel *chan, int size)
{
uint64_t prev_get = 0;
int cnt = 0, get;
- if (chan->dma.ib_max)
- return nv50_dma_wait(chan, slots, size);
-
while (chan->dma.free < size) {
get = READ_GET(chan, &prev_get, &cnt);
if (unlikely(get == -EBUSY))
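
The nv50_dma_push() removed here and nvif_chan506f_gpfifo_push() (added later in this patch) write the same two-word GPFIFO entry: length << 8 with a byte length is identical to (size >> 2) << 10 with a dword count, and the only substantive change is that the new code additionally tags user segments as non-main via bit 9. A sketch of the entry layout as readable from the two implementations; the helper name is illustrative only:

/* One GPFIFO entry: two 32-bit words describing a pushbuf segment. */
static void encode_gpfifo_entry(u32 *ent, u64 addr, u32 size,
				bool main, bool no_prefetch)
{
	ent[0] = lower_32_bits(addr);
	ent[1] = upper_32_bits(addr) |		/* addr[39:32] in bits 7:0 */
		 (main ? 0 : BIT(9)) |		/* user (non-main) segment */
		 (size >> 2) << 10 |		/* length in dwords */
		 (no_prefetch ? BIT(31) : 0);	/* inhibit HW prefetch */
}
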
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index c52cda82353e..0e27b76d1e1c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -30,9 +30,7 @@
#include "nouveau_bo.h"
#include "nouveau_chan.h"
-int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
-void nv50_dma_push(struct nouveau_channel *, u64 addr, u32 length,
- bool no_prefetch);
+int nouveau_dma_wait(struct nouveau_channel *, int size);
/*
* There's a hw race condition where you can't jump to your PUT offset,
@@ -67,7 +65,7 @@ RING_SPACE(struct nouveau_channel *chan, int size)
{
int ret;
- ret = nouveau_dma_wait(chan, 1, size);
+ ret = nouveau_dma_wait(chan, size);
if (ret)
return ret;
@@ -94,12 +92,7 @@ FIRE_RING(struct nouveau_channel *chan)
return;
chan->accel_done = true;
- if (chan->dma.ib_max) {
- nv50_dma_push(chan, chan->push.addr + (chan->dma.put << 2),
- (chan->dma.cur - chan->dma.put) << 2, false);
- } else {
- WRITE_PUT(chan->dma.cur);
- }
+ WRITE_PUT(chan->dma.cur);
chan->dma.put = chan->dma.cur;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 61d0f411ef84..ca4932a150e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -256,20 +256,15 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
chunk->pagemap.owner = drm->dev;
- ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
- NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
- &chunk->bo);
+ ret = nouveau_bo_new_pin(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, DMEM_CHUNK_SIZE,
+ &chunk->bo);
if (ret)
goto out_release;
- ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
- if (ret)
- goto out_bo_free;
-
ptr = memremap_pages(&chunk->pagemap, numa_node_id());
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
- goto out_bo_unpin;
+ goto out_bo_free;
}
mutex_lock(&drm->dmem->mutex);
@@ -292,10 +287,8 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
return 0;
-out_bo_unpin:
- nouveau_bo_unpin(chunk->bo);
out_bo_free:
- nouveau_bo_fini(chunk->bo);
+ nouveau_bo_unpin_del(&chunk->bo);
out_release:
release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
out_free:
@@ -426,8 +419,7 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
nouveau_dmem_evict_chunk(chunk);
- nouveau_bo_unpin(chunk->bo);
- nouveau_bo_fini(chunk->bo);
+ nouveau_bo_unpin_del(&chunk->bo);
WARN_ON(chunk->callocated);
list_del(&chunk->list);
memunmap_pages(&chunk->pagemap);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index e154d08857c5..0c82a63cd49d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -503,11 +503,16 @@ nouveau_accel_init(struct nouveau_drm *drm)
case KEPLER_CHANNEL_GPFIFO_B:
case MAXWELL_CHANNEL_GPFIFO_A:
case PASCAL_CHANNEL_GPFIFO_A:
+ ret = nvc0_fence_create(drm);
+ break;
case VOLTA_CHANNEL_GPFIFO_A:
case TURING_CHANNEL_GPFIFO_A:
case AMPERE_CHANNEL_GPFIFO_A:
case AMPERE_CHANNEL_GPFIFO_B:
- ret = nvc0_fence_create(drm);
+ case HOPPER_CHANNEL_GPFIFO_A:
+ case BLACKWELL_CHANNEL_GPFIFO_A:
+ case BLACKWELL_CHANNEL_GPFIFO_B:
+ ret = gv100_fence_create(drm);
break;
default:
break;
@@ -1079,6 +1084,10 @@ nouveau_pmops_freeze(struct device *dev)
{
struct nouveau_drm *drm = dev_get_drvdata(dev);
+ if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ return 0;
+
return nouveau_do_suspend(drm, false);
}
@@ -1087,6 +1096,10 @@ nouveau_pmops_thaw(struct device *dev)
{
struct nouveau_drm *drm = dev_get_drvdata(dev);
+ if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ return 0;
+
return nouveau_do_resume(drm, false);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index a0b5f1b16e8b..41b7c608c905 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -10,6 +10,8 @@
#include "nouveau_sched.h"
#include "nouveau_uvmm.h"
+#include <nvif/class.h>
+
/**
* DOC: Overview
*
@@ -131,7 +133,7 @@ nouveau_exec_job_run(struct nouveau_job *job)
struct nouveau_fence *fence = exec_job->fence;
int i, ret;
- ret = nouveau_dma_wait(chan, exec_job->push.count + 1, 16);
+ ret = nvif_chan_gpfifo_wait(&chan->chan, exec_job->push.count + 1, 16);
if (ret) {
NV_PRINTK(err, job->cli, "nv50cal_space: %d\n", ret);
return ERR_PTR(ret);
@@ -141,9 +143,11 @@ nouveau_exec_job_run(struct nouveau_job *job)
struct drm_nouveau_exec_push *p = &exec_job->push.s[i];
bool no_prefetch = p->flags & DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH;
- nv50_dma_push(chan, p->va, p->va_len, no_prefetch);
+ nvif_chan_gpfifo_push(&chan->chan, p->va, p->va_len, no_prefetch);
}
+ nvif_chan_gpfifo_post(&chan->chan);
+
ret = nouveau_fence_emit(fence);
if (ret) {
nouveau_fence_unref(&exec_job->fence);
@@ -375,10 +379,10 @@ nouveau_exec_ioctl_exec(struct drm_device *dev,
if (unlikely(atomic_read(&chan->killed)))
return nouveau_abi16_put(abi16, -ENODEV);
- if (!chan->dma.ib_max)
+ if (chan->user.oclass < NV50_CHANNEL_GPFIFO)
return nouveau_abi16_put(abi16, -ENOSYS);
- push_max = nouveau_exec_push_max_from_ib_max(chan->dma.ib_max);
+ push_max = nouveau_exec_push_max_from_ib_max(chan->chan.gpfifo.max);
if (unlikely(req->push_count > push_max)) {
NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
req->push_count, push_max);
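
The exec path now reserves space, pushes the user segments, then posts; the post (nvif_chan_gpfifo_post(), added later in this patch) emits a semaphore release through which the channel reports its GET pointers back to the kernel. Condensed shape of the sequence; the va/len variables are stand-ins for the drm_nouveau_exec_push entries, and reading the "+ 1, 16" reservation as covering the fence emission that follows is an inference from this hunk, not a documented contract:

ret = nvif_chan_gpfifo_wait(&chan->chan, push_count + 1, 16);
if (ret)					/* reserve one extra GPFIFO */
	return ret;				/* slot and 16 pushbuf dwords,
						 * enough for the kernel's own
						 * fence push afterwards */
for (i = 0; i < push_count; i++)
	nvif_chan_gpfifo_push(&chan->chan, va[i], len[i], no_prefetch);
nvif_chan_gpfifo_post(&chan->chan);		/* semaphore release; updates
						 * the GET state consulted by
						 * nvif_chan_gpfifo_wait() */
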
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index edddfc036c6d..6ded8c2b6d3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -184,10 +184,10 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
struct nouveau_cli *cli = chan->cli;
struct nouveau_drm *drm = cli->drm;
struct nouveau_fence_priv *priv = (void*)drm->fence;
- struct {
- struct nvif_event_v0 base;
- struct nvif_chan_event_v0 host;
- } args;
+ DEFINE_RAW_FLEX(struct nvif_event_v0, args, data,
+ sizeof(struct nvif_chan_event_v0));
+ struct nvif_chan_event_v0 *host =
+ (struct nvif_chan_event_v0 *)args->data;
int ret;
INIT_WORK(&fctx->uevent_work, nouveau_fence_uevent_work);
@@ -207,12 +207,12 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
if (!priv->uevent)
return;
- args.host.version = 0;
- args.host.type = NVIF_CHAN_EVENT_V0_NON_STALL_INTR;
+ host->version = 0;
+ host->type = NVIF_CHAN_EVENT_V0_NON_STALL_INTR;
ret = nvif_event_ctor(&chan->user, "fenceNonStallIntr", (chan->runlist << 16) | chan->chid,
nouveau_fence_wait_uevent_handler, false,
- &args.base, sizeof(args), &fctx->event);
+ args, __struct_size(args), &fctx->event);
WARN_ON(ret);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 8bc065acfe35..6a983dd9f7b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -83,6 +83,7 @@ void nv17_fence_resume(struct nouveau_drm *drm);
int nv50_fence_create(struct nouveau_drm *);
int nv84_fence_create(struct nouveau_drm *);
int nvc0_fence_create(struct nouveau_drm *);
+int gv100_fence_create(struct nouveau_drm *);
struct nv84_fence_chan {
struct nouveau_fence_chan base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 67e3c99de73a..690e10fbf0bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -850,8 +850,8 @@ revalidate:
}
}
- if (chan->dma.ib_max) {
- ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
+ if (chan->user.oclass >= NV50_CHANNEL_GPFIFO) {
+ ret = nvif_chan_gpfifo_wait(&chan->chan, req->nr_push + 1, 16);
if (ret) {
NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
goto out;
@@ -864,8 +864,10 @@ revalidate:
u32 length = push[i].length & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
bool no_prefetch = push[i].length & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
- nv50_dma_push(chan, addr, length, no_prefetch);
+ nvif_chan_gpfifo_push(&chan->chan, addr, length, no_prefetch);
}
+
+ nvif_chan_gpfifo_post(&chan->chan);
} else
if (drm->client.device.info.chipset >= 0x25) {
ret = PUSH_WAIT(&chan->chan.push, req->nr_push * 2);
@@ -958,7 +960,7 @@ out_prevalid:
u_free(push);
out_next:
- if (chan->dma.ib_max) {
+ if (chan->user.oclass >= NV50_CHANNEL_GPFIFO) {
req->suffix0 = 0x00000000;
req->suffix1 = 0x00000000;
} else
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index e12e2596ed84..6fa387da0637 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -720,10 +720,7 @@ nouveau_svm_fault(struct work_struct *work)
struct nouveau_svm *svm = container_of(buffer, typeof(*svm), buffer[buffer->id]);
struct nvif_object *device = &svm->drm->client.device.object;
struct nouveau_svmm *svmm;
- struct {
- struct nouveau_pfnmap_args i;
- u64 phys[1];
- } args;
+ DEFINE_RAW_FLEX(struct nouveau_pfnmap_args, args, p.phys, 1);
unsigned long hmm_flags;
u64 inst, start, limit;
int fi, fn;
@@ -772,11 +769,11 @@ nouveau_svm_fault(struct work_struct *work)
mutex_unlock(&svm->mutex);
/* Process list of faults. */
- args.i.i.version = 0;
- args.i.i.type = NVIF_IOCTL_V0_MTHD;
- args.i.m.version = 0;
- args.i.m.method = NVIF_VMM_V0_PFNMAP;
- args.i.p.version = 0;
+ args->i.version = 0;
+ args->i.type = NVIF_IOCTL_V0_MTHD;
+ args->m.version = 0;
+ args->m.method = NVIF_VMM_V0_PFNMAP;
+ args->p.version = 0;
for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
struct svm_notifier notifier;
@@ -802,9 +799,9 @@ nouveau_svm_fault(struct work_struct *work)
* fault window, determining required pages and access
* permissions based on pending faults.
*/
- args.i.p.addr = start;
- args.i.p.page = PAGE_SHIFT;
- args.i.p.size = PAGE_SIZE;
+ args->p.addr = start;
+ args->p.page = PAGE_SHIFT;
+ args->p.size = PAGE_SIZE;
/*
* Determine required permissions based on GPU fault
* access flags.
@@ -832,16 +829,16 @@ nouveau_svm_fault(struct work_struct *work)
notifier.svmm = svmm;
if (atomic)
- ret = nouveau_atomic_range_fault(svmm, svm->drm,
- &args.i, sizeof(args),
+ ret = nouveau_atomic_range_fault(svmm, svm->drm, args,
+ __struct_size(args),
&notifier);
else
- ret = nouveau_range_fault(svmm, svm->drm, &args.i,
- sizeof(args), hmm_flags,
- &notifier);
+ ret = nouveau_range_fault(svmm, svm->drm, args,
+ __struct_size(args),
+ hmm_flags, &notifier);
mmput(mm);
- limit = args.i.p.addr + args.i.p.size;
+ limit = args->p.addr + args->p.size;
for (fn = fi; ++fn < buffer->fault_nr; ) {
/* It's okay to skip over duplicate addresses from the
* same SVMM as faults are ordered by access type such
@@ -855,14 +852,14 @@ nouveau_svm_fault(struct work_struct *work)
if (buffer->fault[fn]->svmm != svmm ||
buffer->fault[fn]->addr >= limit ||
(buffer->fault[fi]->access == FAULT_ACCESS_READ &&
- !(args.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
+ !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
(buffer->fault[fi]->access != FAULT_ACCESS_READ &&
buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
- !(args.phys[0] & NVIF_VMM_PFNMAP_V0_W)) ||
+ !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_W)) ||
(buffer->fault[fi]->access != FAULT_ACCESS_READ &&
buffer->fault[fi]->access != FAULT_ACCESS_WRITE &&
buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
- !(args.phys[0] & NVIF_VMM_PFNMAP_V0_A)))
+ !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_A)))
break;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index e244927eb5d4..7d2436e5d50d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -312,8 +312,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
/* VRAM init */
drm->gem.vram_available = drm->client.device.info.ram_user;
- arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
- device->func->resource_size(device, 1));
+ arch_io_reserve_memtype_wc(device->func->resource_addr(device, NVKM_BAR1_FB),
+ device->func->resource_size(device, NVKM_BAR1_FB));
ret = nouveau_ttm_init_vram(drm);
if (ret) {
@@ -321,8 +321,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
return ret;
}
- drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
- device->func->resource_size(device, 1));
+ drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, NVKM_BAR1_FB),
+ device->func->resource_size(device, NVKM_BAR1_FB));
/* GART init */
if (!drm->agp.bridge) {
@@ -357,7 +357,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
arch_phys_wc_del(drm->ttm.mtrr);
drm->ttm.mtrr = 0;
- arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
- device->func->resource_size(device, 1));
+ arch_io_free_memtype_wc(device->func->resource_addr(device, NVKM_BAR1_FB),
+ device->func->resource_size(device, NVKM_BAR1_FB));
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 8c73f40e3bda..40ee95340814 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -85,10 +85,8 @@ void
nv10_fence_destroy(struct nouveau_drm *drm)
{
struct nv10_fence_priv *priv = drm->fence;
- nouveau_bo_unmap(priv->bo);
- if (priv->bo)
- nouveau_bo_unpin(priv->bo);
- nouveau_bo_fini(priv->bo);
+
+ nouveau_bo_unpin_del(&priv->bo);
drm->fence = NULL;
kfree(priv);
}
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index d09bfd11369f..1b0c0aa3c305 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -130,20 +130,7 @@ nv17_fence_create(struct nouveau_drm *drm)
priv->base.context_del = nv10_fence_context_del;
spin_lock_init(&priv->lock);
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
- NOUVEAU_GEM_DOMAIN_VRAM,
- 0, 0x0000, NULL, NULL, &priv->bo);
- if (!ret) {
- ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
- if (!ret) {
- ret = nouveau_bo_map(priv->bo);
- if (ret)
- nouveau_bo_unpin(priv->bo);
- }
- if (ret)
- nouveau_bo_fini(priv->bo);
- }
-
+ ret = nouveau_bo_new_map(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, PAGE_SIZE, &priv->bo);
if (ret) {
nv10_fence_destroy(drm);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 62e28dddf87c..e1f0e8adf313 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -81,20 +81,7 @@ nv50_fence_create(struct nouveau_drm *drm)
priv->base.context_del = nv10_fence_context_del;
spin_lock_init(&priv->lock);
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
- NOUVEAU_GEM_DOMAIN_VRAM,
- 0, 0x0000, NULL, NULL, &priv->bo);
- if (!ret) {
- ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
- if (!ret) {
- ret = nouveau_bo_map(priv->bo);
- if (ret)
- nouveau_bo_unpin(priv->bo);
- }
- if (ret)
- nouveau_bo_fini(priv->bo);
- }
-
+ ret = nouveau_bo_new_map(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, PAGE_SIZE, &priv->bo);
if (ret) {
nv10_fence_destroy(drm);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index aa7dd0c5d917..1765b2cedaf9 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -185,10 +185,8 @@ static void
nv84_fence_destroy(struct nouveau_drm *drm)
{
struct nv84_fence_priv *priv = drm->fence;
- nouveau_bo_unmap(priv->bo);
- if (priv->bo)
- nouveau_bo_unpin(priv->bo);
- nouveau_bo_fini(priv->bo);
+
+ nouveau_bo_unpin_del(&priv->bo);
drm->fence = NULL;
kfree(priv);
}
@@ -222,19 +220,8 @@ nv84_fence_create(struct nouveau_drm *drm)
* will lose CPU/GPU coherency!
*/
NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT;
- ret = nouveau_bo_new(&drm->client, 16 * drm->chan_total, 0,
- domain, 0, 0, NULL, NULL, &priv->bo);
- if (ret == 0) {
- ret = nouveau_bo_pin(priv->bo, domain, false);
- if (ret == 0) {
- ret = nouveau_bo_map(priv->bo);
- if (ret)
- nouveau_bo_unpin(priv->bo);
- }
- if (ret)
- nouveau_bo_fini(priv->bo);
- }
+ ret = nouveau_bo_new_map(&drm->client, domain, 16 * drm->chan_total, &priv->bo);
if (ret)
nv84_fence_destroy(drm);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild
index b7963a39dd91..198889c20ce1 100644
--- a/drivers/gpu/drm/nouveau/nvif/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -14,6 +14,12 @@ nvif-y += nvif/outp.o
nvif-y += nvif/timer.o
nvif-y += nvif/vmm.o
+# Channel classes
+nvif-y += nvif/chan.o
+nvif-y += nvif/chan506f.o
+nvif-y += nvif/chan906f.o
+nvif-y += nvif/chanc36f.o
+
# Usermode classes
nvif-y += nvif/user.o
nvif-y += nvif/userc361.o
diff --git a/drivers/gpu/drm/nouveau/nvif/chan.c b/drivers/gpu/drm/nouveau/nvif/chan.c
new file mode 100644
index 000000000000..baa10227d51a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/chan.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <nvif/chan.h>
+
+static void
+nvif_chan_gpfifo_push_kick(struct nvif_push *push)
+{
+ struct nvif_chan *chan = container_of(push, typeof(*chan), push);
+ u32 put = push->bgn - (u32 *)chan->push.mem.object.map.ptr;
+ u32 cnt;
+
+ if (chan->func->gpfifo.post) {
+ if (push->end - push->cur < chan->func->gpfifo.post_size)
+ push->end = push->cur + chan->func->gpfifo.post_size;
+
+ WARN_ON(nvif_chan_gpfifo_post(chan));
+ }
+
+ cnt = push->cur - push->bgn;
+
+ chan->func->gpfifo.push(chan, true, chan->push.addr + (put << 2), cnt << 2, false);
+ chan->func->gpfifo.kick(chan);
+}
+
+static int
+nvif_chan_gpfifo_push_wait(struct nvif_push *push, u32 push_nr)
+{
+ struct nvif_chan *chan = container_of(push, typeof(*chan), push);
+
+ return nvif_chan_gpfifo_wait(chan, 1, push_nr);
+}
+
+int
+nvif_chan_gpfifo_post(struct nvif_chan *chan)
+{
+ const u32 *map = chan->push.mem.object.map.ptr;
+ const u32 pbptr = (chan->push.cur - map) + chan->func->gpfifo.post_size;
+ const u32 gpptr = (chan->gpfifo.cur + 1) & chan->gpfifo.max;
+
+ return chan->func->gpfifo.post(chan, gpptr, pbptr);
+}
+
+void
+nvif_chan_gpfifo_push(struct nvif_chan *chan, u64 addr, u32 size, bool no_prefetch)
+{
+ chan->func->gpfifo.push(chan, false, addr, size, no_prefetch);
+}
+
+int
+nvif_chan_gpfifo_wait(struct nvif_chan *chan, u32 gpfifo_nr, u32 push_nr)
+{
+ struct nvif_push *push = &chan->push;
+ int ret = 0, time = 1000000;
+
+ if (gpfifo_nr) {
+ /* Account for pushbuf space needed by nvif_chan_gpfifo_post(),
+ * if used after pushing userspace GPFIFO entries.
+ */
+ if (chan->func->gpfifo.post)
+ push_nr += chan->func->gpfifo.post_size;
+ }
+
+ /* Account for the GPFIFO entry needed to submit pushbuf. */
+ if (push_nr)
+ gpfifo_nr++;
+
+ /* Wait for space in main push buffer. */
+ if (push->cur + push_nr > push->end) {
+ ret = nvif_chan_dma_wait(chan, push_nr);
+ if (ret)
+ return ret;
+ }
+
+ /* Wait for GPFIFO space. */
+ while (chan->gpfifo.free < gpfifo_nr) {
+ chan->gpfifo.free = chan->func->gpfifo.read_get(chan) - chan->gpfifo.cur - 1;
+ if (chan->gpfifo.free < 0)
+ chan->gpfifo.free += chan->gpfifo.max + 1;
+
+ if (chan->gpfifo.free < gpfifo_nr) {
+ if (!time--)
+ return -ETIMEDOUT;
+ udelay(1);
+ }
+ }
+
+ return 0;
+}
+
+void
+nvif_chan_gpfifo_ctor(const struct nvif_chan_func *func, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, struct nvif_chan *chan)
+{
+ chan->func = func;
+
+ chan->userd.map.ptr = userd;
+
+ chan->gpfifo.map.ptr = gpfifo;
+ chan->gpfifo.max = (gpfifo_size >> 3) - 1;
+ chan->gpfifo.free = chan->gpfifo.max;
+
+ chan->push.mem.object.map.ptr = push;
+ chan->push.wait = nvif_chan_gpfifo_push_wait;
+ chan->push.kick = nvif_chan_gpfifo_push_kick;
+ chan->push.addr = push_addr;
+ chan->push.hw.max = push_size >> 2;
+ chan->push.bgn = chan->push.cur = chan->push.end = push;
+}
+
+int
+nvif_chan_dma_wait(struct nvif_chan *chan, u32 nr)
+{
+ struct nvif_push *push = &chan->push;
+ u32 cur = push->cur - (u32 *)push->mem.object.map.ptr;
+ u32 free, time = 1000000;
+
+ nr += chan->func->gpfifo.post_size;
+
+ do {
+ u32 get = chan->func->push.read_get(chan);
+
+ if (get <= cur) {
+ free = push->hw.max - cur;
+ if (free >= nr)
+ break;
+
+ PUSH_KICK(push);
+
+ while (get == 0) {
+ get = chan->func->push.read_get(chan);
+ if (get == 0) {
+ if (!time--)
+ return -ETIMEDOUT;
+ udelay(1);
+ }
+ }
+
+ cur = 0;
+ }
+
+ free = get - cur - 1;
+
+ if (free < nr) {
+ if (!time--)
+ return -ETIMEDOUT;
+ udelay(1);
+ }
+ } while (free < nr);
+
+ push->bgn = (u32 *)push->mem.object.map.ptr + cur;
+ push->cur = push->bgn;
+ push->end = push->bgn + free - chan->func->gpfifo.post_size;
+ return 0;
+}
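
A worked example of the GPFIFO accounting in nvif_chan_gpfifo_wait() above; the numbers assume the 0x2000-byte GPFIFO ring that nouveau_channel_init() passes to the constructors elsewhere in this patch:

/*
 * gpfifo.max = (0x2000 >> 3) - 1 = 1023   (1024 8-byte entries; max
 *                                          doubles as the wrap mask)
 *
 * free = read_get() - cur - 1;            one slot is kept empty so that
 * if (free < 0)                           GET == PUT still unambiguously
 *         free += max + 1;                means "ring empty"
 *
 * e.g. cur = 1000, get = 10:
 *         free = 10 - 1000 - 1 = -991  ->  -991 + 1024 = 33 free entries
 */
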
diff --git a/drivers/gpu/drm/nouveau/nvif/chan506f.c b/drivers/gpu/drm/nouveau/nvif/chan506f.c
new file mode 100644
index 000000000000..d3900887c4a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/chan506f.c
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <nvif/chan.h>
+
+void
+nvif_chan506f_gpfifo_kick(struct nvif_chan *chan)
+{
+ wmb();
+ nvif_wr32(&chan->userd, 0x8c, chan->gpfifo.cur);
+}
+
+void
+nvif_chan506f_gpfifo_push(struct nvif_chan *chan, bool main, u64 addr, u32 size, bool no_prefetch)
+{
+ u32 gpptr = chan->gpfifo.cur << 3;
+
+ if (WARN_ON(!chan->gpfifo.free))
+ return;
+
+ nvif_wr32(&chan->gpfifo, gpptr + 0, lower_32_bits(addr));
+ nvif_wr32(&chan->gpfifo, gpptr + 4, upper_32_bits(addr) |
+ (main ? 0 : BIT(9)) |
+ (size >> 2) << 10 |
+ (no_prefetch ? BIT(31) : 0));
+
+ chan->gpfifo.cur = (chan->gpfifo.cur + 1) & chan->gpfifo.max;
+ chan->gpfifo.free--;
+ if (!chan->gpfifo.free)
+ chan->push.end = chan->push.cur;
+}
+
+static u32
+nvif_chan506f_gpfifo_read_get(struct nvif_chan *chan)
+{
+ return nvif_rd32(&chan->userd, 0x88);
+}
+
+static u32
+nvif_chan506f_read_get(struct nvif_chan *chan)
+{
+ u32 tlgetlo = nvif_rd32(&chan->userd, 0x58);
+ u32 tlgethi = nvif_rd32(&chan->userd, 0x5c);
+ struct nvif_push *push = &chan->push;
+
+ /* Update cached GET pointer if TOP_LEVEL_GET is valid. */
+ if (tlgethi & BIT(31)) {
+ u64 tlget = ((u64)(tlgethi & 0xff) << 32) | tlgetlo;
+
+ push->hw.get = (tlget - push->addr) >> 2;
+ }
+
+ return push->hw.get;
+}
+
+static const struct nvif_chan_func
+nvif_chan506f = {
+ .push.read_get = nvif_chan506f_read_get,
+ .gpfifo.read_get = nvif_chan506f_gpfifo_read_get,
+ .gpfifo.push = nvif_chan506f_gpfifo_push,
+ .gpfifo.kick = nvif_chan506f_gpfifo_kick,
+};
+
+int
+nvif_chan506f_ctor(struct nvif_chan *chan, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size)
+{
+ nvif_chan_gpfifo_ctor(&nvif_chan506f, userd, gpfifo, gpfifo_size,
+ push, push_addr, push_size, chan);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvif/chan906f.c b/drivers/gpu/drm/nouveau/nvif/chan906f.c
new file mode 100644
index 000000000000..c9cfb85179b0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/chan906f.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <nvif/chan.h>
+#include <nvif/user.h>
+#include <nvif/push906f.h>
+
+#include <nvhw/class/cl906f.h>
+
+/* Limits GPFIFO size to 1MiB, and "main" push buffer size to 64KiB. */
+#define NVIF_CHAN906F_PBPTR_BITS 15
+#define NVIF_CHAN906F_PBPTR_MASK ((1 << NVIF_CHAN906F_PBPTR_BITS) - 1)
+
+#define NVIF_CHAN906F_GPPTR_SHIFT NVIF_CHAN906F_PBPTR_BITS
+#define NVIF_CHAN906F_GPPTR_BITS (32 - NVIF_CHAN906F_PBPTR_BITS)
+#define NVIF_CHAN906F_GPPTR_MASK ((1 << NVIF_CHAN906F_GPPTR_BITS) - 1)
+
+#define NVIF_CHAN906F_SEM_RELEASE_SIZE 5
+
+static int
+nvif_chan906f_sem_release(struct nvif_chan *chan, u64 addr, u32 data)
+{
+ struct nvif_push *push = &chan->push;
+ int ret;
+
+ ret = PUSH_WAIT(push, NVIF_CHAN906F_SEM_RELEASE_SIZE);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV906F, SEMAPHOREA,
+ NVVAL(NV906F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(addr)),
+
+ SEMAPHOREB, lower_32_bits(addr),
+
+ SEMAPHOREC, data,
+
+ SEMAPHORED,
+ NVDEF(NV906F, SEMAPHORED, OPERATION, RELEASE) |
+ NVDEF(NV906F, SEMAPHORED, RELEASE_WFI, DIS) |
+ NVDEF(NV906F, SEMAPHORED, RELEASE_SIZE, 16BYTE));
+
+ return 0;
+}
+
+int
+nvif_chan906f_gpfifo_post(struct nvif_chan *chan, u32 gpptr, u32 pbptr)
+{
+ return chan->func->sem.release(chan, chan->sema.addr,
+ (gpptr << NVIF_CHAN906F_GPPTR_SHIFT) | pbptr);
+}
+
+u32
+nvif_chan906f_gpfifo_read_get(struct nvif_chan *chan)
+{
+ return nvif_rd32(&chan->sema, 0) >> NVIF_CHAN906F_GPPTR_SHIFT;
+}
+
+u32
+nvif_chan906f_read_get(struct nvif_chan *chan)
+{
+ return nvif_rd32(&chan->sema, 0) & NVIF_CHAN906F_PBPTR_MASK;
+}
+
+static const struct nvif_chan_func
+nvif_chan906f = {
+ .push.read_get = nvif_chan906f_read_get,
+ .gpfifo.read_get = nvif_chan906f_gpfifo_read_get,
+ .gpfifo.push = nvif_chan506f_gpfifo_push,
+ .gpfifo.kick = nvif_chan506f_gpfifo_kick,
+ .gpfifo.post = nvif_chan906f_gpfifo_post,
+ .gpfifo.post_size = NVIF_CHAN906F_SEM_RELEASE_SIZE,
+ .sem.release = nvif_chan906f_sem_release,
+};
+
+int
+nvif_chan906f_ctor_(const struct nvif_chan_func *func, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr,
+ struct nvif_chan *chan)
+{
+ nvif_chan_gpfifo_ctor(func, userd, gpfifo, gpfifo_size, push, push_addr, push_size, chan);
+ chan->sema.map.ptr = sema;
+ chan->sema.addr = sema_addr;
+ return 0;
+}
+
+int
+nvif_chan906f_ctor(struct nvif_chan *chan, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr)
+{
+ return nvif_chan906f_ctor_(&nvif_chan906f, userd, gpfifo, gpfifo_size,
+ push, push_addr, push_size, sema, sema_addr, chan);
+}
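
On Fermi and later the kernel no longer polls GET from USERD; nvif_chan906f_gpfifo_post() instead releases a semaphore into the per-channel sema BO, and the two read_get() callbacks above simply unpack the last released value. A sketch of the payload encoding implied by the *_BITS defines:

/*
 * payload = (gpptr << 15) | pbptr    (see nvif_chan906f_gpfifo_post())
 *
 * pbptr = payload & 0x7fff;    pushbuf GET in dwords; 2^15 dwords gives
 *                              128KiB of addressable pushbuf, covering
 *                              the 64KiB "main" buffer with headroom
 * gpptr = payload >> 15;       GPFIFO GET in entries; 2^17 entries of
 *                              8 bytes each matches the 1MiB limit in
 *                              the comment at the top of this file
 */
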
diff --git a/drivers/gpu/drm/nouveau/nvif/chanc36f.c b/drivers/gpu/drm/nouveau/nvif/chanc36f.c
new file mode 100644
index 000000000000..ca02b939c3fd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/chanc36f.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <nvif/chan.h>
+#include <nvif/user.h>
+
+#include <nvif/push906f.h>
+#include <nvhw/class/clc36f.h>
+
+static void
+nvif_chanc36f_gpfifo_kick(struct nvif_chan *chan)
+{
+ struct nvif_user *usermode = chan->usermode;
+
+ nvif_wr32(&chan->userd, 0x8c, chan->gpfifo.cur);
+
+ wmb(); /* ensure CPU writes are flushed to BAR1 */
+ nvif_rd32(&chan->userd, 0); /* ensure BAR1 writes are flushed to vidmem */
+
+ usermode->func->doorbell(usermode, chan->doorbell_token);
+}
+
+#define NVIF_CHANC36F_SEM_RELEASE_SIZE 6
+
+static int
+nvif_chanc36f_sem_release(struct nvif_chan *chan, u64 addr, u32 data)
+{
+ struct nvif_push *push = &chan->push;
+ int ret;
+
+ ret = PUSH_WAIT(push, NVIF_CHANC36F_SEM_RELEASE_SIZE);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVC36F, SEM_ADDR_LO, lower_32_bits(addr),
+
+ SEM_ADDR_HI, upper_32_bits(addr),
+
+ SEM_PAYLOAD_LO, data);
+
+ PUSH_MTHD(push, NVC36F, SEM_EXECUTE,
+ NVDEF(NVC36F, SEM_EXECUTE, OPERATION, RELEASE) |
+ NVDEF(NVC36F, SEM_EXECUTE, RELEASE_WFI, DIS) |
+ NVDEF(NVC36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT) |
+ NVDEF(NVC36F, SEM_EXECUTE, RELEASE_TIMESTAMP, DIS));
+
+ return 0;
+}
+
+static const struct nvif_chan_func
+nvif_chanc36f = {
+ .push.read_get = nvif_chan906f_read_get,
+ .gpfifo.read_get = nvif_chan906f_gpfifo_read_get,
+ .gpfifo.push = nvif_chan506f_gpfifo_push,
+ .gpfifo.kick = nvif_chanc36f_gpfifo_kick,
+ .gpfifo.post = nvif_chan906f_gpfifo_post,
+ .gpfifo.post_size = NVIF_CHANC36F_SEM_RELEASE_SIZE,
+ .sem.release = nvif_chanc36f_sem_release,
+};
+
+int
+nvif_chanc36f_ctor(struct nvif_chan *chan, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr,
+ struct nvif_user *usermode, u32 doorbell_token)
+{
+ int ret;
+
+ ret = nvif_chan906f_ctor_(&nvif_chanc36f, userd, gpfifo, gpfifo_size,
+ push, push_addr, push_size, sema, sema_addr, chan);
+ if (ret)
+ return ret;
+
+ chan->usermode = usermode;
+ chan->doorbell_token = doorbell_token;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvif/conn.c b/drivers/gpu/drm/nouveau/nvif/conn.c
index 9ee18cb99264..5a1a83c62a2a 100644
--- a/drivers/gpu/drm/nouveau/nvif/conn.c
+++ b/drivers/gpu/drm/nouveau/nvif/conn.c
@@ -30,17 +30,17 @@ int
nvif_conn_event_ctor(struct nvif_conn *conn, const char *name, nvif_event_func func, u8 types,
struct nvif_event *event)
{
- struct {
- struct nvif_event_v0 base;
- struct nvif_conn_event_v0 conn;
- } args;
+ DEFINE_RAW_FLEX(struct nvif_event_v0, args, data,
+ sizeof(struct nvif_conn_event_v0));
+ struct nvif_conn_event_v0 *args_conn =
+ (struct nvif_conn_event_v0 *)args->data;
int ret;
- args.conn.version = 0;
- args.conn.types = types;
+ args_conn->version = 0;
+ args_conn->types = types;
ret = nvif_event_ctor_(&conn->object, name ?: "nvifConnHpd", nvif_conn_id(conn),
- func, true, &args.base, sizeof(args), false, event);
+ func, true, args, __struct_size(args), false, event);
NVIF_DEBUG(&conn->object, "[NEW EVENT:HPD types:%02x]", types);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c
index 14da22fa3b5b..fa42146252da 100644
--- a/drivers/gpu/drm/nouveau/nvif/disp.c
+++ b/drivers/gpu/drm/nouveau/nvif/disp.c
@@ -36,6 +36,7 @@ int
nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass, struct nvif_disp *disp)
{
static const struct nvif_mclass disps[] = {
+ { GB202_DISP, 0 },
{ AD102_DISP, 0 },
{ GA102_DISP, 0 },
{ TU102_DISP, 0 },
diff --git a/drivers/gpu/drm/nouveau/nvif/outp.c b/drivers/gpu/drm/nouveau/nvif/outp.c
index 6daeb7f0b09b..32f6c5eb92af 100644
--- a/drivers/gpu/drm/nouveau/nvif/outp.c
+++ b/drivers/gpu/drm/nouveau/nvif/outp.c
@@ -195,20 +195,17 @@ nvif_outp_dp_aux_pwr(struct nvif_outp *outp, bool enable)
int
nvif_outp_hda_eld(struct nvif_outp *outp, int head, void *data, u32 size)
{
- struct {
- struct nvif_outp_hda_eld_v0 mthd;
- u8 data[128];
- } args;
+ DEFINE_RAW_FLEX(struct nvif_outp_hda_eld_v0, mthd, data, 128);
int ret;
- if (WARN_ON(size > ARRAY_SIZE(args.data)))
+ if (WARN_ON(size > __member_size(mthd->data)))
return -EINVAL;
- args.mthd.version = 0;
- args.mthd.head = head;
+ mthd->version = 0;
+ mthd->head = head;
- memcpy(args.data, data, size);
- ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_HDA_ELD, &args, sizeof(args.mthd) + size);
+ memcpy(mthd->data, data, size);
+ ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_HDA_ELD, mthd, sizeof(*mthd) + size);
NVIF_ERRON(ret, &outp->object, "[HDA_ELD head:%d size:%d]", head, size);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/user.c b/drivers/gpu/drm/nouveau/nvif/user.c
index b648a5e036af..53f03fa1c9c2 100644
--- a/drivers/gpu/drm/nouveau/nvif/user.c
+++ b/drivers/gpu/drm/nouveau/nvif/user.c
@@ -41,9 +41,11 @@ nvif_user_ctor(struct nvif_device *device, const char *name)
int version;
const struct nvif_user_func *func;
} users[] = {
- { AMPERE_USERMODE_A, -1, &nvif_userc361 },
- { TURING_USERMODE_A, -1, &nvif_userc361 },
- { VOLTA_USERMODE_A, -1, &nvif_userc361 },
+ { BLACKWELL_USERMODE_A, -1, &nvif_userc361 },
+ { HOPPER_USERMODE_A, -1, &nvif_userc361 },
+ { AMPERE_USERMODE_A, -1, &nvif_userc361 },
+ { TURING_USERMODE_A, -1, &nvif_userc361 },
+ { VOLTA_USERMODE_A, -1, &nvif_userc361 },
{}
};
int cid, ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index 2e48b0816670..ddcf8782d6b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -17,8 +17,6 @@ include $(src)/nvkm/engine/msppp/Kbuild
include $(src)/nvkm/engine/msvld/Kbuild
include $(src)/nvkm/engine/nvenc/Kbuild
include $(src)/nvkm/engine/nvdec/Kbuild
-include $(src)/nvkm/engine/nvjpg/Kbuild
-include $(src)/nvkm/engine/ofa/Kbuild
include $(src)/nvkm/engine/sec/Kbuild
include $(src)/nvkm/engine/sec2/Kbuild
include $(src)/nvkm/engine/sw/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 165d61fc5d6c..9754bac65df7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -10,5 +10,4 @@ nvkm-y += nvkm/engine/ce/gv100.o
nvkm-y += nvkm/engine/ce/tu102.o
nvkm-y += nvkm/engine/ce/ga100.o
nvkm-y += nvkm/engine/ce/ga102.o
-
-nvkm-y += nvkm/engine/ce/r535.o
+nvkm-y += nvkm/engine/ce/gb202.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
index 9427a592bd16..1c0c60138706 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
@@ -90,7 +90,7 @@ ga100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_ce_new(&ga100_ce, device, type, inst, pengine);
+ return -ENODEV;
return nvkm_engine_new_(&ga100_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
index ce56ede7c2e9..9359c5e7aa3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
@@ -44,7 +44,7 @@ ga102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_ce_new(&ga102_ce, device, type, inst, pengine);
+ return -ENODEV;
return nvkm_engine_new_(&ga102_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c
new file mode 100644
index 000000000000..37c3c619c71b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gb202/dev_ce.h>
+
+u32
+gb202_ce_grce_mask(struct nvkm_device *device)
+{
+ u32 data = nvkm_rd32(device, NV_CE_GRCE_MASK);
+
+ return NVVAL_GET(data, NV_CE, GRCE_MASK, VALUE);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
index 806a76a72249..34fd2657134b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -16,4 +16,6 @@ int ga100_ce_oneinit(struct nvkm_engine *);
int ga100_ce_init(struct nvkm_engine *);
int ga100_ce_fini(struct nvkm_engine *, bool);
int ga100_ce_nonstall(struct nvkm_engine *);
+
+u32 gb202_ce_grce_mask(struct nvkm_device *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c
deleted file mode 100644
index bd0d435dbbd3..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h>
-
-struct r535_ce_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_ce_obj_dtor(struct nvkm_object *object)
-{
- struct r535_ce_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_ce_obj = {
- .dtor = r535_ce_obj_dtor,
-};
-
-static int
-r535_ce_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_ce_obj *obj;
- NVC0B5_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_ce_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->version = 1;
- args->engineType = NV2080_ENGINE_TYPE_COPY0 + oclass->engine->subdev.inst;
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_ce_dtor(struct nvkm_engine *engine)
-{
- kfree(engine->func);
- return engine;
-}
-
-int
-r535_ce_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
-{
- struct nvkm_engine_func *rm;
- int nclass, ret;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_ce_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_ce_obj_ctor;
- }
-
- ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
- if (ret)
- kfree(rm);
-
- return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
index 7c8647dcb349..67d0545cf902 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
@@ -40,7 +40,7 @@ tu102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_ce_new(&tu102_ce, device, type, inst, pengine);
+ return -ENODEV;
return nvkm_engine_new_(&tu102_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 9093d89b16f3..3375a59ebf1a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2529,9 +2529,6 @@ nv170_chipset = {
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x000003ff, ga100_ce_new },
.fifo = { 0x00000001, ga100_fifo_new },
- .nvdec = { 0x0000001f, ga100_nvdec_new },
- .nvjpg = { 0x00000001, ga100_nvjpg_new },
- .ofa = { 0x00000001, ga100_ofa_new },
};
static const struct nvkm_device_chip
@@ -2561,8 +2558,6 @@ nv172_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2593,8 +2588,6 @@ nv173_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2625,8 +2618,6 @@ nv174_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2657,8 +2648,6 @@ nv176_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2689,12 +2678,26 @@ nv177_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};

static const struct nvkm_device_chip
+nv180_chipset = {
+ .name = "GH100",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, gh100_fb_new },
+ .fsp = { 0x00000001, gh100_fsp_new },
+ .gsp = { 0x00000001, gh100_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
nv192_chipset = {
.name = "AD102",
.bar = { 0x00000001, tu102_bar_new },
@@ -2709,14 +2712,9 @@ nv192_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2735,14 +2733,9 @@ nv193_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2761,14 +2754,9 @@ nv194_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2787,14 +2775,9 @@ nv196_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2813,17 +2796,122 @@ nv197_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};

+static const struct nvkm_device_chip
+nv1a0_chipset = {
+ .name = "GB100",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb100_fb_new },
+ .fsp = { 0x00000001, gb100_fsp_new },
+ .gsp = { 0x00000001, gb100_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1a2_chipset = {
+ .name = "GB102",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb100_fb_new },
+ .fsp = { 0x00000001, gb100_fsp_new },
+ .gsp = { 0x00000001, gb100_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b2_chipset = {
+ .name = "GB202",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b3_chipset = {
+ .name = "GB203",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b5_chipset = {
+ .name = "GB205",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b6_chipset = {
+ .name = "GB206",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b7_chipset = {
+ .name = "GB207",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
struct nvkm_subdev *
nvkm_device_subdev(struct nvkm_device *device, int type, int inst)
{
@@ -3065,8 +3153,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
device->debug = nvkm_dbgopt(device->dbgopt, "device");
INIT_LIST_HEAD(&device->subdev);

- mmio_base = device->func->resource_addr(device, 0);
- mmio_size = device->func->resource_size(device, 0);
+ mmio_base = device->func->resource_addr(device, NVKM_BAR0_PRI);
+ mmio_size = device->func->resource_size(device, NVKM_BAR0_PRI);

device->pri = ioremap(mmio_base, mmio_size);
if (device->pri == NULL) {
@@ -3139,7 +3227,10 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x140: device->card_type = GV100; break;
case 0x160: device->card_type = TU100; break;
case 0x170: device->card_type = GA100; break;
+ case 0x180: device->card_type = GH100; break;
case 0x190: device->card_type = AD100; break;
+ case 0x1a0: device->card_type = GB10x; break;
+ case 0x1b0: device->card_type = GB20x; break;
default:
break;
}
@@ -3242,11 +3333,19 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x174: device->chip = &nv174_chipset; break;
case 0x176: device->chip = &nv176_chipset; break;
case 0x177: device->chip = &nv177_chipset; break;
+ case 0x180: device->chip = &nv180_chipset; break;
case 0x192: device->chip = &nv192_chipset; break;
case 0x193: device->chip = &nv193_chipset; break;
case 0x194: device->chip = &nv194_chipset; break;
case 0x196: device->chip = &nv196_chipset; break;
case 0x197: device->chip = &nv197_chipset; break;
+ case 0x1a0: device->chip = &nv1a0_chipset; break;
+ case 0x1a2: device->chip = &nv1a2_chipset; break;
+ case 0x1b2: device->chip = &nv1b2_chipset; break;
+ case 0x1b3: device->chip = &nv1b3_chipset; break;
+ case 0x1b5: device->chip = &nv1b5_chipset; break;
+ case 0x1b6: device->chip = &nv1b6_chipset; break;
+ case 0x1b7: device->chip = &nv1b7_chipset; break;
default:
if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
switch (device->chipset) {
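The two switch statements above decode the same chipset value at different granularities: the upper bits select the card_type family, the full value selects the per-die chipset table. A standalone restatement of the family decode for the values added here, assuming the low nibble is the per-die index (an illustration, not kernel code):

	/* e.g. 0x1b2 (GB202) and 0x1b7 (GB207) both land in GB20x. */
	static const char *family_for(unsigned int chipset)
	{
		switch (chipset & ~0xfu) {
		case 0x180: return "GH100 (Hopper)";
		case 0x1a0: return "GB10x (Blackwell)";
		case 0x1b0: return "GB20x (Blackwell)";
		default:    return "an earlier family";
		}
	}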
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 3ff6436007fa..8f0261a0d618 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1560,18 +1560,42 @@ nvkm_device_pci(struct nvkm_device *device)
return container_of(device, struct nvkm_device_pci, device);
}

+static int
+nvkm_device_pci_resource_idx(struct nvkm_device_pci *pdev, enum nvkm_bar_id bar)
+{
+ int idx = 0;
+
+ if (bar == NVKM_BAR0_PRI)
+ return idx;
+
+ idx += (pci_resource_flags(pdev->pdev, idx) & IORESOURCE_MEM_64) ? 2 : 1;
+ if (bar == NVKM_BAR1_FB)
+ return idx;
+
+ idx += (pci_resource_flags(pdev->pdev, idx) & IORESOURCE_MEM_64) ? 2 : 1;
+ if (bar == NVKM_BAR2_INST)
+ return idx;
+
+ WARN_ON(1);
+ return -1;
+}
+
static resource_size_t
-nvkm_device_pci_resource_addr(struct nvkm_device *device, unsigned bar)
+nvkm_device_pci_resource_addr(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct nvkm_device_pci *pdev = nvkm_device_pci(device);
- return pci_resource_start(pdev->pdev, bar);
+ int idx = nvkm_device_pci_resource_idx(pdev, bar);
+
+ return idx >= 0 ? pci_resource_start(pdev->pdev, idx) : 0;
}

static resource_size_t
-nvkm_device_pci_resource_size(struct nvkm_device *device, unsigned bar)
+nvkm_device_pci_resource_size(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct nvkm_device_pci *pdev = nvkm_device_pci(device);
- return pci_resource_len(pdev->pdev, bar);
+ int idx = nvkm_device_pci_resource_idx(pdev, bar);
+
+ return idx >= 0 ? pci_resource_len(pdev->pdev, idx) : 0;
}

static int
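nvkm_device_pci_resource_idx() above encodes a PCI detail that is easy to trip over: a 64-bit memory BAR (IORESOURCE_MEM_64) occupies two consecutive resource slots, so the index of the FB and instance-memory BARs depends on the width of the BARs preceding them. A standalone model of the same walk, assuming the typical discrete-GPU layout of a 32-bit BAR0 and a 64-bit BAR1:

	#include <assert.h>
	#include <stdbool.h>

	enum bar_id { BAR0_PRI, BAR1_FB, BAR2_INST };

	/* bar64[n] is true when physical BAR n is 64-bit (two slots wide). */
	static int bar_to_index(enum bar_id bar, const bool bar64[])
	{
		int idx = 0;

		if (bar == BAR0_PRI)
			return idx;
		idx += bar64[0] ? 2 : 1;	/* step over BAR0 */
		if (bar == BAR1_FB)
			return idx;
		idx += bar64[1] ? 2 : 1;	/* step over BAR1 */
		return idx;			/* BAR2_INST */
	}

	int main(void)
	{
		const bool bar64[] = { false, true }; /* 32-bit PRI, 64-bit FB */

		assert(bar_to_index(BAR0_PRI,  bar64) == 0);
		assert(bar_to_index(BAR1_FB,   bar64) == 1);
		assert(bar_to_index(BAR2_INST, bar64) == 3);
		return 0;
	}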
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index e42b18820a95..75ee7506d443 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -11,6 +11,7 @@
#include <subdev/devinit.h>
#include <subdev/fault.h>
#include <subdev/fb.h>
+#include <subdev/fsp.h>
#include <subdev/fuse.h>
#include <subdev/gpio.h>
#include <subdev/gsp.h>
@@ -43,8 +44,6 @@
#include <engine/msvld.h>
#include <engine/nvenc.h>
#include <engine/nvdec.h>
-#include <engine/nvjpg.h>
-#include <engine/ofa.h>
#include <engine/sec.h>
#include <engine/sec2.h>
#include <engine/sw.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 78a83f904bbd..114e50ca1827 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -186,21 +186,31 @@ nvkm_device_tegra(struct nvkm_device *device)
}

static struct resource *
-nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
+nvkm_device_tegra_resource(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
- return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
+ int idx;
+
+ switch (bar) {
+ case NVKM_BAR0_PRI: idx = 0; break;
+ case NVKM_BAR1_FB : idx = 1; break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return platform_get_resource(tdev->pdev, IORESOURCE_MEM, idx);
}

static resource_size_t
-nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
+nvkm_device_tegra_resource_addr(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct resource *res = nvkm_device_tegra_resource(device, bar);
return res ? res->start : 0;
}

static resource_size_t
-nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
+nvkm_device_tegra_resource_size(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct resource *res = nvkm_device_tegra_resource(device, bar);
return res ? resource_size(res) : 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index d7f75b3a43c8..58191b7a0494 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -148,6 +148,9 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
case AD100: args->v0.family = NV_DEVICE_INFO_V0_ADA; break;
+ case GH100: args->v0.family = NV_DEVICE_INFO_V0_HOPPER; break;
+ case GB10x: args->v0.family = NV_DEVICE_INFO_V0_BLACKWELL; break;
+ case GB20x: args->v0.family = NV_DEVICE_INFO_V0_BLACKWELL; break;
default:
args->v0.family = 0;
break;
@@ -209,8 +212,8 @@ nvkm_udevice_map(struct nvkm_object *object, void *argv, u32 argc,
struct nvkm_udevice *udev = nvkm_udevice(object);
struct nvkm_device *device = udev->device;

*type = NVKM_OBJECT_MAP_IO;
- *addr = device->func->resource_addr(device, 0);
- *size = device->func->resource_size(device, 0);
+ *addr = device->func->resource_addr(device, NVKM_BAR0_PRI);
+ *size = device->func->resource_size(device, NVKM_BAR0_PRI);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index e346e924fee8..e1aecd3fe96c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -27,9 +27,6 @@ nvkm-y += nvkm/engine/disp/gp102.o
nvkm-y += nvkm/engine/disp/gv100.o
nvkm-y += nvkm/engine/disp/tu102.o
nvkm-y += nvkm/engine/disp/ga102.o
-nvkm-y += nvkm/engine/disp/ad102.o
-
-nvkm-y += nvkm/engine/disp/r535.o

nvkm-y += nvkm/engine/disp/udisp.o
nvkm-y += nvkm/engine/disp/uconn.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c
deleted file mode 100644
index 7f300a79aa29..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-#include "chan.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_disp_func
-ad102_disp = {
- .uevent = &gv100_disp_chan_uevent,
- .ramht_size = 0x2000,
- .root = { 0, 0,AD102_DISP },
- .user = {
- {{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
- {{ 0, 0,GA102_DISP_CURSOR }, nvkm_disp_chan_new, &gv100_disp_curs },
- {{ 0, 0,GA102_DISP_WINDOW_IMM_CHANNEL_DMA}, nvkm_disp_wndw_new, &gv100_disp_wimm },
- {{ 0, 0,AD102_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gv100_disp_core },
- {{ 0, 0,GA102_DISP_WINDOW_CHANNEL_DMA }, nvkm_disp_wndw_new, &gv100_disp_wndw },
- {}
- },
-};
-
-int
-ad102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_disp **pdisp)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_disp_new(&ad102_disp, device, type, inst, pdisp);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
index 4e43ee383c34..9b84e357d354 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
@@ -49,7 +49,7 @@ nvkm_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
struct nvkm_device *device = chan->disp->engine.subdev.device;
- const u64 base = device->func->resource_addr(device, 0);
+ const u64 base = device->func->resource_addr(device, NVKM_BAR0_PRI);

*type = NVKM_OBJECT_MAP_IO;
*addr = base + chan->func->user(chan, size);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index cfa3698d3a2f..614921166fba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -805,7 +805,7 @@ gv100_disp_caps_map(struct nvkm_object *object, void *argv, u32 argc,
struct gv100_disp_caps *caps = gv100_disp_caps(object);
struct nvkm_device *device = caps->disp->engine.subdev.device;

*type = NVKM_OBJECT_MAP_IO;
- *addr = 0x640000 + device->func->resource_addr(device, 0);
+ *addr = 0x640000 + device->func->resource_addr(device, NVKM_BAR0_PRI);
*size = 0x1000;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index aff92848abfe..376e9c3bcb1a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -25,8 +25,7 @@ nvkm-y += nvkm/engine/fifo/gv100.o
nvkm-y += nvkm/engine/fifo/tu102.o
nvkm-y += nvkm/engine/fifo/ga100.o
nvkm-y += nvkm/engine/fifo/ga102.o
-
-nvkm-y += nvkm/engine/fifo/r535.o
+nvkm-y += nvkm/engine/fifo/gb202.o

nvkm-y += nvkm/engine/fifo/ucgrp.o
nvkm-y += nvkm/engine/fifo/uchan.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 22443fe4a39f..fdffa0391b31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -303,7 +303,7 @@ nvkm_fifo_oneinit(struct nvkm_engine *engine)
}

/* Allocate USERD + BAR1 polling area. */
- if (fifo->func->chan.func->userd->bar == 1) {
+ if (fifo->func->chan.func->userd->bar == NVKM_BAR1_FB) {
struct nvkm_vmm *bar1 = nvkm_bar_bar1_vmm(device);

ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, fifo->chid->nr *
@@ -349,8 +349,6 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
nvkm_chid_unref(&fifo->cgid);
nvkm_chid_unref(&fifo->chid);

- mutex_destroy(&fifo->userd.mutex);
-
nvkm_event_fini(&fifo->nonstall.event);

mutex_destroy(&fifo->mutex);
@@ -391,8 +389,5 @@ nvkm_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
spin_lock_init(&fifo->lock);
mutex_init(&fifo->mutex);

- INIT_LIST_HEAD(&fifo->userd.list);
- mutex_init(&fifo->userd.mutex);
-
return nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index 7d4716dcd512..4e09985424b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -275,11 +275,7 @@ nvkm_chan_del(struct nvkm_chan **pchan)
nvkm_gpuobj_del(&chan->ramfc);

if (chan->cgrp) {
- if (!chan->func->id_put)
- nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
- else
- chan->func->id_put(chan);
-
+ nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
nvkm_cgrp_unref(&chan->cgrp);
}
@@ -359,14 +355,14 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru
/* Validate arguments against class requirements. */
if ((runq && runq >= runl->func->runqs) ||
(!func->inst->vmm != !vmm) ||
- ((func->userd->bar < 0) == !userd) ||
+ (!func->userd->bar == !userd) ||
(!func->ramfc->ctxdma != !dmaobj) ||
((func->ramfc->devm < devm) && devm != BIT(0)) ||
(!func->ramfc->priv && priv)) {
RUNL_DEBUG(runl, "args runq:%d:%d vmm:%d:%p userd:%d:%p "
"push:%d:%p devm:%08x:%08x priv:%d:%d",
runl->func->runqs, runq, func->inst->vmm, vmm,
- func->userd->bar < 0, userd, func->ramfc->ctxdma, dmaobj,
+ func->userd->bar, userd, func->ramfc->ctxdma, dmaobj,
func->ramfc->devm, devm, func->ramfc->priv, priv);
return -EINVAL;
}
@@ -441,30 +437,26 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru
}

/* Allocate channel ID. */
- if (!chan->func->id_get) {
- chan->id = nvkm_chid_get(runl->chid, chan);
- if (chan->id >= 0) {
- if (func->userd->bar < 0) {
- if (ouserd + chan->func->userd->size >=
- nvkm_memory_size(userd)) {
- RUNL_DEBUG(runl, "ouserd %llx", ouserd);
- return -EINVAL;
- }
-
- ret = nvkm_memory_kmap(userd, &chan->userd.mem);
- if (ret) {
- RUNL_DEBUG(runl, "userd %d", ret);
- return ret;
- }
-
- chan->userd.base = ouserd;
- } else {
- chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
- chan->userd.base = chan->id * chan->func->userd->size;
+ chan->id = nvkm_chid_get(runl->chid, chan);
+ if (chan->id >= 0) {
+ if (!func->userd->bar) {
+ if (ouserd + chan->func->userd->size >=
+ nvkm_memory_size(userd)) {
+ RUNL_DEBUG(runl, "ouserd %llx", ouserd);
+ return -EINVAL;
+ }
+
+ ret = nvkm_memory_kmap(userd, &chan->userd.mem);
+ if (ret) {
+ RUNL_DEBUG(runl, "userd %d", ret);
+ return ret;
}
+
+ chan->userd.base = ouserd;
+ } else {
+ chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
+ chan->userd.base = chan->id * chan->func->userd->size;
}
- } else {
- chan->id = chan->func->id_get(chan, userd, ouserd);
}

if (chan->id < 0) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
index 013682a709d5..445db5dfd1e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -17,9 +17,6 @@ struct nvkm_cctx {
};
struct nvkm_chan_func {
- int (*id_get)(struct nvkm_chan *, struct nvkm_memory *userd, u64 ouserd);
- void (*id_put)(struct nvkm_chan *);
-
const struct nvkm_chan_func_inst {
u32 size;
bool zero;
@@ -27,7 +24,7 @@ struct nvkm_chan_func {
} *inst;
const struct nvkm_chan_func_userd {
- int bar;
+ enum nvkm_bar_id bar;
u32 base;
u32 size;
void (*clear)(struct nvkm_chan *);
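The !func->userd->bar tests in chan.c and uchan.c, and gv100 dropping its .bar = -1 initializer below, only line up if the new enum reserves 0 for "USERD not exposed through a BAR", replacing the old signed sentinel. Presumably the definition is along these lines (a sketch; the real one lives in the nvkm core headers):

	enum nvkm_bar_id {
		NVKM_BAR_INVALID = 0,	/* zero-initialized: no BAR mapping */
		NVKM_BAR0_PRI,		/* priv registers (MMIO) */
		NVKM_BAR1_FB,		/* framebuffer aperture */
		NVKM_BAR2_INST,		/* instance-memory aperture */
	};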
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c
new file mode 100644
index 000000000000..b469e8afeb0b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "runl.h"
+
+u32
+gb202_chan_doorbell_handle(struct nvkm_chan *chan)
+{
+ return BIT(30) | (chan->cgrp->runl->id << 16) | chan->id;
+}
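Compared with tu102_chan_doorbell_handle() (made non-static further down), the GB202 encoding keeps the runlist/channel layout and additionally sets bit 30. For example, with runlist 2 and channel 5 (values chosen only for illustration):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t tu102_handle(uint32_t runl, uint32_t chid)
	{
		return (runl << 16) | chid;
	}

	static uint32_t gb202_handle(uint32_t runl, uint32_t chid)
	{
		return (UINT32_C(1) << 30) | (runl << 16) | chid;
	}

	int main(void)
	{
		printf("tu102: 0x%08x\n", tu102_handle(2, 5)); /* 0x00020005 */
		printf("gb202: 0x%08x\n", gb202_handle(2, 5)); /* 0x40020005 */
		return 0;
	}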
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index 6c94451d0faa..e4a4fad2eafc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -133,7 +133,7 @@ gf100_chan_userd_clear(struct nvkm_chan *chan)
static const struct nvkm_chan_func_userd
gf100_chan_userd = {
- .bar = 1,
+ .bar = NVKM_BAR1_FB,
.size = 0x1000,
.clear = gf100_chan_userd_clear,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index d8a4d773a58c..5655eda52a7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -113,7 +113,7 @@ gk104_chan_ramfc = {
const struct nvkm_chan_func_userd
gk104_chan_userd = {
- .bar = 1,
+ .bar = NVKM_BAR1_FB,
.size = 0x200,
.clear = gf100_chan_userd_clear,
};
@@ -745,7 +745,7 @@ gk104_fifo_init(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;

- if (fifo->func->chan.func->userd->bar == 1)
+ if (fifo->func->chan.func->userd->bar == NVKM_BAR1_FB)
nvkm_wr32(device, 0x002254, 0x10000000 | fifo->userd.bar1->addr >> 12);

nvkm_wr32(device, 0x002100, 0xffffffff);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
index 33066c8cdc64..d7f046c03cfd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
@@ -70,7 +70,6 @@ gv100_chan_ramfc = {
const struct nvkm_chan_func_userd
gv100_chan_userd = {
- .bar = -1,
.size = 0x200,
.clear = gf100_chan_userd_clear,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index 674faf002b20..c4b8e567d86f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -154,7 +154,7 @@ nv04_chan_ramfc = {
const struct nvkm_chan_func_userd
nv04_chan_userd = {
- .bar = 0,
+ .bar = NVKM_BAR0_PRI,
.base = 0x800000,
.size = 0x010000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
index e50a94b6d7f8..084ca5561ee1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -93,7 +93,7 @@ nv40_chan_ramfc = {
static const struct nvkm_chan_func_userd
nv40_chan_userd = {
- .bar = 0,
+ .bar = NVKM_BAR0_PRI,
.base = 0xc00000,
.size = 0x001000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index 954b5f3a7d57..7bf77661157d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -124,7 +124,7 @@ nv50_chan_ramfc = {
const struct nvkm_chan_func_userd
nv50_chan_userd = {
- .bar = 0,
+ .bar = NVKM_BAR0_PRI,
.base = 0xc00000,
.size = 0x002000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index a0f3277605a5..5e81ae195329 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -6,6 +6,7 @@
#include <core/enum.h>
struct nvkm_cctx;
struct nvkm_cgrp;
+struct nvkm_chan;
struct nvkm_engn;
struct nvkm_memory;
struct nvkm_runl;
@@ -195,6 +196,7 @@ extern const struct nvkm_chan_func_ramfc gv100_chan_ramfc;
void tu102_fifo_intr_ctxsw_timeout_info(struct nvkm_engn *, u32 info);
extern const struct nvkm_fifo_func_mmu_fault tu102_fifo_mmu_fault;
+u32 tu102_chan_doorbell_handle(struct nvkm_chan *);
int ga100_fifo_runl_ctor(struct nvkm_fifo *);
int ga100_fifo_nonstall_ctor(struct nvkm_fifo *);
@@ -206,6 +208,8 @@ extern const struct nvkm_engn_func ga100_engn_ce;
extern const struct nvkm_cgrp_func ga100_cgrp;
extern const struct nvkm_chan_func ga100_chan;
+u32 gb202_chan_doorbell_handle(struct nvkm_chan *);
+

int nvkm_uchan_new(struct nvkm_fifo *, struct nvkm_cgrp *, const struct nvkm_oclass *,
void *argv, u32 argc, struct nvkm_object **);
int nvkm_ucgrp_new(struct nvkm_fifo *, const struct nvkm_oclass *, void *argv, u32 argc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
index 1d39a6840a40..c5a03298e88c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
@@ -31,7 +31,7 @@
#include <nvif/class.h>

-static u32
+u32
tu102_chan_doorbell_handle(struct nvkm_chan *chan)
{
return (chan->cgrp->runl->id << 16) | chan->id;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
index 9e56bcc166ed..52420a1edca5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
@@ -258,7 +258,7 @@ nvkm_uchan_map(struct nvkm_object *object, void *argv, u32 argc,
struct nvkm_chan *chan = nvkm_uchan(object)->chan;
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
- if (chan->func->userd->bar < 0)
+ if (!chan->func->userd->bar)
return -ENOSYS;
*type = NVKM_OBJECT_MAP_IO;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 1555f8c40b4f..b5418f05ccd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -41,9 +41,6 @@ nvkm-y += nvkm/engine/gr/gp10b.o
nvkm-y += nvkm/engine/gr/gv100.o
nvkm-y += nvkm/engine/gr/tu102.o
nvkm-y += nvkm/engine/gr/ga102.o
-nvkm-y += nvkm/engine/gr/ad102.o
-
-nvkm-y += nvkm/engine/gr/r535.o

nvkm-y += nvkm/engine/gr/ctxnv40.o
nvkm-y += nvkm/engine/gr/ctxnv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
index d285c597aff9..2b51f1d0c281 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
@@ -352,7 +352,7 @@ int
ga102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_gr_new(&ga102_gr, device, type, inst, pgr);
+ return -ENODEV;

return gf100_gr_new_(ga102_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index b0e0c9305034..54f686ba39ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -445,6 +445,4 @@ void gp108_gr_acr_bld_patch(struct nvkm_acr *, u32, s64);
int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gr **);
-int r535_gr_new(const struct gf100_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
- struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index 02a8c62a0a32..13407fafe947 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -297,7 +297,7 @@ nv20_gr_init(struct nvkm_gr *base)
nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);
/* begin RAM config */
- vramsz = device->func->resource_size(device, 1) - 1;
+ vramsz = device->func->resource_size(device, NVKM_BAR1_FB) - 1;
nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index a5e1f02791b4..b609b0150ba1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -386,7 +386,7 @@ nv40_gr_init(struct nvkm_gr *base)
}
/* begin RAM config */
- vramsz = device->func->resource_size(device, 1) - 1;
+ vramsz = device->func->resource_size(device, NVKM_BAR1_FB) - 1;
switch (device->chipset) {
case 0x40:
nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c
deleted file mode 100644
index f4bed3eb1ec2..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c
+++ /dev/null
@@ -1,508 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "gf100.h"
-
-#include <core/memory.h>
-#include <subdev/gsp.h>
-#include <subdev/mmu/vmm.h>
-#include <engine/fifo/priv.h>
-
-#include <nvif/if900d.h>
-
-#include <nvhw/drf.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
-
-#define r535_gr(p) container_of((p), struct r535_gr, base)
-
-#define R515_GR_MAX_CTXBUFS 9
-
-struct r535_gr {
- struct nvkm_gr base;
-
- struct {
- u16 bufferId;
- u32 size;
- u8 page;
- u8 align;
- bool global;
- bool init;
- bool ro;
- } ctxbuf[R515_GR_MAX_CTXBUFS];
- int ctxbuf_nr;
-
- struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS];
-};
-
-struct r535_gr_chan {
- struct nvkm_object object;
- struct r535_gr *gr;
-
- struct nvkm_vmm *vmm;
- struct nvkm_chan *chan;
-
- struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS];
- struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
-};
-
-struct r535_gr_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_gr_obj_dtor(struct nvkm_object *object)
-{
- struct r535_gr_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_gr_obj = {
- .dtor = r535_gr_obj_dtor,
-};
-
-static int
-r535_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object);
- struct r535_gr_obj *obj;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_gr_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- return nvkm_gsp_rm_alloc(&chan->chan->rm.object, oclass->handle, oclass->base.oclass, 0,
- &obj->rm);
-}
-
-static void *
-r535_gr_chan_dtor(struct nvkm_object *object)
-{
- struct r535_gr_chan *grc = container_of(object, typeof(*grc), object);
- struct r535_gr *gr = grc->gr;
-
- for (int i = 0; i < gr->ctxbuf_nr; i++) {
- nvkm_vmm_put(grc->vmm, &grc->vma[i]);
- nvkm_memory_unref(&grc->mem[i]);
- }
-
- nvkm_vmm_unref(&grc->vmm);
- return grc;
-}
-
-static const struct nvkm_object_func
-r535_gr_chan = {
- .dtor = r535_gr_chan_dtor,
-};
-
-static int
-r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm,
- struct nvkm_memory **pmem, struct nvkm_vma **pvma,
- struct nvkm_gsp_object *chan)
-{
- struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
- NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
-
- ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice,
- NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
- if (WARN_ON(IS_ERR(ctrl)))
- return PTR_ERR(ctrl);
-
- ctrl->engineType = 1;
- ctrl->hChanClient = vmm->rm.client.object.handle;
- ctrl->hObject = chan->handle;
-
- for (int i = 0; i < gr->ctxbuf_nr; i++) {
- NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry =
- &ctrl->promoteEntry[ctrl->entryCount];
- const bool alloc = golden || !gr->ctxbuf[i].global;
- int ret;
-
- entry->bufferId = gr->ctxbuf[i].bufferId;
- entry->bInitialize = gr->ctxbuf[i].init && alloc;
-
- if (alloc) {
- ret = nvkm_memory_new(device, gr->ctxbuf[i].init ?
- NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST,
- gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page,
- gr->ctxbuf[i].init, &pmem[i]);
- if (WARN_ON(ret))
- return ret;
-
- if (gr->ctxbuf[i].bufferId ==
- NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP)
- entry->bNonmapped = 1;
- } else {
- if (gr->ctxbuf[i].bufferId ==
- NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP)
- continue;
-
- pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]);
- }
-
- if (!entry->bNonmapped) {
- struct gf100_vmm_map_v0 args = {
- .priv = 1,
- .ro = gr->ctxbuf[i].ro,
- };
-
- mutex_lock(&vmm->mutex.vmm);
- ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align,
- nvkm_memory_size(pmem[i]), &pvma[i]);
- mutex_unlock(&vmm->mutex.vmm);
- if (ret)
- return ret;
-
- ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args));
- if (ret)
- return ret;
-
- entry->gpuVirtAddr = pvma[i]->addr;
- }
-
- if (entry->bInitialize) {
- entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]);
- entry->size = gr->ctxbuf[i].size;
- entry->physAttr = 4;
- }
-
- nvkm_debug(subdev,
- "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n",
- entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size,
- entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped);
-
- ctrl->entryCount++;
- }
-
- return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl);
-}
-
-static int
-r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass,
- struct nvkm_object **pobject)
-{
- struct r535_gr *gr = r535_gr(base);
- struct r535_gr_chan *grc;
- int ret;
-
- if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object);
- grc->gr = gr;
- grc->vmm = nvkm_vmm_ref(chan->vmm);
- grc->chan = chan;
- *pobject = &grc->object;
-
- ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static u64
-r535_gr_units(struct nvkm_gr *gr)
-{
- struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp;
-
- return (gsp->gr.tpcs << 8) | gsp->gr.gpcs;
-}
-
-static int
-r535_gr_oneinit(struct nvkm_gr *base)
-{
- NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
- struct r535_gr *gr = container_of(base, typeof(*gr), base);
- struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
- struct nvkm_gsp *gsp = device->gsp;
- struct nvkm_mmu *mmu = device->mmu;
- struct {
- struct nvkm_memory *inst;
- struct nvkm_vmm *vmm;
- struct nvkm_gsp_object chan;
- struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
- } golden = {};
- int ret;
-
- /* Allocate a channel to use for golden context init. */
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst);
- if (ret)
- goto done;
-
- ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm);
- if (ret)
- goto done;
-
- ret = mmu->func->promote_vmm(golden.vmm);
- if (ret)
- goto done;
-
- {
- NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
-
- args = nvkm_gsp_rm_alloc_get(&golden.vmm->rm.device.object, 0xf1f00000,
- device->fifo->func->chan.user.oclass,
- sizeof(*args), &golden.chan);
- if (IS_ERR(args)) {
- ret = PTR_ERR(args);
- goto done;
- }
-
- args->gpFifoOffset = 0;
- args->gpFifoEntries = 0x1000 / 8;
- args->flags =
- NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL) |
- NVDEF(NVOS04, FLAGS, VPR, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE) |
- NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, 0) |
- NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE) |
- NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE) |
- NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, 0) |
- NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE) |
- NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, 0) |
- NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE) |
- NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE) |
- NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT) |
- NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE) |
- NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
- args->hVASpace = golden.vmm->rm.object.handle;
- args->engineType = 1;
- args->instanceMem.base = nvkm_memory_addr(golden.inst);
- args->instanceMem.size = 0x1000;
- args->instanceMem.addressSpace = 2;
- args->instanceMem.cacheAttrib = 1;
- args->ramfcMem.base = nvkm_memory_addr(golden.inst);
- args->ramfcMem.size = 0x200;
- args->ramfcMem.addressSpace = 2;
- args->ramfcMem.cacheAttrib = 1;
- args->userdMem.base = nvkm_memory_addr(golden.inst) + 0x1000;
- args->userdMem.size = 0x200;
- args->userdMem.addressSpace = 2;
- args->userdMem.cacheAttrib = 1;
- args->mthdbufMem.base = nvkm_memory_addr(golden.inst) + 0x2000;
- args->mthdbufMem.size = 0x5000;
- args->mthdbufMem.addressSpace = 2;
- args->mthdbufMem.cacheAttrib = 1;
- args->internalFlags =
- NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN) |
- NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE) |
- NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
-
- ret = nvkm_gsp_rm_alloc_wr(&golden.chan, args);
- if (ret)
- goto done;
- }
-
- /* Fetch context buffer info from RM and allocate each of them here to use
- * during golden context init (or later as a global context buffer).
- *
- * Also build the information that'll be used to create channel contexts.
- */
- info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
- NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
- sizeof(*info));
- if (WARN_ON(IS_ERR(info))) {
- ret = PTR_ERR(info);
- goto done;
- }
-
- for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++) {
- static const struct {
- u32 id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */
- u32 id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */
- bool global;
- bool init;
- bool ro;
- } map[] = {
-#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \
- .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \
- .global = (G), .init = (I), .ro = (R) }
-#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R))
- /* global init ro */
- _A( GRAPHICS, MAIN, false, true, false),
- _B( PATCH, false, true, false),
- _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB, true, false, false),
- _B( PAGEPOOL, true, false, false),
- _B( ATTRIBUTE_CB, true, false, false),
- _B( RTV_CB_GLOBAL, true, false, false),
- _B( FECS_EVENT, true, true, false),
- _B( PRIV_ACCESS_MAP, true, true, true),
-#undef _B
-#undef _A
- };
- u32 size = info->engineContextBuffersInfo[0].engine[i].size;
- u8 align, page;
- int id;
-
- for (id = 0; id < ARRAY_SIZE(map); id++) {
- if (map[id].id0 == i)
- break;
- }
-
- nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i,
- size, (id < ARRAY_SIZE(map)) ? "*" : "");
- if (id >= ARRAY_SIZE(map))
- continue;
-
- if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN)
- size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */
-
- if (size >= 1 << 21) page = 21;
- else if (size >= 1 << 16) page = 16;
- else page = 12;
-
- if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB)
- align = order_base_2(size);
- else
- align = page;
-
- if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
- continue;
-
- gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1;
- gr->ctxbuf[gr->ctxbuf_nr].size = size;
- gr->ctxbuf[gr->ctxbuf_nr].page = page;
- gr->ctxbuf[gr->ctxbuf_nr].align = align;
- gr->ctxbuf[gr->ctxbuf_nr].global = map[id].global;
- gr->ctxbuf[gr->ctxbuf_nr].init = map[id].init;
- gr->ctxbuf[gr->ctxbuf_nr].ro = map[id].ro;
- gr->ctxbuf_nr++;
-
- if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) {
- if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
- continue;
-
- gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1];
- gr->ctxbuf[gr->ctxbuf_nr].bufferId =
- NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP;
- gr->ctxbuf_nr++;
- }
- }
-
- nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
-
- /* Promote golden context to RM. */
- ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan);
- if (ret)
- goto done;
-
- /* Allocate 3D class on channel to trigger golden context init in RM. */
- {
- int i;
-
- for (i = 0; gr->base.func->sclass[i].ctor; i++) {
- if ((gr->base.func->sclass[i].oclass & 0xff) == 0x97) {
- struct nvkm_gsp_object threed;
-
- ret = nvkm_gsp_rm_alloc(&golden.chan, 0x97000000,
- gr->base.func->sclass[i].oclass, 0,
- &threed);
- if (ret)
- goto done;
-
- nvkm_gsp_rm_free(&threed);
- break;
- }
- }
-
- if (WARN_ON(!gr->base.func->sclass[i].ctor)) {
- ret = -EINVAL;
- goto done;
- }
- }
-
-done:
- nvkm_gsp_rm_free(&golden.chan);
- for (int i = gr->ctxbuf_nr - 1; i >= 0; i--)
- nvkm_vmm_put(golden.vmm, &golden.vma[i]);
- nvkm_vmm_unref(&golden.vmm);
- nvkm_memory_unref(&golden.inst);
- return ret;
-
-}
-
-static void *
-r535_gr_dtor(struct nvkm_gr *base)
-{
- struct r535_gr *gr = r535_gr(base);
-
- while (gr->ctxbuf_nr)
- nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]);
-
- kfree(gr->base.func);
- return gr;
-}
-
-int
-r535_gr_new(const struct gf100_gr_func *hw,
- struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
-{
- struct nvkm_gr_func *rm;
- struct r535_gr *gr;
- int nclass;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_gr_dtor;
- rm->oneinit = r535_gr_oneinit;
- rm->units = r535_gr_units;
- rm->chan_new = r535_gr_chan_new;
-
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_gr_obj_ctor;
- }
-
- if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) {
- kfree(rm);
- return -ENOMEM;
- }
-
- *pgr = &gr->base;
-
- return nvkm_gr_ctor(rm, device, type, inst, true, &gr->base);
-}
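Besides the RM object boilerplate, the deleted gr/r535.c carried one sizing policy worth recording: each context buffer was mapped with the largest page size it could fill, and the attribute circular buffer was over-aligned to order_base_2() of its own size. The page-shift rule, restated standalone:

	#include <stdint.h>

	/* Page-shift selection used by the removed r535_gr_oneinit(). */
	static unsigned int ctxbuf_page_shift(uint32_t size)
	{
		if (size >= UINT32_C(1) << 21)
			return 21;	/* 2 MiB pages */
		if (size >= UINT32_C(1) << 16)
			return 16;	/* 64 KiB pages */
		return 12;		/* 4 KiB pages */
	}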
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
index b7a458e9040a..bda8054c6b59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -219,7 +219,7 @@ int
tu102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_gr_new(&tu102_gr, device, type, inst, pgr);
+ return -ENODEV;

return gf100_gr_new_(tu102_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
index 2b0e923cb755..37b0cdc760c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
@@ -2,8 +2,4 @@
nvkm-y += nvkm/engine/nvdec/base.o
nvkm-y += nvkm/engine/nvdec/gm107.o
nvkm-y += nvkm/engine/nvdec/tu102.o
-nvkm-y += nvkm/engine/nvdec/ga100.o
nvkm-y += nvkm/engine/nvdec/ga102.o
-nvkm-y += nvkm/engine/nvdec/ad102.o
-
-nvkm-y += nvkm/engine/nvdec/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
index 022a9c824304..eea6368adae2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
@@ -23,16 +23,6 @@
#include <subdev/gsp.h>

-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga102_nvdec_gsp = {
- .sclass = {
- { -1, -1, NVC7B0_VIDEO_DECODER },
- {}
- }
-};
-
static const struct nvkm_falcon_func
ga102_nvdec_flcn = {
.disable = gm200_flcn_disable,
@@ -67,7 +57,7 @@ ga102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst
struct nvkm_nvdec **pnvdec)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_nvdec_new(&ga102_nvdec_gsp, device, type, inst, pnvdec);
+ return -ENODEV;

return nvkm_nvdec_new_(ga102_nvdec_fwif, device, type, inst, 0x848000, pnvdec);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
index f506ae83bfd7..f8d43e913093 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
@@ -20,7 +20,4 @@ extern const struct nvkm_nvdec_fwif gm107_nvdec_fwif[];
int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *,
enum nvkm_subdev_type, int, u32 addr, struct nvkm_nvdec **);
-
-int r535_nvdec_new(const struct nvkm_engine_func *, struct nvkm_device *,
- enum nvkm_subdev_type, int, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c
deleted file mode 100644
index 75a24f3e6617..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_nvdec_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_nvdec_obj_dtor(struct nvkm_object *object)
-{
- struct r535_nvdec_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_nvdec_obj = {
- .dtor = r535_nvdec_obj_dtor,
-};
-
-static int
-r535_nvdec_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_nvdec_obj *obj;
- NV_BSP_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_nvdec_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->size = sizeof(*args);
- args->engineInstance = oclass->engine->subdev.inst;
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_nvdec_dtor(struct nvkm_engine *engine)
-{
- struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
-
- kfree(nvdec->engine.func);
- return nvdec;
-}
-
-int
-r535_nvdec_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec)
-{
- struct nvkm_engine_func *rm;
- int nclass;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_nvdec_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_nvdec_obj_ctor;
- }
-
- if (!(*pnvdec = kzalloc(sizeof(**pnvdec), GFP_KERNEL))) {
- kfree(rm);
- return -ENOMEM;
- }
-
- return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvdec)->engine);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
index 808c8e010b9e..fe95b6e22f21 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
@@ -23,22 +23,12 @@
#include <subdev/gsp.h>

-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-tu102_nvdec = {
- .sclass = {
- { -1, -1, NVC4B0_VIDEO_DECODER },
- {}
- }
-};
-
int
tu102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_nvdec **pnvdec)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_nvdec_new(&tu102_nvdec, device, type, inst, pnvdec);
+ return -ENODEV;

return nvkm_nvdec_new_(gm107_nvdec_fwif, device, type, inst, 0, pnvdec);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
index 2c1495b730f3..6dcb20d1d156 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
@@ -2,7 +2,3 @@
nvkm-y += nvkm/engine/nvenc/base.o
nvkm-y += nvkm/engine/nvenc/gm107.o
nvkm-y += nvkm/engine/nvenc/tu102.o
-nvkm-y += nvkm/engine/nvenc/ga102.o
-nvkm-y += nvkm/engine/nvenc/ad102.o
-
-nvkm-y += nvkm/engine/nvenc/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c
deleted file mode 100644
index 6463ab8e5871..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga102_nvenc = {
- .sclass = {
- { -1, -1, NVC7B7_VIDEO_ENCODER },
- {}
- }
-};
-
-int
-ga102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_nvenc **pnvenc)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvenc_new(&ga102_nvenc, device, type, inst, pnvenc);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
index 7917affc6505..b097e3f2867b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
@@ -18,7 +18,4 @@ extern const struct nvkm_nvenc_fwif gm107_nvenc_fwif[];
int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *, enum nvkm_subdev_type,
int, struct nvkm_nvenc **pnvenc);
-
-int r535_nvenc_new(const struct nvkm_engine_func *, struct nvkm_device *,
- enum nvkm_subdev_type, int, struct nvkm_nvenc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c
deleted file mode 100644
index c8a2a9196ce5..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_nvenc_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_nvenc_obj_dtor(struct nvkm_object *object)
-{
- struct r535_nvenc_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_nvenc_obj = {
- .dtor = r535_nvenc_obj_dtor,
-};
-
-static int
-r535_nvenc_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_nvenc_obj *obj;
- NV_MSENC_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_nvenc_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->size = sizeof(*args);
- args->engineInstance = oclass->engine->subdev.inst;
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_nvenc_dtor(struct nvkm_engine *engine)
-{
- struct nvkm_nvenc *nvenc = nvkm_nvenc(engine);
-
- kfree(nvenc->engine.func);
- return nvenc;
-}
-
-int
-r535_nvenc_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc)
-{
- struct nvkm_engine_func *rm;
- int nclass;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_nvenc_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_nvenc_obj_ctor;
- }
-
- if (!(*pnvenc = kzalloc(sizeof(**pnvenc), GFP_KERNEL))) {
- kfree(rm);
- return -ENOMEM;
- }
-
- return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvenc)->engine);
-}
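
All of these deleted object constructors speak the same GSP-RM allocation handshake, which survives in the relocated code: reserve an RPC message with nvkm_gsp_rm_alloc_get(), fill in the class-specific *_ALLOCATION_PARAMETERS, then issue it with nvkm_gsp_rm_alloc_wr(); teardown is a single nvkm_gsp_rm_free(). Condensed from the nvenc version above, error handling omitted:

/* Condensed from r535_nvenc_obj_ctor() above; error handling omitted. */
args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle,
			     oclass->base.oclass, sizeof(*args), &obj->rm);
args->size = sizeof(*args);			/* RM validates the size */
args->engineInstance = oclass->engine->subdev.inst;
ret = nvkm_gsp_rm_alloc_wr(&obj->rm, args);	/* RPC to GSP-RM */

/* ...and on destruction: */
nvkm_gsp_rm_free(&obj->rm);
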
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c
index 933864423bb3..8a436b398749 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c
@@ -23,22 +23,12 @@
#include <subdev/gsp.h>
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-tu102_nvenc = {
- .sclass = {
- { -1, -1, NVC4B7_VIDEO_ENCODER },
- {}
- }
-};
-
int
tu102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_nvenc **pnvenc)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_nvenc_new(&tu102_nvenc, device, type, inst, pnvenc);
+ return -ENODEV;
return nvkm_nvenc_new_(gm107_nvenc_fwif, device, type, inst, pnvenc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild
deleted file mode 100644
index 1408f664add6..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: MIT
-nvkm-y += nvkm/engine/nvjpg/ga100.o
-nvkm-y += nvkm/engine/nvjpg/ad102.o
-
-nvkm-y += nvkm/engine/nvjpg/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c
deleted file mode 100644
index 62705dc6494c..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ad102_nvjpg = {
- .sclass = {
- { -1, -1, NVC9D1_VIDEO_NVJPG },
- {}
- }
-};
-
-int
-ad102_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvjpg_new(&ad102_nvjpg, device, type, inst, pengine);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c
deleted file mode 100644
index f550eb07da5a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga100_nvjpg = {
- .sclass = {
- { -1, -1, NVC4D1_VIDEO_NVJPG },
- {}
- }
-};
-
-int
-ga100_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvjpg_new(&ga100_nvjpg, device, type, inst, pengine);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h
deleted file mode 100644
index 1e80cf70033a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_NVJPG_PRIV_H__
-#define __NVKM_NVJPG_PRIV_H__
-#include <engine/nvjpg.h>
-
-int r535_nvjpg_new(const struct nvkm_engine_func *, struct nvkm_device *,
- enum nvkm_subdev_type, int, struct nvkm_engine **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c
deleted file mode 100644
index 1babddc4eb80..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_nvjpg_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_nvjpg_obj_dtor(struct nvkm_object *object)
-{
- struct r535_nvjpg_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_nvjpg_obj = {
- .dtor = r535_nvjpg_obj_dtor,
-};
-
-static int
-r535_nvjpg_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_nvjpg_obj *obj;
- NV_NVJPG_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_nvjpg_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->size = sizeof(*args);
- args->engineInstance = oclass->engine->subdev.inst;
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_nvjpg_dtor(struct nvkm_engine *engine)
-{
- kfree(engine->func);
- return engine;
-}
-
-int
-r535_nvjpg_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
-{
- struct nvkm_engine_func *rm;
- int nclass, ret;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_nvjpg_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_nvjpg_obj_ctor;
- }
-
- ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
- if (ret)
- kfree(rm);
-
- return ret;
-}
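
Note the subtle difference from the nvdec/nvenc variants above: nvjpg (and ofa below) have no specialized engine struct, so they let nvkm_engine_new_() allocate the engine and explicitly reclaim the cloned func on failure, whereas nvdec/nvenc allocate their container first and return nvkm_engine_ctor() directly:

/* nvjpg/ofa style: generic engine, clone freed on ctor failure. */
ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
if (ret)
	kfree(rm);

/* nvdec/nvenc style: dedicated container, ctor result returned as-is. */
return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvdec)->engine);
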
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild
deleted file mode 100644
index 99f1713d7e51..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: MIT
-nvkm-y += nvkm/engine/ofa/ga100.o
-nvkm-y += nvkm/engine/ofa/ga102.o
-nvkm-y += nvkm/engine/ofa/ad102.o
-
-nvkm-y += nvkm/engine/ofa/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c
deleted file mode 100644
index ef474f61a1b5..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga100_ofa = {
- .sclass = {
- { -1, -1, NVC6FA_VIDEO_OFA },
- {}
- }
-};
-
-int
-ga100_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_ofa_new(&ga100_ofa, device, type, inst, pengine);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c
deleted file mode 100644
index bea255529993..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga102_ofa = {
- .sclass = {
- { -1, -1, NVC7FA_VIDEO_OFA },
- {}
- }
-};
-
-int
-ga102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_ofa_new(&ga102_ofa, device, type, inst, pengine);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h
deleted file mode 100644
index caf29e6bddb4..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_OFA_PRIV_H__
-#define __NVKM_OFA_PRIV_H__
-#include <engine/ofa.h>
-
-int r535_ofa_new(const struct nvkm_engine_func *, struct nvkm_device *,
- enum nvkm_subdev_type, int, struct nvkm_engine **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c
deleted file mode 100644
index 438dc692eefe..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <subdev/mmu.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_ofa_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_ofa_obj_dtor(struct nvkm_object *object)
-{
- struct r535_ofa_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_ofa_obj = {
- .dtor = r535_ofa_obj_dtor,
-};
-
-static int
-r535_ofa_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_ofa_obj *obj;
- NV_OFA_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_ofa_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->size = sizeof(*args);
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_ofa_dtor(struct nvkm_engine *engine)
-{
- kfree(engine->func);
- return engine;
-}
-
-int
-r535_ofa_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
-{
- struct nvkm_engine_func *rm;
- int nclass, ret;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_ofa_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_ofa_obj_ctor;
- }
-
- ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
- if (ret)
- kfree(rm);
-
- return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index 4c2f6fc4ef58..c19ea4ea9bd3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -9,6 +9,7 @@ include $(src)/nvkm/subdev/fault/Kbuild
include $(src)/nvkm/subdev/fb/Kbuild
include $(src)/nvkm/subdev/fuse/Kbuild
include $(src)/nvkm/subdev/gpio/Kbuild
+include $(src)/nvkm/subdev/fsp/Kbuild
include $(src)/nvkm/subdev/gsp/Kbuild
include $(src)/nvkm/subdev/i2c/Kbuild
include $(src)/nvkm/subdev/iccsense/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index 9754c6872543..8faee3317a74 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -7,5 +7,3 @@ nvkm-y += nvkm/subdev/bar/gk20a.o
nvkm-y += nvkm/subdev/bar/gm107.o
nvkm-y += nvkm/subdev/bar/gm20b.o
nvkm-y += nvkm/subdev/bar/tu102.o
-
-nvkm-y += nvkm/subdev/bar/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index 51070b7dda85..e5e60915029c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -82,7 +82,7 @@ gf100_bar_bar2_init(struct nvkm_bar *base)
static int
gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
- struct lock_class_key *key, int bar_nr)
+ struct lock_class_key *key, enum nvkm_bar_id bar_id)
{
struct nvkm_device *device = bar->base.subdev.device;
resource_size_t bar_len;
@@ -93,14 +93,14 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
if (ret)
return ret;
- bar_len = device->func->resource_size(device, bar_nr);
+ bar_len = device->func->resource_size(device, bar_id);
if (!bar_len)
return -ENOMEM;
- if (bar_nr == 3 && bar->bar2_halve)
+ if (bar_id == NVKM_BAR2_INST && bar->bar2_halve)
bar_len >>= 1;
ret = nvkm_vmm_new(device, 0, bar_len, NULL, 0, key,
- (bar_nr == 3) ? "bar2" : "bar1", &bar_vm->vmm);
+ (bar_id == NVKM_BAR2_INST) ? "bar2" : "bar1", &bar_vm->vmm);
if (ret)
return ret;
@@ -110,7 +110,7 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
/*
* Bootstrap page table lookup.
*/
- if (bar_nr == 3) {
+ if (bar_id == NVKM_BAR2_INST) {
ret = nvkm_vmm_boot(bar_vm->vmm);
if (ret)
return ret;
@@ -129,7 +129,7 @@ gf100_bar_oneinit(struct nvkm_bar *base)
/* BAR2 */
if (bar->base.func->bar2.init) {
- ret = gf100_bar_oneinit_bar(bar, &bar->bar[0], &bar2_lock, 3);
+ ret = gf100_bar_oneinit_bar(bar, &bar->bar[0], &bar2_lock, NVKM_BAR2_INST);
if (ret)
return ret;
@@ -138,7 +138,7 @@ gf100_bar_oneinit(struct nvkm_bar *base)
}
/* BAR1 */
- ret = gf100_bar_oneinit_bar(bar, &bar->bar[1], &bar1_lock, 1);
+ ret = gf100_bar_oneinit_bar(bar, &bar->bar[1], &bar1_lock, NVKM_BAR1_FB);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 27d8a1be43e4..6a881becb02c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -127,7 +127,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
/* BAR2 */
start = 0x0100000000ULL;
- size = device->func->resource_size(device, 3);
+ size = device->func->resource_size(device, NVKM_BAR2_INST);
if (!size)
return -ENOMEM;
limit = start + size;
@@ -167,7 +167,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
/* BAR1 */
start = 0x0000000000ULL;
- size = device->func->resource_size(device, 1);
+ size = device->func->resource_size(device, NVKM_BAR1_FB);
if (!size)
return -ENOMEM;
limit = start + size;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
index 6c5bbff12eb4..b918e22df5a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
@@ -47,8 +47,8 @@
static inline struct io_mapping *
fbmem_init(struct nvkm_device *dev)
{
- return io_mapping_create_wc(dev->func->resource_addr(dev, 1),
- dev->func->resource_size(dev, 1));
+ return io_mapping_create_wc(dev->func->resource_addr(dev, NVKM_BAR1_FB),
+ dev->func->resource_size(dev, NVKM_BAR1_FB));
}
static inline void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
index c123e5893d76..cd2fbc0472d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
@@ -50,7 +50,7 @@ nvkm_ufault_map(struct nvkm_object *object, void *argv, u32 argc,
struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
struct nvkm_device *device = buffer->fault->subdev.device;
*type = NVKM_OBJECT_MAP_IO;
- *addr = device->func->resource_addr(device, 3) + buffer->addr;
+ *addr = device->func->resource_addr(device, NVKM_BAR2_INST) + buffer->addr;
*size = nvkm_memory_size(buffer->mem);
return 0;
}
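
The bar, devinit and fault hunks above all make the same substitution: the raw PCI BAR indices passed to resource_addr()/resource_size() (1 for the framebuffer aperture, 3 for instance memory on these GPUs) become a named enum. The definition itself is not shown in this part of the series; presumably it is along these lines:

/* Presumed shape of the new enum (defined elsewhere in this series);
 * only the two names actually used in these hunks are certain. */
enum nvkm_bar_id {
	NVKM_BAR_INVALID = 0,
	NVKM_BAR0_PRI,		/* MMIO/register aperture */
	NVKM_BAR1_FB,		/* framebuffer aperture, was raw index 1 */
	NVKM_BAR2_INST,		/* instmem aperture, was raw index 3 */
};
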
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index d1611ad3bf81..8d8a5382d1b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -35,6 +35,9 @@ nvkm-y += nvkm/subdev/fb/gv100.o
nvkm-y += nvkm/subdev/fb/tu102.o
nvkm-y += nvkm/subdev/fb/ga100.o
nvkm-y += nvkm/subdev/fb/ga102.o
+nvkm-y += nvkm/subdev/fb/gh100.o
+nvkm-y += nvkm/subdev/fb/gb100.o
+nvkm-y += nvkm/subdev/fb/gb202.o
nvkm-y += nvkm/subdev/fb/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
index 25f82b372bca..2819780050d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
@@ -25,7 +25,7 @@
#include <subdev/gsp.h>
#include <engine/nvdec.h>
-static u64
+u64
ga102_fb_vidmem_size(struct nvkm_fb *fb)
{
return (u64)nvkm_rd32(fb->subdev.device, 0x1183a4) << 20;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c
new file mode 100644
index 000000000000..1c78c8853617
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gb100/dev_hshub_base.h>
+
+static void
+gb100_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
+{
+ const u32 addr_hi = upper_32_bits(fb->sysmem.flush_page_addr);
+ const u32 addr_lo = lower_32_bits(fb->sysmem.flush_page_addr);
+ const u32 hshub = DRF_LO(NV_PFB_HSHUB0);
+ struct nvkm_device *device = fb->subdev.device;
+
+ nvkm_wr32(device, hshub + NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI, addr_hi);
+ nvkm_wr32(device, hshub + NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO, addr_lo);
+ nvkm_wr32(device, hshub + NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI, addr_hi);
+ nvkm_wr32(device, hshub + NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO, addr_lo);
+}
+
+static const struct nvkm_fb_func
+gb100_fb = {
+ .sysmem.flush_page_init = gb100_fb_sysmem_flush_page_init,
+ .vidmem.size = ga102_fb_vidmem_size,
+};
+
+int
+gb100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+ return r535_fb_new(&gb100_fb, device, type, inst, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c
new file mode 100644
index 000000000000..848505026d02
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gb10b/dev_fbhub.h>
+
+static void
+gb202_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ const u64 addr = fb->sysmem.flush_page_addr;
+
+ nvkm_wr32(device, NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI, upper_32_bits(addr));
+ nvkm_wr32(device, NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO, lower_32_bits(addr));
+}
+
+static const struct nvkm_fb_func
+gb202_fb = {
+ .sysmem.flush_page_init = gb202_fb_sysmem_flush_page_init,
+ .vidmem.size = ga102_fb_vidmem_size,
+};
+
+int
+gb202_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+ return r535_fb_new(&gb202_fb, device, type, inst, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c
new file mode 100644
index 000000000000..2d8c51f882d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_fb.h>
+
+static void
+gh100_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
+{
+ const u64 addr = fb->sysmem.flush_page_addr >> NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT;
+ struct nvkm_device *device = fb->subdev.device;
+
+ nvkm_wr32(device, NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI, upper_32_bits(addr));
+ nvkm_wr32(device, NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO, lower_32_bits(addr));
+}
+
+static const struct nvkm_fb_func
+gh100_fb = {
+ .sysmem.flush_page_init = gh100_fb_sysmem_flush_page_init,
+ .vidmem.size = ga102_fb_vidmem_size,
+};
+
+int
+gh100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+ return r535_fb_new(&gh100_fb, device, type, inst, pfb);
+}
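
All three new fb backends program the same sysmem flush page; they differ only in which hub holds the registers (FBHUB on gh100, both the HSHUB PCIE and EG_PCIE pairs on gb100, FBHUB0 on gb202) and in gh100 storing the address pre-shifted by NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT before the HI/LO split. A worked example, with an illustrative shift of 8:

/* Worked example; the real shift comes from gh100's dev_fb.h, 8 here is
 * illustrative only. */
u64 addr = 0x123456000ULL;	/* fb->sysmem.flush_page_addr */
addr >>= 8;			/* -> 0x1234560 */
/* upper_32_bits(addr) == 0x00000000 -> ..._ADDR_HI
 * lower_32_bits(addr) == 0x01234560 -> ..._ADDR_LO */
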
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 35c55dfba23d..ebe996503ab2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -98,4 +98,6 @@ int gp102_fb_vpr_scrub(struct nvkm_fb *);
int gv100_fb_init_page(struct nvkm_fb *);
bool tu102_fb_vpr_scrub_required(struct nvkm_fb *);
+
+u64 ga102_fb_vidmem_size(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild
new file mode 100644
index 000000000000..1a9ded3a86f8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+
+nvkm-y += nvkm/subdev/fsp/base.o
+nvkm-y += nvkm/subdev/fsp/gh100.o
+nvkm-y += nvkm/subdev/fsp/gb100.o
+nvkm-y += nvkm/subdev/fsp/gb202.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c
new file mode 100644
index 000000000000..e366a980baa9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+int
+nvkm_fsp_boot_gsp_fmc(struct nvkm_fsp *fsp, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig)
+{
+ return fsp->func->cot.boot_gsp_fmc(fsp, args_addr, rsvd_size, resume,
+ img_addr, hash, pkey, sig);
+}
+
+bool
+nvkm_fsp_verify_gsp_fmc(struct nvkm_fsp *fsp, u32 hash_size, u32 pkey_size, u32 sig_size)
+{
+ return hash_size == fsp->func->cot.size_hash &&
+ pkey_size == fsp->func->cot.size_pkey &&
+ sig_size == fsp->func->cot.size_sig;
+}
+
+static int
+nvkm_fsp_preinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_fsp *fsp = nvkm_fsp(subdev);
+
+ return fsp->func->wait_secure_boot(fsp);
+}
+
+static void *
+nvkm_fsp_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_fsp *fsp = nvkm_fsp(subdev);
+
+ nvkm_falcon_dtor(&fsp->falcon);
+ return fsp;
+}
+
+static const struct nvkm_falcon_func
+nvkm_fsp_flcn = {
+ .emem_pio = &gp102_flcn_emem_pio,
+};
+
+static const struct nvkm_subdev_func
+nvkm_fsp = {
+ .dtor = nvkm_fsp_dtor,
+ .preinit = nvkm_fsp_preinit,
+};
+
+int
+nvkm_fsp_new_(const struct nvkm_fsp_func *func,
+ struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fsp **pfsp)
+{
+ struct nvkm_fsp *fsp;
+
+ fsp = *pfsp = kzalloc(sizeof(*fsp), GFP_KERNEL);
+ if (!fsp)
+ return -ENOMEM;
+
+ fsp->func = func;
+ nvkm_subdev_ctor(&nvkm_fsp, device, type, inst, &fsp->subdev);
+
+ return nvkm_falcon_ctor(&nvkm_fsp_flcn, &fsp->subdev, "fsp", 0x8f2000, &fsp->falcon);
+}
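
The intended calling sequence for this new subdev (the real call sites land in the gh100 GSP code later in the series) is to gate the chain-of-trust blobs on the chip's expected sizes before booting the FMC through the FSP. A hypothetical caller sketch:

/* Hypothetical caller sketch; variable names are illustrative. */
if (!nvkm_fsp_verify_gsp_fmc(fsp, hash_size, pkey_size, sig_size))
	return -EINVAL;	/* blob sizes don't match this chip's CoT */

ret = nvkm_fsp_boot_gsp_fmc(fsp, args_addr, rsvd_size, resume,
			    img_addr, hash, pkey, sig);
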
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c
new file mode 100644
index 000000000000..e06636bf54b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+static const struct nvkm_fsp_func
+gb100_fsp = {
+ .wait_secure_boot = gh100_fsp_wait_secure_boot,
+ .cot = {
+ .version = 2,
+ .size_hash = 48,
+ .size_pkey = 97,
+ .size_sig = 96,
+ .boot_gsp_fmc = gh100_fsp_boot_gsp_fmc,
+ },
+};
+
+int
+gb100_fsp_new(struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_fsp **pfsp)
+{
+ return nvkm_fsp_new_(&gb100_fsp, device, type, inst, pfsp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c
new file mode 100644
index 000000000000..3438aac6383e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gb202/dev_therm.h>
+
+static int
+gb202_fsp_wait_secure_boot(struct nvkm_fsp *fsp)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ unsigned timeout_ms = 4000;
+
+ do {
+ u32 status = NVKM_RD32(device, NV_THERM, I2CS_SCRATCH, FSP_BOOT_COMPLETE_STATUS);
+
+ if (status == NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout_ms--);
+
+ return -ETIMEDOUT;
+}
+
+static const struct nvkm_fsp_func
+gb202_fsp = {
+ .wait_secure_boot = gb202_fsp_wait_secure_boot,
+ .cot = {
+ .version = 2,
+ .size_hash = 48,
+ .size_pkey = 97,
+ .size_sig = 96,
+ .boot_gsp_fmc = gh100_fsp_boot_gsp_fmc,
+ },
+};
+
+int
+gb202_fsp_new(struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_fsp **pfsp)
+{
+ return nvkm_fsp_new_(&gb202_fsp, device, type, inst, pfsp);
+}
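
gb100 and gb202 both declare chain-of-trust version 2 with a 48-byte hash, 97-byte public key and 96-byte signature — sizes consistent with SHA-384 plus an uncompressed ECDSA-P384 key (0x04 || X || Y) and an r||s signature, though that reading is an inference from the sizes alone. gh100 below stays on version 1 with 384-byte (RSA-3072-sized) key and signature fields:

/* Size gates as enforced by nvkm_fsp_verify_gsp_fmc(); the ECDSA-P384 /
 * RSA-3072 interpretation is inferred from the sizes, not stated here. */
nvkm_fsp_verify_gsp_fmc(fsp, 48, 97, 96);	/* gb100/gb202, CoT v2 */
nvkm_fsp_verify_gsp_fmc(fsp, 48, 384, 384);	/* gh100,      CoT v1 */
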
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c
new file mode 100644
index 000000000000..2815be4bf5de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_fsp_pri.h>
+#include <nvhw/ref/gh100/dev_therm.h>
+
+#include <nvrm/nvtypes.h>
+
+#define MCTP_HEADER_VERSION 3:0
+#define MCTP_HEADER_RSVD 7:4
+
+#define MCTP_HEADER_DEID 15:8
+#define MCTP_HEADER_SEID 23:16
+
+#define MCTP_HEADER_TAG 26:24
+#define MCTP_HEADER_TO 27:27
+#define MCTP_HEADER_SEQ 29:28
+#define MCTP_HEADER_EOM 30:30
+#define MCTP_HEADER_SOM 31:31
+
+#define MCTP_MSG_HEADER_TYPE 6:0
+#define MCTP_MSG_HEADER_IC 7:7
+
+#define MCTP_MSG_HEADER_VENDOR_ID 23:8
+#define MCTP_MSG_HEADER_NVDM_TYPE 31:24
+
+#define MCTP_MSG_HEADER_TYPE_VENDOR_PCI 0x7e
+#define MCTP_MSG_HEADER_VENDOR_ID_NV 0x10de
+
+#define NVDM_TYPE_COT 0x14
+#define NVDM_TYPE_FSP_RESPONSE 0x15
+
+#pragma pack(1)
+typedef struct nvdm_payload_cot
+{
+ NvU16 version;
+ NvU16 size;
+ NvU64 gspFmcSysmemOffset;
+ NvU64 frtsSysmemOffset;
+ NvU32 frtsSysmemSize;
+
+ // Note this is an offset from the end of FB
+ NvU64 frtsVidmemOffset;
+ NvU32 frtsVidmemSize;
+
+ // Authentication related fields
+ NvU32 hash384[12];
+ NvU32 publicKey[96];
+ NvU32 signature[96];
+
+ NvU64 gspBootArgsSysmemOffset;
+} NVDM_PAYLOAD_COT;
+#pragma pack()
+
+#pragma pack(1)
+typedef struct
+{
+ NvU32 taskId;
+ NvU32 commandNvdmType;
+ NvU32 errorCode;
+} NVDM_PAYLOAD_COMMAND_RESPONSE;
+#pragma pack()
+
+static u32
+gh100_fsp_poll(struct nvkm_fsp *fsp)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ u32 head, tail;
+
+ head = nvkm_rd32(device, NV_PFSP_MSGQ_HEAD(0));
+ tail = nvkm_rd32(device, NV_PFSP_MSGQ_TAIL(0));
+
+ if (head == tail)
+ return 0;
+
+ return (tail - head) + sizeof(u32); /* TAIL points at last DWORD written. */
+}
+
+static int
+gh100_fsp_recv(struct nvkm_fsp *fsp, u8 *packet, u32 max_packet_size)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ u32 packet_size;
+ int ret;
+
+ packet_size = gh100_fsp_poll(fsp);
+ if (!packet_size || WARN_ON(packet_size % 4 || packet_size > max_packet_size))
+ return -EINVAL;
+
+ ret = nvkm_falcon_pio_rd(&fsp->falcon, 0, EMEM, 0, packet, 0, packet_size);
+ if (ret)
+ return ret;
+
+ nvkm_wr32(device, NV_PFSP_MSGQ_TAIL(0), 0);
+ nvkm_wr32(device, NV_PFSP_MSGQ_HEAD(0), 0);
+
+ return packet_size;
+}
+
+static int
+gh100_fsp_wait(struct nvkm_fsp *fsp)
+{
+ int time = 1000;
+
+ do {
+ if (gh100_fsp_poll(fsp))
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while(time--);
+
+ return -ETIMEDOUT;
+}
+
+static int
+gh100_fsp_send(struct nvkm_fsp *fsp, const u8 *packet, u32 packet_size)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ int time = 1000, ret;
+
+ if (WARN_ON(packet_size % sizeof(u32)))
+ return -EINVAL;
+
+ /* Ensure any previously sent message has been consumed. */
+ do {
+ u32 head = nvkm_rd32(device, NV_PFSP_QUEUE_HEAD(0));
+ u32 tail = nvkm_rd32(device, NV_PFSP_QUEUE_TAIL(0));
+
+ if (tail == head)
+ break;
+
+ usleep_range(1000, 2000);
+ } while(time--);
+
+ if (time < 0)
+ return -ETIMEDOUT;
+
+ /* Write message to EMEM. */
+ ret = nvkm_falcon_pio_wr(&fsp->falcon, packet, 0, 0, EMEM, 0, packet_size, 0, false);
+ if (ret)
+ return ret;
+
+ /* Update queue pointers - TAIL points at last DWORD written. */
+ nvkm_wr32(device, NV_PFSP_QUEUE_TAIL(0), packet_size - sizeof(u32));
+ nvkm_wr32(device, NV_PFSP_QUEUE_HEAD(0), 0);
+ return 0;
+}
+
+static int
+gh100_fsp_send_sync(struct nvkm_fsp *fsp, u8 nvdm_type, const u8 *packet, u32 packet_size)
+{
+ struct nvkm_subdev *subdev = &fsp->subdev;
+ struct {
+ u32 mctp_header;
+ u32 nvdm_header;
+ NVDM_PAYLOAD_COMMAND_RESPONSE response;
+ } reply;
+ int ret;
+
+ ret = gh100_fsp_send(fsp, packet, packet_size);
+ if (ret)
+ return ret;
+
+ ret = gh100_fsp_wait(fsp);
+ if (ret)
+ return ret;
+
+ ret = gh100_fsp_recv(fsp, (u8 *)&reply, sizeof(reply));
+ if (ret < 0)
+ return ret;
+
+ if (NVVAL_TEST(reply.mctp_header, MCTP, HEADER, SOM, !=, 1) ||
+ NVVAL_TEST(reply.mctp_header, MCTP, HEADER, EOM, !=, 1)) {
+ nvkm_error(subdev, "unexpected MCTP header in reply: 0x%08x\n", reply.mctp_header);
+ return -EIO;
+ }
+
+ if (NVDEF_TEST(reply.nvdm_header, MCTP, MSG_HEADER, TYPE, !=, VENDOR_PCI) ||
+ NVDEF_TEST(reply.nvdm_header, MCTP, MSG_HEADER, VENDOR_ID, !=, NV) ||
+ NVVAL_TEST(reply.nvdm_header, MCTP, MSG_HEADER, NVDM_TYPE, !=, NVDM_TYPE_FSP_RESPONSE)) {
+ nvkm_error(subdev, "unexpected NVDM header in reply: 0x%08x\n", reply.nvdm_header);
+ return -EIO;
+ }
+
+ if (reply.response.commandNvdmType != nvdm_type) {
+ nvkm_error(subdev, "expected NVDM type 0x%02x in reply, got 0x%02x\n",
+ nvdm_type, reply.response.commandNvdmType);
+ return -EIO;
+ }
+
+ if (reply.response.errorCode) {
+ nvkm_error(subdev, "NVDM command 0x%02x failed with error 0x%08x\n",
+ nvdm_type, reply.response.errorCode);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int
+gh100_fsp_boot_gsp_fmc(struct nvkm_fsp *fsp, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig)
+{
+ struct {
+ u32 mctp_header;
+ u32 nvdm_header;
+ NVDM_PAYLOAD_COT cot;
+ } msg = {};
+
+ msg.mctp_header = NVVAL(MCTP, HEADER, SOM, 1) |
+ NVVAL(MCTP, HEADER, EOM, 1) |
+ NVVAL(MCTP, HEADER, SEID, 0) |
+ NVVAL(MCTP, HEADER, SEQ, 0);
+
+ msg.nvdm_header = NVDEF(MCTP, MSG_HEADER, TYPE, VENDOR_PCI) |
+ NVDEF(MCTP, MSG_HEADER, VENDOR_ID, NV) |
+ NVVAL(MCTP, MSG_HEADER, NVDM_TYPE, NVDM_TYPE_COT);
+
+ msg.cot.version = fsp->func->cot.version;
+ msg.cot.size = sizeof(msg.cot);
+ msg.cot.gspFmcSysmemOffset = img_addr;
+ if (!resume) {
+ msg.cot.frtsVidmemOffset = ALIGN(rsvd_size, 0x200000);
+ msg.cot.frtsVidmemSize = 0x100000;
+ }
+
+ memcpy(msg.cot.hash384, hash, fsp->func->cot.size_hash);
+ memcpy(msg.cot.publicKey, pkey, fsp->func->cot.size_pkey);
+ memcpy(msg.cot.signature, sig, fsp->func->cot.size_sig);
+
+ msg.cot.gspBootArgsSysmemOffset = args_addr;
+
+ return gh100_fsp_send_sync(fsp, NVDM_TYPE_COT, (const u8 *)&msg, sizeof(msg));
+}
+
+int
+gh100_fsp_wait_secure_boot(struct nvkm_fsp *fsp)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ unsigned timeout_ms = 4000;
+
+ do {
+ u32 status = NVKM_RD32(device, NV_THERM, I2CS_SCRATCH, FSP_BOOT_COMPLETE_STATUS);
+
+ if (status == NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout_ms--);
+
+ return -ETIMEDOUT;
+}
+
+static const struct nvkm_fsp_func
+gh100_fsp = {
+ .wait_secure_boot = gh100_fsp_wait_secure_boot,
+ .cot = {
+ .version = 1,
+ .size_hash = 48,
+ .size_pkey = 384,
+ .size_sig = 384,
+ .boot_gsp_fmc = gh100_fsp_boot_gsp_fmc,
+ },
+};
+
+int
+gh100_fsp_new(struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_fsp **pfsp)
+{
+ return nvkm_fsp_new_(&gh100_fsp, device, type, inst, pfsp);
+}
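
The FSP transport above is just a head/tail register pair over the falcon's EMEM, with the quirk that TAIL addresses the last dword written rather than one past it — hence the + sizeof(u32) in gh100_fsp_poll() and the packet_size - sizeof(u32) in gh100_fsp_send(). A worked example:

/* Worked example of the TAIL convention: a 16-byte reply sits at EMEM
 * offset 0 with HEAD = 0x0 and TAIL = 0xc (the last dword), so
 * (tail - head) + sizeof(u32) = 0xc + 4 = 16 bytes to drain; both
 * registers are then written back to 0. Sending an N-byte packet
 * mirrors this: HEAD = 0, TAIL = N - 4. */
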
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h
new file mode 100644
index 000000000000..f0b2c605c33d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_FSP_PRIV_H__
+#define __NVKM_FSP_PRIV_H__
+#define nvkm_fsp(p) container_of((p), struct nvkm_fsp, subdev)
+#include <subdev/fsp.h>
+
+struct nvkm_fsp_func {
+ int (*wait_secure_boot)(struct nvkm_fsp *);
+
+ struct {
+ u32 version;
+ u32 size_hash;
+ u32 size_pkey;
+ u32 size_sig;
+ int (*boot_gsp_fmc)(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig);
+ } cot;
+};
+
+int nvkm_fsp_new_(const struct nvkm_fsp_func *,
+ struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **);
+
+int gh100_fsp_wait_secure_boot(struct nvkm_fsp *);
+int gh100_fsp_boot_gsp_fmc(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
index 16bf2f1bb780..e9c948b67bbd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
@@ -7,6 +7,9 @@ nvkm-y += nvkm/subdev/gsp/tu102.o
nvkm-y += nvkm/subdev/gsp/tu116.o
nvkm-y += nvkm/subdev/gsp/ga100.o
nvkm-y += nvkm/subdev/gsp/ga102.o
+nvkm-y += nvkm/subdev/gsp/gh100.o
nvkm-y += nvkm/subdev/gsp/ad102.o
+nvkm-y += nvkm/subdev/gsp/gb100.o
+nvkm-y += nvkm/subdev/gsp/gb202.o
-nvkm-y += nvkm/subdev/gsp/r535.o
+include $(src)/nvkm/subdev/gsp/rm/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
index c849c6299c52..eb765da0876e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
@@ -22,30 +22,27 @@
#include "priv.h"
static const struct nvkm_gsp_func
-ad102_gsp_r535_113_01 = {
+ad102_gsp = {
.flcn = &ga102_gsp_flcn,
.fwsec = &ga102_gsp_fwsec,
.sig_section = ".fwsignature_ad10x",
- .wpr_heap.os_carveout_size = 20 << 20,
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 84 << 20,
-
.booter.ctor = ga102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = ga102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &ad10x_gpu,
};
static struct nvkm_gsp_fwif
ad102_gsps[] = {
- { 0, r535_gsp_load, &ad102_gsp_r535_113_01, "535.113.01", true },
+ { 1, tu102_gsp_load, &ad102_gsp, &r570_rm_ga102, "570.144", true },
+ { 0, tu102_gsp_load, &ad102_gsp, &r535_rm_ga102, "535.113.01", true },
{}
};
@@ -55,3 +52,15 @@ ad102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(ad102_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(ad102, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad103, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad104, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad106, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad107, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(ad102, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad103, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad104, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad106, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad107, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
index da1bebb896f7..d23243a83a4c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
@@ -52,7 +52,7 @@ nvkm_gsp_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_gsp *gsp = nvkm_gsp(subdev);
- if (!gsp->func->fini)
+ if (!gsp->func->fini || !gsp->running)
return 0;
return gsp->func->fini(gsp, suspend);
@@ -80,6 +80,21 @@ nvkm_gsp_oneinit(struct nvkm_subdev *subdev)
return gsp->func->oneinit(gsp);
}
+void
+nvkm_gsp_dtor_fws(struct nvkm_gsp *gsp)
+{
+ nvkm_firmware_put(gsp->fws.fmc);
+ gsp->fws.fmc = NULL;
+ nvkm_firmware_put(gsp->fws.bl);
+ gsp->fws.bl = NULL;
+ nvkm_firmware_put(gsp->fws.booter.unload);
+ gsp->fws.booter.unload = NULL;
+ nvkm_firmware_put(gsp->fws.booter.load);
+ gsp->fws.booter.load = NULL;
+ nvkm_firmware_put(gsp->fws.rm);
+ gsp->fws.rm = NULL;
+}
+
static void *
nvkm_gsp_dtor(struct nvkm_subdev *subdev)
{
@@ -89,6 +104,7 @@ nvkm_gsp_dtor(struct nvkm_subdev *subdev)
gsp->func->dtor(gsp);
nvkm_falcon_dtor(&gsp->falcon);
+ kfree(gsp->rm);
return gsp;
}
@@ -101,6 +117,16 @@ nvkm_gsp = {
};
int
+nvkm_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver,
+ const struct firmware **pfw)
+{
+ char fwname[64];
+
+ snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver);
+ return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw);
+}
+
+int
nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_gsp **pgsp)
{
@@ -116,7 +142,19 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
return PTR_ERR(fwif);
gsp->func = fwif->func;
- gsp->rm = gsp->func->rm;
+
+ if (fwif->rm) {
+ nvkm_info(&gsp->subdev, "RM version: %s\n", fwif->ver);
+
+ gsp->rm = kzalloc(sizeof(*gsp->rm), GFP_KERNEL);
+ if (!gsp->rm)
+ return -ENOMEM;
+
+ gsp->rm->device = device;
+ gsp->rm->gpu = fwif->func->rm.gpu;
+ gsp->rm->wpr = fwif->rm->wpr;
+ gsp->rm->api = fwif->rm->api;
+ }
return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0x110000,
&gsp->falcon);
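
The new nvkm_gsp_load_fw() helper centralizes the gsp/<name>-<version> naming used by every firmware image in this series; nvkm_firmware_get() then resolves that name under the per-chipset nvidia/<chip>/ directory. For example:

/* Example (hypothetical call site): on an ad102 this ends up requesting
 * "nvidia/ad102/gsp/bootloader-570.144.bin". */
ret = nvkm_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl);
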
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
index 223f68b532ef..d201e8697226 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
@@ -39,29 +39,27 @@ ga100_gsp_flcn = {
};
static const struct nvkm_gsp_func
-ga100_gsp_r535_113_01 = {
+ga100_gsp = {
.flcn = &ga100_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_ga100",
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 64 << 20,
-
.booter.ctor = tu102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = tu102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &ga100_gpu,
};
static struct nvkm_gsp_fwif
ga100_gsps[] = {
- { 0, r535_gsp_load, &ga100_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &ga100_gsp, &r570_rm_tu102, "570.144" },
+ { 0, tu102_gsp_load, &ga100_gsp, &r535_rm_tu102, "535.113.01" },
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};
@@ -72,3 +70,6 @@ ga100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(ga100_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(ga100, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga100, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
index 4c4b4168a266..917f7e2f6c46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
@@ -150,25 +150,21 @@ ga102_gsp_flcn = {
};
static const struct nvkm_gsp_func
-ga102_gsp_r535_113_01 = {
+ga102_gsp_r535 = {
.flcn = &ga102_gsp_flcn,
.fwsec = &ga102_gsp_fwsec,
.sig_section = ".fwsignature_ga10x",
- .wpr_heap.os_carveout_size = 20 << 20,
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 84 << 20,
-
.booter.ctor = ga102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = ga102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &ga1xx_gpu,
};
static const struct nvkm_gsp_func
@@ -178,7 +174,8 @@ ga102_gsp = {
static struct nvkm_gsp_fwif
ga102_gsps[] = {
- { 0, r535_gsp_load, &ga102_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &ga102_gsp_r535, &r570_rm_ga102, "570.144" },
+ { 0, tu102_gsp_load, &ga102_gsp_r535, &r535_rm_ga102, "535.113.01" },
{ -1, gv100_gsp_nofw, &ga102_gsp },
{}
};
@@ -189,3 +186,15 @@ ga102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(ga102_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(ga102, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga103, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga104, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga106, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga107, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(ga102, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga103, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga104, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga106, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga107, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
new file mode 100644
index 000000000000..12a3f2c1ed82
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+static const struct nvkm_gsp_func
+gb100_gsp = {
+ .flcn = &ga102_gsp_flcn,
+
+ .sig_section = ".fwsignature_gb10x",
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = gh100_gsp_oneinit,
+ .init = gh100_gsp_init,
+ .fini = gh100_gsp_fini,
+
+ .rm.gpu = &gb10x_gpu,
+};
+
+static struct nvkm_gsp_fwif
+gb100_gsps[] = {
+ { 0, gh100_gsp_load, &gb100_gsp, &r570_rm_gb10x, "570.144", true },
+ {}
+};
+
+int
+gb100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(gb100_gsps, device, type, inst, pgsp);
+}
+
+NVKM_GSP_FIRMWARE_FMC(gb100, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb102, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
new file mode 100644
index 000000000000..c1d718172ddf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+static const struct nvkm_gsp_func
+gb202_gsp = {
+ .flcn = &ga102_gsp_flcn,
+
+ .sig_section = ".fwsignature_gb20x",
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = gh100_gsp_oneinit,
+ .init = gh100_gsp_init,
+ .fini = gh100_gsp_fini,
+
+ .rm.gpu = &gb20x_gpu,
+};
+
+static struct nvkm_gsp_fwif
+gb202_gsps[] = {
+ { 0, gh100_gsp_load, &gb202_gsp, &r570_rm_gb20x, "570.144", true },
+ {}
+};
+
+int
+gb202_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(gb202_gsps, device, type, inst, pgsp);
+}
+
+NVKM_GSP_FIRMWARE_FMC(gb202, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb203, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb205, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb206, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb207, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
new file mode 100644
index 000000000000..ce31e8248807
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <linux/elf.h>
+#include <linux/crc32.h>
+
+#include <subdev/fb.h>
+#include <subdev/fsp.h>
+
+#include <rm/r570/nvrm/gsp.h>
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_falcon_v4.h>
+#include <nvhw/ref/gh100/dev_riscv_pri.h>
+
+int
+gh100_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
+{
+ struct nvkm_falcon *falcon = &gsp->falcon;
+ int ret, time = 4000;
+
+ /* Shutdown RM. */
+ ret = r535_gsp_fini(gsp, suspend);
+ if (ret && suspend)
+ return ret;
+
+ /* Wait for RISC-V to halt. */
+ do {
+ u32 data = nvkm_falcon_rd32(falcon, falcon->addr2 + NV_PRISCV_RISCV_CPUCTL);
+
+ if (NVVAL_GET(data, NV_PRISCV, RISCV_CPUCTL, HALTED))
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while(time--);
+
+ return -ETIMEDOUT;
+}
+
+static bool
+gh100_gsp_lockdown_released(struct nvkm_gsp *gsp, u32 *mbox0)
+{
+ u32 data;
+
+ /* Wait for GSP access via BAR0 to be allowed. */
+ *mbox0 = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_MAILBOX0);
+
+ if (*mbox0 && (*mbox0 & 0xffffff00) == 0xbadf4100)
+ return false;
+
+ /* Check if an error code has been reported. */
+ if (*mbox0) {
+ u32 mbox1 = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_MAILBOX1);
+
+ /* Any value that's not GSP_FMC_BOOT_PARAMS addr is an error. */
+ if ((((u64)mbox1 << 32) | *mbox0) != gsp->fmc.args.addr)
+ return true;
+ }
+
+ /* Check if lockdown has been released. */
+ data = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_HWCFG2);
+ return !NVVAL_GET(data, NV_PFALCON, FALCON_HWCFG2, RISCV_BR_PRIV_LOCKDOWN);
+}
+
+int
+gh100_gsp_init(struct nvkm_gsp *gsp)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ const bool resume = gsp->sr.meta.data != NULL;
+ struct nvkm_gsp_mem *meta;
+ GSP_FMC_BOOT_PARAMS *args;
+ int ret, time = 4000;
+ u32 rsvd_size;
+ u32 mbox0;
+
+ if (!resume) {
+ ret = nvkm_gsp_mem_ctor(gsp, sizeof(*args), &gsp->fmc.args);
+ if (ret)
+ return ret;
+
+ meta = &gsp->wpr_meta;
+ } else {
+ gsp->rm->api->gsp->set_rmargs(gsp, true);
+ meta = &gsp->sr.meta;
+ }
+
+ args = gsp->fmc.args.data;
+
+ args->bootGspRmParams.gspRmDescOffset = meta->addr;
+ args->bootGspRmParams.gspRmDescSize = meta->size;
+ args->bootGspRmParams.target = GSP_DMA_TARGET_COHERENT_SYSTEM;
+ args->bootGspRmParams.bIsGspRmBoot = 1;
+
+ args->gspRmParams.target = GSP_DMA_TARGET_NONCOHERENT_SYSTEM;
+ args->gspRmParams.bootArgsOffset = gsp->libos.addr;
+
+ rsvd_size = gsp->fb.heap.size;
+ if (gsp->rm->wpr->rsvd_size_pmu)
+ rsvd_size = ALIGN(rsvd_size + gsp->rm->wpr->rsvd_size_pmu, 0x200000);
+
+ ret = nvkm_fsp_boot_gsp_fmc(device->fsp, gsp->fmc.args.addr, rsvd_size, resume,
+ gsp->fmc.fw.addr, gsp->fmc.hash, gsp->fmc.pkey, gsp->fmc.sig);
+ if (ret)
+ return ret;
+
+ do {
+ if (gh100_gsp_lockdown_released(gsp, &mbox0))
+ break;
+
+ usleep_range(1000, 2000);
+	} while (time--);
+
+ if (time < 0) {
+ nvkm_error(subdev, "GSP-FMC boot timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ if (mbox0) {
+ nvkm_error(subdev, "GSP-FMC boot failed (mbox: 0x%08x)\n", mbox0);
+ return -EIO;
+ }
+
+ return r535_gsp_init(gsp);
+}
+
+static int
+gh100_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta;
+ int ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, sizeof(*meta), &gsp->wpr_meta);
+ if (ret)
+ return ret;
+
+ gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device);
+ gsp->fb.bios.vga_workspace.size = 128 * 1024;
+ gsp->fb.heap.size = gsp->rm->wpr->heap_size_non_wpr;
+
+ meta = gsp->wpr_meta.data;
+
+ meta->magic = GSP_FW_WPR_META_MAGIC;
+ meta->revision = GSP_FW_WPR_META_REVISION;
+
+ meta->sizeOfRadix3Elf = gsp->fw.len;
+ meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
+
+ meta->sizeOfBootloader = gsp->boot.fw.size;
+ meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
+ meta->bootloaderCodeOffset = gsp->boot.code_offset;
+ meta->bootloaderDataOffset = gsp->boot.data_offset;
+ meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
+
+ meta->sysmemAddrOfSignature = gsp->sig.addr;
+ meta->sizeOfSignature = gsp->sig.size;
+
+ meta->nonWprHeapSize = gsp->fb.heap.size;
+ meta->gspFwHeapSize = tu102_gsp_wpr_heap_size(gsp);
+ meta->frtsSize = 0x100000;
+ meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
+ meta->pmuReservedSize = gsp->rm->wpr->rsvd_size_pmu;
+ return 0;
+}
+
+/* The sh_flags value for the binary blobs in the ELF image */
+#define FMC_SHF_FLAGS (SHF_MASKPROC | SHF_MASKOS | SHF_OS_NONCONFORMING | SHF_ALLOC)
+
+#define ELF_HDR_SIZE ((u8)sizeof(struct elf32_hdr))
+#define ELF_SHDR_SIZE ((u8)sizeof(struct elf32_shdr))
+
+/* The FMC ELF header must be exactly this */
+static const u8 elf_header[] = {
+ 0x7f, 'E', 'L', 'F', 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 1, 0, 0, 0, /* e_type, e_machine, e_version */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* e_entry, e_phoff */
+
+ ELF_HDR_SIZE, 0, 0, 0, 0, 0, 0, 0, /* e_shoff, e_flags */
+ ELF_HDR_SIZE, 0, 0, 0, /* e_ehsize, e_phentsize */
+ 0, 0, ELF_SHDR_SIZE, 0, /* e_phnum, e_shentsize */
+
+ 6, 0, 1, 0, /* e_shnum, e_shstrndx */
+};
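+
+/* Decoded: 32-bit little-endian, e_type ET_NONE, section headers immediately
+ * following the 52-byte ELF header, six sections, string table at index 1.
+ */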
+
+/**
+ * elf_validate_sections - validate each section in the FMC ELF image
+ * @elf: ELF image
+ * @length: size of the entire ELF image
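+ *
+ * Return: true if all section headers, flags, bounds and CRCs validate, false otherwise.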
+ */
+static bool
+elf_validate_sections(const void *elf, size_t length)
+{
+ const struct elf32_hdr *ehdr = elf;
+ const struct elf32_shdr *shdr = elf + ehdr->e_shoff;
+
+ /* The offset of the first section */
+ Elf32_Off section_begin = ehdr->e_shoff + ehdr->e_shnum * ehdr->e_shentsize;
+
+ if (section_begin > length)
+ return false;
+
+ /* The first section header is the null section, so skip it */
+ for (unsigned int i = 1; i < ehdr->e_shnum; i++) {
+ if (i == ehdr->e_shstrndx) {
+ if (shdr[i].sh_type != SHT_STRTAB)
+ return false;
+ if (shdr[i].sh_flags != SHF_STRINGS)
+ return false;
+ } else {
+ if (shdr[i].sh_type != SHT_PROGBITS)
+ return false;
+ if (shdr[i].sh_flags != FMC_SHF_FLAGS)
+ return false;
+ }
+
+ /* Ensure that each section is inside the image */
+ if (shdr[i].sh_offset < section_begin ||
+ (u64)shdr[i].sh_offset + shdr[i].sh_size > length)
+ return false;
+
+ /* Non-zero sh_info is a CRC */
+ if (shdr[i].sh_info) {
+ /* The kernel's CRC32 needs a pre- and post-xor to match standard CRCs */
+ u32 crc32 = crc32_le(~0, elf + shdr[i].sh_offset, shdr[i].sh_size) ^ ~0;
+
+ if (shdr[i].sh_info != crc32)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * elf_section - return a pointer to the data for a given section
+ * @elf: ELF image
+ * @name: section name to search for
+ * @len: pointer to returned length of found section
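+ *
+ * Return: pointer to the section data, or NULL if no such section exists.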
+ */
+static const void *
+elf_section(const void *elf, const char *name, unsigned int *len)
+{
+ const struct elf32_hdr *ehdr = elf;
+ const struct elf32_shdr *shdr = elf + ehdr->e_shoff;
+ const char *names = elf + shdr[ehdr->e_shstrndx].sh_offset;
+
+ for (unsigned int i = 1; i < ehdr->e_shnum; i++) {
+ if (!strcmp(&names[shdr[i].sh_name], name)) {
+ *len = shdr[i].sh_size;
+ return elf + shdr[i].sh_offset;
+ }
+ }
+
+ return NULL;
+}
+
+int
+gh100_gsp_oneinit(struct nvkm_gsp *gsp)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_fsp *fsp = device->fsp;
+ const void *fw = gsp->fws.fmc->data;
+ const void *hash, *sig, *pkey, *img;
+ unsigned int img_len = 0, hash_len = 0, pkey_len = 0, sig_len = 0;
+ int ret;
+
+ if (gsp->fws.fmc->size < ELF_HDR_SIZE ||
+ memcmp(fw, elf_header, sizeof(elf_header)) ||
+ !elf_validate_sections(fw, gsp->fws.fmc->size)) {
+ nvkm_error(subdev, "fmc firmware image is invalid\n");
+ return -ENODATA;
+ }
+
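+	/* The FMC image carries its hash, signature, public key and payload as named ELF sections. */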
+ hash = elf_section(fw, "hash", &hash_len);
+ sig = elf_section(fw, "signature", &sig_len);
+ pkey = elf_section(fw, "publickey", &pkey_len);
+ img = elf_section(fw, "image", &img_len);
+
+ if (!hash || !sig || !pkey || !img) {
+ nvkm_error(subdev, "fmc firmware image is invalid\n");
+ return -ENODATA;
+ }
+
+ if (!nvkm_fsp_verify_gsp_fmc(fsp, hash_len, pkey_len, sig_len))
+ return -EINVAL;
+
+ /* Load GSP-FMC FW into memory. */
+ ret = nvkm_gsp_mem_ctor(gsp, img_len, &gsp->fmc.fw);
+ if (ret)
+ return ret;
+
+ memcpy(gsp->fmc.fw.data, img, img_len);
+
+ gsp->fmc.hash = kmemdup(hash, hash_len, GFP_KERNEL);
+ gsp->fmc.pkey = kmemdup(pkey, pkey_len, GFP_KERNEL);
+ gsp->fmc.sig = kmemdup(sig, sig_len, GFP_KERNEL);
+ if (!gsp->fmc.hash || !gsp->fmc.pkey || !gsp->fmc.sig)
+ return -ENOMEM;
+
+ ret = r535_gsp_oneinit(gsp);
+ if (ret)
+ return ret;
+
+ return gh100_gsp_wpr_meta_init(gsp);
+}
+
+static const struct nvkm_gsp_func
+gh100_gsp = {
+ .flcn = &ga102_gsp_flcn,
+
+ .sig_section = ".fwsignature_gh100",
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = gh100_gsp_oneinit,
+ .init = gh100_gsp_init,
+ .fini = gh100_gsp_fini,
+
+ .rm.gpu = &gh100_gpu,
+};
+
+int
+gh100_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
+{
+ int ret;
+
+ ret = tu102_gsp_load_rm(gsp, fwif);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_load_fw(gsp, "fmc", fwif->ver, &gsp->fws.fmc);
+
+done:
+ if (ret)
+ nvkm_gsp_dtor_fws(gsp);
+
+ return ret;
+}
+
+static struct nvkm_gsp_fwif
+gh100_gsps[] = {
+ { 0, gh100_gsp_load, &gh100_gsp, &r570_rm_gh100, "570.144", true },
+ {}
+};
+
+int
+gh100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(gh100_gsps, device, type, inst, pgsp);
+}
+
+NVKM_GSP_FIRMWARE_FMC(gh100, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
index 9f4a62375a27..4f14e85fc69e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -2,6 +2,7 @@
#ifndef __NVKM_GSP_PRIV_H__
#define __NVKM_GSP_PRIV_H__
#include <subdev/gsp.h>
+#include <rm/gpu.h>
enum nvkm_acr_lsf_id;
int nvkm_gsp_fwsec_frts(struct nvkm_gsp *);
@@ -11,12 +12,32 @@ struct nvkm_gsp_fwif {
int version;
int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *);
const struct nvkm_gsp_func *func;
+ const struct nvkm_rm_impl *rm;
const char *ver;
bool enable;
};
+int nvkm_gsp_load_fw(struct nvkm_gsp *, const char *name, const char *ver,
+ const struct firmware **);
+void nvkm_gsp_dtor_fws(struct nvkm_gsp *);
+
int gv100_gsp_nofw(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
-int r535_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+
+int tu102_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+int tu102_gsp_load_rm(struct nvkm_gsp *, const struct nvkm_gsp_fwif *);
+
+int gh100_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+
+#define NVKM_GSP_FIRMWARE_BOOTER(chip,vers) \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-"#vers".bin")
+
+#define NVKM_GSP_FIRMWARE_FMC(chip,vers) \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/fmc-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-"#vers".bin")
struct nvkm_gsp_func {
const struct nvkm_falcon_func *flcn;
@@ -25,12 +46,6 @@ struct nvkm_gsp_func {
char *sig_section;
struct {
- u32 os_carveout_size;
- u32 base_size;
- u64 min_size;
- } wpr_heap;
-
- struct {
int (*ctor)(struct nvkm_gsp *, const char *name, const struct firmware *,
struct nvkm_falcon *, struct nvkm_falcon_fw *);
} booter;
@@ -41,7 +56,9 @@ struct nvkm_gsp_func {
int (*fini)(struct nvkm_gsp *, bool suspend);
int (*reset)(struct nvkm_gsp *);
- const struct nvkm_gsp_rm *rm;
+ struct {
+ const struct nvkm_rm_gpu *gpu;
+ } rm;
};
extern const struct nvkm_falcon_func tu102_gsp_flcn;
@@ -49,7 +66,10 @@ extern const struct nvkm_falcon_fw_func tu102_gsp_fwsec;
int tu102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *,
struct nvkm_falcon *, struct nvkm_falcon_fw *);
int tu102_gsp_oneinit(struct nvkm_gsp *);
+int tu102_gsp_init(struct nvkm_gsp *);
+int tu102_gsp_fini(struct nvkm_gsp *, bool suspend);
int tu102_gsp_reset(struct nvkm_gsp *);
+u64 tu102_gsp_wpr_heap_size(struct nvkm_gsp *);
extern const struct nvkm_falcon_func ga102_gsp_flcn;
extern const struct nvkm_falcon_fw_func ga102_gsp_fwsec;
@@ -57,11 +77,14 @@ int ga102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware
struct nvkm_falcon *, struct nvkm_falcon_fw *);
int ga102_gsp_reset(struct nvkm_gsp *);
+int gh100_gsp_oneinit(struct nvkm_gsp *);
+int gh100_gsp_init(struct nvkm_gsp *);
+int gh100_gsp_fini(struct nvkm_gsp *, bool suspend);
+
void r535_gsp_dtor(struct nvkm_gsp *);
int r535_gsp_oneinit(struct nvkm_gsp *);
int r535_gsp_init(struct nvkm_gsp *);
int r535_gsp_fini(struct nvkm_gsp *, bool suspend);
-extern const struct nvkm_gsp_rm r535_gsp_rm;
int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gsp **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild
new file mode 100644
index 000000000000..04037394a2da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+nvkm-y += nvkm/subdev/gsp/rm/client.o
+nvkm-y += nvkm/subdev/gsp/rm/engine.o
+nvkm-y += nvkm/subdev/gsp/rm/gr.o
+nvkm-y += nvkm/subdev/gsp/rm/nvdec.o
+nvkm-y += nvkm/subdev/gsp/rm/nvenc.o
+
+nvkm-y += nvkm/subdev/gsp/rm/tu1xx.o
+nvkm-y += nvkm/subdev/gsp/rm/ga100.o
+nvkm-y += nvkm/subdev/gsp/rm/ga1xx.o
+nvkm-y += nvkm/subdev/gsp/rm/ad10x.o
+nvkm-y += nvkm/subdev/gsp/rm/gh100.o
+nvkm-y += nvkm/subdev/gsp/rm/gb10x.o
+nvkm-y += nvkm/subdev/gsp/rm/gb20x.o
+
+include $(src)/nvkm/subdev/gsp/rm/r535/Kbuild
+include $(src)/nvkm/subdev/gsp/rm/r570/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c
new file mode 100644
index 000000000000..e1ce6355c35f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+ad10x_gpu = {
+ .disp.class = {
+ .root = AD102_DISP,
+ .caps = GV100_DISP_CAPS,
+ .core = AD102_DISP_CORE_CHANNEL_DMA,
+ .wndw = GA102_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = GA102_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = GA102_DISP_CURSOR,
+ },
+
+ .usermode.class = AMPERE_USERMODE_A,
+
+ .fifo.chan = {
+ .class = AMPERE_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = AMPERE_DMA_COPY_B,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = ADA_A,
+ .compute = ADA_COMPUTE_A,
+ },
+ .nvdec.class = NVC9B0_VIDEO_DECODER,
+ .nvenc.class = NVC9B7_VIDEO_ENCODER,
+ .ofa.class = NVC9FA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c
new file mode 100644
index 000000000000..72d3e3ca84c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "rm.h"
+
+void
+nvkm_gsp_client_dtor(struct nvkm_gsp_client *client)
+{
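+	/* The idr slot is recovered from the RM handle, which encodes it as NVKM_RM_CLIENT(id). */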
+ const unsigned int id = client->object.handle - NVKM_RM_CLIENT(0);
+ struct nvkm_gsp *gsp = client->gsp;
+
+ if (!gsp)
+ return;
+
+ if (client->object.client)
+ nvkm_gsp_rm_free(&client->object);
+
+ mutex_lock(&gsp->client_id.mutex);
+ idr_remove(&gsp->client_id.idr, id);
+ mutex_unlock(&gsp->client_id.mutex);
+
+ client->gsp = NULL;
+}
+
+int
+nvkm_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
+{
+ int id, ret;
+
+ if (WARN_ON(!gsp->rm))
+ return -ENOSYS;
+
+ mutex_lock(&gsp->client_id.mutex);
+ id = idr_alloc(&gsp->client_id.idr, client, 0, NVKM_RM_CLIENT_MASK + 1, GFP_KERNEL);
+ mutex_unlock(&gsp->client_id.mutex);
+ if (id < 0)
+ return id;
+
+ client->gsp = gsp;
+ client->object.client = client;
+ INIT_LIST_HEAD(&client->events);
+
+ ret = gsp->rm->api->client->ctor(client, NVKM_RM_CLIENT(id));
+ if (ret)
+ nvkm_gsp_client_dtor(client);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c
new file mode 100644
index 000000000000..3b0e83b2f57f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "engine.h"
+#include "gpu.h"
+
+#include <core/object.h>
+#include <engine/fifo/chan.h>
+
+struct nvkm_rm_engine {
+ struct nvkm_engine engine;
+
+ struct nvkm_engine_func func;
+};
+
+struct nvkm_rm_engine_obj {
+ struct nvkm_object object;
+ struct nvkm_gsp_object rm;
+};
+
+static void *
+nvkm_rm_engine_obj_dtor(struct nvkm_object *object)
+{
+ struct nvkm_rm_engine_obj *obj = container_of(object, typeof(*obj), object);
+
+ nvkm_gsp_rm_free(&obj->rm);
+ return obj;
+}
+
+static const struct nvkm_object_func
+nvkm_rm_engine_obj = {
+ .dtor = nvkm_rm_engine_obj_dtor,
+};
+
+int
+nvkm_rm_engine_obj_new(struct nvkm_gsp_object *chan, int chid, const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_rm *rm = chan->client->gsp->rm;
+ const int inst = oclass->engine->subdev.inst;
+ const u32 class = oclass->base.oclass;
+ const u32 handle = oclass->handle;
+ struct nvkm_rm_engine_obj *obj;
+ int ret;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
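+	/* GR subchannel objects are plain RM allocations; other engines go through per-API alloc hooks. */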
+ switch (oclass->engine->subdev.type) {
+ case NVKM_ENGINE_CE:
+ ret = rm->api->ce->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_GR:
+ ret = nvkm_gsp_rm_alloc(chan, handle, class, 0, &obj->rm);
+ break;
+ case NVKM_ENGINE_NVDEC:
+ ret = rm->api->nvdec->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_NVENC:
+ ret = rm->api->nvenc->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_NVJPG:
+ ret = rm->api->nvjpg->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_OFA:
+ ret = rm->api->ofa->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ default:
+ ret = -EINVAL;
+ WARN_ON(1);
+ break;
+ }
+
+ if (ret) {
+ kfree(obj);
+ return ret;
+ }
+
+ nvkm_object_ctor(&nvkm_rm_engine_obj, oclass, &obj->object);
+ *pobject = &obj->object;
+ return 0;
+}
+
+static int
+nvkm_rm_engine_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+
+ return nvkm_rm_engine_obj_new(&chan->rm.object, chan->id, oclass, pobject);
+}
+
+static void *
+nvkm_rm_engine_dtor(struct nvkm_engine *engine)
+{
+ kfree(engine->func);
+ return engine;
+}
+
+int
+nvkm_rm_engine_ctor(void *(*dtor)(struct nvkm_engine *), struct nvkm_rm *rm,
+ enum nvkm_subdev_type type, int inst,
+ const u32 *class, int nclass, struct nvkm_engine *engine)
+{
+ struct nvkm_engine_func *func;
+
+ func = kzalloc(struct_size(func, sclass, nclass + 1), GFP_KERNEL);
+ if (!func)
+ return -ENOMEM;
+
+ func->dtor = dtor;
+
+ for (int i = 0; i < nclass; i++) {
+ func->sclass[i].oclass = class[i];
+ func->sclass[i].minver = -1;
+ func->sclass[i].maxver = 0;
+ func->sclass[i].ctor = nvkm_rm_engine_obj_ctor;
+ }
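+	/* The sclass array was sized nclass + 1, so kzalloc() leaves a zeroed terminator entry. */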
+
+ nvkm_engine_ctor(func, rm->device, type, inst, true, engine);
+ return 0;
+}
+
+static int
+nvkm_rm_engine_new_(struct nvkm_rm *rm, enum nvkm_subdev_type type, int inst, u32 class,
+ struct nvkm_engine **pengine)
+{
+ struct nvkm_engine *engine;
+ int ret;
+
+ engine = kzalloc(sizeof(*engine), GFP_KERNEL);
+ if (!engine)
+ return -ENOMEM;
+
+ ret = nvkm_rm_engine_ctor(nvkm_rm_engine_dtor, rm, type, inst, &class, 1, engine);
+ if (ret) {
+ kfree(engine);
+ return ret;
+ }
+
+ *pengine = engine;
+ return 0;
+}
+
+int
+nvkm_rm_engine_new(struct nvkm_rm *rm, enum nvkm_subdev_type type, int inst)
+{
+ const struct nvkm_rm_gpu *gpu = rm->gpu;
+ struct nvkm_device *device = rm->device;
+
+ switch (type) {
+ case NVKM_ENGINE_CE:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->ce)))
+ return -EINVAL;
+
+ return nvkm_rm_engine_new_(rm, type, inst, gpu->ce.class, &device->ce[inst]);
+ case NVKM_ENGINE_GR:
+ if (inst != 0)
+			return -ENODEV; /* MIG not supported, just ignore. */
+
+ return nvkm_rm_gr_new(rm);
+ case NVKM_ENGINE_NVDEC:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->nvdec)))
+ return -EINVAL;
+
+ return nvkm_rm_nvdec_new(rm, inst);
+ case NVKM_ENGINE_NVENC:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->nvenc)))
+ return -EINVAL;
+
+ return nvkm_rm_nvenc_new(rm, inst);
+ case NVKM_ENGINE_NVJPG:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->nvjpg)))
+ return -EINVAL;
+
+ return nvkm_rm_engine_new_(rm, type, inst, gpu->nvjpg.class, &device->nvjpg[inst]);
+ case NVKM_ENGINE_OFA:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->ofa)))
+ return -EINVAL;
+
+ return nvkm_rm_engine_new_(rm, type, inst, gpu->ofa.class, &device->ofa[inst]);
+ default:
+ break;
+ }
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h
new file mode 100644
index 000000000000..5b8c9c3901d4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_ENGINE_H__
+#define __NVKM_RM_ENGINE_H__
+#include "gpu.h"
+
+int nvkm_rm_engine_ctor(void *(*dtor)(struct nvkm_engine *), struct nvkm_rm *,
+ enum nvkm_subdev_type type, int inst,
+ const u32 *class, int nclass, struct nvkm_engine *);
+int nvkm_rm_engine_new(struct nvkm_rm *, enum nvkm_subdev_type, int inst);
+
+int nvkm_rm_engine_obj_new(struct nvkm_gsp_object *chan, int chid, const struct nvkm_oclass *,
+ struct nvkm_object **);
+
+int nvkm_rm_gr_new(struct nvkm_rm *);
+int nvkm_rm_nvdec_new(struct nvkm_rm *, int inst);
+int nvkm_rm_nvenc_new(struct nvkm_rm *, int inst);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c
new file mode 100644
index 000000000000..a48c6134075d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+ga100_gpu = {
+ .usermode.class = AMPERE_USERMODE_A,
+
+ .fifo.chan = {
+ .class = AMPERE_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = AMPERE_DMA_COPY_A,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = AMPERE_A,
+ .compute = AMPERE_COMPUTE_A,
+ },
+ .nvdec.class = NVC6B0_VIDEO_DECODER,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c
new file mode 100644
index 000000000000..50536ad7f85d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+ga1xx_gpu = {
+ .disp.class = {
+ .root = GA102_DISP,
+ .caps = GV100_DISP_CAPS,
+ .core = GA102_DISP_CORE_CHANNEL_DMA,
+ .wndw = GA102_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = GA102_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = GA102_DISP_CURSOR,
+ },
+
+ .usermode.class = AMPERE_USERMODE_A,
+
+ .fifo.chan = {
+ .class = AMPERE_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = AMPERE_DMA_COPY_B,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = AMPERE_B,
+ .compute = AMPERE_COMPUTE_B,
+ },
+ .nvdec.class = NVC7B0_VIDEO_DECODER,
+ .nvenc.class = NVC7B7_VIDEO_ENCODER,
+ .ofa.class = NVC7FA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c
new file mode 100644
index 000000000000..2f517dcd721a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+gb10x_gpu = {
+ .usermode.class = HOPPER_USERMODE_A,
+
+ .fifo.chan = {
+ .class = BLACKWELL_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = BLACKWELL_DMA_COPY_A,
+ .gr.class = {
+ .i2m = BLACKWELL_INLINE_TO_MEMORY_A,
+ .twod = FERMI_TWOD_A,
+ .threed = BLACKWELL_A,
+ .compute = BLACKWELL_COMPUTE_A,
+ },
+ .nvdec.class = NVCDB0_VIDEO_DECODER,
+ .nvjpg.class = NVCDD1_VIDEO_NVJPG,
+ .ofa.class = NVCDFA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c
new file mode 100644
index 000000000000..950471d9996e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/ce/priv.h>
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+gb20x_gpu = {
+ .disp.class = {
+ .root = GB202_DISP,
+ .caps = GB202_DISP_CAPS,
+ .core = GB202_DISP_CORE_CHANNEL_DMA,
+ .wndw = GB202_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = GB202_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = GB202_DISP_CURSOR,
+ },
+
+ .usermode.class = BLACKWELL_USERMODE_A,
+
+ .fifo.chan = {
+ .class = BLACKWELL_CHANNEL_GPFIFO_B,
+ .doorbell_handle = gb202_chan_doorbell_handle,
+ },
+
+ .ce = {
+ .class = BLACKWELL_DMA_COPY_B,
+ .grce_mask = gb202_ce_grce_mask,
+ },
+ .gr.class = {
+ .i2m = BLACKWELL_INLINE_TO_MEMORY_A,
+ .twod = FERMI_TWOD_A,
+ .threed = BLACKWELL_B,
+ .compute = BLACKWELL_COMPUTE_B,
+ },
+ .nvdec.class = NVCFB0_VIDEO_DECODER,
+ .nvenc.class = NVCFB7_VIDEO_ENCODER,
+ .nvjpg.class = NVCFD1_VIDEO_NVJPG,
+ .ofa.class = NVCFFA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c
new file mode 100644
index 000000000000..49e2c54e1aa8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+gh100_gpu = {
+ .usermode.class = HOPPER_USERMODE_A,
+
+ .fifo.chan = {
+ .class = HOPPER_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = HOPPER_DMA_COPY_A,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = HOPPER_A,
+ .compute = HOPPER_COMPUTE_A,
+ },
+ .nvdec.class = NVB8B0_VIDEO_DECODER,
+ .nvjpg.class = NVB8D1_VIDEO_NVJPG,
+ .ofa.class = NVB8FA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h
new file mode 100644
index 000000000000..46a6325641b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_GPU_H__
+#define __NVKM_RM_GPU_H__
+#include "rm.h"
+
+struct nvkm_rm_gpu {
+ struct {
+ struct {
+ u32 root;
+ u32 caps;
+ u32 core;
+ u32 wndw;
+ u32 wimm;
+ u32 curs;
+ } class;
+ } disp;
+
+ struct {
+ u32 class;
+ } usermode;
+
+ struct {
+ struct {
+ u32 class;
+ u32 (*doorbell_handle)(struct nvkm_chan *);
+ } chan;
+ } fifo;
+
+ struct {
+ u32 class;
+ u32 (*grce_mask)(struct nvkm_device *);
+ } ce;
+
+ struct {
+ struct {
+ u32 i2m;
+ u32 twod;
+ u32 threed;
+ u32 compute;
+ } class;
+ } gr;
+
+ struct {
+ u32 class;
+ } nvdec;
+
+ struct {
+ u32 class;
+ } nvenc;
+
+ struct {
+ u32 class;
+ } nvjpg;
+
+ struct {
+ u32 class;
+ } ofa;
+};
+
+extern const struct nvkm_rm_gpu tu1xx_gpu;
+extern const struct nvkm_rm_gpu ga100_gpu;
+extern const struct nvkm_rm_gpu ga1xx_gpu;
+extern const struct nvkm_rm_gpu ad10x_gpu;
+extern const struct nvkm_rm_gpu gh100_gpu;
+extern const struct nvkm_rm_gpu gb10x_gpu;
+extern const struct nvkm_rm_gpu gb20x_gpu;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c
new file mode 100644
index 000000000000..f40b8fcc2bcb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gr.h"
+
+#include <engine/fifo.h>
+#include <engine/gr/priv.h>
+
+static int
+nvkm_rm_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object);
+
+ return nvkm_rm_engine_obj_new(&chan->chan->rm.object, chan->chan->id, oclass, pobject);
+}
+
+static int
+nvkm_rm_gr_fini(struct nvkm_gr *base, bool suspend)
+{
+ struct nvkm_rm *rm = base->engine.subdev.device->gsp->rm;
+ struct r535_gr *gr = container_of(base, typeof(*gr), base);
+
+ if (rm->api->gr->scrubber.fini)
+ rm->api->gr->scrubber.fini(gr);
+
+ return 0;
+}
+
+static int
+nvkm_rm_gr_init(struct nvkm_gr *base)
+{
+ struct nvkm_rm *rm = base->engine.subdev.device->gsp->rm;
+ struct r535_gr *gr = container_of(base, typeof(*gr), base);
+ int ret;
+
+ if (rm->api->gr->scrubber.init) {
+ ret = rm->api->gr->scrubber.init(gr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+nvkm_rm_gr_new(struct nvkm_rm *rm)
+{
+ const u32 classes[] = {
+ rm->gpu->gr.class.i2m,
+ rm->gpu->gr.class.twod,
+ rm->gpu->gr.class.threed,
+ rm->gpu->gr.class.compute,
+ };
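+	/* Each class above gets one sclass entry in the func table allocated below. */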
+ struct nvkm_gr_func *func;
+ struct r535_gr *gr;
+
+ func = kzalloc(struct_size(func, sclass, ARRAY_SIZE(classes) + 1), GFP_KERNEL);
+ if (!func)
+ return -ENOMEM;
+
+ func->dtor = r535_gr_dtor;
+ func->oneinit = r535_gr_oneinit;
+ func->init = nvkm_rm_gr_init;
+ func->fini = nvkm_rm_gr_fini;
+ func->units = r535_gr_units;
+ func->chan_new = r535_gr_chan_new;
+
+ for (int i = 0; i < ARRAY_SIZE(classes); i++) {
+ func->sclass[i].oclass = classes[i];
+ func->sclass[i].minver = -1;
+ func->sclass[i].maxver = 0;
+ func->sclass[i].ctor = nvkm_rm_gr_obj_ctor;
+ }
+
+ gr = kzalloc(sizeof(*gr), GFP_KERNEL);
+ if (!gr) {
+ kfree(func);
+ return -ENOMEM;
+ }
+
+ nvkm_gr_ctor(func, rm->device, NVKM_ENGINE_GR, 0, true, &gr->base);
+ gr->scrubber.chid = -1;
+ rm->device->gr = &gr->base;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h
new file mode 100644
index 000000000000..24980f23aab9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_RM_GR_H__
+#define __NVKM_RM_GR_H__
+#include "engine.h"
+
+#include <core/object.h>
+#include <engine/gr.h>
+
+#define R515_GR_MAX_CTXBUFS 9
+
+struct r535_gr_chan {
+ struct nvkm_object object;
+ struct r535_gr *gr;
+
+ struct nvkm_vmm *vmm;
+ struct nvkm_chan *chan;
+
+ struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS];
+ struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+};
+
+struct r535_gr {
+ struct nvkm_gr base;
+
+ struct {
+ u16 bufferId;
+ u32 size;
+ u8 page;
+ u8 align;
+ bool global;
+ bool init;
+ bool ro;
+ } ctxbuf[R515_GR_MAX_CTXBUFS];
+ int ctxbuf_nr;
+
+ struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS];
+
+ struct {
+ int chid;
+ struct nvkm_memory *inst;
+ struct nvkm_vmm *vmm;
+ struct nvkm_gsp_object chan;
+ struct nvkm_gsp_object threed;
+ struct {
+ struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS];
+ struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+ } ctxbuf;
+ bool enabled;
+ } scrubber;
+};
+
+struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
+void r535_gr_get_ctxbuf_info(struct r535_gr *, int i,
+ struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h
new file mode 100644
index 000000000000..3bdb5ad320d7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_HANDLES_H__
+#define __NVKM_RM_HANDLES_H__
+
+/* RMAPI handles for various objects allocated from GSP-RM with RM_ALLOC. */
+
+#define NVKM_RM_CLIENT(id) (0xc1d00000 | (id))
+#define NVKM_RM_CLIENT_MASK 0x0000ffff
+#define NVKM_RM_DEVICE 0xde1d0000
+#define NVKM_RM_SUBDEVICE 0x5d1d0000
+#define NVKM_RM_DISP 0x00730000
+#define NVKM_RM_VASPACE 0x90f10000
+#define NVKM_RM_CHAN(chid) (0xf1f00000 | (chid))
+#define NVKM_RM_THREED 0x97000000
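+
+/* e.g. NVKM_RM_CLIENT(5) == 0xc1d00005 and NVKM_RM_CHAN(3) == 0xf1f00003;
+ * client ids must fit within NVKM_RM_CLIENT_MASK.
+ */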
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c
new file mode 100644
index 000000000000..d9fbfc377864
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "engine.h"
+#include <engine/nvdec.h>
+
+static void *
+nvkm_rm_nvdec_dtor(struct nvkm_engine *engine)
+{
+ return container_of(engine, struct nvkm_nvdec, engine);
+}
+
+int
+nvkm_rm_nvdec_new(struct nvkm_rm *rm, int inst)
+{
+ struct nvkm_nvdec *nvdec;
+ int ret;
+
+ nvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL);
+ if (!nvdec)
+ return -ENOMEM;
+
+ ret = nvkm_rm_engine_ctor(nvkm_rm_nvdec_dtor, rm, NVKM_ENGINE_NVDEC, inst,
+ &rm->gpu->nvdec.class, 1, &nvdec->engine);
+ if (ret) {
+ kfree(nvdec);
+ return ret;
+ }
+
+ rm->device->nvdec[inst] = nvdec;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c
new file mode 100644
index 000000000000..6dfa7b789e07
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "engine.h"
+#include <engine/nvenc.h>
+
+static void *
+nvkm_rm_nvenc_dtor(struct nvkm_engine *engine)
+{
+ return container_of(engine, struct nvkm_nvenc, engine);
+}
+
+int
+nvkm_rm_nvenc_new(struct nvkm_rm *rm, int inst)
+{
+ struct nvkm_nvenc *nvenc;
+ int ret;
+
+ nvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL);
+ if (!nvenc)
+ return -ENOMEM;
+
+ ret = nvkm_rm_engine_ctor(nvkm_rm_nvenc_dtor, rm, NVKM_ENGINE_NVENC, inst,
+ &rm->gpu->nvenc.class, 1, &nvenc->engine);
+ if (ret) {
+ kfree(nvenc);
+ return ret;
+ }
+
+ rm->device->nvenc[inst] = nvenc;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild
new file mode 100644
index 000000000000..a5f6b2abfd33
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/rm.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/gsp.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/rpc.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/ctrl.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/alloc.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/client.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/device.o
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/bar.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/fbsr.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/vmm.o
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/disp.o
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/fifo.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/ce.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/gr.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/nvdec.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/nvenc.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/nvjpg.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/ofa.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c
new file mode 100644
index 000000000000..46e3a29f2ad7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rpc.h>
+
+#include "nvrm/alloc.h"
+#include "nvrm/rpcfn.h"
+
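+/* RM_ALLOC RPCs follow a get/push/done lifecycle: get() reserves an RPC buffer
+ * and returns its params area, push() sends it and maps the RM status onto an
+ * errno, and done() releases the reply. free() is the matching teardown RPC.
+ */
+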
+static int
+r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
+{
+ struct nvkm_gsp_client *client = object->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ rpc_free_v03_00 *rpc;
+
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n",
+ client->object.handle, object->handle);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc));
+ if (WARN_ON(IS_ERR_OR_NULL(rpc)))
+ return -EIO;
+
+ rpc->params.hRoot = client->object.handle;
+ rpc->params.hObjectParent = 0;
+ rpc->params.hObjectOld = object->handle;
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
+}
+
+static void
+r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *params)
+{
+ rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
+
+ nvkm_gsp_rpc_done(object->client->gsp, rpc);
+}
+
+static void *
+r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params)
+{
+ rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
+ struct nvkm_gsp *gsp = object->client->gsp;
+ void *ret = NULL;
+
+ rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, sizeof(*rpc));
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ if (rpc->status) {
+ ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
+ if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY)
+ nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
+ }
+
+ nvkm_gsp_rpc_done(gsp, rpc);
+
+ return ret;
+}
+
+static void *
+r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass,
+ u32 params_size)
+{
+ struct nvkm_gsp_client *client = object->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ rpc_gsp_rm_alloc_v03_00 *rpc;
+
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x\n",
+ client->object.handle, object->parent->handle,
+ object->handle);
+
+ nvkm_debug(&gsp->subdev, "cls:0x%08x params_size:%d\n", oclass,
+ params_size);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC,
+ sizeof(*rpc) + params_size);
+ if (IS_ERR(rpc))
+ return rpc;
+
+ rpc->hClient = client->object.handle;
+ rpc->hParent = object->parent->handle;
+ rpc->hObject = object->handle;
+ rpc->hClass = oclass;
+ rpc->status = 0;
+ rpc->paramsSize = params_size;
+ return rpc->params;
+}
+
+const struct nvkm_rm_api_alloc
+r535_alloc = {
+ .get = r535_gsp_rpc_rm_alloc_get,
+ .push = r535_gsp_rpc_rm_alloc_push,
+ .done = r535_gsp_rpc_rm_alloc_done,
+ .free = r535_gsp_rpc_rm_free,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c
index 3a30bea30e36..d06bf95b9a4a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c
@@ -19,7 +19,7 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "gf100.h"
+#include <subdev/bar/gf100.h>
#include <core/mm.h>
#include <subdev/fb.h>
@@ -27,14 +27,20 @@
#include <subdev/instmem.h>
#include <subdev/mmu/vmm.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
+#include "nvrm/bar.h"
+#include "nvrm/rpcfn.h"
static void
r535_bar_flush(struct nvkm_bar *bar)
{
+	/* Use NV_PFLUSH in the resume path - needed on R570 to flush writes
+	 * before the BAR2 page tables have been restored.
+	 */
+ if (unlikely(!bar->bar2)) {
+ g84_bar_flush(bar);
+ return;
+ }
+
ioread32_native(bar->flushBAR2);
}
@@ -44,7 +50,7 @@ r535_bar_bar2_wait(struct nvkm_bar *base)
}
static int
-r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr)
+r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u8 page_shift, u64 pdbe)
{
rpc_update_bar_pde_v15_00 *rpc;
@@ -53,21 +59,22 @@ r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr)
return -EIO;
rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2;
- rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */
- rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu!
+ rpc->info.entryValue = pdbe;
+ rpc->info.entryLevelShift = page_shift;
- return nvkm_gsp_rpc_wr(gsp, rpc, true);
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
}
static void
r535_bar_bar2_fini(struct nvkm_bar *bar)
{
+ struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
struct nvkm_gsp *gsp = bar->subdev.device->gsp;
bar->flushBAR2 = bar->flushBAR2PhysMode;
nvkm_done(bar->flushFBZero);
- WARN_ON(r535_bar_bar2_update_pde(gsp, 0));
+ WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->func->page[0].shift, 0));
}
static void
@@ -76,8 +83,18 @@ r535_bar_bar2_init(struct nvkm_bar *bar)
struct nvkm_device *device = bar->subdev.device;
struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
struct nvkm_gsp *gsp = device->gsp;
-
- WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->pd->pde[0]->pt[0]->addr));
+ struct nvkm_memory *pdb = vmm->pd->pt[0]->memory;
+ u32 pdb_offset = vmm->pd->pt[0]->base;
+ u32 pdbe_lo, pdbe_hi;
+ u64 pdbe;
+
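+	/* Read back the 64-bit BAR2 PDE as two 32-bit words (low word first). */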
+ nvkm_kmap(pdb);
+ pdbe_lo = nvkm_ro32(pdb, pdb_offset + 0);
+ pdbe_hi = nvkm_ro32(pdb, pdb_offset + 4);
+ pdbe = ((u64)pdbe_hi << 32) | pdbe_lo;
+ nvkm_done(pdb);
+
+ WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->func->page[0].shift, pdbe));
vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb;
if (!bar->flushFBZero) {
@@ -174,7 +191,7 @@ r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
}
*pbar = bar;
- bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE);
+ bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, NVKM_BAR2_INST), PAGE_SIZE);
if (!bar->flushBAR2PhysMode)
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c
new file mode 100644
index 000000000000..2d1ce9db2dcf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/ce.h"
+#include "nvrm/engine.h"
+
+static int
+r535_ce_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *ce)
+{
+ NVC0B5_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), ce);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->version = 1;
+ args->engineType = NV2080_ENGINE_TYPE_COPY0 + inst;
+
+ return nvkm_gsp_rm_alloc_wr(ce, args);
+}
+
+const struct nvkm_rm_api_engine
+r535_ce = {
+ .alloc = r535_ce_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c
index 1b4619ff9e8e..ec71f683e609 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c
@@ -19,26 +19,27 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
+#include <rm/rm.h>
-#include <subdev/gsp.h>
+#include "nvrm/client.h"
-#include <nvif/class.h>
+static int
+r535_gsp_client_ctor(struct nvkm_gsp_client *client, u32 handle)
+{
+ NV0000_ALLOC_PARAMETERS *args;
-static const struct nvkm_engine_func
-ad102_nvenc = {
- .sclass = {
- { -1, -1, NVC9B7_VIDEO_ENCODER },
- {}
- }
-};
+ args = nvkm_gsp_rm_alloc_get(&client->object, handle, NV01_ROOT, sizeof(*args),
+ &client->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
-int
-ad102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_nvenc **pnvenc)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvenc_new(&ad102_nvenc, device, type, inst, pnvenc);
+ args->hClient = client->object.handle;
+ args->processID = ~0;
- return -ENODEV;
+ return nvkm_gsp_rm_alloc_wr(&client->object, args);
}
+
+const struct nvkm_rm_api_client
+r535_client = {
+ .ctor = r535_gsp_client_ctor,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c
new file mode 100644
index 000000000000..70b9ee911c5e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rpc.h>
+
+#include "nvrm/ctrl.h"
+#include "nvrm/rpcfn.h"
+
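+/* RM_CONTROL mirrors the alloc lifecycle: get() builds the request, push()
+ * sends it and, when a reply payload (repc) is expected, hands the params
+ * area back to the caller, who then calls done() to release it.
+ */
+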
+static void
+r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *params)
+{
+ rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr(params, rpc);
+
+ if (!params)
+ return;
+ nvkm_gsp_rpc_done(object->client->gsp, rpc);
+}
+
+static int
+r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 repc)
+{
+ rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr((*params), rpc);
+ struct nvkm_gsp *gsp = object->client->gsp;
+ int ret = 0;
+
+ rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, repc);
+ if (IS_ERR_OR_NULL(rpc)) {
+ *params = NULL;
+ return PTR_ERR(rpc);
+ }
+
+ if (rpc->status) {
+ ret = r535_rpc_status_to_errno(rpc->status);
+ if (ret != -EAGAIN && ret != -EBUSY)
+ nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
+ object->client->object.handle, object->handle, rpc->cmd, rpc->status);
+ }
+
+ if (repc)
+ *params = rpc->params;
+ else
+ nvkm_gsp_rpc_done(gsp, rpc);
+
+ return ret;
+}
+
+static void *
+r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 params_size)
+{
+ struct nvkm_gsp_client *client = object->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ rpc_gsp_rm_control_v03_00 *rpc;
+
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x params_size:%d\n",
+ client->object.handle, object->handle, cmd, params_size);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
+ sizeof(*rpc) + params_size);
+ if (IS_ERR(rpc))
+ return rpc;
+
+ rpc->hClient = client->object.handle;
+ rpc->hObject = object->handle;
+ rpc->cmd = cmd;
+ rpc->status = 0;
+ rpc->paramsSize = params_size;
+ return rpc->params;
+}
+
+const struct nvkm_rm_api_ctrl
+r535_ctrl = {
+ .get = r535_gsp_rpc_rm_ctrl_get,
+ .push = r535_gsp_rpc_rm_ctrl_push,
+ .done = r535_gsp_rpc_rm_ctrl_done,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c
new file mode 100644
index 000000000000..f830e12a8f6e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/device.h"
+#include "nvrm/event.h"
+
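+/* GSP-RM device objects form a small tree: an NV01_DEVICE_0 device with an
+ * NV20_SUBDEVICE_0 subdevice, from which event notifiers are allocated and
+ * armed with SET_NOTIFICATION(ACTION_REPEAT).
+ */
+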
+static void
+r535_gsp_event_dtor(struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_device *device = event->device;
+ struct nvkm_gsp_client *client = device->object.client;
+ struct nvkm_gsp *gsp = client->gsp;
+
+ mutex_lock(&gsp->client_id.mutex);
+ if (event->func) {
+ list_del(&event->head);
+ event->func = NULL;
+ }
+ mutex_unlock(&gsp->client_id.mutex);
+
+ nvkm_gsp_rm_free(&event->object);
+ event->device = NULL;
+}
+
+static int
+r535_gsp_device_event_get(struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_device *device = event->device;
+ NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice,
+ NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->event = event->id;
+ ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
+ return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl);
+}
+
+static int
+r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
+ nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_client *client = device->object.client;
+ struct nvkm_gsp *gsp = client->gsp;
+ NV0005_ALLOC_PARAMETERS *args;
+ int ret;
+
+ args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle,
+ NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args),
+ &event->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hParentClient = client->object.handle;
+ args->hSrcResource = 0;
+ args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
+ args->notifyIndex = NV01_EVENT_CLIENT_RM | id;
+ args->data = NULL;
+
+ ret = nvkm_gsp_rm_alloc_wr(&event->object, args);
+ if (ret)
+ return ret;
+
+ event->device = device;
+ event->id = id;
+
+ ret = r535_gsp_device_event_get(event);
+ if (ret) {
+ nvkm_gsp_event_dtor(event);
+ return ret;
+ }
+
+ mutex_lock(&gsp->client_id.mutex);
+ event->func = func;
+ list_add(&event->head, &client->events);
+ mutex_unlock(&gsp->client_id.mutex);
+ return 0;
+}
+
+static void
+r535_gsp_device_dtor(struct nvkm_gsp_device *device)
+{
+ nvkm_gsp_rm_free(&device->subdevice);
+ nvkm_gsp_rm_free(&device->object);
+}
+
+static int
+r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device)
+{
+ NV2080_ALLOC_PARAMETERS *args;
+
+ return nvkm_gsp_rm_alloc(&device->object, NVKM_RM_SUBDEVICE, NV20_SUBDEVICE_0,
+ sizeof(*args), &device->subdevice);
+}
+
+static int
+r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
+{
+ NV0080_ALLOC_PARAMETERS *args;
+ int ret;
+
+ args = nvkm_gsp_rm_alloc_get(&client->object, NVKM_RM_DEVICE, NV01_DEVICE_0, sizeof(*args),
+ &device->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hClientShare = client->object.handle;
+
+ ret = nvkm_gsp_rm_alloc_wr(&device->object, args);
+ if (ret)
+ return ret;
+
+ ret = r535_gsp_subdevice_ctor(device);
+ if (ret)
+ nvkm_gsp_rm_free(&device->object);
+
+ return ret;
+}
+
+const struct nvkm_rm_api_device
+r535_device = {
+ .ctor = r535_gsp_device_ctor,
+ .dtor = r535_gsp_device_dtor,
+ .event.ctor = r535_gsp_device_event_ctor,
+ .event.dtor = r535_gsp_event_dtor,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
index 99110ab2f44d..7e9e2d3564da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
@@ -19,13 +19,13 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
-#include "chan.h"
-#include "conn.h"
-#include "dp.h"
-#include "head.h"
-#include "ior.h"
-#include "outp.h"
+#include <engine/disp/priv.h>
+#include <engine/disp/chan.h>
+#include <engine/disp/conn.h>
+#include <engine/disp/dp.h>
+#include <engine/disp/head.h>
+#include <engine/disp/ior.h>
+#include <engine/disp/outp.h>
#include <core/ramht.h>
#include <subdev/bios.h>
@@ -34,19 +34,11 @@
#include <subdev/mmu.h>
#include <subdev/vfn.h>
+#include <rm/gpu.h>
+
#include <nvhw/drf.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
-#include <nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h>
+#include "nvrm/disp.h"
#include <linux/acpi.h>
@@ -78,9 +70,9 @@ r535_chan_fini(struct nvkm_disp_chan *chan)
}
static int
-r535_chan_push(struct nvkm_disp_chan *chan)
+r535_disp_chan_set_pushbuf(struct nvkm_disp *disp, s32 oclass, int inst, struct nvkm_memory *memory)
{
- struct nvkm_gsp *gsp = chan->disp->engine.subdev.device->gsp;
+ struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp;
NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl;
ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
@@ -89,8 +81,8 @@ r535_chan_push(struct nvkm_disp_chan *chan)
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
- if (chan->memory) {
- switch (nvkm_memory_target(chan->memory)) {
+ if (memory) {
+ switch (nvkm_memory_target(memory)) {
case NVKM_MEM_TARGET_NCOH:
ctrl->addressSpace = ADDR_SYSMEM;
ctrl->cacheSnoop = 0;
@@ -107,13 +99,13 @@ r535_chan_push(struct nvkm_disp_chan *chan)
return -EINVAL;
}
- ctrl->physicalAddr = nvkm_memory_addr(chan->memory);
- ctrl->limit = nvkm_memory_size(chan->memory) - 1;
+ ctrl->physicalAddr = nvkm_memory_addr(memory);
+ ctrl->limit = nvkm_memory_size(memory) - 1;
}
- ctrl->hclass = chan->object.oclass;
- ctrl->channelInstance = chan->head;
- ctrl->valid = ((chan->object.oclass & 0xff) != 0x7a) ? 1 : 0;
+ ctrl->hclass = oclass;
+ ctrl->channelInstance = inst;
+ ctrl->valid = ((oclass & 0xff) != 0x7a) ? 1 : 0;
return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
}
@@ -121,10 +113,11 @@ r535_chan_push(struct nvkm_disp_chan *chan)
static int
r535_curs_init(struct nvkm_disp_chan *chan)
{
+ const struct nvkm_rm_api *rmapi = chan->disp->rm.objcom.client->gsp->rm->api;
NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *args;
int ret;
- ret = r535_chan_push(chan);
+ ret = rmapi->disp->chan.set_pushbuf(chan->disp, chan->object.oclass, chan->head, NULL);
if (ret)
return ret;
@@ -172,25 +165,34 @@ r535_dmac_fini(struct nvkm_disp_chan *chan)
}
static int
-r535_dmac_init(struct nvkm_disp_chan *chan)
+r535_dmac_alloc(struct nvkm_disp *disp, u32 oclass, int inst, u32 put_offset,
+ struct nvkm_gsp_object *dmac)
{
NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args;
- int ret;
-
- ret = r535_chan_push(chan);
- if (ret)
- return ret;
- args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object,
- (chan->object.oclass << 16) | chan->head,
- chan->object.oclass, sizeof(*args), &chan->rm.object);
+ args = nvkm_gsp_rm_alloc_get(&disp->rm.object, (oclass << 16) | inst, oclass,
+ sizeof(*args), dmac);
if (IS_ERR(args))
return PTR_ERR(args);
- args->channelInstance = chan->head;
- args->offset = chan->suspend_put;
+ args->channelInstance = inst;
+ args->offset = put_offset;
- return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+ return nvkm_gsp_rm_alloc_wr(dmac, args);
+}
+
+static int
+r535_dmac_init(struct nvkm_disp_chan *chan)
+{
+ const struct nvkm_rm_api *rmapi = chan->disp->rm.objcom.client->gsp->rm->api;
+ int ret;
+
+ ret = rmapi->disp->chan.set_pushbuf(chan->disp, chan->object.oclass, chan->head, chan->memory);
+ if (ret)
+ return ret;
+
+ return rmapi->disp->chan.dmac_alloc(chan->disp, chan->object.oclass, chan->head,
+ chan->suspend_put, &chan->rm.object);
}
static int
@@ -260,47 +262,47 @@ r535_core = {
};
static int
-r535_sor_bl_set(struct nvkm_ior *sor, int lvl)
+r535_bl_ctrl(struct nvkm_disp *disp, unsigned int display_id, bool set, int *pval)
{
- struct nvkm_disp *disp = sor->disp;
+ u32 cmd = set ? NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS :
+ NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS;
NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
+ int ret;
- ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
- NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS,
- sizeof(*ctrl));
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, cmd, sizeof(*ctrl));
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
- ctrl->displayId = BIT(sor->asy.outp->index);
- ctrl->brightness = lvl;
+ ctrl->displayId = BIT(display_id);
+ ctrl->brightness = *pval;
- return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret)
+ return ret;
+
+ *pval = ctrl->brightness;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
}
static int
-r535_sor_bl_get(struct nvkm_ior *sor)
+r535_sor_bl_set(struct nvkm_ior *sor, int lvl)
{
struct nvkm_disp *disp = sor->disp;
- NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
- int ret, lvl;
-
- ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
- NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
- sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ const struct nvkm_rm_api *rmapi = disp->engine.subdev.device->gsp->rm->api;
- ctrl->displayId = BIT(sor->asy.outp->index);
+ return rmapi->disp->bl_ctrl(disp, sor->asy.outp->index, true, &lvl);
+}
- ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
- if (ret) {
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
- return ret;
- }
+static int
+r535_sor_bl_get(struct nvkm_ior *sor)
+{
+ struct nvkm_disp *disp = sor->disp;
+ const struct nvkm_rm_api *rmapi = disp->engine.subdev.device->gsp->rm->api;
+ int lvl, ret = rmapi->disp->bl_ctrl(disp, sor->asy.outp->index, false, &lvl);
- lvl = ctrl->brightness;
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
- return lvl;
+ return (ret == 0) ? lvl : ret;
}
static const struct nvkm_ior_func_bl
@@ -730,7 +732,7 @@ r535_outp_acquire(struct nvkm_outp *outp, bool hda)
}
static int
-r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid)
+r535_disp_get_active(struct nvkm_disp *disp, unsigned head, u32 *displayid)
{
NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl;
int ret;
@@ -763,7 +765,9 @@ r535_outp_inherit(struct nvkm_outp *outp)
int ret;
list_for_each_entry(head, &disp->heads, head) {
- ret = r535_disp_head_displayid(disp, head->id, &displayid);
+ const struct nvkm_rm_api *rmapi = disp->rm.objcom.client->gsp->rm->api;
+
+ ret = rmapi->disp->get_active(disp, head->id, &displayid);
if (WARN_ON(ret))
return NULL;
@@ -858,10 +862,9 @@ r535_outp_dfp_get_info(struct nvkm_outp *outp)
}
static int
-r535_outp_detect(struct nvkm_outp *outp)
+r535_disp_get_connect_state(struct nvkm_disp *disp, unsigned display_id)
{
NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl;
- struct nvkm_disp *disp = outp->disp;
int ret;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
@@ -870,23 +873,29 @@ r535_outp_detect(struct nvkm_outp *outp)
return PTR_ERR(ctrl);
ctrl->subDeviceInstance = 0;
- ctrl->displayMask = BIT(outp->index);
+ ctrl->displayMask = BIT(display_id);
ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
- if (ret) {
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
- return ret;
- }
+ if (ret == 0 && (ctrl->displayMask & BIT(display_id)))
+ ret = 1;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+}
- if (ctrl->displayMask & BIT(outp->index)) {
+static int
+r535_outp_detect(struct nvkm_outp *outp)
+{
+ const struct nvkm_rm_api *rmapi = outp->disp->rm.objcom.client->gsp->rm->api;
+ int ret;
+
+ ret = rmapi->disp->get_connect_state(outp->disp, outp->index);
+ if (ret == 1) {
ret = r535_outp_dfp_get_info(outp);
if (ret == 0)
ret = 1;
- } else {
- ret = 0;
}
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
return ret;
}
@@ -1029,15 +1038,11 @@ r535_dp_train(struct nvkm_outp *outp, bool retrain)
}
static int
-r535_dp_rates(struct nvkm_outp *outp)
+r535_dp_set_indexed_link_rates(struct nvkm_outp *outp)
{
NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl;
struct nvkm_disp *disp = outp->disp;
- if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
- !outp->dp.rates || outp->dp.rate[0].dpcd < 0)
- return 0;
-
if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl)))
return -EINVAL;
@@ -1054,6 +1059,18 @@ r535_dp_rates(struct nvkm_outp *outp)
}
static int
+r535_dp_rates(struct nvkm_outp *outp)
+{
+ struct nvkm_rm *rm = outp->disp->rm.objcom.client->gsp->rm;
+
+ if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
+ !outp->dp.rates || outp->dp.rate[0].dpcd < 0)
+ return 0;
+
+ return rm->api->disp->dp.set_indexed_link_rates(outp);
+}
+
+static int
r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
{
struct nvkm_disp *disp = outp->disp;
@@ -1151,6 +1168,49 @@ r535_dp = {
};
static int
+r535_dp_get_caps(struct nvkm_disp *disp, int *plink_bw, bool *pmst, bool *pwm)
+{
+ NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->sorIndex = ~0;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
+ switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
+ *plink_bw = 0x06;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
+ *plink_bw = 0x0a;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
+ *plink_bw = 0x14;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
+ *plink_bw = 0x1e;
+ break;
+ default:
+ *plink_bw = 0x00;
+ break;
+ }
+
+ *pmst = ctrl->bIsMultistreamSupported;
+ *pwm = ctrl->bHasIncreasedWatermarkLimits;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize)
{
NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *ctrl;
@@ -1194,6 +1254,7 @@ r535_tmds = {
static int
r535_outp_new(struct nvkm_disp *disp, u32 id)
{
+ const struct nvkm_rm_api *rmapi = disp->rm.objcom.client->gsp->rm->api;
NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl;
enum nvkm_ior_proto proto;
struct dcb_output dcbE = {};
@@ -1278,43 +1339,11 @@ r535_outp_new(struct nvkm_disp *disp, u32 id)
if (ret)
return ret;
} else {
- NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
bool mst, wm;
- ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
- NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
-
- ctrl->sorIndex = ~0;
-
- ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
- if (ret) {
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ ret = rmapi->disp->dp.get_caps(disp, &dcbE.dpconf.link_bw, &mst, &wm);
+ if (ret)
return ret;
- }
-
- switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
- case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
- dcbE.dpconf.link_bw = 0x06;
- break;
- case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
- dcbE.dpconf.link_bw = 0x0a;
- break;
- case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
- dcbE.dpconf.link_bw = 0x14;
- break;
- case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
- dcbE.dpconf.link_bw = 0x1e;
- break;
- default:
- dcbE.dpconf.link_bw = 0x00;
- break;
- }
-
- mst = ctrl->bIsMultistreamSupported;
- wm = ctrl->bHasIncreasedWatermarkLimits;
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
if (WARN_ON(!dcbE.dpconf.link_bw))
return -EINVAL;
@@ -1441,11 +1470,47 @@ r535_disp_init(struct nvkm_disp *disp)
}
static int
+r535_disp_get_supported(struct nvkm_disp *disp, unsigned long *pmask)
+{
+ NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ *pmask = ctrl->displayMask;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r535_disp_get_static_info(struct nvkm_disp *disp)
+{
+ NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ disp->wndw.mask = ctrl->windowPresentMask;
+ disp->wndw.nr = fls(disp->wndw.mask);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+static int
r535_disp_oneinit(struct nvkm_disp *disp)
{
struct nvkm_device *device = disp->engine.subdev.device;
struct nvkm_gsp *gsp = device->gsp;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *ctrl;
+ unsigned long mask;
int ret, i;
/* RAMIN. */
@@ -1476,24 +1541,14 @@ r535_disp_oneinit(struct nvkm_disp *disp)
if (ret)
return ret;
- ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, 0x00730000, NV04_DISPLAY_COMMON, 0,
+ ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, NVKM_RM_DISP, NV04_DISPLAY_COMMON, 0,
&disp->rm.objcom);
if (ret)
return ret;
- {
- NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
-
- ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
- NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
- sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
-
- disp->wndw.mask = ctrl->windowPresentMask;
- disp->wndw.nr = fls(disp->wndw.mask);
- nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
- }
+ ret = rmapi->disp->get_static_info(disp);
+ if (ret)
+ return ret;
/* */
{
@@ -1622,25 +1677,14 @@ r535_disp_oneinit(struct nvkm_disp *disp)
return ret;
}
- /* */
- {
- NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
- unsigned long mask;
- int i;
-
- ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
- NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
-
- mask = ctrl->displayMask;
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ ret = rmapi->disp->get_supported(disp, &mask);
+ if (ret)
+ return ret;
- for_each_set_bit(i, &mask, 32) {
- ret = r535_outp_new(disp, i);
- if (ret)
- return ret;
- }
+ for_each_set_bit(i, &mask, 32) {
+ ret = r535_outp_new(disp, i);
+ if (ret)
+ return ret;
}
ret = nvkm_event_init(&r535_disp_event, &gsp->subdev, 3, 32, &disp->rm.event);
@@ -1686,6 +1730,7 @@ int
r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
{
+ const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu;
struct nvkm_disp_func *rm;
int ret;
@@ -1701,20 +1746,26 @@ r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device,
rm->sor.new = r535_sor_new;
rm->ramht_size = hw->ramht_size;
- rm->root = hw->root;
+ rm->root.oclass = gpu->disp.class.root;
- for (int i = 0; hw->user[i].ctor; i++) {
- switch (hw->user[i].base.oclass & 0xff) {
- case 0x73: rm->user[i] = hw->user[i]; break;
- case 0x7d: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_core; break;
- case 0x7e: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wndw; break;
- case 0x7b: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wimm; break;
- case 0x7a: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_curs; break;
- default:
- WARN_ON(1);
- continue;
- }
- }
+ rm->user[0].base.oclass = gpu->disp.class.caps;
+ rm->user[0].ctor = gv100_disp_caps_new;
+
+ rm->user[1].base.oclass = gpu->disp.class.core;
+ rm->user[1].ctor = nvkm_disp_core_new;
+ rm->user[1].chan = &r535_core;
+
+ rm->user[2].base.oclass = gpu->disp.class.wndw;
+ rm->user[2].ctor = nvkm_disp_wndw_new;
+ rm->user[2].chan = &r535_wndw;
+
+ rm->user[3].base.oclass = gpu->disp.class.wimm;
+ rm->user[3].ctor = nvkm_disp_wndw_new;
+ rm->user[3].chan = &r535_wimm;
+
+ rm->user[4].base.oclass = gpu->disp.class.curs;
+ rm->user[4].ctor = nvkm_disp_chan_new;
+ rm->user[4].chan = &r535_curs;
ret = nvkm_disp_new_(rm, device, type, inst, pdisp);
if (ret)
@@ -1723,3 +1774,20 @@ r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device,
mutex_init(&(*pdisp)->super.mutex); //XXX
return ret;
}
+
+const struct nvkm_rm_api_disp
+r535_disp = {
+ .get_static_info = r535_disp_get_static_info,
+ .get_supported = r535_disp_get_supported,
+ .get_connect_state = r535_disp_get_connect_state,
+ .get_active = r535_disp_get_active,
+ .bl_ctrl = r535_bl_ctrl,
+ .dp = {
+ .get_caps = r535_dp_get_caps,
+ .set_indexed_link_rates = r535_dp_set_indexed_link_rates,
+ },
+ .chan = {
+ .set_pushbuf = r535_disp_chan_set_pushbuf,
+ .dmac_alloc = r535_dmac_alloc,
+ }
+};
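
The backlight hunks above fold the old set/get pair into a single r535_bl_ctrl() entry point that selects the RM command with a bool and uses *pval as an in/out parameter. Below is a minimal compilable sketch of that calling convention; the static variable stands in for the GSP round-trip and is not part of the driver.

/* Sketch of the merged backlight-control convention: one entry point for
 * both GET and SET, *pval carried in and out. "hw_brightness" emulates
 * RM-side state for illustration only.
 */
#include <stdio.h>
#include <stdbool.h>

static int hw_brightness = 50;	/* pretend RM-side state */

static int bl_ctrl(bool set, int *pval)
{
	if (set)
		hw_brightness = *pval;	/* SET: push the caller's value */

	*pval = hw_brightness;		/* both paths report the result back */
	return 0;
}

int main(void)
{
	int lvl = 80;

	bl_ctrl(true, &lvl);	/* like r535_sor_bl_set() */
	lvl = 0;
	bl_ctrl(false, &lvl);	/* like r535_sor_bl_get() */
	printf("brightness %d\n", lvl);
	return 0;
}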
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c
index 5f3c9c02a4c0..150e22fde2ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c
@@ -19,19 +19,13 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
-
+#include <subdev/instmem/priv.h>
#include <subdev/gsp.h>
#include <nvhw/drf.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-#include <nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+#include "nvrm/fbsr.h"
+#include "nvrm/rpcfn.h"
struct fbsr_item {
const char *type;
@@ -54,9 +48,9 @@ struct fbsr {
u64 sys_offset;
};
-static int
-fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper,
- u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object)
+int
+r535_fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper,
+ u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object)
{
struct nvkm_gsp_client *client = device->object.client;
struct nvkm_gsp *gsp = client->gsp;
@@ -105,7 +99,7 @@ fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target
rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i;
}
- ret = nvkm_gsp_rpc_wr(gsp, rpc, true);
+ ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_POLL);
if (ret)
return ret;
@@ -123,8 +117,8 @@ fbsr_send(struct fbsr *fbsr, struct fbsr_item *item)
struct nvkm_gsp_object memlist;
int ret;
- ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM,
- item->addr, item->size, NULL, &memlist);
+ ret = r535_fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM,
+ item->addr, item->size, NULL, &memlist);
if (ret)
return ret;
@@ -161,8 +155,8 @@ fbsr_init(struct fbsr *fbsr, struct sg_table *sgt, u64 items_size)
struct nvkm_gsp_object memlist;
int ret;
- ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST,
- 0, fbsr->size, sgt, &memlist);
+ ret = r535_fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST,
+ 0, fbsr->size, sgt, &memlist);
if (ret)
return ret;
@@ -206,22 +200,19 @@ fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory)
return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory));
}
-static void
-r535_instmem_resume(struct nvkm_instmem *imem)
+void
+r535_fbsr_resume(struct nvkm_gsp *gsp)
{
/* RM has restored VRAM contents already, so just need to free the sysmem buffer. */
- if (imem->rm.fbsr_valid) {
- nvkm_gsp_sg_free(imem->subdev.device, &imem->rm.fbsr);
- imem->rm.fbsr_valid = false;
- }
+ nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.fbsr);
}
static int
-r535_instmem_suspend(struct nvkm_instmem *imem)
+r535_fbsr_suspend(struct nvkm_gsp *gsp)
{
- struct nvkm_subdev *subdev = &imem->subdev;
+ struct nvkm_subdev *subdev = &gsp->subdev;
struct nvkm_device *device = subdev->device;
- struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_instmem *imem = device->imem;
struct nvkm_instobj *iobj;
struct fbsr fbsr = {};
struct fbsr_item *item, *temp;
@@ -262,7 +253,7 @@ r535_instmem_suspend(struct nvkm_instmem *imem)
fbsr.size += gsp->fb.bios.vga_workspace.size;
nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", fbsr.size);
- ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &imem->rm.fbsr);
+ ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &gsp->sr.fbsr);
if (ret)
goto done;
@@ -271,7 +262,7 @@ r535_instmem_suspend(struct nvkm_instmem *imem)
if (ret)
goto done_sgt;
- ret = fbsr_init(&fbsr, &imem->rm.fbsr, items_size);
+ ret = fbsr_init(&fbsr, &gsp->sr.fbsr, items_size);
if (WARN_ON(ret))
goto done_sgt;
@@ -282,12 +273,10 @@ r535_instmem_suspend(struct nvkm_instmem *imem)
goto done_sgt;
}
- imem->rm.fbsr_valid = true;
-
/* Cleanup everything except the sysmem backup, which will be removed after resume. */
done_sgt:
if (ret) /* ... unless we failed already. */
- nvkm_gsp_sg_free(device, &imem->rm.fbsr);
+ nvkm_gsp_sg_free(device, &gsp->sr.fbsr);
done:
list_for_each_entry_safe(item, temp, &fbsr.items, head) {
list_del(&item->head);
@@ -299,6 +288,12 @@ done:
return ret;
}
+const struct nvkm_rm_api_fbsr
+r535_fbsr = {
+ .suspend = r535_fbsr_suspend,
+ .resume = r535_fbsr_resume,
+};
+
static void *
r535_instmem_dtor(struct nvkm_instmem *imem)
{
@@ -319,11 +314,10 @@ r535_instmem_new(const struct nvkm_instmem_func *hw,
rm->dtor = r535_instmem_dtor;
rm->fini = hw->fini;
- rm->suspend = r535_instmem_suspend;
- rm->resume = r535_instmem_resume;
rm->memory_new = hw->memory_new;
rm->memory_wrap = hw->memory_wrap;
rm->zero = false;
+ rm->set_bar0_window_addr = hw->set_bar0_window_addr;
ret = nv50_instmem_new_(rm, device, type, inst, pinstmem);
if (ret)
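
The fbsr hunks above also show the RPC write API growing an explicit reply policy: nvkm_gsp_rpc_wr(gsp, rpc, true) becomes nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_POLL). Only NVKM_GSP_RPC_REPLY_POLL appears in this diff; the other enumerator names and their comments in the header-style sketch below are assumptions, shown only to make the shape of the new parameter concrete.

/* Assumed shape of the reply-policy enum replacing the old bool argument.
 * Only NVKM_GSP_RPC_REPLY_POLL is visible in this diff; the rest is an
 * illustrative guess.
 */
enum nvkm_gsp_rpc_reply_policy {
	NVKM_GSP_RPC_REPLY_NOWAIT,	/* assumed: fire and forget */
	NVKM_GSP_RPC_REPLY_RECV,	/* assumed: wait for and return the reply */
	NVKM_GSP_RPC_REPLY_POLL,	/* wait for completion, discard the reply */
};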
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
index 3454c7d29502..1ac5628c5140 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
@@ -19,11 +19,11 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
-#include "cgrp.h"
-#include "chan.h"
-#include "chid.h"
-#include "runl.h"
+#include <engine/fifo/priv.h>
+#include <engine/fifo/cgrp.h>
+#include <engine/fifo/chan.h>
+#include <engine/fifo/chid.h>
+#include <engine/fifo/runl.h>
#include <core/gpuobj.h>
#include <subdev/gsp.h>
@@ -31,24 +31,19 @@
#include <subdev/vfn.h>
#include <engine/gr.h>
+#include <rm/engine.h>
+
#include <nvhw/drf.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h>
-#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>
+#include "nvrm/fifo.h"
+#include "nvrm/engine.h"
static u32
r535_chan_doorbell_handle(struct nvkm_chan *chan)
{
- return (chan->cgrp->runl->id << 16) | chan->id;
+ struct nvkm_gsp *gsp = chan->rm.object.client->gsp;
+
+ return gsp->rm->gpu->fifo.chan.doorbell_handle(chan);
}
static void
@@ -77,50 +72,29 @@ r535_chan_ramfc_clear(struct nvkm_chan *chan)
#define CHID_PER_USERD 8
static int
-r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+r535_chan_alloc(struct nvkm_gsp_device *device, u32 handle, u32 nv2080_engine_type, u8 runq,
+ bool priv, int chid, u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr,
+ struct nvkm_vmm *vmm, u64 gpfifo_offset, u32 gpfifo_length,
+ struct nvkm_gsp_object *chan)
{
- struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
- struct nvkm_engn *engn;
- struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_gsp *gsp = device->object.client->gsp;
+ struct nvkm_fifo *fifo = gsp->subdev.device->fifo;
+ const int userd_p = chid / CHID_PER_USERD;
+ const int userd_i = chid % CHID_PER_USERD;
NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
- const int userd_p = chan->id / CHID_PER_USERD;
- const int userd_i = chan->id % CHID_PER_USERD;
- u32 eT = ~0;
- int ret;
-
- if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) {
- ret = nvkm_subdev_oneinit(&device->gr->engine.subdev);
- if (ret)
- return ret;
- }
-
- nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
- eT = engn->id;
- break;
- }
-
- if (WARN_ON(eT == ~0))
- return -EINVAL;
- chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev,
- fifo->rm.mthdbuf_size,
- &chan->rm.mthdbuf.addr, GFP_KERNEL);
- if (!chan->rm.mthdbuf.ptr)
- return -ENOMEM;
-
- args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, 0xf1f00000 | chan->id,
- fifo->func->chan.user.oclass, sizeof(*args),
- &chan->rm.object);
+ args = nvkm_gsp_rm_alloc_get(&device->object, handle,
+ fifo->func->chan.user.oclass, sizeof(*args), chan);
if (WARN_ON(IS_ERR(args)))
return PTR_ERR(args);
- args->gpFifoOffset = offset;
- args->gpFifoEntries = length / 8;
+ args->gpFifoOffset = gpfifo_offset;
+ args->gpFifoEntries = gpfifo_length / 8;
args->flags = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE);
args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE);
- args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, chan->runq);
+ args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, runq);
if (!priv)
args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE);
else
@@ -143,25 +117,25 @@ r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm,
args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE);
args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
- args->hVASpace = chan->vmm->rm.object.handle;
- args->engineType = eT;
+ args->hVASpace = vmm->rm.object.handle;
+ args->engineType = nv2080_engine_type;
- args->instanceMem.base = chan->inst->addr;
- args->instanceMem.size = chan->inst->size;
+ args->instanceMem.base = inst_addr;
+ args->instanceMem.size = fifo->func->chan.func->inst->size;
args->instanceMem.addressSpace = 2;
args->instanceMem.cacheAttrib = 1;
- args->userdMem.base = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
+ args->userdMem.base = userd_addr;
args->userdMem.size = fifo->func->chan.func->userd->size;
args->userdMem.addressSpace = 2;
args->userdMem.cacheAttrib = 1;
- args->ramfcMem.base = chan->inst->addr + 0;
+ args->ramfcMem.base = inst_addr;
args->ramfcMem.size = 0x200;
args->ramfcMem.addressSpace = 2;
args->ramfcMem.cacheAttrib = 1;
- args->mthdbufMem.base = chan->rm.mthdbuf.addr;
+ args->mthdbufMem.base = mthdbuf_addr;
args->mthdbufMem.size = fifo->rm.mthdbuf_size;
args->mthdbufMem.addressSpace = 1;
args->mthdbufMem.cacheAttrib = 0;
@@ -173,7 +147,44 @@ r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm,
args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
- ret = nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+ return nvkm_gsp_rm_alloc_wr(chan, args);
+}
+
+static int
+r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+ struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+ struct nvkm_engn *engn;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ const struct nvkm_rm_api *rmapi = device->gsp->rm->api;
+ u32 eT = ~0;
+ int ret;
+
+ if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) {
+ ret = nvkm_subdev_oneinit(&device->gr->engine.subdev);
+ if (ret)
+ return ret;
+ }
+
+ nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
+ eT = engn->id;
+ break;
+ }
+
+ if (WARN_ON(eT == ~0))
+ return -EINVAL;
+
+ chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev,
+ fifo->rm.mthdbuf_size,
+ &chan->rm.mthdbuf.addr, GFP_KERNEL);
+ if (!chan->rm.mthdbuf.ptr)
+ return -ENOMEM;
+
+ ret = rmapi->fifo->chan.alloc(&chan->vmm->rm.device, NVKM_RM_CHAN(chan->id),
+ eT, chan->runq, priv, chan->id, chan->inst->addr,
+ nvkm_memory_addr(chan->userd.mem) + chan->userd.base,
+ chan->rm.mthdbuf.addr, chan->vmm, offset, length,
+ &chan->rm.object);
if (ret)
return ret;
@@ -215,123 +226,8 @@ r535_chan_ramfc = {
.priv = true,
};
-struct r535_chan_userd {
- struct nvkm_memory *mem;
- struct nvkm_memory *map;
- int chid;
- u32 used;
-
- struct list_head head;
-} *userd;
-
-static void
-r535_chan_id_put(struct nvkm_chan *chan)
-{
- struct nvkm_runl *runl = chan->cgrp->runl;
- struct nvkm_fifo *fifo = runl->fifo;
- struct r535_chan_userd *userd;
-
- mutex_lock(&fifo->userd.mutex);
- list_for_each_entry(userd, &fifo->userd.list, head) {
- if (userd->map == chan->userd.mem) {
- u32 chid = chan->userd.base / chan->func->userd->size;
-
- userd->used &= ~BIT(chid);
- if (!userd->used) {
- nvkm_memory_unref(&userd->map);
- nvkm_memory_unref(&userd->mem);
- nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
- list_del(&userd->head);
- kfree(userd);
- }
-
- break;
- }
- }
- mutex_unlock(&fifo->userd.mutex);
-
-}
-
-static int
-r535_chan_id_get_locked(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
-{
- const u32 userd_size = CHID_PER_USERD * chan->func->userd->size;
- struct nvkm_runl *runl = chan->cgrp->runl;
- struct nvkm_fifo *fifo = runl->fifo;
- struct r535_chan_userd *userd;
- u32 chid;
- int ret;
-
- if (ouserd + chan->func->userd->size >= userd_size ||
- (ouserd & (chan->func->userd->size - 1))) {
- RUNL_DEBUG(runl, "ouserd %llx", ouserd);
- return -EINVAL;
- }
-
- chid = div_u64(ouserd, chan->func->userd->size);
-
- list_for_each_entry(userd, &fifo->userd.list, head) {
- if (userd->mem == muserd) {
- if (userd->used & BIT(chid))
- return -EBUSY;
- break;
- }
- }
-
- if (&userd->head == &fifo->userd.list) {
- if (nvkm_memory_size(muserd) < userd_size) {
- RUNL_DEBUG(runl, "userd too small");
- return -EINVAL;
- }
-
- userd = kzalloc(sizeof(*userd), GFP_KERNEL);
- if (!userd)
- return -ENOMEM;
-
- userd->chid = nvkm_chid_get(runl->chid, chan);
- if (userd->chid < 0) {
- ret = userd->chid;
- kfree(userd);
- return ret;
- }
-
- userd->mem = nvkm_memory_ref(muserd);
-
- ret = nvkm_memory_kmap(userd->mem, &userd->map);
- if (ret) {
- nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
- kfree(userd);
- return ret;
- }
-
-
- list_add(&userd->head, &fifo->userd.list);
- }
-
- userd->used |= BIT(chid);
-
- chan->userd.mem = nvkm_memory_ref(userd->map);
- chan->userd.base = ouserd;
-
- return (userd->chid * CHID_PER_USERD) + chid;
-}
-
-static int
-r535_chan_id_get(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
-{
- struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
- int ret;
-
- mutex_lock(&fifo->userd.mutex);
- ret = r535_chan_id_get_locked(chan, muserd, ouserd);
- mutex_unlock(&fifo->userd.mutex);
- return ret;
-}
-
static const struct nvkm_chan_func
r535_chan = {
- .id_get = r535_chan_id_get,
- .id_put = r535_chan_id_put,
.inst = &gf100_chan_inst,
.userd = &gv100_chan_userd,
.ramfc = &r535_chan_ramfc,
@@ -340,10 +236,6 @@ r535_chan = {
.doorbell_handle = r535_chan_doorbell_handle,
};
-static const struct nvkm_cgrp_func
-r535_cgrp = {
-};
-
static int
r535_engn_nonstall(struct nvkm_engn *engn)
{
@@ -356,7 +248,7 @@ r535_engn_nonstall(struct nvkm_engn *engn)
}
static const struct nvkm_engn_func
-r535_ce = {
+r535_engn_ce = {
.nonstall = r535_engn_nonstall,
};
@@ -376,7 +268,7 @@ r535_gr_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *c
}
static const struct nvkm_engn_func
-r535_gr = {
+r535_engn_gr = {
.nonstall = r535_engn_nonstall,
.ctor2 = r535_gr_ctor,
};
@@ -449,57 +341,86 @@ r535_runl = {
.allow = r535_runl_allow,
};
-static int
-r535_fifo_2080_type(enum nvkm_subdev_type type, int inst)
+void
+r535_fifo_rc_chid(struct nvkm_fifo *fifo, int chid)
{
- switch (type) {
- case NVKM_ENGINE_GR: return NV2080_ENGINE_TYPE_GR0;
- case NVKM_ENGINE_CE: return NV2080_ENGINE_TYPE_COPY0 + inst;
- case NVKM_ENGINE_SEC2: return NV2080_ENGINE_TYPE_SEC2;
- case NVKM_ENGINE_NVDEC: return NV2080_ENGINE_TYPE_NVDEC0 + inst;
- case NVKM_ENGINE_NVENC: return NV2080_ENGINE_TYPE_NVENC0 + inst;
- case NVKM_ENGINE_NVJPG: return NV2080_ENGINE_TYPE_NVJPEG0 + inst;
- case NVKM_ENGINE_OFA: return NV2080_ENGINE_TYPE_OFA;
- case NVKM_ENGINE_SW: return NV2080_ENGINE_TYPE_SW;
- default:
- break;
+ struct nvkm_chan *chan;
+ unsigned long flags;
+
+ chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags);
+ if (!chan) {
+ nvkm_error(&fifo->engine.subdev, "rc: chid %d not found!\n", chid);
+ return;
}
- WARN_ON(1);
- return -EINVAL;
+ nvkm_chan_error(chan, false);
+ nvkm_chan_put(&chan, flags);
}
static int
-r535_fifo_engn_type(RM_ENGINE_TYPE rm, enum nvkm_subdev_type *ptype)
+r535_fifo_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
{
+ rpc_rc_triggered_v17_02 *msg = repv;
+ struct nvkm_gsp *gsp = priv;
+
+ if (WARN_ON(repc < sizeof(*msg)))
+ return -EINVAL;
+
+ nvkm_error(&gsp->subdev, "rc: engn:%08x chid:%d type:%d scope:%d part:%d\n",
+ msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope,
+ msg->partitionAttributionId);
+
+ r535_fifo_rc_chid(gsp->subdev.device->fifo, msg->chid);
+ return 0;
+}
+
+static int
+r535_fifo_xlat_rm_engine_type(u32 rm, enum nvkm_subdev_type *ptype, int *p2080)
+{
+#define RM_ENGINE_TYPE(RM,NVKM,INST) \
+ RM_ENGINE_TYPE_##RM: \
+ *ptype = NVKM_ENGINE_##NVKM; \
+ *p2080 = NV2080_ENGINE_TYPE_##RM; \
+ return INST
+
switch (rm) {
- case RM_ENGINE_TYPE_GR0:
- *ptype = NVKM_ENGINE_GR;
- return 0;
- case RM_ENGINE_TYPE_COPY0...RM_ENGINE_TYPE_COPY9:
- *ptype = NVKM_ENGINE_CE;
- return rm - RM_ENGINE_TYPE_COPY0;
- case RM_ENGINE_TYPE_NVDEC0...RM_ENGINE_TYPE_NVDEC7:
- *ptype = NVKM_ENGINE_NVDEC;
- return rm - RM_ENGINE_TYPE_NVDEC0;
- case RM_ENGINE_TYPE_NVENC0...RM_ENGINE_TYPE_NVENC2:
- *ptype = NVKM_ENGINE_NVENC;
- return rm - RM_ENGINE_TYPE_NVENC0;
- case RM_ENGINE_TYPE_SW:
- *ptype = NVKM_ENGINE_SW;
- return 0;
- case RM_ENGINE_TYPE_SEC2:
- *ptype = NVKM_ENGINE_SEC2;
- return 0;
- case RM_ENGINE_TYPE_NVJPEG0...RM_ENGINE_TYPE_NVJPEG7:
- *ptype = NVKM_ENGINE_NVJPG;
- return rm - RM_ENGINE_TYPE_NVJPEG0;
- case RM_ENGINE_TYPE_OFA:
- *ptype = NVKM_ENGINE_OFA;
- return 0;
+ case RM_ENGINE_TYPE( GR0, GR, 0);
+ case RM_ENGINE_TYPE( COPY0, CE, 0);
+ case RM_ENGINE_TYPE( COPY1, CE, 1);
+ case RM_ENGINE_TYPE( COPY2, CE, 2);
+ case RM_ENGINE_TYPE( COPY3, CE, 3);
+ case RM_ENGINE_TYPE( COPY4, CE, 4);
+ case RM_ENGINE_TYPE( COPY5, CE, 5);
+ case RM_ENGINE_TYPE( COPY6, CE, 6);
+ case RM_ENGINE_TYPE( COPY7, CE, 7);
+ case RM_ENGINE_TYPE( COPY8, CE, 8);
+ case RM_ENGINE_TYPE( COPY9, CE, 9);
+ case RM_ENGINE_TYPE( NVDEC0, NVDEC, 0);
+ case RM_ENGINE_TYPE( NVDEC1, NVDEC, 1);
+ case RM_ENGINE_TYPE( NVDEC2, NVDEC, 2);
+ case RM_ENGINE_TYPE( NVDEC3, NVDEC, 3);
+ case RM_ENGINE_TYPE( NVDEC4, NVDEC, 4);
+ case RM_ENGINE_TYPE( NVDEC5, NVDEC, 5);
+ case RM_ENGINE_TYPE( NVDEC6, NVDEC, 6);
+ case RM_ENGINE_TYPE( NVDEC7, NVDEC, 7);
+ case RM_ENGINE_TYPE( NVENC0, NVENC, 0);
+ case RM_ENGINE_TYPE( NVENC1, NVENC, 1);
+ case RM_ENGINE_TYPE( NVENC2, NVENC, 2);
+ case RM_ENGINE_TYPE(NVJPEG0, NVJPG, 0);
+ case RM_ENGINE_TYPE(NVJPEG1, NVJPG, 1);
+ case RM_ENGINE_TYPE(NVJPEG2, NVJPG, 2);
+ case RM_ENGINE_TYPE(NVJPEG3, NVJPG, 3);
+ case RM_ENGINE_TYPE(NVJPEG4, NVJPG, 4);
+ case RM_ENGINE_TYPE(NVJPEG5, NVJPG, 5);
+ case RM_ENGINE_TYPE(NVJPEG6, NVJPG, 6);
+ case RM_ENGINE_TYPE(NVJPEG7, NVJPG, 7);
+ case RM_ENGINE_TYPE( SW, SW, 0);
+ case RM_ENGINE_TYPE( SEC2, SEC2, 0);
+ case RM_ENGINE_TYPE( OFA, OFA, 0);
default:
return -EINVAL;
}
+#undef RM_ENGINE_TYPE
}
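
For readers unfamiliar with the case-label macro above, a single table entry expands mechanically as follows:

	case RM_ENGINE_TYPE(COPY1, CE, 1);

becomes

	case RM_ENGINE_TYPE_COPY1:
		*ptype = NVKM_ENGINE_CE;
		*p2080 = NV2080_ENGINE_TYPE_COPY1;
		return 1;

so each row of the table supplies the case label, both out-parameters, and the instance-number return in one line, collapsing what were previously two separate translation functions (r535_fifo_engn_type() and r535_fifo_2080_type()) into a single switch.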
static int
@@ -536,16 +457,19 @@ static int
r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->engine.subdev;
- struct nvkm_gsp *gsp = subdev->device->gsp;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_rm *rm = gsp->rm;
struct nvkm_runl *runl;
struct nvkm_engn *engn;
- u32 cgids = 2048;
u32 chids = 2048;
+ u32 first = rm->api->fifo->rsvd_chids;
+ u32 count = chids - first;
int ret;
NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl;
- if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, cgids, 0, cgids, &fifo->cgid)) ||
- (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, 0, chids, &fifo->chid)))
+ if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, first, count, &fifo->cgid)) ||
+ (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, first, count, &fifo->chid)))
return ret;
ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
@@ -576,25 +500,43 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
if (!runl)
continue;
- inst = r535_fifo_engn_type(rmid, &type);
+ inst = rm->api->fifo->xlat_rm_engine_type(rmid, &type, &nv2080);
if (inst < 0) {
nvkm_warn(subdev, "RM_ENGINE_TYPE 0x%x\n", rmid);
nvkm_runl_del(runl);
continue;
}
- nv2080 = r535_fifo_2080_type(type, inst);
- if (nv2080 < 0) {
+ /* Skip SW engine - there's currently no support for NV SW classes. */
+ if (type == NVKM_ENGINE_SW)
+ continue;
+
+ /* Skip lone GRCEs (ones not paired with GR on a runlist), as they
+ * don't appear to function as async copy engines.
+ */
+ if (type == NVKM_ENGINE_CE &&
+ rm->gpu->ce.grce_mask &&
+ (rm->gpu->ce.grce_mask(device) & BIT(inst)) &&
+ !nvkm_runl_find_engn(engn, runl, engn->engine->subdev.type == NVKM_ENGINE_GR)) {
+ RUNL_DEBUG(runl, "skip LCE %d - GRCE without GR", inst);
+ nvkm_runl_del(runl);
+ continue;
+ }
+
+ ret = nvkm_rm_engine_new(gsp->rm, type, inst);
+ if (ret) {
nvkm_runl_del(runl);
continue;
}
+ engn = NULL;
+
switch (type) {
case NVKM_ENGINE_CE:
- engn = nvkm_runl_add(runl, nv2080, &r535_ce, type, inst);
+ engn = nvkm_runl_add(runl, nv2080, &r535_engn_ce, type, inst);
break;
case NVKM_ENGINE_GR:
- engn = nvkm_runl_add(runl, nv2080, &r535_gr, type, inst);
+ engn = nvkm_runl_add(runl, nv2080, &r535_engn_gr, type, inst);
break;
case NVKM_ENGINE_NVDEC:
case NVKM_ENGINE_NVENC:
@@ -633,7 +575,7 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
}
- return r535_fifo_ectx_size(fifo);
+ return rm->api->fifo->ectx_size(fifo);
}
static void
@@ -646,6 +588,7 @@ int
r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
{
+ const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu;
struct nvkm_fifo_func *rm;
if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
@@ -654,12 +597,20 @@ r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
rm->dtor = r535_fifo_dtor;
rm->runl_ctor = r535_fifo_runl_ctor;
rm->runl = &r535_runl;
- rm->cgrp = hw->cgrp;
- rm->cgrp.func = &r535_cgrp;
- rm->chan = hw->chan;
+ rm->chan.user.oclass = gpu->fifo.chan.class;
rm->chan.func = &r535_chan;
rm->nonstall = &ga100_fifo_nonstall;
rm->nonstall_ctor = ga100_fifo_nonstall_ctor;
return nvkm_fifo_new_(rm, device, type, inst, pfifo);
}
+
+const struct nvkm_rm_api_fifo
+r535_fifo = {
+ .xlat_rm_engine_type = r535_fifo_xlat_rm_engine_type,
+ .ectx_size = r535_fifo_ectx_size,
+ .rc_triggered = r535_fifo_rc_triggered,
+ .chan = {
+ .alloc = r535_chan_alloc,
+ },
+};
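
Two bits of channel arithmetic from the fifo code above are worth spelling out: the doorbell token previously hard-coded in r535_chan_doorbell_handle() (and now returned by the per-GPU gpu->fifo.chan.doorbell_handle() hook) packs the runlist id and channel id into one 32-bit value, and USERD pages are shared between CHID_PER_USERD (8) channels at a time. A compilable sketch, with arbitrary sample values:

/* Illustrative userspace sketch of the doorbell and USERD math above. */
#include <stdio.h>

#define CHID_PER_USERD 8

int main(void)
{
	unsigned runl_id = 3, chid = 42;
	unsigned doorbell = (runl_id << 16) | chid;	/* old r535 formula */
	unsigned userd_p = chid / CHID_PER_USERD;	/* which USERD page */
	unsigned userd_i = chid % CHID_PER_USERD;	/* slot within the page */

	printf("doorbell %08x userd page %u slot %u\n",
	       doorbell, userd_p, userd_i);
	return 0;
}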
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
new file mode 100644
index 000000000000..ddb57d5e73d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/gr.h>
+
+#include <core/memory.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu/vmm.h>
+#include <engine/fifo/priv.h>
+#include <engine/gr/priv.h>
+
+#include <nvif/if900d.h>
+
+#include <nvhw/drf.h>
+
+#include "nvrm/gr.h"
+#include "nvrm/vmm.h"
+
+#define r535_gr(p) container_of((p), struct r535_gr, base)
+
+static void *
+r535_gr_chan_dtor(struct nvkm_object *object)
+{
+ struct r535_gr_chan *grc = container_of(object, typeof(*grc), object);
+ struct r535_gr *gr = grc->gr;
+
+ for (int i = 0; i < gr->ctxbuf_nr; i++) {
+ nvkm_vmm_put(grc->vmm, &grc->vma[i]);
+ nvkm_memory_unref(&grc->mem[i]);
+ }
+
+ nvkm_vmm_unref(&grc->vmm);
+ return grc;
+}
+
+static const struct nvkm_object_func
+r535_gr_chan = {
+ .dtor = r535_gr_chan_dtor,
+};
+
+int
+r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm,
+ struct nvkm_memory **pmem, struct nvkm_vma **pvma,
+ struct nvkm_gsp_object *chan)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice,
+ NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ ctrl->engineType = 1;
+ ctrl->hChanClient = vmm->rm.client.object.handle;
+ ctrl->hObject = chan->handle;
+
+ for (int i = 0; i < gr->ctxbuf_nr; i++) {
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry =
+ &ctrl->promoteEntry[ctrl->entryCount];
+ const bool alloc = golden || !gr->ctxbuf[i].global;
+ int ret;
+
+ entry->bufferId = gr->ctxbuf[i].bufferId;
+ entry->bInitialize = gr->ctxbuf[i].init && alloc;
+
+ if (alloc) {
+ ret = nvkm_memory_new(device, gr->ctxbuf[i].init ?
+ NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST,
+ gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page,
+ gr->ctxbuf[i].init, &pmem[i]);
+ if (WARN_ON(ret))
+ return ret;
+
+ if (gr->ctxbuf[i].bufferId ==
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP)
+ entry->bNonmapped = 1;
+ } else {
+ if (gr->ctxbuf[i].bufferId ==
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP)
+ continue;
+
+ pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]);
+ }
+
+ if (!entry->bNonmapped) {
+ struct gf100_vmm_map_v0 args = {
+ .priv = 1,
+ .ro = gr->ctxbuf[i].ro,
+ };
+
+ mutex_lock(&vmm->mutex.vmm);
+ ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align,
+ nvkm_memory_size(pmem[i]), &pvma[i]);
+ mutex_unlock(&vmm->mutex.vmm);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args));
+ if (ret)
+ return ret;
+
+ entry->gpuVirtAddr = pvma[i]->addr;
+ }
+
+ if (entry->bInitialize) {
+ entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]);
+ entry->size = gr->ctxbuf[i].size;
+ entry->physAttr = 4;
+ }
+
+ nvkm_debug(subdev,
+ "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n",
+ entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size,
+ entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped);
+
+ ctrl->entryCount++;
+ }
+
+ return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl);
+}
+
+int
+r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct r535_gr *gr = r535_gr(base);
+ struct r535_gr_chan *grc;
+ int ret;
+
+ if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object);
+ grc->gr = gr;
+ grc->vmm = nvkm_vmm_ref(chan->vmm);
+ grc->chan = chan;
+ *pobject = &grc->object;
+
+ ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+u64
+r535_gr_units(struct nvkm_gr *gr)
+{
+ struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp;
+
+ return (gsp->gr.tpcs << 8) | gsp->gr.gpcs;
+}
+
+void
+r535_gr_get_ctxbuf_info(struct r535_gr *gr, int i,
+ struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO *info)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ static const struct {
+ u32 id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */
+ u32 id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */
+ bool global;
+ bool init;
+ bool ro;
+ } map[] = {
+#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \
+ .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \
+ .global = (G), .init = (I), .ro = (R) }
+#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R))
+ /* global init ro */
+ _A( GRAPHICS, MAIN, false, true, false),
+ _B( PATCH, false, true, false),
+ _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB, true, false, false),
+ _B( PAGEPOOL, true, false, false),
+ _B( ATTRIBUTE_CB, true, false, false),
+ _B( RTV_CB_GLOBAL, true, false, false),
+ _B( FECS_EVENT, true, true, false),
+ _B( PRIV_ACCESS_MAP, true, true, true),
+#undef _B
+#undef _A
+ };
+ u32 size = info->size;
+ u8 align, page;
+ int id;
+
+ for (id = 0; id < ARRAY_SIZE(map); id++) {
+ if (map[id].id0 == i)
+ break;
+ }
+
+ nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i,
+ size, (id < ARRAY_SIZE(map)) ? "*" : "");
+ if (id >= ARRAY_SIZE(map))
+ return;
+
+ if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN)
+ size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */
+
+ if (size >= 1 << 21) page = 21;
+ else if (size >= 1 << 16) page = 16;
+ else page = 12;
+
+ if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB)
+ align = order_base_2(size);
+ else
+ align = page;
+
+ if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
+ return;
+
+ gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1;
+ gr->ctxbuf[gr->ctxbuf_nr].size = size;
+ gr->ctxbuf[gr->ctxbuf_nr].page = page;
+ gr->ctxbuf[gr->ctxbuf_nr].align = align;
+ gr->ctxbuf[gr->ctxbuf_nr].global = map[id].global;
+ gr->ctxbuf[gr->ctxbuf_nr].init = map[id].init;
+ gr->ctxbuf[gr->ctxbuf_nr].ro = map[id].ro;
+ gr->ctxbuf_nr++;
+
+ if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) {
+ if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
+ return;
+
+ gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1];
+ gr->ctxbuf[gr->ctxbuf_nr].bufferId =
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP;
+ gr->ctxbuf_nr++;
+ }
+}
+
+static int
+r535_gr_get_ctxbufs_info(struct r535_gr *gr)
+{
+ NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_gsp *gsp = subdev->device->gsp;
+
+ info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
+ sizeof(*info));
+ if (WARN_ON(IS_ERR(info)))
+ return PTR_ERR(info);
+
+ for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++)
+ r535_gr_get_ctxbuf_info(gr, i, &info->engineContextBuffersInfo[0].engine[i]);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
+ return 0;
+}
+
+int
+r535_gr_oneinit(struct nvkm_gr *base)
+{
+ struct r535_gr *gr = container_of(base, typeof(*gr), base);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_rm *rm = gsp->rm;
+ struct {
+ struct nvkm_memory *inst;
+ struct nvkm_vmm *vmm;
+ struct nvkm_gsp_object chan;
+ struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+ } golden = {};
+ struct nvkm_gsp_object threed;
+ int ret;
+
+ /* Allocate a channel to use for golden context init. */
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst);
+ if (ret)
+ goto done;
+
+ ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm);
+ if (ret)
+ goto done;
+
+ ret = r535_mmu_vaspace_new(golden.vmm, NVKM_RM_VASPACE, false);
+ if (ret)
+ goto done;
+
+ ret = rm->api->fifo->chan.alloc(&golden.vmm->rm.device, NVKM_RM_CHAN(0),
+ 1, 0, true, rm->api->fifo->rsvd_chids,
+ nvkm_memory_addr(golden.inst),
+ nvkm_memory_addr(golden.inst) + 0x1000,
+ nvkm_memory_addr(golden.inst) + 0x2000,
+ golden.vmm, 0, 0x1000, &golden.chan);
+ if (ret)
+ goto done;
+
+ /* Fetch context buffer info from RM and allocate each of them here to use
+ * during golden context init (or later as a global context buffer).
+ *
+ * Also build the information that'll be used to create channel contexts.
+ */
+ ret = rm->api->gr->get_ctxbufs_info(gr);
+ if (ret)
+ goto done;
+
+ /* Promote golden context to RM. */
+ ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan);
+ if (ret)
+ goto done;
+
+ /* Allocate 3D class on channel to trigger golden context init in RM. */
+ ret = nvkm_gsp_rm_alloc(&golden.chan, NVKM_RM_THREED, rm->gpu->gr.class.threed, 0, &threed);
+ if (ret)
+ goto done;
+
+ /* There's no need to keep the golden channel around, as RM caches the context. */
+ nvkm_gsp_rm_free(&threed);
+done:
+ nvkm_gsp_rm_free(&golden.chan);
+ for (int i = gr->ctxbuf_nr - 1; i >= 0; i--)
+ nvkm_vmm_put(golden.vmm, &golden.vma[i]);
+ nvkm_vmm_unref(&golden.vmm);
+ nvkm_memory_unref(&golden.inst);
+ return ret;
+}

+
+void *
+r535_gr_dtor(struct nvkm_gr *base)
+{
+ struct r535_gr *gr = r535_gr(base);
+
+ while (gr->ctxbuf_nr)
+ nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]);
+
+ kfree(gr->base.func);
+ return gr;
+}
+
+const struct nvkm_rm_api_gr
+r535_gr = {
+ .get_ctxbufs_info = r535_gr_get_ctxbufs_info,
+};
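
The page/alignment selection in r535_gr_get_ctxbuf_info() above deserves a worked example: buffers of 2 MiB or more use 2 MiB pages, 64 KiB or more use 64 KiB pages, and everything else 4 KiB; the attribute CB additionally rounds its alignment up to the buffer's order, and the MAIN buffer pads its size for per-subctx headers. The compilable sketch below reproduces the page/align logic only; order_base_2() is a stand-in for the kernel helper (assumed equivalent to ceil(log2(n))).

/* Userspace sketch of the ctxbuf page/alignment selection above. */
#include <stdio.h>

static unsigned order_base_2(unsigned long v)	/* assumed: ceil(log2(v)) */
{
	unsigned o = 0;

	while ((1UL << o) < v)
		o++;
	return o;
}

int main(void)
{
	unsigned long sizes[] = { 0x1000, 0x20000, 0x400000 };

	for (int i = 0; i < 3; i++) {
		unsigned long size = sizes[i];
		unsigned page = size >= (1 << 21) ? 21 :
				size >= (1 << 16) ? 16 : 12;

		printf("size 0x%06lx -> page shift %u, attr-cb align %u\n",
		       size, page, order_base_2(size));
	}
	return 0;
}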
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
index db2602e88006..baf42339f93e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
@@ -19,9 +19,12 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <rm/rpc.h>
+
#include "priv.h"
#include <core/pci.h>
+#include <subdev/pci/priv.h>
#include <subdev/timer.h>
#include <subdev/vfn.h>
#include <engine/fifo/chan.h>
@@ -30,29 +33,11 @@
#include <nvfw/fw.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-#include <nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h>
-#include <nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h>
-#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
-#include <nvrm/535.113.01/nvidia/generated/g_os_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+#include "nvrm/gsp.h"
+#include "nvrm/rpcfn.h"
+#include "nvrm/msgfn.h"
+#include "nvrm/event.h"
+#include "nvrm/fifo.h"
#include <linux/acpi.h>
#include <linux/ctype.h>
@@ -60,990 +45,6 @@
extern struct dentry *nouveau_debugfs_root;
-#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
-#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
-
-/**
- * DOC: GSP message queue element
- *
- * https://github.com/NVIDIA/open-gpu-kernel-modules/blob/535/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h
- *
- * The GSP command queue and status queue are message queues for the
- * communication between software and GSP. The software submits the GSP
- * RPC via the GSP command queue, GSP writes the status of the submitted
- * RPC in the status queue.
- *
- * A GSP message queue element consists of three parts:
- *
- * - message element header (struct r535_gsp_msg), which mostly maintains
- * the metadata for queuing the element.
- *
- * - RPC message header (struct nvfw_gsp_rpc), which maintains the info
- * of the RPC. E.g., the RPC function number.
- *
- * - The payload, where the RPC message stays. E.g. the params of a
- * specific RPC function. Some RPC functions also have their headers
- * in the payload. E.g. rm_alloc, rm_control.
- *
- * The memory layout of a GSP message element can be illustrated below::
- *
- * +------------------------+
- * | Message Element Header |
- * | (r535_gsp_msg) |
- * | |
- * | (r535_gsp_msg.data) |
- * | | |
- * |----------V-------------|
- * | GSP RPC Header |
- * | (nvfw_gsp_rpc) |
- * | |
- * | (nvfw_gsp_rpc.data) |
- * | | |
- * |----------V-------------|
- * | Payload |
- * | |
- * | header(optional) |
- * | params |
- * +------------------------+
- *
- * The max size of a message queue element is 16 pages (including the
- * headers). When a GSP message to be sent is larger than 16 pages, the
- * message should be split into multiple elements and sent accordingly.
- *
- * Within such a set of split elements, the first element carries the
- * expected function number, while the remaining elements are sent with
- * the function number NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD.
- *
- * GSP consumes the elements from the cmdq and always writes the result
- * back to the msgq. The result is also formed as split elements.
- *
- * Terminology:
- *
- * - gsp_msg(msg): GSP message element (element header + GSP RPC header +
- * payload)
- * - gsp_rpc(rpc): GSP RPC (RPC header + payload)
- * - gsp_rpc_buf: buffer for (GSP RPC header + payload)
- * - gsp_rpc_len: size of (GSP RPC header + payload)
- * - params_size: size of params in the payload
- * - payload_size: size of (header if exists + params) in the payload
- */
-
-struct r535_gsp_msg {
- u8 auth_tag_buffer[16];
- u8 aad_buffer[16];
- u32 checksum;
- u32 sequence;
- u32 elem_count;
- u32 pad;
- u8 data[];
-};
-
-struct nvfw_gsp_rpc {
- u32 header_version;
- u32 signature;
- u32 length;
- u32 function;
- u32 rpc_result;
- u32 rpc_result_private;
- u32 sequence;
- union {
- u32 spare;
- u32 cpuRmGfid;
- };
- u8 data[];
-};
-
-#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
-
-#define to_gsp_hdr(p, header) \
- container_of((void *)p, typeof(*header), data)
-
-#define to_payload_hdr(p, header) \
- container_of((void *)p, typeof(*header), params)
-
-static int
-r535_rpc_status_to_errno(uint32_t rpc_status)
-{
- switch (rpc_status) {
- case 0x55: /* NV_ERR_NOT_READY */
- case 0x66: /* NV_ERR_TIMEOUT_RETRY */
- return -EBUSY;
- case 0x51: /* NV_ERR_NO_MEMORY */
- return -ENOMEM;
- default:
- return -EINVAL;
- }
-}
-
-static int
-r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime)
-{
- u32 size, rptr = *gsp->msgq.rptr;
- int used;
-
- size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len,
- GSP_PAGE_SIZE);
- if (WARN_ON(!size || size >= gsp->msgq.cnt))
- return -EINVAL;
-
- do {
- u32 wptr = *gsp->msgq.wptr;
-
- used = wptr + gsp->msgq.cnt - rptr;
- if (used >= gsp->msgq.cnt)
- used -= gsp->msgq.cnt;
- if (used >= size)
- break;
-
- usleep_range(1, 2);
- } while (--(*ptime));
-
- if (WARN_ON(!*ptime))
- return -ETIMEDOUT;
-
- return used;
-}
-
-static struct r535_gsp_msg *
-r535_gsp_msgq_get_entry(struct nvkm_gsp *gsp)
-{
- u32 rptr = *gsp->msgq.rptr;
-
- /* Skip the first page, which is the message queue info */
- return (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE +
- rptr * GSP_PAGE_SIZE);
-}
-
-/**
- * DOC: Receive a GSP message queue element
- *
- * Receiving a GSP message queue element from the message queue consists of
- * the following steps:
- *
- * - Peek the element from the queue: r535_gsp_msgq_peek().
- * Peek the first page of the element to determine the total size of the
- * message before allocating the proper memory.
- *
- * - Allocate memory for the message.
- * Once the total size of the message is determined from the GSP message
- * queue element, the caller of r535_gsp_msgq_recv() allocates the
- * required memory.
- *
- * - Receive the message: r535_gsp_msgq_recv().
- * Copy the message into the allocated memory. Advance the read pointer.
- * If the message is a large GSP message, r535_gsp_msgq_recv() calls
- * r535_gsp_msgq_recv_one_elem() repeatedly to receive continuation parts
- * until the complete message is received.
- * r535_gsp_msgq_recv() assembles the payloads of continuation parts into
- * the return of the large GSP message.
- *
- * - Free the allocated memory: r535_gsp_msg_done().
- * The user is responsible for freeing the memory allocated for the GSP
- * message pages after they have been processed.
- */
-static void *
-r535_gsp_msgq_peek(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
-{
- struct r535_gsp_msg *mqe;
- int ret;
-
- ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries);
- if (ret < 0)
- return ERR_PTR(ret);
-
- mqe = r535_gsp_msgq_get_entry(gsp);
-
- return mqe->data;
-}
-
-struct r535_gsp_msg_info {
- int *retries;
- u32 gsp_rpc_len;
- void *gsp_rpc_buf;
- bool continuation;
-};
-
-static void
-r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl);
-
-static void *
-r535_gsp_msgq_recv_one_elem(struct nvkm_gsp *gsp,
- struct r535_gsp_msg_info *info)
-{
- u8 *buf = info->gsp_rpc_buf;
- u32 rptr = *gsp->msgq.rptr;
- struct r535_gsp_msg *mqe;
- u32 size, expected, len;
- int ret;
-
- expected = info->gsp_rpc_len;
-
- ret = r535_gsp_msgq_wait(gsp, expected, info->retries);
- if (ret < 0)
- return ERR_PTR(ret);
-
- mqe = r535_gsp_msgq_get_entry(gsp);
-
- if (info->continuation) {
- struct nvfw_gsp_rpc *rpc = (struct nvfw_gsp_rpc *)mqe->data;
-
- if (rpc->function != NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD) {
- nvkm_error(&gsp->subdev,
- "Not a continuation of a large RPC\n");
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
- return ERR_PTR(-EIO);
- }
- }
-
- size = ALIGN(expected + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
-
- len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
- len = min_t(u32, expected, len);
-
- if (info->continuation)
- memcpy(buf, mqe->data + sizeof(struct nvfw_gsp_rpc),
- len - sizeof(struct nvfw_gsp_rpc));
- else
- memcpy(buf, mqe->data, len);
-
- expected -= len;
-
- if (expected) {
- mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
- memcpy(buf + len, mqe, expected);
- }
-
- rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
-
- mb();
- (*gsp->msgq.rptr) = rptr;
- return buf;
-}
-
-static void *
-r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
-{
- struct r535_gsp_msg *mqe;
- const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*mqe);
- struct nvfw_gsp_rpc *rpc;
- struct r535_gsp_msg_info info = {0};
- u32 expected = gsp_rpc_len;
- void *buf;
-
- mqe = r535_gsp_msgq_get_entry(gsp);
- rpc = (struct nvfw_gsp_rpc *)mqe->data;
-
- if (WARN_ON(rpc->length > max_rpc_size))
- return NULL;
-
- buf = kvmalloc(max_t(u32, rpc->length, expected), GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- info.gsp_rpc_buf = buf;
- info.retries = retries;
- info.gsp_rpc_len = rpc->length;
-
- buf = r535_gsp_msgq_recv_one_elem(gsp, &info);
- if (IS_ERR(buf)) {
- kvfree(info.gsp_rpc_buf);
- info.gsp_rpc_buf = NULL;
- return buf;
- }
-
- if (expected <= max_rpc_size)
- return buf;
-
- info.gsp_rpc_buf += info.gsp_rpc_len;
- expected -= info.gsp_rpc_len;
-
- while (expected) {
- u32 size;
-
- rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries);
- if (IS_ERR_OR_NULL(rpc)) {
- kfree(buf);
- return rpc;
- }
-
- info.gsp_rpc_len = rpc->length;
- info.continuation = true;
-
- rpc = r535_gsp_msgq_recv_one_elem(gsp, &info);
- if (IS_ERR_OR_NULL(rpc)) {
- kvfree(buf);
- return rpc;
- }
-
- size = info.gsp_rpc_len - sizeof(*rpc);
- expected -= size;
- info.gsp_rpc_buf += size;
- }
-
- rpc = buf;
- rpc->length = gsp_rpc_len;
- return buf;
-}
-
-static int
-r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *rpc)
-{
- struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
- struct r535_gsp_msg *cqe;
- u32 gsp_rpc_len = msg->checksum;
- u64 *ptr = (void *)msg;
- u64 *end;
- u64 csum = 0;
- int free, time = 1000000;
- u32 wptr, size, step, len;
- u32 off = 0;
-
- len = ALIGN(GSP_MSG_HDR_SIZE + gsp_rpc_len, GSP_PAGE_SIZE);
-
- end = (u64 *)((char *)ptr + len);
- msg->pad = 0;
- msg->checksum = 0;
- msg->sequence = gsp->cmdq.seq++;
- msg->elem_count = DIV_ROUND_UP(len, 0x1000);
-
- while (ptr < end)
- csum ^= *ptr++;
-
- msg->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);
-
- wptr = *gsp->cmdq.wptr;
- do {
- do {
- free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1;
- if (free >= gsp->cmdq.cnt)
- free -= gsp->cmdq.cnt;
- if (free >= 1)
- break;
-
- usleep_range(1, 2);
- } while (--time);
-
- if (WARN_ON(!time)) {
- kvfree(msg);
- return -ETIMEDOUT;
- }
-
- cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
- step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
- size = min_t(u32, len, step * GSP_PAGE_SIZE);
-
- memcpy(cqe, (u8 *)msg + off, size);
-
- wptr += DIV_ROUND_UP(size, 0x1000);
- if (wptr == gsp->cmdq.cnt)
- wptr = 0;
-
- off += size;
- len -= size;
- } while (len);
-
- nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
- wmb();
- (*gsp->cmdq.wptr) = wptr;
- mb();
-
- nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);
-
- kvfree(msg);
- return 0;
-}
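/*
 * Standalone sketch of the checksum scheme used above (example helper,
 * not part of this patch): with the header's checksum field zeroed,
 * XOR every 64-bit word of the message, then fold the result to 32 bits.
 */
static u32 example_gsp_checksum(const void *data, u32 len)
{
	const u64 *ptr = data;
	const u64 *end = data + len;	/* len is page-aligned, so 8-byte aligned */
	u64 csum = 0;

	while (ptr < end)
		csum ^= *ptr++;

	return upper_32_bits(csum) ^ lower_32_bits(csum);
}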
-
-static void *
-r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 gsp_rpc_len)
-{
- struct r535_gsp_msg *msg;
- u32 size = GSP_MSG_HDR_SIZE + gsp_rpc_len;
-
- size = ALIGN(size, GSP_MSG_MIN_SIZE);
- msg = kvzalloc(size, GFP_KERNEL);
- if (!msg)
- return ERR_PTR(-ENOMEM);
-
- msg->checksum = gsp_rpc_len;
- return msg->data;
-}
-
-static void
-r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)
-{
- kvfree(msg);
-}
-
-static void
-r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
-{
- if (gsp->subdev.debug >= lvl) {
- nvkm_printk__(&gsp->subdev, lvl, info,
- "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n",
- msg->function, msg->length, msg->length - sizeof(*msg),
- msg->rpc_result, msg->rpc_result_private);
- print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1,
- msg->data, msg->length - sizeof(*msg), true);
- }
-}
-
-static struct nvfw_gsp_rpc *
-r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 gsp_rpc_len)
-{
- struct nvkm_subdev *subdev = &gsp->subdev;
- struct nvfw_gsp_rpc *rpc;
- int retries = 4000000, i;
-
-retry:
- rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
- if (IS_ERR_OR_NULL(rpc))
- return rpc;
-
- rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries);
- if (IS_ERR_OR_NULL(rpc))
- return rpc;
-
- if (rpc->rpc_result) {
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
- r535_gsp_msg_done(gsp, rpc);
- return ERR_PTR(-EINVAL);
- }
-
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_TRACE);
-
- if (fn && rpc->function == fn) {
- if (gsp_rpc_len) {
- if (rpc->length < gsp_rpc_len) {
- nvkm_error(subdev, "rpc len %d < %d\n",
- rpc->length, gsp_rpc_len);
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
- r535_gsp_msg_done(gsp, rpc);
- return ERR_PTR(-EIO);
- }
-
- return rpc;
- }
-
- r535_gsp_msg_done(gsp, rpc);
- return NULL;
- }
-
- for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
- struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];
-
- if (ntfy->fn == rpc->function) {
- if (ntfy->func)
- ntfy->func(ntfy->priv, ntfy->fn, rpc->data,
- rpc->length - sizeof(*rpc));
- break;
- }
- }
-
- if (i == gsp->msgq.ntfy_nr)
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_WARN);
-
- r535_gsp_msg_done(gsp, rpc);
- if (fn)
- goto retry;
-
- if (*gsp->msgq.rptr != *gsp->msgq.wptr)
- goto retry;
-
- return NULL;
-}
-
-static int
-r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv)
-{
- int ret = 0;
-
- mutex_lock(&gsp->msgq.mutex);
- if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) {
- ret = -ENOSPC;
- } else {
- gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn;
- gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func;
- gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv;
- gsp->msgq.ntfy_nr++;
- }
- mutex_unlock(&gsp->msgq.mutex);
- return ret;
-}
-
-static int
-r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
-{
- void *repv;
-
- mutex_lock(&gsp->cmdq.mutex);
- repv = r535_gsp_msg_recv(gsp, fn, 0);
- mutex_unlock(&gsp->cmdq.mutex);
- if (IS_ERR(repv))
- return PTR_ERR(repv);
-
- return 0;
-}
-
-static void *
-r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, bool wait,
- u32 gsp_rpc_len)
-{
- struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
- struct nvfw_gsp_rpc *msg;
- u32 fn = rpc->function;
- void *repv = NULL;
- int ret;
-
- if (gsp->subdev.debug >= NV_DBG_TRACE) {
- nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function,
- rpc->length, rpc->length - sizeof(*rpc));
- print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1,
- rpc->data, rpc->length - sizeof(*rpc), true);
- }
-
- ret = r535_gsp_cmdq_push(gsp, rpc);
- if (ret)
- return ERR_PTR(ret);
-
- if (wait) {
- msg = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len);
- if (!IS_ERR_OR_NULL(msg))
- repv = msg->data;
- else
- repv = msg;
- }
-
- return repv;
-}
-
-static void
-r535_gsp_event_dtor(struct nvkm_gsp_event *event)
-{
- struct nvkm_gsp_device *device = event->device;
- struct nvkm_gsp_client *client = device->object.client;
- struct nvkm_gsp *gsp = client->gsp;
-
- mutex_lock(&gsp->client_id.mutex);
- if (event->func) {
- list_del(&event->head);
- event->func = NULL;
- }
- mutex_unlock(&gsp->client_id.mutex);
-
- nvkm_gsp_rm_free(&event->object);
- event->device = NULL;
-}
-
-static int
-r535_gsp_device_event_get(struct nvkm_gsp_event *event)
-{
- struct nvkm_gsp_device *device = event->device;
- NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl;
-
- ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice,
- NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
-
- ctrl->event = event->id;
- ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
- return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl);
-}
-
-static int
-r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
- nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
-{
- struct nvkm_gsp_client *client = device->object.client;
- struct nvkm_gsp *gsp = client->gsp;
- NV0005_ALLOC_PARAMETERS *args;
- int ret;
-
- args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle,
- NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args),
- &event->object);
- if (IS_ERR(args))
- return PTR_ERR(args);
-
- args->hParentClient = client->object.handle;
- args->hSrcResource = 0;
- args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
- args->notifyIndex = NV01_EVENT_CLIENT_RM | id;
- args->data = NULL;
-
- ret = nvkm_gsp_rm_alloc_wr(&event->object, args);
- if (ret)
- return ret;
-
- event->device = device;
- event->id = id;
-
- ret = r535_gsp_device_event_get(event);
- if (ret) {
- nvkm_gsp_event_dtor(event);
- return ret;
- }
-
- mutex_lock(&gsp->client_id.mutex);
- event->func = func;
- list_add(&event->head, &client->events);
- mutex_unlock(&gsp->client_id.mutex);
- return 0;
-}
-
-static void
-r535_gsp_device_dtor(struct nvkm_gsp_device *device)
-{
- nvkm_gsp_rm_free(&device->subdevice);
- nvkm_gsp_rm_free(&device->object);
-}
-
-static int
-r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device)
-{
- NV2080_ALLOC_PARAMETERS *args;
-
- return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args),
- &device->subdevice);
-}
-
-static int
-r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
-{
- NV0080_ALLOC_PARAMETERS *args;
- int ret;
-
- args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args),
- &device->object);
- if (IS_ERR(args))
- return PTR_ERR(args);
-
- args->hClientShare = client->object.handle;
-
- ret = nvkm_gsp_rm_alloc_wr(&device->object, args);
- if (ret)
- return ret;
-
- ret = r535_gsp_subdevice_ctor(device);
- if (ret)
- nvkm_gsp_rm_free(&device->object);
-
- return ret;
-}
-
-static void
-r535_gsp_client_dtor(struct nvkm_gsp_client *client)
-{
- struct nvkm_gsp *gsp = client->gsp;
-
- nvkm_gsp_rm_free(&client->object);
-
- mutex_lock(&gsp->client_id.mutex);
- idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff);
- mutex_unlock(&gsp->client_id.mutex);
-
- client->gsp = NULL;
-}
-
-static int
-r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
-{
- NV0000_ALLOC_PARAMETERS *args;
- int ret;
-
- mutex_lock(&gsp->client_id.mutex);
- ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL);
- mutex_unlock(&gsp->client_id.mutex);
- if (ret < 0)
- return ret;
-
- client->gsp = gsp;
- client->object.client = client;
- INIT_LIST_HEAD(&client->events);
-
- args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args),
- &client->object);
- if (IS_ERR(args)) {
- r535_gsp_client_dtor(client);
- return PTR_ERR(args);
- }
-
- args->hClient = client->object.handle;
- args->processID = ~0;
-
- ret = nvkm_gsp_rm_alloc_wr(&client->object, args);
- if (ret) {
- r535_gsp_client_dtor(client);
- return ret;
- }
-
- return 0;
-}
-
-static int
-r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
-{
- struct nvkm_gsp_client *client = object->client;
- struct nvkm_gsp *gsp = client->gsp;
- rpc_free_v03_00 *rpc;
-
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n",
- client->object.handle, object->handle);
-
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc));
- if (WARN_ON(IS_ERR_OR_NULL(rpc)))
- return -EIO;
-
- rpc->params.hRoot = client->object.handle;
- rpc->params.hObjectParent = 0;
- rpc->params.hObjectOld = object->handle;
- return nvkm_gsp_rpc_wr(gsp, rpc, true);
-}
-
-static void
-r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *params)
-{
- rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
-
- nvkm_gsp_rpc_done(object->client->gsp, rpc);
-}
-
-static void *
-r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params)
-{
- rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
- struct nvkm_gsp *gsp = object->client->gsp;
- void *ret = NULL;
-
- rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc));
- if (IS_ERR_OR_NULL(rpc))
- return rpc;
-
- if (rpc->status) {
- ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
- if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY)
- nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
- }
-
- nvkm_gsp_rpc_done(gsp, rpc);
-
- return ret;
-}
-
-static void *
-r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass,
- u32 params_size)
-{
- struct nvkm_gsp_client *client = object->client;
- struct nvkm_gsp *gsp = client->gsp;
- rpc_gsp_rm_alloc_v03_00 *rpc;
-
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x\n",
- client->object.handle, object->parent->handle,
- object->handle);
-
- nvkm_debug(&gsp->subdev, "cls:0x%08x params_size:%d\n", oclass,
- params_size);
-
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC,
- sizeof(*rpc) + params_size);
- if (IS_ERR(rpc))
- return rpc;
-
- rpc->hClient = client->object.handle;
- rpc->hParent = object->parent->handle;
- rpc->hObject = object->handle;
- rpc->hClass = oclass;
- rpc->status = 0;
- rpc->paramsSize = params_size;
- return rpc->params;
-}
-
-static void
-r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *params)
-{
- rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr(params, rpc);
-
- if (!params)
- return;
- nvkm_gsp_rpc_done(object->client->gsp, rpc);
-}
-
-static int
-r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 repc)
-{
- rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr((*params), rpc);
- struct nvkm_gsp *gsp = object->client->gsp;
- int ret = 0;
-
- rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc);
- if (IS_ERR_OR_NULL(rpc)) {
- *params = NULL;
- return PTR_ERR(rpc);
- }
-
- if (rpc->status) {
- ret = r535_rpc_status_to_errno(rpc->status);
- if (ret != -EAGAIN && ret != -EBUSY)
- nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
- object->client->object.handle, object->handle, rpc->cmd, rpc->status);
- }
-
- if (repc)
- *params = rpc->params;
- else
- nvkm_gsp_rpc_done(gsp, rpc);
-
- return ret;
-}
-
-static void *
-r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 params_size)
-{
- struct nvkm_gsp_client *client = object->client;
- struct nvkm_gsp *gsp = client->gsp;
- rpc_gsp_rm_control_v03_00 *rpc;
-
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x params_size:%d\n",
- client->object.handle, object->handle, cmd, params_size);
-
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
- sizeof(*rpc) + params_size);
- if (IS_ERR(rpc))
- return rpc;
-
- rpc->hClient = client->object.handle;
- rpc->hObject = object->handle;
- rpc->cmd = cmd;
- rpc->status = 0;
- rpc->paramsSize = params_size;
- return rpc->params;
-}
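/*
 * Assumed usage sketch of the get/fill/push/done control pattern
 * implemented above (the wrapper and its parameters are illustrative):
 */
static int example_rm_ctrl(struct nvkm_gsp_object *object, u32 cmd, u32 size)
{
	void *params;
	int ret;

	params = r535_gsp_rpc_rm_ctrl_get(object, cmd, size);
	if (IS_ERR(params))
		return PTR_ERR(params);

	/* ... fill in the command parameters here ... */

	/* With a non-zero reply size, *params points at the reply on return. */
	ret = r535_gsp_rpc_rm_ctrl_push(object, &params, size);

	/* done() tolerates a NULL reply, so release unconditionally. */
	r535_gsp_rpc_rm_ctrl_done(object, params);
	return ret;
}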
-
-static void
-r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
-{
- struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data);
-
- r535_gsp_msg_done(gsp, rpc);
-}
-
-static void *
-r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size)
-{
- struct nvfw_gsp_rpc *rpc;
-
- rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + payload_size,
- sizeof(u64)));
- if (IS_ERR(rpc))
- return ERR_CAST(rpc);
-
- rpc->header_version = 0x03000000;
- rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';
- rpc->function = fn;
- rpc->rpc_result = 0xffffffff;
- rpc->rpc_result_private = 0xffffffff;
- rpc->length = sizeof(*rpc) + payload_size;
- return rpc->data;
-}
-
-static void *
-r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait,
- u32 gsp_rpc_len)
-{
- struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
- struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
- const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*msg);
- const u32 max_payload_size = max_rpc_size - sizeof(*rpc);
- u32 payload_size = rpc->length - sizeof(*rpc);
- void *repv;
-
- mutex_lock(&gsp->cmdq.mutex);
- if (payload_size > max_payload_size) {
- const u32 fn = rpc->function;
- u32 remain_payload_size = payload_size;
-
- /* Adjust length, and send initial RPC. */
- rpc->length = sizeof(*rpc) + max_payload_size;
- msg->checksum = rpc->length;
-
- repv = r535_gsp_rpc_send(gsp, payload, false, 0);
- if (IS_ERR(repv))
- goto done;
-
- payload += max_payload_size;
- remain_payload_size -= max_payload_size;
-
- /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */
- while (remain_payload_size) {
- u32 size = min(remain_payload_size,
- max_payload_size);
- void *next;
-
- next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
- if (IS_ERR(next)) {
- repv = next;
- goto done;
- }
-
- memcpy(next, payload, size);
-
- repv = r535_gsp_rpc_send(gsp, next, false, 0);
- if (IS_ERR(repv))
- goto done;
-
- payload += size;
- remain_payload_size -= size;
- }
-
- /* Wait for reply. */
- rpc = r535_gsp_msg_recv(gsp, fn, payload_size +
- sizeof(*rpc));
- if (!IS_ERR_OR_NULL(rpc)) {
- if (wait) {
- repv = rpc->data;
- } else {
- nvkm_gsp_rpc_done(gsp, rpc);
- repv = NULL;
- }
- } else {
- repv = wait ? rpc : NULL;
- }
- } else {
- repv = r535_gsp_rpc_send(gsp, payload, wait, gsp_rpc_len);
- }
-
-done:
- mutex_unlock(&gsp->cmdq.mutex);
- return repv;
-}
-
-const struct nvkm_gsp_rm
-r535_gsp_rm = {
- .rpc_get = r535_gsp_rpc_get,
- .rpc_push = r535_gsp_rpc_push,
- .rpc_done = r535_gsp_rpc_done,
-
- .rm_ctrl_get = r535_gsp_rpc_rm_ctrl_get,
- .rm_ctrl_push = r535_gsp_rpc_rm_ctrl_push,
- .rm_ctrl_done = r535_gsp_rpc_rm_ctrl_done,
-
- .rm_alloc_get = r535_gsp_rpc_rm_alloc_get,
- .rm_alloc_push = r535_gsp_rpc_rm_alloc_push,
- .rm_alloc_done = r535_gsp_rpc_rm_alloc_done,
-
- .rm_free = r535_gsp_rpc_rm_free,
-
- .client_ctor = r535_gsp_client_ctor,
- .client_dtor = r535_gsp_client_dtor,
-
- .device_ctor = r535_gsp_device_ctor,
- .device_dtor = r535_gsp_device_dtor,
-
- .event_ctor = r535_gsp_device_event_ctor,
- .event_dtor = r535_gsp_event_dtor,
-};
-
static void
r535_gsp_msgq_work(struct work_struct *work)
{
@@ -1086,10 +87,52 @@ r535_gsp_intr(struct nvkm_inth *inth)
return IRQ_HANDLED;
}
+static bool
+r535_gsp_xlat_mc_engine_idx(u32 mc_engine_idx, enum nvkm_subdev_type *ptype, int *pinst)
+{
+ switch (mc_engine_idx) {
+ case MC_ENGINE_IDX_GSP:
+ *ptype = NVKM_SUBDEV_GSP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_DISP:
+ *ptype = NVKM_ENGINE_DISP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9:
+ *ptype = NVKM_ENGINE_CE;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_CE0;
+ return true;
+ case MC_ENGINE_IDX_GR0:
+ *ptype = NVKM_ENGINE_GR;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
+ *ptype = NVKM_ENGINE_NVDEC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVDEC0;
+ return true;
+ case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2:
+ *ptype = NVKM_ENGINE_NVENC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_MSENC;
+ return true;
+ case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
+ *ptype = NVKM_ENGINE_NVJPG;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVJPEG0;
+ return true;
+ case MC_ENGINE_IDX_OFA0:
+ *ptype = NVKM_ENGINE_OFA;
+ *pinst = 0;
+ return true;
+ default:
+ return false;
+ }
+}
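/*
 * Assumed usage sketch: call sites resolve an RM engine index through
 * the per-version hook instead of open-coding the switch, as the
 * interrupt-table parser below now does.
 */
static void example_xlat(struct nvkm_gsp *gsp, u32 engine_idx)
{
	const struct nvkm_rm_api *rmapi = gsp->rm->api;
	enum nvkm_subdev_type type;
	int inst;

	if (!rmapi->gsp->xlat_mc_engine_idx(engine_idx, &type, &inst))
		return;	/* not an engine nvkm manages */

	nvkm_debug(&gsp->subdev, "engine %08x -> type:%d inst:%d\n",
		   engine_idx, type, inst);
}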
+
static int
r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
{
NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
int ret = 0;
ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
@@ -1112,42 +155,8 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask,
ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall);
- switch (ctrl->table[i].engineIdx) {
- case MC_ENGINE_IDX_GSP:
- type = NVKM_SUBDEV_GSP;
- inst = 0;
- break;
- case MC_ENGINE_IDX_DISP:
- type = NVKM_ENGINE_DISP;
- inst = 0;
- break;
- case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9:
- type = NVKM_ENGINE_CE;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0;
- break;
- case MC_ENGINE_IDX_GR0:
- type = NVKM_ENGINE_GR;
- inst = 0;
- break;
- case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
- type = NVKM_ENGINE_NVDEC;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0;
- break;
- case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2:
- type = NVKM_ENGINE_NVENC;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC;
- break;
- case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
- type = NVKM_ENGINE_NVJPG;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0;
- break;
- case MC_ENGINE_IDX_OFA0:
- type = NVKM_ENGINE_OFA;
- inst = 0;
- break;
- default:
+ if (!rmapi->gsp->xlat_mc_engine_idx(ctrl->table[i].engineIdx, &type, &inst))
continue;
- }
if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) {
ret = -ENOSPC;
@@ -1165,35 +174,14 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
return ret;
}
-static int
-r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
+void
+r535_gsp_get_static_info_fb(struct nvkm_gsp *gsp,
+ const struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *info)
{
- GspStaticConfigInfo *rpc;
int last_usable = -1;
- rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
- if (IS_ERR(rpc))
- return PTR_ERR(rpc);
-
- gsp->internal.client.object.client = &gsp->internal.client;
- gsp->internal.client.object.parent = NULL;
- gsp->internal.client.object.handle = rpc->hInternalClient;
- gsp->internal.client.gsp = gsp;
-
- gsp->internal.device.object.client = &gsp->internal.client;
- gsp->internal.device.object.parent = &gsp->internal.client.object;
- gsp->internal.device.object.handle = rpc->hInternalDevice;
-
- gsp->internal.device.subdevice.client = &gsp->internal.client;
- gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
- gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
-
- gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
- gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
-
- for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) {
- NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg =
- &rpc->fbRegionInfoParams.fbRegion[i];
+ for (int i = 0; i < info->numFBRegions; i++) {
+ const NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg = &info->fbRegion[i];
nvkm_debug(&gsp->subdev, "fb region %d: "
"%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i,
@@ -1215,10 +203,38 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
}
if (last_usable >= 0) {
- u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1;
+ u32 rsvd_base = info->fbRegion[last_usable].limit + 1;
gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base;
}
+}
+
+static int
+r535_gsp_get_static_info(struct nvkm_gsp *gsp)
+{
+ GspStaticConfigInfo *rpc;
+
+ rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
+ if (IS_ERR(rpc))
+ return PTR_ERR(rpc);
+
+ gsp->internal.client.object.client = &gsp->internal.client;
+ gsp->internal.client.object.parent = NULL;
+ gsp->internal.client.object.handle = rpc->hInternalClient;
+ gsp->internal.client.gsp = gsp;
+
+ gsp->internal.device.object.client = &gsp->internal.client;
+ gsp->internal.device.object.parent = &gsp->internal.client.object;
+ gsp->internal.device.object.handle = rpc->hInternalDevice;
+
+ gsp->internal.device.subdevice.client = &gsp->internal.client;
+ gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
+ gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
+
+ gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
+ gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
+
+ r535_gsp_get_static_info_fb(gsp, &rpc->fbRegionInfoParams);
for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) {
if (rpc->gpcInfo.gpcMask & BIT(gpc)) {
@@ -1231,7 +247,7 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
return 0;
}
-static void
+void
nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem)
{
if (mem->data) {
@@ -1260,7 +276,7 @@ nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem)
* so we take a device reference to ensure its lifetime. The reference is
* dropped in the destructor.
*/
-static int
+int
nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem)
{
mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
@@ -1277,9 +293,10 @@ static int
r535_gsp_postinit(struct nvkm_gsp *gsp)
{
struct nvkm_device *device = gsp->subdev.device;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
int ret;
- ret = r535_gsp_rpc_get_gsp_static_info(gsp);
+ ret = rmapi->gsp->get_static_info(gsp);
if (WARN_ON(ret))
return ret;
@@ -1327,7 +344,7 @@ r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend)
rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0;
}
- return nvkm_gsp_rpc_wr(gsp, rpc, true);
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
}
enum registry_type {
@@ -1684,7 +701,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
build_registry(gsp, rpc);
- return nvkm_gsp_rpc_wr(gsp, rpc, false);
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOWAIT);
fail:
clean_registry(gsp);
@@ -1692,7 +709,7 @@ fail:
}
#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
-static void
+void
r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
{
const guid_t NVOP_DSM_GUID =
@@ -1726,7 +743,7 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
kfree(argv4.buffer.pointer);
}
-static void
+void
r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
{
const guid_t JT_DSM_GUID =
@@ -1818,7 +835,7 @@ r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux
}
}
-static void
+void
r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod)
{
acpi_status status;
@@ -1871,7 +888,7 @@ r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi)
}
static int
-r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp)
+r535_gsp_set_system_info(struct nvkm_gsp *gsp)
{
struct nvkm_device *device = gsp->subdev.device;
struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device);
@@ -1884,16 +901,16 @@ r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp)
if (IS_ERR(info))
return PTR_ERR(info);
- info->gpuPhysAddr = device->func->resource_addr(device, 0);
- info->gpuPhysFbAddr = device->func->resource_addr(device, 1);
- info->gpuPhysInstAddr = device->func->resource_addr(device, 3);
+ info->gpuPhysAddr = device->func->resource_addr(device, NVKM_BAR0_PRI);
+ info->gpuPhysFbAddr = device->func->resource_addr(device, NVKM_BAR1_FB);
+ info->gpuPhysInstAddr = device->func->resource_addr(device, NVKM_BAR2_INST);
info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev);
info->maxUserVa = TASK_SIZE;
- info->pciConfigMirrorBase = 0x088000;
- info->pciConfigMirrorSize = 0x001000;
+ info->pciConfigMirrorBase = device->pci->func->cfg.addr;
+ info->pciConfigMirrorSize = device->pci->func->cfg.size;
r535_gsp_acpi_info(gsp, &info->acpiMethodData);
- return nvkm_gsp_rpc_wr(gsp, info, false);
+ return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT);
}
static int
@@ -1911,33 +928,6 @@ r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc)
}
static int
-r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
-{
- rpc_rc_triggered_v17_02 *msg = repv;
- struct nvkm_gsp *gsp = priv;
- struct nvkm_subdev *subdev = &gsp->subdev;
- struct nvkm_chan *chan;
- unsigned long flags;
-
- if (WARN_ON(repc < sizeof(*msg)))
- return -EINVAL;
-
- nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n",
- msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope,
- msg->partitionAttributionId);
-
- chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid / 8, &flags);
- if (!chan) {
- nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid);
- return 0;
- }
-
- nvkm_chan_error(chan, false);
- nvkm_chan_put(&chan, flags);
- return 0;
-}
-
-static int
r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc)
{
struct nvkm_gsp *gsp = priv;
@@ -2130,97 +1120,6 @@ r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
}
static int
-r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
-{
- struct nvkm_subdev *subdev = &gsp->subdev;
- struct nvkm_device *device = subdev->device;
- u32 wpr2_hi;
- int ret;
-
- wpr2_hi = nvkm_rd32(device, 0x1fa828);
- if (!wpr2_hi) {
- nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n");
- return 0;
- }
-
- ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
- if (WARN_ON(ret))
- return ret;
-
- wpr2_hi = nvkm_rd32(device, 0x1fa828);
- if (WARN_ON(wpr2_hi))
- return -EIO;
-
- return 0;
-}
-
-static int
-r535_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
-{
- int ret;
-
- ret = nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
- if (ret)
- return ret;
-
- nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
-
- if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
- return -EIO;
-
- return 0;
-}
-
-static int
-r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
-{
- GspFwWprMeta *meta;
- int ret;
-
- ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta);
- if (ret)
- return ret;
-
- meta = gsp->wpr_meta.data;
-
- meta->magic = GSP_FW_WPR_META_MAGIC;
- meta->revision = GSP_FW_WPR_META_REVISION;
-
- meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
- meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;
-
- meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
- meta->sizeOfBootloader = gsp->boot.fw.size;
- meta->bootloaderCodeOffset = gsp->boot.code_offset;
- meta->bootloaderDataOffset = gsp->boot.data_offset;
- meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
-
- meta->sysmemAddrOfSignature = gsp->sig.addr;
- meta->sizeOfSignature = gsp->sig.size;
-
- meta->gspFwRsvdStart = gsp->fb.heap.addr;
- meta->nonWprHeapOffset = gsp->fb.heap.addr;
- meta->nonWprHeapSize = gsp->fb.heap.size;
- meta->gspFwWprStart = gsp->fb.wpr2.addr;
- meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr;
- meta->gspFwHeapSize = gsp->fb.wpr2.heap.size;
- meta->gspFwOffset = gsp->fb.wpr2.elf.addr;
- meta->bootBinOffset = gsp->fb.wpr2.boot.addr;
- meta->frtsOffset = gsp->fb.wpr2.frts.addr;
- meta->frtsSize = gsp->fb.wpr2.frts.size;
- meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000);
- meta->fbSize = gsp->fb.size;
- meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr;
- meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
- meta->bootCount = 0;
- meta->partitionRpcAddr = 0;
- meta->partitionRpcRequestOffset = 0;
- meta->partitionRpcReplyOffset = 0;
- meta->verified = 0;
- return 0;
-}
-
-static int
r535_gsp_shared_init(struct nvkm_gsp *gsp)
{
struct {
@@ -2271,23 +1170,11 @@ r535_gsp_shared_init(struct nvkm_gsp *gsp)
return 0;
}
-static int
-r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
+static void
+r535_gsp_set_rmargs(struct nvkm_gsp *gsp, bool resume)
{
- GSP_ARGUMENTS_CACHED *args;
- int ret;
-
- if (!resume) {
- ret = r535_gsp_shared_init(gsp);
- if (ret)
- return ret;
+ GSP_ARGUMENTS_CACHED *args = gsp->rmargs.data;
- ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs);
- if (ret)
- return ret;
- }
-
- args = gsp->rmargs.data;
args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
args->messageQueueInitArguments.cmdQueueOffset =
@@ -2304,7 +1191,24 @@ r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
args->srInitArguments.flags = 0;
args->srInitArguments.bInPMTransition = 1;
}
+}
+static int
+r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
+{
+ int ret;
+
+ if (!resume) {
+ ret = r535_gsp_shared_init(gsp);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs);
+ if (ret)
+ return ret;
+ }
+
+ gsp->rm->api->gsp->set_rmargs(gsp, resume);
return 0;
}
@@ -2797,18 +1701,22 @@ lvl1_fail:
return ret;
}
+static u32
+r535_gsp_sr_data_size(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta = gsp->wpr_meta.data;
+
+ return meta->gspFwWprEnd - meta->gspFwWprStart;
+}
+
int
r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
{
- u32 mbox0 = 0xff, mbox1 = 0xff;
+ struct nvkm_rm *rm = gsp->rm;
int ret;
- if (!gsp->running)
- return 0;
-
if (suspend) {
- GspFwWprMeta *meta = gsp->wpr_meta.data;
- u64 len = meta->gspFwWprEnd - meta->gspFwWprStart;
+ u32 len = rm->api->gsp->sr_data_size(gsp);
GspFwSRMeta *sr;
ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt);
@@ -2829,8 +1737,13 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr;
sr->sizeOfSuspendResumeData = len;
- mbox0 = lower_32_bits(gsp->sr.meta.addr);
- mbox1 = upper_32_bits(gsp->sr.meta.addr);
+ ret = rm->api->fbsr->suspend(gsp);
+ if (ret) {
+ nvkm_gsp_mem_dtor(&gsp->sr.meta);
+ nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
+ nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
+ return ret;
+ }
}
ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend);
@@ -2838,18 +1751,10 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
return ret;
nvkm_msec(gsp->subdev.device, 2000,
- if (nvkm_falcon_rd32(&gsp->falcon, 0x040) & 0x80000000)
+ if (nvkm_falcon_rd32(&gsp->falcon, 0x040) == 0x80000000)
break;
);
- nvkm_falcon_reset(&gsp->falcon);
-
- ret = nvkm_gsp_fwsec_sb(gsp);
- WARN_ON(ret);
-
- ret = r535_gsp_booter_unload(gsp, mbox0, mbox1);
- WARN_ON(ret);
-
gsp->running = false;
return 0;
}
@@ -2857,23 +1762,12 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
int
r535_gsp_init(struct nvkm_gsp *gsp)
{
- u32 mbox0, mbox1;
int ret;
- if (!gsp->sr.meta.data) {
- mbox0 = lower_32_bits(gsp->wpr_meta.addr);
- mbox1 = upper_32_bits(gsp->wpr_meta.addr);
- } else {
- r535_gsp_rmargs_init(gsp, true);
-
- mbox0 = lower_32_bits(gsp->sr.meta.addr);
- mbox1 = upper_32_bits(gsp->sr.meta.addr);
- }
+ nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
- /* Execute booter to handle (eventually...) booting GSP-RM. */
- ret = r535_gsp_booter_load(gsp, mbox0, mbox1);
- if (WARN_ON(ret))
- goto done;
+ if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
+ return -EIO;
ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE);
if (ret)
@@ -2883,6 +1777,8 @@ r535_gsp_init(struct nvkm_gsp *gsp)
done:
if (gsp->sr.meta.data) {
+ gsp->rm->api->fbsr->resume(gsp);
+
nvkm_gsp_mem_dtor(&gsp->sr.meta);
nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
@@ -2944,19 +1840,6 @@ r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u
return -ENOENT;
}
-static void
-r535_gsp_dtor_fws(struct nvkm_gsp *gsp)
-{
- nvkm_firmware_put(gsp->fws.bl);
- gsp->fws.bl = NULL;
- nvkm_firmware_put(gsp->fws.booter.unload);
- gsp->fws.booter.unload = NULL;
- nvkm_firmware_put(gsp->fws.booter.load);
- gsp->fws.booter.load = NULL;
- nvkm_firmware_put(gsp->fws.rm);
- gsp->fws.rm = NULL;
-}
-
#ifdef CONFIG_DEBUG_FS
struct r535_gsp_log {
@@ -3190,10 +2073,16 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
nvkm_falcon_fw_dtor(&gsp->booter.unload);
nvkm_falcon_fw_dtor(&gsp->booter.load);
+ nvkm_gsp_mem_dtor(&gsp->fmc.args);
+ kfree(gsp->fmc.sig);
+ kfree(gsp->fmc.pkey);
+ kfree(gsp->fmc.hash);
+ nvkm_gsp_mem_dtor(&gsp->fmc.fw);
+
mutex_destroy(&gsp->msgq.mutex);
mutex_destroy(&gsp->cmdq.mutex);
- r535_gsp_dtor_fws(gsp);
+ nvkm_gsp_dtor_fws(gsp);
nvkm_gsp_mem_dtor(&gsp->rmargs);
nvkm_gsp_mem_dtor(&gsp->wpr_meta);
@@ -3206,10 +2095,17 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
nvkm_gsp_mem_dtor(&gsp->logrm);
}
+static void
+r535_gsp_drop_send_user_shared_data(struct nvkm_gsp *gsp)
+{
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL);
+}
+
int
r535_gsp_oneinit(struct nvkm_gsp *gsp)
{
struct nvkm_device *device = gsp->subdev.device;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
const u8 *data;
u64 size;
int ret;
@@ -3217,16 +2113,6 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
mutex_init(&gsp->cmdq.mutex);
mutex_init(&gsp->msgq.mutex);
- ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load,
- &device->sec2->falcon, &gsp->booter.load);
- if (ret)
- return ret;
-
- ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload,
- &device->sec2->falcon, &gsp->booter.unload);
- if (ret)
- return ret;
-
/* Load GSP firmware from ELF image into DMA-accessible memory. */
ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size);
if (ret)
@@ -3255,65 +2141,29 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
r535_gsp_msg_run_cpu_sequencer, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp);
- r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED,
- r535_gsp_msg_rc_triggered, gsp);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED, rmapi->fifo->rc_triggered, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
r535_gsp_msg_mmu_fault_queued, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL);
- r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL);
+ if (rmapi->gsp->drop_send_user_shared_data)
+ rmapi->gsp->drop_send_user_shared_data(gsp);
+ if (rmapi->gsp->drop_post_nocat_record)
+ rmapi->gsp->drop_post_nocat_record(gsp);
+
ret = r535_gsp_rm_boot_ctor(gsp);
if (ret)
return ret;
/* Release FW images - we've copied them to DMA buffers now. */
- r535_gsp_dtor_fws(gsp);
-
- /* Calculate FB layout. */
- gsp->fb.wpr2.frts.size = 0x100000;
- gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;
-
- gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
- gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);
-
- gsp->fb.wpr2.elf.size = gsp->fw.len;
- gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);
-
- {
- u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);
-
- gsp->fb.wpr2.heap.size =
- gsp->func->wpr_heap.os_carveout_size +
- gsp->func->wpr_heap.base_size +
- ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
- ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);
-
- gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size);
- }
-
- gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
- gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);
-
- gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
- gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;
-
- gsp->fb.heap.size = 0x100000;
- gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;
-
- ret = nvkm_gsp_fwsec_frts(gsp);
- if (WARN_ON(ret))
- return ret;
+ nvkm_gsp_dtor_fws(gsp);
ret = r535_gsp_libos_init(gsp);
if (WARN_ON(ret))
return ret;
- ret = r535_gsp_wpr_meta_init(gsp);
- if (WARN_ON(ret))
- return ret;
-
- ret = r535_gsp_rpc_set_system_info(gsp);
+ ret = rmapi->gsp->set_system_info(gsp);
if (WARN_ON(ret))
return ret;
@@ -3321,76 +2171,17 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
if (WARN_ON(ret))
return ret;
- /* Reset GSP into RISC-V mode. */
- ret = gsp->func->reset(gsp);
- if (WARN_ON(ret))
- return ret;
-
- nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
- nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));
-
mutex_init(&gsp->client_id.mutex);
idr_init(&gsp->client_id.idr);
return 0;
}
-static int
-r535_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver,
- const struct firmware **pfw)
-{
- char fwname[64];
-
- snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver);
- return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw);
-}
-
-int
-r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
-{
- struct nvkm_subdev *subdev = &gsp->subdev;
- int ret;
- bool enable_gsp = fwif->enable;
-
-#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
- enable_gsp = true;
-#endif
- if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
- return -EINVAL;
-
- if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
- (ret = r535_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load)) ||
- (ret = r535_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload)) ||
- (ret = r535_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl))) {
- r535_gsp_dtor_fws(gsp);
- return ret;
- }
-
- return 0;
-}
-
-#define NVKM_GSP_FIRMWARE(chip) \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-535.113.01.bin"); \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-535.113.01.bin"); \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-535.113.01.bin"); \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-535.113.01.bin")
-
-NVKM_GSP_FIRMWARE(tu102);
-NVKM_GSP_FIRMWARE(tu104);
-NVKM_GSP_FIRMWARE(tu106);
-
-NVKM_GSP_FIRMWARE(tu116);
-NVKM_GSP_FIRMWARE(tu117);
-
-NVKM_GSP_FIRMWARE(ga100);
-
-NVKM_GSP_FIRMWARE(ga102);
-NVKM_GSP_FIRMWARE(ga103);
-NVKM_GSP_FIRMWARE(ga104);
-NVKM_GSP_FIRMWARE(ga106);
-NVKM_GSP_FIRMWARE(ga107);
-
-NVKM_GSP_FIRMWARE(ad102);
-NVKM_GSP_FIRMWARE(ad103);
-NVKM_GSP_FIRMWARE(ad104);
-NVKM_GSP_FIRMWARE(ad106);
-NVKM_GSP_FIRMWARE(ad107);
+const struct nvkm_rm_api_gsp
+r535_gsp = {
+ .set_rmargs = r535_gsp_set_rmargs,
+ .set_system_info = r535_gsp_set_system_info,
+ .get_static_info = r535_gsp_get_static_info,
+ .xlat_mc_engine_idx = r535_gsp_xlat_mc_engine_idx,
+ .drop_send_user_shared_data = r535_gsp_drop_send_user_shared_data,
+ .sr_data_size = r535_gsp_sr_data_size,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c
index 7bfa6240d283..a8c42ec0367b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c
@@ -19,28 +19,27 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "gf100.h"
+#include <rm/engine.h>
-#include <subdev/gsp.h>
+#include "nvrm/nvdec.h"
-#include <nvif/class.h>
+static int
+r535_nvdec_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *nvdec)
+{
+ NV_BSP_ALLOCATION_PARAMETERS *args;
-static const struct gf100_gr_func
-ad102_gr = {
- .sclass = {
- { -1, -1, FERMI_TWOD_A },
- { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
- { -1, -1, ADA_A },
- { -1, -1, ADA_COMPUTE_A },
- {}
- }
-};
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvdec);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
-int
-ad102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_gr_new(&ad102_gr, device, type, inst, pgr);
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
- return -ENODEV;
+ return nvkm_gsp_rm_alloc_wr(nvdec, args);
}
+
+const struct nvkm_rm_api_engine
+r535_nvdec = {
+ .alloc = r535_nvdec_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c
index 932934227b9c..acb3ce8bb9de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c
@@ -19,26 +19,27 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
+#include <rm/engine.h>
-#include <subdev/gsp.h>
+#include "nvrm/nvenc.h"
-#include <nvif/class.h>
+static int
+r535_nvenc_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *nvenc)
+{
+ NV_MSENC_ALLOCATION_PARAMETERS *args;
-static const struct nvkm_engine_func
-ga100_nvdec = {
- .sclass = {
- { -1, -1, NVC6B0_VIDEO_DECODER },
- {}
- }
-};
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvenc);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
-int
-ga100_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_nvdec **pnvdec)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvdec_new(&ga100_nvdec, device, type, inst, pnvdec);
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
- return -ENODEV;
+ return nvkm_gsp_rm_alloc_wr(nvenc, args);
}
+
+const struct nvkm_rm_api_engine
+r535_nvenc = {
+ .alloc = r535_nvenc_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c
new file mode 100644
index 000000000000..fbc4080ad8d8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/nvjpg.h"
+
+static int
+r535_nvjpg_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *nvjpg)
+{
+ NV_NVJPG_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvjpg);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
+
+ return nvkm_gsp_rm_alloc_wr(nvjpg, args);
+}
+
+const struct nvkm_rm_api_engine
+r535_nvjpg = {
+ .alloc = r535_nvjpg_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h
new file mode 100644
index 000000000000..cbc7e611fbda
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_ALLOC_H__
+#define __NVRM_ALLOC_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct rpc_gsp_rm_alloc_v03_00
+{
+ NvHandle hClient;
+ NvHandle hParent;
+ NvHandle hObject;
+ NvU32 hClass;
+ NvU32 status;
+ NvU32 paramsSize;
+ NvU32 flags;
+ NvU8 reserved[4];
+ NvU8 params[];
+} rpc_gsp_rm_alloc_v03_00;
+
+typedef struct NVOS00_PARAMETERS_v03_00
+{
+ NvHandle hRoot;
+ NvHandle hObjectParent;
+ NvHandle hObjectOld;
+ NvV32 status;
+} NVOS00_PARAMETERS_v03_00;
+
+typedef struct rpc_free_v03_00
+{
+ NVOS00_PARAMETERS_v03_00 params;
+} rpc_free_v03_00;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h
new file mode 100644
index 000000000000..60b0b08491ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_BAR_H__
+#define __NVRM_BAR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef enum
+{
+ NV_RPC_UPDATE_PDE_BAR_1,
+ NV_RPC_UPDATE_PDE_BAR_2,
+ NV_RPC_UPDATE_PDE_BAR_INVALID,
+} NV_RPC_UPDATE_PDE_BAR_TYPE;
+
+typedef struct UpdateBarPde_v15_00
+{
+ NV_RPC_UPDATE_PDE_BAR_TYPE barType;
+ NvU64 entryValue NV_ALIGN_BYTES(8);
+ NvU64 entryLevelShift NV_ALIGN_BYTES(8);
+} UpdateBarPde_v15_00;
+
+typedef struct rpc_update_bar_pde_v15_00
+{
+ UpdateBarPde_v15_00 info;
+} rpc_update_bar_pde_v15_00;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h
new file mode 100644
index 000000000000..90b0325203d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CE_H__
+#define __NVRM_CE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct NVC0B5_ALLOCATION_PARAMETERS {
+ NvU32 version;
+ NvU32 engineType;
+} NVC0B5_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h
new file mode 100644
index 000000000000..df0e63c0cb6b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CLIENT_H__
+#define __NVRM_CLIENT_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+#define NV_PROC_NAME_MAX_LENGTH 100U
+
+typedef struct NV0000_ALLOC_PARAMETERS {
+ NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
+ NvU32 processID;
+ char processName[NV_PROC_NAME_MAX_LENGTH];
+} NV0000_ALLOC_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h
new file mode 100644
index 000000000000..77f10acd82c9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CTRL_H__
+#define __NVRM_CTRL_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct rpc_gsp_rm_control_v03_00
+{
+ NvHandle hClient;
+ NvHandle hObject;
+ NvU32 cmd;
+ NvU32 status;
+ NvU32 paramsSize;
+ NvU32 flags;
+ NvU8 params[];
+} rpc_gsp_rm_control_v03_00;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h
new file mode 100644
index 000000000000..3933b9ad61ce
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_DEVICE_H__
+#define __NVRM_DEVICE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_DEVICE_0 (0x80U) /* finn: Evaluated from "NV0080_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV0080_ALLOC_PARAMETERS {
+ NvU32 deviceId;
+ NvHandle hClientShare;
+ NvHandle hTargetClient;
+ NvHandle hTargetDevice;
+ NvV32 flags;
+ NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8);
+ NV_DECLARE_ALIGNED(NvU64 vaStartInternal, 8);
+ NV_DECLARE_ALIGNED(NvU64 vaLimitInternal, 8);
+ NvV32 vaMode;
+} NV0080_ALLOC_PARAMETERS;
+
+#define NV20_SUBDEVICE_0 (0x2080U) /* finn: Evaluated from "NV2080_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV2080_ALLOC_PARAMETERS {
+ NvU32 subDeviceId;
+} NV2080_ALLOC_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h
new file mode 100644
index 000000000000..7b7539639540
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h
@@ -0,0 +1,741 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_DISP_H__
+#define __NVRM_DISP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 instMemSize, 8);
+ NvU32 instMemAddrSpace;
+ NvU32 instMemCpuCacheAttr;
+} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS;
+
+#define NV_MEMORY_WRITECOMBINED 2
+
+#define NV04_DISPLAY_COMMON (0x00000073)
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
+ NvU32 feHwSysCap;
+ NvU32 windowPresentMask;
+ NvBool bFbRemapperEnabled;
+ NvU32 numHeads;
+ NvBool bPrimaryVga;
+ NvU32 i2cPort;
+ NvU32 internalDispActiveMask;
+} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
+
+#define NV2080_CTRL_ACPI_DSM_READ_SIZE (0x1000) /* finn: Evaluated from "(4 * 1024)" */
+
+#define NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD (0x20800ac6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS {
+ NvU32 status;
+ NvU16 backLightDataSize;
+ NvU8 backLightData[NV2080_CTRL_ACPI_DSM_READ_SIZE];
+} NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS {
+ NvU32 subDeviceInstance;
+} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 flags;
+ NvU32 numHeads;
+} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK (0x730287U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 headMask;
+} NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayMask;
+ NvU32 displayMaskDDC;
+} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
+
+#define NV0073_CTRL_MAX_CONNECTORS 4U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 DDCPartners;
+ NvU32 count;
+ struct {
+ NvU32 index;
+ NvU32 type;
+ NvU32 location;
+ } data[NV0073_CTRL_MAX_CONNECTORS];
+ NvU32 platform;
+} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (0x73028bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 index;
+ NvU32 type;
+ NvU32 protocol;
+ NvU32 ditherType;
+ NvU32 ditherAlgo;
+ NvU32 location;
+ NvU32 rootPortId;
+ NvU32 dcbIndex;
+ NV_DECLARE_ALIGNED(NvU64 vbiosAddress, 8);
+ NvBool bIsLitByVbios;
+ NvBool bIsDispDynamic;
+} NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_NONE (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DAC (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_SOR (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR (0x00000003U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DSI (0x00000005U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS (0x00000005U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A (0x00000008U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B (0x00000009U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DSI (0x00000010U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI (0x00000011U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN (0xFFFFFFFFU)
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
+ NvBool bDscSupported;
+ NvU32 encoderColorFormatMask;
+ NvU32 lineBufferSizeKB;
+ NvU32 rateBufferSizeKB;
+ NvU32 bitsPerPixelPrecision;
+ NvU32 maxNumHztSlices;
+ NvU32 lineBufferBitDepth;
+} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 sorIndex;
+ NvU32 maxLinkRate;
+ NvU32 dpVersionsSupported;
+ NvU32 UHBRSupported;
+ NvBool bIsMultistreamSupported;
+ NvBool bIsSCEnabled;
+ NvBool bHasIncreasedWatermarkLimits;
+ NvBool bIsPC2Disabled;
+ NvBool isSingleHeadMSTSupported;
+ NvBool bFECSupported;
+ NvBool bIsTrainPhyRepeater;
+ NvBool bOverrideLinkBw;
+ NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC;
+} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U)
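+/*
+ * Editor's note, illustrative only (not part of the RM headers): the bare
+ * "hi:lo" tokens above (e.g. MAX_LINK_RATE is 2:0) are meant to be pasted
+ * into DRF-style macros, which recover both bounds with the C ternary
+ * operator: (0 ? 2:0) evaluates to 0 and (1 ? 2:0) to 2. A minimal sketch
+ * with hypothetical EX_ names (nouveau keeps similar helpers in its
+ * nvhw/drf.h):
+ */
+#define EX_FIELD_LO(drf)     (0 ? drf) /* low bit of a hi:lo range */
+#define EX_FIELD_HI(drf)     (1 ? drf) /* high bit of a hi:lo range */
+#define EX_FIELD_GET(v, drf) (((v) >> EX_FIELD_LO(drf)) & \
+                              ((1u << (EX_FIELD_HI(drf) - EX_FIELD_LO(drf) + 1)) - 1))
+/* e.g. EX_FIELD_GET(caps, NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE) */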
+
+#define NV2080_NOTIFIERS_HOTPLUG (1)
+
+typedef struct {
+ NvU32 plugDisplayMask;
+ NvU32 unplugDisplayMask;
+} Nv2080HotplugNotification;
+
+#define NV2080_NOTIFIERS_DP_IRQ (7)
+
+typedef struct Nv2080DpIrqNotificationRec {
+ NvU32 displayId;
+} Nv2080DpIrqNotification;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730122U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 flags;
+ NvU32 displayMask;
+ NvU32 retryTimeMs;
+} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 flags2;
+} NV0073_CTRL_DFP_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U)
+#define NV0073_CTRL_DFP_FLAGS_LANE 5:3
+#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LINK 21:20
+#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U)
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x730126U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 flags;
+ NvU32 displayId;
+} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
+
+typedef NvU32 NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG;
+
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_INFO {
+ NvU32 displayMask;
+ NvU32 sorType;
+} NV0073_CTRL_DFP_ASSIGN_SOR_INFO;
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS 4U
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR (0x731152U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU8 sorExcludeMask;
+ NvU32 slaveDisplayId;
+ NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG forceSublinkConfig;
+ NvBool bIs2Head1Or;
+ NvU32 sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+ NV0073_CTRL_DFP_ASSIGN_SOR_INFO sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+ NvU8 reservedSorMask;
+ NvU32 flags;
+} NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS;
+
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO 0:0
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_OPTIMAL (0x00000001U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_DEFAULT (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE 1:1
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES (0x00000001U)
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 brightness;
+ NvBool bUncalibrated;
+} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS (0x731144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER 96U
+
+typedef struct NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 numELDSize;
+ NvU8 bufferELD[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER];
+ NvU32 maxFreqSupported;
+ NvU32 ctrl;
+ NvU32 deviceEntry;
+} NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS;
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD 0:0
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV 1:1
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_TRUE (0x00000001U)
+
+#define NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES 2048U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 (0x730245U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 bufferSize;
+ NvU32 flags;
+ NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES];
+} NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE (0x730273U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS {
+ NvU8 subDeviceInstance;
+ NvU32 displayId;
+ NvU8 enable;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 caps;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED 0:0
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED 1:1
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED 2:2
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED 5:3
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED 6:6
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED 9:7
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+
+#define NV0073_CTRL_SET_OD_MAX_PACKET_SIZE 36U
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET (0x730288U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 transmitControl;
+ NvU32 packetSize;
+ NvU32 targetHead;
+ NvBool bUsePsrHeadforSdp;
+ NvU8 aPacket[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE];
+} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE 0:0
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME 1:1
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME 2:2
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK 3:3
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE 4:4
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT 5:5
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY 6:6
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING 7:7
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE 9:8
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME0 (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME1 (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE 31:31
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES (0x0000001U)
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM (0x730275U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS {
+ NvU8 subDeviceInstance;
+ NvU32 displayId;
+ NvU8 mute;
+} NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U
+
+#define NV0073_CTRL_CMD_DP_AUXCH_CTRL (0x731341U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_AUXCH_CTRL_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvBool bAddrOnly;
+ NvU32 cmd;
+ NvU32 addr;
+ NvU8 data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE];
+ NvU32 size;
+ NvU32 replyType;
+ NvU32 retryTimeMs;
+} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE 3:3
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT 2:2
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE 1:0
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS (0x00000002U)
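+/*
+ * Editor's note, illustrative only: a native-AUX DPCD read is composed by
+ * placing each value at its field's low bit (TYPE is bit 3, REQ_TYPE bits
+ * 1:0). A sketch, with dpcd_offset and n as hypothetical variables:
+ *
+ *   params.cmd  = (NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX      << 3) |
+ *                 (NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ << 0);
+ *   params.addr = dpcd_offset;
+ *   params.size = n;  // at most NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE (16)
+ */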
+
+#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS {
+ // In
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+
+ // Out
+ NvU8 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+ NvU8 linkBwCount;
+} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 cmd;
+ NvU32 data;
+ NvU32 err;
+ NvU32 retryTimeMs;
+ NvU32 eightLaneDpcdBaseAddr;
+} NV0073_CTRL_DP_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT 0:0
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW 1:1
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD 2:2
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_UNUSED 3:3
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE 4:4
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING 5:5
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING 6:6
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING 7:7
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING 8:8
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT (0x00000000U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING 9:9
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED 10:10
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING 12:11
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON (0x00000002U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER 13:13
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG 14:14
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC 15:15
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST 29:29
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE 30:30
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG 31:31
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE 3:0
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK (0x00000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN (0x00000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE (0x00000004U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK (0x00000005U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR (0x00000006U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_INIT_PUBLIC_INFO (0x00000007U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_GET_PUBLIC_INFO (0x00000008U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_STATUS_CHECK (0x00000009U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING (0x80000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR (0x80000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_WRITE_ERROR (0x80000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_DEVICE_ERROR (0x80000004U)
+
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT 4:0
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0 (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1 (0x00000001U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2 (0x00000002U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4 (0x00000004U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8 (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW 15:8
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS (0x00000006U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_16GBPS (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_43GBPS (0x00000009U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS (0x0000000AU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_3_24GBPS (0x0000000CU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_4_32GBPS (0x00000010U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS (0x00000014U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS (0x0000001EU)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING 18:18
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_NO (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_YES (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET 22:19
+#define NV0073_CTRL_DP_DATA_TARGET_SINK (0x00000000U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_0 (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_1 (0x00000002U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_2 (0x00000003U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_3 (0x00000004U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_4 (0x00000005U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_5 (0x00000006U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_6 (0x00000007U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_7 (0x00000008U)
+
+#define NV0073_CTRL_CMD_DP_SET_LANE_DATA (0x731346U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_LANE_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_MAX_LANES 8U
+
+typedef struct NV0073_CTRL_DP_LANE_DATA_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 numLanes;
+ NvU32 data[NV0073_CTRL_MAX_LANES];
+} NV0073_CTRL_DP_LANE_DATA_PARAMS;
+
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS 1:0
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3 (0x00000003U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT 3:2
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 (0x00000003U)
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID (0x73135bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 preferredDisplayId;
+
+ NvBool force;
+ NvBool useBFM;
+
+ NvU32 displayIdAssigned;
+ NvU32 allDisplayMask;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID (0x73135cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CONFIG_STREAM (0x731362U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 sorIndex;
+ NvU32 dpLink;
+
+ NvBool bEnableOverride;
+ NvBool bMST;
+ NvU32 singleHeadMultistreamMode;
+ NvU32 hBlankSym;
+ NvU32 vBlankSym;
+ NvU32 colorFormat;
+ NvBool bEnableTwoHeadOneOr;
+
+ struct {
+ NvU32 slotStart;
+ NvU32 slotEnd;
+ NvU32 PBN;
+ NvU32 Timeslice;
+ NvBool sendACT; // deprecated - use NV0073_CTRL_CMD_DP_SEND_ACT
+ NvU32 singleHeadMSTPipeline;
+ NvBool bEnableAudioOverRightPanel;
+ } MST;
+
+ struct {
+ NvBool bEnhancedFraming;
+ NvU32 tuSize;
+ NvU32 waterMark;
+ NvU32 actualPclkHz; // deprecated - use MvidWarParams
+ NvU32 linkClkFreqHz; // deprecated - use MvidWarParams
+ NvBool bEnableAudioOverRightPanel;
+ struct {
+ NvU32 activeCnt;
+ NvU32 activeFrac;
+ NvU32 activePolarity;
+ NvBool mvidWarEnabled;
+ struct {
+ NvU32 actualPclkHz;
+ NvU32 linkClkFreqHz;
+ } MvidWarParams;
+ } Legacy;
+ } SST;
+} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE (0x731150U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvBool enable;
+} NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 mute;
+} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
+ NvU32 addressSpace;
+ NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NvU32 cacheSnoop;
+ NvU32 hclass;
+ NvU32 channelInstance;
+ NvBool valid;
+} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
+
+#define ADDR_SYSMEM (1) // System memory (PCI)
+
+#define ADDR_FBMEM 2 // Frame buffer memory space
+
+typedef struct
+{
+ NvV32 channelInstance; // One of the n channel instances of a given channel type.
+ // All PIO channels have two instances (one per head).
+ NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors.
+ NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of control region for PIO channel
+} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+ NvV32 channelInstance; // One of the n channel instances of a given channel type.
+ // Note that the core channel has only one instance
+ // while all others have two (one per head).
+ NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer
+ NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
+ NvU32 offset; // Initial offset for put/get, usually zero.
+ NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
+
+ NvU32 flags;
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001
+
+} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h
new file mode 100644
index 000000000000..b26dfc8f8087
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_ENGINE_H__
+#define __NVRM_ENGINE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define MC_ENGINE_IDX_NULL 0 // This must be 0
+#define MC_ENGINE_IDX_TMR 1
+#define MC_ENGINE_IDX_DISP 2
+#define MC_ENGINE_IDX_FB 3
+#define MC_ENGINE_IDX_FIFO 4
+#define MC_ENGINE_IDX_VIDEO 5
+#define MC_ENGINE_IDX_MD 6
+#define MC_ENGINE_IDX_BUS 7
+#define MC_ENGINE_IDX_PMGR 8
+#define MC_ENGINE_IDX_VP2 9
+#define MC_ENGINE_IDX_CIPHER 10
+#define MC_ENGINE_IDX_BIF 11
+#define MC_ENGINE_IDX_PPP 12
+#define MC_ENGINE_IDX_PRIVRING 13
+#define MC_ENGINE_IDX_PMU 14
+#define MC_ENGINE_IDX_CE0 15
+#define MC_ENGINE_IDX_CE1 16
+#define MC_ENGINE_IDX_CE2 17
+#define MC_ENGINE_IDX_CE3 18
+#define MC_ENGINE_IDX_CE4 19
+#define MC_ENGINE_IDX_CE5 20
+#define MC_ENGINE_IDX_CE6 21
+#define MC_ENGINE_IDX_CE7 22
+#define MC_ENGINE_IDX_CE8 23
+#define MC_ENGINE_IDX_CE9 24
+#define MC_ENGINE_IDX_CE_MAX MC_ENGINE_IDX_CE9
+#define MC_ENGINE_IDX_VIC 35
+#define MC_ENGINE_IDX_ISOHUB 36
+#define MC_ENGINE_IDX_VGPU 37
+#define MC_ENGINE_IDX_MSENC 38
+#define MC_ENGINE_IDX_MSENC1 39
+#define MC_ENGINE_IDX_MSENC2 40
+#define MC_ENGINE_IDX_C2C 41
+#define MC_ENGINE_IDX_LTC 42
+#define MC_ENGINE_IDX_FBHUB 43
+#define MC_ENGINE_IDX_HDACODEC 44
+#define MC_ENGINE_IDX_GMMU 45
+#define MC_ENGINE_IDX_SEC2 46
+#define MC_ENGINE_IDX_FSP 47
+#define MC_ENGINE_IDX_NVLINK 48
+#define MC_ENGINE_IDX_GSP 49
+#define MC_ENGINE_IDX_NVJPG 50
+#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG
+#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG
+#define MC_ENGINE_IDX_NVJPEG1 51
+#define MC_ENGINE_IDX_NVJPEG2 52
+#define MC_ENGINE_IDX_NVJPEG3 53
+#define MC_ENGINE_IDX_NVJPEG4 54
+#define MC_ENGINE_IDX_NVJPEG5 55
+#define MC_ENGINE_IDX_NVJPEG6 56
+#define MC_ENGINE_IDX_NVJPEG7 57
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT 58
+#define MC_ENGINE_IDX_ACCESS_CNTR 59
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT 60
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_ERROR 61
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_ERROR 62
+#define MC_ENGINE_IDX_INFO_FAULT 63
+#define MC_ENGINE_IDX_BSP 64
+#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP
+#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC
+#define MC_ENGINE_IDX_NVDEC1 65
+#define MC_ENGINE_IDX_NVDEC2 66
+#define MC_ENGINE_IDX_NVDEC3 67
+#define MC_ENGINE_IDX_NVDEC4 68
+#define MC_ENGINE_IDX_NVDEC5 69
+#define MC_ENGINE_IDX_NVDEC6 70
+#define MC_ENGINE_IDX_NVDEC7 71
+#define MC_ENGINE_IDX_CPU_DOORBELL 72
+#define MC_ENGINE_IDX_PRIV_DOORBELL 73
+#define MC_ENGINE_IDX_MMU_ECC_ERROR 74
+#define MC_ENGINE_IDX_BLG 75
+#define MC_ENGINE_IDX_PERFMON 76
+#define MC_ENGINE_IDX_BUF_RESET 77
+#define MC_ENGINE_IDX_XBAR 78
+#define MC_ENGINE_IDX_ZPW 79
+#define MC_ENGINE_IDX_OFA0 80
+#define MC_ENGINE_IDX_TEGRA 81
+#define MC_ENGINE_IDX_GR 82
+#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR
+#define MC_ENGINE_IDX_GR1 83
+#define MC_ENGINE_IDX_GR2 84
+#define MC_ENGINE_IDX_GR3 85
+#define MC_ENGINE_IDX_GR4 86
+#define MC_ENGINE_IDX_GR5 87
+#define MC_ENGINE_IDX_GR6 88
+#define MC_ENGINE_IDX_GR7 89
+#define MC_ENGINE_IDX_ESCHED 90
+#define MC_ENGINE_IDX_ESCHED__SIZE 64
+#define MC_ENGINE_IDX_GR_FECS_LOG 154
+#define MC_ENGINE_IDX_GR0_FECS_LOG MC_ENGINE_IDX_GR_FECS_LOG
+#define MC_ENGINE_IDX_GR1_FECS_LOG 155
+#define MC_ENGINE_IDX_GR2_FECS_LOG 156
+#define MC_ENGINE_IDX_GR3_FECS_LOG 157
+#define MC_ENGINE_IDX_GR4_FECS_LOG 158
+#define MC_ENGINE_IDX_GR5_FECS_LOG 159
+#define MC_ENGINE_IDX_GR6_FECS_LOG 160
+#define MC_ENGINE_IDX_GR7_FECS_LOG 161
+#define MC_ENGINE_IDX_TMR_SWRL 162
+#define MC_ENGINE_IDX_DISP_GSP 163
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_CPU 164
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_CPU 165
+#define MC_ENGINE_IDX_PXUC 166
+#define MC_ENGINE_IDX_MAX 167 // This must be kept as the max bit if we need to add more engines
+#define MC_ENGINE_IDX_INVALID 0xFFFFFFFF
+#define MC_ENGINE_IDX_GRn(x) (MC_ENGINE_IDX_GR0 + (x))
+#define MC_ENGINE_IDX_GRn_FECS_LOG(x) (MC_ENGINE_IDX_GR0_FECS_LOG + (x))
+#define MC_ENGINE_IDX_CE(x) (MC_ENGINE_IDX_CE0 + (x))
+#define MC_ENGINE_IDX_MSENCn(x) (MC_ENGINE_IDX_MSENC + (x))
+#define MC_ENGINE_IDX_NVDECn(x) (MC_ENGINE_IDX_NVDEC + (x))
+#define MC_ENGINE_IDX_NVJPEGn(x) (MC_ENGINE_IDX_NVJPEG + (x))
+#define MC_ENGINE_IDX_ESCHEDn(x) (MC_ENGINE_IDX_ESCHED + (x))
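+/*
+ * Editor's note, illustrative only: the parameterized forms index the
+ * per-instance values above, e.g. MC_ENGINE_IDX_CE(3) == 18 ==
+ * MC_ENGINE_IDX_CE3, and MC_ENGINE_IDX_NVDECn(2) == 66 ==
+ * MC_ENGINE_IDX_NVDEC2.
+ */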
+
+typedef enum
+{
+ RM_ENGINE_TYPE_NULL = (0x00000000),
+ RM_ENGINE_TYPE_GR0 = (0x00000001),
+ RM_ENGINE_TYPE_GR1 = (0x00000002),
+ RM_ENGINE_TYPE_GR2 = (0x00000003),
+ RM_ENGINE_TYPE_GR3 = (0x00000004),
+ RM_ENGINE_TYPE_GR4 = (0x00000005),
+ RM_ENGINE_TYPE_GR5 = (0x00000006),
+ RM_ENGINE_TYPE_GR6 = (0x00000007),
+ RM_ENGINE_TYPE_GR7 = (0x00000008),
+ RM_ENGINE_TYPE_COPY0 = (0x00000009),
+ RM_ENGINE_TYPE_COPY1 = (0x0000000a),
+ RM_ENGINE_TYPE_COPY2 = (0x0000000b),
+ RM_ENGINE_TYPE_COPY3 = (0x0000000c),
+ RM_ENGINE_TYPE_COPY4 = (0x0000000d),
+ RM_ENGINE_TYPE_COPY5 = (0x0000000e),
+ RM_ENGINE_TYPE_COPY6 = (0x0000000f),
+ RM_ENGINE_TYPE_COPY7 = (0x00000010),
+ RM_ENGINE_TYPE_COPY8 = (0x00000011),
+ RM_ENGINE_TYPE_COPY9 = (0x00000012),
+ RM_ENGINE_TYPE_NVDEC0 = (0x0000001d),
+ RM_ENGINE_TYPE_NVDEC1 = (0x0000001e),
+ RM_ENGINE_TYPE_NVDEC2 = (0x0000001f),
+ RM_ENGINE_TYPE_NVDEC3 = (0x00000020),
+ RM_ENGINE_TYPE_NVDEC4 = (0x00000021),
+ RM_ENGINE_TYPE_NVDEC5 = (0x00000022),
+ RM_ENGINE_TYPE_NVDEC6 = (0x00000023),
+ RM_ENGINE_TYPE_NVDEC7 = (0x00000024),
+ RM_ENGINE_TYPE_NVENC0 = (0x00000025),
+ RM_ENGINE_TYPE_NVENC1 = (0x00000026),
+ RM_ENGINE_TYPE_NVENC2 = (0x00000027),
+ RM_ENGINE_TYPE_VP = (0x00000028),
+ RM_ENGINE_TYPE_ME = (0x00000029),
+ RM_ENGINE_TYPE_PPP = (0x0000002a),
+ RM_ENGINE_TYPE_MPEG = (0x0000002b),
+ RM_ENGINE_TYPE_SW = (0x0000002c),
+ RM_ENGINE_TYPE_TSEC = (0x0000002d),
+ RM_ENGINE_TYPE_VIC = (0x0000002e),
+ RM_ENGINE_TYPE_MP = (0x0000002f),
+ RM_ENGINE_TYPE_SEC2 = (0x00000030),
+ RM_ENGINE_TYPE_HOST = (0x00000031),
+ RM_ENGINE_TYPE_DPU = (0x00000032),
+ RM_ENGINE_TYPE_PMU = (0x00000033),
+ RM_ENGINE_TYPE_FBFLCN = (0x00000034),
+ RM_ENGINE_TYPE_NVJPEG0 = (0x00000035),
+ RM_ENGINE_TYPE_NVJPEG1 = (0x00000036),
+ RM_ENGINE_TYPE_NVJPEG2 = (0x00000037),
+ RM_ENGINE_TYPE_NVJPEG3 = (0x00000038),
+ RM_ENGINE_TYPE_NVJPEG4 = (0x00000039),
+ RM_ENGINE_TYPE_NVJPEG5 = (0x0000003a),
+ RM_ENGINE_TYPE_NVJPEG6 = (0x0000003b),
+ RM_ENGINE_TYPE_NVJPEG7 = (0x0000003c),
+ RM_ENGINE_TYPE_OFA = (0x0000003d),
+ RM_ENGINE_TYPE_LAST = (0x0000003e),
+} RM_ENGINE_TYPE;
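+/*
+ * Editor's note, illustrative only: RM_ENGINE_TYPE is RM's internal
+ * numbering and does not match the NV2080_ENGINE_TYPE client values
+ * below (e.g. NVDEC0 is 0x1d here but 0x13 in the NV2080 list), so
+ * values must be translated between the two spaces, never mixed.
+ */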
+
+#define NV2080_ENGINE_TYPE_NULL (0x00000000)
+#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001)
+#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS
+#define NV2080_ENGINE_TYPE_GR1 (0x00000002)
+#define NV2080_ENGINE_TYPE_GR2 (0x00000003)
+#define NV2080_ENGINE_TYPE_GR3 (0x00000004)
+#define NV2080_ENGINE_TYPE_GR4 (0x00000005)
+#define NV2080_ENGINE_TYPE_GR5 (0x00000006)
+#define NV2080_ENGINE_TYPE_GR6 (0x00000007)
+#define NV2080_ENGINE_TYPE_GR7 (0x00000008)
+#define NV2080_ENGINE_TYPE_COPY0 (0x00000009)
+#define NV2080_ENGINE_TYPE_COPY1 (0x0000000a)
+#define NV2080_ENGINE_TYPE_COPY2 (0x0000000b)
+#define NV2080_ENGINE_TYPE_COPY3 (0x0000000c)
+#define NV2080_ENGINE_TYPE_COPY4 (0x0000000d)
+#define NV2080_ENGINE_TYPE_COPY5 (0x0000000e)
+#define NV2080_ENGINE_TYPE_COPY6 (0x0000000f)
+#define NV2080_ENGINE_TYPE_COPY7 (0x00000010)
+#define NV2080_ENGINE_TYPE_COPY8 (0x00000011)
+#define NV2080_ENGINE_TYPE_COPY9 (0x00000012)
+#define NV2080_ENGINE_TYPE_BSP (0x00000013)
+#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP
+#define NV2080_ENGINE_TYPE_NVDEC1 (0x00000014)
+#define NV2080_ENGINE_TYPE_NVDEC2 (0x00000015)
+#define NV2080_ENGINE_TYPE_NVDEC3 (0x00000016)
+#define NV2080_ENGINE_TYPE_NVDEC4 (0x00000017)
+#define NV2080_ENGINE_TYPE_NVDEC5 (0x00000018)
+#define NV2080_ENGINE_TYPE_NVDEC6 (0x00000019)
+#define NV2080_ENGINE_TYPE_NVDEC7 (0x0000001a)
+#define NV2080_ENGINE_TYPE_MSENC (0x0000001b)
+#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */
+#define NV2080_ENGINE_TYPE_NVENC1 (0x0000001c)
+#define NV2080_ENGINE_TYPE_NVENC2 (0x0000001d)
+#define NV2080_ENGINE_TYPE_VP (0x0000001e)
+#define NV2080_ENGINE_TYPE_ME (0x0000001f)
+#define NV2080_ENGINE_TYPE_PPP (0x00000020)
+#define NV2080_ENGINE_TYPE_MPEG (0x00000021)
+#define NV2080_ENGINE_TYPE_SW (0x00000022)
+#define NV2080_ENGINE_TYPE_CIPHER (0x00000023)
+#define NV2080_ENGINE_TYPE_TSEC NV2080_ENGINE_TYPE_CIPHER
+#define NV2080_ENGINE_TYPE_VIC (0x00000024)
+#define NV2080_ENGINE_TYPE_MP (0x00000025)
+#define NV2080_ENGINE_TYPE_SEC2 (0x00000026)
+#define NV2080_ENGINE_TYPE_HOST (0x00000027)
+#define NV2080_ENGINE_TYPE_DPU (0x00000028)
+#define NV2080_ENGINE_TYPE_PMU (0x00000029)
+#define NV2080_ENGINE_TYPE_FBFLCN (0x0000002a)
+#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b)
+#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG
+#define NV2080_ENGINE_TYPE_NVJPEG1 (0x0000002c)
+#define NV2080_ENGINE_TYPE_NVJPEG2 (0x0000002d)
+#define NV2080_ENGINE_TYPE_NVJPEG3 (0x0000002e)
+#define NV2080_ENGINE_TYPE_NVJPEG4 (0x0000002f)
+#define NV2080_ENGINE_TYPE_NVJPEG5 (0x00000030)
+#define NV2080_ENGINE_TYPE_NVJPEG6 (0x00000031)
+#define NV2080_ENGINE_TYPE_NVJPEG7 (0x00000032)
+#define NV2080_ENGINE_TYPE_OFA (0x00000033)
+#define NV2080_ENGINE_TYPE_LAST (0x0000003e)
+#define NV2080_ENGINE_TYPE_ALLENGINES (0xffffffff)
+#define NV2080_ENGINE_TYPE_COPY_SIZE 10
+#define NV2080_ENGINE_TYPE_NVENC_SIZE 3
+#define NV2080_ENGINE_TYPE_NVJPEG_SIZE 8
+#define NV2080_ENGINE_TYPE_NVDEC_SIZE 8
+#define NV2080_ENGINE_TYPE_GR_SIZE 8
+#define NV2080_ENGINE_TYPE_COPY(i) (NV2080_ENGINE_TYPE_COPY0+(i))
+#define NV2080_ENGINE_TYPE_IS_COPY(i) (((i) >= NV2080_ENGINE_TYPE_COPY0) && ((i) <= NV2080_ENGINE_TYPE_COPY9))
+#define NV2080_ENGINE_TYPE_COPY_IDX(i) ((i) - NV2080_ENGINE_TYPE_COPY0)
+#define NV2080_ENGINE_TYPE_NVENC(i) (NV2080_ENGINE_TYPE_NVENC0+(i))
+#define NV2080_ENGINE_TYPE_IS_NVENC(i) (((i) >= NV2080_ENGINE_TYPE_NVENC0) && ((i) < NV2080_ENGINE_TYPE_NVENC(NV2080_ENGINE_TYPE_NVENC_SIZE)))
+#define NV2080_ENGINE_TYPE_NVENC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVENC0)
+#define NV2080_ENGINE_TYPE_NVDEC(i) (NV2080_ENGINE_TYPE_NVDEC0+(i))
+#define NV2080_ENGINE_TYPE_IS_NVDEC(i) (((i) >= NV2080_ENGINE_TYPE_NVDEC0) && ((i) < NV2080_ENGINE_TYPE_NVDEC(NV2080_ENGINE_TYPE_NVDEC_SIZE)))
+#define NV2080_ENGINE_TYPE_NVDEC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVDEC0)
+#define NV2080_ENGINE_TYPE_NVJPEG(i) (NV2080_ENGINE_TYPE_NVJPEG0+(i))
+#define NV2080_ENGINE_TYPE_IS_NVJPEG(i) (((i) >= NV2080_ENGINE_TYPE_NVJPEG0) && ((i) < NV2080_ENGINE_TYPE_NVJPEG(NV2080_ENGINE_TYPE_NVJPEG_SIZE)))
+#define NV2080_ENGINE_TYPE_NVJPEG_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVJPEG0)
+#define NV2080_ENGINE_TYPE_GR(i) (NV2080_ENGINE_TYPE_GR0 + (i))
+#define NV2080_ENGINE_TYPE_IS_GR(i) (((i) >= NV2080_ENGINE_TYPE_GR0) && ((i) < NV2080_ENGINE_TYPE_GR(NV2080_ENGINE_TYPE_GR_SIZE)))
+#define NV2080_ENGINE_TYPE_GR_IDX(i) ((i) - NV2080_ENGINE_TYPE_GR0)
+#define NV2080_ENGINE_TYPE_IS_VALID(i) (((i) > (NV2080_ENGINE_TYPE_NULL)) && ((i) < (NV2080_ENGINE_TYPE_LAST)))
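+/*
+ * Editor's note, illustrative only: the IS_xxx/xxx_IDX macro pairs
+ * round-trip, e.g. for i = NV2080_ENGINE_TYPE_COPY(4) == 0x0d,
+ * NV2080_ENGINE_TYPE_IS_COPY(i) holds and
+ * NV2080_ENGINE_TYPE_COPY_IDX(i) == 4.
+ */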
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h
new file mode 100644
index 000000000000..057f7220c225
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_EVENT_H__
+#define __NVRM_EVENT_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007e)
+
+typedef struct NV0005_ALLOC_PARAMETERS {
+ NvHandle hParentClient;
+ NvHandle hSrcResource;
+
+ NvV32 hClass;
+ NvV32 notifyIndex;
+ NV_DECLARE_ALIGNED(NvP64 data, 8);
+} NV0005_ALLOC_PARAMETERS;
+
+#define NV01_EVENT_CLIENT_RM (0x04000000)
+
+#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
+ NvU32 event;
+ NvU32 action;
+ NvBool bNotifyState;
+ NvU32 info32;
+ NvU16 info16;
+} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS;
+
+#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002)
+
+typedef struct rpc_post_event_v17_00
+{
+ NvHandle hClient;
+ NvHandle hEvent;
+ NvU32 notifyIndex;
+ NvU32 data;
+ NvU16 info16;
+ NvU32 status;
+ NvU32 eventDataSize;
+ NvBool bNotifyList;
+ NvU8 eventData[];
+} rpc_post_event_v17_00;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h
new file mode 100644
index 000000000000..28786ef013a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FBSR_H__
+#define __NVRM_FBSR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_MEMORY_LIST_FBMEM (0x00000082)
+
+#define NV01_MEMORY_LIST_SYSTEM (0x00000081)
+
+#define NVOS02_FLAGS_PHYSICALITY 7:4
+#define NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS (0x00000000)
+#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS (0x00000001)
+#define NVOS02_FLAGS_LOCATION 11:8
+#define NVOS02_FLAGS_LOCATION_PCI (0x00000000)
+#define NVOS02_FLAGS_LOCATION_AGP (0x00000001)
+#define NVOS02_FLAGS_LOCATION_VIDMEM (0x00000002)
+#define NVOS02_FLAGS_COHERENCY 15:12
+#define NVOS02_FLAGS_COHERENCY_UNCACHED (0x00000000)
+#define NVOS02_FLAGS_COHERENCY_CACHED (0x00000001)
+#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE (0x00000002)
+#define NVOS02_FLAGS_COHERENCY_WRITE_THROUGH (0x00000003)
+#define NVOS02_FLAGS_COHERENCY_WRITE_PROTECT (0x00000004)
+#define NVOS02_FLAGS_COHERENCY_WRITE_BACK (0x00000005)
+#define NVOS02_FLAGS_ALLOC 17:16
+#define NVOS02_FLAGS_ALLOC_NONE (0x00000001)
+#define NVOS02_FLAGS_GPU_CACHEABLE 18:18
+#define NVOS02_FLAGS_GPU_CACHEABLE_NO (0x00000000)
+#define NVOS02_FLAGS_GPU_CACHEABLE_YES (0x00000001)
+#define NVOS02_FLAGS_KERNEL_MAPPING 19:19
+#define NVOS02_FLAGS_KERNEL_MAPPING_NO_MAP (0x00000000)
+#define NVOS02_FLAGS_KERNEL_MAPPING_MAP (0x00000001)
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY 20:20
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_YES (0x00000001)
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY 21:21
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES (0x00000001)
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY 22:22
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_YES (0x00000001)
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE 23:23
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_REQUIRED (0x00000001)
+#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT 24:24
+#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT_APERTURE (0x00000001)
+#define NVOS02_FLAGS_MEMORY_PROTECTION 26:25
+#define NVOS02_FLAGS_MEMORY_PROTECTION_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_MEMORY_PROTECTION_PROTECTED (0x00000001)
+#define NVOS02_FLAGS_MEMORY_PROTECTION_UNPROTECTED (0x00000002)
+#define NVOS02_FLAGS_MAPPING 31:30
+#define NVOS02_FLAGS_MAPPING_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_MAPPING_NO_MAP (0x00000001)
+#define NVOS02_FLAGS_MAPPING_NEVER_MAP (0x00000002)
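+/*
+ * Editor's note, illustrative only: allocation flags are composed by
+ * shifting each value to its field's low bit, e.g. a cached,
+ * noncontiguous system-memory list (PHYSICALITY is 7:4, LOCATION 11:8,
+ * COHERENCY 15:12):
+ *
+ *   flags = (NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS <<  4) |
+ *           (NVOS02_FLAGS_LOCATION_PCI              <<  8) |
+ *           (NVOS02_FLAGS_COHERENCY_CACHED          << 12);
+ */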
+
+struct pte_desc
+{
+ NvU32 idr:2;
+ NvU32 reserved1:14;
+ NvU32 length:16;
+ union {
+ NvU64 pte; // PTE when IDR == 0
+ NvU64 pde; // PDE when IDR > 0
+ } pte_pde[] NV_ALIGN_BYTES(8); // interpretation selected by idr
+};
+
+typedef struct rpc_alloc_memory_v13_01
+{
+ NvHandle hClient;
+ NvHandle hDevice;
+ NvHandle hMemory;
+ NvU32 hClass;
+ NvU32 flags;
+ NvU32 pteAdjust;
+ NvU32 format;
+ NvU64 length NV_ALIGN_BYTES(8);
+ NvU32 pageCount;
+ struct pte_desc pteDesc;
+} rpc_alloc_memory_v13_01;
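+/*
+ * Editor's note, illustrative only: pteDesc ends in a flexible pte_pde[]
+ * array, so the RPC buffer is presumably sized as
+ * sizeof(rpc_alloc_memory_v13_01) plus one NvU64 entry per page
+ * described (pageCount entries when idr == 0).
+ */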
+
+#define FBSR_TYPE_DMA 4 // Copy using DMA. Fastest.
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS {
+ NvU32 fbsrType;
+ NvU32 numRegions;
+ NvHandle hClient;
+ NvHandle hSysMem;
+ NV_DECLARE_ALIGNED(NvU64 gspFbAllocsSysOffset, 8);
+ NvBool bEnteringGcoffState;
+} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS {
+ NvU32 fbsrType;
+ NvHandle hClient;
+ NvHandle hVidMem;
+ NV_DECLARE_ALIGNED(NvU64 vidOffset, 8);
+ NV_DECLARE_ALIGNED(NvU64 sysOffset, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+} NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h
new file mode 100644
index 000000000000..325fdd8b6090
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FIFO_H__
+#define __NVRM_FIFO_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES 32
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES 16
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA 2
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN 16
+
+typedef struct NV2080_CTRL_FIFO_DEVICE_ENTRY {
+ NvU32 engineData[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES];
+ NvU32 pbdmaIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+ NvU32 pbdmaFaultIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+ NvU32 numPbdmas;
+ char engineName[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN];
+} NV2080_CTRL_FIFO_DEVICE_ENTRY;
+
+#define NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE (0x20801112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS {
+ NvU32 baseIndex;
+ NvU32 numEntries;
+ NvBool bMore;
+ // C form: NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+ NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+} NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS;
+
+typedef enum
+{
+ /* *************************************************************************
+ * Bug 3820969
+ * THINK BEFORE CHANGING ENUM ORDER HERE.
+ * VGPU-guest uses this same ordering. Because this enum is not versioned,
+ * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
+ * ************************************************************************/
+
+ // *ENG_XYZ, e.g.: ENG_GR, ENG_CE etc.,
+ ENGINE_INFO_TYPE_ENG_DESC = 0,
+
+ // HW engine ID
+ ENGINE_INFO_TYPE_FIFO_TAG,
+
+ // RM_ENGINE_TYPE_*
+ ENGINE_INFO_TYPE_RM_ENGINE_TYPE,
+
+ //
+ // runlist id (meaning varies by GPU)
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_RUNLIST,
+
+ // NV_PFIFO_INTR_MMU_FAULT_ENG_ID_*
+ ENGINE_INFO_TYPE_MMU_FAULT_ID,
+
+ // ROBUST_CHANNEL_*
+ ENGINE_INFO_TYPE_RC_MASK,
+
+ // Reset Bit Position. On Ampere, only valid if not _INVALID
+ ENGINE_INFO_TYPE_RESET,
+
+ // Interrupt Bit Position
+ ENGINE_INFO_TYPE_INTR,
+
+ // log2(MC_ENGINE_*)
+ ENGINE_INFO_TYPE_MC,
+
+ // The DEV_TYPE_ENUM for this engine
+ ENGINE_INFO_TYPE_DEV_TYPE_ENUM,
+
+ // The particular instance of this engine type
+ ENGINE_INFO_TYPE_INSTANCE_ID,
+
+ //
+ // The base address for this engine's NV_RUNLIST. Valid only on Ampere+
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_RUNLIST_PRI_BASE,
+
+ //
+ // If this entry is a host-driven engine.
+ // Update _isEngineInfoTypeValidForOnlyHostDriven when adding any new entry.
+ //
+ ENGINE_INFO_TYPE_IS_HOST_DRIVEN_ENGINE,
+
+ //
+ // The index into the per-engine NV_RUNLIST registers. Valid only on Ampere+
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_RUNLIST_ENGINE_ID,
+
+ //
+ // The base address for this engine's NV_CHRAM registers. Valid only on
+ // Ampere+
+ //
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_CHRAM_PRI_BASE,
+
+ // This entry added to copy data at RMCTRL_EXPORT() call for Kernel RM
+ ENGINE_INFO_TYPE_KERNEL_RM_MAX,
+ // Used for iterating the engine info table by the index passed.
+ ENGINE_INFO_TYPE_INVALID = ENGINE_INFO_TYPE_KERNEL_RM_MAX,
+
+ // Size of FIFO_ENGINE_LIST.engineData
+ ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE = ENGINE_INFO_TYPE_INVALID,
+
+ // Input-only parameter for kfifoEngineInfoXlate.
+ ENGINE_INFO_TYPE_PBDMA_ID
+
+ /* *************************************************************************
+ * Bug 3820969
+ * THINK BEFORE CHANGING ENUM ORDER HERE.
+ * VGPU-guest uses this same ordering. Because this enum is not versioned,
+ * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
+ * ************************************************************************/
+} ENGINE_INFO_TYPE;
+
+#define NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE (0x20802a08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS {
+ NvU32 size;
+} NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS 0x40
+
+typedef struct NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO {
+ NvU32 engDesc;
+ NvU32 ctxAttr;
+ NvU32 ctxBufferSize;
+ NvU32 addrSpaceList;
+ NvU32 registerBase;
+} NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO;
+
+#define NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO (0x20800a42) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
+ NvU32 numConstructedFalcons;
+ NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS];
+} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS;
+
+#define NV_MAX_SUBDEVICES 8
+
+typedef struct NV_MEMORY_DESC_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 base, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 addressSpace;
+ NvU32 cacheAttrib;
+} NV_MEMORY_DESC_PARAMS;
+
+#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U
+
+#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U
+
+typedef struct NV_CHANNEL_ALLOC_PARAMS {
+
+ NvHandle hObjectError; // error context DMA
+ NvHandle hObjectBuffer; // no longer used
+ NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO
+ NvU32 gpFifoEntries; // number of GP FIFO entries
+
+ NvU32 flags;
+
+
+ NvHandle hContextShare; // context share handle
+ NvHandle hVASpace; // VASpace for the channel
+
+ // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0
+ NvHandle hUserdMemory[NV_MAX_SUBDEVICES];
+
+ // offset to beginning of UserD within hUserdMemory[x]
+ NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8);
+
+ // engine type(NV2080_ENGINE_TYPE_*) with which this channel is associated
+ NvU32 engineType;
+ // Channel identifier that is unique for the duration of a RM session
+ NvU32 cid;
+ // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods
+ NvU32 subDeviceId;
+ NvHandle hObjectEccError; // ECC error context DMA
+
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8);
+
+ NvHandle hPhysChannelGroup; // reserved
+ NvU32 internalFlags; // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved
+ NvU32 ProcessID; // reserved
+ NvU32 SubProcessID; // reserved
+
+ // IV used for CPU-side encryption / GPU-side decryption.
+ NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // IV used for CPU-side decryption / GPU-side encryption.
+ NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // Nonce used for CPU-side signing / GPU-side signature verification.
+ NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved
+} NV_CHANNEL_ALLOC_PARAMS;
+
+typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS;
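+
+/*
+ * Minimal fill sketch for the structure above, as a kernel GSP-RM
+ * client might issue it. The handle and engine-type values are
+ * placeholders, not taken from the RM headers.
+ */
+static inline void
+example_chan_args(NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args,
+ NvHandle errctx, NvU64 gpfifo_addr, NvU32 entries)
+{
+ memset(args, 0, sizeof(*args));
+ args->hObjectError = errctx; /* error notifier object */
+ args->gpFifoOffset = gpfifo_addr; /* GPU VA of the GP FIFO ring */
+ args->gpFifoEntries = entries; /* ring depth, typically a power of two */
+ args->engineType = 0x1; /* hypothetical NV2080_ENGINE_TYPE_* value */
+}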
+
+#define NVOS04_FLAGS_CHANNEL_TYPE 1:0
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL 0x00000000
+#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL 0x00000001 // OBSOLETE
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL 0x00000002 // OBSOLETE
+#define NVOS04_FLAGS_VPR 2:2
+#define NVOS04_FLAGS_VPR_FALSE 0x00000000
+#define NVOS04_FLAGS_VPR_TRUE 0x00000001
+#define NVOS04_FLAGS_CC_SECURE 2:2
+#define NVOS04_FLAGS_CC_SECURE_FALSE 0x00000000
+#define NVOS04_FLAGS_CC_SECURE_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING 3:3
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT 0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE 0x00000001
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE 0x00000001
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING 6:6
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE 0x00000000
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE 7:7
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED 11:11
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED 21:21
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV 22:22
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER 23:23
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO 24:24
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE 0x00000001
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL 25:25
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT 26:26
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT 27:27
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD 29:28
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT 0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO 0x00000002
+#define NVOS04_FLAGS_MAP_CHANNEL 30:30
+#define NVOS04_FLAGS_MAP_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_MAP_CHANNEL_TRUE 0x00000001
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC 31:31
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE 0x00000000
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE 0x00000001
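+
+/*
+ * The HI:LO pairs above are DRF-style bit ranges within the 32-bit
+ * flags word. A sketch of packing two fields, e.g. GROUP_CHANNEL_RUNQUEUE
+ * at bit 4 and USERD_INDEX_VALUE at bits 10:8; the EX_FIELD() helper is
+ * hypothetical (nouveau has its own DRF macros for this).
+ */
+#define EX_FIELD(hi, lo, v) (((NvU32)(v) & ((1u << ((hi) - (lo) + 1)) - 1)) << (lo))
+
+static inline NvU32
+example_chan_flags(void)
+{
+ return EX_FIELD(4, 4, NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE) |
+ EX_FIELD(10, 8, 2 /* USERD slot 2 */);
+}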
+
+typedef enum {
+ /*!
+ * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by
+ * kernel CPU-RM clients.
+ */
+ ERROR_NOTIFIER_TYPE_UNKNOWN = 0,
+ /*! @brief Error notifier is explicitly not set.
+ *
+ * The corresponding hErrorContext or hEccErrorContext must be
+ * NV01_NULL_OBJECT.
+ */
+ ERROR_NOTIFIER_TYPE_NONE,
+ /*! @brief Error notifier is a ContextDma */
+ ERROR_NOTIFIER_TYPE_CTXDMA,
+ /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */
+ ERROR_NOTIFIER_TYPE_MEMORY
+} ErrorNotifierType;
+
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+
+#define NVA06F_CTRL_CMD_BIND (0xa06f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID" */
+typedef struct NVA06F_CTRL_BIND_PARAMS {
+ NvU32 engineType;
+} NVA06F_CTRL_BIND_PARAMS;
+
+#define NVA06F_CTRL_CMD_GPFIFO_SCHEDULE (0xa06f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID" */
+typedef struct NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS {
+ NvBool bEnable;
+ NvBool bSkipSubmit;
+} NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS;
+
+#define NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES 16U
+
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY {
+ NV_DECLARE_ALIGNED(NvU64 gpuPhysAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 gpuVirtAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 physAttr;
+ NvU16 bufferId;
+ NvU8 bInitialize;
+ NvU8 bNonmapped;
+} NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY;
+
+#define NV2080_CTRL_CMD_GPU_PROMOTE_CTX (0x2080012bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS {
+ NvU32 engineType;
+ NvHandle hClient;
+ NvU32 ChID;
+ NvHandle hChanClient;
+ NvHandle hObject;
+ NvHandle hVirtMemory;
+ NV_DECLARE_ALIGNED(NvU64 virtAddress, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 entryCount;
+ // C form: NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES];
+ NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES], 8);
+} NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS;
+
+typedef struct rpc_rc_triggered_v17_02
+{
+ NvU32 nv2080EngineType;
+ NvU32 chid;
+ NvU32 exceptType;
+ NvU32 scope;
+ NvU16 partitionAttributionId;
+} rpc_rc_triggered_v17_02;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h
index 6acb3f73242d..82c5ec727bb4 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h
@@ -1,30 +1,31 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080fifo_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080fifo_h__
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GR_H__
+#define __NVRM_GR_H__
+#include <nvrm/nvtypes.h>
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8
+
+#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x19
+
+typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO {
+ NvU32 size;
+ NvU32 alignment;
+} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO {
+ NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT];
+} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS {
+ NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS;
#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0
#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000)
@@ -54,4 +55,19 @@
#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018)
#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x00000019)
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN 0U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM 1U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH 2U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB 3U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL 4U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB 5U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL 6U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL 7U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK 8U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT 9U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP 10U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP 11U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP 12U
+
+#include "fifo.h"
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h
new file mode 100644
index 000000000000..b6683a5bf870
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GSP_H__
+#define __NVRM_GSP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U
+
+typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
+ NV_DECLARE_ALIGNED(NvU64 base, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NV_DECLARE_ALIGNED(NvU64 reserved, 8);
+ NvU32 performance;
+ NvBool supportCompressed;
+ NvBool supportISO;
+ NvBool bProtected;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList;
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO;
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
+ NvU32 numFBRegions;
+ NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8);
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+
+#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23
+
+#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL)
+
+typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
+ NvU32 index;
+ NvU32 flags;
+ NvU32 length;
+ NvU8 data[NV2080_GPU_MAX_GID_LENGTH];
+} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
+ NvU32 gpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
+ NvU32 gpcId;
+ NvU32 tpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS {
+ NvU32 gpcId;
+ NvU32 zcullMask;
+} NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
+ NvU32 BoardID;
+ char chipSKU[4];
+ char chipSKUMod[2];
+ char project[5];
+ char projectSKU[5];
+ char CDP[6];
+ char projectSKUMod[2];
+ NvU32 businessCycle;
+} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
+
+typedef enum
+{
+ COMPUTE_BRANDING_TYPE_NONE,
+ COMPUTE_BRANDING_TYPE_TESLA,
+} COMPUTE_BRANDING_TYPE;
+
+#define MAX_GPC_COUNT 32
+
+typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
+ NvU32 totalVFs;
+ NvU32 firstVfOffset;
+ NvU32 vfFeatureMask;
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+ NvBool bSriovEnabled;
+ NvBool bSriovHeavyEnabled;
+ NvBool bEmulateVFBar0TlbInvalidationRegister;
+ NvBool bClientRmAllocatedCtxBuffer;
+} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
+
+#include "engine.h"
+
+#define NVGPU_ENGINE_CAPS_MASK_BITS 32
+
+#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)
+
+typedef struct GspSMInfo_t
+{
+ NvU32 version;
+ NvU32 regBankCount;
+ NvU32 regBankRegCount;
+ NvU32 maxWarpsPerSM;
+ NvU32 maxThreadsPerWarp;
+ NvU32 geomGsObufEntries;
+ NvU32 geomXbufEntries;
+ NvU32 maxSPPerSM;
+ NvU32 rtCoreCount;
+} GspSMInfo;
+
+typedef enum NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS {
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_MAIN = 0,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SPILL = 1,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_PAGEPOOL = 2,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_BETACB = 3,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_RTV = 4,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL = 5,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL = 6,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL_CPU = 7,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END = 8,
+} NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS;
+
+#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U)
+
+typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
+{
+ NvU32 numHeads;
+ NvU32 maxNumHeads;
+} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
+
+typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
+{
+ NvU32 headIndex;
+ NvU32 maxHResolution;
+ NvU32 maxVResolution;
+} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
+
+typedef struct GspStaticConfigInfo_t
+{
+ NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE];
+ NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo;
+ NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS gpcInfo;
+ NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpcInfo[MAX_GPC_COUNT];
+ NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS zcullInfo[MAX_GPC_COUNT];
+ NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams;
+ COMPUTE_BRANDING_TYPE computeBranding;
+
+ NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps;
+ NvU32 sriovMaxGfid;
+
+ NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX];
+
+ GspSMInfo SM_info;
+
+ NvBool poisonFuseEnabled;
+
+ NvU64 fb_length;
+ NvU32 fbio_mask;
+ NvU32 fb_bus_width;
+ NvU32 fb_ram_type;
+ NvU32 fbp_mask;
+ NvU32 l2_cache_size;
+
+ NvU32 gfxpBufferSize[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
+ NvU32 gfxpBufferAlignment[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
+
+ NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvBool bGpuInternalSku;
+ NvBool bIsQuadroGeneric;
+ NvBool bIsQuadroAd;
+ NvBool bIsNvidiaNvs;
+ NvBool bIsVgx;
+ NvBool bGeforceSmb;
+ NvBool bIsTitan;
+ NvBool bIsTesla;
+ NvBool bIsMobile;
+ NvBool bIsGc6Rtd3Allowed;
+ NvBool bIsGcOffRtd3Allowed;
+ NvBool bIsGcoffLegacyAllowed;
+
+ NvU64 bar1PdeBase;
+ NvU64 bar2PdeBase;
+
+ NvBool bVbiosValid;
+ NvU32 vbiosSubVendor;
+ NvU32 vbiosSubDevice;
+
+ NvBool bPageRetirementSupported;
+
+ NvBool bSplitVasBetweenServerClientRm;
+
+ NvBool bClRootportNeedsNosnoopWAR;
+
+ VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads;
+ VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution;
+ NvU64 displaylessMaxPixels;
+
+ // Client handle for internal RMAPI control.
+ NvHandle hInternalClient;
+
+ // Device handle for internal RMAPI control.
+ NvHandle hInternalDevice;
+
+ // Subdevice handle for internal RMAPI control.
+ NvHandle hInternalSubdevice;
+
+ NvBool bSelfHostedMode;
+ NvBool bAtsSupported;
+
+ NvBool bIsGpuUefi;
+} GspStaticConfigInfo;
+
+typedef struct rpc_unloading_guest_driver_v1F_07
+{
+ NvBool bInPMTransition;
+ NvBool bGc6Entering;
+ NvU32 newLevel;
+} rpc_unloading_guest_driver_v1F_07;
+
+typedef struct PACKED_REGISTRY_ENTRY
+{
+ NvU32 nameOffset;
+ NvU8 type;
+ NvU32 data;
+ NvU32 length;
+} PACKED_REGISTRY_ENTRY;
+
+typedef struct PACKED_REGISTRY_TABLE
+{
+ NvU32 size;
+ NvU32 numEntries;
+ PACKED_REGISTRY_ENTRY entries[] __counted_by(numEntries);
+} PACKED_REGISTRY_TABLE;
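+
+/*
+ * Layout sketch for the packed registry: the header is followed by
+ * numEntries fixed-size entries and then a string pool of NUL-terminated
+ * key names, with each entry's nameOffset read (on our understanding) as
+ * a byte offset from the start of the table. The size helper below is
+ * illustrative only.
+ */
+static inline NvU32
+example_registry_blob_size(NvU32 nr_entries, NvU32 name_pool_bytes)
+{
+ return sizeof(PACKED_REGISTRY_TABLE) +
+ nr_entries * sizeof(PACKED_REGISTRY_ENTRY) + name_pool_bytes;
+}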
+
+typedef struct
+{
+ NvU16 deviceID; // deviceID
+ NvU16 vendorID; // vendorID
+ NvU16 subdeviceID; // subsystem deviceID
+ NvU16 subvendorID; // subsystem vendorID
+ NvU8 revisionID; // revision ID
+} BUSINFO;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct DOD_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 acpiIdListLen;
+ NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} DOD_METHOD_DATA;
+
+typedef struct JT_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 jtCaps;
+ NvU16 jtRevId;
+ NvBool bSBIOSCaps;
+} JT_METHOD_DATA;
+
+typedef struct MUX_METHOD_DATA_ELEMENT
+{
+ NvU32 acpiId;
+ NvU32 mode;
+ NV_STATUS status;
+} MUX_METHOD_DATA_ELEMENT;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct MUX_METHOD_DATA
+{
+ NvU32 tableLen;
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} MUX_METHOD_DATA;
+
+typedef struct CAPS_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 optimusCaps;
+} CAPS_METHOD_DATA;
+
+typedef struct ACPI_METHOD_DATA
+{
+ NvBool bValid;
+ DOD_METHOD_DATA dodMethodData;
+ JT_METHOD_DATA jtMethodData;
+ MUX_METHOD_DATA muxMethodData;
+ CAPS_METHOD_DATA capsMethodData;
+} ACPI_METHOD_DATA;
+
+typedef struct GSP_VF_INFO
+{
+ NvU32 totalVFs;
+ NvU32 firstVFOffset;
+ NvU64 FirstVFBar0Address;
+ NvU64 FirstVFBar1Address;
+ NvU64 FirstVFBar2Address;
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+} GSP_VF_INFO;
+
+typedef struct GspSystemInfo
+{
+ NvU64 gpuPhysAddr;
+ NvU64 gpuPhysFbAddr;
+ NvU64 gpuPhysInstAddr;
+ NvU64 nvDomainBusDeviceFunc;
+ NvU64 simAccessBufPhysAddr;
+ NvU64 pcieAtomicsOpMask;
+ NvU64 consoleMemSize;
+ NvU64 maxUserVa;
+ NvU32 pciConfigMirrorBase;
+ NvU32 pciConfigMirrorSize;
+ NvU8 oorArch;
+ NvU64 clPdbProperties;
+ NvU32 Chipset;
+ NvBool bGpuBehindBridge;
+ NvBool bMnocAvailable;
+ NvBool bUpstreamL0sUnsupported;
+ NvBool bUpstreamL1Unsupported;
+ NvBool bUpstreamL1PorSupported;
+ NvBool bUpstreamL1PorMobileOnly;
+ NvU8 upstreamAddressValid;
+ BUSINFO FHBBusInfo;
+ BUSINFO chipsetIDInfo;
+ ACPI_METHOD_DATA acpiMethodData;
+ NvU32 hypervisorType;
+ NvBool bIsPassthru;
+ NvU64 sysTimerOffsetNs;
+ GSP_VF_INFO gspVFInfo;
+} GspSystemInfo;
+
+typedef struct rpc_os_error_log_v17_00
+{
+ NvU32 exceptType;
+ NvU32 runlistId;
+ NvU32 chid;
+ char errString[0x100];
+} rpc_os_error_log_v17_00;
+
+typedef struct rpc_run_cpu_sequencer_v17_00
+{
+ NvU32 bufferSizeDWord;
+ NvU32 cmdIndex;
+ NvU32 regSaveArea[8];
+ NvU32 commandBuffer[];
+} rpc_run_cpu_sequencer_v17_00;
+
+typedef enum GSP_SEQ_BUF_OPCODE
+{
+ GSP_SEQ_BUF_OPCODE_REG_WRITE = 0,
+ GSP_SEQ_BUF_OPCODE_REG_MODIFY,
+ GSP_SEQ_BUF_OPCODE_REG_POLL,
+ GSP_SEQ_BUF_OPCODE_DELAY_US,
+ GSP_SEQ_BUF_OPCODE_REG_STORE,
+ GSP_SEQ_BUF_OPCODE_CORE_RESET,
+ GSP_SEQ_BUF_OPCODE_CORE_START,
+ GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
+ GSP_SEQ_BUF_OPCODE_CORE_RESUME,
+} GSP_SEQ_BUF_OPCODE;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_REG_WRITE;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 mask;
+ NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_REG_MODIFY;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 mask;
+ NvU32 val;
+ NvU32 timeout;
+ NvU32 error;
+} GSP_SEQ_BUF_PAYLOAD_REG_POLL;
+
+typedef struct
+{
+ NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_DELAY_US;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 index;
+} GSP_SEQ_BUF_PAYLOAD_REG_STORE;
+
+typedef struct GSP_SEQUENCER_BUFFER_CMD
+{
+ GSP_SEQ_BUF_OPCODE opCode;
+ union
+ {
+ GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite;
+ GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify;
+ GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll;
+ GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs;
+ GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore;
+ } payload;
+} GSP_SEQUENCER_BUFFER_CMD;
+
+typedef struct
+{
+ // Magic
+ // BL to use for verification (i.e. Booter locked it in WPR2)
+ NvU64 magic; // = 0xdc3aae21371a60b3;
+
+ // Revision number of Booter-BL-Sequencer handoff interface
+ // Bumped up when we change this interface so it is not backward compatible.
+ // Bumped up when we revoke GSP-RM ucode
+ NvU64 revision; // = 1;
+
+ // ---- Members regarding data in SYSMEM ----------------------------
+ // Consumed by Booter for DMA
+
+ NvU64 sysmemAddrOfRadix3Elf;
+ NvU64 sizeOfRadix3Elf;
+
+ NvU64 sysmemAddrOfBootloader;
+ NvU64 sizeOfBootloader;
+
+ // Offsets inside bootloader image needed by Booter
+ NvU64 bootloaderCodeOffset;
+ NvU64 bootloaderDataOffset;
+ NvU64 bootloaderManifestOffset;
+
+ union
+ {
+ // Used only at initial boot
+ struct
+ {
+ NvU64 sysmemAddrOfSignature;
+ NvU64 sizeOfSignature;
+ };
+
+ //
+ // Used at suspend/resume to read GspFwHeapFreeList
+ // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
+ //
+ struct
+ {
+ NvU32 gspFwHeapFreeListWprOffset;
+ NvU32 unused0;
+ NvU64 unused1;
+ };
+ };
+
+ // ---- Members describing FB layout --------------------------------
+ NvU64 gspFwRsvdStart;
+
+ NvU64 nonWprHeapOffset;
+ NvU64 nonWprHeapSize;
+
+ NvU64 gspFwWprStart;
+
+ // GSP-RM to use to setup heap.
+ NvU64 gspFwHeapOffset;
+ NvU64 gspFwHeapSize;
+
+ // BL to use to find ELF for jump
+ NvU64 gspFwOffset;
+ // Size is sizeOfRadix3Elf above.
+
+ NvU64 bootBinOffset;
+ // Size is sizeOfBootloader above.
+
+ NvU64 frtsOffset;
+ NvU64 frtsSize;
+
+ NvU64 gspFwWprEnd;
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 fbSize;
+
+ // ---- Other members -----------------------------------------------
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 vgaWorkspaceOffset;
+ NvU64 vgaWorkspaceSize;
+
+ // Boot count. Used to determine whether to load the firmware image.
+ NvU64 bootCount;
+
+ // This union is organized the way it is to start at an 8-byte boundary and achieve natural
+ // packing of the internal struct fields.
+ union
+ {
+ struct
+ {
+ // TODO: the partitionRpc* fields below do not really belong in this
+ // structure. The values are patched in by the partition bootstrapper
+ // when GSP-RM is booted in a partition, and this structure was a
+ // convenient place for the bootstrapper to access them. These should
+ // be moved to a different comm. mechanism between the bootstrapper
+ // and the GSP-RM tasks.
+
+ // Shared partition RPC memory (physical address)
+ NvU64 partitionRpcAddr;
+
+ // Offsets relative to partitionRpcAddr
+ NvU16 partitionRpcRequestOffset;
+ NvU16 partitionRpcReplyOffset;
+
+ // Code section and dataSection offset and size.
+ NvU32 elfCodeOffset;
+ NvU32 elfDataOffset;
+ NvU32 elfCodeSize;
+ NvU32 elfDataSize;
+
+ // Used during GSP-RM resume to check for revocation
+ NvU32 lsUcodeVersion;
+ };
+
+ struct
+ {
+ // Pad for the partitionRpc* fields, plus 4 bytes
+ NvU32 partitionRpcPadding[4];
+
+ // CrashCat (contiguous) buffer size/location - occupies same bytes as the
+ // elf(Code|Data)(Offset|Size) fields above.
+ // TODO: move to GSP_FMC_INIT_PARAMS
+ NvU64 sysmemAddrOfCrashReportQueue;
+ NvU32 sizeOfCrashReportQueue;
+
+ // Pad for the lsUcodeVersion field
+ NvU32 lsUcodeVersionPadding[1];
+ };
+ };
+
+ // Number of VF partitions allocating sub-heaps from the WPR heap
+ // Used during boot to ensure the heap is adequately sized
+ NvU8 gspFwHeapVfPartitionCount;
+
+ // Pad structure to exactly 256 bytes. Can replace padding with additional
+ // fields without incrementing revision. Padding initialized to 0.
+ NvU8 padding[7];
+
+ // BL to use for verification (i.e. Booter says OK to boot)
+ NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
+} GspFwWprMeta;
+
+#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
+
+#define GSP_FW_WPR_META_REVISION 1
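+
+/*
+ * Minimal sketch of the check the two constants above imply: reject a
+ * WPR metadata block whose magic or interface revision does not match
+ * what the driver was built against.
+ */
+static inline NvBool
+example_wpr_meta_ok(const GspFwWprMeta *meta)
+{
+ return meta->magic == GSP_FW_WPR_META_MAGIC &&
+ meta->revision == GSP_FW_WPR_META_REVISION;
+}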
+
+typedef struct
+{
+ NvU32 version; // queue version
+ NvU32 size; // bytes, page aligned
+ NvU32 msgSize; // entry size, bytes, must be power-of-2, 16 is minimum
+ NvU32 msgCount; // number of entries in queue
+ NvU32 writePtr; // message id of next slot
+ NvU32 flags; // if set it means "i want to swap RX"
+ NvU32 rxHdrOff; // Offset of msgqRxHeader from start of backing store.
+ NvU32 entryOff; // Offset of entries from start of backing store.
+} msgqTxHeader;
+
+typedef struct
+{
+ NvU32 readPtr; // message id of last message read
+} msgqRxHeader;
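+
+/*
+ * Sketch of the ring arithmetic these two headers imply: writePtr and
+ * readPtr are element indices that wrap at msgCount, so the unread
+ * element count is their wrapped difference. r535_gsp_msgq_wait() in
+ * rpc.c later in this series does the same computation in page units.
+ */
+static inline NvU32
+example_msgq_used(const msgqTxHeader *tx, const msgqRxHeader *rx)
+{
+ NvU32 used = tx->writePtr + tx->msgCount - rx->readPtr;
+
+ return used >= tx->msgCount ? used - tx->msgCount : used;
+}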
+
+typedef struct {
+ RmPhysAddr sharedMemPhysAddr;
+ NvU32 pageTableEntryCount;
+ NvLength cmdQueueOffset;
+ NvLength statQueueOffset;
+ NvLength locklessCmdQueueOffset;
+ NvLength locklessStatQueueOffset;
+} MESSAGE_QUEUE_INIT_ARGUMENTS;
+
+typedef struct {
+ NvU32 oldLevel;
+ NvU32 flags;
+ NvBool bInPMTransition;
+} GSP_SR_INIT_ARGUMENTS;
+
+typedef struct
+{
+ MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments;
+ GSP_SR_INIT_ARGUMENTS srInitArguments;
+ NvU32 gpuInstance;
+
+ struct
+ {
+ NvU64 pa;
+ NvU64 size;
+ } profilerArgs;
+} GSP_ARGUMENTS_CACHED;
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0 (0x00000000U)
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U)
+
+typedef NvU64 LibosAddress;
+
+typedef struct
+{
+ LibosAddress id8; // Id tag.
+ LibosAddress pa; // Physical address.
+ LibosAddress size; // Size of memory area.
+ NvU8 kind; // See LibosMemoryRegionKind above.
+ NvU8 loc; // See LibosMemoryRegionLoc above.
+} LibosMemoryRegionInitArgument;
+
+typedef enum {
+ LIBOS_MEMORY_REGION_NONE,
+ LIBOS_MEMORY_REGION_CONTIGUOUS,
+ LIBOS_MEMORY_REGION_RADIX3
+} LibosMemoryRegionKind;
+
+typedef enum {
+ LIBOS_MEMORY_REGION_LOC_NONE,
+ LIBOS_MEMORY_REGION_LOC_SYSMEM,
+ LIBOS_MEMORY_REGION_LOC_FB
+} LibosMemoryRegionLoc;
+
+typedef struct
+{
+ //
+ // Magic
+ // Use for verification by Booter
+ //
+ NvU64 magic; // = GSP_FW_SR_META_MAGIC;
+
+ //
+ // Revision number
+ // Bumped up when we change this interface so it is not backward compatible.
+ // Bumped up when we revoke GSP-RM ucode
+ //
+ NvU64 revision; // = GSP_FW_SR_META_MAGIC_REVISION;
+
+ //
+ // ---- Members regarding data in SYSMEM ----------------------------
+ // Consumed by Booter for DMA
+ //
+ NvU64 sysmemAddrOfSuspendResumeData;
+ NvU64 sizeOfSuspendResumeData;
+
+ // ---- Members for crypto ops across S/R ---------------------------
+
+ //
+ // HMAC over the entire GspFwSRMeta structure (including padding)
+ // with the hmac field itself zeroed.
+ //
+ NvU8 hmac[32];
+
+ // Hash over GspFwWprMeta structure
+ NvU8 wprMetaHash[32];
+
+ // Hash over GspFwHeapFreeList structure. All zeros signifies no free list.
+ NvU8 heapFreeListHash[32];
+
+ // Hash over data in WPR2 (skipping over free heap chunks; see Booter for details)
+ NvU8 dataHash[32];
+
+ //
+ // Pad structure to exactly 256 bytes (1 DMA chunk).
+ // Padding initialized to zero.
+ //
+ NvU32 padding[24];
+
+} GspFwSRMeta;
+
+#define GSP_FW_SR_META_MAGIC 0x8a3bb9e6c6c39d93ULL
+
+#define GSP_FW_SR_META_REVISION 2
+
+#define GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode) \
+ ((opcode == GSP_SEQ_BUF_OPCODE_REG_WRITE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_REG_MODIFY) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_REG_POLL) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_DELAY_US) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_REG_STORE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE) / sizeof(NvU32)) : \
+ /* GSP_SEQ_BUF_OPCODE_CORE_RESET */ \
+ /* GSP_SEQ_BUF_OPCODE_CORE_START */ \
+ /* GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT */ \
+ /* GSP_SEQ_BUF_OPCODE_CORE_RESUME */ \
+ 0)
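+
+/*
+ * Sketch of consuming a sequencer buffer with the macro above: each
+ * command is an opcode DWORD followed by that opcode's payload DWORDs,
+ * so the cursor advances by 1 + GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(op).
+ * The walk below is illustrative; only the types it uses are real.
+ */
+static inline void
+example_seq_walk(const rpc_run_cpu_sequencer_v17_00 *seq)
+{
+ NvU32 idx = 0;
+
+ while (idx < seq->cmdIndex) {
+ GSP_SEQ_BUF_OPCODE op = seq->commandBuffer[idx++];
+
+ /* dispatch on op here; then skip its payload, possibly zero DWORDs */
+ idx += GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(op);
+ }
+}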
+
+typedef struct {
+ //
+ // Version 1
+ // Version 2
+ // Version 3 = for Partition boot
+ // Version 4 = for eb riscv boot
+ // Version 5 = Support signing entire RISC-V image as "code" in code section for Hopper and later.
+ //
+ NvU32 version; // structure version
+ NvU32 bootloaderOffset;
+ NvU32 bootloaderSize;
+ NvU32 bootloaderParamOffset;
+ NvU32 bootloaderParamSize;
+ NvU32 riscvElfOffset;
+ NvU32 riscvElfSize;
+ NvU32 appVersion; // Changelist number associated with the image
+ //
+ // Manifest contains information about Monitor and it is
+ // input to BR
+ //
+ NvU32 manifestOffset;
+ NvU32 manifestSize;
+ //
+ // Monitor Data offset within RISCV image and size
+ //
+ NvU32 monitorDataOffset;
+ NvU32 monitorDataSize;
+ //
+ // Monitor Code offset within RISCV image and size
+ //
+ NvU32 monitorCodeOffset;
+ NvU32 monitorCodeSize;
+ NvU32 bIsMonitorEnabled;
+ //
+ // Swbrom Code offset within RISCV image and size
+ //
+ NvU32 swbromCodeOffset;
+ NvU32 swbromCodeSize;
+ //
+ // Swbrom Data offset within RISCV image and size
+ //
+ NvU32 swbromDataOffset;
+ NvU32 swbromDataSize;
+ //
+ // Total size of FB carveout (image and reserved space).
+ //
+ NvU32 fbReservedSize;
+ //
+ // Indicates whether the entire RISC-V image is signed as "code" in code section.
+ //
+ NvU32 bSignedAsCode;
+} RM_RISCV_UCODE_DESC;
+
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY {
+ NvU16 engineIdx;
+ NvU32 pmcIntrMask;
+ NvU32 vectorStall;
+ NvU32 vectorNonStall;
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY;
+
+typedef struct NV2080_INTR_CATEGORY_SUBTREE_MAP {
+ NvU8 subtreeStart;
+ NvU8 subtreeEnd;
+} NV2080_INTR_CATEGORY_SUBTREE_MAP;
+
+#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE 128
+
+typedef enum NV2080_INTR_CATEGORY {
+ NV2080_INTR_CATEGORY_DEFAULT = 0,
+ NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE = 1,
+ NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE_NOTIFICATION = 2,
+ NV2080_INTR_CATEGORY_RUNLIST = 3,
+ NV2080_INTR_CATEGORY_RUNLIST_NOTIFICATION = 4,
+ NV2080_INTR_CATEGORY_UVM_OWNED = 5,
+ NV2080_INTR_CATEGORY_UVM_SHARED = 6,
+ NV2080_INTR_CATEGORY_ENUM_COUNT = 7,
+} NV2080_INTR_CATEGORY;
+
+#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS {
+ NvU32 tableLen;
+ NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE];
+ NV2080_INTR_CATEGORY_SUBTREE_MAP subtreeMap[NV2080_INTR_CATEGORY_ENUM_COUNT];
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS;
+
+#define GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB (96 << 10) // All architectures
+
+#define GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE ((48 << 10) * 2048) // Support 2048 channels
+
+typedef union rpc_message_rpc_union_field_v03_00
+{
+ NvU32 spare;
+ NvU32 cpuRmGfid;
+} rpc_message_rpc_union_field_v03_00;
+
+typedef rpc_message_rpc_union_field_v03_00 rpc_message_rpc_union_field_v;
+
+typedef struct rpc_message_header_v03_00
+{
+ NvU32 header_version;
+ NvU32 signature;
+ NvU32 length;
+ NvU32 function;
+ NvU32 rpc_result;
+ NvU32 rpc_result_private;
+ NvU32 sequence;
+ rpc_message_rpc_union_field_v u;
+ rpc_generic_union rpc_message_data[];
+} rpc_message_header_v03_00;
+
+typedef rpc_message_header_v03_00 rpc_message_header_v;
+
+typedef struct GSP_MSG_QUEUE_ELEMENT
+{
+ NvU8 authTagBuffer[16]; // Authentication tag buffer.
+ NvU8 aadBuffer[16]; // AAD buffer.
+ NvU32 checkSum; // Set to value needed to make checksum always zero.
+ NvU32 seqNum; // Sequence number maintained by the message queue.
+ NvU32 elemCount; // Number of message queue elements this message has.
+ NV_DECLARE_ALIGNED(rpc_message_header_v rpc, 8);
+} GSP_MSG_QUEUE_ELEMENT;
+
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2 (0 << 20) // No FB heap usage
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3 (20 << 20)
+
+#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X (8 << 20) // Turing thru Ada
+
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB (64u)
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB (84u)
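+
+/*
+ * Sketch of how the heap parameters above combine, modelled loosely on
+ * nouveau's WPR heap sizing; treat the exact formula as an assumption.
+ * The heap scales with FB size plus the fixed OS-carveout, base-RM and
+ * client-allocation terms, clamped to the per-LIBOS minimum.
+ */
+static inline NvU64
+example_wpr_heap_size_libos3(NvU64 fb_size_gb)
+{
+ NvU64 size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3 +
+ GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X +
+ GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE +
+ fb_size_gb * GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB;
+ NvU64 min = (NvU64)GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB << 20;
+
+ return size > min ? size : min;
+}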
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h
new file mode 100644
index 000000000000..642c13aec325
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_MSGFN_H__
+#define __NVRM_MSGFN_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#ifndef E
+# define E(RPC) NV_VGPU_MSG_EVENT_##RPC,
+# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+ E(FIRST_EVENT = 0x1000) // 0x1000
+ E(GSP_INIT_DONE) // 0x1001
+ E(GSP_RUN_CPU_SEQUENCER) // 0x1002
+ E(POST_EVENT) // 0x1003
+ E(RC_TRIGGERED) // 0x1004
+ E(MMU_FAULT_QUEUED) // 0x1005
+ E(OS_ERROR_LOG) // 0x1006
+ E(RG_LINE_INTR) // 0x1007
+ E(GPUACCT_PERFMON_UTIL_SAMPLES) // 0x1008
+ E(SIM_READ) // 0x1009
+ E(SIM_WRITE) // 0x100a
+ E(SEMAPHORE_SCHEDULE_CALLBACK) // 0x100b
+ E(UCODE_LIBOS_PRINT) // 0x100c
+ E(VGPU_GSP_PLUGIN_TRIGGERED) // 0x100d
+ E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK) // 0x100e
+ E(PERF_BRIDGELESS_INFO_UPDATE) // 0x100f
+ E(VGPU_CONFIG) // 0x1010
+ E(DISPLAY_MODESET) // 0x1011
+ E(EXTDEV_INTR_SERVICE) // 0x1012
+ E(NVLINK_INBAND_RECEIVED_DATA_256) // 0x1013
+ E(NVLINK_INBAND_RECEIVED_DATA_512) // 0x1014
+ E(NVLINK_INBAND_RECEIVED_DATA_1024) // 0x1015
+ E(NVLINK_INBAND_RECEIVED_DATA_2048) // 0x1016
+ E(NVLINK_INBAND_RECEIVED_DATA_4096) // 0x1017
+ E(TIMED_SEMAPHORE_RELEASE) // 0x1018
+ E(NVLINK_IS_GPU_DEGRADED) // 0x1019
+ E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK) // 0x101a
+ E(GSP_SEND_USER_SHARED_DATA) // 0x101b
+ E(NVLINK_FAULT_UP) // 0x101c
+ E(GSP_LOCKDOWN_NOTICE) // 0x101d
+ E(MIG_CI_CONFIG_UPDATE) // 0x101e
+ E(NUM_EVENTS) // END
+#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+};
+# undef E
+# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+#endif
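+
+/*
+ * The E() list above is an X-macro: included with E undefined it emits
+ * the NV_VGPU_MSG_EVENT_* enum, but a caller may predefine E before
+ * inclusion to expand the same list differently. A hypothetical use,
+ * kept under #if 0, would generate printable event names:
+ */
+#if 0
+#define E(RPC) #RPC,
+static const char *const event_names[] = {
+#include "msgfn.h"
+};
+#undef E
+#endif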
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h
new file mode 100644
index 000000000000..3a04e702677f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_NVDEC_H__
+#define __NVRM_NVDEC_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances;
+ NvU32 engineInstance; // Select NVDEC0 or NVDEC1 or NVDEC2
+} NV_BSP_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h
new file mode 100644
index 000000000000..203c1d5304d9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_NVENC_H__
+#define __NVRM_NVENC_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of MSENC?
+ NvU32 engineInstance; // Select MSENC/NVENC0 or NVENC1 or NVENC2
+} NV_MSENC_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h
new file mode 100644
index 000000000000..71fc53889ec7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_NVJPG_H__
+#define __NVRM_NVJPG_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of NVJPG?
+ NvU32 engineInstance;
+} NV_NVJPG_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h
new file mode 100644
index 000000000000..49d81c7673d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_OFA_H__
+#define __NVRM_OFA_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA?
+} NV_OFA_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h
index 73c57f235f6a..2a037acc6b1e 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h
@@ -1,5 +1,10 @@
-#ifndef __src_nvidia_kernel_inc_vgpu_rpc_global_enums_h__
-#define __src_nvidia_kernel_inc_vgpu_rpc_global_enums_h__
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_RPCFN_H__
+#define __NVRM_RPCFN_H__
+#include <nvrm/nvtypes.h>
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
@@ -209,54 +214,12 @@ enum {
X(RM, CTRL_SET_HS_CREDITS) // 198
X(RM, CTRL_PM_AREA_PC_SAMPLER) // 199
X(RM, INVALIDATE_TLB) // 200
+ X(RM, RESERVED_201) // 201
+ X(RM, ECC_NOTIFIER_WRITE_ACK) // 202
X(RM, NUM_FUNCTIONS) //END
#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
};
# undef X
# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
#endif
-
-#ifndef E
-# define E(RPC) NV_VGPU_MSG_EVENT_##RPC,
-# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
-enum {
-#endif
- E(FIRST_EVENT = 0x1000) // 0x1000
- E(GSP_INIT_DONE) // 0x1001
- E(GSP_RUN_CPU_SEQUENCER) // 0x1002
- E(POST_EVENT) // 0x1003
- E(RC_TRIGGERED) // 0x1004
- E(MMU_FAULT_QUEUED) // 0x1005
- E(OS_ERROR_LOG) // 0x1006
- E(RG_LINE_INTR) // 0x1007
- E(GPUACCT_PERFMON_UTIL_SAMPLES) // 0x1008
- E(SIM_READ) // 0x1009
- E(SIM_WRITE) // 0x100a
- E(SEMAPHORE_SCHEDULE_CALLBACK) // 0x100b
- E(UCODE_LIBOS_PRINT) // 0x100c
- E(VGPU_GSP_PLUGIN_TRIGGERED) // 0x100d
- E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK) // 0x100e
- E(PERF_BRIDGELESS_INFO_UPDATE) // 0x100f
- E(VGPU_CONFIG) // 0x1010
- E(DISPLAY_MODESET) // 0x1011
- E(EXTDEV_INTR_SERVICE) // 0x1012
- E(NVLINK_INBAND_RECEIVED_DATA_256) // 0x1013
- E(NVLINK_INBAND_RECEIVED_DATA_512) // 0x1014
- E(NVLINK_INBAND_RECEIVED_DATA_1024) // 0x1015
- E(NVLINK_INBAND_RECEIVED_DATA_2048) // 0x1016
- E(NVLINK_INBAND_RECEIVED_DATA_4096) // 0x1017
- E(TIMED_SEMAPHORE_RELEASE) // 0x1018
- E(NVLINK_IS_GPU_DEGRADED) // 0x1019
- E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK) // 0x101a
- E(GSP_SEND_USER_SHARED_DATA) // 0x101b
- E(NVLINK_FAULT_UP) // 0x101c
- E(GSP_LOCKDOWN_NOTICE) // 0x101d
- E(MIG_CI_CONFIG_UPDATE) // 0x101e
- E(NUM_EVENTS) // END
-#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
-};
-# undef E
-# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
-#endif
-
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h
new file mode 100644
index 000000000000..f6ec04efd119
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_VMM_H__
+#define __NVRM_VMM_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define FERMI_VASPACE_A (0x000090f1)
+
+typedef struct
+{
+ NvU32 index;
+ NvV32 flags;
+ NvU64 vaSize NV_ALIGN_BYTES(8);
+ NvU64 vaStartInternal NV_ALIGN_BYTES(8);
+ NvU64 vaLimitInternal NV_ALIGN_BYTES(8);
+ NvU32 bigPageSize;
+ NvU64 vaBase NV_ALIGN_BYTES(8);
+} NV_VASPACE_ALLOCATION_PARAMETERS;
+
+#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW 0x00 //<! Create new VASpace, by default
+
+#define NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED BIT(3)
+
+#define SPLIT_VAS_SERVER_RM_MANAGED_VA_START 0x100000000ULL // 4GB
+#define SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE 0x20000000ULL // 512MB
+
+#define GMMU_FMT_MAX_LEVELS 6U
+
+#define NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES (0x90f10106U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID" */
+typedef struct NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS {
+ /*!
+ * [in] GPU sub-device handle - this API only supports unicast.
+ * Pass 0 to use subDeviceId instead.
+ */
+ NvHandle hSubDevice;
+
+ /*!
+ * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero.
+ */
+ NvU32 subDeviceId;
+
+ /*!
+ * [in] Page size (VA coverage) of the level to reserve.
+ * This need not be a leaf (page table) page size - it can be
+ * the coverage of an arbitrary level (including root page directory).
+ */
+ NV_DECLARE_ALIGNED(NvU64 pageSize, 8);
+
+ /*!
+ * [in] First GPU virtual address of the range to reserve.
+ * This must be aligned to pageSize.
+ */
+ NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8);
+
+ /*!
+ * [in] Last GPU virtual address of the range to reserve.
+ * This (+1) must be aligned to pageSize.
+ */
+ NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8);
+
+ /*!
+ * [in] Number of PDE levels to copy.
+ */
+ NvU32 numLevelsToCopy;
+
+ /*!
+ * [in] Per-level information.
+ */
+ struct {
+ /*!
+ * Physical address of this page level instance.
+ */
+ NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
+
+ /*!
+ * Size in bytes allocated for this level instance.
+ */
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+
+ /*!
+ * Aperture in which this page level instance resides.
+ */
+ NvU32 aperture;
+
+ /*!
+ * Page shift corresponding to the level
+ */
+ NvU8 pageShift;
+ } levels[GMMU_FMT_MAX_LEVELS];
+} NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS;
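+
+/*
+ * Fill sketch for the control above, mirroring one server-reserved
+ * top-level PDE into a client-managed VA space. Every value here is a
+ * placeholder (the real fill lives in nouveau's r535 MMU code); the
+ * pageShift of 29 simply matches the 512MB pageSize chosen.
+ */
+static inline void
+example_copy_pdes_args(NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *p,
+ NvU64 pd_addr, NvU64 pd_size)
+{
+ p->pageSize = SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE; /* 512MB coverage */
+ p->virtAddrLo = SPLIT_VAS_SERVER_RM_MANAGED_VA_START;
+ p->virtAddrHi = p->virtAddrLo + SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE - 1;
+ p->numLevelsToCopy = 1;
+ p->levels[0].physAddress = pd_addr;
+ p->levels[0].size = pd_size;
+ p->levels[0].aperture = 1; /* hypothetical aperture enum value */
+ p->levels[0].pageShift = 29; /* log2(512MB) */
+}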
+
+#define NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY (0x801813U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
+ NvU32 numEntries;
+ NvU32 flags;
+ NvHandle hVASpace;
+ NvU32 chId;
+ NvU32 subDeviceId; // ID+1, 0 for BC
+ NvU32 pasid;
+} NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS;
+
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE 1:0
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_VIDMEM (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_COH (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_NONCOH (0x00000002U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES 2:2
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES_TRUE (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS 3:3
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_TRUE (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY 4:4
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY_TRUE (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE 5:5
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE_TRUE (0x00000001U)
+
+#define NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY (0x801814U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS {
+ NvHandle hVASpace;
+ NvU32 subDeviceId; // ID+1, 0 for BC
+} NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c
index d72b3aae9a2b..2156808cba4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c
@@ -19,26 +19,26 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
+#include <rm/engine.h>
-#include <subdev/gsp.h>
+#include "nvrm/ofa.h"
-#include <nvif/class.h>
+static int
+r535_ofa_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *ofa)
+{
+ NV_OFA_ALLOCATION_PARAMETERS *args;
-static const struct nvkm_engine_func
-ad102_nvdec = {
- .sclass = {
- { -1, -1, NVC9B0_VIDEO_DECODER },
- {}
- }
-};
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), ofa);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
-int
-ad102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_nvdec **pnvdec)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvdec_new(&ad102_nvdec, device, type, inst, pnvdec);
+ args->size = sizeof(*args);
- return -ENODEV;
+ return nvkm_gsp_rm_alloc_wr(ofa, args);
}
+
+const struct nvkm_rm_api_engine
+r535_ofa = {
+ .alloc = r535_ofa_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c
new file mode 100644
index 000000000000..a4190676e1ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/gsp.h"
+
+static const struct nvkm_rm_wpr
+r535_wpr_libos2 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB,
+};
+
+static const struct nvkm_rm_wpr
+r535_wpr_libos3 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+};
+
+static const struct nvkm_rm_api
+r535_api = {
+ .gsp = &r535_gsp,
+ .rpc = &r535_rpc,
+ .ctrl = &r535_ctrl,
+ .alloc = &r535_alloc,
+ .client = &r535_client,
+ .device = &r535_device,
+ .fbsr = &r535_fbsr,
+ .disp = &r535_disp,
+ .fifo = &r535_fifo,
+ .ce = &r535_ce,
+ .gr = &r535_gr,
+ .nvdec = &r535_nvdec,
+ .nvenc = &r535_nvenc,
+ .nvjpg = &r535_nvjpg,
+ .ofa = &r535_ofa,
+};
+
+const struct nvkm_rm_impl
+r535_rm_tu102 = {
+ .wpr = &r535_wpr_libos2,
+ .api = &r535_api,
+};
+
+const struct nvkm_rm_impl
+r535_rm_ga102 = {
+ .wpr = &r535_wpr_libos3,
+ .api = &r535_api,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
new file mode 100644
index 000000000000..5acb98d137bd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
@@ -0,0 +1,691 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rpc.h>
+
+#include "nvrm/rpcfn.h"
+
+#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
+#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
+
+/**
+ * DOC: GSP message queue element
+ *
+ * https://github.com/NVIDIA/open-gpu-kernel-modules/blob/535/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h
+ *
+ * The GSP command queue and status queue are message queues for the
+ * communication between software and GSP. The software submits the GSP
+ * RPC via the GSP command queue, GSP writes the status of the submitted
+ * RPC in the status queue.
+ *
+ * A GSP message queue element consists of three parts:
+ *
+ * - message element header (struct r535_gsp_msg), which mostly maintains
+ * the metadata for queuing the element.
+ *
+ * - RPC message header (struct nvfw_gsp_rpc), which maintains the info
+ * of the RPC. E.g., the RPC function number.
+ *
+ * - The payload, where the RPC message stays. E.g. the params of a
+ * specific RPC function. Some RPC functions also have their headers
+ * in the payload. E.g. rm_alloc, rm_control.
+ *
+ * The memory layout of a GSP message element can be illustrated below::
+ *
+ * +------------------------+
+ * | Message Element Header |
+ * | (r535_gsp_msg) |
+ * | |
+ * | (r535_gsp_msg.data) |
+ * | | |
+ * |----------V-------------|
+ * | GSP RPC Header |
+ * | (nvfw_gsp_rpc) |
+ * | |
+ * | (nvfw_gsp_rpc.data) |
+ * | | |
+ * |----------V-------------|
+ * | Payload |
+ * | |
+ * | header(optional) |
+ * | params |
+ * +------------------------+
+ *
+ * The max size of a message queue element is 16 pages (including the
+ * headers). When a GSP message to be sent is larger than 16 pages, the
+ * message should be split into multiple elements and sent accordingly.
+ *
+ * Among the split elements, the first element has the expected
+ * function number, while the rest of the elements are sent with the
+ * function number NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD.
+ *
+ * GSP consumes the elements from the cmdq and always writes the result
+ * back to the msgq. The result is also formed as split elements.
+ *
+ * Terminology:
+ *
+ * - gsp_msg(msg): GSP message element (element header + GSP RPC header +
+ * payload)
+ * - gsp_rpc(rpc): GSP RPC (RPC header + payload)
+ * - gsp_rpc_buf: buffer for (GSP RPC header + payload)
+ * - gsp_rpc_len: size of (GSP RPC header + payload)
+ * - params_size: size of params in the payload
+ * - payload_size: size of (header if exists + params) in the payload
+ */
+
+struct r535_gsp_msg {
+ u8 auth_tag_buffer[16];
+ u8 aad_buffer[16];
+ u32 checksum;
+ u32 sequence;
+ u32 elem_count;
+ u32 pad;
+ u8 data[];
+};
+
+struct nvfw_gsp_rpc {
+ u32 header_version;
+ u32 signature;
+ u32 length;
+ u32 function;
+ u32 rpc_result;
+ u32 rpc_result_private;
+ u32 sequence;
+ union {
+ u32 spare;
+ u32 cpuRmGfid;
+ };
+ u8 data[];
+};
+
+#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
+
+#define to_gsp_hdr(p, header) \
+ container_of((void *)p, typeof(*header), data)
+
+#define to_payload_hdr(p, header) \
+ container_of((void *)p, typeof(*header), params)
+
+int
+r535_rpc_status_to_errno(uint32_t rpc_status)
+{
+ switch (rpc_status) {
+ case 0x55: /* NV_ERR_NOT_READY */
+ case 0x66: /* NV_ERR_TIMEOUT_RETRY */
+ return -EBUSY;
+ case 0x51: /* NV_ERR_NO_MEMORY */
+ return -ENOMEM;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime)
+{
+ u32 size, rptr = *gsp->msgq.rptr;
+ int used;
+
+ size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len,
+ GSP_PAGE_SIZE);
+ if (WARN_ON(!size || size >= gsp->msgq.cnt))
+ return -EINVAL;
+
+ do {
+ u32 wptr = *gsp->msgq.wptr;
+
+ used = wptr + gsp->msgq.cnt - rptr;
+ if (used >= gsp->msgq.cnt)
+ used -= gsp->msgq.cnt;
+ if (used >= size)
+ break;
+
+ usleep_range(1, 2);
+ } while (--(*ptime));
+
+ if (WARN_ON(!*ptime))
+ return -ETIMEDOUT;
+
+ return used;
+}
+
+static struct r535_gsp_msg *
+r535_gsp_msgq_get_entry(struct nvkm_gsp *gsp)
+{
+ u32 rptr = *gsp->msgq.rptr;
+
+ /* Skip the first page, which is the message queue info */
+ return (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE +
+ rptr * GSP_PAGE_SIZE);
+}
+
+/**
+ * DOC: Receive a GSP message queue element
+ *
+ * Receiving a GSP message queue element from the message queue consists of
+ * the following steps:
+ *
+ * - Peek the element from the queue: r535_gsp_msgq_peek().
+ * Peek the first page of the element to determine the total size of the
+ * message before allocating the proper memory.
+ *
+ * - Allocate memory for the message.
+ * Once the total size of the message is determined from the GSP message
+ * queue element, the caller of r535_gsp_msgq_recv() allocates the
+ * required memory.
+ *
+ * - Receive the message: r535_gsp_msgq_recv().
+ * Copy the message into the allocated memory. Advance the read pointer.
+ * If the message is a large GSP message, r535_gsp_msgq_recv() calls
+ * r535_gsp_msgq_recv_one_elem() repeatedly to receive continuation parts
+ * until the complete message is received.
+ *   r535_gsp_msgq_recv() assembles the payloads of the continuation parts
+ *   into a single contiguous buffer, which is returned as the complete
+ *   large GSP message.
+ *
+ * - Free the allocated memory: r535_gsp_msg_done().
+ * The user is responsible for freeing the memory allocated for the GSP
+ * message pages after they have been processed.
+ */
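+/*
+ * Minimal sketch of the sequence described above (illustrative only, not
+ * a real call site; error handling elided):
+ *
+ *	int retries = 4000000;
+ *	struct nvfw_gsp_rpc *rpc;
+ *
+ *	rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
+ *	rpc = r535_gsp_msgq_recv(gsp, rpc->length, &retries);
+ *	... process the message ...
+ *	r535_gsp_msg_done(gsp, rpc);
+ */
+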
+static void *
+r535_gsp_msgq_peek(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
+{
+ struct r535_gsp_msg *mqe;
+ int ret;
+
+ ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+
+ return mqe->data;
+}
+
+struct r535_gsp_msg_info {
+ int *retries;
+ u32 gsp_rpc_len;
+ void *gsp_rpc_buf;
+ bool continuation;
+};
+
+static void
+r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl);
+
+static void *
+r535_gsp_msgq_recv_one_elem(struct nvkm_gsp *gsp,
+ struct r535_gsp_msg_info *info)
+{
+ u8 *buf = info->gsp_rpc_buf;
+ u32 rptr = *gsp->msgq.rptr;
+ struct r535_gsp_msg *mqe;
+ u32 size, expected, len;
+ int ret;
+
+ expected = info->gsp_rpc_len;
+
+ ret = r535_gsp_msgq_wait(gsp, expected, info->retries);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+
+ if (info->continuation) {
+ struct nvfw_gsp_rpc *rpc = (struct nvfw_gsp_rpc *)mqe->data;
+
+ if (rpc->function != NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD) {
+ nvkm_error(&gsp->subdev,
+ "Not a continuation of a large RPC\n");
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ return ERR_PTR(-EIO);
+ }
+ }
+
+ size = ALIGN(expected + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
+
+ len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
+ len = min_t(u32, expected, len);
+
+ if (info->continuation)
+ memcpy(buf, mqe->data + sizeof(struct nvfw_gsp_rpc),
+ len - sizeof(struct nvfw_gsp_rpc));
+ else
+ memcpy(buf, mqe->data, len);
+
+ expected -= len;
+
+	if (expected) {
+		/* The message wraps past the end of the queue; copy the
+		 * remainder from the first element page. */
+		mqe = (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE);
+		memcpy(buf + len, mqe, expected);
+	}
+
+ rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
+
+ mb();
+ (*gsp->msgq.rptr) = rptr;
+ return buf;
+}
+
+static void *
+r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
+{
+ struct r535_gsp_msg *mqe;
+ const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*mqe);
+ struct nvfw_gsp_rpc *rpc;
+ struct r535_gsp_msg_info info = {0};
+ u32 expected = gsp_rpc_len;
+ void *buf;
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+ rpc = (struct nvfw_gsp_rpc *)mqe->data;
+
+ if (WARN_ON(rpc->length > max_rpc_size))
+ return NULL;
+
+ buf = kvmalloc(max_t(u32, rpc->length, expected), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ info.gsp_rpc_buf = buf;
+ info.retries = retries;
+ info.gsp_rpc_len = rpc->length;
+
+ buf = r535_gsp_msgq_recv_one_elem(gsp, &info);
+ if (IS_ERR(buf)) {
+ kvfree(info.gsp_rpc_buf);
+ info.gsp_rpc_buf = NULL;
+ return buf;
+ }
+
+ if (expected <= max_rpc_size)
+ return buf;
+
+ info.gsp_rpc_buf += info.gsp_rpc_len;
+ expected -= info.gsp_rpc_len;
+
+ while (expected) {
+ u32 size;
+
+ rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries);
+ if (IS_ERR_OR_NULL(rpc)) {
+			kvfree(buf);
+ return rpc;
+ }
+
+ info.gsp_rpc_len = rpc->length;
+ info.continuation = true;
+
+ rpc = r535_gsp_msgq_recv_one_elem(gsp, &info);
+ if (IS_ERR_OR_NULL(rpc)) {
+			kvfree(buf);
+ return rpc;
+ }
+
+ size = info.gsp_rpc_len - sizeof(*rpc);
+ expected -= size;
+ info.gsp_rpc_buf += size;
+ }
+
+ rpc = buf;
+ rpc->length = gsp_rpc_len;
+ return buf;
+}
+
+static int
+r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *rpc)
+{
+ struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
+ struct r535_gsp_msg *cqe;
+ u32 gsp_rpc_len = msg->checksum;
+ u64 *ptr = (void *)msg;
+ u64 *end;
+ u64 csum = 0;
+ int free, time = 1000000;
+ u32 wptr, size, step, len;
+ u32 off = 0;
+
+ len = ALIGN(GSP_MSG_HDR_SIZE + gsp_rpc_len, GSP_PAGE_SIZE);
+
+ end = (u64 *)((char *)ptr + len);
+ msg->pad = 0;
+ msg->checksum = 0;
+ msg->sequence = gsp->cmdq.seq++;
+	msg->elem_count = DIV_ROUND_UP(len, GSP_PAGE_SIZE);
+
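+	/* Checksum: 64-bit XOR over the whole element, folded to 32 bits. */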
+ while (ptr < end)
+ csum ^= *ptr++;
+
+ msg->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);
+
+ wptr = *gsp->cmdq.wptr;
+ do {
+ do {
+ free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1;
+ if (free >= gsp->cmdq.cnt)
+ free -= gsp->cmdq.cnt;
+ if (free >= 1)
+ break;
+
+ usleep_range(1, 2);
+		} while (--time);
+
+ if (WARN_ON(!time)) {
+ kvfree(msg);
+ return -ETIMEDOUT;
+ }
+
+		/* Skip the first page, which is the command queue info. */
+		cqe = (void *)((u8 *)gsp->shm.cmdq.ptr +
+			       (wptr + 1) * GSP_PAGE_SIZE);
+ step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
+ size = min_t(u32, len, step * GSP_PAGE_SIZE);
+
+ memcpy(cqe, (u8 *)msg + off, size);
+
+		wptr += DIV_ROUND_UP(size, GSP_PAGE_SIZE);
+ if (wptr == gsp->cmdq.cnt)
+ wptr = 0;
+
+ off += size;
+ len -= size;
+ } while (len);
+
+ nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
+ wmb();
+ (*gsp->cmdq.wptr) = wptr;
+ mb();
+
+ nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);
+
+ kvfree(msg);
+ return 0;
+}
+
+static void *
+r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 gsp_rpc_len)
+{
+ struct r535_gsp_msg *msg;
+ u32 size = GSP_MSG_HDR_SIZE + gsp_rpc_len;
+
+ size = ALIGN(size, GSP_MSG_MIN_SIZE);
+ msg = kvzalloc(size, GFP_KERNEL);
+ if (!msg)
+ return ERR_PTR(-ENOMEM);
+
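+	/* Stash the RPC length in the checksum field; r535_gsp_cmdq_push()
+	 * reads it back before computing the real checksum. */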
+ msg->checksum = gsp_rpc_len;
+ return msg->data;
+}
+
+static void
+r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)
+{
+ kvfree(msg);
+}
+
+static void
+r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
+{
+ if (gsp->subdev.debug >= lvl) {
+ nvkm_printk__(&gsp->subdev, lvl, info,
+ "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n",
+ msg->function, msg->length, msg->length - sizeof(*msg),
+ msg->rpc_result, msg->rpc_result_private);
+ print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1,
+ msg->data, msg->length - sizeof(*msg), true);
+ }
+}
+
+struct nvfw_gsp_rpc *
+r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 gsp_rpc_len)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvfw_gsp_rpc *rpc;
+ int retries = 4000000, i;
+
+retry:
+ rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries);
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ if (rpc->rpc_result) {
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ r535_gsp_msg_done(gsp, rpc);
+ return ERR_PTR(-EINVAL);
+ }
+
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_TRACE);
+
+ if (fn && rpc->function == fn) {
+ if (gsp_rpc_len) {
+ if (rpc->length < gsp_rpc_len) {
+ nvkm_error(subdev, "rpc len %d < %d\n",
+ rpc->length, gsp_rpc_len);
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ r535_gsp_msg_done(gsp, rpc);
+ return ERR_PTR(-EIO);
+ }
+
+ return rpc;
+ }
+
+ r535_gsp_msg_done(gsp, rpc);
+ return NULL;
+ }
+
+ for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
+ struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];
+
+ if (ntfy->fn == rpc->function) {
+ if (ntfy->func)
+ ntfy->func(ntfy->priv, ntfy->fn, rpc->data,
+ rpc->length - sizeof(*rpc));
+ break;
+ }
+ }
+
+ if (i == gsp->msgq.ntfy_nr)
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_WARN);
+
+ r535_gsp_msg_done(gsp, rpc);
+ if (fn)
+ goto retry;
+
+ if (*gsp->msgq.rptr != *gsp->msgq.wptr)
+ goto retry;
+
+ return NULL;
+}
+
+int
+r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv)
+{
+ int ret = 0;
+
+ mutex_lock(&gsp->msgq.mutex);
+ if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) {
+ ret = -ENOSPC;
+ } else {
+ gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn;
+ gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func;
+ gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv;
+ gsp->msgq.ntfy_nr++;
+ }
+ mutex_unlock(&gsp->msgq.mutex);
+ return ret;
+}
+
+int
+r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
+{
+ void *repv;
+
+ mutex_lock(&gsp->cmdq.mutex);
+ repv = r535_gsp_msg_recv(gsp, fn, 0);
+ mutex_unlock(&gsp->cmdq.mutex);
+ if (IS_ERR(repv))
+ return PTR_ERR(repv);
+
+ return 0;
+}
+
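+/*
+ * Reply policies: NVKM_GSP_RPC_REPLY_NOWAIT returns as soon as the RPC
+ * has been queued, NVKM_GSP_RPC_REPLY_RECV waits for the reply and hands
+ * its payload back to the caller, and NVKM_GSP_RPC_REPLY_POLL waits for
+ * the reply but discards its payload.
+ */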
+static void *
+r535_gsp_rpc_handle_reply(struct nvkm_gsp *gsp, u32 fn,
+ enum nvkm_gsp_rpc_reply_policy policy,
+ u32 gsp_rpc_len)
+{
+ struct nvfw_gsp_rpc *reply;
+ void *repv = NULL;
+
+ switch (policy) {
+ case NVKM_GSP_RPC_REPLY_NOWAIT:
+ break;
+ case NVKM_GSP_RPC_REPLY_RECV:
+ reply = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len);
+ if (!IS_ERR_OR_NULL(reply))
+ repv = reply->data;
+ else
+ repv = reply;
+ break;
+ case NVKM_GSP_RPC_REPLY_POLL:
+ repv = r535_gsp_msg_recv(gsp, fn, 0);
+ break;
+ }
+
+ return repv;
+}
+
+static void *
+r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len)
+{
+ struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
+ u32 fn = rpc->function;
+ int ret;
+
+ if (gsp->subdev.debug >= NV_DBG_TRACE) {
+ nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function,
+ rpc->length, rpc->length - sizeof(*rpc));
+ print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1,
+ rpc->data, rpc->length - sizeof(*rpc), true);
+ }
+
+ ret = r535_gsp_cmdq_push(gsp, rpc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return r535_gsp_rpc_handle_reply(gsp, fn, policy, gsp_rpc_len);
+}
+
+static void
+r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
+{
+ struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data);
+
+ r535_gsp_msg_done(gsp, rpc);
+}
+
+static void *
+r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size)
+{
+ struct nvfw_gsp_rpc *rpc;
+
+ rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + payload_size,
+ sizeof(u64)));
+ if (IS_ERR(rpc))
+ return ERR_CAST(rpc);
+
+ rpc->header_version = 0x03000000;
+ rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';
+ rpc->function = fn;
+ rpc->rpc_result = 0xffffffff;
+ rpc->rpc_result_private = 0xffffffff;
+ rpc->length = sizeof(*rpc) + payload_size;
+ return rpc->data;
+}
+
+static void *
+r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len)
+{
+ struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
+ struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
+ const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*msg);
+ const u32 max_payload_size = max_rpc_size - sizeof(*rpc);
+ u32 payload_size = rpc->length - sizeof(*rpc);
+ void *repv;
+
+ mutex_lock(&gsp->cmdq.mutex);
+ if (payload_size > max_payload_size) {
+ const u32 fn = rpc->function;
+ u32 remain_payload_size = payload_size;
+
+ /* Adjust length, and send initial RPC. */
+ rpc->length = sizeof(*rpc) + max_payload_size;
+ msg->checksum = rpc->length;
+
+ repv = r535_gsp_rpc_send(gsp, payload, NVKM_GSP_RPC_REPLY_NOWAIT, 0);
+ if (IS_ERR(repv))
+ goto done;
+
+ payload += max_payload_size;
+ remain_payload_size -= max_payload_size;
+
+ /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */
+ while (remain_payload_size) {
+ u32 size = min(remain_payload_size,
+ max_payload_size);
+ void *next;
+
+ next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
+ if (IS_ERR(next)) {
+ repv = next;
+ goto done;
+ }
+
+ memcpy(next, payload, size);
+
+ repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0);
+ if (IS_ERR(repv))
+ goto done;
+
+ payload += size;
+ remain_payload_size -= size;
+ }
+
+ /* Wait for reply. */
+ repv = r535_gsp_rpc_handle_reply(gsp, fn, policy, payload_size +
+ sizeof(*rpc));
+ } else {
+ repv = r535_gsp_rpc_send(gsp, payload, policy, gsp_rpc_len);
+ }
+
+done:
+ mutex_unlock(&gsp->cmdq.mutex);
+ return repv;
+}
+
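+/*
+ * Typical flow through this API (illustrative sketch only; error handling
+ * elided, names as wired up below):
+ *
+ *	void *payload = r535_gsp_rpc_get(gsp, fn, payload_size);
+ *
+ *	... fill in the payload ...
+ *
+ *	void *reply = r535_gsp_rpc_push(gsp, payload,
+ *					NVKM_GSP_RPC_REPLY_RECV, gsp_rpc_len);
+ *	if (!IS_ERR_OR_NULL(reply))
+ *		r535_gsp_rpc_done(gsp, reply);
+ */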
+const struct nvkm_rm_api_rpc
+r535_rpc = {
+ .get = r535_gsp_rpc_get,
+ .push = r535_gsp_rpc_push,
+ .done = r535_gsp_rpc_done,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c
index d3e95453f25d..52f2e5f14517 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c
@@ -19,15 +19,38 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "vmm.h"
+#include <subdev/mmu/vmm.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+#include <nvhw/drf.h>
+#include "nvrm/vmm.h"
-static int
-r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
+void
+r535_mmu_vaspace_del(struct nvkm_vmm *vmm)
+{
+ if (vmm->rm.external) {
+ NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.object,
+ NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY,
+ sizeof(*ctrl));
+ if (!IS_ERR(ctrl)) {
+ ctrl->hVASpace = vmm->rm.object.handle;
+
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.object, ctrl));
+ }
+
+ vmm->rm.external = false;
+ }
+
+ nvkm_gsp_rm_free(&vmm->rm.object);
+ nvkm_gsp_device_dtor(&vmm->rm.device);
+ nvkm_gsp_client_dtor(&vmm->rm.client);
+
+ nvkm_vmm_put(vmm, &vmm->rm.rsvd);
+}
+
+int
+r535_mmu_vaspace_new(struct nvkm_vmm *vmm, u32 handle, bool external)
{
NV_VASPACE_ALLOCATION_PARAMETERS *args;
int ret;
@@ -37,58 +60,103 @@ r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
if (ret)
return ret;
- args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, 0x90f10000, FERMI_VASPACE_A,
+ args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, handle, FERMI_VASPACE_A,
sizeof(*args), &vmm->rm.object);
if (IS_ERR(args))
return PTR_ERR(args);
args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW;
+ if (external)
+ args->flags = NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED;
ret = nvkm_gsp_rm_alloc_wr(&vmm->rm.object, args);
if (ret)
return ret;
- {
+ if (!external) {
NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl;
+ u8 page_shift = 29; /* 512MiB */
+ const u64 page_size = BIT_ULL(page_shift);
+ const struct nvkm_vmm_page *page;
+ const struct nvkm_vmm_desc *desc;
+ struct nvkm_vmm_pt *pd = vmm->pd;
+
+ for (page = vmm->func->page; page->shift; page++) {
+ if (page->shift == page_shift)
+ break;
+ }
+
+ if (WARN_ON(!page->shift))
+ return -EINVAL;
mutex_lock(&vmm->mutex.vmm);
- ret = nvkm_vmm_get_locked(vmm, true, false, false, 0x1d, 32, 0x20000000,
+ ret = nvkm_vmm_get_locked(vmm, true, false, false, page_shift, 32, page_size,
&vmm->rm.rsvd);
mutex_unlock(&vmm->mutex.vmm);
if (ret)
return ret;
+ /* Some parts of RM expect the server-reserved area to be in a specific location. */
+ if (WARN_ON(vmm->rm.rsvd->addr != SPLIT_VAS_SERVER_RM_MANAGED_VA_START ||
+ vmm->rm.rsvd->size != SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE))
+ return -EINVAL;
+
ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.object,
NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES,
sizeof(*ctrl));
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
- ctrl->pageSize = 0x20000000;
+ ctrl->pageSize = page_size;
ctrl->virtAddrLo = vmm->rm.rsvd->addr;
ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1;
- ctrl->numLevelsToCopy = vmm->pd->pde[0]->pde[0] ? 3 : 2;
- ctrl->levels[0].physAddress = vmm->pd->pt[0]->addr;
- ctrl->levels[0].size = 0x20;
- ctrl->levels[0].aperture = 1;
- ctrl->levels[0].pageShift = 0x2f;
- ctrl->levels[1].physAddress = vmm->pd->pde[0]->pt[0]->addr;
- ctrl->levels[1].size = 0x1000;
- ctrl->levels[1].aperture = 1;
- ctrl->levels[1].pageShift = 0x26;
- if (vmm->pd->pde[0]->pde[0]) {
- ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr;
- ctrl->levels[2].size = 0x1000;
- ctrl->levels[2].aperture = 1;
- ctrl->levels[2].pageShift = 0x1d;
+
+ for (desc = page->desc; desc->bits; desc++) {
+ ctrl->numLevelsToCopy++;
+ page_shift += desc->bits;
+ }
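+		/* desc now points at the array terminator; step back to the
+		 * topmost page directory level. */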
+ desc--;
+
+ for (int i = 0; i < ctrl->numLevelsToCopy; i++, desc--) {
+ page_shift -= desc->bits;
+
+ ctrl->levels[i].physAddress = pd->pt[0]->addr;
+ ctrl->levels[i].size = (1 << desc->bits) * desc->size;
+ ctrl->levels[i].aperture = 1;
+ ctrl->levels[i].pageShift = page_shift;
+
+ pd = pd->pde[0];
}
ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl);
+ } else {
+ NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.object,
+ NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->physAddress = vmm->pd->pt[0]->addr;
+ ctrl->numEntries = 1 << vmm->func->page[0].desc->bits;
+ ctrl->flags = NVDEF(NV0080_CTRL_DMA_SET_PAGE_DIRECTORY, FLAGS, APERTURE, VIDMEM);
+ ctrl->hVASpace = vmm->rm.object.handle;
+
+ ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.object, ctrl);
+ if (ret == 0)
+ vmm->rm.external = true;
}
return ret;
}
+static int
+r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
+{
+ return r535_mmu_vaspace_new(vmm, NVKM_RM_VASPACE, true);
+}
+
static void
r535_mmu_dtor(struct nvkm_mmu *mmu)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild
new file mode 100644
index 000000000000..5db0e7009e1f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/subdev/gsp/rm/r570/rm.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/gsp.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/client.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/fbsr.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/disp.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/fifo.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/gr.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/ofa.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c
new file mode 100644
index 000000000000..87e6240662ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/client.h"
+
+static int
+r570_gsp_client_ctor(struct nvkm_gsp_client *client, u32 handle)
+{
+ NV0000_ALLOC_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&client->object, handle, NV01_ROOT, sizeof(*args),
+ &client->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hClient = client->object.handle;
+ args->processID = ~0;
+
+ return nvkm_gsp_rm_alloc_wr(&client->object, args);
+}
+
+const struct nvkm_rm_api_client
+r570_client = {
+ .ctor = r570_gsp_client_ctor,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c
new file mode 100644
index 000000000000..a96e31c2d80b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include <engine/disp.h>
+#include <engine/disp/outp.h>
+
+#include "nvhw/drf.h"
+
+#include "nvrm/disp.h"
+
+static int
+r570_dmac_alloc(struct nvkm_disp *disp, u32 oclass, int inst, u32 put_offset,
+ struct nvkm_gsp_object *dmac)
+{
+ NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&disp->rm.object, (oclass << 16) | inst, oclass,
+ sizeof(*args), dmac);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->channelInstance = inst;
+ args->offset = put_offset;
+ args->subDeviceId = BIT(0);
+
+ return nvkm_gsp_rm_alloc_wr(dmac, args);
+}
+
+static int
+r570_disp_chan_set_pushbuf(struct nvkm_disp *disp, s32 oclass, int inst, struct nvkm_memory *memory)
+{
+ struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp;
+ NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ if (memory) {
+ switch (nvkm_memory_target(memory)) {
+ case NVKM_MEM_TARGET_NCOH:
+ ctrl->addressSpace = ADDR_SYSMEM;
+ ctrl->cacheSnoop = 0;
+ ctrl->pbTargetAperture = PHYS_PCI;
+ break;
+ case NVKM_MEM_TARGET_HOST:
+ ctrl->addressSpace = ADDR_SYSMEM;
+ ctrl->cacheSnoop = 1;
+ ctrl->pbTargetAperture = PHYS_PCI_COHERENT;
+ break;
+ case NVKM_MEM_TARGET_VRAM:
+ ctrl->addressSpace = ADDR_FBMEM;
+ ctrl->pbTargetAperture = PHYS_NVM;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ ctrl->physicalAddr = nvkm_memory_addr(memory);
+ ctrl->limit = nvkm_memory_size(memory) - 1;
+ }
+
+ ctrl->hclass = oclass;
+ ctrl->channelInstance = inst;
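+	/* Cursor channels (class 0x..7a) have no pushbuffer, hence not valid. */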
+ ctrl->valid = ((oclass & 0xff) != 0x7a) ? 1 : 0;
+ ctrl->subDeviceId = BIT(0);
+
+ return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+}
+
+static int
+r570_dp_set_indexed_link_rates(struct nvkm_outp *outp)
+{
+ NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl;
+ struct nvkm_disp *disp = outp->disp;
+
+ if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl)))
+ return -EINVAL;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(outp->index);
+ for (int i = 0; i < outp->dp.rates; i++)
+ ctrl->linkRateTbl[outp->dp.rate[i].dpcd] = outp->dp.rate[i].rate * 10 / 200;
+
+ return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r570_dp_get_caps(struct nvkm_disp *disp, int *plink_bw, bool *pmst, bool *pwm)
+{
+ NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->sorIndex = ~0;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
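+	/* Translate RM's maximum link rate to the DPCD LINK_BW_SET encoding
+	 * (units of 0.27 Gbps per lane, so 0x06 = 1.62G ... 0x1e = 8.1G). */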
+ switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
+ *plink_bw = 0x06;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
+ *plink_bw = 0x0a;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
+ *plink_bw = 0x14;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
+ *plink_bw = 0x1e;
+ break;
+ default:
+ *plink_bw = 0x00;
+ break;
+ }
+
+ *pmst = ctrl->bIsMultistreamSupported;
+ *pwm = ctrl->bHasIncreasedWatermarkLimits;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_bl_ctrl(struct nvkm_disp *disp, unsigned display_id, bool set, int *pval)
+{
+ u32 cmd = set ? NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS :
+ NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS;
+ NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, cmd, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(display_id);
+ ctrl->brightness = *pval;
+ ctrl->brightnessType = NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret)
+ return ret;
+
+ *pval = ctrl->brightness;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_disp_get_active(struct nvkm_disp *disp, unsigned head, u32 *displayid)
+{
+ NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->head = head;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
+ *displayid = ctrl->displayId;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_disp_get_connect_state(struct nvkm_disp *disp, unsigned display_id)
+{
+ NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayMask = BIT(display_id);
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret == 0 && (ctrl->displayMask & BIT(display_id)))
+ ret = 1;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+}
+
+static int
+r570_disp_get_supported(struct nvkm_disp *disp, unsigned long *pmask)
+{
+ NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ *pmask = ctrl->displayMask;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_disp_get_static_info(struct nvkm_disp *disp)
+{
+ NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = disp->engine.subdev.device->gsp;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ disp->wndw.mask = ctrl->windowPresentMask;
+ disp->wndw.nr = fls(disp->wndw.mask);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+const struct nvkm_rm_api_disp
+r570_disp = {
+ .get_static_info = r570_disp_get_static_info,
+ .get_supported = r570_disp_get_supported,
+ .get_connect_state = r570_disp_get_connect_state,
+ .get_active = r570_disp_get_active,
+ .bl_ctrl = r570_bl_ctrl,
+ .dp = {
+ .get_caps = r570_dp_get_caps,
+ .set_indexed_link_rates = r570_dp_set_indexed_link_rates,
+ },
+ .chan = {
+ .set_pushbuf = r570_disp_chan_set_pushbuf,
+ .dmac_alloc = r570_dmac_alloc,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c
new file mode 100644
index 000000000000..2945d5b4e570
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <subdev/instmem/priv.h>
+#include <subdev/bar.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu/vmm.h>
+
+#include "nvrm/fbsr.h"
+#include "nvrm/fifo.h"
+
+static int
+r570_fbsr_suspend_channels(struct nvkm_gsp *gsp, bool suspend)
+{
+ NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->bDisableActiveChannels = suspend;
+
+ return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+}
+
+static void
+r570_fbsr_resume(struct nvkm_gsp *gsp)
+{
+ struct nvkm_device *device = gsp->subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_instobj *iobj;
+ struct nvkm_vmm *vmm;
+
+ /* Restore BAR2 page tables via BAR0 window, and re-enable BAR2. */
+ list_for_each_entry(iobj, &imem->boot, head) {
+ if (iobj->suspend)
+ nvkm_instobj_load(iobj);
+ }
+
+ device->bar->bar2 = true;
+
+ vmm = nvkm_bar_bar2_vmm(device);
+ vmm->func->flush(vmm, 0);
+
+ /* Restore remaining BAR2 allocations (including BAR1 page tables) via BAR2. */
+ list_for_each_entry(iobj, &imem->list, head) {
+ if (iobj->suspend)
+ nvkm_instobj_load(iobj);
+ }
+
+ vmm = nvkm_bar_bar1_vmm(device);
+ vmm->func->flush(vmm, 0);
+
+ /* Resume channel scheduling. */
+ r570_fbsr_suspend_channels(device->gsp, false);
+
+ /* Finish cleaning up. */
+ r535_fbsr_resume(gsp);
+}
+
+static int
+r570_fbsr_init(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size)
+{
+ NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl;
+ struct nvkm_gsp_object memlist;
+ int ret;
+
+ ret = r535_fbsr_memlist(&gsp->internal.device, 0xcaf00003, NVKM_MEM_TARGET_HOST,
+ 0, size, sgt, &memlist);
+ if (ret)
+ return ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->hClient = gsp->internal.client.object.handle;
+ ctrl->hSysMem = memlist.handle;
+ ctrl->sysmemAddrOfSuspendResumeData = gsp->sr.meta.addr;
+ ctrl->bEnteringGcoffState = 1;
+
+ ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+ if (ret)
+ return ret;
+
+ nvkm_gsp_rm_free(&memlist);
+ return 0;
+}
+
+static int
+r570_fbsr_suspend(struct nvkm_gsp *gsp)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_instobj *iobj;
+ u64 size;
+ int ret;
+
+ /* Stop channel scheduling. */
+ r570_fbsr_suspend_channels(gsp, true);
+
+ /* Save BAR2 allocations to system memory. */
+ list_for_each_entry(iobj, &imem->list, head) {
+ if (iobj->preserve) {
+ ret = nvkm_instobj_save(iobj);
+ if (ret)
+ return ret;
+ }
+ }
+
+ list_for_each_entry(iobj, &imem->boot, head) {
+ ret = nvkm_instobj_save(iobj);
+ if (ret)
+ return ret;
+ }
+
+ /* Disable BAR2 access. */
+ device->bar->bar2 = false;
+
+ /* Allocate system memory to hold RM's VRAM allocations across suspend. */
+ size = gsp->fb.heap.size;
+ size += gsp->fb.rsvd_size;
+ size += gsp->fb.bios.vga_workspace.size;
+ nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", size);
+
+ ret = nvkm_gsp_sg(device, size, &gsp->sr.fbsr);
+ if (ret)
+ return ret;
+
+ /* Initialise FBSR on RM. */
+ ret = r570_fbsr_init(gsp, &gsp->sr.fbsr, size);
+ if (ret) {
+ nvkm_gsp_sg_free(device, &gsp->sr.fbsr);
+ return ret;
+ }
+
+ return 0;
+}
+
+const struct nvkm_rm_api_fbsr
+r570_fbsr = {
+ .suspend = r570_fbsr_suspend,
+ .resume = r570_fbsr_resume,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c
new file mode 100644
index 000000000000..79132805cfcf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include <subdev/mmu.h>
+#include <engine/fifo/priv.h>
+#include <engine/fifo/chan.h>
+#include <engine/fifo/runl.h>
+
+#include "nvhw/drf.h"
+
+#include "nvrm/fifo.h"
+#include "nvrm/engine.h"
+
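+/* RM shares one USERD page between up to 8 channels; the USERD page and
+ * index within it are derived from the chid. */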
+#define CHID_PER_USERD 8
+
+static int
+r570_chan_alloc(struct nvkm_gsp_device *device, u32 handle, u32 nv2080_engine_type, u8 runq,
+ bool priv, int chid, u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr,
+ struct nvkm_vmm *vmm, u64 gpfifo_offset, u32 gpfifo_length,
+ struct nvkm_gsp_object *chan)
+{
+ struct nvkm_gsp *gsp = device->object.client->gsp;
+ struct nvkm_fifo *fifo = gsp->subdev.device->fifo;
+ const int userd_p = chid / CHID_PER_USERD;
+ const int userd_i = chid % CHID_PER_USERD;
+ NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&device->object, handle,
+ fifo->func->chan.user.oclass, sizeof(*args), chan);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->gpFifoOffset = gpfifo_offset;
+ args->gpFifoEntries = gpfifo_length / 8;
+
+ args->flags = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
+ args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE);
+ args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, runq);
+ if (!priv)
+ args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE);
+ else
+ args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE);
+ args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE);
+
+ args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE);
+ args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE);
+
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT);
+ args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
+
+ args->hVASpace = vmm->rm.object.handle;
+ args->engineType = nv2080_engine_type;
+
+ args->instanceMem.base = inst_addr;
+ args->instanceMem.size = fifo->func->chan.func->inst->size;
+ args->instanceMem.addressSpace = 2;
+ args->instanceMem.cacheAttrib = 1;
+
+ args->userdMem.base = userd_addr;
+ args->userdMem.size = fifo->func->chan.func->userd->size;
+ args->userdMem.addressSpace = 2;
+ args->userdMem.cacheAttrib = 1;
+
+ args->ramfcMem.base = inst_addr;
+ args->ramfcMem.size = 0x200;
+ args->ramfcMem.addressSpace = 2;
+ args->ramfcMem.cacheAttrib = 1;
+
+ args->mthdbufMem.base = mthdbuf_addr;
+ args->mthdbufMem.size = fifo->rm.mthdbuf_size;
+ args->mthdbufMem.addressSpace = 1;
+ args->mthdbufMem.cacheAttrib = 0;
+
+ if (!priv)
+ args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER);
+ else
+ args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN);
+ args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
+ args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
+
+ return nvkm_gsp_rm_alloc_wr(chan, args);
+}
+
+static int
+r570_fifo_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
+{
+ rpc_rc_triggered_v17_02 *msg = repv;
+ struct nvkm_gsp *gsp = priv;
+
+ if (WARN_ON(repc < sizeof(*msg)))
+ return -EINVAL;
+
+ nvkm_error(&gsp->subdev, "rc engn:%08x chid:%d gfid:%d level:%d type:%d scope:%d part:%d "
+ "fault_addr:%08x%08x fault_type:%08x\n",
+ msg->nv2080EngineType, msg->chid, msg->gfid, msg->exceptLevel, msg->exceptType,
+ msg->scope, msg->partitionAttributionId,
+ msg->mmuFaultAddrHi, msg->mmuFaultAddrLo, msg->mmuFaultType);
+
+ r535_fifo_rc_chid(gsp->subdev.device->fifo, msg->chid);
+ return 0;
+}
+
+static int
+r570_fifo_ectx_size(struct nvkm_fifo *fifo)
+{
+ NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp;
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_GPU_GET_CONSTRUCTED_FALCON_INFO,
+ sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ for (int i = 0; i < ctrl->numConstructedFalcons; i++) {
+ nvkm_runl_foreach(runl, fifo) {
+ nvkm_runl_foreach_engn(engn, runl) {
+ if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) {
+ engn->rm.size =
+ ctrl->constructedFalconsTable[i].ctxBufferSize;
+ break;
+ }
+ }
+ }
+ }
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+static int
+r570_fifo_xlat_rm_engine_type(u32 rm, enum nvkm_subdev_type *ptype, int *p2080)
+{
+#define RM_ENGINE_TYPE(RM,NVKM,INST) \
+ RM_ENGINE_TYPE_##RM: \
+ *ptype = NVKM_ENGINE_##NVKM; \
+ *p2080 = NV2080_ENGINE_TYPE_##RM; \
+ return INST
+
+ switch (rm) {
+ case RM_ENGINE_TYPE( GR0, GR, 0);
+ case RM_ENGINE_TYPE( COPY0, CE, 0);
+ case RM_ENGINE_TYPE( COPY1, CE, 1);
+ case RM_ENGINE_TYPE( COPY2, CE, 2);
+ case RM_ENGINE_TYPE( COPY3, CE, 3);
+ case RM_ENGINE_TYPE( COPY4, CE, 4);
+ case RM_ENGINE_TYPE( COPY5, CE, 5);
+ case RM_ENGINE_TYPE( COPY6, CE, 6);
+ case RM_ENGINE_TYPE( COPY7, CE, 7);
+ case RM_ENGINE_TYPE( COPY8, CE, 8);
+ case RM_ENGINE_TYPE( COPY9, CE, 9);
+ case RM_ENGINE_TYPE( COPY10, CE, 10);
+ case RM_ENGINE_TYPE( COPY11, CE, 11);
+ case RM_ENGINE_TYPE( COPY12, CE, 12);
+ case RM_ENGINE_TYPE( COPY13, CE, 13);
+ case RM_ENGINE_TYPE( COPY14, CE, 14);
+ case RM_ENGINE_TYPE( COPY15, CE, 15);
+ case RM_ENGINE_TYPE( COPY16, CE, 16);
+ case RM_ENGINE_TYPE( COPY17, CE, 17);
+ case RM_ENGINE_TYPE( COPY18, CE, 18);
+ case RM_ENGINE_TYPE( COPY19, CE, 19);
+ case RM_ENGINE_TYPE( NVDEC0, NVDEC, 0);
+ case RM_ENGINE_TYPE( NVDEC1, NVDEC, 1);
+ case RM_ENGINE_TYPE( NVDEC2, NVDEC, 2);
+ case RM_ENGINE_TYPE( NVDEC3, NVDEC, 3);
+ case RM_ENGINE_TYPE( NVDEC4, NVDEC, 4);
+ case RM_ENGINE_TYPE( NVDEC5, NVDEC, 5);
+ case RM_ENGINE_TYPE( NVDEC6, NVDEC, 6);
+ case RM_ENGINE_TYPE( NVDEC7, NVDEC, 7);
+ case RM_ENGINE_TYPE( NVENC0, NVENC, 0);
+ case RM_ENGINE_TYPE( NVENC1, NVENC, 1);
+ case RM_ENGINE_TYPE( NVENC2, NVENC, 2);
+ case RM_ENGINE_TYPE( NVENC3, NVENC, 3);
+ case RM_ENGINE_TYPE(NVJPEG0, NVJPG, 0);
+ case RM_ENGINE_TYPE(NVJPEG1, NVJPG, 1);
+ case RM_ENGINE_TYPE(NVJPEG2, NVJPG, 2);
+ case RM_ENGINE_TYPE(NVJPEG3, NVJPG, 3);
+ case RM_ENGINE_TYPE(NVJPEG4, NVJPG, 4);
+ case RM_ENGINE_TYPE(NVJPEG5, NVJPG, 5);
+ case RM_ENGINE_TYPE(NVJPEG6, NVJPG, 6);
+ case RM_ENGINE_TYPE(NVJPEG7, NVJPG, 7);
+ case RM_ENGINE_TYPE( SW, SW, 0);
+ case RM_ENGINE_TYPE( SEC2, SEC2, 0);
+ case RM_ENGINE_TYPE( OFA0, OFA, 0);
+ case RM_ENGINE_TYPE( OFA1, OFA, 1);
+ default:
+ return -EINVAL;
+ }
+#undef RM_ENGINE_TYPE
+}
+
+const struct nvkm_rm_api_fifo
+r570_fifo = {
+ .xlat_rm_engine_type = r570_fifo_xlat_rm_engine_type,
+ .ectx_size = r570_fifo_ectx_size,
+ .rsvd_chids = 1,
+ .rc_triggered = r570_fifo_rc_triggered,
+ .chan = {
+ .alloc = r570_chan_alloc,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c
new file mode 100644
index 000000000000..b6cced9b8aa1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/gr.h>
+
+#include <subdev/mmu.h>
+#include <engine/fifo.h>
+#include <engine/fifo/chid.h>
+#include <engine/gr/priv.h>
+
+#include "nvrm/gr.h"
+#include "nvrm/engine.h"
+
+int
+r570_gr_tpc_mask(struct nvkm_gsp *gsp, int gpc, u32 *pmask)
+{
+ NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->gpcId = gpc;
+
+ ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl));
+ if (ret)
+ return ret;
+
+ *pmask = ctrl->tpcMask;
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+int
+r570_gr_gpc_mask(struct nvkm_gsp *gsp, u32 *pmask)
+{
+ NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ *pmask = ctrl->gpcMask;
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+static int
+r570_gr_scrubber_ctrl(struct r535_gr *gr, bool teardown)
+{
+ NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gr->scrubber.vmm->rm.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_KGR_INIT_BUG4208224_WAR,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->bTeardown = teardown;
+
+ return nvkm_gsp_rm_ctrl_wr(&gr->scrubber.vmm->rm.device.subdevice, ctrl);
+}
+
+static void
+r570_gr_scrubber_fini(struct r535_gr *gr)
+{
+ /* Teardown scrubber channel on RM. */
+ if (gr->scrubber.enabled) {
+ WARN_ON(r570_gr_scrubber_ctrl(gr, true));
+ gr->scrubber.enabled = false;
+ }
+
+ /* Free scrubber channel. */
+ nvkm_gsp_rm_free(&gr->scrubber.threed);
+ nvkm_gsp_rm_free(&gr->scrubber.chan);
+
+ for (int i = 0; i < gr->ctxbuf_nr; i++) {
+ nvkm_vmm_put(gr->scrubber.vmm, &gr->scrubber.ctxbuf.vma[i]);
+ nvkm_memory_unref(&gr->scrubber.ctxbuf.mem[i]);
+ }
+
+ nvkm_vmm_unref(&gr->scrubber.vmm);
+ nvkm_memory_unref(&gr->scrubber.inst);
+}
+
+static int
+r570_gr_scrubber_init(struct r535_gr *gr)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_rm *rm = gsp->rm;
+ int ret;
+
+ /* Scrubber channel only required on TU10x. */
+ switch (device->chipset) {
+ case 0x162:
+ case 0x164:
+ case 0x166:
+ break;
+ default:
+ return 0;
+ }
+
+ if (gr->scrubber.chid < 0) {
+ gr->scrubber.chid = nvkm_chid_get(device->fifo->chid, NULL);
+ if (gr->scrubber.chid < 0)
+ return gr->scrubber.chid;
+ }
+
+ /* Allocate scrubber channel. */
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ 0x2000 + device->fifo->rm.mthdbuf_size, 0, true,
+ &gr->scrubber.inst);
+ if (ret)
+ goto done;
+
+ ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grScrubberVmm",
+ &gr->scrubber.vmm);
+ if (ret)
+ goto done;
+
+ ret = r535_mmu_vaspace_new(gr->scrubber.vmm, KGRAPHICS_SCRUBBER_HANDLE_VAS, false);
+ if (ret)
+ goto done;
+
+ ret = rm->api->fifo->chan.alloc(&gr->scrubber.vmm->rm.device, KGRAPHICS_SCRUBBER_HANDLE_CHANNEL,
+ NV2080_ENGINE_TYPE_GR0, 0, false, gr->scrubber.chid,
+ nvkm_memory_addr(gr->scrubber.inst),
+ nvkm_memory_addr(gr->scrubber.inst) + 0x1000,
+ nvkm_memory_addr(gr->scrubber.inst) + 0x2000,
+ gr->scrubber.vmm, 0, 0x1000, &gr->scrubber.chan);
+ if (ret)
+ goto done;
+
+ ret = r535_gr_promote_ctx(gr, false, gr->scrubber.vmm, gr->scrubber.ctxbuf.mem,
+ gr->scrubber.ctxbuf.vma, &gr->scrubber.chan);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_rm_alloc(&gr->scrubber.chan, KGRAPHICS_SCRUBBER_HANDLE_3DOBJ,
+ rm->gpu->gr.class.threed, 0, &gr->scrubber.threed);
+ if (ret)
+ goto done;
+
+ /* Initialise scrubber channel on RM. */
+ ret = r570_gr_scrubber_ctrl(gr, false);
+ if (ret)
+ goto done;
+
+ gr->scrubber.enabled = true;
+
+done:
+ if (ret)
+ r570_gr_scrubber_fini(gr);
+
+ return ret;
+}
+
+static int
+r570_gr_get_ctxbufs_info(struct r535_gr *gr)
+{
+ NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_gsp *gsp = subdev->device->gsp;
+
+ info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
+ sizeof(*info));
+ if (WARN_ON(IS_ERR(info)))
+ return PTR_ERR(info);
+
+ for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++)
+ r535_gr_get_ctxbuf_info(gr, i, &info->engineContextBuffersInfo[0].engine[i]);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
+ return 0;
+}
+
+const struct nvkm_rm_api_gr
+r570_gr = {
+ .get_ctxbufs_info = r570_gr_get_ctxbufs_info,
+ .scrubber.init = r570_gr_scrubber_init,
+ .scrubber.fini = r570_gr_scrubber_fini,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c
new file mode 100644
index 000000000000..9d2fa4e66d59
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+#include <rm/rpc.h>
+
+#include <asm-generic/video.h>
+
+#include "nvrm/gsp.h"
+#include "nvrm/rpcfn.h"
+#include "nvrm/msgfn.h"
+
+#include <core/pci.h>
+#include <subdev/pci/priv.h>
+
+static u32
+r570_gsp_sr_data_size(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta = gsp->wpr_meta.data;
+
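+	/* Amount of WPR content to preserve across suspend: everything from
+	 * the end of the non-WPR heap up to the end of FRTS. */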
+ return (meta->frtsOffset + meta->frtsSize) -
+ (meta->nonWprHeapOffset + meta->nonWprHeapSize);
+}
+
+static void
+r570_gsp_drop_post_nocat_record(struct nvkm_gsp *gsp)
+{
+ if (gsp->subdev.debug < NV_DBG_DEBUG) {
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD, NULL, NULL);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE, NULL, NULL);
+ }
+}
+
+static bool
+r570_gsp_xlat_mc_engine_idx(u32 mc_engine_idx, enum nvkm_subdev_type *ptype, int *pinst)
+{
+ switch (mc_engine_idx) {
+ case MC_ENGINE_IDX_GSP:
+ *ptype = NVKM_SUBDEV_GSP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_DISP:
+ *ptype = NVKM_ENGINE_DISP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE19:
+ *ptype = NVKM_ENGINE_CE;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_CE0;
+ return true;
+ case MC_ENGINE_IDX_GR0:
+ *ptype = NVKM_ENGINE_GR;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
+ *ptype = NVKM_ENGINE_NVDEC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVDEC0;
+ return true;
+ case MC_ENGINE_IDX_NVENC ... MC_ENGINE_IDX_NVENC3:
+ *ptype = NVKM_ENGINE_NVENC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVENC;
+ return true;
+ case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
+ *ptype = NVKM_ENGINE_NVJPG;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVJPEG0;
+ return true;
+ case MC_ENGINE_IDX_OFA0 ... MC_ENGINE_IDX_OFA1:
+ *ptype = NVKM_ENGINE_OFA;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_OFA0;
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int
+r570_gsp_get_static_info(struct nvkm_gsp *gsp)
+{
+ GspStaticConfigInfo *rpc;
+ u32 gpc_mask;
+ u32 tpc_mask;
+ int ret;
+
+ rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
+ if (IS_ERR(rpc))
+ return PTR_ERR(rpc);
+
+ gsp->internal.client.object.client = &gsp->internal.client;
+ gsp->internal.client.object.parent = NULL;
+ gsp->internal.client.object.handle = rpc->hInternalClient;
+ gsp->internal.client.gsp = gsp;
+ INIT_LIST_HEAD(&gsp->internal.client.events);
+
+ gsp->internal.device.object.client = &gsp->internal.client;
+ gsp->internal.device.object.parent = &gsp->internal.client.object;
+ gsp->internal.device.object.handle = rpc->hInternalDevice;
+
+ gsp->internal.device.subdevice.client = &gsp->internal.client;
+ gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
+ gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
+
+ gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
+ gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
+
+ r535_gsp_get_static_info_fb(gsp, &rpc->fbRegionInfoParams);
+
+ if (gsp->rm->wpr->offset_set_by_acr) {
+ GspFwWprMeta *meta = gsp->wpr_meta.data;
+
+ meta->nonWprHeapOffset = rpc->fwWprLayoutOffset.nonWprHeapOffset;
+ meta->frtsOffset = rpc->fwWprLayoutOffset.frtsOffset;
+ }
+
+ nvkm_gsp_rpc_done(gsp, rpc);
+
+ ret = r570_gr_gpc_mask(gsp, &gpc_mask);
+ if (ret)
+ return ret;
+
+ for (int gpc = 0; gpc < 32; gpc++) {
+ if (gpc_mask & BIT(gpc)) {
+ ret = r570_gr_tpc_mask(gsp, gpc, &tpc_mask);
+ if (ret)
+ return ret;
+
+ gsp->gr.tpcs += hweight32(tpc_mask);
+ gsp->gr.gpcs++;
+ }
+ }
+
+ return 0;
+}
+
+static void
+r570_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi)
+{
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+ acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev);
+
+ if (!handle)
+ return;
+
+ acpi->bValid = 1;
+
+ r535_gsp_acpi_dod(handle, &acpi->dodMethodData);
+ r535_gsp_acpi_jt(handle, &acpi->jtMethodData);
+ r535_gsp_acpi_caps(handle, &acpi->capsMethodData);
+#endif
+}
+
+static int
+r570_gsp_set_system_info(struct nvkm_gsp *gsp)
+{
+ struct nvkm_device *device = gsp->subdev.device;
+ struct pci_dev *pdev = container_of(device, struct nvkm_device_pci, device)->pdev;
+ GspSystemInfo *info;
+
+ if (WARN_ON(device->type == NVKM_DEVICE_TEGRA))
+ return -ENOSYS;
+
+ info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info));
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ info->gpuPhysAddr = device->func->resource_addr(device, NVKM_BAR0_PRI);
+ info->gpuPhysFbAddr = device->func->resource_addr(device, NVKM_BAR1_FB);
+ info->gpuPhysInstAddr = device->func->resource_addr(device, NVKM_BAR2_INST);
+ info->nvDomainBusDeviceFunc = pci_dev_id(pdev);
+ info->maxUserVa = TASK_SIZE;
+ info->pciConfigMirrorBase = device->pci->func->cfg.addr;
+ info->pciConfigMirrorSize = device->pci->func->cfg.size;
+ info->PCIDeviceID = (pdev->device << 16) | pdev->vendor;
+ info->PCISubDeviceID = (pdev->subsystem_device << 16) | pdev->subsystem_vendor;
+ info->PCIRevisionID = pdev->revision;
+ r570_gsp_acpi_info(gsp, &info->acpiMethodData);
+ info->bIsPrimary = video_is_primary_device(device->dev);
+ info->bPreserveVideoMemoryAllocations = false;
+
+ return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT);
+}
+
+static void
+r570_gsp_set_rmargs(struct nvkm_gsp *gsp, bool resume)
+{
+ GSP_ARGUMENTS_CACHED *args;
+
+ args = gsp->rmargs.data;
+ args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
+ args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
+ args->messageQueueInitArguments.cmdQueueOffset =
+ (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data;
+ args->messageQueueInitArguments.statQueueOffset =
+ (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data;
+
+ if (!resume) {
+ args->srInitArguments.oldLevel = 0;
+ args->srInitArguments.flags = 0;
+ args->srInitArguments.bInPMTransition = 0;
+ } else {
+ args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
+ args->srInitArguments.flags = 0;
+ args->srInitArguments.bInPMTransition = 1;
+ }
+
+ args->bDmemStack = 1;
+}
+
+const struct nvkm_rm_api_gsp
+r570_gsp = {
+ .set_rmargs = r570_gsp_set_rmargs,
+ .set_system_info = r570_gsp_set_system_info,
+ .get_static_info = r570_gsp_get_static_info,
+ .xlat_mc_engine_idx = r570_gsp_xlat_mc_engine_idx,
+ .drop_post_nocat_record = r570_gsp_drop_post_nocat_record,
+ .sr_data_size = r570_gsp_sr_data_size,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h
new file mode 100644
index 000000000000..e8714e0abc37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CLIENT_H__
+#define __NVRM_CLIENT_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+#define NV_PROC_NAME_MAX_LENGTH 100U
+
+typedef struct NV0000_ALLOC_PARAMETERS {
+ NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
+ NvU32 processID;
+ char processName[NV_PROC_NAME_MAX_LENGTH];
+ NV_DECLARE_ALIGNED(NvP64 pOsPidInfo, 8);
+} NV0000_ALLOC_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h
new file mode 100644
index 000000000000..06e972835d77
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_DISP_H__
+#define __NVRM_DISP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
+ NvU32 feHwSysCap;
+ NvU32 windowPresentMask;
+ NvBool bFbRemapperEnabled;
+ NvU32 numHeads;
+ NvU32 i2cPort;
+ NvU32 internalDispActiveMask;
+ NvU32 embeddedDisplayPortMask;
+ NvBool bExternalMuxSupported;
+ NvBool bInternalMuxSupported;
+ NvU32 numDispChannels;
+} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730107U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayMask;
+ NvU32 displayMaskDDC;
+} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
+
+#define NV0073_CTRL_MAX_CONNECTORS 4U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 DDCPartners;
+ NvU32 count;
+ struct {
+ NvU32 index;
+ NvU32 type;
+ NvU32 location;
+ } data[NV0073_CTRL_MAX_CONNECTORS];
+ NvU32 platform;
+} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
+ NvBool bDscSupported;
+ NvU32 encoderColorFormatMask;
+ NvU32 lineBufferSizeKB;
+ NvU32 rateBufferSizeKB;
+ NvU32 bitsPerPixelPrecision;
+ NvU32 maxNumHztSlices;
+ NvU32 lineBufferBitDepth;
+} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 sorIndex;
+ NvU32 maxLinkRate;
+ NvU32 dpVersionsSupported;
+ NvU32 UHBRSupportedByGpu;
+ NvU32 minPClkForCompressed;
+ NvBool bIsMultistreamSupported;
+ NvBool bIsSCEnabled;
+ NvBool bHasIncreasedWatermarkLimits;
+ NvBool bIsPC2Disabled;
+ NvBool isSingleHeadMSTSupported;
+ NvBool bFECSupported;
+ NvBool bIsTrainPhyRepeater;
+ NvBool bOverrideLinkBw;
+ NvBool bUseRgFlushSequence;
+ NvBool bSupportDPDownSpread;
+ NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC;
+} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0 2:2
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0 0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5 1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0 2:2
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U)
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730108U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 flags;
+ NvU32 displayMask;
+ NvU32 retryTimeMs;
+} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 UHBRSupportedByDfp;
+} NV0073_CTRL_DFP_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U)
+#define NV0073_CTRL_DFP_FLAGS_LANE 5:3
+#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR 13:13
+#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LINK 21:20
+#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS 0:0
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS 1:1
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS 2:2
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS_TRUE (0x00000001U)
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x73010cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 flags;
+ NvU32 displayId;
+} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 brightness;
+ NvBool bUncalibrated;
+ NvU8 brightnessType;
+} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS {
+ // In
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+
+ // Out
+ NvU16 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+ NvU8 linkBwCount;
+} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS;
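+
+/*
+ * Usage sketch, assuming nouveau's GSP RM control helpers
+ * (nvkm_gsp_rm_ctrl_get()/nvkm_gsp_rm_ctrl_push()); the control object and
+ * the source of the sink-reported rates here are illustrative:
+ *
+ *   NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl;
+ *
+ *   ctrl = nvkm_gsp_rm_ctrl_get(objcom,
+ *                               NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES,
+ *                               sizeof(*ctrl));
+ *   if (IS_ERR(ctrl))
+ *           return PTR_ERR(ctrl);
+ *
+ *   ctrl->displayId = displayId;
+ *   memcpy(ctrl->linkRateTbl, sinkRates, sizeof(ctrl->linkRateTbl));
+ *
+ *   return nvkm_gsp_rm_ctrl_push(objcom, &ctrl, sizeof(*ctrl));
+ */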
+
+#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 cmd;
+ NvU32 data;
+ NvU32 err;
+ NvU32 retryTimeMs;
+ NvU32 eightLaneDpcdBaseAddr;
+} NV0073_CTRL_DP_CTRL_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 sorIndex;
+ NvU32 dpLink;
+
+ NvBool bEnableOverride;
+ NvBool bMST;
+ NvU32 singleHeadMultistreamMode;
+ NvU32 hBlankSym;
+ NvU32 vBlankSym;
+ NvU32 colorFormat;
+ NvBool bEnableTwoHeadOneOr;
+
+ struct {
+ NvU32 slotStart;
+ NvU32 slotEnd;
+ NvU32 PBN;
+ NvU32 Timeslice;
+    NvBool sendACT; // deprecated - use NV0073_CTRL_CMD_DP_SEND_ACT
+ NvU32 singleHeadMSTPipeline;
+ NvBool bEnableAudioOverRightPanel;
+ } MST;
+
+ struct {
+ NvBool bEnhancedFraming;
+ NvU32 tuSize;
+ NvU32 waterMark;
+ NvBool bEnableAudioOverRightPanel;
+ } SST;
+} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 mute;
+} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
+ NvU32 addressSpace;
+ NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NvU32 cacheSnoop;
+ NvU32 hclass;
+ NvU32 channelInstance;
+ NvBool valid;
+ NvU32 pbTargetAperture;
+ NvU32 channelPBSize;
+ NvU32 subDeviceId;
+} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
+
+#define ADDR_SYSMEM (1)
+
+#define ADDR_FBMEM 2 // Frame buffer memory space
+
+typedef enum
+{
+ PB_SIZE_4KB = 0,
+ PB_SIZE_8KB,
+ PB_SIZE_16KB,
+ PB_SIZE_32KB,
+ PB_SIZE_64KB
+} ChannelPBSize;
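+
+/*
+ * The enumerants follow the pattern size = 4KiB << value: PB_SIZE_4KB (0)
+ * selects a 4KiB push buffer and PB_SIZE_64KB (4) a 64KiB one. An
+ * illustrative conversion:
+ *
+ *   static inline u32 channel_pb_bytes(ChannelPBSize s)
+ *   {
+ *       return SZ_4K << s;   // 4KiB << 0..4  ->  4KiB..64KiB
+ *   }
+ */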
+
+typedef struct
+{
+ NvV32 channelInstance; // One of the n channel instances of a given channel type.
+ // Note that core channel has only one instance
+ // while all others have two (one per head).
+ NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer
+ NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
+ NvU32 offset; // Initial offset for put/get, usually zero.
+ NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
+
+ NvU32 flags;
+ ChannelPBSize channelPBSize; // Size of Push Buffer requested by client (allowed values in enum)
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001
+
+ NvU32 subDeviceId; // One-hot encoded subDeviceId (i.e. SDM) that will be used to address the channel in the pushbuffer stream (via SSDM method)
+} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
+
+#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100 1
+#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT1000 2
+#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_NITS 3
+
+typedef enum
+{
+ IOVA,
+ PHYS_NVM,
+ PHYS_PCI,
+ PHYS_PCI_COHERENT
+} PBTARGETAPERTURE;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h
new file mode 100644
index 000000000000..7997050a4f29
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_ENGINE_H__
+#define __NVRM_ENGINE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define MC_ENGINE_IDX_NULL 0 // This must be 0
+#define MC_ENGINE_IDX_TMR 1
+#define MC_ENGINE_IDX_DISP 2
+#define MC_ENGINE_IDX_FB 3
+#define MC_ENGINE_IDX_FIFO 4
+#define MC_ENGINE_IDX_VIDEO 5
+#define MC_ENGINE_IDX_MD 6
+#define MC_ENGINE_IDX_BUS 7
+#define MC_ENGINE_IDX_PMGR 8
+#define MC_ENGINE_IDX_VP2 9
+#define MC_ENGINE_IDX_CIPHER 10
+#define MC_ENGINE_IDX_BIF 11
+#define MC_ENGINE_IDX_PPP 12
+#define MC_ENGINE_IDX_PRIVRING 13
+#define MC_ENGINE_IDX_PMU 14
+#define MC_ENGINE_IDX_CE0 15
+#define MC_ENGINE_IDX_CE1 16
+#define MC_ENGINE_IDX_CE2 17
+#define MC_ENGINE_IDX_CE3 18
+#define MC_ENGINE_IDX_CE4 19
+#define MC_ENGINE_IDX_CE5 20
+#define MC_ENGINE_IDX_CE6 21
+#define MC_ENGINE_IDX_CE7 22
+#define MC_ENGINE_IDX_CE8 23
+#define MC_ENGINE_IDX_CE9 24
+#define MC_ENGINE_IDX_CE10 25
+#define MC_ENGINE_IDX_CE11 26
+#define MC_ENGINE_IDX_CE12 27
+#define MC_ENGINE_IDX_CE13 28
+#define MC_ENGINE_IDX_CE14 29
+#define MC_ENGINE_IDX_CE15 30
+#define MC_ENGINE_IDX_CE16 31
+#define MC_ENGINE_IDX_CE17 32
+#define MC_ENGINE_IDX_CE18 33
+#define MC_ENGINE_IDX_CE19 34
+#define MC_ENGINE_IDX_CE_MAX MC_ENGINE_IDX_CE19
+#define MC_ENGINE_IDX_VIC 35
+#define MC_ENGINE_IDX_ISOHUB 36
+#define MC_ENGINE_IDX_VGPU 37
+#define MC_ENGINE_IDX_NVENC 38
+#define MC_ENGINE_IDX_NVENC1 39
+#define MC_ENGINE_IDX_NVENC2 40
+#define MC_ENGINE_IDX_NVENC3 41
+#define MC_ENGINE_IDX_C2C 42
+#define MC_ENGINE_IDX_LTC 43
+#define MC_ENGINE_IDX_FBHUB 44
+#define MC_ENGINE_IDX_HDACODEC 45
+#define MC_ENGINE_IDX_GMMU 46
+#define MC_ENGINE_IDX_SEC2 47
+#define MC_ENGINE_IDX_FSP 48
+#define MC_ENGINE_IDX_NVLINK 49
+#define MC_ENGINE_IDX_GSP 50
+#define MC_ENGINE_IDX_NVJPG 51
+#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG
+#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG
+#define MC_ENGINE_IDX_NVJPEG1 52
+#define MC_ENGINE_IDX_NVJPEG2 53
+#define MC_ENGINE_IDX_NVJPEG3 54
+#define MC_ENGINE_IDX_NVJPEG4 55
+#define MC_ENGINE_IDX_NVJPEG5 56
+#define MC_ENGINE_IDX_NVJPEG6 57
+#define MC_ENGINE_IDX_NVJPEG7 58
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT 59
+#define MC_ENGINE_IDX_ACCESS_CNTR 60
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT 61
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_ERROR 62
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_ERROR 63
+#define MC_ENGINE_IDX_INFO_FAULT 64
+#define MC_ENGINE_IDX_BSP 65
+#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP
+#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC
+#define MC_ENGINE_IDX_NVDEC1 66
+#define MC_ENGINE_IDX_NVDEC2 67
+#define MC_ENGINE_IDX_NVDEC3 68
+#define MC_ENGINE_IDX_NVDEC4 69
+#define MC_ENGINE_IDX_NVDEC5 70
+#define MC_ENGINE_IDX_NVDEC6 71
+#define MC_ENGINE_IDX_NVDEC7 72
+#define MC_ENGINE_IDX_CPU_DOORBELL 73
+#define MC_ENGINE_IDX_PRIV_DOORBELL 74
+#define MC_ENGINE_IDX_MMU_ECC_ERROR 75
+#define MC_ENGINE_IDX_BLG 76
+#define MC_ENGINE_IDX_PERFMON 77
+#define MC_ENGINE_IDX_BUF_RESET 78
+#define MC_ENGINE_IDX_XBAR 79
+#define MC_ENGINE_IDX_ZPW 80
+#define MC_ENGINE_IDX_OFA0 81
+#define MC_ENGINE_IDX_OFA1 82
+#define MC_ENGINE_IDX_TEGRA 83
+#define MC_ENGINE_IDX_GR 84
+#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR
+#define MC_ENGINE_IDX_GR1 85
+#define MC_ENGINE_IDX_GR2 86
+#define MC_ENGINE_IDX_GR3 87
+#define MC_ENGINE_IDX_GR4 88
+#define MC_ENGINE_IDX_GR5 89
+#define MC_ENGINE_IDX_GR6 90
+#define MC_ENGINE_IDX_GR7 91
+#define MC_ENGINE_IDX_ESCHED 92
+#define MC_ENGINE_IDX_ESCHED__SIZE 64
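+// MC_ENGINE_IDX_ESCHED is a block of MC_ENGINE_IDX_ESCHED__SIZE (64)
+// consecutive indices, 92..155, which is why the next fixed index below
+// resumes at 156.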
+#define MC_ENGINE_IDX_GR_FECS_LOG 156
+#define MC_ENGINE_IDX_GR0_FECS_LOG MC_ENGINE_IDX_GR_FECS_LOG
+#define MC_ENGINE_IDX_GR1_FECS_LOG 157
+#define MC_ENGINE_IDX_GR2_FECS_LOG 158
+#define MC_ENGINE_IDX_GR3_FECS_LOG 159
+#define MC_ENGINE_IDX_GR4_FECS_LOG 160
+#define MC_ENGINE_IDX_GR5_FECS_LOG 161
+#define MC_ENGINE_IDX_GR6_FECS_LOG 162
+#define MC_ENGINE_IDX_GR7_FECS_LOG 163
+#define MC_ENGINE_IDX_TMR_SWRL 164
+#define MC_ENGINE_IDX_DISP_GSP 165
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_CPU 166
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_CPU 167
+#define MC_ENGINE_IDX_PXUC 168
+#define MC_ENGINE_IDX_SYSLTC 169
+#define MC_ENGINE_IDX_LRCC 170
+#define MC_ENGINE_IDX_GSPLITE 171
+#define MC_ENGINE_IDX_GSPLITE0 MC_ENGINE_IDX_GSPLITE
+#define MC_ENGINE_IDX_GSPLITE1 172
+#define MC_ENGINE_IDX_GSPLITE2 173
+#define MC_ENGINE_IDX_GSPLITE3 174
+#define MC_ENGINE_IDX_GSPLITE_MAX MC_ENGINE_IDX_GSPLITE3
+#define MC_ENGINE_IDX_DPAUX 175
+#define MC_ENGINE_IDX_DISP_LOW 176
+#define MC_ENGINE_IDX_MAX 177
+
+typedef enum
+{
+ RM_ENGINE_TYPE_NULL = (0x00000000),
+ RM_ENGINE_TYPE_GR0 = (0x00000001),
+ RM_ENGINE_TYPE_GR1 = (0x00000002),
+ RM_ENGINE_TYPE_GR2 = (0x00000003),
+ RM_ENGINE_TYPE_GR3 = (0x00000004),
+ RM_ENGINE_TYPE_GR4 = (0x00000005),
+ RM_ENGINE_TYPE_GR5 = (0x00000006),
+ RM_ENGINE_TYPE_GR6 = (0x00000007),
+ RM_ENGINE_TYPE_GR7 = (0x00000008),
+ RM_ENGINE_TYPE_COPY0 = (0x00000009),
+ RM_ENGINE_TYPE_COPY1 = (0x0000000a),
+ RM_ENGINE_TYPE_COPY2 = (0x0000000b),
+ RM_ENGINE_TYPE_COPY3 = (0x0000000c),
+ RM_ENGINE_TYPE_COPY4 = (0x0000000d),
+ RM_ENGINE_TYPE_COPY5 = (0x0000000e),
+ RM_ENGINE_TYPE_COPY6 = (0x0000000f),
+ RM_ENGINE_TYPE_COPY7 = (0x00000010),
+ RM_ENGINE_TYPE_COPY8 = (0x00000011),
+ RM_ENGINE_TYPE_COPY9 = (0x00000012),
+ RM_ENGINE_TYPE_COPY10 = (0x00000013),
+ RM_ENGINE_TYPE_COPY11 = (0x00000014),
+ RM_ENGINE_TYPE_COPY12 = (0x00000015),
+ RM_ENGINE_TYPE_COPY13 = (0x00000016),
+ RM_ENGINE_TYPE_COPY14 = (0x00000017),
+ RM_ENGINE_TYPE_COPY15 = (0x00000018),
+ RM_ENGINE_TYPE_COPY16 = (0x00000019),
+ RM_ENGINE_TYPE_COPY17 = (0x0000001a),
+ RM_ENGINE_TYPE_COPY18 = (0x0000001b),
+ RM_ENGINE_TYPE_COPY19 = (0x0000001c),
+ RM_ENGINE_TYPE_NVDEC0 = (0x0000001d),
+ RM_ENGINE_TYPE_NVDEC1 = (0x0000001e),
+ RM_ENGINE_TYPE_NVDEC2 = (0x0000001f),
+ RM_ENGINE_TYPE_NVDEC3 = (0x00000020),
+ RM_ENGINE_TYPE_NVDEC4 = (0x00000021),
+ RM_ENGINE_TYPE_NVDEC5 = (0x00000022),
+ RM_ENGINE_TYPE_NVDEC6 = (0x00000023),
+ RM_ENGINE_TYPE_NVDEC7 = (0x00000024),
+ RM_ENGINE_TYPE_NVENC0 = (0x00000025),
+ RM_ENGINE_TYPE_NVENC1 = (0x00000026),
+ RM_ENGINE_TYPE_NVENC2 = (0x00000027),
+ // Bug 4175886 - Use this new value for all chips once GB20X is released
+ RM_ENGINE_TYPE_NVENC3 = (0x00000028),
+ RM_ENGINE_TYPE_VP = (0x00000029),
+ RM_ENGINE_TYPE_ME = (0x0000002a),
+ RM_ENGINE_TYPE_PPP = (0x0000002b),
+ RM_ENGINE_TYPE_MPEG = (0x0000002c),
+ RM_ENGINE_TYPE_SW = (0x0000002d),
+ RM_ENGINE_TYPE_TSEC = (0x0000002e),
+ RM_ENGINE_TYPE_VIC = (0x0000002f),
+ RM_ENGINE_TYPE_MP = (0x00000030),
+ RM_ENGINE_TYPE_SEC2 = (0x00000031),
+ RM_ENGINE_TYPE_HOST = (0x00000032),
+ RM_ENGINE_TYPE_DPU = (0x00000033),
+ RM_ENGINE_TYPE_PMU = (0x00000034),
+ RM_ENGINE_TYPE_FBFLCN = (0x00000035),
+ RM_ENGINE_TYPE_NVJPEG0 = (0x00000036),
+ RM_ENGINE_TYPE_NVJPEG1 = (0x00000037),
+ RM_ENGINE_TYPE_NVJPEG2 = (0x00000038),
+ RM_ENGINE_TYPE_NVJPEG3 = (0x00000039),
+ RM_ENGINE_TYPE_NVJPEG4 = (0x0000003a),
+ RM_ENGINE_TYPE_NVJPEG5 = (0x0000003b),
+ RM_ENGINE_TYPE_NVJPEG6 = (0x0000003c),
+ RM_ENGINE_TYPE_NVJPEG7 = (0x0000003d),
+ RM_ENGINE_TYPE_OFA0 = (0x0000003e),
+ RM_ENGINE_TYPE_OFA1 = (0x0000003f),
+ RM_ENGINE_TYPE_RESERVED40 = (0x00000040),
+ RM_ENGINE_TYPE_RESERVED41 = (0x00000041),
+ RM_ENGINE_TYPE_RESERVED42 = (0x00000042),
+ RM_ENGINE_TYPE_RESERVED43 = (0x00000043),
+ RM_ENGINE_TYPE_RESERVED44 = (0x00000044),
+ RM_ENGINE_TYPE_RESERVED45 = (0x00000045),
+ RM_ENGINE_TYPE_RESERVED46 = (0x00000046),
+ RM_ENGINE_TYPE_RESERVED47 = (0x00000047),
+ RM_ENGINE_TYPE_RESERVED48 = (0x00000048),
+ RM_ENGINE_TYPE_RESERVED49 = (0x00000049),
+ RM_ENGINE_TYPE_RESERVED4a = (0x0000004a),
+ RM_ENGINE_TYPE_RESERVED4b = (0x0000004b),
+ RM_ENGINE_TYPE_RESERVED4c = (0x0000004c),
+ RM_ENGINE_TYPE_RESERVED4d = (0x0000004d),
+ RM_ENGINE_TYPE_RESERVED4e = (0x0000004e),
+ RM_ENGINE_TYPE_RESERVED4f = (0x0000004f),
+ RM_ENGINE_TYPE_RESERVED50 = (0x00000050),
+ RM_ENGINE_TYPE_RESERVED51 = (0x00000051),
+ RM_ENGINE_TYPE_RESERVED52 = (0x00000052),
+ RM_ENGINE_TYPE_RESERVED53 = (0x00000053),
+ RM_ENGINE_TYPE_LAST = (0x00000054),
+} RM_ENGINE_TYPE;
+
+#define NV2080_ENGINE_TYPE_NULL (0x00000000)
+#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001)
+#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS
+#define NV2080_ENGINE_TYPE_GR1 (0x00000002)
+#define NV2080_ENGINE_TYPE_GR2 (0x00000003)
+#define NV2080_ENGINE_TYPE_GR3 (0x00000004)
+#define NV2080_ENGINE_TYPE_GR4 (0x00000005)
+#define NV2080_ENGINE_TYPE_GR5 (0x00000006)
+#define NV2080_ENGINE_TYPE_GR6 (0x00000007)
+#define NV2080_ENGINE_TYPE_GR7 (0x00000008)
+#define NV2080_ENGINE_TYPE_COPY0 (0x00000009)
+#define NV2080_ENGINE_TYPE_COPY1 (0x0000000a)
+#define NV2080_ENGINE_TYPE_COPY2 (0x0000000b)
+#define NV2080_ENGINE_TYPE_COPY3 (0x0000000c)
+#define NV2080_ENGINE_TYPE_COPY4 (0x0000000d)
+#define NV2080_ENGINE_TYPE_COPY5 (0x0000000e)
+#define NV2080_ENGINE_TYPE_COPY6 (0x0000000f)
+#define NV2080_ENGINE_TYPE_COPY7 (0x00000010)
+#define NV2080_ENGINE_TYPE_COPY8 (0x00000011)
+#define NV2080_ENGINE_TYPE_COPY9 (0x00000012)
+#define NV2080_ENGINE_TYPE_BSP (0x00000013)
+#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP
+#define NV2080_ENGINE_TYPE_NVDEC1 (0x00000014)
+#define NV2080_ENGINE_TYPE_NVDEC2 (0x00000015)
+#define NV2080_ENGINE_TYPE_NVDEC3 (0x00000016)
+#define NV2080_ENGINE_TYPE_NVDEC4 (0x00000017)
+#define NV2080_ENGINE_TYPE_NVDEC5 (0x00000018)
+#define NV2080_ENGINE_TYPE_NVDEC6 (0x00000019)
+#define NV2080_ENGINE_TYPE_NVDEC7 (0x0000001a)
+#define NV2080_ENGINE_TYPE_MSENC (0x0000001b)
+#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */
+#define NV2080_ENGINE_TYPE_NVENC1 (0x0000001c)
+#define NV2080_ENGINE_TYPE_NVENC2 (0x0000001d)
+#define NV2080_ENGINE_TYPE_VP (0x0000001e)
+#define NV2080_ENGINE_TYPE_ME (0x0000001f)
+#define NV2080_ENGINE_TYPE_PPP (0x00000020)
+#define NV2080_ENGINE_TYPE_MPEG (0x00000021)
+#define NV2080_ENGINE_TYPE_SW (0x00000022)
+#define NV2080_ENGINE_TYPE_CIPHER (0x00000023)
+#define NV2080_ENGINE_TYPE_TSEC NV2080_ENGINE_TYPE_CIPHER
+#define NV2080_ENGINE_TYPE_VIC (0x00000024)
+#define NV2080_ENGINE_TYPE_MP (0x00000025)
+#define NV2080_ENGINE_TYPE_SEC2 (0x00000026)
+#define NV2080_ENGINE_TYPE_HOST (0x00000027)
+#define NV2080_ENGINE_TYPE_DPU (0x00000028)
+#define NV2080_ENGINE_TYPE_PMU (0x00000029)
+#define NV2080_ENGINE_TYPE_FBFLCN (0x0000002a)
+#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b)
+#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG
+#define NV2080_ENGINE_TYPE_NVJPEG1 (0x0000002c)
+#define NV2080_ENGINE_TYPE_NVJPEG2 (0x0000002d)
+#define NV2080_ENGINE_TYPE_NVJPEG3 (0x0000002e)
+#define NV2080_ENGINE_TYPE_NVJPEG4 (0x0000002f)
+#define NV2080_ENGINE_TYPE_NVJPEG5 (0x00000030)
+#define NV2080_ENGINE_TYPE_NVJPEG6 (0x00000031)
+#define NV2080_ENGINE_TYPE_NVJPEG7 (0x00000032)
+#define NV2080_ENGINE_TYPE_OFA (0x00000033)
+#define NV2080_ENGINE_TYPE_OFA0 NV2080_ENGINE_TYPE_OFA
+#define NV2080_ENGINE_TYPE_COPY10 (0x00000034)
+#define NV2080_ENGINE_TYPE_COPY11 (0x00000035)
+#define NV2080_ENGINE_TYPE_COPY12 (0x00000036)
+#define NV2080_ENGINE_TYPE_COPY13 (0x00000037)
+#define NV2080_ENGINE_TYPE_COPY14 (0x00000038)
+#define NV2080_ENGINE_TYPE_COPY15 (0x00000039)
+#define NV2080_ENGINE_TYPE_COPY16 (0x0000003a)
+#define NV2080_ENGINE_TYPE_COPY17 (0x0000003b)
+#define NV2080_ENGINE_TYPE_COPY18 (0x0000003c)
+#define NV2080_ENGINE_TYPE_COPY19 (0x0000003d)
+#define NV2080_ENGINE_TYPE_OFA1 (0x0000003e)
+#define NV2080_ENGINE_TYPE_NVENC3 (0x0000003f)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY0 (0x00000040)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY1 (0x00000041)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY2 (0x00000042)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY3 (0x00000043)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY4 (0x00000044)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY5 (0x00000045)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY6 (0x00000046)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY7 (0x00000047)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY8 (0x00000048)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY9 (0x00000049)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY10 (0x0000004a)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY11 (0x0000004b)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY12 (0x0000004c)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY13 (0x0000004d)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY14 (0x0000004e)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY15 (0x0000004f)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY16 (0x00000050)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY17 (0x00000051)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY18 (0x00000052)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY19 (0x00000053)
+#define NV2080_ENGINE_TYPE_LAST (0x00000054)
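+
+/*
+ * Note that RM_ENGINE_TYPE_* and NV2080_ENGINE_TYPE_* are distinct numbering
+ * spaces that diverge from the video engines onward (RM_ENGINE_TYPE_NVDEC0 is
+ * 0x1d, while NV2080_ENGINE_TYPE_NVDEC0 aliases BSP at 0x13), so values must
+ * be translated between the two rather than mixed. Both NVDEC ranges are
+ * contiguous, so an illustrative translation for that range is:
+ *
+ *   static inline u32 rm_nvdec_to_nv2080(u32 rm)
+ *   {
+ *       return NV2080_ENGINE_TYPE_NVDEC0 + (rm - RM_ENGINE_TYPE_NVDEC0);
+ *   }
+ */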
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h
new file mode 100644
index 000000000000..8af432375f7a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FBSR_H__
+#define __NVRM_FBSR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS {
+ NvHandle hClient;
+ NvHandle hSysMem;
+ NvBool bEnteringGcoffState;
+ NV_DECLARE_ALIGNED(NvU64 sysmemAddrOfSuspendResumeData, 8);
+} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h
index 7157c7757698..2b002ca64e0f 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h
@@ -1,31 +1,14 @@
-#ifndef __src_common_sdk_nvidia_inc_alloc_alloc_channel_h__
-#define __src_common_sdk_nvidia_inc_alloc_alloc_channel_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FIFO_H__
+#define __NVRM_FIFO_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV_MAX_SUBDEVICES 8
typedef struct NV_MEMORY_DESC_PARAMS {
NV_DECLARE_ALIGNED(NvU64 base, 8);
@@ -34,137 +17,197 @@ typedef struct NV_MEMORY_DESC_PARAMS {
NvU32 cacheAttrib;
} NV_MEMORY_DESC_PARAMS;
+#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U
+
+#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U
+
+typedef struct NV_CHANNEL_ALLOC_PARAMS {
+
+ NvHandle hObjectError; // error context DMA
+ NvHandle hObjectBuffer; // no longer used
+ NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO
+ NvU32 gpFifoEntries; // number of GP FIFO entries
+
+ NvU32 flags;
+
+
+ NvHandle hContextShare; // context share handle
+ NvHandle hVASpace; // VASpace for the channel
+
+ // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0
+ NvHandle hUserdMemory[NV_MAX_SUBDEVICES];
+
+ // offset to beginning of UserD within hUserdMemory[x]
+ NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8);
+
+    // engine type (NV2080_ENGINE_TYPE_*) with which this channel is associated
+ NvU32 engineType;
+    // Channel identifier that is unique for the duration of an RM session
+ NvU32 cid;
+ // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods
+ NvU32 subDeviceId;
+ NvHandle hObjectEccError; // ECC error context DMA
+
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8);
+
+ NvHandle hPhysChannelGroup; // reserved
+ NvU32 internalFlags; // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved
+ NvU32 ProcessID; // reserved
+ NvU32 SubProcessID; // reserved
+
+ // IV used for CPU-side encryption / GPU-side decryption.
+ NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // IV used for CPU-side decryption / GPU-side encryption.
+ NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+    // Nonce used for CPU-side signing / GPU-side signature verification.
+ NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved
+ NvU32 tpcConfigID; // TPC Configuration Id as supported by DTD-PG Feature
+} NV_CHANNEL_ALLOC_PARAMS;
+
+typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS;
+
#define NVOS04_FLAGS_CHANNEL_TYPE 1:0
#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL 0x00000000
#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL 0x00000001 // OBSOLETE
#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL 0x00000002 // OBSOLETE
-
#define NVOS04_FLAGS_VPR 2:2
#define NVOS04_FLAGS_VPR_FALSE 0x00000000
#define NVOS04_FLAGS_VPR_TRUE 0x00000001
-
#define NVOS04_FLAGS_CC_SECURE 2:2
#define NVOS04_FLAGS_CC_SECURE_FALSE 0x00000000
#define NVOS04_FLAGS_CC_SECURE_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING 3:3
#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE 0x00000001
-
#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4
#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT 0x00000000
#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE 0x00000001
-
#define NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5
#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE 0x00000000
#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE 0x00000001
-
#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING 6:6
#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE 0x00000000
#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE 7:7
#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8
-
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED 11:11
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12
-
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED 21:21
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV 22:22
#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER 23:23
#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO 24:24
#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE 0x00000001
-
#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL 25:25
#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE 0x00000000
#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT 26:26
#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT 27:27
#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE 0x00000001
-
#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD 29:28
#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT 0x00000000
#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE 0x00000001
#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO 0x00000002
-
#define NVOS04_FLAGS_MAP_CHANNEL 30:30
#define NVOS04_FLAGS_MAP_CHANNEL_FALSE 0x00000000
#define NVOS04_FLAGS_MAP_CHANNEL_TRUE 0x00000001
-
#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC 31:31
#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE 0x00000000
#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE 0x00000001
-#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U
-#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U
-
-typedef struct NV_CHANNEL_ALLOC_PARAMS {
-
- NvHandle hObjectError; // error context DMA
- NvHandle hObjectBuffer; // no longer used
- NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO
- NvU32 gpFifoEntries; // number of GP FIFO entries
-
- NvU32 flags;
-
-
- NvHandle hContextShare; // context share handle
- NvHandle hVASpace; // VASpace for the channel
-
- // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0
- NvHandle hUserdMemory[NV_MAX_SUBDEVICES];
-
- // offset to beginning of UserD within hUserdMemory[x]
- NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8);
-
- // engine type(NV2080_ENGINE_TYPE_*) with which this channel is associated
- NvU32 engineType;
- // Channel identifier that is unique for the duration of a RM session
- NvU32 cid;
- // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods
- NvU32 subDeviceId;
- NvHandle hObjectEccError; // ECC error context DMA
-
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8);
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8);
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8);
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8);
-
- NvHandle hPhysChannelGroup; // reserved
- NvU32 internalFlags; // reserved
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved
- NvU32 ProcessID; // reserved
- NvU32 SubProcessID; // reserved
- // IV used for CPU-side encryption / GPU-side decryption.
- NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
- // IV used for CPU-side decryption / GPU-side encryption.
- NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
- // Nonce used CPU-side signing / GPU-side signature verification.
- NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved
-} NV_CHANNEL_ALLOC_PARAMS;
-
-typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS;
-
+typedef enum {
+ /*!
+ * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by
+ * kernel CPU-RM clients.
+ */
+ ERROR_NOTIFIER_TYPE_UNKNOWN = 0,
+ /*! @brief Error notifier is explicitly not set.
+ *
+ * The corresponding hErrorContext or hEccErrorContext must be
+ * NV01_NULL_OBJECT.
+ */
+ ERROR_NOTIFIER_TYPE_NONE,
+ /*! @brief Error notifier is a ContextDma */
+ ERROR_NOTIFIER_TYPE_CTXDMA,
+ /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */
+ ERROR_NOTIFIER_TYPE_MEMORY
+} ErrorNotifierType;
+
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED 6:6
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED_NO 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED_YES 0x1
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED 7:7
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED_NO 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED_YES 0x1
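+
+/*
+ * The fields above pack into the internalFlags word of
+ * NV_CHANNEL_ALLOC_PARAMS: privilege in bits 1:0, the error-notifier types in
+ * bits 3:2 and 5:4, and the ownership bits in 6 and 7. An illustrative
+ * composition (nouveau's NVDEF() macros from <nvhw/drf.h> express the same
+ * thing symbolically):
+ *
+ *   args->internalFlags =
+ *       (NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN << 0) |
+ *       (ERROR_NOTIFIER_TYPE_NONE << 2) |   // ERROR_NOTIFIER_TYPE, bits 3:2
+ *       (ERROR_NOTIFIER_TYPE_NONE << 4);    // ECC variant, bits 5:4
+ */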
+
+typedef struct rpc_rc_triggered_v17_02
+{
+ NvU32 nv2080EngineType;
+ NvU32 chid;
+ NvU32 gfid;
+ NvU32 exceptLevel;
+ NvU32 exceptType;
+ NvU32 scope;
+ NvU16 partitionAttributionId;
+ NvU32 mmuFaultAddrLo;
+ NvU32 mmuFaultAddrHi;
+ NvU32 mmuFaultType;
+ NvBool bCallbackNeeded;
+ NvU32 rcJournalBufferSize;
+ NvU8 rcJournalBuffer[];
+} rpc_rc_triggered_v17_02;
+
+#define NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS 0x40
+
+typedef struct NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO {
+ NvU32 engDesc;
+ NvU32 ctxAttr;
+ NvU32 ctxBufferSize;
+ NvU32 addrSpaceList;
+ NvU32 registerBase;
+} NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO;
+
+#define NV2080_CTRL_CMD_GPU_GET_CONSTRUCTED_FALCON_INFO (0x208001b0) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
+ NvU32 numConstructedFalcons;
+ NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS];
+} NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS {
+ NvBool bDisableActiveChannels;
+} NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS_MESSAGE_ID" */
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h
new file mode 100644
index 000000000000..feed1dabd9d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GR_H__
+#define __NVRM_GR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8
+
+#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x1a
+
+typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO {
+ NvU32 size;
+ NvU32 alignment;
+} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO {
+ NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT];
+} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS {
+ NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS;
+
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VLD (0x00000001)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VIDEO (0x00000002)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_MPEG (0x00000003)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_CAPTURE (0x00000004)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_DISPLAY (0x00000005)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_ENCRYPTION (0x00000006)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_POSTPROCESS (0x00000007)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ZCULL (0x00000008)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM (0x00000009)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COMPUTE_PREEMPT (0x0000000a)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PREEMPT (0x0000000b)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SPILL (0x0000000c)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL (0x0000000d)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BETACB (0x0000000e)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV (0x0000000f)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH (0x00000010)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB (0x00000011)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL (0x00000012)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB (0x00000013)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL (0x00000014)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_POOL (0x00000015)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_CTRL_BLK (0x00000016)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT (0x00000017)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SETUP (0x00000019)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x0000001a)
+
+#define NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO (0x20800137U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
+ NvU32 gpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO (0x20800138U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
+ NvU32 gpcId;
+ NvU32 tpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
+
+#define KGRAPHICS_SCRUBBER_HANDLE_VAS 0xdada0042
+#define KGRAPHICS_SCRUBBER_HANDLE_CHANNEL (KGRAPHICS_SCRUBBER_HANDLE_VAS + 3)
+#define KGRAPHICS_SCRUBBER_HANDLE_3DOBJ (KGRAPHICS_SCRUBBER_HANDLE_VAS + 4)
+
+typedef struct NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS {
+ NvBool bTeardown;
+} NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_KGR_INIT_BUG4208224_WAR (0x20800a46) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_KGR_INIT_BUG4208224_WAR_PARAMS_MESSAGE_ID" */
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h
new file mode 100644
index 000000000000..b6075021e74f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h
@@ -0,0 +1,634 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GSP_H__
+#define __NVRM_GSP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U
+
+typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
+ NV_DECLARE_ALIGNED(NvU64 base, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NV_DECLARE_ALIGNED(NvU64 reserved, 8);
+ NvU32 performance;
+ NvBool supportCompressed;
+ NvBool supportISO;
+ NvBool bProtected;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList;
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO;
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
+ NvU32 numFBRegions;
+ NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8);
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+
+#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23
+
+#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL)
+
+typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
+ NvU32 index;
+ NvU32 flags;
+ NvU32 length;
+ NvU8 data[NV2080_GPU_MAX_GID_LENGTH];
+} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
+ NvU32 BoardID;
+ char chipSKU[9];
+ char chipSKUMod[5];
+ NvU32 skuConfigVersion;
+ char project[5];
+ char projectSKU[5];
+ char CDP[6];
+ char projectSKUMod[2];
+ NvU32 businessCycle;
+} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
+
+#define MAX_GPC_COUNT 32
+
+typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
+ NvU32 totalVFs;
+ NvU32 firstVfOffset;
+ NvU32 vfFeatureMask;
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+ NvBool bSriovEnabled;
+ NvBool bSriovHeavyEnabled;
+ NvBool bEmulateVFBar0TlbInvalidationRegister;
+ NvBool bClientRmAllocatedCtxBuffer;
+ NvBool bNonPowerOf2ChannelCountSupported;
+ NvBool bVfResizableBAR1Supported;
+} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
+
+#include "engine.h"
+
+#define NVGPU_ENGINE_CAPS_MASK_BITS 32
+
+#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)
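+
+/* With RM_ENGINE_TYPE_LAST == 0x54 (84), this evaluates to (84 - 1) / 32 + 1
+ * == 3: three 32-bit words provide one capability bit per engine type. */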
+
+#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U)
+
+typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
+{
+ NvU32 numHeads;
+ NvU32 maxNumHeads;
+} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
+
+typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
+{
+ NvU32 headIndex;
+ NvU32 maxHResolution;
+ NvU32 maxVResolution;
+} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
+
+#define MAX_GROUP_COUNT 2
+
+typedef struct
+{
+ NvU32 ecidLow;
+ NvU32 ecidHigh;
+ NvU32 ecidExtended;
+} EcidManufacturingInfo;
+
+typedef struct
+{
+ NvU64 nonWprHeapOffset;
+ NvU64 frtsOffset;
+} FW_WPR_LAYOUT_OFFSET;
+
+typedef struct GspStaticConfigInfo_t
+{
+ NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE];
+ NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo;
+ NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams;
+
+ NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps;
+ NvU32 sriovMaxGfid;
+
+ NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX];
+
+ NvBool poisonFuseEnabled;
+
+ NvU64 fb_length;
+ NvU64 fbio_mask;
+ NvU32 fb_bus_width;
+ NvU32 fb_ram_type;
+ NvU64 fbp_mask;
+ NvU32 l2_cache_size;
+
+ NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvBool bGpuInternalSku;
+ NvBool bIsQuadroGeneric;
+ NvBool bIsQuadroAd;
+ NvBool bIsNvidiaNvs;
+ NvBool bIsVgx;
+ NvBool bGeforceSmb;
+ NvBool bIsTitan;
+ NvBool bIsTesla;
+ NvBool bIsMobile;
+ NvBool bIsGc6Rtd3Allowed;
+ NvBool bIsGc8Rtd3Allowed;
+ NvBool bIsGcOffRtd3Allowed;
+ NvBool bIsGcoffLegacyAllowed;
+ NvBool bIsMigSupported;
+
+    /* "Total Board Power" refers to the power requirement of the GPU
+     * while in the GC6 state. The majority of this power is used
+     * to keep V-RAM active to preserve its contents.
+     * Some energy may be consumed by always-on components on the GPU chip.
+     * This power is provided by the 3.3v voltage rail.
+     */
+ NvU16 RTD3GC6TotalBoardPower;
+
+    /* PERST# (i.e. PCI Express Reset) is a sideband signal
+     * generated by the PCIe host to indicate to the PCIe devices
+     * that the power rails and the reference clock are stable.
+     * The endpoint device typically uses this signal as a global reset.
+     */
+ NvU16 RTD3GC6PerstDelay;
+
+ NvU64 bar1PdeBase;
+ NvU64 bar2PdeBase;
+
+ NvBool bVbiosValid;
+ NvU32 vbiosSubVendor;
+ NvU32 vbiosSubDevice;
+
+ NvBool bPageRetirementSupported;
+
+ NvBool bSplitVasBetweenServerClientRm;
+
+ NvBool bClRootportNeedsNosnoopWAR;
+
+ VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads;
+ VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution;
+ NvU64 displaylessMaxPixels;
+
+ // Client handle for internal RMAPI control.
+ NvHandle hInternalClient;
+
+ // Device handle for internal RMAPI control.
+ NvHandle hInternalDevice;
+
+ // Subdevice handle for internal RMAPI control.
+ NvHandle hInternalSubdevice;
+
+ NvBool bSelfHostedMode;
+ NvBool bAtsSupported;
+
+ NvBool bIsGpuUefi;
+ NvBool bIsEfiInit;
+
+ EcidManufacturingInfo ecidInfo[MAX_GROUP_COUNT];
+
+ FW_WPR_LAYOUT_OFFSET fwWprLayoutOffset;
+} GspStaticConfigInfo;
+
+typedef struct
+{
+ NvU16 deviceID; // deviceID
+ NvU16 vendorID; // vendorID
+ NvU16 subdeviceID; // subsystem deviceID
+ NvU16 subvendorID; // subsystem vendorID
+ NvU8 revisionID; // revision ID
+} BUSINFO;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct DOD_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 acpiIdListLen;
+ NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} DOD_METHOD_DATA;
+
+typedef struct JT_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 jtCaps;
+ NvU16 jtRevId;
+ NvBool bSBIOSCaps;
+} JT_METHOD_DATA;
+
+typedef struct MUX_METHOD_DATA_ELEMENT
+{
+ NvU32 acpiId;
+ NvU32 mode;
+ NV_STATUS status;
+} MUX_METHOD_DATA_ELEMENT;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct MUX_METHOD_DATA
+{
+ NvU32 tableLen;
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxStateTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} MUX_METHOD_DATA;
+
+typedef struct CAPS_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 optimusCaps;
+} CAPS_METHOD_DATA;
+
+typedef struct ACPI_METHOD_DATA
+{
+ NvBool bValid;
+ DOD_METHOD_DATA dodMethodData;
+ JT_METHOD_DATA jtMethodData;
+ MUX_METHOD_DATA muxMethodData;
+ CAPS_METHOD_DATA capsMethodData;
+} ACPI_METHOD_DATA;
+
+typedef struct GSP_VF_INFO
+{
+ NvU32 totalVFs;
+ NvU32 firstVFOffset;
+ NvU64 FirstVFBar0Address;
+ NvU64 FirstVFBar1Address;
+ NvU64 FirstVFBar2Address;
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+} GSP_VF_INFO;
+
+typedef struct
+{
+ // Link capabilities
+ NvU32 linkCap;
+} GSP_PCIE_CONFIG_REG;
+
+typedef struct GspSystemInfo
+{
+ NvU64 gpuPhysAddr;
+ NvU64 gpuPhysFbAddr;
+ NvU64 gpuPhysInstAddr;
+ NvU64 gpuPhysIoAddr;
+ NvU64 nvDomainBusDeviceFunc;
+ NvU64 simAccessBufPhysAddr;
+ NvU64 notifyOpSharedSurfacePhysAddr;
+ NvU64 pcieAtomicsOpMask;
+ NvU64 consoleMemSize;
+ NvU64 maxUserVa;
+ NvU32 pciConfigMirrorBase;
+ NvU32 pciConfigMirrorSize;
+ NvU32 PCIDeviceID;
+ NvU32 PCISubDeviceID;
+ NvU32 PCIRevisionID;
+ NvU32 pcieAtomicsCplDeviceCapMask;
+ NvU8 oorArch;
+ NvU64 clPdbProperties;
+ NvU32 Chipset;
+ NvBool bGpuBehindBridge;
+ NvBool bFlrSupported;
+ NvBool b64bBar0Supported;
+ NvBool bMnocAvailable;
+ NvU32 chipsetL1ssEnable;
+ NvBool bUpstreamL0sUnsupported;
+ NvBool bUpstreamL1Unsupported;
+ NvBool bUpstreamL1PorSupported;
+ NvBool bUpstreamL1PorMobileOnly;
+ NvBool bSystemHasMux;
+ NvU8 upstreamAddressValid;
+ BUSINFO FHBBusInfo;
+ BUSINFO chipsetIDInfo;
+ ACPI_METHOD_DATA acpiMethodData;
+ NvU32 hypervisorType;
+ NvBool bIsPassthru;
+ NvU64 sysTimerOffsetNs;
+ GSP_VF_INFO gspVFInfo;
+ NvBool bIsPrimary;
+ NvBool isGridBuild;
+ GSP_PCIE_CONFIG_REG pcieConfigReg;
+ NvU32 gridBuildCsp;
+ NvBool bPreserveVideoMemoryAllocations;
+ NvBool bTdrEventSupported;
+ NvBool bFeatureStretchVblankCapable;
+ NvBool bEnableDynamicGranularityPageArrays;
+ NvBool bClockBoostSupported;
+ NvBool bRouteDispIntrsToCPU;
+ NvU64 hostPageSize;
+} GspSystemInfo;
+
+typedef struct rpc_os_error_log_v17_00
+{
+ NvU32 exceptType;
+ NvU32 runlistId;
+ NvU32 chid;
+ char errString[0x100];
+ NvU32 preemptiveRemovalPreviousXid;
+} rpc_os_error_log_v17_00;
+
+typedef struct
+{
+ // Magic
+ // BL to use for verification (i.e. Booter locked it in WPR2)
+ NvU64 magic; // = 0xdc3aae21371a60b3;
+
+ // Revision number of Booter-BL-Sequencer handoff interface
+    // Revision number of the Booter-BL-Sequencer handoff interface.
+    // Bumped whenever this interface changes in a non-backward-compatible way,
+    // and when GSP-RM ucode is revoked.
+
+ // ---- Members regarding data in SYSMEM ----------------------------
+ // Consumed by Booter for DMA
+
+ NvU64 sysmemAddrOfRadix3Elf;
+ NvU64 sizeOfRadix3Elf;
+
+ NvU64 sysmemAddrOfBootloader;
+ NvU64 sizeOfBootloader;
+
+ // Offsets inside bootloader image needed by Booter
+ NvU64 bootloaderCodeOffset;
+ NvU64 bootloaderDataOffset;
+ NvU64 bootloaderManifestOffset;
+
+ union
+ {
+ // Used only at initial boot
+ struct
+ {
+ NvU64 sysmemAddrOfSignature;
+ NvU64 sizeOfSignature;
+ };
+
+ //
+ // Used at suspend/resume to read GspFwHeapFreeList
+ // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
+ //
+ struct
+ {
+ NvU32 gspFwHeapFreeListWprOffset;
+ NvU32 unused0;
+ NvU64 unused1;
+ };
+ };
+
+ // ---- Members describing FB layout --------------------------------
+ NvU64 gspFwRsvdStart;
+
+ NvU64 nonWprHeapOffset;
+ NvU64 nonWprHeapSize;
+
+ NvU64 gspFwWprStart;
+
+ // GSP-RM to use to setup heap.
+ NvU64 gspFwHeapOffset;
+ NvU64 gspFwHeapSize;
+
+ // BL to use to find ELF for jump
+ NvU64 gspFwOffset;
+ // Size is sizeOfRadix3Elf above.
+
+ NvU64 bootBinOffset;
+ // Size is sizeOfBootloader above.
+
+ NvU64 frtsOffset;
+ NvU64 frtsSize;
+
+ NvU64 gspFwWprEnd;
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 fbSize;
+
+ // ---- Other members -----------------------------------------------
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 vgaWorkspaceOffset;
+ NvU64 vgaWorkspaceSize;
+
+ // Boot count. Used to determine whether to load the firmware image.
+ NvU64 bootCount;
+
+ // This union is organized the way it is to start at an 8-byte boundary and achieve natural
+ // packing of the internal struct fields.
+ union
+ {
+ struct
+ {
+ // TODO: the partitionRpc* fields below do not really belong in this
+ // structure. The values are patched in by the partition bootstrapper
+ // when GSP-RM is booted in a partition, and this structure was a
+ // convenient place for the bootstrapper to access them. These should
+ // be moved to a different comm. mechanism between the bootstrapper
+ // and the GSP-RM tasks.
+
+ // Shared partition RPC memory (physical address)
+ NvU64 partitionRpcAddr;
+
+ // Offsets relative to partitionRpcAddr
+ NvU16 partitionRpcRequestOffset;
+ NvU16 partitionRpcReplyOffset;
+
+ // Code section and dataSection offset and size.
+ NvU32 elfCodeOffset;
+ NvU32 elfDataOffset;
+ NvU32 elfCodeSize;
+ NvU32 elfDataSize;
+
+ // Used during GSP-RM resume to check for revocation
+ NvU32 lsUcodeVersion;
+ };
+
+ struct
+ {
+ // Pad for the partitionRpc* fields, plus 4 bytes
+ NvU32 partitionRpcPadding[4];
+
+ // CrashCat (contiguous) buffer size/location - occupies same bytes as the
+ // elf(Code|Data)(Offset|Size) fields above.
+ // TODO: move to GSP_FMC_INIT_PARAMS
+ NvU64 sysmemAddrOfCrashReportQueue;
+ NvU32 sizeOfCrashReportQueue;
+
+ // Pad for the lsUcodeVersion field
+ NvU32 lsUcodeVersionPadding[1];
+ };
+ };
+
+ // Number of VF partitions allocating sub-heaps from the WPR heap
+ // Used during boot to ensure the heap is adequately sized
+ NvU8 gspFwHeapVfPartitionCount;
+
+ // Flags to help decide GSP-FW flow.
+ NvU8 flags;
+
+ // Pad structure to exactly 256 bytes. Can replace padding with additional
+ // fields without incrementing revision. Padding initialized to 0.
+ NvU8 padding[2];
+
+ //
+    // Starts at gspFwWprEnd+frtsSize because FRTS is positioned to end where
+    // this allocation starts (when RM requests FSP to create FRTS).
+ //
+ NvU32 pmuReservedSize;
+
+ // BL to use for verification (i.e. Booter says OK to boot)
+ NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
+} GspFwWprMeta;
+
+#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
+
+#define GSP_FW_WPR_META_REVISION 1
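+
+/*
+ * Size sketch (illustrative, not part of the RM interface): the padding
+ * comment above pins this structure at exactly 256 bytes, which a consumer
+ * could verify at compile time with, e.g.:
+ *
+ *   static_assert(sizeof(GspFwWprMeta) == 256,
+ *                 "GspFwWprMeta must remain exactly 256 bytes");
+ */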
+
+typedef struct {
+ NvU64 sharedMemPhysAddr;
+ NvU32 pageTableEntryCount;
+ NvLength cmdQueueOffset;
+ NvLength statQueueOffset;
+} MESSAGE_QUEUE_INIT_ARGUMENTS;
+
+typedef struct {
+ NvU32 oldLevel;
+ NvU32 flags;
+ NvBool bInPMTransition;
+} GSP_SR_INIT_ARGUMENTS;
+
+typedef struct
+{
+ MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments;
+ GSP_SR_INIT_ARGUMENTS srInitArguments;
+ NvU32 gpuInstance;
+ NvBool bDmemStack;
+
+ struct
+ {
+ NvU64 pa;
+ NvU64 size;
+ } profilerArgs;
+} GSP_ARGUMENTS_CACHED;
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U)
+
+typedef struct
+{
+ // Magic for verification by secure ucode
+ NvU64 magic; // = GSP_FW_SR_META_MAGIC;
+
+ //
+ // Revision number
+    // Bumped when this interface changes in a way that is not backward compatible.
+ //
+ NvU64 revision; // = GSP_FW_SR_META_MAGIC_REVISION;
+
+ // Members regarding data in SYSMEM
+ NvU64 sysmemAddrOfSuspendResumeData;
+ NvU64 sizeOfSuspendResumeData;
+
+ //
+ // Internal members for use by secure ucode
+ // Must be exactly GSP_FW_SR_META_INTERNAL_SIZE bytes.
+ //
+ NvU32 internal[32];
+
+ // Same as flags of GspFwWprMeta
+ NvU32 flags;
+
+ // Subrevision number used by secure ucode
+ NvU32 subrevision;
+
+ //
+ // Pad structure to exactly 256 bytes (1 DMA chunk).
+ // Padding initialized to zero.
+ //
+ NvU32 padding[22];
+} GspFwSRMeta;
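+
+/*
+ * Size check (illustrative): 4 NvU64 members (32 bytes) + internal[32]
+ * (128 bytes) + flags and subrevision (8 bytes) + padding[22] (88 bytes)
+ * = 256 bytes, i.e. exactly the one DMA chunk the comment above requires.
+ */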
+
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2 (0 << 20) // No FB heap usage
+
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL (22 << 20)
+
+#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X            (8 << 20)   // Turing through Ada
+
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB (64u)
+
+#define BULLSEYE_ROOT_HEAP_ALLOC_RM_DATA_SECTION_SIZE_DELTA (12u)
+
+#define BULLSEYE_ROOT_HEAP_ALLOC_BAREMETAL_LIBOS_HEAP_SIZE_DELTA (70u)
+
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB \
+ (88u + (BULLSEYE_ROOT_HEAP_ALLOC_RM_DATA_SECTION_SIZE_DELTA) + \
+ (BULLSEYE_ROOT_HEAP_ALLOC_BAREMETAL_LIBOS_HEAP_SIZE_DELTA))
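+
+/* Worked value: 88 + 12 + 70 = 170, i.e. a 170 MiB minimum WPR heap for bare-metal libos3. */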
+
+typedef struct GSP_FMC_INIT_PARAMS
+{
+ // CC initialization "registry keys"
+ NvU32 regkeys;
+} GSP_FMC_INIT_PARAMS;
+
+typedef enum {
+ GSP_DMA_TARGET_LOCAL_FB,
+ GSP_DMA_TARGET_COHERENT_SYSTEM,
+ GSP_DMA_TARGET_NONCOHERENT_SYSTEM,
+ GSP_DMA_TARGET_COUNT
+} GSP_DMA_TARGET;
+
+typedef struct GSP_ACR_BOOT_GSP_RM_PARAMS
+{
+ // Physical memory aperture through which gspRmDescPa is accessed
+ GSP_DMA_TARGET target;
+ // Size in bytes of the GSP-RM descriptor structure
+ NvU32 gspRmDescSize;
+ // Physical offset in the target aperture of the GSP-RM descriptor structure
+ NvU64 gspRmDescOffset;
+ // Physical offset in FB to set the start of the WPR containing GSP-RM
+ NvU64 wprCarveoutOffset;
+ // Size in bytes of the WPR containing GSP-RM
+ NvU32 wprCarveoutSize;
+ // Whether to boot GSP-RM or GSP-Proxy through ACR
+ NvBool bIsGspRmBoot;
+} GSP_ACR_BOOT_GSP_RM_PARAMS;
+
+typedef struct GSP_RM_PARAMS
+{
+ // Physical memory aperture through which bootArgsOffset is accessed
+ GSP_DMA_TARGET target;
+ // Physical offset in the memory aperture that will be passed to GSP-RM
+ NvU64 bootArgsOffset;
+} GSP_RM_PARAMS;
+
+typedef struct GSP_SPDM_PARAMS
+{
+ // Physical Memory Aperture through which all addresses are accessed
+ GSP_DMA_TARGET target;
+
+ // Physical offset in the memory aperture where SPDM payload is stored
+ NvU64 payloadBufferOffset;
+
+ // Size of the above payload buffer
+ NvU32 payloadBufferSize;
+} GSP_SPDM_PARAMS;
+
+typedef struct GSP_FMC_BOOT_PARAMS
+{
+ GSP_FMC_INIT_PARAMS initParams;
+ GSP_ACR_BOOT_GSP_RM_PARAMS bootGspRmParams;
+ GSP_RM_PARAMS gspRmParams;
+ GSP_SPDM_PARAMS gspSpdmParams;
+} GSP_FMC_BOOT_PARAMS;
+
+#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100 (14 << 20) // Hopper+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h
new file mode 100644
index 000000000000..e06643f57695
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_MSGFN_H__
+#define __NVRM_MSGFN_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#ifndef E
+# define E(RPC, VAL) NV_VGPU_MSG_EVENT_##RPC = VAL,
+# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
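+ /*
+  * X-macro list: included bare, this header defines the NV_VGPU_MSG_EVENT_*
+  * enum from the E() entries below; a consumer can instead define E before
+  * the first inclusion to expand the same list into another construct,
+  * e.g. (hypothetical sketch) event-name table entries:
+  *
+  *   #define E(RPC, VAL) [VAL - 0x1000] = #RPC,
+  */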
+ E(FIRST_EVENT, 0x1000)
+ E(GSP_INIT_DONE, 0x1001)
+ E(GSP_RUN_CPU_SEQUENCER, 0x1002)
+ E(POST_EVENT, 0x1003)
+ E(RC_TRIGGERED, 0x1004)
+ E(MMU_FAULT_QUEUED, 0x1005)
+ E(OS_ERROR_LOG, 0x1006)
+ E(RG_LINE_INTR, 0x1007)
+ E(GPUACCT_PERFMON_UTIL_SAMPLES, 0x1008)
+ E(SIM_READ, 0x1009)
+ E(SIM_WRITE, 0x100a)
+ E(SEMAPHORE_SCHEDULE_CALLBACK, 0x100b)
+ E(UCODE_LIBOS_PRINT, 0x100c)
+ E(VGPU_GSP_PLUGIN_TRIGGERED, 0x100d)
+ E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK, 0x100e)
+ E(PERF_BRIDGELESS_INFO_UPDATE, 0x100f)
+ E(VGPU_CONFIG, 0x1010)
+ E(DISPLAY_MODESET, 0x1011)
+ E(EXTDEV_INTR_SERVICE, 0x1012)
+ E(NVLINK_INBAND_RECEIVED_DATA_256, 0x1013)
+ E(NVLINK_INBAND_RECEIVED_DATA_512, 0x1014)
+ E(NVLINK_INBAND_RECEIVED_DATA_1024, 0x1015)
+ E(NVLINK_INBAND_RECEIVED_DATA_2048, 0x1016)
+ E(NVLINK_INBAND_RECEIVED_DATA_4096, 0x1017)
+ E(TIMED_SEMAPHORE_RELEASE, 0x1018)
+ E(NVLINK_IS_GPU_DEGRADED, 0x1019)
+ E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK, 0x101a)
+ E(NVLINK_FAULT_UP, 0x101b)
+ E(GSP_LOCKDOWN_NOTICE, 0x101c)
+ E(MIG_CI_CONFIG_UPDATE, 0x101d)
+ E(UPDATE_GSP_TRACE, 0x101e)
+ E(NVLINK_FATAL_ERROR_RECOVERY, 0x101f)
+ E(GSP_POST_NOCAT_RECORD, 0x1020)
+ E(FECS_ERROR, 0x1021)
+ E(RECOVERY_ACTION, 0x1022)
+ E(NUM_EVENTS, 0x1023)
+#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+};
+# undef E
+# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+#endif
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h
new file mode 100644
index 000000000000..fcaef7f553a6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_OFA_H__
+#define __NVRM_OFA_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA?
+ NvU32 engineInstance;
+} NV_OFA_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h
new file mode 100644
index 000000000000..2d67b598c58b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_RPCFN_H__
+#define __NVRM_RPCFN_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#ifndef X
+# define X(UNIT, RPC, VAL) NV_VGPU_MSG_FUNCTION_##RPC = VAL,
+# define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
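+ /*
+  * Same X-macro convention as msgfn.h: defining X before the first
+  * inclusion re-expands the function list, e.g. (hypothetical sketch)
+  * as switch cases mapping an RPC function number to its name:
+  *
+  *   #define X(UNIT, RPC, VAL) case VAL: return #RPC;
+  */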
+ X(RM, NOP, 0)
+ X(RM, SET_GUEST_SYSTEM_INFO, 1)
+ X(RM, ALLOC_ROOT, 2)
+ X(RM, ALLOC_DEVICE, 3) // deprecated
+ X(RM, ALLOC_MEMORY, 4)
+ X(RM, ALLOC_CTX_DMA, 5)
+ X(RM, ALLOC_CHANNEL_DMA, 6)
+ X(RM, MAP_MEMORY, 7)
+ X(RM, BIND_CTX_DMA, 8) // deprecated
+ X(RM, ALLOC_OBJECT, 9)
+ X(RM, FREE, 10)
+ X(RM, LOG, 11)
+ X(RM, ALLOC_VIDMEM, 12)
+ X(RM, UNMAP_MEMORY, 13)
+ X(RM, MAP_MEMORY_DMA, 14)
+ X(RM, UNMAP_MEMORY_DMA, 15)
+ X(RM, GET_EDID, 16) // deprecated
+ X(RM, ALLOC_DISP_CHANNEL, 17)
+ X(RM, ALLOC_DISP_OBJECT, 18)
+ X(RM, ALLOC_SUBDEVICE, 19)
+ X(RM, ALLOC_DYNAMIC_MEMORY, 20)
+ X(RM, DUP_OBJECT, 21)
+ X(RM, IDLE_CHANNELS, 22)
+ X(RM, ALLOC_EVENT, 23)
+ X(RM, SEND_EVENT, 24) // deprecated
+ X(RM, REMAPPER_CONTROL, 25) // deprecated
+ X(RM, DMA_CONTROL, 26) // deprecated
+ X(RM, DMA_FILL_PTE_MEM, 27)
+ X(RM, MANAGE_HW_RESOURCE, 28)
+ X(RM, BIND_ARBITRARY_CTX_DMA, 29) // deprecated
+ X(RM, CREATE_FB_SEGMENT, 30)
+ X(RM, DESTROY_FB_SEGMENT, 31)
+ X(RM, ALLOC_SHARE_DEVICE, 32)
+ X(RM, DEFERRED_API_CONTROL, 33)
+ X(RM, REMOVE_DEFERRED_API, 34)
+ X(RM, SIM_ESCAPE_READ, 35)
+ X(RM, SIM_ESCAPE_WRITE, 36)
+ X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA, 37)
+ X(RM, FREE_VIDMEM_VIRT, 38)
+ X(RM, PERF_GET_PSTATE_INFO, 39) // deprecated
+ X(RM, PERF_GET_PERFMON_SAMPLE, 40)
+ X(RM, PERF_GET_VIRTUAL_PSTATE_INFO, 41) // deprecated
+ X(RM, PERF_GET_LEVEL_INFO, 42)
+ X(RM, MAP_SEMA_MEMORY, 43)
+ X(RM, UNMAP_SEMA_MEMORY, 44)
+ X(RM, SET_SURFACE_PROPERTIES, 45)
+ X(RM, CLEANUP_SURFACE, 46)
+ X(RM, UNLOADING_GUEST_DRIVER, 47)
+ X(RM, TDR_SET_TIMEOUT_STATE, 48)
+ X(RM, SWITCH_TO_VGA, 49)
+ X(RM, GPU_EXEC_REG_OPS, 50)
+ X(RM, GET_STATIC_INFO, 51)
+ X(RM, ALLOC_VIRTMEM, 52)
+ X(RM, UPDATE_PDE_2, 53)
+ X(RM, SET_PAGE_DIRECTORY, 54)
+ X(RM, GET_STATIC_PSTATE_INFO, 55)
+ X(RM, TRANSLATE_GUEST_GPU_PTES, 56)
+ X(RM, RESERVED_57, 57)
+ X(RM, RESET_CURRENT_GR_CONTEXT, 58)
+ X(RM, SET_SEMA_MEM_VALIDATION_STATE, 59)
+ X(RM, GET_ENGINE_UTILIZATION, 60)
+ X(RM, UPDATE_GPU_PDES, 61)
+ X(RM, GET_ENCODER_CAPACITY, 62)
+ X(RM, VGPU_PF_REG_READ32, 63) // deprecated
+ X(RM, SET_GUEST_SYSTEM_INFO_EXT, 64)
+ X(GSP, GET_GSP_STATIC_INFO, 65)
+ X(RM, RMFS_INIT, 66) // deprecated
+ X(RM, RMFS_CLOSE_QUEUE, 67) // deprecated
+ X(RM, RMFS_CLEANUP, 68) // deprecated
+ X(RM, RMFS_TEST, 69) // deprecated
+ X(RM, UPDATE_BAR_PDE, 70)
+ X(RM, CONTINUATION_RECORD, 71)
+ X(RM, GSP_SET_SYSTEM_INFO, 72)
+ X(RM, SET_REGISTRY, 73)
+ X(GSP, GSP_INIT_POST_OBJGPU, 74) // deprecated
+ X(RM, SUBDEV_EVENT_SET_NOTIFICATION, 75) // deprecated
+ X(GSP, GSP_RM_CONTROL, 76)
+ X(RM, GET_STATIC_INFO2, 77)
+ X(RM, DUMP_PROTOBUF_COMPONENT, 78)
+ X(RM, UNSET_PAGE_DIRECTORY, 79)
+ X(RM, GET_CONSOLIDATED_STATIC_INFO, 80) // deprecated
+ X(RM, GMMU_REGISTER_FAULT_BUFFER, 81) // deprecated
+ X(RM, GMMU_UNREGISTER_FAULT_BUFFER, 82) // deprecated
+ X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER, 83) // deprecated
+ X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER, 84) // deprecated
+ X(RM, CTRL_SET_VGPU_FB_USAGE, 85)
+ X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO, 86)
+ X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO, 87)
+ X(RM, CTRL_RESET_CHANNEL, 88)
+ X(RM, CTRL_RESET_ISOLATED_CHANNEL, 89)
+ X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT, 90)
+ X(RM, CTRL_CLK_GET_EXTENDED_INFO, 91)
+ X(RM, CTRL_PERF_BOOST, 92)
+ X(RM, CTRL_PERF_VPSTATES_GET_CONTROL, 93)
+ X(RM, CTRL_GET_ZBC_CLEAR_TABLE, 94)
+ X(RM, CTRL_SET_ZBC_COLOR_CLEAR, 95)
+ X(RM, CTRL_SET_ZBC_DEPTH_CLEAR, 96)
+ X(RM, CTRL_GPFIFO_SCHEDULE, 97)
+ X(RM, CTRL_SET_TIMESLICE, 98)
+ X(RM, CTRL_PREEMPT, 99)
+ X(RM, CTRL_FIFO_DISABLE_CHANNELS, 100)
+ X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL, 101)
+ X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL, 102)
+ X(GSP, GSP_RM_ALLOC, 103)
+ X(RM, CTRL_GET_P2P_CAPS_V2, 104)
+ X(RM, CTRL_CIPHER_AES_ENCRYPT, 105)
+ X(RM, CTRL_CIPHER_SESSION_KEY, 106)
+ X(RM, CTRL_CIPHER_SESSION_KEY_STATUS, 107)
+ X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES, 108)
+ X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES, 109)
+ X(RM, CTRL_DBG_SET_EXCEPTION_MASK, 110)
+ X(RM, CTRL_GPU_PROMOTE_CTX, 111)
+ X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND, 112)
+ X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE, 113)
+ X(RM, CTRL_GR_CTXSW_ZCULL_BIND, 114)
+ X(RM, CTRL_GPU_INITIALIZE_CTX, 115)
+ X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES, 116)
+ X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT, 117)
+ X(RM, CTRL_GET_LATEST_ECC_ADDRESSES, 118)
+ X(RM, CTRL_MC_SERVICE_INTERRUPTS, 119)
+ X(RM, CTRL_DMA_SET_DEFAULT_VASPACE, 120)
+ X(RM, CTRL_GET_CE_PCE_MASK, 121)
+ X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY, 122)
+ X(RM, CTRL_GET_NVLINK_PEER_ID_MASK, 123) // deprecated
+ X(RM, CTRL_GET_NVLINK_STATUS, 124)
+ X(RM, CTRL_GET_P2P_CAPS, 125)
+ X(RM, CTRL_GET_P2P_CAPS_MATRIX, 126)
+ X(RM, RESERVED_0, 127)
+ X(RM, CTRL_RESERVE_PM_AREA_SMPC, 128)
+ X(RM, CTRL_RESERVE_HWPM_LEGACY, 129)
+ X(RM, CTRL_B0CC_EXEC_REG_OPS, 130)
+ X(RM, CTRL_BIND_PM_RESOURCES, 131)
+ X(RM, CTRL_DBG_SUSPEND_CONTEXT, 132)
+ X(RM, CTRL_DBG_RESUME_CONTEXT, 133)
+ X(RM, CTRL_DBG_EXEC_REG_OPS, 134)
+ X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG, 135)
+ X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE, 136)
+ X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE, 137)
+ X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG, 138)
+ X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE, 139)
+ X(RM, CTRL_ALLOC_PMA_STREAM, 140)
+ X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT, 141)
+ X(RM, CTRL_FB_GET_INFO_V2, 142)
+ X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES, 143)
+ X(RM, CTRL_GR_GET_CTX_BUFFER_INFO, 144)
+ X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES, 145)
+ X(RM, CTRL_GPU_EVICT_CTX, 146)
+ X(RM, CTRL_FB_GET_FS_INFO, 147)
+ X(RM, CTRL_GRMGR_GET_GR_FS_INFO, 148)
+ X(RM, CTRL_STOP_CHANNEL, 149)
+ X(RM, CTRL_GR_PC_SAMPLING_MODE, 150)
+ X(RM, CTRL_PERF_RATED_TDP_GET_STATUS, 151)
+ X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL, 152)
+ X(RM, CTRL_FREE_PMA_STREAM, 153)
+ X(RM, CTRL_TIMER_SET_GR_TICK_FREQ, 154)
+ X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB, 155)
+ X(RM, GET_CONSOLIDATED_GR_STATIC_INFO, 156)
+ X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP, 157)
+ X(RM, CTRL_GR_GET_TPC_PARTITION_MODE, 158)
+ X(RM, CTRL_GR_SET_TPC_PARTITION_MODE, 159)
+ X(UVM, UVM_PAGING_CHANNEL_ALLOCATE, 160)
+ X(UVM, UVM_PAGING_CHANNEL_DESTROY, 161)
+ X(UVM, UVM_PAGING_CHANNEL_MAP, 162)
+ X(UVM, UVM_PAGING_CHANNEL_UNMAP, 163)
+ X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM, 164)
+ X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES, 165)
+ X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION, 166)
+ X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL, 167)
+ X(RM, DCE_RM_INIT, 168)
+ X(RM, REGISTER_VIRTUAL_EVENT_BUFFER, 169)
+ X(RM, CTRL_EVENT_BUFFER_UPDATE_GET, 170)
+ X(RM, GET_PLCABLE_ADDRESS_KIND, 171)
+ X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2, 172)
+ X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM, 173)
+ X(RM, CTRL_GET_MMU_DEBUG_MODE, 174)
+ X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS, 175)
+ X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE, 176)
+ X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO, 177)
+ X(RM, DISABLE_CHANNELS, 178)
+ X(RM, CTRL_FABRIC_MEMORY_DESCRIBE, 179)
+ X(RM, CTRL_FABRIC_MEM_STATS, 180)
+ X(RM, SAVE_HIBERNATION_DATA, 181)
+ X(RM, RESTORE_HIBERNATION_DATA, 182)
+ X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED, 183)
+ X(RM, CTRL_EXEC_PARTITIONS_CREATE, 184)
+ X(RM, CTRL_EXEC_PARTITIONS_DELETE, 185)
+ X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN, 186)
+ X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX, 187)
+ X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION, 188)
+ X(RM, CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK, 189)
+ X(RM, SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER, 190)
+ X(RM, CTRL_SUBDEVICE_GET_P2P_CAPS, 191)
+ X(RM, CTRL_BUS_SET_P2P_MAPPING, 192)
+ X(RM, CTRL_BUS_UNSET_P2P_MAPPING, 193)
+ X(RM, CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK, 194)
+ X(RM, CTRL_GPU_MIGRATABLE_OPS, 195)
+ X(RM, CTRL_GET_TOTAL_HS_CREDITS, 196)
+ X(RM, CTRL_GET_HS_CREDITS, 197)
+ X(RM, CTRL_SET_HS_CREDITS, 198)
+ X(RM, CTRL_PM_AREA_PC_SAMPLER, 199)
+ X(RM, INVALIDATE_TLB, 200)
+ X(RM, CTRL_GPU_QUERY_ECC_STATUS, 201) // deprecated
+ X(RM, ECC_NOTIFIER_WRITE_ACK, 202)
+ X(RM, CTRL_DBG_GET_MODE_MMU_DEBUG, 203)
+ X(RM, RM_API_CONTROL, 204)
+ X(RM, CTRL_CMD_INTERNAL_GPU_START_FABRIC_PROBE, 205)
+ X(RM, CTRL_NVLINK_GET_INBAND_RECEIVED_DATA, 206)
+ X(RM, GET_STATIC_DATA, 207)
+ X(RM, RESERVED_208, 208)
+ X(RM, CTRL_GPU_GET_INFO_V2, 209)
+ X(RM, GET_BRAND_CAPS, 210)
+ X(RM, CTRL_CMD_NVLINK_INBAND_SEND_DATA, 211)
+ X(RM, UPDATE_GPM_GUEST_BUFFER_INFO, 212)
+ X(RM, CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE, 213)
+ X(RM, CTRL_SET_ZBC_STENCIL_CLEAR, 214)
+ X(RM, CTRL_SUBDEVICE_GET_VGPU_HEAP_STATS, 215)
+ X(RM, CTRL_SUBDEVICE_GET_LIBOS_HEAP_STATS, 216)
+ X(RM, CTRL_DBG_SET_MODE_MMU_GCC_DEBUG, 217)
+ X(RM, CTRL_DBG_GET_MODE_MMU_GCC_DEBUG, 218)
+ X(RM, CTRL_RESERVE_HES, 219)
+ X(RM, CTRL_RELEASE_HES, 220)
+ X(RM, CTRL_RESERVE_CCU_PROF, 221)
+ X(RM, CTRL_RELEASE_CCU_PROF, 222)
+ X(RM, RESERVED, 223)
+ X(RM, CTRL_CMD_GET_CHIPLET_HS_CREDIT_POOL, 224)
+ X(RM, CTRL_CMD_GET_HS_CREDITS_MAPPING, 225)
+ X(RM, CTRL_EXEC_PARTITIONS_EXPORT, 226)
+ X(RM, NUM_FUNCTIONS, 227)
+#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+};
+# undef X
+# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+#endif
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c
new file mode 100644
index 000000000000..6fb3083edde3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/ofa.h"
+
+static int
+r570_ofa_alloc(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, int inst,
+ struct nvkm_gsp_object *ofa)
+{
+ NV_OFA_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(parent, handle, oclass, sizeof(*args), ofa);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
+
+ return nvkm_gsp_rm_alloc_wr(ofa, args);
+}
+
+const struct nvkm_rm_api_engine
+r570_ofa = {
+ .alloc = r570_ofa_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c
new file mode 100644
index 000000000000..498658d0c60c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/gsp.h"
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos2 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3_gh100 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+ .heap_size_non_wpr = 0x200000,
+ .offset_set_by_acr = true,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3_gb10x = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+ .heap_size_non_wpr = 0x200000,
+ .rsvd_size_pmu = ALIGN(0x0800000 + 0x1000000 + 0x0001000, 0x20000),
+ .offset_set_by_acr = true,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3_gb20x = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+ .heap_size_non_wpr = 0x220000,
+ .rsvd_size_pmu = ALIGN(0x0800000 + 0x1000000 + 0x0001000, 0x20000),
+ .offset_set_by_acr = true,
+};
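+
+/*
+ * Worked value (illustrative): .rsvd_size_pmu above is
+ * ALIGN(0x0800000 + 0x1000000 + 0x0001000, 0x20000) =
+ * ALIGN(0x1801000, 0x20000) = 0x1820000, i.e. just over 24 MiB reserved
+ * for the PMU on GB10x/GB20x.
+ */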
+
+static const struct nvkm_rm_api
+r570_api = {
+ .gsp = &r570_gsp,
+ .rpc = &r535_rpc,
+ .ctrl = &r535_ctrl,
+ .alloc = &r535_alloc,
+ .client = &r570_client,
+ .device = &r535_device,
+ .fbsr = &r570_fbsr,
+ .disp = &r570_disp,
+ .fifo = &r570_fifo,
+ .ce = &r535_ce,
+ .gr = &r570_gr,
+ .nvdec = &r535_nvdec,
+ .nvenc = &r535_nvenc,
+ .nvjpg = &r535_nvjpg,
+ .ofa = &r570_ofa,
+};
+
+const struct nvkm_rm_impl
+r570_rm_tu102 = {
+ .wpr = &r570_wpr_libos2,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_ga102 = {
+ .wpr = &r570_wpr_libos3,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_gh100 = {
+ .wpr = &r570_wpr_libos3_gh100,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_gb10x = {
+ .wpr = &r570_wpr_libos3_gb10x,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_gb20x = {
+ .wpr = &r570_wpr_libos3_gb20x,
+ .api = &r570_api,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h
new file mode 100644
index 000000000000..393ea775941f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <subdev/gsp.h>
+#ifndef __NVKM_RM_H__
+#define __NVKM_RM_H__
+#include "handles.h"
+struct nvkm_outp;
+struct r535_gr;
+
+struct nvkm_rm_impl {
+ const struct nvkm_rm_wpr *wpr;
+ const struct nvkm_rm_api *api;
+};
+
+struct nvkm_rm {
+ struct nvkm_device *device;
+ const struct nvkm_rm_gpu *gpu;
+ const struct nvkm_rm_wpr *wpr;
+ const struct nvkm_rm_api *api;
+};
+
+struct nvkm_rm_wpr {
+ u32 os_carveout_size;
+ u32 base_size;
+ u64 heap_size_min;
+ u32 heap_size_non_wpr;
+ u32 rsvd_size_pmu;
+ bool offset_set_by_acr;
+};
+
+struct nvkm_rm_api {
+ const struct nvkm_rm_api_gsp {
+ void (*set_rmargs)(struct nvkm_gsp *, bool resume);
+ int (*set_system_info)(struct nvkm_gsp *);
+ int (*get_static_info)(struct nvkm_gsp *);
+ bool (*xlat_mc_engine_idx)(u32 mc_engine_idx, enum nvkm_subdev_type *, int *inst);
+ void (*drop_send_user_shared_data)(struct nvkm_gsp *);
+ void (*drop_post_nocat_record)(struct nvkm_gsp *);
+ u32 (*sr_data_size)(struct nvkm_gsp *);
+ } *gsp;
+
+ const struct nvkm_rm_api_rpc {
+ void *(*get)(struct nvkm_gsp *, u32 fn, u32 argc);
+ void *(*push)(struct nvkm_gsp *gsp, void *argv,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 repc);
+ void (*done)(struct nvkm_gsp *gsp, void *repv);
+ } *rpc;
+
+ const struct nvkm_rm_api_ctrl {
+ void *(*get)(struct nvkm_gsp_object *, u32 cmd, u32 params_size);
+ int (*push)(struct nvkm_gsp_object *, void **params, u32 repc);
+ void (*done)(struct nvkm_gsp_object *, void *params);
+ } *ctrl;
+
+ const struct nvkm_rm_api_alloc {
+ void *(*get)(struct nvkm_gsp_object *, u32 oclass, u32 params_size);
+ void *(*push)(struct nvkm_gsp_object *, void *params);
+ void (*done)(struct nvkm_gsp_object *, void *params);
+
+ int (*free)(struct nvkm_gsp_object *);
+ } *alloc;
+
+ const struct nvkm_rm_api_client {
+ int (*ctor)(struct nvkm_gsp_client *, u32 handle);
+ } *client;
+
+ const struct nvkm_rm_api_device {
+ int (*ctor)(struct nvkm_gsp_client *, struct nvkm_gsp_device *);
+ void (*dtor)(struct nvkm_gsp_device *);
+
+ struct {
+ int (*ctor)(struct nvkm_gsp_device *, u32 handle, u32 id,
+ nvkm_gsp_event_func, struct nvkm_gsp_event *);
+ void (*dtor)(struct nvkm_gsp_event *);
+ } event;
+ } *device;
+
+ const struct nvkm_rm_api_fbsr {
+ int (*suspend)(struct nvkm_gsp *);
+ void (*resume)(struct nvkm_gsp *);
+ } *fbsr;
+
+ const struct nvkm_rm_api_disp {
+ int (*get_static_info)(struct nvkm_disp *);
+ int (*get_supported)(struct nvkm_disp *, unsigned long *display_mask);
+ int (*get_connect_state)(struct nvkm_disp *, unsigned display_id);
+ int (*get_active)(struct nvkm_disp *, unsigned head, u32 *display_id);
+
+ int (*bl_ctrl)(struct nvkm_disp *, unsigned display_id, bool set, int *val);
+
+ struct {
+ int (*get_caps)(struct nvkm_disp *, int *link_bw, bool *mst, bool *wm);
+ int (*set_indexed_link_rates)(struct nvkm_outp *);
+ } dp;
+
+ struct {
+ int (*set_pushbuf)(struct nvkm_disp *, s32 oclass, int inst,
+ struct nvkm_memory *);
+ int (*dmac_alloc)(struct nvkm_disp *, u32 oclass, int inst, u32 put_offset,
+ struct nvkm_gsp_object *);
+ } chan;
+ } *disp;
+
+ const struct nvkm_rm_api_fifo {
+ int (*xlat_rm_engine_type)(u32 rm_engine_type,
+ enum nvkm_subdev_type *, int *nv2080_type);
+ int (*ectx_size)(struct nvkm_fifo *);
+ unsigned rsvd_chids;
+ int (*rc_triggered)(void *priv, u32 fn, void *repv, u32 repc);
+ struct {
+ int (*alloc)(struct nvkm_gsp_device *, u32 handle,
+ u32 nv2080_engine_type, u8 runq, bool priv, int chid,
+ u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr,
+ struct nvkm_vmm *, u64 gpfifo_offset, u32 gpfifo_length,
+ struct nvkm_gsp_object *);
+ } chan;
+ } *fifo;
+
+ const struct nvkm_rm_api_engine {
+ int (*alloc)(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *);
+ } *ce, *nvdec, *nvenc, *nvjpg, *ofa;
+
+ const struct nvkm_rm_api_gr {
+ int (*get_ctxbufs_info)(struct r535_gr *);
+ struct {
+ int (*init)(struct r535_gr *);
+ void (*fini)(struct r535_gr *);
+ } scrubber;
+ } *gr;
+};
+
+extern const struct nvkm_rm_impl r535_rm_tu102;
+extern const struct nvkm_rm_impl r535_rm_ga102;
+extern const struct nvkm_rm_api_gsp r535_gsp;
+typedef struct DOD_METHOD_DATA DOD_METHOD_DATA;
+typedef struct JT_METHOD_DATA JT_METHOD_DATA;
+typedef struct CAPS_METHOD_DATA CAPS_METHOD_DATA;
+void r535_gsp_acpi_dod(acpi_handle, DOD_METHOD_DATA *);
+void r535_gsp_acpi_jt(acpi_handle, JT_METHOD_DATA *);
+void r535_gsp_acpi_caps(acpi_handle, CAPS_METHOD_DATA *);
+struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+void r535_gsp_get_static_info_fb(struct nvkm_gsp *,
+ const struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *);
+extern const struct nvkm_rm_api_rpc r535_rpc;
+extern const struct nvkm_rm_api_ctrl r535_ctrl;
+extern const struct nvkm_rm_api_alloc r535_alloc;
+extern const struct nvkm_rm_api_client r535_client;
+void r535_gsp_client_dtor(struct nvkm_gsp_client *);
+extern const struct nvkm_rm_api_device r535_device;
+int r535_mmu_vaspace_new(struct nvkm_vmm *, u32 handle, bool external);
+void r535_mmu_vaspace_del(struct nvkm_vmm *);
+extern const struct nvkm_rm_api_fbsr r535_fbsr;
+void r535_fbsr_resume(struct nvkm_gsp *);
+int r535_fbsr_memlist(struct nvkm_gsp_device *, u32 handle, enum nvkm_memory_target,
+ u64 phys, u64 size, struct sg_table *, struct nvkm_gsp_object *);
+extern const struct nvkm_rm_api_disp r535_disp;
+extern const struct nvkm_rm_api_fifo r535_fifo;
+void r535_fifo_rc_chid(struct nvkm_fifo *, int chid);
+extern const struct nvkm_rm_api_engine r535_ce;
+extern const struct nvkm_rm_api_gr r535_gr;
+void *r535_gr_dtor(struct nvkm_gr *);
+int r535_gr_oneinit(struct nvkm_gr *);
+u64 r535_gr_units(struct nvkm_gr *);
+int r535_gr_chan_new(struct nvkm_gr *, struct nvkm_chan *, const struct nvkm_oclass *,
+ struct nvkm_object **);
+int r535_gr_promote_ctx(struct r535_gr *, bool golden, struct nvkm_vmm *,
+ struct nvkm_memory **pctxbuf_mem, struct nvkm_vma **pctxbuf_vma,
+ struct nvkm_gsp_object *chan);
+extern const struct nvkm_rm_api_engine r535_nvdec;
+extern const struct nvkm_rm_api_engine r535_nvenc;
+extern const struct nvkm_rm_api_engine r535_nvjpg;
+extern const struct nvkm_rm_api_engine r535_ofa;
+
+extern const struct nvkm_rm_impl r570_rm_tu102;
+extern const struct nvkm_rm_impl r570_rm_ga102;
+extern const struct nvkm_rm_impl r570_rm_gh100;
+extern const struct nvkm_rm_impl r570_rm_gb10x;
+extern const struct nvkm_rm_impl r570_rm_gb20x;
+extern const struct nvkm_rm_api_gsp r570_gsp;
+extern const struct nvkm_rm_api_client r570_client;
+extern const struct nvkm_rm_api_fbsr r570_fbsr;
+extern const struct nvkm_rm_api_disp r570_disp;
+extern const struct nvkm_rm_api_fifo r570_fifo;
+extern const struct nvkm_rm_api_gr r570_gr;
+int r570_gr_gpc_mask(struct nvkm_gsp *, u32 *mask);
+int r570_gr_tpc_mask(struct nvkm_gsp *, int gpc, u32 *mask);
+extern const struct nvkm_rm_api_engine r570_ofa;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h
new file mode 100644
index 000000000000..4431e33b3304
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_RPC_H__
+#define __NVKM_RM_RPC_H__
+#include "rm.h"
+
+#define to_payload_hdr(p, header) \
+ container_of((void *)p, typeof(*header), params)
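+
+/*
+ * Usage sketch (illustrative, names hypothetical): given a pointer to the
+ * "params" member of an RPC payload struct, recover the enclosing header:
+ *
+ *   struct rpc_foo *hdr = to_payload_hdr(params, hdr);
+ */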
+
+int r535_gsp_rpc_poll(struct nvkm_gsp *, u32 fn);
+
+struct nvfw_gsp_rpc *r535_gsp_msg_recv(struct nvkm_gsp *, int fn, u32 gsp_rpc_len);
+int r535_gsp_msg_ntfy_add(struct nvkm_gsp *, u32 fn, nvkm_gsp_msg_ntfy_func, void *priv);
+
+int r535_rpc_status_to_errno(uint32_t rpc_status);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c
new file mode 100644
index 000000000000..423502f870db
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+tu1xx_gpu = {
+ .disp.class = {
+ .root = TU102_DISP,
+ .caps = GV100_DISP_CAPS,
+ .core = TU102_DISP_CORE_CHANNEL_DMA,
+ .wndw = TU102_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = TU102_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = TU102_DISP_CURSOR,
+ },
+
+ .usermode.class = TURING_USERMODE_A,
+
+ .fifo.chan = {
+ .class = TURING_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = TURING_DMA_COPY_A,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = TURING_A,
+ .compute = TURING_COMPUTE_A,
+ },
+ .nvdec.class = NVC4B0_VIDEO_DECODER,
+ .nvenc.class = NVC4B7_VIDEO_ENCODER,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
index 59c5f2b9172a..58e233bc53b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
@@ -22,11 +22,45 @@
#include "priv.h"
#include <subdev/fb.h>
+#include <engine/sec2.h>
+
+#include <rm/r535/nvrm/gsp.h>
#include <nvfw/flcn.h>
#include <nvfw/fw.h>
#include <nvfw/hs.h>
+static int
+tu102_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 wpr2_hi;
+ int ret;
+
+ wpr2_hi = nvkm_rd32(device, 0x1fa828);
+ if (!wpr2_hi) {
+ nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n");
+ return 0;
+ }
+
+ ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
+ if (WARN_ON(ret))
+ return ret;
+
+ wpr2_hi = nvkm_rd32(device, 0x1fa828);
+ if (WARN_ON(wpr2_hi))
+ return -EIO;
+
+ return 0;
+}
+
+static int
+tu102_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
+{
+ return nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
+}
+
int
tu102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob,
struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
@@ -114,6 +148,118 @@ tu102_gsp_reset(struct nvkm_gsp *gsp)
return gsp->falcon.func->reset_eng(&gsp->falcon);
}
+int
+tu102_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
+{
+ u32 mbox0 = 0xff, mbox1 = 0xff;
+ int ret;
+
+ ret = r535_gsp_fini(gsp, suspend);
+ if (ret && suspend)
+ return ret;
+
+ nvkm_falcon_reset(&gsp->falcon);
+
+ ret = nvkm_gsp_fwsec_sb(gsp);
+ WARN_ON(ret);
+
+ if (suspend) {
+ mbox0 = lower_32_bits(gsp->sr.meta.addr);
+ mbox1 = upper_32_bits(gsp->sr.meta.addr);
+ }
+
+ ret = tu102_gsp_booter_unload(gsp, mbox0, mbox1);
+ WARN_ON(ret);
+ return 0;
+}
+
+int
+tu102_gsp_init(struct nvkm_gsp *gsp)
+{
+ u32 mbox0, mbox1;
+ int ret;
+
+ if (!gsp->sr.meta.data) {
+ mbox0 = lower_32_bits(gsp->wpr_meta.addr);
+ mbox1 = upper_32_bits(gsp->wpr_meta.addr);
+ } else {
+ gsp->rm->api->gsp->set_rmargs(gsp, true);
+
+ mbox0 = lower_32_bits(gsp->sr.meta.addr);
+ mbox1 = upper_32_bits(gsp->sr.meta.addr);
+ }
+
+ /* Execute booter to handle (eventually...) booting GSP-RM. */
+ ret = tu102_gsp_booter_load(gsp, mbox0, mbox1);
+ if (WARN_ON(ret))
+ return ret;
+
+ return r535_gsp_init(gsp);
+}
+
+static int
+tu102_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta;
+ int ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, sizeof(*meta), &gsp->wpr_meta);
+ if (ret)
+ return ret;
+
+ meta = gsp->wpr_meta.data;
+
+ meta->magic = GSP_FW_WPR_META_MAGIC;
+ meta->revision = GSP_FW_WPR_META_REVISION;
+
+ meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
+ meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;
+
+ meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
+ meta->sizeOfBootloader = gsp->boot.fw.size;
+ meta->bootloaderCodeOffset = gsp->boot.code_offset;
+ meta->bootloaderDataOffset = gsp->boot.data_offset;
+ meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
+
+ meta->sysmemAddrOfSignature = gsp->sig.addr;
+ meta->sizeOfSignature = gsp->sig.size;
+
+ meta->gspFwRsvdStart = gsp->fb.heap.addr;
+ meta->nonWprHeapOffset = gsp->fb.heap.addr;
+ meta->nonWprHeapSize = gsp->fb.heap.size;
+ meta->gspFwWprStart = gsp->fb.wpr2.addr;
+ meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr;
+ meta->gspFwHeapSize = gsp->fb.wpr2.heap.size;
+ meta->gspFwOffset = gsp->fb.wpr2.elf.addr;
+ meta->bootBinOffset = gsp->fb.wpr2.boot.addr;
+ meta->frtsOffset = gsp->fb.wpr2.frts.addr;
+ meta->frtsSize = gsp->fb.wpr2.frts.size;
+ meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000);
+ meta->fbSize = gsp->fb.size;
+ meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr;
+ meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
+ meta->bootCount = 0;
+ meta->partitionRpcAddr = 0;
+ meta->partitionRpcRequestOffset = 0;
+ meta->partitionRpcReplyOffset = 0;
+ meta->verified = 0;
+ return 0;
+}
+
+u64
+tu102_gsp_wpr_heap_size(struct nvkm_gsp *gsp)
+{
+ u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);
+ u64 heap_size;
+
+ heap_size = gsp->rm->wpr->os_carveout_size +
+ gsp->rm->wpr->base_size +
+ ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
+ ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);
+
+ return max(heap_size, gsp->rm->wpr->heap_size_min);
+}
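+
+/*
+ * Example (illustrative): on an r535/libos2 Turing board the fixed terms
+ * are os_carveout_size = 0 and base_size = 8 MiB, so this returns
+ * 8 MiB + ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 MiB) +
+ * ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 MiB), clamped up to the
+ * 64 MiB heap_size_min on small-framebuffer boards.
+ */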
+
static u64
tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size)
{
@@ -136,14 +282,67 @@ tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size)
int
tu102_gsp_oneinit(struct nvkm_gsp *gsp)
{
- gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device);
+ struct nvkm_device *device = gsp->subdev.device;
+ int ret;
+
+ gsp->fb.size = nvkm_fb_vidmem_size(device);
gsp->fb.bios.vga_workspace.addr = tu102_gsp_vga_workspace_addr(gsp, gsp->fb.size);
gsp->fb.bios.vga_workspace.size = gsp->fb.size - gsp->fb.bios.vga_workspace.addr;
gsp->fb.bios.addr = gsp->fb.bios.vga_workspace.addr;
gsp->fb.bios.size = gsp->fb.bios.vga_workspace.size;
- return r535_gsp_oneinit(gsp);
+ ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load,
+ &device->sec2->falcon, &gsp->booter.load);
+ if (ret)
+ return ret;
+
+ ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload,
+ &device->sec2->falcon, &gsp->booter.unload);
+ if (ret)
+ return ret;
+
+ ret = r535_gsp_oneinit(gsp);
+ if (ret)
+ return ret;
+
+ /* Calculate FB layout. */
+ gsp->fb.wpr2.frts.size = 0x100000;
+ gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;
+
+ gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
+ gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);
+
+ gsp->fb.wpr2.elf.size = gsp->fw.len;
+ gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);
+
+ gsp->fb.wpr2.heap.size = tu102_gsp_wpr_heap_size(gsp);
+
+ gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
+ gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);
+
+ gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
+ gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;
+
+ gsp->fb.heap.size = 0x100000;
+ gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;
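+
+	/*
+	 * Resulting top-down FB layout (highest addresses first): VGA
+	 * workspace/BIOS, FRTS, boot binary, GSP-RM ELF, WPR heap, WPR
+	 * meta at gsp->fb.wpr2.addr, then the 1 MiB non-WPR heap below.
+	 */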
+
+ ret = tu102_gsp_wpr_meta_init(gsp);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_fwsec_frts(gsp);
+ if (WARN_ON(ret))
+ return ret;
+
+ /* Reset GSP into RISC-V mode. */
+ ret = gsp->func->reset(gsp);
+ if (ret)
+ return ret;
+
+ nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
+ nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));
+ return 0;
}
const struct nvkm_falcon_func
@@ -163,29 +362,73 @@ tu102_gsp_flcn = {
};
static const struct nvkm_gsp_func
-tu102_gsp_r535_113_01 = {
+tu102_gsp = {
.flcn = &tu102_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_tu10x",
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 64 << 20,
-
.booter.ctor = tu102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = tu102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &tu1xx_gpu,
};
+int
+tu102_gsp_load_rm(struct nvkm_gsp *gsp, const struct nvkm_gsp_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ bool enable_gsp = fwif->enable;
+ int ret;
+
+#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
+ enable_gsp = true;
+#endif
+ if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
+ return -EINVAL;
+
+ ret = nvkm_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int
+tu102_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
+{
+ int ret;
+
+ ret = tu102_gsp_load_rm(gsp, fwif);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload);
+
+done:
+ if (ret)
+ nvkm_gsp_dtor_fws(gsp);
+
+ return ret;
+}
+
static struct nvkm_gsp_fwif
tu102_gsps[] = {
- { 0, r535_gsp_load, &tu102_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &tu102_gsp, &r570_rm_tu102, "570.144" },
+ { 0, tu102_gsp_load, &tu102_gsp, &r535_rm_tu102, "535.113.01" },
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};
@@ -196,3 +439,11 @@ tu102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(tu102_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(tu102, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(tu104, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(tu106, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(tu102, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(tu104, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(tu106, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
index 04fbd9ed28b1..97eb046c25d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
@@ -22,29 +22,27 @@
#include "priv.h"
static const struct nvkm_gsp_func
-tu116_gsp_r535_113_01 = {
+tu116_gsp = {
.flcn = &tu102_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_tu11x",
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 64 << 20,
-
.booter.ctor = tu102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = tu102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &tu1xx_gpu,
};
static struct nvkm_gsp_fwif
tu116_gsps[] = {
- { 0, r535_gsp_load, &tu116_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &tu116_gsp, &r570_rm_tu102, "570.144" },
+ { 0, tu102_gsp_load, &tu116_gsp, &r535_rm_tu102, "535.113.01" },
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};
@@ -55,3 +53,9 @@ tu116_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(tu116_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(tu116, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(tu117, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(tu116, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(tu117, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
index 553d540f2736..fa7a2862dd1f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
@@ -4,5 +4,4 @@ nvkm-y += nvkm/subdev/instmem/nv04.o
nvkm-y += nvkm/subdev/instmem/nv40.o
nvkm-y += nvkm/subdev/instmem/nv50.o
nvkm-y += nvkm/subdev/instmem/gk20a.o
-
-nvkm-y += nvkm/subdev/instmem/r535.o
+nvkm-y += nvkm/subdev/instmem/gh100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index a2cd3330efc6..2f55bab8e132 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -182,9 +182,11 @@ nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
int ret;
if (suspend) {
- ret = imem->func->suspend(imem);
- if (ret)
- return ret;
+ if (imem->func->suspend) {
+ ret = imem->func->suspend(imem);
+ if (ret)
+ return ret;
+ }
imem->suspend = true;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c
new file mode 100644
index 000000000000..8d8dd5f8a6c7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/ref/gh100/pri_nv_xal_ep.h>
+
+static void
+gh100_instmem_set_bar0_window_addr(struct nvkm_device *device, u64 addr)
+{
+ nvkm_wr32(device, NV_XAL_EP_BAR0_WINDOW, addr >> NV_XAL_EP_BAR0_WINDOW_BASE_SHIFT);
+}
+
+static const struct nvkm_instmem_func
+gh100_instmem = {
+ .fini = nv50_instmem_fini,
+ .memory_new = nv50_instobj_new,
+ .memory_wrap = nv50_instobj_wrap,
+ .set_bar0_window_addr = gh100_instmem_set_bar0_window_addr,
+};
+
+int
+gh100_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_instmem **pimem)
+{
+ return r535_instmem_new(&gh100_instmem, device, type, inst, pimem);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index 6b462f960922..2544b9f0ec85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -239,7 +239,6 @@ nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int ins
struct nvkm_instmem **pimem)
{
struct nv40_instmem *imem;
- int bar;
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
@@ -247,13 +246,8 @@ nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int ins
*pimem = &imem->base;
/* map bar */
- if (device->func->resource_size(device, 2))
- bar = 2;
- else
- bar = 3;
-
- imem->iomem = ioremap_wc(device->func->resource_addr(device, bar),
- device->func->resource_size(device, bar));
+ imem->iomem = ioremap_wc(device->func->resource_addr(device, NVKM_BAR2_INST),
+ device->func->resource_size(device, NVKM_BAR2_INST));
if (!imem->iomem) {
nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
return -EFAULT;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index dd5b5a17ece0..4ca6fb30743d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -65,7 +65,7 @@ nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
spin_lock_irqsave(&imem->base.lock, flags);
if (unlikely(imem->addr != base)) {
- nvkm_wr32(device, 0x001700, base >> 16);
+ imem->base.func->set_bar0_window_addr(device, base);
imem->addr = base;
}
nvkm_wr32(device, 0x700000 + addr, data);
@@ -85,7 +85,7 @@ nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
spin_lock_irqsave(&imem->base.lock, flags);
if (unlikely(imem->addr != base)) {
- nvkm_wr32(device, 0x001700, base >> 16);
+ imem->base.func->set_bar0_window_addr(device, base);
imem->addr = base;
}
data = nvkm_rd32(device, 0x700000 + addr);
@@ -172,7 +172,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
/* Make the mapping visible to the host. */
iobj->bar = bar;
- iobj->map = ioremap_wc(device->func->resource_addr(device, 3) +
+ iobj->map = ioremap_wc(device->func->resource_addr(device, NVKM_BAR2_INST) +
(u32)iobj->bar->addr, size);
if (!iobj->map) {
nvkm_warn(subdev, "PRAMIN ioremap failed\n");
@@ -353,7 +353,7 @@ nv50_instobj_func = {
.map = nv50_instobj_map,
};
-static int
+int
nv50_instobj_wrap(struct nvkm_instmem *base,
struct nvkm_memory *memory, struct nvkm_memory **pmemory)
{
@@ -373,7 +373,7 @@ nv50_instobj_wrap(struct nvkm_instmem *base,
return 0;
}
-static int
+int
nv50_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
struct nvkm_memory **pmemory)
{
@@ -395,6 +395,12 @@ nv50_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
*****************************************************************************/
static void
+nv50_instmem_set_bar0_window_addr(struct nvkm_device *device, u64 addr)
+{
+ nvkm_wr32(device, 0x001700, addr >> 16);
+}
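+
+/*
+ * Worked example (illustrative): the window base is programmed in 64 KiB
+ * units, so addr = 0x12345678 writes 0x1234 to 0x001700 and that address
+ * is then reachable at BAR0 offset 0x700000 + 0x5678.
+ */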
+
+void
nv50_instmem_fini(struct nvkm_instmem *base)
{
nv50_instmem(base)->addr = ~0ULL;
@@ -415,6 +421,7 @@ nv50_instmem = {
.memory_new = nv50_instobj_new,
.memory_wrap = nv50_instobj_wrap,
.zero = false,
+ .set_bar0_window_addr = nv50_instmem_set_bar0_window_addr,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index 4c14c96fb60a..87bbdd786eaa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -16,10 +16,16 @@ struct nvkm_instmem_func {
bool zero, struct nvkm_memory **);
int (*memory_wrap)(struct nvkm_instmem *, struct nvkm_memory *, struct nvkm_memory **);
bool zero;
+ void (*set_bar0_window_addr)(struct nvkm_device *, u64 addr);
};
int nv50_instmem_new_(const struct nvkm_instmem_func *, struct nvkm_device *,
enum nvkm_subdev_type, int, struct nvkm_instmem **);
+void nv50_instmem_fini(struct nvkm_instmem *);
+int nv50_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero,
+ struct nvkm_memory **);
+int nv50_instobj_wrap(struct nvkm_instmem *, struct nvkm_memory *vram,
+ struct nvkm_memory **bar2);
void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
enum nvkm_subdev_type, int, struct nvkm_instmem *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 7ba35ea59c06..ea4848931540 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -15,8 +15,7 @@ nvkm-y += nvkm/subdev/mmu/gp100.o
nvkm-y += nvkm/subdev/mmu/gp10b.o
nvkm-y += nvkm/subdev/mmu/gv100.o
nvkm-y += nvkm/subdev/mmu/tu102.o
-
-nvkm-y += nvkm/subdev/mmu/r535.o
+nvkm-y += nvkm/subdev/mmu/gh100.o
nvkm-y += nvkm/subdev/mmu/mem.o
nvkm-y += nvkm/subdev/mmu/memnv04.o
@@ -38,6 +37,7 @@ nvkm-y += nvkm/subdev/mmu/vmmgp100.o
nvkm-y += nvkm/subdev/mmu/vmmgp10b.o
nvkm-y += nvkm/subdev/mmu/vmmgv100.o
nvkm-y += nvkm/subdev/mmu/vmmtu102.o
+nvkm-y += nvkm/subdev/mmu/vmmgh100.o
nvkm-y += nvkm/subdev/mmu/umem.o
nvkm-y += nvkm/subdev/mmu/ummu.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c
new file mode 100644
index 000000000000..2918fb32cc91
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gh100_mmu = {
+ .dma_bits = 52,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, gh100_vmm_new },
+ .kind = tu102_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gh100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ return r535_mmu_new(&gh100_mmu, device, type, inst, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
index d9c9bee45222..160a5749a29f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
@@ -60,7 +60,7 @@ gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
if (ret)
return ret;
- *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
+ *paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + (*pvma)->addr;
*psize = (*pvma)->size;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
index 79a3b0cc9f5b..1e3db52de6cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
@@ -41,7 +41,7 @@ nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
return ret;
- *paddr = device->func->resource_addr(device, 1) + addr;
+ *paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + addr;
*psize = nvkm_memory_size(memory);
*pvma = ERR_PTR(-ENODEV);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
index 46759b89fc1f..33b2321e9d87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
@@ -57,7 +57,7 @@ nv50_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
if (ret)
return ret;
- *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
+ *paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + (*pvma)->addr;
*psize = (*pvma)->size;
return nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm));
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index e9ca6537778c..90efef8f0b54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -53,6 +53,8 @@ const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid);
const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *, u8 *);
+const u8 *tu102_mmu_kind(struct nvkm_mmu *, int *, u8 *);
+
struct nvkm_mmu_pt {
union {
struct nvkm_mmu_ptc *ptc;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
index df662ce4a4b0..7acff3642e20 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
@@ -28,7 +28,7 @@
#include <nvif/class.h>
-static const u8 *
+const u8 *
tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
{
static const u8
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 9c97800fe037..f95c58b67633 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -19,7 +19,7 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#define NVKM_VMM_LEVELS_MAX 5
+#define NVKM_VMM_LEVELS_MAX 6
#include "vmm.h"
#include <subdev/fb.h>
@@ -1030,12 +1030,8 @@ nvkm_vmm_dtor(struct nvkm_vmm *vmm)
struct nvkm_vma *vma;
struct rb_node *node;
- if (vmm->rm.client.gsp) {
- nvkm_gsp_rm_free(&vmm->rm.object);
- nvkm_gsp_device_dtor(&vmm->rm.device);
- nvkm_gsp_client_dtor(&vmm->rm.client);
- nvkm_vmm_put(vmm, &vmm->rm.rsvd);
- }
+ if (vmm->rm.client.gsp)
+ r535_mmu_vaspace_del(vmm);
if (0)
nvkm_vmm_dump(vmm);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index f9bc30cdb2b3..4586a425dbe4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -143,6 +143,8 @@ struct nvkm_vmm_func {
int (*aper)(enum nvkm_memory_target);
int (*valid)(struct nvkm_vmm *, void *argv, u32 argc,
struct nvkm_vmm_map *);
+ int (*valid2)(struct nvkm_vmm *, bool ro, bool priv, u8 kind, u8 comp,
+ struct nvkm_vmm_map *);
void (*flush)(struct nvkm_vmm *, int depth);
int (*mthd)(struct nvkm_vmm *, struct nvkm_client *,
@@ -254,6 +256,8 @@ void gp100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);
int gv100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+void tu102_vmm_flush(struct nvkm_vmm *, int depth);
+
int nv04_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv41_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
@@ -296,6 +300,9 @@ int gv100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
int tu102_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *,
struct nvkm_vmm **);
+int gh100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
#define VMM_PRINT(l,v,p,f,a...) do { \
struct nvkm_vmm *_vmm = (v); \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
new file mode 100644
index 000000000000..5614df3432da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "vmm.h"
+
+#include <subdev/fb.h>
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_mmu.h>
+
+static inline void
+gh100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+ struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 data = addr | map->type;
+
+ while (ptes--) {
+ VMM_WO064(pt, vmm, ptei++ * NV_MMU_VER3_PTE__SIZE, data);
+ data += map->next;
+ }
+}
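+
+/*
+ * Illustrative: with 4 KiB pages map->next is 0x1000, so writing three
+ * PTEs starting at addr 0x2000 emits entries for 0x2000, 0x3000 and
+ * 0x4000, each OR'd with the access bits precomputed in map->type.
+ */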
+
+static void
+gh100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+ struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);
+}
+
+static void
+gh100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+ struct nvkm_vmm_map *map)
+{
+ if (map->page->shift == PAGE_SHIFT) {
+ VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
+
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ const u64 data = *map->dma++ | map->type;
+
+ VMM_WO064(pt, vmm, ptei++ * NV_MMU_VER3_PTE__SIZE, data);
+ }
+ nvkm_done(pt->memory);
+ return;
+ }
+
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);
+}
+
+static void
+gh100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+ struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);
+}
+
+static void
+gh100_vmm_pgt_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ const u64 data = NVDEF(NV_MMU, VER3_PTE, PCF, SPARSE);
+
+ VMM_FO064(pt, vmm, ptei * NV_MMU_VER3_PTE__SIZE, data, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_spt = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gh100_vmm_pgt_sparse,
+ .mem = gh100_vmm_pgt_mem,
+ .dma = gh100_vmm_pgt_dma,
+ .sgl = gh100_vmm_pgt_sgl,
+};
+
+static void
+gh100_vmm_lpt_invalid(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ const u64 data = NVDEF(NV_MMU, VER3_PTE, PCF, NO_VALID_4KB_PAGE);
+
+ VMM_FO064(pt, vmm, ptei * NV_MMU_VER3_PTE__SIZE, data, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_lpt = {
+ .invalid = gh100_vmm_lpt_invalid,
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gh100_vmm_pgt_sparse,
+ .mem = gh100_vmm_pgt_mem,
+};
+
+static inline void
+gh100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 data = addr | map->type;
+
+ while (ptes--) {
+ VMM_WO128(pt, vmm, ptei++ * NV_MMU_VER3_DUAL_PDE__SIZE, data, 0ULL);
+ data += map->next;
+ }
+}
+
+static void
+gh100_vmm_pd0_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gh100_vmm_pd0_pte);
+}
+
+static inline bool
+gh100_vmm_pde(struct nvkm_mmu_pt *pt, u64 *data)
+{
+ switch (nvkm_memory_target(pt->memory)) {
+ case NVKM_MEM_TARGET_VRAM:
+ *data |= NVDEF(NV_MMU, VER3_PDE, APERTURE, VIDEO_MEMORY);
+ *data |= NVDEF(NV_MMU, VER3_PDE, PCF, VALID_CACHED_ATS_NOT_ALLOWED);
+ break;
+ case NVKM_MEM_TARGET_HOST:
+ *data |= NVDEF(NV_MMU, VER3_PDE, APERTURE, SYSTEM_COHERENT_MEMORY);
+ *data |= NVDEF(NV_MMU, VER3_PDE, PCF, VALID_UNCACHED_ATS_ALLOWED);
+ break;
+ case NVKM_MEM_TARGET_NCOH:
+ *data |= NVDEF(NV_MMU, VER3_PDE, APERTURE, SYSTEM_NON_COHERENT_MEMORY);
+ *data |= NVDEF(NV_MMU, VER3_PDE, PCF, VALID_CACHED_ATS_ALLOWED);
+ break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+
+ *data |= pt->addr;
+ return true;
+}
+
+static void
+gh100_vmm_pd0_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ struct nvkm_mmu_pt *pd = pgd->pt[0];
+ u64 data[2] = {};
+
+ if (pgt->pt[0] && !gh100_vmm_pde(pgt->pt[0], &data[0]))
+ return;
+ if (pgt->pt[1] && !gh100_vmm_pde(pgt->pt[1], &data[1]))
+ return;
+
+ nvkm_kmap(pd->memory);
+ VMM_WO128(pd, vmm, pdei * NV_MMU_VER3_DUAL_PDE__SIZE, data[0], data[1]);
+ nvkm_done(pd->memory);
+}
+
+static void
+gh100_vmm_pd0_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+ const u64 data = NVDEF(NV_MMU, VER3_DUAL_PDE, PCF_BIG, SPARSE_ATS_ALLOWED);
+
+ VMM_FO128(pt, vmm, pdei * NV_MMU_VER3_DUAL_PDE__SIZE, data, 0ULL, pdes);
+}
+
+static void
+gh100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+ VMM_FO128(pt, vmm, pdei * NV_MMU_VER3_DUAL_PDE__SIZE, 0ULL, 0ULL, pdes);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_pd0 = {
+ .unmap = gh100_vmm_pd0_unmap,
+ .sparse = gh100_vmm_pd0_sparse,
+ .pde = gh100_vmm_pd0_pde,
+ .mem = gh100_vmm_pd0_mem,
+};
+
+static void
+gh100_vmm_pd1_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ struct nvkm_mmu_pt *pd = pgd->pt[0];
+ u64 data = 0;
+
+ if (!gh100_vmm_pde(pgt->pt[0], &data))
+ return;
+
+ nvkm_kmap(pd->memory);
+ VMM_WO064(pd, vmm, pdei * NV_MMU_VER3_PDE__SIZE, data);
+ nvkm_done(pd->memory);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_pd1 = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gh100_vmm_pgt_sparse,
+ .pde = gh100_vmm_pd1_pde,
+};
+
+static const struct nvkm_vmm_desc
+gh100_vmm_desc_16[] = {
+ { LPT, 5, 8, 0x0100, &gh100_vmm_desc_lpt },
+ { PGD, 8, 16, 0x1000, &gh100_vmm_desc_pd0 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 1, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ {}
+};
+
+static const struct nvkm_vmm_desc
+gh100_vmm_desc_12[] = {
+ { SPT, 9, 8, 0x1000, &gh100_vmm_desc_spt },
+ { PGD, 8, 16, 0x1000, &gh100_vmm_desc_pd0 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 1, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ {}
+};
+
+static int
+gh100_vmm_valid(struct nvkm_vmm *vmm, bool ro, bool priv, u8 kind, u8 comp,
+ struct nvkm_vmm_map *map)
+{
+ const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
+ const bool vol = target == NVKM_MEM_TARGET_HOST;
+ const struct nvkm_vmm_page *page = map->page;
+ u8 kind_inv, pcf;
+ int kindn, aper;
+ const u8 *kindm;
+
+ map->next = 1ULL << page->shift;
+ map->type = 0;
+
+ aper = vmm->func->aper(target);
+ if (WARN_ON(aper < 0))
+ return aper;
+
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+ if (kind >= kindn || kindm[kind] == kind_inv) {
+ VMM_DEBUG(vmm, "kind %02x", kind);
+ return -EINVAL;
+ }
+
+ if (priv) {
+ if (ro) {
+ if (vol)
+ pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACD;
+ else
+ pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_CACHED_ACD;
+ } else {
+ if (vol)
+ pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACD;
+ else
+ pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACD;
+ }
+ } else {
+ if (ro) {
+ if (vol)
+ pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACD;
+ else
+ pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACD;
+ } else {
+ if (vol)
+ pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACD;
+ else
+ pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACD;
+ }
+ }
+
+ map->type |= NVDEF(NV_MMU, VER3_PTE, VALID, TRUE);
+ map->type |= NVVAL(NV_MMU, VER3_PTE, APERTURE, aper);
+ map->type |= NVVAL(NV_MMU, VER3_PTE, PCF, pcf);
+ map->type |= NVVAL(NV_MMU, VER3_PTE, KIND, kind);
+ return 0;
+}
+
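The nested priv/ro/vol ladder in gh100_vmm_valid() above reduces to a three-bit table lookup; a hedged sketch of the equivalent shape (the PCF values below are placeholders, not the NV_MMU_VER3 encodings):

#include <stdint.h>

/* Same decision structure as the PCF selection above, indexed by
 * privileged / read-only / volatile. Values are placeholders only. */
static uint8_t pick_pcf(int priv, int ro, int vol)
{
	static const uint8_t pcf[2][2][2] = {
		/* [priv][ro][vol] */
		{ { 0, 1 }, { 2, 3 } },	/* regular RW/RO */
		{ { 4, 5 }, { 6, 7 } },	/* privileged RW/RO */
	};

	return pcf[!!priv][!!ro][!!vol];
}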
+static const struct nvkm_vmm_func
+gh100_vmm = {
+ .join = gv100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gp100_vmm_valid,
+ .valid2 = gh100_vmm_valid,
+ .flush = tu102_vmm_flush,
+ .page = {
+ { 56, &gh100_vmm_desc_16[5], NVKM_VMM_PAGE_Sxxx },
+ { 47, &gh100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+ { 38, &gh100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+ { 29, &gh100_vmm_desc_16[2], NVKM_VMM_PAGE_SVxC },
+ { 21, &gh100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
+ { 16, &gh100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
+ { 12, &gh100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
+ {}
+ }
+};
+
+int
+gh100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gp100_vmm_new_(&gh100_vmm, mmu, managed, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index bddac77f48f0..851fd847a2a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -436,6 +436,9 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return ret;
}
+ if (vmm->func->valid2)
+ return vmm->func->valid2(vmm, ro, priv, kind, 0, map);
+
aper = vmm->func->aper(target);
if (WARN_ON(aper < 0))
return aper;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
index 8379e72d77ab..4b30eab40bba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
@@ -23,7 +23,7 @@
#include <subdev/timer.h>
-static void
+void
tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
struct nvkm_device *device = vmm->mmu->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
index 174bdf995271..a14ea0f7b1c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
@@ -13,3 +13,4 @@ nvkm-y += nvkm/subdev/pci/gf100.o
nvkm-y += nvkm/subdev/pci/gf106.o
nvkm-y += nvkm/subdev/pci/gk104.o
nvkm-y += nvkm/subdev/pci/gp100.o
+nvkm-y += nvkm/subdev/pci/gh100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index 5a0de45d36ce..6867934256a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -39,26 +39,26 @@ nvkm_pci_msi_rearm(struct nvkm_device *device)
u32
nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
{
- return pci->func->rd32(pci, addr);
+ return nvkm_rd32(pci->subdev.device, pci->func->cfg.addr + addr);
}
void
nvkm_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
{
- pci->func->wr08(pci, addr, data);
+ nvkm_wr08(pci->subdev.device, pci->func->cfg.addr + addr, data);
}
void
nvkm_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
{
- pci->func->wr32(pci, addr, data);
+ nvkm_wr32(pci->subdev.device, pci->func->cfg.addr + addr, data);
}
u32
nvkm_pci_mask(struct nvkm_pci *pci, u16 addr, u32 mask, u32 value)
{
- u32 data = pci->func->rd32(pci, addr);
- pci->func->wr32(pci, addr, (data & ~mask) | value);
+ u32 data = nvkm_pci_rd32(pci, addr);
+ nvkm_pci_wr32(pci, addr, (data & ~mask) | value);
return data;
}
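The hunk above replaces the per-chip rd32/wr08/wr32 hooks with a single config-window base, so every accessor becomes base-plus-offset arithmetic. A self-contained sketch of the resulting shape (fake MMIO array and names, not the nvkm API):

#include <stdint.h>

static uint32_t mmio[0x100000 / 4];	/* fake BAR0 aperture */

static uint32_t rd32(uint32_t addr)             { return mmio[addr / 4]; }
static void     wr32(uint32_t addr, uint32_t d) { mmio[addr / 4] = d; }

/* With cfg.addr in nvkm_pci_func, a config-space read is base + offset. */
static uint32_t pci_rd32(uint32_t cfg_base, uint16_t addr)
{
	return rd32(cfg_base + addr);
}

/* Read-modify-write returning the old value, mirroring nvkm_pci_mask(). */
static uint32_t pci_mask(uint32_t cfg_base, uint16_t addr,
			 uint32_t mask, uint32_t value)
{
	uint32_t data = pci_rd32(cfg_base, addr);

	wr32(cfg_base + addr, (data & ~mask) | value);
	return data;
}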
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
index 5b29aacedef3..5308f6539a3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
@@ -132,10 +132,9 @@ g84_pcie_init(struct nvkm_pci *pci)
static const struct nvkm_pci_func
g84_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv46_pci_msi_rearm,
.pcie.init = g84_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
index a9e0674009c6..8ae7aa02e675 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
@@ -33,10 +33,9 @@ g92_pcie_version_supported(struct nvkm_pci *pci)
static const struct nvkm_pci_func
g92_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv46_pci_msi_rearm,
.pcie.init = g84_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
index 7bacd0693283..df745d0690ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
@@ -25,10 +25,9 @@
static const struct nvkm_pci_func
g94_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv40_pci_msi_rearm,
.pcie.init = g84_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
index 099906092fe1..6ce941df87b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
@@ -78,10 +78,9 @@ gf100_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
static const struct nvkm_pci_func
gf100_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = gf100_pci_msi_rearm,
.pcie.init = gf100_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
index bcde609ba866..712ca7e0959a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
@@ -25,10 +25,9 @@
static const struct nvkm_pci_func
gf106_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv40_pci_msi_rearm,
.pcie.init = gf100_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c
new file mode 100644
index 000000000000..42da92d7a5fe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_xtl_ep_pri.h>
+
+static void
+gh100_pci_msi_rearm(struct nvkm_pci *pci)
+{
+ /* Handled by top-level intr ACK. */
+}
+
+static const struct nvkm_pci_func
+gh100_pci = {
+ .cfg = {
+ .addr = DRF_LO(NV_EP_PCFGM),
+ .size = DRF_HI(NV_EP_PCFGM) - DRF_LO(NV_EP_PCFGM) + 1,
+ },
+ .msi_rearm = gh100_pci_msi_rearm,
+};
+
+int
+gh100_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
+{
+ return nvkm_pci_new_(&gh100_pci, device, type, inst, ppci);
+}
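Unlike the older chips, gh100 derives its config window from the NV_EP_PCFGM range macro instead of hard-coding 0x088000; the window size falls out of inclusive-range arithmetic. A sketch with made-up bounds (the real values live in dev_xtl_ep_pri.h):

/* FAKE_* values are invented for illustration; they stand in for
 * DRF_LO(NV_EP_PCFGM) and DRF_HI(NV_EP_PCFGM). */
#define FAKE_PCFGM_LO 0x008f0000u
#define FAKE_PCFGM_HI 0x008fffffu

/* An inclusive LO..HI byte range spans HI - LO + 1 bytes. */
static const unsigned int cfg_addr = FAKE_PCFGM_LO;
static const unsigned int cfg_size = FAKE_PCFGM_HI - FAKE_PCFGM_LO + 1;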
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
index 6be87ecffc89..ec6d0a7de995 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
@@ -204,10 +204,9 @@ gk104_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
static const struct nvkm_pci_func
gk104_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv40_pci_msi_rearm,
.pcie.init = gk104_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
index a5fafda0014d..4204316a544f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
@@ -31,9 +31,7 @@ gp100_pci_msi_rearm(struct nvkm_pci *pci)
static const struct nvkm_pci_func
gp100_pci_func = {
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
.msi_rearm = gp100_pci_msi_rearm,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
index 9ab64194b185..b8a3f6850fa7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
@@ -23,32 +23,9 @@
*/
#include "priv.h"
-static u32
-nv04_pci_rd32(struct nvkm_pci *pci, u16 addr)
-{
- struct nvkm_device *device = pci->subdev.device;
- return nvkm_rd32(device, 0x001800 + addr);
-}
-
-static void
-nv04_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
-{
- struct nvkm_device *device = pci->subdev.device;
- nvkm_wr08(device, 0x001800 + addr, data);
-}
-
-static void
-nv04_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
-{
- struct nvkm_device *device = pci->subdev.device;
- nvkm_wr32(device, 0x001800 + addr, data);
-}
-
static const struct nvkm_pci_func
nv04_pci_func = {
- .rd32 = nv04_pci_rd32,
- .wr08 = nv04_pci_wr08,
- .wr32 = nv04_pci_wr32,
+ .cfg = { .addr = 0x001800, .size = 0x1000 },
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
index 6a3c31cf0200..1971dbbdeb2b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
@@ -23,27 +23,6 @@
*/
#include "priv.h"
-u32
-nv40_pci_rd32(struct nvkm_pci *pci, u16 addr)
-{
- struct nvkm_device *device = pci->subdev.device;
- return nvkm_rd32(device, 0x088000 + addr);
-}
-
-void
-nv40_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
-{
- struct nvkm_device *device = pci->subdev.device;
- nvkm_wr08(device, 0x088000 + addr, data);
-}
-
-void
-nv40_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
-{
- struct nvkm_device *device = pci->subdev.device;
- nvkm_wr32(device, 0x088000 + addr, data);
-}
-
void
nv40_pci_msi_rearm(struct nvkm_pci *pci)
{
@@ -52,9 +31,7 @@ nv40_pci_msi_rearm(struct nvkm_pci *pci)
static const struct nvkm_pci_func
nv40_pci_func = {
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
.msi_rearm = nv40_pci_msi_rearm,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
index 9cad17f178ec..0093eabac9ae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
@@ -38,9 +38,7 @@ nv46_pci_msi_rearm(struct nvkm_pci *pci)
static const struct nvkm_pci_func
nv46_pci_func = {
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
.msi_rearm = nv46_pci_msi_rearm,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
index 741e34bf307c..b445081bb80e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
@@ -25,9 +25,7 @@
static const struct nvkm_pci_func
nv4c_pci_func = {
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
index 9b7583532962..988eeee1471c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -8,10 +8,12 @@ int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *, enum nvkm_
struct nvkm_pci **);
struct nvkm_pci_func {
+ struct {
+ u32 addr;
+ u16 size;
+ } cfg;
+
void (*init)(struct nvkm_pci *);
- u32 (*rd32)(struct nvkm_pci *, u16 addr);
- void (*wr08)(struct nvkm_pci *, u16 addr, u8 data);
- void (*wr32)(struct nvkm_pci *, u16 addr, u32 data);
void (*msi_rearm)(struct nvkm_pci *);
struct {
@@ -27,9 +29,6 @@ struct nvkm_pci_func {
} pcie;
};
-u32 nv40_pci_rd32(struct nvkm_pci *, u16);
-void nv40_pci_wr08(struct nvkm_pci *, u16, u8);
-void nv40_pci_wr32(struct nvkm_pci *, u16, u32);
void nv40_pci_msi_rearm(struct nvkm_pci *);
void nv46_pci_msi_rearm(struct nvkm_pci *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
index dce337306cab..9446049642e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
@@ -21,6 +21,8 @@
*/
#include "priv.h"
+#include <rm/gpu.h>
+
static void
r535_vfn_dtor(struct nvkm_vfn *vfn)
{
@@ -32,6 +34,7 @@ r535_vfn_new(const struct nvkm_vfn_func *hw,
struct nvkm_device *device, enum nvkm_subdev_type type, int inst, u32 addr,
struct nvkm_vfn **pvfn)
{
+ const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu;
struct nvkm_vfn_func *rm;
int ret;
@@ -39,8 +42,12 @@ r535_vfn_new(const struct nvkm_vfn_func *hw,
return -ENOMEM;
rm->dtor = r535_vfn_dtor;
- rm->intr = hw->intr;
- rm->user = hw->user;
+ rm->intr = &tu102_vfn_intr;
+ rm->user.addr = 0x030000;
+ rm->user.size = 0x010000;
+ rm->user.base.minver = -1;
+ rm->user.base.maxver = -1;
+ rm->user.base.oclass = gpu->usermode.class;
ret = nvkm_vfn_new_(rm, device, type, inst, addr, pvfn);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c
index c5460a14c541..4e64d8843373 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c
@@ -36,7 +36,7 @@ nvkm_uvfn_map(struct nvkm_object *object, void *argv, u32 argc,
struct nvkm_vfn *vfn = nvkm_uvfn(object)->vfn;
struct nvkm_device *device = vfn->subdev.device;
- *addr = device->func->resource_addr(device, 0) + vfn->addr.user;
+ *addr = device->func->resource_addr(device, NVKM_BAR0_PRI) + vfn->addr.user;
*size = vfn->func->user.size;
*type = NVKM_OBJECT_MAP_IO;
return 0;
diff --git a/drivers/gpu/drm/nova/Kconfig b/drivers/gpu/drm/nova/Kconfig
new file mode 100644
index 000000000000..cca6a3fea879
--- /dev/null
+++ b/drivers/gpu/drm/nova/Kconfig
@@ -0,0 +1,14 @@
+config DRM_NOVA
+ tristate "Nova DRM driver"
+ depends on DRM=y
+ depends on PCI
+ depends on RUST
+ select AUXILIARY_BUS
+ default n
+ help
+ Choose this if you want to build the Nova DRM driver for Nvidia
+ GSP-based GPUs.
+
+ This driver is a work in progress and may not be functional.
+
+ If M is selected, the module will be called nova.
diff --git a/drivers/gpu/drm/nova/Makefile b/drivers/gpu/drm/nova/Makefile
new file mode 100644
index 000000000000..42019bff3173
--- /dev/null
+++ b/drivers/gpu/drm/nova/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_DRM_NOVA) += nova.o
diff --git a/drivers/gpu/drm/nova/driver.rs b/drivers/gpu/drm/nova/driver.rs
new file mode 100644
index 000000000000..b28b2e05cc15
--- /dev/null
+++ b/drivers/gpu/drm/nova/driver.rs
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::{auxiliary, c_str, device::Core, drm, drm::gem, drm::ioctl, prelude::*, types::ARef};
+
+use crate::file::File;
+use crate::gem::NovaObject;
+
+pub(crate) struct NovaDriver {
+ #[expect(unused)]
+ drm: ARef<drm::Device<Self>>,
+}
+
+/// Convenience type alias for the DRM device type for this driver
+pub(crate) type NovaDevice = drm::Device<NovaDriver>;
+
+#[pin_data]
+pub(crate) struct NovaData {
+ pub(crate) adev: ARef<auxiliary::Device>,
+}
+
+const INFO: drm::DriverInfo = drm::DriverInfo {
+ major: 0,
+ minor: 0,
+ patchlevel: 0,
+ name: c_str!("nova"),
+ desc: c_str!("Nvidia Graphics"),
+};
+
+const NOVA_CORE_MODULE_NAME: &CStr = c_str!("NovaCore");
+const AUXILIARY_NAME: &CStr = c_str!("nova-drm");
+
+kernel::auxiliary_device_table!(
+ AUX_TABLE,
+ MODULE_AUX_TABLE,
+ <NovaDriver as auxiliary::Driver>::IdInfo,
+ [(
+ auxiliary::DeviceId::new(NOVA_CORE_MODULE_NAME, AUXILIARY_NAME),
+ ()
+ )]
+);
+
+impl auxiliary::Driver for NovaDriver {
+ type IdInfo = ();
+ const ID_TABLE: auxiliary::IdTable<Self::IdInfo> = &AUX_TABLE;
+
+ fn probe(adev: &auxiliary::Device<Core>, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
+ let data = try_pin_init!(NovaData { adev: adev.into() });
+
+ let drm = drm::Device::<Self>::new(adev.as_ref(), data)?;
+ drm::Registration::new_foreign_owned(&drm, adev.as_ref(), 0)?;
+
+ Ok(KBox::new(Self { drm }, GFP_KERNEL)?.into())
+ }
+}
+
+#[vtable]
+impl drm::Driver for NovaDriver {
+ type Data = NovaData;
+ type File = File;
+ type Object = gem::Object<NovaObject>;
+
+ const INFO: drm::DriverInfo = INFO;
+
+ kernel::declare_drm_ioctls! {
+ (NOVA_GETPARAM, drm_nova_getparam, ioctl::RENDER_ALLOW, File::get_param),
+ (NOVA_GEM_CREATE, drm_nova_gem_create, ioctl::AUTH | ioctl::RENDER_ALLOW, File::gem_create),
+ (NOVA_GEM_INFO, drm_nova_gem_info, ioctl::AUTH | ioctl::RENDER_ALLOW, File::gem_info),
+ }
+}
diff --git a/drivers/gpu/drm/nova/file.rs b/drivers/gpu/drm/nova/file.rs
new file mode 100644
index 000000000000..7e59a34b830d
--- /dev/null
+++ b/drivers/gpu/drm/nova/file.rs
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use crate::driver::{NovaDevice, NovaDriver};
+use crate::gem::NovaObject;
+use crate::uapi::{GemCreate, GemInfo, Getparam};
+use kernel::{
+ alloc::flags::*,
+ drm::{self, gem::BaseObject},
+ pci,
+ prelude::*,
+ types::Opaque,
+ uapi,
+};
+
+pub(crate) struct File;
+
+impl drm::file::DriverFile for File {
+ type Driver = NovaDriver;
+
+ fn open(_dev: &NovaDevice) -> Result<Pin<KBox<Self>>> {
+ Ok(KBox::new(Self, GFP_KERNEL)?.into())
+ }
+}
+
+impl File {
+ /// IOCTL: get_param: Query GPU / driver metadata.
+ pub(crate) fn get_param(
+ dev: &NovaDevice,
+ getparam: &Opaque<uapi::drm_nova_getparam>,
+ _file: &drm::File<File>,
+ ) -> Result<u32> {
+ let adev = &dev.adev;
+ let parent = adev.parent().ok_or(ENOENT)?;
+ let pdev: &pci::Device = parent.try_into()?;
+ let getparam: &Getparam = getparam.into();
+
+ let value = match getparam.param() as u32 {
+ uapi::NOVA_GETPARAM_VRAM_BAR_SIZE => pdev.resource_len(1)?,
+ _ => return Err(EINVAL),
+ };
+
+ getparam.set_value(value);
+
+ Ok(0)
+ }
+
+ /// IOCTL: gem_create: Create a new DRM GEM object.
+ pub(crate) fn gem_create(
+ dev: &NovaDevice,
+ req: &Opaque<uapi::drm_nova_gem_create>,
+ file: &drm::File<File>,
+ ) -> Result<u32> {
+ let req: &GemCreate = req.into();
+ let obj = NovaObject::new(dev, req.size().try_into()?)?;
+
+ req.set_handle(obj.create_handle(file)?);
+
+ Ok(0)
+ }
+
+ /// IOCTL: gem_info: Query GEM metadata.
+ pub(crate) fn gem_info(
+ _dev: &NovaDevice,
+ req: &Opaque<uapi::drm_nova_gem_info>,
+ file: &drm::File<File>,
+ ) -> Result<u32> {
+ let req: &GemInfo = req.into();
+ let bo = NovaObject::lookup_handle(file, req.handle())?;
+
+ req.set_size(bo.size().try_into()?);
+
+ Ok(0)
+ }
+}
diff --git a/drivers/gpu/drm/nova/gem.rs b/drivers/gpu/drm/nova/gem.rs
new file mode 100644
index 000000000000..33b62d21400c
--- /dev/null
+++ b/drivers/gpu/drm/nova/gem.rs
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::{
+ drm,
+ drm::{gem, gem::BaseObject},
+ prelude::*,
+ types::ARef,
+};
+
+use crate::{
+ driver::{NovaDevice, NovaDriver},
+ file::File,
+};
+
+/// GEM object inner driver data
+#[pin_data]
+pub(crate) struct NovaObject {}
+
+impl gem::BaseDriverObject<gem::Object<NovaObject>> for NovaObject {
+ fn new(_dev: &NovaDevice, _size: usize) -> impl PinInit<Self, Error> {
+ try_pin_init!(NovaObject {})
+ }
+}
+
+impl gem::DriverObject for NovaObject {
+ type Driver = NovaDriver;
+}
+
+impl NovaObject {
+ /// Create a new DRM GEM object.
+ pub(crate) fn new(dev: &NovaDevice, size: usize) -> Result<ARef<gem::Object<Self>>> {
+ let aligned_size = size.next_multiple_of(1 << 12);
+
+ if size == 0 || size > aligned_size {
+ return Err(EINVAL);
+ }
+
+ gem::Object::new(dev, aligned_size)
+ }
+
+ /// Look up a GEM object handle for a `File` and return an `ObjectRef` for it.
+ #[inline]
+ pub(crate) fn lookup_handle(
+ file: &drm::File<File>,
+ handle: u32,
+ ) -> Result<ARef<gem::Object<Self>>> {
+ gem::Object::lookup_handle(file, handle)
+ }
+}
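NovaObject::new() above rounds the requested size up to a 4 KiB page and rejects both zero and arithmetic wrap-around (the size > aligned_size test). The same guard as a standalone C sketch:

#include <stdbool.h>
#include <stdint.h>

/* Round "size" up to 4 KiB; fail on zero or overflow, the two cases
 * the Rust code above maps to EINVAL. */
static bool align_obj_size(uint64_t size, uint64_t *aligned)
{
	const uint64_t page = 1ull << 12;

	*aligned = (size + page - 1) & ~(page - 1);
	return size != 0 && *aligned >= size;
}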
diff --git a/drivers/gpu/drm/nova/nova.rs b/drivers/gpu/drm/nova/nova.rs
new file mode 100644
index 000000000000..902876aa14d1
--- /dev/null
+++ b/drivers/gpu/drm/nova/nova.rs
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Nova DRM Driver
+
+mod driver;
+mod file;
+mod gem;
+mod uapi;
+
+use crate::driver::NovaDriver;
+
+kernel::module_auxiliary_driver! {
+ type: NovaDriver,
+ name: "Nova",
+ author: "Danilo Krummrich",
+ description: "Nova GPU driver",
+ license: "GPL v2",
+}
diff --git a/drivers/gpu/drm/nova/uapi.rs b/drivers/gpu/drm/nova/uapi.rs
new file mode 100644
index 000000000000..eb228a58d423
--- /dev/null
+++ b/drivers/gpu/drm/nova/uapi.rs
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::uapi;
+
+// TODO: Work out some common infrastructure to avoid boilerplate code for uAPI abstractions.
+
+macro_rules! define_uapi_abstraction {
+ ($name:ident <= $inner:ty) => {
+ #[repr(transparent)]
+ pub struct $name(::kernel::types::Opaque<$inner>);
+
+ impl ::core::convert::From<&::kernel::types::Opaque<$inner>> for &$name {
+ fn from(value: &::kernel::types::Opaque<$inner>) -> Self {
+ // SAFETY: `Self` is a transparent wrapper of `$inner`.
+ unsafe { ::core::mem::transmute(value) }
+ }
+ }
+ };
+}
+
+define_uapi_abstraction!(Getparam <= uapi::drm_nova_getparam);
+
+impl Getparam {
+ pub fn param(&self) -> u64 {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_getparam`.
+ unsafe { (*self.0.get()).param }
+ }
+
+ pub fn set_value(&self, v: u64) {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_getparam`.
+ unsafe { (*self.0.get()).value = v };
+ }
+}
+
+define_uapi_abstraction!(GemCreate <= uapi::drm_nova_gem_create);
+
+impl GemCreate {
+ pub fn size(&self) -> u64 {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_create`.
+ unsafe { (*self.0.get()).size }
+ }
+
+ pub fn set_handle(&self, handle: u32) {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_create`.
+ unsafe { (*self.0.get()).handle = handle };
+ }
+}
+
+define_uapi_abstraction!(GemInfo <= uapi::drm_nova_gem_info);
+
+impl GemInfo {
+ pub fn handle(&self) -> u32 {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_info`.
+ unsafe { (*self.0.get()).handle }
+ }
+
+ pub fn set_size(&self, size: u64) {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_info`.
+ unsafe { (*self.0.get()).size = size };
+ }
+}
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index b17e77f700dd..6eff97a09160 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -420,6 +420,7 @@ static void dpi_init_pll(struct dpi_data *dpi)
*/
static int dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
@@ -429,7 +430,7 @@ static int dpi_bridge_attach(struct drm_bridge *bridge,
dpi_init_pll(dpi);
- return drm_bridge_attach(bridge->encoder, dpi->output.next_bridge,
+ return drm_bridge_attach(encoder, dpi->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 9b9cc593790c..91ee63bfe0bc 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -4617,6 +4617,7 @@ static const struct component_ops dsi_component_ops = {
*/
static int dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
@@ -4624,7 +4625,7 @@ static int dsi_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, dsi->output.next_bridge,
+ return drm_bridge_attach(encoder, dsi->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index e1ac447221ee..a3b22952fdc3 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -314,6 +314,7 @@ void hdmi4_core_disable(struct hdmi_core_data *core)
*/
static int hdmi4_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
@@ -321,7 +322,7 @@ static int hdmi4_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+ return drm_bridge_attach(encoder, hdmi->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index fa9904e4c218..0c98444d39a9 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -312,6 +312,7 @@ static void hdmi_core_disable(struct omap_hdmi *hdmi)
*/
static int hdmi5_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
@@ -319,7 +320,7 @@ static int hdmi5_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+ return drm_bridge_attach(encoder, hdmi->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index f9ae358e8e52..e78826e4b560 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -128,6 +128,7 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi)
*/
static int sdi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
@@ -135,7 +136,7 @@ static int sdi_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, sdi->output.next_bridge,
+ return drm_bridge_attach(encoder, sdi->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index aaeef603682c..50349518eda1 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -538,6 +538,7 @@ static int venc_get_clocks(struct venc_device *venc)
*/
static int venc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct venc_device *venc = drm_bridge_to_venc(bridge);
@@ -545,7 +546,7 @@ static int venc_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, venc->output.next_bridge,
+ return drm_bridge_attach(encoder, venc->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index e059b06e0239..721581d425b4 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -67,6 +67,15 @@ config DRM_PANEL_BOE_HIMAX8279D
24 bit RGB per pixel. It provides a MIPI DSI interface to
the host and has a built-in LED backlight.
+config DRM_PANEL_BOE_TD4320
+ tristate "BOE TD4320 DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the BOE TD4320 1080x2340
+ video mode panel found in Xiaomi Redmi Note 7 smartphones.
+
config DRM_PANEL_BOE_TH101MB31UIG002_28A
tristate "Boe TH101MB31UIG002-28A panel"
depends on OF
@@ -154,6 +163,17 @@ config DRM_PANEL_LVDS
handling of power supplies or control signals. It implements automatic
backlight handling if the panel is attached to a backlight controller.
+config DRM_PANEL_HIMAX_HX8279
+ tristate "Himax HX8279-based panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for panels based on the
+ Himax HX8279 controller, such as the Startek KD070FHFID078,
+ a 7.0" 1200x1920 IPS LCD panel with a MIPI-DSI interface.
+
config DRM_PANEL_HIMAX_HX83102
tristate "Himax HX83102-based panels"
depends on OF
@@ -497,6 +517,16 @@ config DRM_PANEL_NOVATEK_NT36672E
LCD panel module. The panel has a resolution of 1080x2408 and uses 24 bit
RGB per pixel.
+config DRM_PANEL_NOVATEK_NT37801
+ tristate "Novatek NT37801/NT37810 AMOLED DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Novatek NT37801
+ (or NT37810) AMOLED DSI video mode LCD panel module with 1440x3200
+ resolution.
+
config DRM_PANEL_NOVATEK_NT39016
tristate "Novatek NT39016 RGB/SPI panel"
depends on OF && SPI
@@ -996,6 +1026,15 @@ config DRM_PANEL_TRULY_NT35597_WQXGA
Say Y here if you want to enable support for Truly NT35597 WQXGA Dual DSI
Video Mode panel
+config DRM_PANEL_VISIONOX_G2647FB105
+ tristate "Visionox G2647FB105"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Visionox
+ G2647FB105 (2340x1080@60Hz) AMOLED DSI cmd mode panel.
+
config DRM_PANEL_VISIONOX_R66451
tristate "Visionox R66451"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 1bb8ae46b59b..714cbac830e3 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.
obj-$(CONFIG_DRM_PANEL_AUO_A030JTN01) += panel-auo-a030jtn01.o
obj-$(CONFIG_DRM_PANEL_BOE_BF060Y8M_AJ0) += panel-boe-bf060y8m-aj0.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
+obj-$(CONFIG_DRM_PANEL_BOE_TD4320) += panel-boe-td4320.o
obj-$(CONFIG_DRM_PANEL_BOE_TH101MB31UIG002_28A) += panel-boe-th101mb31ig002-28a.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_LL2) += panel-boe-tv101wum-ll2.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
@@ -16,6 +17,7 @@ obj-$(CONFIG_DRM_PANEL_EBBG_FT8719) += panel-ebbg-ft8719.o
obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
+obj-$(CONFIG_DRM_PANEL_HIMAX_HX8279) += panel-himax-hx8279.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83102) += panel-himax-hx83102.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112A) += panel-himax-hx83112a.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o
@@ -49,6 +51,7 @@ obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35950) += panel-novatek-nt35950.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36523) += panel-novatek-nt36523.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672E) += panel-novatek-nt36672e.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT37801) += panel-novatek-nt37801.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_MANTIX_MLAF057WE51) += panel-mantix-mlaf057we51.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
@@ -101,6 +104,7 @@ obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
+obj-$(CONFIG_DRM_PANEL_VISIONOX_G2647FB105) += panel-visionox-g2647fb105.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_RM692E5) += panel-visionox-rm692e5.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_VTDR6130) += panel-visionox-vtdr6130.o
diff --git a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
index 4692c36fe217..87fb0fd29658 100644
--- a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
+++ b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
@@ -279,9 +279,10 @@ static int y030xx067a_probe(struct spi_device *spi)
struct y030xx067a *priv;
int err;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct y030xx067a, panel,
+ &y030xx067a_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->spi = spi;
spi_set_drvdata(spi, priv);
@@ -306,9 +307,6 @@ static int y030xx067a_probe(struct spi_device *spi)
return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
"Failed to get reset GPIO\n");
- drm_panel_init(&priv->panel, dev, &y030xx067a_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&priv->panel);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
index 503ecea72c5e..ea5119018df4 100644
--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -306,9 +306,11 @@ static int versatile_panel_probe(struct platform_device *pdev)
return PTR_ERR(map);
}
- vpanel = devm_kzalloc(dev, sizeof(*vpanel), GFP_KERNEL);
- if (!vpanel)
- return -ENOMEM;
+ vpanel = devm_drm_panel_alloc(dev, struct versatile_panel, panel,
+ &versatile_panel_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(vpanel))
+ return PTR_ERR(vpanel);
ret = regmap_read(map, SYS_CLCD, &val);
if (ret) {
@@ -348,9 +350,6 @@ static int versatile_panel_probe(struct platform_device *pdev)
dev_info(dev, "panel mounted on IB2 daughterboard\n");
}
- drm_panel_init(&vpanel->panel, dev, &versatile_panel_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&vpanel->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
index b05a663c134c..db006576d704 100644
--- a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
+++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
@@ -224,9 +224,11 @@ static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
struct tm5p5_nt35596 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct tm5p5_nt35596, panel,
+ &tm5p5_nt35596_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vdd";
ctx->supplies[1].supply = "vddio";
@@ -253,9 +255,6 @@ static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel, dev, &tm5p5_nt35596_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->panel.backlight = tm5p5_nt35596_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight)) {
ret = PTR_ERR(ctx->panel.backlight);
diff --git a/drivers/gpu/drm/panel/panel-auo-a030jtn01.c b/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
index 77604d6a4e72..6e52bf6830e1 100644
--- a/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
+++ b/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
@@ -200,9 +200,10 @@ static int a030jtn01_probe(struct spi_device *spi)
spi->mode |= SPI_MODE_3 | SPI_3WIRE;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct a030jtn01, panel,
+ &a030jtn01_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->spi = spi;
spi_set_drvdata(spi, priv);
@@ -223,9 +224,6 @@ static int a030jtn01_probe(struct spi_device *spi)
if (IS_ERR(priv->reset_gpio))
return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), "Failed to get reset GPIO");
- drm_panel_init(&priv->panel, dev, &a030jtn01_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&priv->panel);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
index 7e66db4a88bb..84c21c62a43e 100644
--- a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
+++ b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
@@ -55,77 +55,56 @@ static void boe_bf060y8m_aj0_reset(struct boe_bf060y8m_aj0 *boe)
static int boe_bf060y8m_aj0_on(struct boe_bf060y8m_aj0 *boe)
{
struct mipi_dsi_device *dsi = boe->dsi;
- struct device *dev = &dsi->dev;
- int ret;
-
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x4c);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x10);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE);
- mipi_dsi_dcs_write_seq(dsi, 0xf8,
- 0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(30);
-
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc0,
- 0x08, 0x48, 0x65, 0x33, 0x33, 0x33,
- 0x2a, 0x31, 0x39, 0x20, 0x09);
- mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
- mipi_dsi_dcs_write_seq(dsi, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92,
- 0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83,
- 0x5c, 0x5c, 0x5c);
- mipi_dsi_dcs_write_seq(dsi, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e);
-
- msleep(30);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
- msleep(50);
-
- return 0;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0xa5, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x00, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_3D_CONTROL, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf8,
+ 0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 30);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0xa5, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc0,
+ 0x08, 0x48, 0x65, 0x33, 0x33, 0x33,
+ 0x2a, 0x31, 0x39, 0x20, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92,
+ 0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83,
+ 0x5c, 0x5c, 0x5c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e);
+
+ mipi_dsi_msleep(&dsi_ctx, 30);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
+
+ return dsi_ctx.accum_err;
}
-static int boe_bf060y8m_aj0_off(struct boe_bf060y8m_aj0 *boe)
+static void boe_bf060y8m_aj0_off(struct boe_bf060y8m_aj0 *boe)
{
struct mipi_dsi_device *dsi = boe->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
/* OFF commands sent in HS mode */
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(20);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- usleep_range(1000, 2000);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 1000, 2000);
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
-
- return 0;
}
static int boe_bf060y8m_aj0_prepare(struct drm_panel *panel)
{
struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel);
- struct device *dev = &boe->dsi->dev;
int ret;
/*
@@ -157,13 +136,14 @@ static int boe_bf060y8m_aj0_prepare(struct drm_panel *panel)
ret = boe_bf060y8m_aj0_on(boe);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(boe->reset_gpio, 1);
- return ret;
+ goto err_on;
}
return 0;
+err_on:
+ regulator_disable(boe->vregs[BF060Y8M_VREG_VCI].consumer);
err_vci:
regulator_disable(boe->vregs[BF060Y8M_VREG_VDDIO].consumer);
err_vddio:
@@ -178,15 +158,11 @@ err_elvss:
static int boe_bf060y8m_aj0_unprepare(struct drm_panel *panel)
{
struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel);
- struct device *dev = &boe->dsi->dev;
- int ret;
- ret = boe_bf060y8m_aj0_off(boe);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ boe_bf060y8m_aj0_off(boe);
gpiod_set_value_cansleep(boe->reset_gpio, 1);
- ret = regulator_bulk_disable(ARRAY_SIZE(boe->vregs), boe->vregs);
+ regulator_bulk_disable(ARRAY_SIZE(boe->vregs), boe->vregs);
return 0;
}
@@ -234,13 +210,11 @@ static int boe_bf060y8m_aj0_bl_update_status(struct backlight_device *bl)
{
struct mipi_dsi_device *dsi = bl_get_data(bl);
u16 brightness = backlight_get_brightness(bl);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, brightness);
- return 0;
+ return dsi_ctx.accum_err;
}
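All conversions in this file lean on the mipi_dsi_multi_context idiom: once a step fails, later *_multi helpers become no-ops, so call sites drop their per-call error checks and return the accumulated error exactly once. A minimal sketch of the idiom (illustrative names, not the kernel API):

struct multi_ctx {
	int accum_err;	/* first failure, or 0 */
};

/* Run one step unless an earlier step already failed. */
static void multi_step(struct multi_ctx *ctx, int (*op)(void))
{
	if (ctx->accum_err)
		return;
	ctx->accum_err = op();
}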
static int boe_bf060y8m_aj0_bl_get_brightness(struct backlight_device *bl)
@@ -350,9 +324,11 @@ static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi)
struct boe_bf060y8m_aj0 *boe;
int ret;
- boe = devm_kzalloc(dev, sizeof(*boe), GFP_KERNEL);
- if (!boe)
- return -ENOMEM;
+ boe = devm_drm_panel_alloc(dev, struct boe_bf060y8m_aj0, panel,
+ &boe_bf060y8m_aj0_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(boe))
+ return PTR_ERR(boe);
ret = boe_bf060y8m_aj0_init_vregs(boe, dev);
if (ret)
@@ -374,9 +350,6 @@ static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_CLOCK_NON_CONTINUOUS |
MIPI_DSI_MODE_LPM;
- drm_panel_init(&boe->panel, dev, &boe_bf060y8m_aj0_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
boe->panel.prepare_prev_first = true;
boe->panel.backlight = boe_bf060y8m_aj0_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-boe-td4320.c b/drivers/gpu/drm/panel/panel-boe-td4320.c
new file mode 100644
index 000000000000..1956daa2c71b
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-boe-td4320.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2024 Barnabas Czeman <barnabas.czeman@mainlining.org>
+// Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+// Copyright (c) 2013, The Linux Foundation. All rights reserved.
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+struct boe_td4320 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data *supplies;
+ struct gpio_desc *reset_gpio;
+};
+
+static const struct regulator_bulk_data boe_td4320_supplies[] = {
+ { .supply = "iovcc" },
+ { .supply = "vsn" },
+ { .supply = "vsp" },
+};
+
+static inline struct boe_td4320 *to_boe_td4320(struct drm_panel *panel)
+{
+ return container_of(panel, struct boe_td4320, panel);
+}
+
+static void boe_td4320_reset(struct boe_td4320 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(30);
+}
+
+static int boe_td4320_on(struct boe_td4320 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ ctx->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x04);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xd6, 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb8,
+ 0x19, 0x55, 0x00, 0xbe, 0x00, 0x00,
+ 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb9,
+ 0x4d, 0x55, 0x05, 0xe6, 0x00, 0x02,
+ 0x03);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xba,
+ 0x9b, 0x5b, 0x07, 0xe6, 0x00, 0x13,
+ 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf9,
+ 0x44, 0x3f, 0x00, 0x8d, 0xbf);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xce,
+ 0x5d, 0x00, 0x0f, 0x1f, 0x2f, 0x3f,
+ 0x4f, 0x5f, 0x6f, 0x7f, 0x8f, 0x9f,
+ 0xaf, 0xbf, 0xcf, 0xdf, 0xef, 0xff,
+ 0x04, 0x00, 0x02, 0x02, 0x42, 0x01,
+ 0x69, 0x5a, 0x40, 0x40, 0x00, 0x00,
+ 0x04, 0xfa, 0x00);
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x00b8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ 0x2c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x00);
+ mipi_dsi_msleep(&dsi_ctx, 96);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x00);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
+}
+
+static int boe_td4320_off(struct boe_td4320 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ ctx->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ return dsi_ctx.accum_err;
+}
+
+static int boe_td4320_prepare(struct drm_panel *panel)
+{
+ struct boe_td4320 *ctx = to_boe_td4320(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(boe_td4320_supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ boe_td4320_reset(ctx);
+
+ ret = boe_td4320_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(boe_td4320_supplies), ctx->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int boe_td4320_unprepare(struct drm_panel *panel)
+{
+ struct boe_td4320 *ctx = to_boe_td4320(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = boe_td4320_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(boe_td4320_supplies), ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode boe_td4320_mode = {
+ .clock = (1080 + 86 + 2 + 100) * (2340 + 4 + 4 + 60) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 86,
+ .hsync_end = 1080 + 86 + 2,
+ .htotal = 1080 + 86 + 2 + 100,
+ .vdisplay = 2340,
+ .vsync_start = 2340 + 4,
+ .vsync_end = 2340 + 4 + 4,
+ .vtotal = 2340 + 4 + 4 + 60,
+ .width_mm = 67,
+ .height_mm = 145,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int boe_td4320_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &boe_td4320_mode);
+}
+
+static const struct drm_panel_funcs boe_td4320_panel_funcs = {
+ .prepare = boe_td4320_prepare,
+ .unprepare = boe_td4320_unprepare,
+ .get_modes = boe_td4320_get_modes,
+};
+
+static int boe_td4320_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct boe_td4320 *ctx;
+ int ret;
+
+ ctx = devm_drm_panel_alloc(dev, struct boe_td4320, panel,
+ &boe_td4320_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = devm_regulator_bulk_get_const(dev,
+ ARRAY_SIZE(boe_td4320_supplies),
+ boe_td4320_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ ctx->panel.prepare_prev_first = true;
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void boe_td4320_remove(struct mipi_dsi_device *dsi)
+{
+ struct boe_td4320 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id boe_td4320_of_match[] = {
+ { .compatible = "boe,td4320" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, boe_td4320_of_match);
+
+static struct mipi_dsi_driver boe_td4320_driver = {
+ .probe = boe_td4320_probe,
+ .remove = boe_td4320_remove,
+ .driver = {
+ .name = "panel-boe-td4320",
+ .of_match_table = boe_td4320_of_match,
+ },
+};
+module_mipi_dsi_driver(boe_td4320_driver);
+
+MODULE_AUTHOR("Barnabas Czeman <barnabas.czeman@mainlining.org>");
+MODULE_DESCRIPTION("DRM driver for BOE TD4320 FHD+ video mode DSI panel");
+MODULE_LICENSE("GPL");
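As a quick sanity check of the timings in boe_td4320_mode above: the .clock field is htotal * vtotal * refresh in kHz, and the numbers do line up.

#include <stdio.h>

int main(void)
{
	unsigned int htotal = 1080 + 86 + 2 + 100;	/* 1268 */
	unsigned int vtotal = 2340 + 4 + 4 + 60;	/* 2408 */

	/* 1268 * 2408 * 60 / 1000 = 183200 kHz, matching .clock above */
	printf("%u kHz\n", htotal * vtotal * 60 / 1000);
	return 0;
}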
diff --git a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
index 0b87f1e6ecae..f33d4f855929 100644
--- a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
+++ b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
@@ -349,9 +349,11 @@ static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
const struct panel_desc *desc;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct boe_th101mb31ig002, panel,
+ &boe_th101mb31ig002_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
@@ -383,9 +385,6 @@ static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(&dsi->dev, ret,
"Failed to get orientation\n");
- drm_panel_init(&ctx->panel, &dsi->dev, &boe_th101mb31ig002_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
index 50e4a5341bc6..20b6e11a7d84 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
@@ -166,9 +166,11 @@ static int boe_tv101wum_ll2_probe(struct mipi_dsi_device *dsi)
struct boe_tv101wum_ll2 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct boe_tv101wum_ll2, panel,
+ &boe_tv101wum_ll2_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(boe_tv101wum_ll2_supplies),
@@ -190,8 +192,6 @@ static int boe_tv101wum_ll2_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_VIDEO_HSE;
- drm_panel_init(&ctx->panel, dev, &boe_tv101wum_ll2_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c
index 6b3f4d664d2a..ae6e9ffc46cb 100644
--- a/drivers/gpu/drm/panel/panel-dsi-cm.c
+++ b/drivers/gpu/drm/panel/panel-dsi-cm.c
@@ -511,9 +511,10 @@ static int dsicm_probe(struct mipi_dsi_device *dsi)
dev_dbg(dev, "probe\n");
- ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
+ ddata = devm_drm_panel_alloc(dev, struct panel_drv_data, panel,
+ &dsicm_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ddata))
+ return PTR_ERR(ddata);
mipi_dsi_set_drvdata(dsi, ddata);
ddata->dsi = dsi;
@@ -530,9 +531,6 @@ static int dsicm_probe(struct mipi_dsi_device *dsi)
dsicm_hw_reset(ddata);
- drm_panel_init(&ddata->panel, dev, &dsicm_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
if (ddata->use_dsi_backlight) {
struct backlight_properties props = { 0 };
props.max_brightness = 255;
diff --git a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
index 0bfed0ec0bbc..fb9f9f42be4f 100644
--- a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
+++ b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
@@ -163,9 +163,11 @@ static int ebbg_ft8719_probe(struct mipi_dsi_device *dsi)
struct ebbg_ft8719 *ctx;
int i, ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ebbg_ft8719, panel,
+ &ebbg_ft8719_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
ctx->supplies[i].supply = regulator_names[i];
@@ -196,9 +198,6 @@ static int ebbg_ft8719_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &ebbg_ft8719_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 52028c8f8988..90e8c154a978 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -839,9 +839,10 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
struct device_node *ddc;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct panel_edp, base,
+ &panel_edp_funcs, DRM_MODE_CONNECTOR_eDP);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
panel->prepared_time = 0;
panel->desc = desc;
@@ -886,8 +887,6 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
dev_set_drvdata(dev, panel);
- drm_panel_init(&panel->base, dev, &panel_edp_funcs, DRM_MODE_CONNECTOR_eDP);
-
err = drm_panel_of_backlight(&panel->base);
if (err)
goto err_finished_ddc_init;
@@ -1763,6 +1762,13 @@ static const struct panel_delay delay_80_500_e50 = {
.enable = 50,
};
+static const struct panel_delay delay_80_500_e80_p2e200 = {
+ .hpd_absent = 80,
+ .unprepare = 500,
+ .enable = 80,
+ .prepare_to_enable = 200,
+};
+
static const struct panel_delay delay_100_500_e200 = {
.hpd_absent = 100,
.unprepare = 500,
@@ -1878,6 +1884,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa7b3, &delay_200_500_e50, "B140UAN04.4"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xc4b4, &delay_200_500_e50, "B116XAT04.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xc9a8, &delay_200_500_e50, "B140QAN08.H"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xd497, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
@@ -1938,6 +1945,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c93, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d73, &delay_200_500_e80, "NE140WUM-N6S"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1130, &delay_200_500_e50, "N116BGE-EB2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"),
@@ -1973,6 +1981,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1103, &delay_200_500_e80_d50, "MNB601LS1-3"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50, "MNB601LS1-4"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1448, &delay_200_500_e50, "MNE007QS3-7"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1457, &delay_80_500_e80_p2e200, "MNE007QS3-8"),
EDP_PANEL_ENTRY('E', 'T', 'C', 0x0000, &delay_50_500_e200_d200_po2e335, "LP079QX1-SP0V"),
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8279.c b/drivers/gpu/drm/panel/panel-himax-hx8279.c
new file mode 100644
index 000000000000..fb302d1f91b9
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-himax-hx8279.c
@@ -0,0 +1,1296 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Himax HX8279 DriverIC panels driver
+ *
+ * Copyright (c) 2025 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+/* Page selection */
+#define HX8279_REG_PAGE 0xb0
+ #define HX8279_PAGE_SEL GENMASK(3, 0)
+
+/* Page 0 - Driver/Module Configuration */
+#define HX8279_P0_VGHS 0xbf
+#define HX8279_P0_VGLS 0xc0
+#define HX8279_P0_VGPHS 0xc2
+#define HX8279_P0_VGNHS 0xc4
+ #define HX8279_P0_VG_SEL GENMASK(4, 0)
+ #define HX8279_VGH_MIN_MV 8700
+ #define HX8279_VGH_STEP_MV 300
+ #define HX8279_VGL_MIN_MV 6700
+ #define HX8279_VGL_STEP_MV 300
+ #define HX8279_VGPNH_MIN_MV 4000
+ #define HX8279_VGPNX_STEP_MV 50
+ #define HX8279_VGH_VOLT_SEL(x) (((x) - HX8279_VGH_MIN_MV) / HX8279_VGH_STEP_MV)
+ #define HX8279_VGL_VOLT_SEL(x) (((x) - HX8279_VGL_MIN_MV) / HX8279_VGL_STEP_MV)
+ #define HX8279_VGPN_VOLT_SEL(x) (((x) - HX8279_VGPNH_MIN_MV) / HX8279_VGPNX_STEP_MV)
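+ /* Example: vgh_mv = 16500 yields a selector of (16500 - 8700) / 300 = 26 */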
+
+/* Page 1 - Gate driver On Array (GOA) Mux */
+#define HX8279_P1_REG_GOA_L 0xc0
+#define HX8279_P1_REG_GOUTL(x) (HX8279_P1_REG_GOA_L + (x))
+#define HX8279_P1_REG_GOA_R 0xd4
+#define HX8279_P1_REG_GOUTR(x) (HX8279_P1_REG_GOA_R + (x))
+ #define HX8279_GOUT_STB GENMASK(7, 6)
+ #define HX8279_GOUT_SEL GENMASK(5, 0)
+
+/* Page 2 - Analog Gamma Configuration */
+#define HX8279_P2_REG_ANALOG_GAMMA 0xc0
+ #define HX8279_P2_REG_GAMMA_T_PVP(x) (HX8279_P2_REG_ANALOG_GAMMA + (x)) /* 0..16 */
+ #define HX8279_P2_REG_GAMMA_T_PVN(x) (HX8279_P2_REG_GAMMA_T_PVP(17) + (x)) /* 0..16 */
+
+/* Page 3 - Gate driver On Array (GOA) Configuration */
+#define HX8279_P3_REG_UNKNOWN_BA 0xba
+#define HX8279_P3_REG_GOA_CKV_FALL_PREC 0xbc
+#define HX8279_P3_REG_GOA_TIMING_ODD 0xc2
+ #define HX8279_P3_REG_GOA_TO(x) (HX8279_P3_REG_GOA_TIMING_ODD + (x)) /* GOA_T0..5 */
+#define HX8279_P3_REG_GOA_STVL 0xc8
+ #define HX8279_P3_GOA_STV_LEAD GENMASK(4, 0)
+#define HX8279_P3_REG_GOA_CKVL 0xc9
+ #define HX8279_P3_GOA_CKV_LEAD GENMASK(4, 0)
+#define HX8279_P3_REG_GOA_CKVD 0xca
+ #define HX8279_P3_GOA_CKV_NONOVERLAP BIT(7)
+ #define HX8279_P3_GOA_CKV_RESERVED BIT(6)
+ #define HX8279_P3_GOA_CKV_DUMMY GENMASK(5, 0)
+#define HX8279_P3_REG_GOA_CKV_RISE_PREC 0xcb
+#define HX8279_P3_REG_GOA_CLR1_W_ADJ 0xd2
+#define HX8279_P3_REG_GOA_CLR234_W_ADJ 0xd3
+#define HX8279_P3_REG_GOA_CLR1_CFG 0xd4
+#define HX8279_P3_REG_GOA_CLR_CFG(x) (HX8279_P3_REG_GOA_CLR1_CFG + (x)) /* CLR1..4 */
+ #define HX8279_P3_GOA_CLR_CFG_POLARITY BIT(7)
+ #define HX8279_P3_GOA_CLR_CFG_STARTPOS GENMASK(6, 0)
+#define HX8279_P3_REG_GOA_TIMING_EVEN 0xdd
+ #define HX8279_P3_REG_GOA_TE(x) (HX8279_P3_REG_GOA_TIMING_EVEN + (x))
+#define HX8279_P3_REG_UNKNOWN_E4 0xe4
+#define HX8279_P3_REG_UNKNOWN_E5 0xe5
+
+/* Page 5 - MIPI */
+#define HX8279_P5_REG_TIMING 0xb3
+ #define HX8279_P5_TIMING_THS_SETTLE GENMASK(7, 5)
+ #define HX8279_P5_TIMING_LHS_SETTLE BIT(4)
+ #define HX8279_P5_TIMING_TLPX GENMASK(3, 0)
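+ /* Example: TLPX = 2, THS_SETTLE = 2 and LHS_SETTLE set pack to 0x52 */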
+#define HX8279_P5_REG_UNKNOWN_B8 0xb8
+#define HX8279_P5_REG_UNKNOWN_BC 0xbc
+#define HX8279_P5_REG_UNKNOWN_D6 0xd6
+
+/* Page 6 - Engineer */
+#define HX8279_P6_REG_ENGINEER_PWD 0xb8
+#define HX8279_P6_REG_INHOUSE_FUNC 0xc0
+ #define HX8279_P6_ENG_UNLOCK_WORD 0xa5
+#define HX8279_P6_REG_GAMMA_CHOPPER 0xbc
+ #define HX8279_P6_GAMMA_POCGM_CTL GENMASK(6, 4)
+ #define HX8279_P6_GAMMA_POGCMD_CTL GENMASK(2, 0)
+#define HX8279_P6_REG_VOLT_ADJ 0xc7
+ /* For VCCIFS and VCCS - 0: 1450, 1: 1500, 2: 1550, 3: 1600 mV */
+ #define HX8279_P6_VOLT_ADJ_VCCIFS GENMASK(3, 2)
+ #define HX8279_P6_VOLT_ADJ_VCCS GENMASK(1, 0)
+#define HX8279_P6_REG_DLY_TIME_ADJ 0xd5
+
+/* Page 7...12 - Digital Gamma Adjustment */
+#define HX8279_PG_DIGITAL_GAMMA 0xb1
+#define HX8279_DGAMMA_DGMA1_HI GENMASK(7, 6)
+#define HX8279_DGAMMA_DGMA2_HI GENMASK(5, 4)
+#define HX8279_DGAMMA_DGMA3_HI GENMASK(3, 2)
+#define HX8279_DGAMMA_DGMA4_HI GENMASK(1, 0)
+#define HX8279_PG_DGAMMA_NUM_LO_BYTES 24
+#define HX8279_PG_DGAMMA_NUM_HI_BYTES 6
+
+struct hx8279 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi[2];
+ struct regulator_bulk_data vregs[2];
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *reset_gpio;
+ const struct hx8279_panel_desc *desc;
+ u8 last_page;
+ bool skip_voltage_config;
+ bool skip_goa_config;
+ bool skip_goa_timing;
+ bool skip_goa_even_timing;
+ bool skip_mipi_timing;
+};
+
+struct hx8279_panel_mode {
+ const struct drm_display_mode mode;
+ u8 bpc;
+ bool is_video_mode;
+};
+
+/**
+ * struct hx8279_goa_mux - Gate driver On Array Muxer
+ * @gout_l: Mux GOA signal to GOUT Left pin
+ * @gout_r: Mux GOA signal to GOUT Right pin
+ */
+struct hx8279_goa_mux {
+ u8 gout_l[20];
+ u8 gout_r[20];
+};
+
+/**
+ * struct hx8279_analog_gamma - Analog Gamma Adjustment
+ * @pos: Positive gamma op's input voltage, adjusted by VGP(H/L)
+ * @neg: Negative gamma op's input voltage, adjusted by VGN(H/L)
+ *
+ * Analog Gamma correction is performed with 17+17 reference voltages,
+ * changed with resistor streams, and defined with 17 register values
+ * for positive and 17 for negative.
+ *
+ * Each register holds resistance values, in 8.5ohms per unit, for the
+ * following gamma levels:
+ * 0, 8, 16, 28, 40, 56, 80, 128, 176, 200, 216, 228, 240, 248, 252, 255.
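+ * For example, a register value of 32 selects 32 * 8.5 = 272 ohms.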
+ */
+struct hx8279_analog_gamma {
+ u8 pos[17];
+ u8 neg[17];
+};
+
+/**
+ * struct hx8279_digital_gamma - Digital Gamma Adjustment
+ * @r: Adjustment for red component
+ * @g: Adjustment for green component
+ * @b: Adjustment for blue component
+ *
+ * The layout of this structure follows the register layout to simplify
+ * both the handling and the declaration of those values in the driver.
+ * Gamma correction is internally done with a 24 segment piecewise
+ * linear interpolation; those segments are defined with 24 ten-bit
+ * values, of which:
+ * - The LOW eight bits for the first 24 registers start at the first
+ * register (at 0xb1) of the Digital Gamma Adjustment page;
+ * - The HIGH two bits for each of the 24 registers are contained
+ * in the last six registers;
+ * - The last six registers contain four groups of two-bit HI values
+ * for each of the first 24 registers, but in an inverted fashion:
+ * the first two bits relate to the last register of a set
+ * of four.
+ *
+ * The 24 segments refer to the following gamma levels:
+ * 0, 1, 3, 7, 11, 15, 23, 31, 47, 63, 95, 127, 128, 160,
+ * 192, 208, 224, 232, 240, 244, 248, 252, 254, 255
+ */
+struct hx8279_digital_gamma {
+ u8 r[HX8279_PG_DGAMMA_NUM_LO_BYTES + HX8279_PG_DGAMMA_NUM_HI_BYTES];
+ u8 g[HX8279_PG_DGAMMA_NUM_LO_BYTES + HX8279_PG_DGAMMA_NUM_HI_BYTES];
+ u8 b[HX8279_PG_DGAMMA_NUM_LO_BYTES + HX8279_PG_DGAMMA_NUM_HI_BYTES];
+};
+
+struct hx8279_panel_desc {
+ const struct mipi_dsi_device_info dsi_info;
+ const struct hx8279_panel_mode *mode_data;
+ u8 num_lanes;
+ u8 num_modes;
+
+ /* Page 0 */
+ unsigned int vgh_mv;
+ unsigned int vgl_mv;
+ unsigned int vgph_mv;
+ unsigned int vgnh_mv;
+
+ /* Page 1 */
+ const struct hx8279_goa_mux *gmux;
+
+ /* Page 2 */
+ const struct hx8279_analog_gamma *agamma;
+
+ /* Page 3 */
+ u8 goa_unk_ba;
+ u8 goa_odd_timing[6];
+ u8 goa_even_timing[6];
+ u8 goa_stv_lead_time_ck;
+ u8 goa_ckv_lead_time_ck;
+ u8 goa_ckv_dummy_vblank_num;
+ u8 goa_ckv_rise_precharge;
+ u8 goa_ckv_fall_precharge;
+ bool goa_ckv_non_overlap_ctl;
+ u8 goa_clr1_width_adj;
+ u8 goa_clr234_width_adj;
+ s8 goa_clr_polarity[4];
+ int goa_clr_start_pos[4];
+ u8 goa_unk_e4;
+ u8 goa_unk_e5;
+
+ /* Page 5 */
+ u8 bta_tlpx;
+ bool lhs_settle_time_by_osc25;
+ u8 ths_settle_time;
+ u8 timing_unk_b8;
+ u8 timing_unk_bc;
+ u8 timing_unk_d6;
+
+ /* Page 6 */
+ u8 gamma_ctl;
+ u8 volt_adj;
+ u8 src_delay_time_adj_ck;
+
+ /* Page 7..12 */
+ const struct hx8279_digital_gamma *dgamma;
+};
+
+static inline struct hx8279 *to_hx8279(struct drm_panel *panel)
+{
+ return container_of(panel, struct hx8279, panel);
+}
+
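+/*
+ * Select the register page for the writes that follow. The last
+ * selected page is cached, and the cache is only updated when the
+ * page-select write succeeds, so redundant re-selections are skipped.
+ */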
+static void hx8279_set_page(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx, u8 page)
+{
+ const u8 cmd_set_page[] = { HX8279_REG_PAGE, page };
+
+ if (hx->last_page == page)
+ return;
+
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_page, ARRAY_SIZE(cmd_set_page));
+ if (!dsi_ctx->accum_err)
+ hx->last_page = page;
+}
+
+static void hx8279_set_module_config(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ u8 cmd_set_voltage[2];
+
+ if (hx->skip_voltage_config)
+ return;
+
+ /* Page 0 - Driver/Module Configuration */
+ hx8279_set_page(hx, dsi_ctx, 0);
+
+ if (desc->vgh_mv) {
+ cmd_set_voltage[0] = HX8279_P0_VGHS;
+ cmd_set_voltage[1] = HX8279_VGH_VOLT_SEL(desc->vgh_mv);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage,
+ ARRAY_SIZE(cmd_set_voltage));
+ }
+
+ if (desc->vgl_mv) {
+ cmd_set_voltage[0] = HX8279_P0_VGLS;
+ cmd_set_voltage[1] = HX8279_VGL_VOLT_SEL(desc->vgl_mv);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage,
+ ARRAY_SIZE(cmd_set_voltage));
+ }
+
+ if (desc->vgph_mv) {
+ cmd_set_voltage[0] = HX8279_P0_VGPHS;
+ cmd_set_voltage[1] = HX8279_VGPN_VOLT_SEL(desc->vgph_mv);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage,
+ ARRAY_SIZE(cmd_set_voltage));
+ }
+
+ if (desc->vgnh_mv) {
+ cmd_set_voltage[0] = HX8279_P0_VGNHS;
+ cmd_set_voltage[1] = HX8279_VGPN_VOLT_SEL(desc->vgnh_mv);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage,
+ ARRAY_SIZE(cmd_set_voltage));
+ }
+}
+
+static void hx8279_set_gmux(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_goa_mux *gmux = hx->desc->gmux;
+ u8 cmd_set_gmux[2];
+ int i;
+
+ if (!gmux)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 1);
+
+ for (i = 0; i < ARRAY_SIZE(gmux->gout_l); i++) {
+ cmd_set_gmux[0] = HX8279_P1_REG_GOUTL(i);
+ cmd_set_gmux[1] = gmux->gout_l[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_gmux,
+ ARRAY_SIZE(cmd_set_gmux));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(gmux->gout_r); i++) {
+ cmd_set_gmux[0] = HX8279_P1_REG_GOUTR(i);
+ cmd_set_gmux[1] = gmux->gout_r[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_gmux,
+ ARRAY_SIZE(cmd_set_gmux));
+ }
+}
+
+static void hx8279_set_analog_gamma(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_analog_gamma *agamma = hx->desc->agamma;
+ u8 cmd_set_ana_gamma[2];
+ int i;
+
+ if (!agamma)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 2);
+
+ for (i = 0; i < ARRAY_SIZE(agamma->pos); i++) {
+ cmd_set_ana_gamma[0] = HX8279_P2_REG_GAMMA_T_PVP(i);
+ cmd_set_ana_gamma[1] = agamma->pos[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_ana_gamma,
+ ARRAY_SIZE(cmd_set_ana_gamma));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(agamma->neg); i++) {
+ cmd_set_ana_gamma[0] = HX8279_P2_REG_GAMMA_T_PVN(i);
+ cmd_set_ana_gamma[1] = agamma->neg[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_ana_gamma,
+ ARRAY_SIZE(cmd_set_ana_gamma));
+ }
+}
+
+static void hx8279_set_goa_timing(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ u8 cmd_set_goa_t[2];
+ int i;
+
+ if (hx->skip_goa_timing)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 3);
+
+ for (i = 0; i < ARRAY_SIZE(desc->goa_odd_timing); i++) {
+ cmd_set_goa_t[0] = HX8279_P3_REG_GOA_TO(i);
+ cmd_set_goa_t[1] = desc->goa_odd_timing[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa_t,
+ ARRAY_SIZE(cmd_set_goa_t));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(desc->goa_even_timing); i++) {
+ cmd_set_goa_t[0] = HX8279_P3_REG_GOA_TE(i);
+ cmd_set_goa_t[1] = desc->goa_even_timing[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa_t,
+ ARRAY_SIZE(cmd_set_goa_t));
+ }
+}
+
+static void hx8279_set_goa_cfg(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ u8 cmd_set_goa[2];
+ int i;
+
+ if (hx->skip_goa_config)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 3);
+
+ if (desc->goa_unk_ba) {
+ cmd_set_goa[0] = HX8279_P3_REG_UNKNOWN_BA;
+ cmd_set_goa[1] = desc->goa_unk_ba;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_stv_lead_time_ck) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_STVL;
+ cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_STV_LEAD,
+ desc->goa_stv_lead_time_ck);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_ckv_lead_time_ck) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CKVL;
+ cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_CKV_LEAD,
+ desc->goa_ckv_lead_time_ck);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_ckv_dummy_vblank_num) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CKVD;
+ cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_CKV_DUMMY,
+ desc->goa_ckv_dummy_vblank_num);
+ cmd_set_goa[1] |= FIELD_PREP(HX8279_P3_GOA_CKV_NONOVERLAP,
+ desc->goa_ckv_non_overlap_ctl);
+ /* RESERVED must always be set */
+ cmd_set_goa[1] |= HX8279_P3_GOA_CKV_RESERVED;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ /*
+ * If either of the two values is nonzero, write both registers:
+ * the register default is zero anyway, so rewriting it is harmless.
+ */
+ if (desc->goa_ckv_rise_precharge || desc->goa_ckv_fall_precharge) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CKV_RISE_PREC;
+ cmd_set_goa[1] = desc->goa_ckv_rise_precharge;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CKV_FALL_PREC;
+ cmd_set_goa[1] = desc->goa_ckv_fall_precharge;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_clr1_width_adj) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CLR1_W_ADJ;
+ cmd_set_goa[1] = desc->goa_clr1_width_adj;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_clr234_width_adj) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CLR234_W_ADJ;
+ cmd_set_goa[1] = desc->goa_clr234_width_adj;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ /* Polarity and Start Position arrays are of the same size */
+ for (i = 0; i < ARRAY_SIZE(desc->goa_clr_polarity); i++) {
+ if (desc->goa_clr_polarity[i] < 0 || desc->goa_clr_start_pos[i] < 0)
+ continue;
+
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CLR_CFG(i);
+ cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_CLR_CFG_STARTPOS,
+ desc->goa_clr_start_pos[i]);
+ cmd_set_goa[1] |= FIELD_PREP(HX8279_P3_GOA_CLR_CFG_POLARITY,
+ desc->goa_clr_polarity[i]);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_unk_e4) {
+ cmd_set_goa[0] = HX8279_P3_REG_UNKNOWN_E4;
+ cmd_set_goa[1] = desc->goa_unk_e4;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ cmd_set_goa[0] = HX8279_P3_REG_UNKNOWN_E5;
+ cmd_set_goa[1] = desc->goa_unk_e5;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+}
+
+static void hx8279_set_mipi_cfg(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ u8 cmd_set_mipi[2];
+
+ if (hx->skip_mipi_timing)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 5);
+
+ if (desc->bta_tlpx || desc->ths_settle_time || desc->lhs_settle_time_by_osc25) {
+ cmd_set_mipi[0] = HX8279_P5_REG_TIMING;
+ cmd_set_mipi[1] = FIELD_PREP(HX8279_P5_TIMING_TLPX, desc->bta_tlpx);
+ cmd_set_mipi[1] |= FIELD_PREP(HX8279_P5_TIMING_THS_SETTLE,
+ desc->ths_settle_time);
+ cmd_set_mipi[1] |= FIELD_PREP(HX8279_P5_TIMING_LHS_SETTLE,
+ desc->lhs_settle_time_by_osc25);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi,
+ ARRAY_SIZE(cmd_set_mipi));
+ }
+
+ if (desc->timing_unk_b8) {
+ cmd_set_mipi[0] = HX8279_P5_REG_UNKNOWN_B8;
+ cmd_set_mipi[1] = desc->timing_unk_b8;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi,
+ ARRAY_SIZE(cmd_set_mipi));
+ }
+
+ if (desc->timing_unk_bc) {
+ cmd_set_mipi[0] = HX8279_P5_REG_UNKNOWN_BC;
+ cmd_set_mipi[1] = desc->timing_unk_bc;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi,
+ ARRAY_SIZE(cmd_set_mipi));
+ }
+
+ if (desc->timing_unk_d6) {
+ cmd_set_mipi[0] = HX8279_P5_REG_UNKNOWN_D6;
+ cmd_set_mipi[1] = desc->timing_unk_d6;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi,
+ ARRAY_SIZE(cmd_set_mipi));
+ }
+}
+
+static void hx8279_set_adv_cfg(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ const u8 cmd_set_dly[] = { HX8279_P6_REG_DLY_TIME_ADJ, desc->src_delay_time_adj_ck };
+ const u8 cmd_set_gamma[] = { HX8279_P6_REG_GAMMA_CHOPPER, desc->gamma_ctl };
+ const u8 cmd_set_volt_adj[] = { HX8279_P6_REG_VOLT_ADJ, desc->volt_adj };
+ u8 cmd_set_eng[] = { HX8279_P6_REG_ENGINEER_PWD, HX8279_P6_ENG_UNLOCK_WORD };
+
+ if (!desc->gamma_ctl && !desc->src_delay_time_adj_ck && !desc->volt_adj)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 6);
+
+ /* Unlock ENG settings: write same word to both ENGINEER_PWD and INHOUSE_FUNC */
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng));
+
+ cmd_set_eng[0] = HX8279_P6_REG_INHOUSE_FUNC;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng));
+
+ /* Set Gamma Chopper and Gamma buffer Chopper control */
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_gamma, ARRAY_SIZE(cmd_set_gamma));
+
+ /* Set Source delay time adjustment (CKV falling to Source off) */
+ if (desc->src_delay_time_adj_ck)
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dly,
+ ARRAY_SIZE(cmd_set_dly));
+
+ /* Set voltage adjustment */
+ if (desc->volt_adj)
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_volt_adj,
+ ARRAY_SIZE(cmd_set_volt_adj));
+
+ /* Lock ENG settings again */
+ cmd_set_eng[0] = HX8279_P6_REG_ENGINEER_PWD;
+ cmd_set_eng[1] = 0;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng));
+
+ cmd_set_eng[0] = HX8279_P6_REG_INHOUSE_FUNC;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng));
+}
+
+static void hx8279_set_digital_gamma(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_digital_gamma *dgamma = hx->desc->dgamma;
+ u8 cmd_set_dig_gamma[2];
+ int i, j;
+
+ if (!dgamma)
+ return;
+
+ /*
+ * Pages 7..9 are for RGB Positive, 10..12 are for RGB Negative:
+ * The first iteration sets all positive component registers,
+ * the second one sets all negatives.
+ */
+ for (i = 0; i < 2; i++) {
+ u8 pg_neg = i * 3;
+
+ hx8279_set_page(hx, dsi_ctx, 7 + pg_neg);
+
+ for (j = 0; j < ARRAY_SIZE(dgamma->r); j++) {
+ cmd_set_dig_gamma[0] = HX8279_PG_DIGITAL_GAMMA + j;
+ cmd_set_dig_gamma[1] = dgamma->r[j];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dig_gamma,
+ ARRAY_SIZE(cmd_set_dig_gamma));
+ }
+
+ hx8279_set_page(hx, dsi_ctx, 8 + pg_neg);
+
+ for (j = 0; j < ARRAY_SIZE(dgamma->g); j++) {
+ cmd_set_dig_gamma[0] = HX8279_PG_DIGITAL_GAMMA + j;
+ cmd_set_dig_gamma[1] = dgamma->g[j];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dig_gamma,
+ ARRAY_SIZE(cmd_set_dig_gamma));
+ }
+
+ hx8279_set_page(hx, dsi_ctx, 9 + pg_neg);
+
+ for (j = 0; j < ARRAY_SIZE(dgamma->b); j++) {
+ cmd_set_dig_gamma[0] = HX8279_PG_DIGITAL_GAMMA + j;
+ cmd_set_dig_gamma[1] = dgamma->b[j];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dig_gamma,
+ ARRAY_SIZE(cmd_set_dig_gamma));
+ }
+ }
+}
+
+static int hx8279_on(struct hx8279 *hx)
+{
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ /* Page 5 */
+ hx8279_set_mipi_cfg(hx, &dsi_ctx);
+
+ /* Page 1 */
+ hx8279_set_gmux(hx, &dsi_ctx);
+
+ /* Page 2 */
+ hx8279_set_analog_gamma(hx, &dsi_ctx);
+
+ /* Page 3 */
+ hx8279_set_goa_cfg(hx, &dsi_ctx);
+ hx8279_set_goa_timing(hx, &dsi_ctx);
+
+ /* Page 0 - Driver/Module Configuration */
+ hx8279_set_module_config(hx, &dsi_ctx);
+
+ /* Page 6 */
+ hx8279_set_adv_cfg(hx, &dsi_ctx);
+
+ /* Pages 7..12 */
+ hx8279_set_digital_gamma(hx, &dsi_ctx);
+
+ return dsi_ctx.accum_err;
+}
+
+static void hx8279_power_off(struct hx8279 *hx)
+{
+ gpiod_set_value_cansleep(hx->reset_gpio, 0);
+ usleep_range(100, 500);
+ gpiod_set_value_cansleep(hx->enable_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(hx->vregs), hx->vregs);
+}
+
+static int hx8279_disable(struct drm_panel *panel)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+
+ return 0;
+}
+
+static int hx8279_enable(struct drm_panel *panel)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ return 0;
+}
+
+static int hx8279_prepare(struct drm_panel *panel)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(hx->vregs), hx->vregs);
+ if (ret)
+ return ret;
+
+ gpiod_set_value_cansleep(hx->enable_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(hx->reset_gpio, 1);
+ usleep_range(6000, 7000);
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+ if (hx->dsi[1])
+ hx->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = hx8279_on(hx);
+ if (ret) {
+ hx8279_power_off(hx);
+ return ret;
+ }
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 130);
+
+ return dsi_ctx.accum_err;
+}
+
+static int hx8279_unprepare(struct drm_panel *panel)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 130);
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+ if (hx->dsi[1])
+ hx->dsi[1]->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ hx8279_power_off(hx);
+
+ return dsi_ctx.accum_err;
+}
+
+static int hx8279_get_modes(struct drm_panel *panel, struct drm_connector *connector)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ int i;
+
+ for (i = 0; i < hx->desc->num_modes; i++) {
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &hx->desc->mode_data[i].mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+ if (hx->desc->num_modes == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ connector->display_info.bpc = hx->desc->mode_data[0].bpc;
+ connector->display_info.height_mm = hx->desc->mode_data[0].mode.height_mm;
+ connector->display_info.width_mm = hx->desc->mode_data[0].mode.width_mm;
+
+ return hx->desc->num_modes;
+}
+
+static const struct drm_panel_funcs hx8279_panel_funcs = {
+ .disable = hx8279_disable,
+ .unprepare = hx8279_unprepare,
+ .prepare = hx8279_prepare,
+ .enable = hx8279_enable,
+ .get_modes = hx8279_get_modes,
+};
+
+static int hx8279_init_vregs(struct hx8279 *hx, struct device *dev)
+{
+ int ret;
+
+ hx->vregs[0].supply = "vdd";
+ hx->vregs[1].supply = "iovcc";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hx->vregs),
+ hx->vregs);
+ if (ret < 0)
+ return ret;
+
+ ret = regulator_is_supported_voltage(hx->vregs[0].consumer,
+ 3000000, 5000000);
+ if (!ret)
+ return -EINVAL;
+
+ ret = regulator_is_supported_voltage(hx->vregs[1].consumer,
+ 1700000, 1900000);
+ if (!ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hx8279_check_gmux_config(struct hx8279 *hx, struct device *dev)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ const struct hx8279_goa_mux *gmux = desc->gmux;
+ int i;
+
+ /* No gmux defined means we simply skip the GOA mux configuration */
+ if (!gmux)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(gmux->gout_l); i++) {
+ if (gmux->gout_l[i] > (HX8279_GOUT_STB | HX8279_GOUT_SEL))
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid value found in gout_l[%d]\n", i);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(gmux->gout_r); i++) {
+ if (gmux->gout_r[i] > (HX8279_GOUT_STB | HX8279_GOUT_SEL))
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid value found in gout_r[%d]\n", i);
+ }
+
+ return 0;
+}
+
+static int hx8279_check_goa_config(struct hx8279 *hx, struct device *dev)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ bool goa_odd_valid, goa_even_valid;
+ int i, num_zero, num_clr = 0;
+
+ /* The odd timing array counts as unset if all of its values are zero */
+ num_zero = 0;
+ for (i = 0; i < ARRAY_SIZE(desc->goa_odd_timing); i++) {
+ if (!desc->goa_odd_timing[i])
+ num_zero++;
+ }
+
+ goa_odd_valid = (num_zero != ARRAY_SIZE(desc->goa_odd_timing));
+
+ /* Same check for the even timing array */
+ num_zero = 0;
+ for (i = 0; i < ARRAY_SIZE(desc->goa_even_timing); i++) {
+ if (!desc->goa_even_timing[i])
+ num_zero++;
+ }
+
+ goa_even_valid = (num_zero != ARRAY_SIZE(desc->goa_even_timing));
+
+ /* Programming one without the other would make no sense! */
+ if (goa_odd_valid != goa_even_valid)
+ return -EINVAL;
+
+ /* We know that both are either true or false now, check just one */
+ if (!goa_odd_valid)
+ hx->skip_goa_timing = true;
+
+ if (!desc->goa_unk_ba && !desc->goa_stv_lead_time_ck &&
+ !desc->goa_ckv_lead_time_ck && !desc->goa_ckv_dummy_vblank_num &&
+ !desc->goa_ckv_rise_precharge && !desc->goa_ckv_fall_precharge &&
+ !desc->goa_clr1_width_adj && !desc->goa_clr234_width_adj &&
+ !desc->goa_unk_e4 && !desc->goa_unk_e5) {
+ hx->skip_goa_config = true;
+ return 0;
+ }
+
+ if ((desc->goa_stv_lead_time_ck > HX8279_P3_GOA_STV_LEAD) ||
+ (desc->goa_ckv_lead_time_ck > HX8279_P3_GOA_CKV_LEAD) ||
+ (desc->goa_ckv_dummy_vblank_num > HX8279_P3_GOA_CKV_DUMMY))
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid lead timings in GOA config\n");
+
+ /*
+ * Don't perform zero check for polarity and start position, as
+ * both pol=0 and start=0 are valid configuration values.
+ */
+ for (i = 0; i < ARRAY_SIZE(desc->goa_clr_start_pos); i++) {
+ if (desc->goa_clr_start_pos[i] < 0)
+ continue;
+ else if (desc->goa_clr_start_pos[i] > HX8279_P3_GOA_CLR_CFG_STARTPOS)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid start position for CLR%d\n", i + 1);
+ else
+ num_clr++;
+ }
+ if (!num_clr)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(desc->goa_clr_polarity); i++) {
+ if (num_clr < 0)
+ return -EINVAL;
+
+ if (desc->goa_clr_polarity[i] < 0)
+ continue;
+ else if (desc->goa_clr_polarity[i] > 1)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid polarity for CLR%d\n", i + 1);
+ else
+ num_clr--;
+ }
+
+ return 0;
+}
+
+static int hx8279_check_dig_gamma(struct hx8279 *hx, struct device *dev, const u8 *component)
+{
+ u8 gamma_high_bits[4];
+ u16 prev_val = 0;
+ int i, j, k, x;
+
+ /*
+ * The gamma values are 10 bits long and shall be incremental
+ * to form a digital gamma correction reference curve.
+ *
+ * As for the register format: the first 24 bytes each contain the
+ * lowest 8 bits of one gamma level reference, and the last
+ * 6 bytes contain the high two bits of 4 registers at a time, where
+ * the first two bits are relative to the last register, and the last
+ * two are relative to the first register.
+ *
+ * Put another way, these are the first four LOW values:
+ * DGMA1_LO = 0xb1, DGMA2_LO = 0xb2, DGMA3_LO = 0xb3, DGMA4_LO = 0xb4
+ *
+ * The high values for those four are at DGMA1_4_HI = 0xc9;
+ * ...and DGMA1_4_HI's data contains the following bits:
+ * [1:0] = DGMA4_HI, [3:2] = DGMA3_HI, [5:4] = DGMA2_HI, [7:6] = DGMA1_HI
+ */
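+ /*
+ * Worked example: with component[24] = 0x80, DGMA1_HI is 0x2, so with
+ * component[0] = 0x12 the first reference is (0x2 << 8) | 0x12 = 530.
+ */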
+ for (i = 0; i < HX8279_PG_DGAMMA_NUM_HI_BYTES; i++) {
+ k = HX8279_PG_DGAMMA_NUM_LO_BYTES + i;
+ j = i * 4;
+ x = 0;
+
+ gamma_high_bits[0] = FIELD_GET(HX8279_DGAMMA_DGMA1_HI, component[k]);
+ gamma_high_bits[1] = FIELD_GET(HX8279_DGAMMA_DGMA2_HI, component[k]);
+ gamma_high_bits[2] = FIELD_GET(HX8279_DGAMMA_DGMA3_HI, component[k]);
+ gamma_high_bits[3] = FIELD_GET(HX8279_DGAMMA_DGMA4_HI, component[k]);
+
+ do {
+ u16 cur_val = component[j] | (gamma_high_bits[x] << 8);
+
+ if (cur_val < prev_val)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid dgamma values: %u < %u!\n",
+ cur_val, prev_val);
+ prev_val = cur_val;
+ j++;
+ x++;
+ } while (x < 4);
+ }
+
+ return 0;
+}
+
+static int hx8279_check_params(struct hx8279 *hx, struct device *dev)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ int ret;
+
+ /* Voltages config validation */
+ if (!desc->vgh_mv && !desc->vgl_mv && !desc->vgph_mv && !desc->vgnh_mv)
+ hx->skip_voltage_config = true;
+ else if ((desc->vgh_mv && desc->vgh_mv < HX8279_VGH_MIN_MV) ||
+ (desc->vgl_mv && desc->vgl_mv < HX8279_VGL_MIN_MV) ||
+ (desc->vgph_mv && desc->vgph_mv < HX8279_VGPNH_MIN_MV) ||
+ (desc->vgnh_mv && desc->vgnh_mv < HX8279_VGPNH_MIN_MV))
+ return -EINVAL;
+
+ /* GOA Muxing validation */
+ ret = hx8279_check_gmux_config(hx, dev);
+ if (ret)
+ return ret;
+
+ /* GOA Configuration validation */
+ ret = hx8279_check_goa_config(hx, dev);
+ if (ret)
+ return ret;
+
+ /* MIPI Configuration validation */
+ if (!desc->bta_tlpx && !desc->lhs_settle_time_by_osc25 &&
+ !desc->ths_settle_time && !desc->timing_unk_b8 &&
+ !desc->timing_unk_bc && !desc->timing_unk_d6)
+ hx->skip_mipi_timing = true;
+
+ /* ENG/Gamma Configuration validation */
+ if (desc->gamma_ctl > (HX8279_P6_GAMMA_POCGM_CTL | HX8279_P6_GAMMA_POGCMD_CTL))
+ return -EINVAL;
+
+ /* Digital Gamma values validation */
+ if (desc->dgamma) {
+ ret = hx8279_check_dig_gamma(hx, dev, desc->dgamma->r);
+ if (ret)
+ return ret;
+
+ ret = hx8279_check_dig_gamma(hx, dev, desc->dgamma->g);
+ if (ret)
+ return ret;
+
+ ret = hx8279_check_dig_gamma(hx, dev, desc->dgamma->b);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hx8279_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct device_node *dsi_r;
+ struct hx8279 *hx;
+ int i, ret;
+
+ hx = devm_drm_panel_alloc(dev, struct hx8279, panel,
+ &hx8279_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(hx))
+ return PTR_ERR(hx);
+
+ ret = hx8279_init_vregs(hx, dev);
+ if (ret)
+ return ret;
+
+ hx->desc = device_get_match_data(dev);
+ if (!hx->desc)
+ return -ENODEV;
+
+ /*
+ * In some DriverICs some or all fields may be OTP: perform a
+ * basic configuration check before writing, to help avoid
+ * irreparable mistakes.
+ *
+ * Please note that this is not perfect: it only checks whether
+ * the values are plausible; values that are wrong for a specific
+ * display, but still plausible for the DriverIC configuration,
+ * will be accepted.
+ */
+ ret = hx8279_check_params(hx, dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Invalid DriverIC configuration\n");
+
+ /* The enable line may be always tied to VCCIO, so this is optional */
+ hx->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_ASIS);
+ if (IS_ERR(hx->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(hx->enable_gpio),
+ "Failed to get enable GPIO\n");
+
+ hx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(hx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(hx->reset_gpio),
+ "Failed to get reset GPIO\n");
+
+ /* If the panel is connected on two DSIs then DSI0 left, DSI1 right */
+ dsi_r = of_graph_get_remote_node(dsi->dev.of_node, 1, -1);
+ if (dsi_r) {
+ const struct mipi_dsi_device_info *info = &hx->desc->dsi_info;
+ struct mipi_dsi_host *dsi_r_host;
+
+ dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r);
+ of_node_put(dsi_r);
+ if (!dsi_r_host)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "Cannot get secondary DSI host\n");
+
+ hx->dsi[1] = devm_mipi_dsi_device_register_full(dev, dsi_r_host, info);
+ if (IS_ERR(hx->dsi[1]))
+ return dev_err_probe(dev, PTR_ERR(hx->dsi[1]),
+ "Cannot get secondary DSI node\n");
+ mipi_dsi_set_drvdata(hx->dsi[1], hx);
+ }
+
+ hx->dsi[0] = dsi;
+ mipi_dsi_set_drvdata(dsi, hx);
+
+ ret = drm_panel_of_backlight(&hx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&hx->panel);
+
+ for (i = 0; i < 2; i++) {
+ if (!hx->dsi[i])
+ continue;
+
+ hx->dsi[i]->lanes = hx->desc->num_lanes;
+ hx->dsi[i]->format = MIPI_DSI_FMT_RGB888;
+
+ hx->dsi[i]->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_LPM;
+
+ if (hx->desc->mode_data[0].is_video_mode)
+ hx->dsi[i]->mode_flags |= MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+
+ ret = devm_mipi_dsi_attach(dev, hx->dsi[i]);
+ if (ret < 0) {
+ drm_panel_remove(&hx->panel);
+ return dev_err_probe(dev, ret,
+ "Cannot attach to DSI%d host.\n", i);
+ }
+ }
+
+ return 0;
+}
+
+static void hx8279_remove(struct mipi_dsi_device *dsi)
+{
+ struct hx8279 *hx = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&hx->panel);
+}
+
+static const struct hx8279_panel_mode aoly_sl101pm1794fog_v15_modes[] = {
+ {
+ .mode = {
+ .clock = 159420,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 80,
+ .hsync_end = 1200 + 80 + 60,
+ .htotal = 1200 + 80 + 60 + 24,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 10,
+ .vsync_end = 1920 + 10 + 14,
+ .vtotal = 1920 + 10 + 14 + 4,
+ .width_mm = 136,
+ .height_mm = 217,
+ .type = DRM_MODE_TYPE_DRIVER
+ },
+ .bpc = 8,
+ .is_video_mode = true,
+ },
+};
+
+static const struct hx8279_panel_mode startek_kd070fhfid078_modes[] = {
+ {
+ .mode = {
+ .clock = 156458,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 50,
+ .hsync_end = 1200 + 50 + 24,
+ .htotal = 1200 + 50 + 24 + 66,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 14,
+ .vsync_end = 1920 + 14 + 2,
+ .vtotal = 1920 + 14 + 2 + 10,
+ .width_mm = 95,
+ .height_mm = 151,
+ .type = DRM_MODE_TYPE_DRIVER
+ },
+ .bpc = 8,
+ .is_video_mode = true,
+ },
+};
+
+static const struct hx8279_goa_mux aoly_sl101pm1794fog_v15_gmux = {
+ .gout_l = { 0x5, 0x5, 0xb, 0xb, 0x9, 0x9, 0x16, 0x16, 0xe, 0xe,
+ 0x7, 0x7, 0x26, 0x26, 0x15, 0x15, 0x1, 0x1, 0x3, 0x3 },
+ .gout_r = { 0x6, 0x6, 0xc, 0xc, 0xa, 0xa, 0x16, 0x16, 0xe, 0xe,
+ 0x8, 0x8, 0x26, 0x26, 0x15, 0x15, 0x2, 0x2, 0x4, 0x4 },
+};
+
+static const struct hx8279_analog_gamma aoly_sl101pm1794fog_v15_ana_gamma = {
+ .pos = { 0x0, 0xd, 0x17, 0x26, 0x31, 0x1c, 0x2c, 0x33, 0x31,
+ 0x37, 0x37, 0x37, 0x39, 0x2e, 0x2f, 0x2f, 0x7 },
+ .neg = { 0x0, 0xd, 0x17, 0x26, 0x31, 0x3f, 0x3f, 0x3f, 0x3f,
+ 0x37, 0x37, 0x37, 0x39, 0x2e, 0x2f, 0x2f, 0x7 },
+};
+
+static const struct hx8279_digital_gamma aoly_sl101pm1794fog_v15_dig_gamma = {
+ .r = { 0x0, 0x5, 0x10, 0x22, 0x36, 0x4a, 0x6c, 0x9a, 0xd7, 0x17,
+ 0x92, 0x15, 0x18, 0x8c, 0x0, 0x3a, 0x72, 0x8c, 0xa5, 0xb1,
+ 0xbe, 0xca, 0xd1, 0xd4, 0x0, 0x0, 0x16, 0xaf, 0xff, 0xff },
+ .g = { 0x4, 0x5, 0x11, 0x24, 0x39, 0x4e, 0x72, 0xa3, 0xe1, 0x25,
+ 0xa8, 0x2e, 0x32, 0xad, 0x28, 0x63, 0x9b, 0xb5, 0xcf, 0xdb,
+ 0xe8, 0xf5, 0xfa, 0xfc, 0x0, 0x0, 0x16, 0xaf, 0xff, 0xff },
+ .b = { 0x4, 0x4, 0xf, 0x22, 0x37, 0x4d, 0x71, 0xa2, 0xe1, 0x26,
+ 0xa9, 0x2f, 0x33, 0xac, 0x24, 0x5d, 0x94, 0xac, 0xc5, 0xd1,
+ 0xdc, 0xe8, 0xed, 0xf0, 0x0, 0x0, 0x16, 0xaf, 0xff, 0xff },
+};
+
+static const struct hx8279_panel_desc aoly_sl101pm1794fog_v15 = {
+ .dsi_info = {
+ .type = "L101PM1794FOG-V15",
+ .channel = 0,
+ .node = NULL,
+ },
+ .mode_data = aoly_sl101pm1794fog_v15_modes,
+ .num_modes = ARRAY_SIZE(aoly_sl101pm1794fog_v15_modes),
+ .num_lanes = 4,
+
+ /* Driver/Module Configuration: LC Matrix voltages */
+ .vgh_mv = 16500,
+ .vgl_mv = 11200,
+ .vgph_mv = 4600,
+ .vgnh_mv = 4600,
+
+ /* Analog Gamma correction */
+ .agamma = &aoly_sl101pm1794fog_v15_ana_gamma,
+
+ /* Gate driver On Array (GOA) Muxing */
+ .gmux = &aoly_sl101pm1794fog_v15_gmux,
+
+ /* Gate driver On Array (GOA) Mux Config */
+ .goa_unk_ba = 0xf0,
+ .goa_odd_timing = { 0, 0, 0, 42, 0, 0 },
+ .goa_even_timing = { 1, 42, 0, 0 },
+ .goa_stv_lead_time_ck = 11,
+ .goa_ckv_lead_time_ck = 7,
+ .goa_ckv_dummy_vblank_num = 3,
+ .goa_ckv_rise_precharge = 1,
+ .goa_clr1_width_adj = 0,
+ .goa_clr234_width_adj = 0,
+ .goa_clr_polarity = { 1, 0, 0, 0 },
+ .goa_clr_start_pos = { 8, 9, 3, 4 },
+ .goa_unk_e4 = 0xc0,
+ .goa_unk_e5 = 0x0d,
+
+ /* MIPI Configuration */
+ .bta_tlpx = 2,
+ .lhs_settle_time_by_osc25 = true,
+ .ths_settle_time = 2,
+ .timing_unk_b8 = 0xa5,
+ .timing_unk_bc = 0x20,
+ .timing_unk_d6 = 0x7f,
+
+ /* ENG/Gamma Configuration */
+ .gamma_ctl = 0,
+ .volt_adj = FIELD_PREP_CONST(HX8279_P6_VOLT_ADJ_VCCIFS, 3) |
+ FIELD_PREP_CONST(HX8279_P6_VOLT_ADJ_VCCS, 3),
+ .src_delay_time_adj_ck = 50,
+
+ /* Digital Gamma Adjustment */
+ .dgamma = &aoly_sl101pm1794fog_v15_dig_gamma,
+};
+
+static const struct hx8279_goa_mux startek_kd070fhfid078_gmux = {
+ .gout_l = { 0xd, 0xd, 0x6, 0x6, 0x8, 0x8, 0xa, 0xa, 0xc, 0xc,
+ 0x0, 0x0, 0xe, 0xe, 0x1, 0x1, 0x4, 0x4, 0x0, 0x0 },
+ .gout_r = { 0xd, 0xd, 0x5, 0x5, 0x7, 0x7, 0x9, 0x9, 0xb, 0xb,
+ 0x0, 0x0, 0xe, 0xe, 0x1, 0x1, 0x3, 0x3, 0x0, 0x0 },
+};
+
+static const struct hx8279_panel_desc startek_kd070fhfid078 = {
+ .dsi_info = {
+ .type = "KD070FHFID078",
+ .channel = 0,
+ .node = NULL,
+ },
+ .mode_data = startek_kd070fhfid078_modes,
+ .num_modes = ARRAY_SIZE(startek_kd070fhfid078_modes),
+ .num_lanes = 4,
+
+ /* Driver/Module Configuration: LC Matrix voltages */
+ .vgh_mv = 18000,
+ .vgl_mv = 12100,
+ .vgph_mv = 5500,
+ .vgnh_mv = 5500,
+
+ /* Gate driver On Array (GOA) Mux Config */
+ .gmux = &startek_kd070fhfid078_gmux,
+
+ /* Gate driver On Array (GOA) Configuration */
+ .goa_unk_ba = 0xf0,
+ .goa_stv_lead_time_ck = 7,
+ .goa_ckv_lead_time_ck = 3,
+ .goa_ckv_dummy_vblank_num = 1,
+ .goa_ckv_rise_precharge = 0,
+ .goa_ckv_fall_precharge = 0,
+ .goa_clr1_width_adj = 1,
+ .goa_clr234_width_adj = 5,
+ .goa_clr_polarity = { 0, 1, -1, -1 },
+ .goa_clr_start_pos = { 5, 10, -1, -1 },
+ .goa_unk_e4 = 0xc0,
+ .goa_unk_e5 = 0x00,
+
+ /* MIPI Configuration */
+ .bta_tlpx = 2,
+ .lhs_settle_time_by_osc25 = true,
+ .ths_settle_time = 2,
+ .timing_unk_b8 = 0x7f,
+ .timing_unk_bc = 0x20,
+ .timing_unk_d6 = 0x7f,
+
+ /* ENG/Gamma Configuration */
+ .gamma_ctl = FIELD_PREP_CONST(HX8279_P6_GAMMA_POCGM_CTL, 1) |
+ FIELD_PREP_CONST(HX8279_P6_GAMMA_POGCMD_CTL, 1),
+ .src_delay_time_adj_ck = 72,
+};
+
+static const struct of_device_id hx8279_of_match[] = {
+ { .compatible = "aoly,sl101pm1794fog-v15", .data = &aoly_sl101pm1794fog_v15 },
+ { .compatible = "startek,kd070fhfid078", .data = &startek_kd070fhfid078 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hx8279_of_match);
+
+static struct mipi_dsi_driver hx8279_driver = {
+ .probe = hx8279_probe,
+ .remove = hx8279_remove,
+ .driver = {
+ .name = "panel-himax-hx8279",
+ .of_match_table = hx8279_of_match,
+ },
+};
+module_mipi_dsi_driver(hx8279_driver);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_DESCRIPTION("Himax HX8279 DriverIC panels driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
index 92b03a2f65a3..ff994bf0e3cc 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8394.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
@@ -80,7 +80,7 @@ struct hx8394_panel_desc {
unsigned int lanes;
unsigned long mode_flags;
enum mipi_dsi_pixel_format format;
- int (*init_sequence)(struct hx8394 *ctx);
+ void (*init_sequence)(struct mipi_dsi_multi_context *dsi_ctx);
};
static inline struct hx8394 *panel_to_hx8394(struct drm_panel *panel)
@@ -88,98 +88,94 @@ static inline struct hx8394 *panel_to_hx8394(struct drm_panel *panel)
return container_of(panel, struct hx8394, panel);
}
-static int hsd060bhw4_init_sequence(struct hx8394 *ctx)
+static void hsd060bhw4_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-
/* 5.19.8 SETEXTC: Set extension command (B9h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
- 0xff, 0x83, 0x94);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x48, 0x11, 0x71, 0x09, 0x32, 0x24, 0x71, 0x31, 0x55, 0x30);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x48, 0x11, 0x71, 0x09, 0x32, 0x24, 0x71, 0x31, 0x55, 0x30);
/* 5.19.9 SETMIPI: Set MIPI control (BAh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
- 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
/* 5.19.3 SETDISP: Set display related register (B2h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
- 0x00, 0x80, 0x78, 0x0c, 0x07);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x78, 0x0c, 0x07);
/* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
- 0x12, 0x63, 0x12, 0x63, 0x12, 0x63, 0x01, 0x0c, 0x7c, 0x55,
- 0x00, 0x3f, 0x12, 0x6b, 0x12, 0x6b, 0x12, 0x6b, 0x01, 0x0c,
- 0x7c);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x12, 0x63, 0x12, 0x63, 0x12, 0x63, 0x01, 0x0c, 0x7c, 0x55,
+ 0x00, 0x3f, 0x12, 0x6b, 0x12, 0x6b, 0x12, 0x6b, 0x01, 0x0c,
+ 0x7c);
/* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
- 0x00, 0x00, 0x00, 0x00, 0x3c, 0x1c, 0x00, 0x00, 0x32, 0x10,
- 0x09, 0x00, 0x09, 0x32, 0x15, 0xad, 0x05, 0xad, 0x32, 0x00,
- 0x00, 0x00, 0x00, 0x37, 0x03, 0x0b, 0x0b, 0x37, 0x00, 0x00,
- 0x00, 0x0c, 0x40);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x00, 0x00, 0x3c, 0x1c, 0x00, 0x00, 0x32, 0x10,
+ 0x09, 0x00, 0x09, 0x32, 0x15, 0xad, 0x05, 0xad, 0x32, 0x00,
+ 0x00, 0x00, 0x00, 0x37, 0x03, 0x0b, 0x0b, 0x37, 0x00, 0x00,
+ 0x00, 0x0c, 0x40);
/* 5.19.20 Set GIP Option1 (D5h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
- 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, 0x1a, 0x1a, 0x00, 0x01,
- 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x20, 0x21, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x24, 0x25, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, 0x1a, 0x1a, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x20, 0x21, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x24, 0x25, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
/* 5.19.21 Set GIP Option2 (D6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
- 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, 0x1a, 0x1a, 0x07, 0x06,
- 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x25, 0x24, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x21, 0x20, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, 0x1a, 0x1a, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x25, 0x24, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x21, 0x20, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
/* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
- 0x00, 0x04, 0x0c, 0x12, 0x14, 0x18, 0x1a, 0x18, 0x31, 0x3f,
- 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f, 0x82, 0x7e, 0x8a,
- 0x99, 0x4a, 0x48, 0x49, 0x4b, 0x4a, 0x4c, 0x4b, 0x7f, 0x00,
- 0x04, 0x0c, 0x11, 0x13, 0x17, 0x1a, 0x18, 0x31,
- 0x3f, 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f,
- 0x82, 0x7e, 0x8a, 0x99, 0x4a, 0x48, 0x49, 0x4b,
- 0x4a, 0x4c, 0x4b, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x04, 0x0c, 0x12, 0x14, 0x18, 0x1a, 0x18, 0x31, 0x3f,
+ 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f, 0x82, 0x7e, 0x8a,
+ 0x99, 0x4a, 0x48, 0x49, 0x4b, 0x4a, 0x4c, 0x4b, 0x7f, 0x00,
+ 0x04, 0x0c, 0x11, 0x13, 0x17, 0x1a, 0x18, 0x31,
+ 0x3f, 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f,
+ 0x82, 0x7e, 0x8a, 0x99, 0x4a, 0x48, 0x49, 0x4b,
+ 0x4a, 0x4c, 0x4b, 0x7f);
/* 5.19.17 SETPANEL (CCh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
- 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x0b);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
- 0x1f, 0x31);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x31);
/* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
- 0x7d, 0x7d);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x7d, 0x7d);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0x02);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0xed);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0xed);
}
static const struct drm_display_mode hsd060bhw4_mode = {
@@ -205,114 +201,110 @@ static const struct hx8394_panel_desc hsd060bhw4_desc = {
.init_sequence = hsd060bhw4_init_sequence,
};
-static int powkiddy_x55_init_sequence(struct hx8394 *ctx)
+static void powkiddy_x55_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-
/* 5.19.8 SETEXTC: Set extension command (B9h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
- 0xff, 0x83, 0x94);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
/* 5.19.9 SETMIPI: Set MIPI control (BAh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
- 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47);
/* 5.19.3 SETDISP: Set display related register (B2h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
- 0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f);
/* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
- 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75,
- 0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c,
- 0x86);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75,
+ 0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c,
+ 0x86);
/* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
- 0x6e, 0x6e);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x6e, 0x6e);
/* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
- 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10,
- 0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15,
- 0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
- 0x07, 0x0c, 0x40);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10,
+ 0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15,
+ 0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
+ 0x07, 0x0c, 0x40);
/* 5.19.20 Set GIP Option1 (D5h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
- 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18,
- 0x26, 0x27, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21,
- 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18,
+ 0x26, 0x27, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21,
+ 0x18, 0x18, 0x18, 0x18);
/* 5.19.21 Set GIP Option2 (D6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
- 0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18,
- 0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24,
- 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
+ 0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18,
+ 0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24,
+ 0x18, 0x18, 0x18, 0x18);
/* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
- 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56,
- 0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 0x98, 0xa8,
- 0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
- 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65,
- 0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba,
- 0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56,
+ 0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 0x98, 0xa8,
+ 0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
+ 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65,
+ 0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba,
+ 0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
- 0x1f, 0x31);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x31);
/* 5.19.17 SETPANEL (CCh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
- 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x0b);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0x02);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x02);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN5,
- 0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN5,
+ 0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2,
- 0xed);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN2,
+ 0xed);
}
static const struct drm_display_mode powkiddy_x55_mode = {
@@ -339,131 +331,127 @@ static const struct hx8394_panel_desc powkiddy_x55_desc = {
.init_sequence = powkiddy_x55_init_sequence,
};
-static int mchp_ac40t08a_init_sequence(struct hx8394 *ctx)
+static void mchp_ac40t08a_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-
/* DCS commands do not seem to be sent correctly without this delay */
- msleep(20);
+ mipi_dsi_msleep(dsi_ctx, 20);
/* 5.19.8 SETEXTC: Set extension command (B9h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
- 0xff, 0x83, 0x94);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
/* 5.19.9 SETMIPI: Set MIPI control (BAh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
- 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x48, 0x12, 0x72, 0x09, 0x32, 0x54,
- 0x71, 0x71, 0x57, 0x47);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x48, 0x12, 0x72, 0x09, 0x32, 0x54,
+ 0x71, 0x71, 0x57, 0x47);
/* 5.19.3 SETDISP: Set display related register (B2h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
- 0x00, 0x80, 0x64, 0x0c, 0x0d, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x64, 0x0c, 0x0d, 0x2f);
/* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
- 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
- 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f,
- 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
- 0x01, 0x0c, 0x86);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
+ 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
+ 0x01, 0x0c, 0x86);
/* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
- 0x6e, 0x6e);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x6e, 0x6e);
/* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
- 0x00, 0x00, 0x07, 0x07, 0x40, 0x07,
- 0x0c, 0x00, 0x08, 0x10, 0x08, 0x00,
- 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a,
- 0x02, 0x15, 0x06, 0x05, 0x06, 0x47,
- 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
- 0x07, 0x0c, 0x40);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x07, 0x07, 0x40, 0x07,
+ 0x0c, 0x00, 0x08, 0x10, 0x08, 0x00,
+ 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a,
+ 0x02, 0x15, 0x06, 0x05, 0x06, 0x47,
+ 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
+ 0x07, 0x0c, 0x40);
/* 5.19.20 Set GIP Option1 (D5h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01,
- 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25,
- 0x18, 0x18, 0x26, 0x27, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x20, 0x21, 0x18, 0x18,
- 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25,
+ 0x18, 0x18, 0x26, 0x27, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x20, 0x21, 0x18, 0x18,
+ 0x18, 0x18);
/* 5.19.21 Set GIP Option2 (D6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06,
- 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
- 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20,
- 0x18, 0x18, 0x27, 0x26, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x25, 0x24, 0x18, 0x18,
- 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
+ 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20,
+ 0x18, 0x18, 0x27, 0x26, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x25, 0x24, 0x18, 0x18,
+ 0x18, 0x18);
/* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
- 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21,
- 0x24, 0x22, 0x47, 0x56, 0x65, 0x66,
- 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d,
- 0x98, 0xa8, 0xb9, 0x5d, 0x5c, 0x61,
- 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
- 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24,
- 0x22, 0x47, 0x56, 0x65, 0x65, 0x6e,
- 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99,
- 0xa8, 0xba, 0x5d, 0x5d, 0x62, 0x67,
- 0x6b, 0x72, 0x7f, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21,
+ 0x24, 0x22, 0x47, 0x56, 0x65, 0x66,
+ 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d,
+ 0x98, 0xa8, 0xb9, 0x5d, 0x5c, 0x61,
+ 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
+ 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24,
+ 0x22, 0x47, 0x56, 0x65, 0x65, 0x6e,
+ 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99,
+ 0xa8, 0xba, 0x5d, 0x5d, 0x62, 0x67,
+ 0x6b, 0x72, 0x7f, 0x7f);
/* Unknown command, not listed in the HX8394-F datasheet (C0H) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
- 0x1f, 0x73);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x73);
	/* Set CABC control (C9h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCABC,
- 0x76, 0x00, 0x30);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCABC,
+ 0x76, 0x00, 0x30);
/* 5.19.17 SETPANEL (CCh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
- 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x0b);
/* Unknown command, not listed in the HX8394-F datasheet (D4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0x02);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x02);
	/* Unknown command, not listed in the HX8394-F datasheet (D8h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* Unknown command, not listed in the HX8394-F datasheet (C6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2,
- 0xed);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN2,
+ 0xed);
}
static const struct drm_display_mode mchp_ac40t08a_mode = {
@@ -493,35 +481,31 @@ static int hx8394_enable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
int ret;
- ret = ctx->desc->init_sequence(ctx);
- if (ret) {
- dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
- return ret;
- }
+ ctx->desc->init_sequence(&dsi_ctx);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret) {
- dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ if (dsi_ctx.accum_err)
+ return dsi_ctx.accum_err;
/* Panel is operational 120 msec after reset */
msleep(120);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret) {
- dev_err(ctx->dev, "Failed to turn on the display: %d\n", ret);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ if (dsi_ctx.accum_err)
goto sleep_in;
- }
return 0;
sleep_in:
+ ret = dsi_ctx.accum_err;
+ dsi_ctx.accum_err = 0;
+
	/* This will probably fail, but let's try an orderly power off anyway. */
- if (!mipi_dsi_dcs_enter_sleep_mode(dsi))
- msleep(50);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
return ret;
}
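/*
 * A minimal sketch of the multi-context pattern, assuming only the
 * mipi_dsi_multi_context helpers from <drm/drm_mipi_dsi.h>; the
 * example_enable() name is hypothetical. Each *_multi() call becomes a
 * no-op once accum_err is non-zero, so one check at the end replaces
 * per-call error handling:
 *
 *	static int example_enable(struct mipi_dsi_device *dsi)
 *	{
 *		struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
 *
 *		mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
 *		mipi_dsi_msleep(&dsi_ctx, 120);		(skipped on earlier error)
 *		mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
 *
 *		return dsi_ctx.accum_err;		(first error, or 0)
 *	}
 */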
@@ -530,17 +514,12 @@ static int hx8394_disable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret) {
- dev_err(ctx->dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- msleep(50); /* about 3 frames */
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50); /* about 3 frames */
- return 0;
+ return dsi_ctx.accum_err;
}
static int hx8394_unprepare(struct drm_panel *panel)
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
index 5d115ecd5dd4..b6429795e8f5 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
@@ -413,15 +413,10 @@ static int panel_nv3051d_probe(struct mipi_dsi_device *dsi)
static void panel_nv3051d_shutdown(struct mipi_dsi_device *dsi)
{
struct panel_nv3051d *ctx = mipi_dsi_get_drvdata(dsi);
- int ret;
- ret = drm_panel_unprepare(&ctx->panel);
- if (ret < 0)
- dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
+ drm_panel_unprepare(&ctx->panel);
- ret = drm_panel_disable(&ctx->panel);
- if (ret < 0)
- dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
+ drm_panel_disable(&ctx->panel);
}
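/*
 * Shutdown cannot meaningfully act on a failed unprepare/disable, so
 * the error returns are intentionally ignored here; drm_panel_unprepare()
 * and drm_panel_disable() still return them for callers that can.
 */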
static void panel_nv3051d_remove(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index 04f1d2676c78..116d67bfa114 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -23,10 +23,12 @@
#define DSI_NUM_MIN 1
-#define mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, cmd, seq...) \
- do { \
- mipi_dsi_dcs_write_seq(dsi0, cmd, seq); \
- mipi_dsi_dcs_write_seq(dsi1, cmd, seq); \
+#define mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, cmd, seq...) \
+ do { \
+ dsi_ctx.dsi = dsi0; \
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \
+ dsi_ctx.dsi = dsi1; \
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \
} while (0)
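/*
 * A brief note on the rewrite above: the dual-link wrapper just
 * retargets one shared context at each host before writing, so a
 * single accum_err covers both links. The macro takes dsi_ctx by name
 * rather than by pointer, so callers pass the context variable itself,
 * as the init sequences below do.
 */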
struct panel_info {
@@ -67,868 +69,829 @@ static int elish_boe_init_sequence(struct panel_info *pinfo)
{
struct mipi_dsi_device *dsi0 = pinfo->dsi[0];
struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
/* No datasheet, so write magic init sequence directly */
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x05);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x18, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0x84);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x05, 0x2d);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x06, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x07, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x08, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0x45);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x12, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x15, 0x83);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29, 0x0a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x31, 0xfe);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x32, 0xfd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x33, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x34, 0xf8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0xf5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x36, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x37, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x38, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0xec);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3d, 0xe9);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3f, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x40, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x41, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x45, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x46, 0xf4);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x47, 0xe7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x48, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x49, 0xcd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4a, 0xc0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4b, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4c, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4d, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4e, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x50, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x68);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x52, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x54, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0x0e);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x59, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5a, 0xf7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5b, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5c, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5d, 0xe3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5e, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5f, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x60, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x61, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x62, 0xcb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x63, 0xbf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x64, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x65, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x66, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x67, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x25, 0x47);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0x47);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0x47);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1a, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x84, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x85, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x91, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x92, 0x0f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x93, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x94, 0x18);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x95, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x96, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb0, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x1b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x24);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x27);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x31);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd1, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd2, 0x30);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xde, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdf, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x81);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9f, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x6f, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x70, 0x11);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x73, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x74, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x76, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x77, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa0, 0x3f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa9, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xaa, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xab, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xad, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xba, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbe, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbf, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc0, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc1, 0x59);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc2, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc5, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc6, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc7, 0x48);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xca, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcb, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xce, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcf, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd3, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd7, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdc, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdd, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xe1, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xe2, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf2, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf3, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf4, 0x48);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x13, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x14, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbc, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbd, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x97, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x98, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x99, 0x95);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9a, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9b, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9c, 0x0b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9d, 0x0a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9e, 0x90);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9f, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa3, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x14, 0x60);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0xc0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xd0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x02, 0xaf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0xee);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1d, 0x09);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x0f, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x2c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11);
- msleep(70);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29);
-
- return 0;
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd2, 0x30);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x76, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x77, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x59);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x48);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd7, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdc, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdd, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe1, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe2, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf3, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf4, 0x48);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x13, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x97, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x98, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x99, 0x95);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9b, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9c, 0x0b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9d, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9e, 0x90);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa3, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x60);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11);
+ mipi_dsi_msleep(&dsi_ctx, 70);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29);
+
+ return dsi_ctx.accum_err;
}
static int elish_csot_init_sequence(struct panel_info *pinfo)
{
struct mipi_dsi_device *dsi0 = pinfo->dsi[0];
struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
/* No datasheet, so write magic init sequence directly */
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x05);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x18, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xd0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x02, 0xaf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x30);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0xee);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1d, 0x09);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0x84);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x05, 0x2d);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x06, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x07, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x08, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0x45);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x12, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x15, 0x83);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29, 0x0a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x31, 0xfe);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x32, 0xfd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x33, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x34, 0xf8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0xf5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x36, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x37, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x38, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0xec);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3d, 0xe9);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3f, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x40, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x41, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x45, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x46, 0xf4);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x47, 0xe7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x48, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x49, 0xcd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4a, 0xc0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4b, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4c, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4d, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4e, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x50, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x68);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x52, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x54, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0x0e);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x59, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5a, 0xf7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5b, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5c, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5d, 0xe3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5e, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5f, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x60, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x61, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x62, 0xcb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x63, 0xbf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x64, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x65, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x66, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x67, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x0f, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x2c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x55, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x25, 0x46);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0x46);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0x46);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1a, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x84, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x85, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x91, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x92, 0x0f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x93, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x94, 0x18);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x95, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x96, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb0, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x1b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x24);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x27);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x31);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd1, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xde, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdf, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x81);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x6f, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x70, 0x11);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x73, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x74, 0x4d);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa0, 0x3f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa9, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xaa, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xab, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xad, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x4b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xba, 0x96);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x4b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbe, 0x07);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbf, 0x4b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc0, 0x07);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc1, 0x5c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc2, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc5, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc6, 0x3f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc7, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xca, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcb, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xce, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcf, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd3, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbc, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbd, 0x1c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9a, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11);
- msleep(70);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29);
-
- return 0;
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x30);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x55, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x4d);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x96);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x07);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x07);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x5c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x1c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
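+	/* standard DCS: 0x11 exit sleep, 0x29 display on, sent to both links */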
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11);
+ mipi_dsi_msleep(&dsi_ctx, 70);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29);
+
+ return dsi_ctx.accum_err;
}

static int j606f_boe_init_sequence(struct panel_info *pinfo)
{
struct mipi_dsi_device *dsi = pinfo->dsi[0];
- struct device *dev = &dsi->dev;
- int ret;
-
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0xd9);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0x78);
- mipi_dsi_dcs_write_seq(dsi, 0x08, 0x5a);
- mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x63);
- mipi_dsi_dcs_write_seq(dsi, 0x0e, 0x91);
- mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0x95, 0xeb);
- mipi_dsi_dcs_write_seq(dsi, 0x96, 0xeb);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x6d, 0x66);
- mipi_dsi_dcs_write_seq(dsi, 0x75, 0xa2);
- mipi_dsi_dcs_write_seq(dsi, 0x77, 0xb3);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00,
- 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
- mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01,
- 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03,
- 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03,
- 0xfd, 0x03, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00,
- 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
- mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01,
- 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03,
- 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03,
- 0xfd, 0x03, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00,
- 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01,
- 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03,
- 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03,
- 0xfd, 0x03, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x21);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00,
- 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
- mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01,
- 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03,
- 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03,
- 0xf5, 0x03, 0xf7);
- mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00,
- 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
- mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01,
- 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03,
- 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03,
- 0xf5, 0x03, 0xf7);
- mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00,
- 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01,
- 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03,
- 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03,
- 0xf5, 0x03, 0xf7);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x23);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x11, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x12, 0x77);
- mipi_dsi_dcs_write_seq(dsi, 0x15, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x01, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x02, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x03, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x04, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x06, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x08, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x09, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x0a, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x0b, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x0c, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x0e, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x10, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x11, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x12, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x13, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x14, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x15, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x17, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x18, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x1c, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x1e, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x21, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x23, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x24, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x28, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x29, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_LUT, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x44);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0x32);
- mipi_dsi_dcs_write_seq(dsi, 0x37, 0x44);
- mipi_dsi_dcs_write_seq(dsi, 0x38, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0x39, 0x00);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x9a);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x3b, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x42);
- mipi_dsi_dcs_write_seq(dsi, 0x3f, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x43, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x47, 0x66);
- mipi_dsi_dcs_write_seq(dsi, 0x4a, 0x9a);
- mipi_dsi_dcs_write_seq(dsi, 0x4b, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x4c, 0x91);
- mipi_dsi_dcs_write_seq(dsi, 0x4d, 0x21);
- mipi_dsi_dcs_write_seq(dsi, 0x4e, 0x43);
-
- ret = mipi_dsi_dcs_set_display_brightness(dsi, 18);
- if (ret < 0) {
- dev_err(dev, "Failed to set display brightness: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x52, 0x34);
- mipi_dsi_dcs_write_seq(dsi, 0x55, 0x82, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0x56, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x58, 0x21);
- mipi_dsi_dcs_write_seq(dsi, 0x59, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0x5a, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x5f, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x65, 0x82);
- mipi_dsi_dcs_write_seq(dsi, 0x7e, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0x7f, 0x3c);
- mipi_dsi_dcs_write_seq(dsi, 0x82, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x97, 0xc0);
- mipi_dsi_dcs_write_seq(dsi, 0xb6,
- 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
- 0x05, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x92, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0x93, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0x94, 0x5f);
- mipi_dsi_dcs_write_seq(dsi, 0xd7, 0x55);
- mipi_dsi_dcs_write_seq(dsi, 0xda, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0xde, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xdb, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xdc, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x22);
- mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe0, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xe1, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe2, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xe3, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe4, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xe5, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe6, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x88);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x8d, 0x88);
- mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x90);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x25);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x3f, 0xe0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_VSYNC_TIMING, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x44, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_GET_SCANLINE, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0x48, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x49, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0xd0);
- mipi_dsi_dcs_write_seq(dsi, 0x61, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x62, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0xf1, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x64, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0x67, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0x6a, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0x70, 0x30);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_READ_PPS_START, 0xf3);
- mipi_dsi_dcs_write_seq(dsi, 0xa3, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xa4, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xa5, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xd6, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x26);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0xa1);
- mipi_dsi_dcs_write_seq(dsi, 0x0a, 0xf2);
- mipi_dsi_dcs_write_seq(dsi, 0x04, 0x28);
- mipi_dsi_dcs_write_seq(dsi, 0x06, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0x0c, 0x13);
- mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x11, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x12, 0x50);
- mipi_dsi_dcs_write_seq(dsi, 0x13, 0x51);
- mipi_dsi_dcs_write_seq(dsi, 0x14, 0x65);
- mipi_dsi_dcs_write_seq(dsi, 0x15, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0x17, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x18, 0x86);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0x1c, 0xbb);
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x23, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x1e, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x24, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x05);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x32, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x39, 0x00);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0xc3);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0x78);
- mipi_dsi_dcs_write_seq(dsi, 0x35, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0xc8, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xc9, 0x82);
- mipi_dsi_dcs_write_seq(dsi, 0xca, 0x4e);
- mipi_dsi_dcs_write_seq(dsi, 0xcb, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_READ_PPS_CONTINUE, 0x4c);
- mipi_dsi_dcs_write_seq(dsi, 0xaa, 0x47);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x27);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x56, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x58, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0x59, 0x53);
- mipi_dsi_dcs_write_seq(dsi, 0x5a, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x14);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x01);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0x5f, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0x60, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x61, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x62, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x63, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x64, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0x65, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x66, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x67, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x68, 0x25);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x78, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc3, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xd1, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x2f);
- mipi_dsi_dcs_write_seq(dsi, 0x23, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x24, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0xf8);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x28, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0x29, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_LUT, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0xe0);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x14, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0xc0);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0xf0);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x08);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x5d);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x3b, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x4a, 0x5d);
- mipi_dsi_dcs_write_seq(dsi, 0x4b, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x5a, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x91, 0x44);
- mipi_dsi_dcs_write_seq(dsi, 0x92, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xdb, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xdc, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x22);
- mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe0, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xe1, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe2, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xe3, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe4, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xe5, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe6, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x8d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x25);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0x60);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x48, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x49, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x61, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x62, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x26);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x7f);
- mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x1c, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x7f);
- mipi_dsi_dcs_write_seq(dsi, 0x1e, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0x75);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x75);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0x32, 0x8d);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x75);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x18, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x02);
-
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear on: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x13);
- mipi_dsi_dcs_write_seq(dsi, 0x3b, 0x03, 0x5f, 0x1a, 0x04, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
- usleep_range(10000, 11000);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
-
- ret = mipi_dsi_dcs_set_display_brightness(dsi, 0);
- if (ret < 0) {
- dev_err(dev, "Failed to set display brightness: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x68, 0x05, 0x01);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(100);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
- msleep(30);
-
- return 0;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
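+	/*
+	 * 0xff selects a manufacturer command page and 0xfb 0x01 keeps the
+	 * page's registers from being reloaded (assumed Novatek convention;
+	 * the values come straight from the vendor sequence).
+	 */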
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0xd9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x78);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x63);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x91);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x95, 0xeb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x96, 0xeb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x66);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0xa2);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0xb3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d,
+ 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e,
+ 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08,
+ 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7,
+ 0x03, 0xfd, 0x03, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d,
+ 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e,
+ 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08,
+ 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb7, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7,
+ 0x03, 0xfd, 0x03, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d,
+ 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e,
+ 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xba, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08,
+ 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7,
+ 0x03, 0xfd, 0x03, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x21);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
+ 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
+ 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
+ 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
+ 0x03, 0xf5, 0x03, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
+ 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
+ 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
+ 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb7, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
+ 0x03, 0xf5, 0x03, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
+ 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
+ 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xba, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
+ 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
+ 0x03, 0xf5, 0x03, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x23);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0a, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0b, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x10, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x13, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x21, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_LUT, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x32);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x37, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x38, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x00);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x9a);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_3D_CONTROL, 0x42);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x43, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x47, 0x66);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4a, 0x9a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4b, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4c, 0x91);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4d, 0x21);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4e, 0x43);
+
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 18);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x52, 0x34);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x82, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x21);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x82);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7e, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0x3c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x97, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6,
+ 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ 0x05, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x92, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x93, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x94, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd7, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xda, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xde, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdb, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdc, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdd, 0x22);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe4, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe5, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8d, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8e, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x90);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x25);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0xe0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_VSYNC_TIMING, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_GET_SCANLINE, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0xd0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf1, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6a, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_READ_PPS_START, 0xf3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa3, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa4, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa5, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd6, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x26);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0xa1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0a, 0xf2);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x50);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x13, 0x51);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x65);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x86);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0xbb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x00);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0xc3);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x78);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x35, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc8, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc9, 0x82);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xca, 0x4e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcb, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_READ_PPS_CONTINUE, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xaa, 0x47);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x27);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x53);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x60, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x63, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x66, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x25);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc3, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd1, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd2, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0xf8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_LUT, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xe0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x08);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x5d);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4a, 0x5d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4b, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x91, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x92, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdb, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdc, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdd, 0x22);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe4, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe5, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x25);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x26);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0x8d);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x75);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x02);
+
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x03, 0x5f, 0x1a, 0x04, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x05, 0x01);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 100);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 30);
+
+ return dsi_ctx.accum_err;
}

static const struct drm_display_mode elish_boe_modes[] = {
@@ -1063,18 +1026,18 @@ static int nt36523_prepare(struct drm_panel *panel)
static int nt36523_disable(struct drm_panel *panel)
{
struct panel_info *pinfo = to_panel_info(panel);
- int i, ret;
+	int i;

	for (i = 0; i < DSI_NUM_MIN + pinfo->desc->is_dual_dsi; i++) {
- ret = mipi_dsi_dcs_set_display_off(pinfo->dsi[i]);
- if (ret < 0)
- dev_err(&pinfo->dsi[i]->dev, "failed to set display off: %d\n", ret);
+		struct mipi_dsi_multi_context dsi_ctx = { .dsi = pinfo->dsi[i] };
+
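+		/* best effort: the _multi helper logs any failure itself */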
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
	}

	for (i = 0; i < DSI_NUM_MIN + pinfo->desc->is_dual_dsi; i++) {
- ret = mipi_dsi_dcs_enter_sleep_mode(pinfo->dsi[i]);
- if (ret < 0)
- dev_err(&pinfo->dsi[i]->dev, "failed to enter sleep mode: %d\n", ret);
+		struct mipi_dsi_multi_context dsi_ctx = { .dsi = pinfo->dsi[i] };
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
	}

	msleep(70);
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt37801.c b/drivers/gpu/drm/panel/panel-novatek-nt37801.c
new file mode 100644
index 000000000000..84d367eab058
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-novatek-nt37801.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2024 Linaro Limited
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/display/drm_dsc.h>
+#include <drm/display/drm_dsc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/mipi_display.h>
+
+struct novatek_nt37801 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct drm_dsc_config dsc;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data novatek_nt37801_supplies[] = {
+ { .supply = "vddio" },
+ { .supply = "vci" },
+ { .supply = "vdd" },
+};
+
+static inline struct novatek_nt37801 *to_novatek_nt37801(struct drm_panel *panel)
+{
+ return container_of(panel, struct novatek_nt37801, panel);
+}
+
+static void novatek_nt37801_reset(struct novatek_nt37801 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 21000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 21000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 21000);
+}
+
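+/*
+ * Page switching: 0xf0 with the 0x55 0xaa 0x52 0x08 prefix selects a vendor
+ * command page (assumed to be the usual Novatek CMD2 convention).
+ */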
+#define NT37801_DCS_SWITCH_PAGE 0xf0
+
+#define novatek_nt37801_switch_page(dsi_ctx, page) \
+ mipi_dsi_dcs_write_seq_multi((dsi_ctx), NT37801_DCS_SWITCH_PAGE, \
+ 0x55, 0xaa, 0x52, 0x08, (page))
+
+static int novatek_nt37801_on(struct novatek_nt37801 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
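+	/*
+	 * Each *_multi call records the first failure in dsi_ctx.accum_err and
+	 * becomes a no-op afterwards, so a single error check at the end of
+	 * the function is enough.
+	 */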
+ novatek_nt37801_switch_page(&dsi_ctx, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc5, 0x0b, 0x0b, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xaa, 0x55, 0xa5, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf5, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x1b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf4, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x18);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf8, 0x19);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfc, 0x00);
+ mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0x0000, 0x059f);
+ mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0x0000, 0x0c7f);
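+	/*
+	 * 0x90/0x91 look like the vendor's DSC mode and PPS-style parameters
+	 * (an assumption; the payload is taken verbatim from the vendor init).
+	 */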
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x90, 0x03, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x91,
+ 0x89, 0x28, 0x00, 0x28, 0xc2, 0x00, 0x02,
+ 0x68, 0x04, 0x6c, 0x00, 0x0a, 0x02, 0x77,
+ 0x01, 0xe9, 0x10, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xaa, 0x55, 0xa5, 0x81);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x23);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb,
+ 0x00, 0x01, 0x00, 0x11, 0x33, 0x33, 0x33,
+ 0x55, 0x57, 0xd0, 0x00, 0x00, 0x44, 0x56,
+ 0x77, 0x78, 0x9a, 0xbc, 0xdd, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0xdc);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0x00);
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x00, 0x18, 0x00, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x51,
+ 0x07, 0xff, 0x07, 0xff, 0x0f, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9c, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_MEMORY_START);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x00);
+
+ novatek_nt37801_switch_page(&dsi_ctx, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x55, 0x01, 0xff, 0x03);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
+}
+
+static int novatek_nt37801_off(struct novatek_nt37801 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ return dsi_ctx.accum_err;
+}
+
+static int novatek_nt37801_prepare(struct drm_panel *panel)
+{
+ struct novatek_nt37801 *ctx = to_novatek_nt37801(panel);
+ struct device *dev = &ctx->dsi->dev;
+ struct drm_dsc_picture_parameter_set pps;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(novatek_nt37801_supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ novatek_nt37801_reset(ctx);
+
+ ret = novatek_nt37801_on(ctx);
+ if (ret < 0)
+ goto err;
+
+ drm_dsc_pps_payload_pack(&pps, &ctx->dsc);
+
+ ret = mipi_dsi_picture_parameter_set(ctx->dsi, &pps);
+ if (ret < 0) {
+		dev_err(dev, "failed to transmit PPS: %d\n", ret);
+ goto err;
+ }
+
+ ret = mipi_dsi_compression_mode(ctx->dsi, true);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable compression mode: %d\n", ret);
+ goto err;
+ }
+
+ msleep(28);
+
+ return 0;
+
+err:
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(novatek_nt37801_supplies),
+ ctx->supplies);
+
+ return ret;
+}
+
+static int novatek_nt37801_unprepare(struct drm_panel *panel)
+{
+ struct novatek_nt37801 *ctx = to_novatek_nt37801(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = novatek_nt37801_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+ regulator_bulk_disable(ARRAY_SIZE(novatek_nt37801_supplies),
+ ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode novatek_nt37801_mode = {
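+	/* pixel clock in kHz: htotal * vtotal * 120 Hz refresh / 1000 */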
+ .clock = (1440 + 20 + 4 + 20) * (3200 + 20 + 2 + 18) * 120 / 1000,
+ .hdisplay = 1440,
+ .hsync_start = 1440 + 20,
+ .hsync_end = 1440 + 20 + 4,
+ .htotal = 1440 + 20 + 4 + 20,
+ .vdisplay = 3200,
+ .vsync_start = 3200 + 20,
+ .vsync_end = 3200 + 20 + 2,
+ .vtotal = 3200 + 20 + 2 + 18,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int novatek_nt37801_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector,
+ &novatek_nt37801_mode);
+}
+
+static const struct drm_panel_funcs novatek_nt37801_panel_funcs = {
+ .prepare = novatek_nt37801_prepare,
+ .unprepare = novatek_nt37801_unprepare,
+ .get_modes = novatek_nt37801_get_modes,
+};
+
+static int novatek_nt37801_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
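+	/* DCS 0x51 takes a 16-bit parameter here, hence the _large helper */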
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static const struct backlight_ops novatek_nt37801_bl_ops = {
+ .update_status = novatek_nt37801_bl_update_status,
+};
+
+static struct backlight_device *
+novatek_nt37801_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 4095,
+ .max_brightness = 4095,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &novatek_nt37801_bl_ops, &props);
+}
+
+static int novatek_nt37801_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct novatek_nt37801 *ctx;
+ int ret;
+
+ ctx = devm_drm_panel_alloc(dev, struct novatek_nt37801, panel,
+ &novatek_nt37801_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (!ctx)
+ return -ENOMEM;
+
+ ret = devm_regulator_bulk_get_const(dev,
+ ARRAY_SIZE(novatek_nt37801_supplies),
+ novatek_nt37801_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ ctx->panel.prepare_prev_first = true;
+ ctx->panel.backlight = novatek_nt37801_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ /* This panel only supports DSC; unconditionally enable it */
+ dsi->dsc = &ctx->dsc;
+ ctx->dsc.dsc_version_major = 1;
+ ctx->dsc.dsc_version_minor = 1;
+ ctx->dsc.slice_height = 40;
+ ctx->dsc.slice_width = 720;
+ ctx->dsc.slice_count = 1440 / ctx->dsc.slice_width;
+ ctx->dsc.bits_per_component = 8;
+ ctx->dsc.bits_per_pixel = 8 << 4; /* 4 fractional bits */
+ ctx->dsc.block_pred_enable = true;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void novatek_nt37801_remove(struct mipi_dsi_device *dsi)
+{
+ struct novatek_nt37801 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id novatek_nt37801_of_match[] = {
+ { .compatible = "novatek,nt37801" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, novatek_nt37801_of_match);
+
+static struct mipi_dsi_driver novatek_nt37801_driver = {
+ .probe = novatek_nt37801_probe,
+ .remove = novatek_nt37801_remove,
+ .driver = {
+ .name = "panel-novatek-nt37801",
+ .of_match_table = novatek_nt37801_of_match,
+ },
+};
+module_mipi_dsi_driver(novatek_nt37801_driver);
+
+MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>");
+MODULE_DESCRIPTION("Panel driver for the Novatek NT37801/NT37810 AMOLED DSI panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
index f23d8832a1ad..93f11e2e9398 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
@@ -34,8 +34,8 @@ struct s6d7aa0 {
struct s6d7aa0_panel_desc {
unsigned int panel_type;
- int (*init_func)(struct s6d7aa0 *ctx);
- int (*off_func)(struct s6d7aa0 *ctx);
+ void (*init_func)(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx);
+ void (*off_func)(struct mipi_dsi_multi_context *dsi_ctx);
const struct drm_display_mode *drm_mode;
unsigned long mode_flags;
u32 bus_flags;
@@ -62,93 +62,61 @@ static void s6d7aa0_reset(struct s6d7aa0 *ctx)
msleep(50);
}

-static int s6d7aa0_lock(struct s6d7aa0 *ctx, bool lock)
+static void s6d7aa0_lock(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx, bool lock)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
-
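+	/* 0xa5a5/0x5a5a toggle the vendor command-set lock (PASSWD3 is inverted) */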
if (lock) {
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD1, 0xa5, 0xa5);
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD2, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD1, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD2, 0xa5, 0xa5);
if (ctx->desc->use_passwd3)
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD3, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD3, 0x5a, 0x5a);
} else {
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD1, 0x5a, 0x5a);
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD2, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD1, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD2, 0x5a, 0x5a);
if (ctx->desc->use_passwd3)
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD3, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD3, 0xa5, 0xa5);
}
-
- return 0;
}
static int s6d7aa0_on(struct s6d7aa0 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = ctx->desc->init_func(ctx);
- if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
- gpiod_set_value_cansleep(ctx->reset_gpio, 1);
- return ret;
- }
+ ctx->desc->init_func(ctx, &dsi_ctx);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
-static int s6d7aa0_off(struct s6d7aa0 *ctx)
+static void s6d7aa0_off(struct s6d7aa0 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = ctx->desc->off_func(ctx);
- if (ret < 0) {
- dev_err(dev, "Panel-specific off function failed: %d\n", ret);
- return ret;
- }
+ ctx->desc->off_func(&dsi_ctx);
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(64);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 64);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
- return 0;
+ mipi_dsi_msleep(&dsi_ctx, 120);
}
static int s6d7aa0_prepare(struct drm_panel *panel)
{
struct s6d7aa0 *ctx = panel_to_s6d7aa0(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- if (ret < 0) {
- dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ if (ret < 0)
return ret;
- }
s6d7aa0_reset(ctx);
ret = s6d7aa0_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
return ret;
}
@@ -159,12 +127,8 @@ static int s6d7aa0_prepare(struct drm_panel *panel)
static int s6d7aa0_disable(struct drm_panel *panel)
{
struct s6d7aa0 *ctx = panel_to_s6d7aa0(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
- ret = s6d7aa0_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ s6d7aa0_off(ctx);
return 0;
}
@@ -185,13 +149,11 @@ static int s6d7aa0_bl_update_status(struct backlight_device *bl)
{
struct mipi_dsi_device *dsi = bl_get_data(bl);
u16 brightness = backlight_get_brightness(bl);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, brightness);
- return 0;
+ return dsi_ctx.accum_err;
}
static int s6d7aa0_bl_get_brightness(struct backlight_device *bl)
@@ -228,65 +190,39 @@ s6d7aa0_create_backlight(struct mipi_dsi_device *dsi)
/* Initialization code and structures for LSL080AL02 panel */
-static int s6d7aa0_lsl080al02_init(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al02_init(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ mipi_dsi_usleep_range(dsi_ctx, 20000, 25000);
- usleep_range(20000, 25000);
+ s6d7aa0_lock(ctx, dsi_ctx, false);
- ret = s6d7aa0_lock(ctx, false);
- if (ret < 0) {
- dev_err(dev, "Failed to unlock registers: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, MCS_OTP_RELOAD, 0x00, 0x10);
- usleep_range(1000, 1500);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_OTP_RELOAD, 0x00, 0x10);
+ mipi_dsi_usleep_range(dsi_ctx, 1000, 1500);
/* SEQ_B6_PARAM_8_R01 */
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x10);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb6, 0x10);
/* BL_CTL_ON */
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x28);
-
- usleep_range(5000, 6000);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x28);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x04);
+ mipi_dsi_usleep_range(dsi_ctx, 5000, 6000);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x04);
- msleep(120);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ mipi_dsi_dcs_exit_sleep_mode_multi(dsi_ctx);
- ret = s6d7aa0_lock(ctx, true);
- if (ret < 0) {
- dev_err(dev, "Failed to lock registers: %d\n", ret);
- return ret;
- }
+ mipi_dsi_msleep(dsi_ctx, 120);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ s6d7aa0_lock(ctx, dsi_ctx, true);
- return 0;
+ mipi_dsi_dcs_set_display_on_multi(dsi_ctx);
}
-static int s6d7aa0_lsl080al02_off(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al02_off(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
-
/* BL_CTL_OFF */
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x20);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x20);
}
static const struct drm_display_mode s6d7aa0_lsl080al02_mode = {
@@ -317,79 +253,51 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = {
/* Initialization code and structures for LSL080AL03 panel */
-static int s6d7aa0_lsl080al03_init(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al03_init(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
-
- usleep_range(20000, 25000);
+ mipi_dsi_usleep_range(dsi_ctx, 20000, 25000);
- ret = s6d7aa0_lock(ctx, false);
- if (ret < 0) {
- dev_err(dev, "Failed to unlock registers: %d\n", ret);
- return ret;
- }
+ s6d7aa0_lock(ctx, dsi_ctx, false);
if (ctx->desc->panel_type == S6D7AA0_PANEL_LSL080AL03) {
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0xc7, 0x00, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x01, 0x4e, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0xfd, 0x16, 0x10, 0x11, 0x23,
- 0x09);
- mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00, 0x02, 0x03, 0x21,
- 0x80, 0x78);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0xc7, 0x00, 0x29);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x01, 0x4e, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfd, 0x16, 0x10, 0x11, 0x23,
+ 0x09);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfe, 0x00, 0x02, 0x03, 0x21,
+ 0x80, 0x78);
} else if (ctx->desc->panel_type == S6D7AA0_PANEL_LTL101AT01) {
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x01, 0x4e, 0x0b);
- mipi_dsi_dcs_write_seq(dsi, 0xfd, 0x16, 0x10, 0x11, 0x23,
- 0x09);
- mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00, 0x02, 0x03, 0x21,
- 0x80, 0x68);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x08);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x01, 0x4e, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfd, 0x16, 0x10, 0x11, 0x23,
+ 0x09);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfe, 0x00, 0x02, 0x03, 0x21,
+ 0x80, 0x68);
}
- mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x51);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x02, 0x08, 0x08);
-
- usleep_range(10000, 11000);
-
- mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x80, 0x80, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xcd,
- 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
- 0x2e, 0x2e, 0x2e, 0x2e, 0x2e);
- mipi_dsi_dcs_write_seq(dsi, 0xce,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x03);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb3, 0x51);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xf2, 0x02, 0x08, 0x08);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
-
- ret = s6d7aa0_lock(ctx, true);
- if (ret < 0) {
- dev_err(dev, "Failed to lock registers: %d\n", ret);
- return ret;
- }
+ mipi_dsi_usleep_range(dsi_ctx, 10000, 11000);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc0, 0x80, 0x80, 0x30);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xcd,
+ 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
+ 0x2e, 0x2e, 0x2e, 0x2e, 0x2e);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xce,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc1, 0x03);
- return 0;
+ mipi_dsi_dcs_exit_sleep_mode_multi(dsi_ctx);
+ s6d7aa0_lock(ctx, dsi_ctx, true);
+ mipi_dsi_dcs_set_display_on_multi(dsi_ctx);
}
-static int s6d7aa0_lsl080al03_off(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al03_off(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
-
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x00);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0x22, 0x00);
}
static const struct drm_display_mode s6d7aa0_lsl080al03_mode = {
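
All of the s6d7aa0 conversions above follow the same shape: a struct
mipi_dsi_multi_context carries the DSI device plus an accumulated error, each
*_multi() helper becomes a no-op once a previous call has failed, and the
caller checks the error once at the end instead of after every command. A
minimal sketch of the calling pattern (the function name is illustrative; the
helpers are the drm_mipi_dsi.h ones used throughout this series):

	static int example_panel_on(struct mipi_dsi_device *dsi)
	{
		struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

		/* Each helper skips itself if dsi_ctx.accum_err is already set */
		mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
		mipi_dsi_msleep(&dsi_ctx, 120);
		mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

		/* One check replaces the per-call if (ret < 0) ladders */
		return dsi_ctx.accum_err;
	}

The helpers also log the failing command themselves, which is why the
per-call dev_err() messages in the old code can simply be dropped.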
diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
index 04ce925b3d9d..d92ae6b6100f 100644
--- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c
+++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2020 Caleb Connolly <caleb@connolly.tech>
+/* Copyright (c) 2020 Casey Connolly <casey.connolly@linaro.org>
* Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
@@ -22,7 +22,6 @@ struct sofef00_panel {
struct mipi_dsi_device *dsi;
struct regulator *supply;
struct gpio_desc *reset_gpio;
- const struct drm_display_mode *mode;
};
static inline
@@ -44,66 +43,44 @@ static void sofef00_panel_reset(struct sofef00_panel *ctx)
static int sofef00_panel_on(struct sofef00_panel *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- usleep_range(10000, 11000);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a);
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x12);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
static int sofef00_panel_off(struct sofef00_panel *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(40);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 40);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(160);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 160);
- return 0;
+ return dsi_ctx.accum_err;
}
static int sofef00_panel_prepare(struct drm_panel *panel)
@@ -122,7 +99,6 @@ static int sofef00_panel_prepare(struct drm_panel *panel)
ret = sofef00_panel_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
return ret;
}
@@ -133,13 +109,8 @@ static int sofef00_panel_prepare(struct drm_panel *panel)
static int sofef00_panel_unprepare(struct drm_panel *panel)
{
struct sofef00_panel *ctx = to_sofef00_panel(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
-
- ret = sofef00_panel_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ sofef00_panel_off(ctx);
regulator_disable(ctx->supply);
return 0;
@@ -159,26 +130,11 @@ static const struct drm_display_mode enchilada_panel_mode = {
.height_mm = 145,
};
-static const struct drm_display_mode fajita_panel_mode = {
- .clock = (1080 + 72 + 16 + 36) * (2340 + 32 + 4 + 18) * 60 / 1000,
- .hdisplay = 1080,
- .hsync_start = 1080 + 72,
- .hsync_end = 1080 + 72 + 16,
- .htotal = 1080 + 72 + 16 + 36,
- .vdisplay = 2340,
- .vsync_start = 2340 + 32,
- .vsync_end = 2340 + 32 + 4,
- .vtotal = 2340 + 32 + 4 + 18,
- .width_mm = 68,
- .height_mm = 145,
-};
-
static int sofef00_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector)
{
struct drm_display_mode *mode;
- struct sofef00_panel *ctx = to_sofef00_panel(panel);
- mode = drm_mode_duplicate(connector->dev, ctx->mode);
+ mode = drm_mode_duplicate(connector->dev, &enchilada_panel_mode);
if (!mode)
return -ENOMEM;
@@ -239,13 +195,6 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
if (!ctx)
return -ENOMEM;
- ctx->mode = of_device_get_match_data(dev);
-
- if (!ctx->mode) {
- dev_err(dev, "Missing device mode\n");
- return -ENODEV;
- }
-
ctx->supply = devm_regulator_get(dev, "vddio");
if (IS_ERR(ctx->supply))
return dev_err_probe(dev, PTR_ERR(ctx->supply),
@@ -295,14 +244,7 @@ static void sofef00_panel_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id sofef00_panel_of_match[] = {
- { // OnePlus 6 / enchilada
- .compatible = "samsung,sofef00",
- .data = &enchilada_panel_mode,
- },
- { // OnePlus 6T / fajita
- .compatible = "samsung,s6e3fc2x01",
- .data = &fajita_panel_mode,
- },
+ { .compatible = "samsung,sofef00" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sofef00_panel_of_match);
@@ -318,6 +260,6 @@ static struct mipi_dsi_driver sofef00_panel_driver = {
module_mipi_dsi_driver(sofef00_panel_driver);
-MODULE_AUTHOR("Caleb Connolly <caleb@connolly.tech>");
+MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>");
MODULE_DESCRIPTION("DRM driver for Samsung AMOLED DSI panels found in OnePlus 6/6T phones");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index 729cbb0d8403..36abfa2e65e9 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -36,60 +36,49 @@ static inline struct sharp_nt_panel *to_sharp_nt_panel(struct drm_panel *panel)
static int sharp_nt_panel_init(struct sharp_nt_panel *sharp_nt)
{
struct mipi_dsi_device *dsi = sharp_nt->dsi;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
- msleep(120);
+ mipi_dsi_msleep(&dsi_ctx, 120);
/* Novatek two-lane operation */
- ret = mipi_dsi_dcs_write(dsi, 0xae, (u8[]){ 0x03 }, 1);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xae, 0x03);
/* Set both MCU and RGB I/F to 24bpp */
- ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT |
- (MIPI_DCS_PIXEL_FMT_24BIT << 4));
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx,
+ MIPI_DCS_PIXEL_FMT_24BIT |
+ (MIPI_DCS_PIXEL_FMT_24BIT << 4));
- return 0;
+ return dsi_ctx.accum_err;
}
static int sharp_nt_panel_on(struct sharp_nt_panel *sharp_nt)
{
struct mipi_dsi_device *dsi = sharp_nt->dsi;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
static int sharp_nt_panel_off(struct sharp_nt_panel *sharp_nt)
{
struct mipi_dsi_device *dsi = sharp_nt->dsi;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
static int sharp_nt_panel_unprepare(struct drm_panel *panel)
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 33a37539de57..82ee2f12b8d2 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -579,9 +579,10 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
u32 bus_flags;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
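+ /*
+ * devm_drm_panel_alloc() combines the allocation with drm_panel_init(),
+ * so the separate init call is no longer needed.
+ */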
+ panel = devm_drm_panel_alloc(dev, struct panel_simple, base,
+ &panel_simple_funcs, desc->connector_type);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
panel->desc = desc;
@@ -694,8 +695,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
- drm_panel_init(&panel->base, dev, &panel_simple_funcs, connector_type);
-
err = drm_panel_of_backlight(&panel->base);
if (err) {
dev_err_probe(dev, err, "Could not find backlight\n");
@@ -3528,6 +3527,30 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct drm_display_mode nlt_nl13676bc25_03f_mode = {
+ .clock = 75400,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 14,
+ .hsync_end = 1366 + 14 + 56,
+ .htotal = 1366 + 14 + 56 + 64,
+ .vdisplay = 768,
+ .vsync_start = 768 + 1,
+ .vsync_end = 768 + 1 + 3,
+ .vtotal = 768 + 1 + 3 + 22,
+};
+
+static const struct panel_desc nlt_nl13676bc25_03f = {
+ .modes = &nlt_nl13676bc25_03f_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 363,
+ .height = 215,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing nlt_nl192108ac18_02d_timing = {
.pixelclock = { 130000000, 148350000, 163000000 },
.hactive = { 1920, 1920, 1920 },
@@ -3797,6 +3820,32 @@ static const struct panel_desc pda_91_00156_a0 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode powertip_ph128800t004_zza01_mode = {
+ .clock = 71150,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 48,
+ .hsync_end = 1280 + 48 + 32,
+ .htotal = 1280 + 48 + 32 + 80,
+ .vdisplay = 800,
+ .vsync_start = 800 + 9,
+ .vsync_end = 800 + 9 + 8,
+ .vtotal = 800 + 9 + 8 + 6,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc powertip_ph128800t004_zza01 = {
+ .modes = &powertip_ph128800t004_zza01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 216,
+ .height = 135,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode powertip_ph128800t006_zhc01_mode = {
.clock = 66500,
.hdisplay = 1280,
@@ -4394,10 +4443,10 @@ static const struct panel_desc tianma_tm070jvhg33 = {
};
/*
- * The datasheet computes total blanking as back porch + front porch, not
- * including sync pulse width. This is for both H and V. To make the total
- * blanking and period correct, subtract the pulse width from the front
- * porch.
+ * The TM070JDHG34-00 datasheet computes total blanking as back porch +
+ * front porch, not including sync pulse width. This is for both H and
+ * V. To make the total blanking and period correct, subtract the pulse
+ * width from the front porch.
*
* This works well for the Min and Typ values, but for Max values the sync
* pulse width is higher than back porch + front porch, so work around that
@@ -4406,6 +4455,10 @@ static const struct panel_desc tianma_tm070jvhg33 = {
*
* Exact datasheet values are added as a comment where they differ from the
* ones implemented for the above reason.
+ *
+ * The P0700WXF1MBAA datasheet is even less detailed, only listing period
+ * and total blanking time, however the resulting values are the same as
+ * the TM070JDHG34-00.
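+ *
+ * In other words, the front porch used here is the datasheet's front
+ * porch minus the sync width, so that fp' + sync + bp still equals the
+ * datasheet's total blanking of fp + bp.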
*/
static const struct display_timing tianma_tm070jdhg34_00_timing = {
.pixelclock = { 68400000, 71900000, 78100000 },
@@ -4428,6 +4481,30 @@ static const struct panel_desc tianma_tm070jdhg34_00 = {
.width = 150, /* 149.76 */
.height = 94, /* 93.60 */
},
+ .delay = {
+ .prepare = 15, /* Tp1 */
+ .enable = 150, /* Tp2 */
+ .disable = 150, /* Tp4 */
+ .unprepare = 120, /* Tp3 */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
+static const struct panel_desc tianma_p0700wxf1mbaa = {
+ .timings = &tianma_tm070jdhg34_00_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 150, /* 149.76 */
+ .height = 94, /* 93.60 */
+ },
+ .delay = {
+ .prepare = 18, /* Tr + Tp1 */
+ .enable = 152, /* Tp2 + Tp5 */
+ .disable = 152, /* Tp6 + Tp4 */
+ .unprepare = 120, /* Tp3 */
+ },
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
@@ -5122,6 +5199,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "newhaven,nhd-4.3-480272ef-atxl",
.data = &newhaven_nhd_43_480272ef_atxl,
}, {
+ .compatible = "nlt,nl13676bc25-03f",
+ .data = &nlt_nl13676bc25_03f,
+ }, {
.compatible = "nlt,nl192108ac18-02d",
.data = &nlt_nl192108ac18_02d,
}, {
@@ -5155,6 +5235,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "pda,91-00156-a0",
.data = &pda_91_00156_a0,
}, {
+ .compatible = "powertip,ph128800t004-zza01",
+ .data = &powertip_ph128800t004_zza01,
+ }, {
.compatible = "powertip,ph128800t006-zhc01",
.data = &powertip_ph128800t006_zhc01,
}, {
@@ -5215,6 +5298,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "tfc,s9700rtwv43tr-01b",
.data = &tfc_s9700rtwv43tr_01b,
}, {
+ .compatible = "tianma,p0700wxf1mbaa",
+ .data = &tianma_p0700wxf1mbaa,
+ }, {
.compatible = "tianma,tm070jdhg30",
.data = &tianma_tm070jdhg30,
}, {
diff --git a/drivers/gpu/drm/panel/panel-synaptics-r63353.c b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
index 17349825543f..b148e6cba9bd 100644
--- a/drivers/gpu/drm/panel/panel-synaptics-r63353.c
+++ b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
@@ -106,53 +106,34 @@ static int r63353_panel_power_off(struct r63353_panel *rpanel)
static int r63353_panel_activate(struct r63353_panel *rpanel)
{
struct mipi_dsi_device *dsi = rpanel->dsi;
- struct device *dev = &dsi->dev;
- int i, ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+ int i;
- ret = mipi_dsi_dcs_soft_reset(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to do Software Reset (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_dcs_soft_reset_multi(&dsi_ctx);
- usleep_range(15000, 17000);
+ mipi_dsi_usleep_range(&dsi_ctx, 15000, 17000);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
for (i = 0; i < rpanel->pdata->init_length; i++) {
const struct r63353_instr *instr = &rpanel->pdata->init[i];
- ret = mipi_dsi_dcs_write_buffer(dsi, instr->data, instr->len);
- if (ret < 0)
- goto fail;
+ mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, instr->data,
+ instr->len);
}
- msleep(120);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_msleep(&dsi_ctx, 120);
- usleep_range(5000, 10000);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display ON (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_usleep_range(&dsi_ctx, 5000, 10000);
- return 0;
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
-fail:
- gpiod_set_value(rpanel->reset_gpio, 0);
+ if (dsi_ctx.accum_err)
+ gpiod_set_value(rpanel->reset_gpio, 0);
- return ret;
+ return dsi_ctx.accum_err;
}
static int r63353_panel_prepare(struct drm_panel *panel)
@@ -178,27 +159,16 @@ static int r63353_panel_prepare(struct drm_panel *panel)
return 0;
}
-static int r63353_panel_deactivate(struct r63353_panel *rpanel)
+static void r63353_panel_deactivate(struct r63353_panel *rpanel)
{
struct mipi_dsi_device *dsi = rpanel->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display OFF (%d)\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
- usleep_range(5000, 10000);
+ mipi_dsi_usleep_range(&dsi_ctx, 5000, 10000);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode (%d)\n", ret);
- return ret;
- }
-
- return 0;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
}
static int r63353_panel_unprepare(struct drm_panel *panel)
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index 4dbf8b88f264..11d460d2ea19 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -86,11 +86,7 @@ struct td028ttec1_panel {
#define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel)
-/*
- * noinline_for_stack so we don't get multiple copies of tx_buf
- * on the stack in case of gcc-plugin-structleak
- */
-static int noinline_for_stack
+static int
jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
{
struct spi_device *spi = lcd->spi;
diff --git a/drivers/gpu/drm/panel/panel-visionox-g2647fb105.c b/drivers/gpu/drm/panel/panel-visionox-g2647fb105.c
new file mode 100644
index 000000000000..413849f7b4de
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-visionox-g2647fb105.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2025, Alexander Baransky <sanyapilot496@gmail.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct visionox_g2647fb105 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data visionox_g2647fb105_supplies[] = {
+ { .supply = "vdd3p3" },
+ { .supply = "vddio" },
+ { .supply = "vsn" },
+ { .supply = "vsp" },
+};
+
+static inline
+struct visionox_g2647fb105 *to_visionox_g2647fb105(struct drm_panel *panel)
+{
+ return container_of(panel, struct visionox_g2647fb105, panel);
+}
+
+static void visionox_g2647fb105_reset(struct visionox_g2647fb105 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+}
+
+static int visionox_g2647fb105_on(struct visionox_g2647fb105 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4d, 0x32);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbe, 0x17);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbf, 0xbb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc0, 0xdd);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc1, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0xd0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x00);
+
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x0000);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 100);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ return dsi_ctx.accum_err;
+}
+
+static int visionox_g2647fb105_off(struct visionox_g2647fb105 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
+}
+
+static int visionox_g2647fb105_prepare(struct drm_panel *panel)
+{
+ struct visionox_g2647fb105 *ctx = to_visionox_g2647fb105(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(visionox_g2647fb105_supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ visionox_g2647fb105_reset(ctx);
+
+ ret = visionox_g2647fb105_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int visionox_g2647fb105_unprepare(struct drm_panel *panel)
+{
+ struct visionox_g2647fb105 *ctx = to_visionox_g2647fb105(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = visionox_g2647fb105_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(visionox_g2647fb105_supplies), ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode visionox_g2647fb105_mode = {
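+ /* DRM mode clocks are in kHz: htotal * vtotal * 60 Hz / 1000 */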
+ .clock = (1080 + 28 + 4 + 36) * (2340 + 8 + 4 + 4) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 28,
+ .hsync_end = 1080 + 28 + 4,
+ .htotal = 1080 + 28 + 4 + 36,
+ .vdisplay = 2340,
+ .vsync_start = 2340 + 8,
+ .vsync_end = 2340 + 8 + 4,
+ .vtotal = 2340 + 8 + 4 + 4,
+ .width_mm = 69,
+ .height_mm = 149,
+};
+
+static int visionox_g2647fb105_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &visionox_g2647fb105_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs visionox_g2647fb105_panel_funcs = {
+ .prepare = visionox_g2647fb105_prepare,
+ .unprepare = visionox_g2647fb105_unprepare,
+ .get_modes = visionox_g2647fb105_get_modes,
+};
+
+static int visionox_g2647fb105_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
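+ /*
+ * max_brightness is 2047 and doesn't fit in one byte, hence the
+ * 16-bit _large variant of the brightness command.
+ */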
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static const struct backlight_ops visionox_g2647fb105_bl_ops = {
+ .update_status = visionox_g2647fb105_bl_update_status,
+};
+
+static struct backlight_device *
+visionox_g2647fb105_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 1023,
+ .max_brightness = 2047,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &visionox_g2647fb105_bl_ops, &props);
+}
+
+static int visionox_g2647fb105_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct visionox_g2647fb105 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ret = devm_regulator_bulk_get_const(dev,
+ ARRAY_SIZE(visionox_g2647fb105_supplies),
+ visionox_g2647fb105_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ drm_panel_init(&ctx->panel, dev, &visionox_g2647fb105_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
+
+ ctx->panel.backlight = visionox_g2647fb105_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void visionox_g2647fb105_remove(struct mipi_dsi_device *dsi)
+{
+ struct visionox_g2647fb105 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id visionox_g2647fb105_of_match[] = {
+ { .compatible = "visionox,g2647fb105" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, visionox_g2647fb105_of_match);
+
+static struct mipi_dsi_driver visionox_g2647fb105_driver = {
+ .probe = visionox_g2647fb105_probe,
+ .remove = visionox_g2647fb105_remove,
+ .driver = {
+ .name = "panel-visionox-g2647fb105",
+ .of_match_table = visionox_g2647fb105_of_match,
+ },
+};
+module_mipi_dsi_driver(visionox_g2647fb105_driver);
+
+MODULE_AUTHOR("Alexander Baransky <sanyapilot496@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for Visionox G2647FB105 AMOLED DSI panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index a45e4addcc19..5d35076b2e6d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -209,10 +209,20 @@ int panfrost_device_init(struct panfrost_device *pfdev)
spin_lock_init(&pfdev->cycle_counter.lock);
+ err = panfrost_pm_domain_init(pfdev);
+ if (err)
+ return err;
+
+ err = panfrost_reset_init(pfdev);
+ if (err) {
+ dev_err(pfdev->dev, "reset init failed %d\n", err);
+ goto out_pm_domain;
+ }
+
err = panfrost_clk_init(pfdev);
if (err) {
dev_err(pfdev->dev, "clk init failed %d\n", err);
- return err;
+ goto out_reset;
}
err = panfrost_devfreq_init(pfdev);
@@ -229,25 +239,15 @@ int panfrost_device_init(struct panfrost_device *pfdev)
goto out_devfreq;
}
- err = panfrost_reset_init(pfdev);
- if (err) {
- dev_err(pfdev->dev, "reset init failed %d\n", err);
- goto out_regulator;
- }
-
- err = panfrost_pm_domain_init(pfdev);
- if (err)
- goto out_reset;
-
pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0);
if (IS_ERR(pfdev->iomem)) {
err = PTR_ERR(pfdev->iomem);
- goto out_pm_domain;
+ goto out_regulator;
}
err = panfrost_gpu_init(pfdev);
if (err)
- goto out_pm_domain;
+ goto out_regulator;
err = panfrost_mmu_init(pfdev);
if (err)
@@ -268,16 +268,16 @@ out_mmu:
panfrost_mmu_fini(pfdev);
out_gpu:
panfrost_gpu_fini(pfdev);
-out_pm_domain:
- panfrost_pm_domain_fini(pfdev);
-out_reset:
- panfrost_reset_fini(pfdev);
out_regulator:
panfrost_regulator_fini(pfdev);
out_devfreq:
panfrost_devfreq_fini(pfdev);
out_clk:
panfrost_clk_fini(pfdev);
+out_reset:
+ panfrost_reset_fini(pfdev);
+out_pm_domain:
+ panfrost_pm_domain_fini(pfdev);
return err;
}
@@ -287,11 +287,11 @@ void panfrost_device_fini(struct panfrost_device *pfdev)
panfrost_job_fini(pfdev);
panfrost_mmu_fini(pfdev);
panfrost_gpu_fini(pfdev);
- panfrost_pm_domain_fini(pfdev);
- panfrost_reset_fini(pfdev);
panfrost_devfreq_fini(pfdev);
panfrost_regulator_fini(pfdev);
panfrost_clk_fini(pfdev);
+ panfrost_reset_fini(pfdev);
+ panfrost_pm_domain_fini(pfdev);
}
#define PANFROST_EXCEPTION(id) \
@@ -406,11 +406,36 @@ void panfrost_device_reset(struct panfrost_device *pfdev)
static int panfrost_device_runtime_resume(struct device *dev)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
+ int ret;
+
+ if (pfdev->comp->pm_features & BIT(GPU_PM_RT)) {
+ ret = reset_control_deassert(pfdev->rstc);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(pfdev->clock);
+ if (ret)
+ goto err_clk;
+
+ if (pfdev->bus_clock) {
+ ret = clk_enable(pfdev->bus_clock);
+ if (ret)
+ goto err_bus_clk;
+ }
+ }
panfrost_device_reset(pfdev);
panfrost_devfreq_resume(pfdev);
return 0;
+
+err_bus_clk:
+ if (pfdev->comp->pm_features & BIT(GPU_PM_RT))
+ clk_disable(pfdev->clock);
+err_clk:
+ if (pfdev->comp->pm_features & BIT(GPU_PM_RT))
+ reset_control_assert(pfdev->rstc);
+ return ret;
}
static int panfrost_device_runtime_suspend(struct device *dev)
@@ -426,6 +451,14 @@ static int panfrost_device_runtime_suspend(struct device *dev)
panfrost_gpu_suspend_irq(pfdev);
panfrost_gpu_power_off(pfdev);
+ if (pfdev->comp->pm_features & BIT(GPU_PM_RT)) {
+ if (pfdev->bus_clock)
+ clk_disable(pfdev->bus_clock);
+
+ clk_disable(pfdev->clock);
+ reset_control_assert(pfdev->rstc);
+ }
+
return 0;
}
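
The reshuffling above keeps teardown a mirror image of bring-up:
panfrost_pm_domain_init() and panfrost_reset_init() now run first, so they
are unwound last, both in the probe error path and in panfrost_device_fini().
A condensed sketch of the idiom (the names are illustrative, not the actual
driver symbols):

	static int example_device_init(struct example_device *dev)
	{
		int err;

		err = init_pm_domain(dev);
		if (err)
			return err;

		err = init_reset(dev);
		if (err)
			goto out_pm_domain;

		err = init_clk(dev);
		if (err)
			goto out_reset;

		return 0;

	out_reset:
		fini_reset(dev);
	out_pm_domain:
		fini_pm_domain(dev);
		return err;
	}

Keeping the labels in strict reverse order of acquisition is what makes it
safe to jump to any of them with exactly the resources acquired so far.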
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index cffcb0ac7c11..dcff70f905cd 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -36,10 +36,21 @@ enum panfrost_drv_comp_bits {
* enum panfrost_gpu_pm - Supported kernel power management features
* @GPU_PM_CLK_DIS: Allow disabling clocks during system suspend
* @GPU_PM_VREG_OFF: Allow turning off regulators during system suspend
+ * @GPU_PM_RT: Allow disabling clocks and asserting the reset control during
+ * system runtime suspend
*/
enum panfrost_gpu_pm {
GPU_PM_CLK_DIS,
GPU_PM_VREG_OFF,
+ GPU_PM_RT
+};
+
+/**
+ * enum panfrost_gpu_quirks - GPU optional quirks
+ * @GPU_QUIRK_FORCE_AARCH64_PGTABLE: Use AARCH64_4K page table format
+ */
+enum panfrost_gpu_quirks {
+ GPU_QUIRK_FORCE_AARCH64_PGTABLE,
};
struct panfrost_features {
@@ -95,6 +106,9 @@ struct panfrost_compatible {
/* Allowed PM features */
u8 pm_features;
+
+ /* GPU configuration quirks */
+ u8 gpu_quirks;
};
struct panfrost_device {
@@ -162,6 +176,11 @@ struct panfrost_mmu {
int as;
atomic_t as_count;
struct list_head list;
+ struct {
+ u64 transtab;
+ u64 memattr;
+ u64 transcfg;
+ } cfg;
};
struct panfrost_engine_usage {
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 0f3935556ac7..f1ec3b02f15a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -476,7 +476,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
}
}
- args->retained = drm_gem_shmem_madvise(&bo->base, args->madv);
+ args->retained = drm_gem_shmem_madvise_locked(&bo->base, args->madv);
if (args->retained) {
if (args->madv == PANFROST_MADV_DONTNEED)
@@ -776,6 +776,13 @@ static const struct panfrost_compatible default_data = {
.pm_domain_names = NULL,
};
+static const struct panfrost_compatible allwinner_h616_data = {
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 1,
+ .pm_features = BIT(GPU_PM_RT),
+};
+
static const struct panfrost_compatible amlogic_data = {
.num_supplies = ARRAY_SIZE(default_supplies) - 1,
.supply_names = default_supplies,
@@ -824,6 +831,7 @@ static const struct panfrost_compatible mediatek_mt8188_data = {
.num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
.pm_domain_names = mediatek_mt8183_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
+ .gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
};
static const char * const mediatek_mt8192_supplies[] = { "mali", NULL };
@@ -835,6 +843,7 @@ static const struct panfrost_compatible mediatek_mt8192_data = {
.num_pm_domains = ARRAY_SIZE(mediatek_mt8192_pm_domains),
.pm_domain_names = mediatek_mt8192_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
+ .gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
};
static const struct of_device_id dt_match[] = {
@@ -859,6 +868,7 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "mediatek,mt8186-mali", .data = &mediatek_mt8186_data },
{ .compatible = "mediatek,mt8188-mali", .data = &mediatek_mt8188_data },
{ .compatible = "mediatek,mt8192-mali", .data = &mediatek_mt8192_data },
+ { .compatible = "allwinner,sun50i-h616-mali", .data = &allwinner_h616_data },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c
index 47751302f1bc..4042afe2fbf4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_dump.c
+++ b/drivers/gpu/drm/panfrost/panfrost_dump.c
@@ -209,7 +209,7 @@ void panfrost_core_dump(struct panfrost_job *job)
goto dump_header;
}
- ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
+ ret = drm_gem_vmap(&bo->base.base, &map);
if (ret) {
dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n");
iter.hdr->bomap.valid = 0;
@@ -228,7 +228,7 @@ void panfrost_core_dump(struct panfrost_job *job)
vaddr = map.vaddr;
memcpy(iter.data, vaddr, bo->base.base.size);
- drm_gem_vunmap_unlocked(&bo->base.base, &map);
+ drm_gem_vunmap(&bo->base.base, &map);
iter.hdr->bomap.valid = 1;
diff --git a/drivers/gpu/drm/panfrost/panfrost_features.h b/drivers/gpu/drm/panfrost/panfrost_features.h
index 7ed0cd3ea2d4..52f9d69f6db9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_features.h
+++ b/drivers/gpu/drm/panfrost/panfrost_features.h
@@ -54,6 +54,7 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
BIT_ULL(HW_FEATURE_COHERENCY_REG))
#define hw_features_g72 (\
@@ -64,6 +65,7 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
BIT_ULL(HW_FEATURE_COHERENCY_REG))
#define hw_features_g51 hw_features_g72
@@ -77,6 +79,7 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
BIT_ULL(HW_FEATURE_IDVS_GROUP_SIZE) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
BIT_ULL(HW_FEATURE_COHERENCY_REG))
#define hw_features_g76 (\
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 8e0ff3efede7..963f04ba2de6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -200,7 +200,7 @@ static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
enum drm_gem_object_status res = 0;
- if (bo->base.base.import_attach || bo->base.pages)
+ if (drm_gem_is_imported(&bo->base.base) || bo->base.pages)
res |= DRM_GEM_OBJECT_RESIDENT;
if (bo->base.madv == PANFROST_MADV_DONTNEED)
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 3d9f51bd48b6..02b60ea1433a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -51,7 +51,7 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
goto unlock_mappings;
panfrost_gem_teardown_mappings_locked(bo);
- drm_gem_shmem_purge(&bo->base);
+ drm_gem_shmem_purge_locked(&bo->base);
ret = true;
dma_resv_unlock(shmem->base.resv);
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index b91019cd5acb..f6b91c052cfb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -26,6 +26,48 @@
#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)
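+/*
+ * Translate an ARM LPAE MAIR value into Mali's MEMATTR format, one
+ * attribute byte at a time. As a worked example of the logic below: a MAIR
+ * byte of 0xff (inner/outer write-back, read/write-allocate) maps to 0x8f
+ * on a non-coherent device (AS_MEMATTR_AARCH64_INNER_OUTER_WB |
+ * AS_MEMATTR_AARCH64_SH_MIDGARD_INNER | INNER_ALLOC_EXPL(true, true)) and
+ * to 0x9f when coherent (SH_CPU_INNER instead).
+ */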
+static u64 mair_to_memattr(u64 mair, bool coherent)
+{
+ u64 memattr = 0;
+ u32 i;
+
+ for (i = 0; i < 8; i++) {
+ u8 in_attr = mair >> (8 * i), out_attr;
+ u8 outer = in_attr >> 4, inner = in_attr & 0xf;
+
+ /* For caching to be enabled, the inner and outer caching policies
+ * must both be write-back; if either of them is write-through or
+ * non-cacheable, we just choose non-cacheable. Device memory is
+ * also translated to non-cacheable.
+ */
+ if (!(outer & 3) || !(outer & 4) || !(inner & 4)) {
+ out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_NC |
+ AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
+ AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
+ } else {
+ out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
+ AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);
+ /* Use SH_MIDGARD_INNER mode when the device isn't coherent,
+ * so SH_IS, which is used when IOMMU_CACHE is set, maps
+ * to Mali's internal-shareable mode. As per the Mali
+ * Spec, inner and outer-shareable modes aren't allowed
+ * for WB memory when coherency is disabled.
+ * Use SH_CPU_INNER mode when coherency is enabled, so
+ * that SH_IS actually maps to the standard definition of
+ * inner-shareable.
+ */
+ if (!coherent)
+ out_attr |= AS_MEMATTR_AARCH64_SH_MIDGARD_INNER;
+ else
+ out_attr |= AS_MEMATTR_AARCH64_SH_CPU_INNER;
+ }
+
+ memattr |= (u64)out_attr << (8 * i);
+ }
+
+ return memattr;
+}
+
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
int ret;
@@ -124,9 +166,9 @@ static int mmu_hw_do_operation(struct panfrost_device *pfdev,
static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
int as_nr = mmu->as;
- struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
- u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
- u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
+ u64 transtab = mmu->cfg.transtab;
+ u64 memattr = mmu->cfg.memattr;
+ u64 transcfg = mmu->cfg.transcfg;
mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
@@ -139,6 +181,9 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
+ mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
+ mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));
+
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
@@ -152,9 +197,67 @@ static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);
+ mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
+ mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), 0);
+
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
+static int mmu_cfg_init_mali_lpae(struct panfrost_mmu *mmu)
+{
+ struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;
+
+ /* TODO: The following fields are duplicated between the MMU and Page
+ * Table config structs. Ideally, they should be kept in one place.
+ */
+ mmu->cfg.transtab = pgtbl_cfg->arm_mali_lpae_cfg.transtab;
+ mmu->cfg.memattr = pgtbl_cfg->arm_mali_lpae_cfg.memattr;
+ mmu->cfg.transcfg = AS_TRANSCFG_ADRMODE_LEGACY;
+
+ return 0;
+}
+
+static int mmu_cfg_init_aarch64_4k(struct panfrost_mmu *mmu)
+{
+ struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;
+ struct panfrost_device *pfdev = mmu->pfdev;
+
+ if (drm_WARN_ON(pfdev->ddev, pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
+ ~AS_TRANSTAB_AARCH64_4K_ADDR_MASK))
+ return -EINVAL;
+
+ mmu->cfg.transtab = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+
+ mmu->cfg.memattr = mair_to_memattr(pgtbl_cfg->arm_lpae_s1_cfg.mair,
+ pgtbl_cfg->coherent_walk);
+
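+ /*
+ * INA_BITS apparently encodes the number of ignored top address bits,
+ * hence 55 - ias (e.g. 7 for a 48-bit input address space).
+ */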
+ mmu->cfg.transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
+ AS_TRANSCFG_PTW_RA |
+ AS_TRANSCFG_ADRMODE_AARCH64_4K |
+ AS_TRANSCFG_INA_BITS(55 - pgtbl_cfg->ias);
+ if (pgtbl_cfg->coherent_walk)
+ mmu->cfg.transcfg |= AS_TRANSCFG_PTW_SH_OS;
+
+ return 0;
+}
+
+static int panfrost_mmu_cfg_init(struct panfrost_mmu *mmu,
+ enum io_pgtable_fmt fmt)
+{
+ struct panfrost_device *pfdev = mmu->pfdev;
+
+ switch (fmt) {
+ case ARM_64_LPAE_S1:
+ return mmu_cfg_init_aarch64_4k(mmu);
+ case ARM_MALI_LPAE:
+ return mmu_cfg_init_mali_lpae(mmu);
+ default:
+ /* This should never happen */
+ drm_WARN(pfdev->ddev, 1, "Invalid pgtable format");
+ return -EINVAL;
+ }
+}
+
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
int as;
@@ -327,7 +430,7 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
struct drm_gem_object *obj = &shmem->base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
struct sg_table *sgt;
- int prot = IOMMU_READ | IOMMU_WRITE;
+ int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE;
if (WARN_ON(mapping->active))
return 0;
@@ -489,7 +592,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
goto err_unlock;
}
bo->base.pages = pages;
- bo->base.pages_use_count = 1;
+ refcount_set(&bo->base.pages_use_count, 1);
} else {
pages = bo->base.pages;
if (pages[page_offset]) {
@@ -528,7 +631,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
goto err_map;
mmu_map_sg(pfdev, bomapping->mmu, addr,
- IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+ IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC, sgt);
bomapping->active = true;
bo->heap_rss_size += SZ_2M;
@@ -615,7 +718,22 @@ static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
{
+ u32 va_bits = GPU_MMU_FEATURES_VA_BITS(pfdev->features.mmu_features);
+ u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(pfdev->features.mmu_features);
struct panfrost_mmu *mmu;
+ enum io_pgtable_fmt fmt;
+ int ret;
+
+ if (pfdev->comp->gpu_quirks & BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE)) {
+ if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU)) {
+ dev_err_once(pfdev->dev,
+ "AARCH64_4K page table not supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+ fmt = ARM_64_LPAE_S1;
+ } else {
+ fmt = ARM_MALI_LPAE;
+ }
mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
if (!mmu)
@@ -633,23 +751,33 @@ struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
.pgsize_bitmap = SZ_4K | SZ_2M,
- .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
- .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
+ .ias = va_bits,
+ .oas = pa_bits,
.coherent_walk = pfdev->coherent,
.tlb = &mmu_tlb_ops,
.iommu_dev = pfdev->dev,
};
- mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
- mmu);
+ mmu->pgtbl_ops = alloc_io_pgtable_ops(fmt, &mmu->pgtbl_cfg, mmu);
if (!mmu->pgtbl_ops) {
- kfree(mmu);
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto err_free_mmu;
}
+ ret = panfrost_mmu_cfg_init(mmu, fmt);
+ if (ret)
+ goto err_free_io_pgtable;
+
kref_init(&mmu->refcount);
return mmu;
+
+err_free_io_pgtable:
+ free_io_pgtable_ops(mmu->pgtbl_ops);
+
+err_free_mmu:
+ kfree(mmu);
+ return ERR_PTR(ret);
}
static const char *access_type_name(struct panfrost_device *pfdev,
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index ba9b6e2b2636..52befead08c6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -106,7 +106,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
goto err_close_bo;
}
- ret = drm_gem_vmap_unlocked(&bo->base, &map);
+ ret = drm_gem_vmap(&bo->base, &map);
if (ret)
goto err_put_mapping;
perfcnt->buf = map.vaddr;
@@ -165,7 +165,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
return 0;
err_vunmap:
- drm_gem_vunmap_unlocked(&bo->base, &map);
+ drm_gem_vunmap(&bo->base, &map);
err_put_mapping:
panfrost_gem_mapping_put(perfcnt->mapping);
err_close_bo:
@@ -195,7 +195,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
perfcnt->user = NULL;
- drm_gem_vunmap_unlocked(&perfcnt->mapping->obj->base.base, &map);
+ drm_gem_vunmap(&perfcnt->mapping->obj->base.base, &map);
perfcnt->buf = NULL;
panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index c7bba476ab3f..2b8f1617b836 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -16,6 +16,8 @@
#define GROUPS_L2_COHERENT BIT(0) /* Cores groups are l2 coherent */
#define GPU_MMU_FEATURES 0x014 /* (RO) MMU features */
+#define GPU_MMU_FEATURES_VA_BITS(x) ((x) & GENMASK(7, 0))
+#define GPU_MMU_FEATURES_PA_BITS(x) (((x) >> 8) & GENMASK(7, 0))
#define GPU_AS_PRESENT 0x018 /* (RO) Address space slots present */
#define GPU_JS_PRESENT 0x01C /* (RO) Job slots present */
@@ -299,6 +301,17 @@
#define AS_TRANSTAB_HI(as) (MMU_AS(as) + 0x04) /* (RW) Translation Table Base Address for address space n, high word */
#define AS_MEMATTR_LO(as) (MMU_AS(as) + 0x08) /* (RW) Memory attributes for address space n, low word. */
#define AS_MEMATTR_HI(as) (MMU_AS(as) + 0x0C) /* (RW) Memory attributes for address space n, high word. */
+#define AS_MEMATTR_AARCH64_INNER_ALLOC_IMPL (2 << 2)
+#define AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(w, r) ((3 << 2) | \
+ ((w) ? BIT(0) : 0) | \
+ ((r) ? BIT(1) : 0))
+#define AS_MEMATTR_AARCH64_SH_MIDGARD_INNER (0 << 4)
+#define AS_MEMATTR_AARCH64_SH_CPU_INNER (1 << 4)
+#define AS_MEMATTR_AARCH64_SH_CPU_INNER_SHADER_COH (2 << 4)
+#define AS_MEMATTR_AARCH64_SHARED (0 << 6)
+#define AS_MEMATTR_AARCH64_INNER_OUTER_NC (1 << 6)
+#define AS_MEMATTR_AARCH64_INNER_OUTER_WB (2 << 6)
+#define AS_MEMATTR_AARCH64_FAULT (3 << 6)
#define AS_LOCKADDR_LO(as) (MMU_AS(as) + 0x10) /* (RW) Lock region address for address space n, low word */
#define AS_LOCKADDR_HI(as) (MMU_AS(as) + 0x14) /* (RW) Lock region address for address space n, high word */
#define AS_COMMAND(as) (MMU_AS(as) + 0x18) /* (WO) MMU command register for address space n */
@@ -309,6 +322,24 @@
/* Additional Bifrost AS registers */
#define AS_TRANSCFG_LO(as) (MMU_AS(as) + 0x30) /* (RW) Translation table configuration for address space n, low word */
#define AS_TRANSCFG_HI(as) (MMU_AS(as) + 0x34) /* (RW) Translation table configuration for address space n, high word */
+#define AS_TRANSCFG_ADRMODE_LEGACY (0 << 0)
+#define AS_TRANSCFG_ADRMODE_UNMAPPED (1 << 0)
+#define AS_TRANSCFG_ADRMODE_IDENTITY (2 << 0)
+#define AS_TRANSCFG_ADRMODE_AARCH64_4K (6 << 0)
+#define AS_TRANSCFG_ADRMODE_AARCH64_64K (8 << 0)
+#define AS_TRANSCFG_INA_BITS(x) ((x) << 6)
+#define AS_TRANSCFG_OUTA_BITS(x) ((x) << 14)
+#define AS_TRANSCFG_SL_CONCAT BIT(22)
+#define AS_TRANSCFG_PTW_MEMATTR_NC (1 << 24)
+#define AS_TRANSCFG_PTW_MEMATTR_WB (2 << 24)
+#define AS_TRANSCFG_PTW_SH_NS (0 << 28)
+#define AS_TRANSCFG_PTW_SH_OS (2 << 28)
+#define AS_TRANSCFG_PTW_SH_IS (3 << 28)
+#define AS_TRANSCFG_PTW_RA BIT(30)
+#define AS_TRANSCFG_DISABLE_HIER_AP BIT(33)
+#define AS_TRANSCFG_DISABLE_AF_FAULT BIT(34)
+#define AS_TRANSCFG_WXN BIT(35)
+#define AS_TRANSCFG_XREADABLE BIT(36)
#define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38) /* (RO) Secondary fault address for address space n, low word */
#define AS_FAULTEXTRA_HI(as) (MMU_AS(as) + 0x3C) /* (RO) Secondary fault address for address space n, high word */
@@ -324,6 +355,11 @@
#define AS_TRANSTAB_LPAE_READ_INNER BIT(2)
#define AS_TRANSTAB_LPAE_SHARE_OUTER BIT(4)
+/*
+ * Begin AARCH64_4K MMU TRANSTAB register values
+ */
+#define AS_TRANSTAB_AARCH64_4K_ADDR_MASK 0xfffffffffffffff0
+
#define AS_STATUS_AS_ACTIVE 0x01
#define AS_FAULTSTATUS_ACCESS_TYPE_MASK (0x3 << 8)
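The new AS_MEMATTR_AARCH64_* and AS_TRANSCFG_* macros pack the fields of the AArch64 page-table configuration registers. A minimal sketch of composing a TRANSCFG value from them; ina_bits and outa_bits are placeholders whose exact encodings are hardware-defined, not values taken from this patch:

	/* Illustrative only: 4K granule, write-back cacheable page-table
	 * walks, inner-shareable.
	 */
	u64 transcfg = AS_TRANSCFG_ADRMODE_AARCH64_4K |
		       AS_TRANSCFG_INA_BITS(ina_bits) |	/* input address size */
		       AS_TRANSCFG_OUTA_BITS(outa_bits) |	/* output address size */
		       AS_TRANSCFG_PTW_MEMATTR_WB |
		       AS_TRANSCFG_PTW_SH_IS;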
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
index a9da1d1eeb70..f0b2da5b2b96 100644
--- a/drivers/gpu/drm/panthor/panthor_device.c
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -171,10 +171,6 @@ int panthor_device_init(struct panthor_device *ptdev)
struct page *p;
int ret;
- ret = panthor_gpu_coherency_init(ptdev);
- if (ret)
- return ret;
-
init_completion(&ptdev->unplug.done);
ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
if (ret)
@@ -184,6 +180,11 @@ int panthor_device_init(struct panthor_device *ptdev)
if (ret)
return ret;
+#ifdef CONFIG_DEBUG_FS
+ drmm_mutex_init(&ptdev->base, &ptdev->gems.lock);
+ INIT_LIST_HEAD(&ptdev->gems.node);
+#endif
+
atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
p = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!p)
@@ -247,6 +248,10 @@ int panthor_device_init(struct panthor_device *ptdev)
if (ret)
goto err_rpm_put;
+ ret = panthor_gpu_coherency_init(ptdev);
+ if (ret)
+ goto err_unplug_gpu;
+
ret = panthor_mmu_init(ptdev);
if (ret)
goto err_unplug_gpu;
diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
index da6574021664..465d3ab1b79e 100644
--- a/drivers/gpu/drm/panthor/panthor_device.h
+++ b/drivers/gpu/drm/panthor/panthor_device.h
@@ -205,6 +205,17 @@ struct panthor_device {
/** @fast_rate: Maximum device clock frequency. Set by DVFS */
unsigned long fast_rate;
+
+#ifdef CONFIG_DEBUG_FS
+ /** @gems: Device-wide list of GEM objects owned by at least one file. */
+ struct {
+ /** @gems.lock: Protects the device-wide list of GEM objects. */
+ struct mutex lock;
+
+ /** @gems.node: Used to keep track of all the device's GEM objects. */
+ struct list_head node;
+ } gems;
+#endif
};
struct panthor_gpu_usage {
@@ -383,8 +394,6 @@ static irqreturn_t panthor_ ## __name ## _irq_threaded_handler(int irq, void *da
if (!status) \
break; \
\
- gpu_write(ptdev, __reg_prefix ## _INT_CLEAR, status); \
- \
__handler(ptdev, status); \
ret = IRQ_HANDLED; \
} \
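With the INT_CLEAR write dropped from the generic threaded-handler macro, each per-block handler is now expected to acknowledge its own interrupts, as the panthor_fw.c, panthor_gpu.c and panthor_mmu.c hunks below do. A minimal sketch of the expected shape; MY_INT_CLEAR is a placeholder register name:

	static void my_block_irq_handler(struct panthor_device *ptdev, u32 status)
	{
		/* Ack first, so events arriving while we handle re-latch the IRQ. */
		gpu_write(ptdev, MY_INT_CLEAR, status);

		/* ... block-specific handling of @status ... */
	}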
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index 06fe46e32073..6200cad22563 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -940,6 +940,7 @@ static int panthor_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data,
struct drm_file *file)
{
struct drm_panthor_bo_mmap_offset *args = data;
+ struct panthor_gem_object *bo;
struct drm_gem_object *obj;
int ret;
@@ -950,6 +951,12 @@ static int panthor_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data,
if (!obj)
return -ENOENT;
+ bo = to_panthor_bo(obj);
+ if (bo->flags & DRM_PANTHOR_BO_NO_MMAP) {
+ ret = -EPERM;
+ goto out;
+ }
+
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
@@ -1331,6 +1338,46 @@ static int panthor_ioctl_vm_get_state(struct drm_device *ddev, void *data,
return 0;
}
+static int panthor_ioctl_bo_set_label(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_panthor_bo_set_label *args = data;
+ struct drm_gem_object *obj;
+ const char *label = NULL;
+ int ret = 0;
+
+ if (args->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (args->label) {
+ label = strndup_user((const char __user *)(uintptr_t)args->label,
+ PANTHOR_BO_LABEL_MAXLEN);
+ if (IS_ERR(label)) {
+ ret = PTR_ERR(label);
+ if (ret == -EINVAL)
+ ret = -E2BIG;
+ goto err_put_obj;
+ }
+ }
+
+ /*
+ * We treat passing a label of length 0 and passing a NULL label
+ * differently, because even though they might seem conceptually
+ * similar, future uses of the BO label might expect a different
+ * behaviour in each case.
+ */
+ panthor_gem_bo_set_label(obj, label);
+
+err_put_obj:
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
static int
panthor_open(struct drm_device *ddev, struct drm_file *file)
{
@@ -1400,6 +1447,7 @@ static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
PANTHOR_IOCTL(TILER_HEAP_CREATE, tiler_heap_create, DRM_RENDER_ALLOW),
PANTHOR_IOCTL(TILER_HEAP_DESTROY, tiler_heap_destroy, DRM_RENDER_ALLOW),
PANTHOR_IOCTL(GROUP_SUBMIT, group_submit, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(BO_SET_LABEL, bo_set_label, DRM_RENDER_ALLOW),
};
static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -1496,9 +1544,34 @@ static const struct file_operations panthor_drm_driver_fops = {
};
#ifdef CONFIG_DEBUG_FS
+static int panthor_gems_show(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);
+
+ panthor_gem_debugfs_print_bos(ptdev, m);
+
+ return 0;
+}
+
+static struct drm_info_list panthor_debugfs_list[] = {
+ {"gems", panthor_gems_show, 0, NULL},
+};
+
+static int panthor_gems_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(panthor_debugfs_list,
+ ARRAY_SIZE(panthor_debugfs_list),
+ minor->debugfs_root, minor);
+
+ return 0;
+}
+
static void panthor_debugfs_init(struct drm_minor *minor)
{
panthor_mmu_debugfs_init(minor);
+ panthor_gems_debugfs_init(minor);
}
#endif
@@ -1509,6 +1582,7 @@ static void panthor_debugfs_init(struct drm_minor *minor)
* - 1.2 - adds DEV_QUERY_GROUP_PRIORITIES_INFO query
* - adds PANTHOR_GROUP_PRIORITY_REALTIME priority
* - 1.3 - adds DRM_PANTHOR_GROUP_STATE_INNOCENT flag
+ * - 1.4 - adds DRM_IOCTL_PANTHOR_BO_SET_LABEL ioctl
*/
static const struct drm_driver panthor_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
@@ -1522,7 +1596,7 @@ static const struct drm_driver panthor_drm_driver = {
.name = "panthor",
.desc = "Panthor DRM driver",
.major = 1,
- .minor = 3,
+ .minor = 4,
.gem_create_object = panthor_gem_create_object,
.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
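For reference, a hedged userspace sketch of the new BO_SET_LABEL ioctl using libdrm's drmIoctl(); the field layout of struct drm_panthor_bo_set_label is inferred from the handler above, and bo_handle is a placeholder GEM handle:

	struct drm_panthor_bo_set_label args = {
		.handle = bo_handle,
		.label = (__u64)(uintptr_t)"vertex staging buffer",
	};

	/* Labels longer than PANTHOR_BO_LABEL_MAXLEN fail with -E2BIG:
	 * strndup_user() returns -EINVAL for over-long strings and the
	 * handler remaps that to -E2BIG.
	 */
	if (drmIoctl(fd, DRM_IOCTL_PANTHOR_BO_SET_LABEL, &args))
		perror("BO_SET_LABEL");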
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index 0f52766a3120..7bc38e635329 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -449,7 +449,8 @@ panthor_fw_alloc_queue_iface_mem(struct panthor_device *ptdev,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Queue FW interface");
if (IS_ERR(mem))
return mem;
@@ -481,7 +482,8 @@ panthor_fw_alloc_suspend_buf_mem(struct panthor_device *ptdev, size_t size)
return panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev), size,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "FW suspend buffer");
}
static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
@@ -601,7 +603,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
section->mem = panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev),
section_size,
DRM_PANTHOR_BO_NO_MMAP,
- vm_map_flags, va);
+ vm_map_flags, va, "FW section");
if (IS_ERR(section->mem))
return PTR_ERR(section->mem);
@@ -1008,6 +1010,8 @@ static void panthor_fw_init_global_iface(struct panthor_device *ptdev)
static void panthor_job_irq_handler(struct panthor_device *ptdev, u32 status)
{
+ gpu_write(ptdev, JOB_INT_CLEAR, status);
+
if (!ptdev->fw->booted && (status & JOB_INT_GLOBAL_IF))
ptdev->fw->booted = true;
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index 8244a4e6c2a2..7c00fd77758b 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -2,6 +2,7 @@
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */
+#include <linux/cleanup.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
@@ -10,14 +11,64 @@
#include <drm/panthor_drm.h>
#include "panthor_device.h"
+#include "panthor_fw.h"
#include "panthor_gem.h"
#include "panthor_mmu.h"
+#ifdef CONFIG_DEBUG_FS
+static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev,
+ struct panthor_gem_object *bo)
+{
+ INIT_LIST_HEAD(&bo->debugfs.node);
+
+ bo->debugfs.creator.tgid = current->group_leader->pid;
+ get_task_comm(bo->debugfs.creator.process_name, current->group_leader);
+
+ mutex_lock(&ptdev->gems.lock);
+ list_add_tail(&bo->debugfs.node, &ptdev->gems.node);
+ mutex_unlock(&ptdev->gems.lock);
+}
+
+static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo)
+{
+ struct panthor_device *ptdev = container_of(bo->base.base.dev,
+ struct panthor_device, base);
+
+ if (list_empty(&bo->debugfs.node))
+ return;
+
+ mutex_lock(&ptdev->gems.lock);
+ list_del_init(&bo->debugfs.node);
+ mutex_unlock(&ptdev->gems.lock);
+}
+
+static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags)
+{
+ bo->debugfs.flags = usage_flags | PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED;
+}
+#else
+static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev,
+ struct panthor_gem_object *bo)
+{}
+static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo) {}
+static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags) {}
+#endif
+
static void panthor_gem_free_object(struct drm_gem_object *obj)
{
struct panthor_gem_object *bo = to_panthor_bo(obj);
struct drm_gem_object *vm_root_gem = bo->exclusive_vm_root_gem;
+ panthor_gem_debugfs_bo_rm(bo);
+
+ /*
+ * Label might have been allocated with kstrdup_const(),
+ * we need to take that into account when freeing the memory
+ */
+ kfree_const(bo->label.str);
+
+ mutex_destroy(&bo->label.lock);
+
drm_gem_free_mmap_offset(&bo->base.base);
mutex_destroy(&bo->gpuva_list_lock);
drm_gem_shmem_free(&bo->base);
@@ -67,17 +118,19 @@ out_free_bo:
* @gpu_va: GPU address assigned when mapping to the VM.
* If gpu_va == PANTHOR_VM_KERNEL_AUTO_VA, the virtual address will be
* automatically allocated.
+ * @name: Descriptive label of the BO's contents
*
* Return: A valid pointer in case of success, an ERR_PTR() otherwise.
*/
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
size_t size, u32 bo_flags, u32 vm_map_flags,
- u64 gpu_va)
+ u64 gpu_va, const char *name)
{
struct drm_gem_shmem_object *obj;
struct panthor_kernel_bo *kbo;
struct panthor_gem_object *bo;
+ u32 debug_flags = PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL;
int ret;
if (drm_WARN_ON(&ptdev->base, !vm))
@@ -97,6 +150,12 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
kbo->obj = &obj->base;
bo->flags = bo_flags;
+ if (vm == panthor_fw_vm(ptdev))
+ debug_flags |= PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED;
+
+ panthor_gem_kernel_bo_set_label(kbo, name);
+ panthor_gem_debugfs_set_usage_flags(to_panthor_bo(kbo->obj), debug_flags);
+
/* The system and GPU MMU page size might differ, which becomes a
* problem for FW sections that need to be mapped at explicit address
* since our PAGE_SIZE alignment might cover a VA range that's
@@ -129,17 +188,6 @@ err_free_bo:
return ERR_PTR(ret);
}
-static int panthor_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- struct panthor_gem_object *bo = to_panthor_bo(obj);
-
- /* Don't allow mmap on objects that have the NO_MMAP flag set. */
- if (bo->flags & DRM_PANTHOR_BO_NO_MMAP)
- return -EINVAL;
-
- return drm_gem_shmem_object_mmap(obj, vma);
-}
-
static struct dma_buf *
panthor_gem_prime_export(struct drm_gem_object *obj, int flags)
{
@@ -155,7 +203,7 @@ static enum drm_gem_object_status panthor_gem_status(struct drm_gem_object *obj)
struct panthor_gem_object *bo = to_panthor_bo(obj);
enum drm_gem_object_status res = 0;
- if (bo->base.base.import_attach || bo->base.pages)
+ if (drm_gem_is_imported(&bo->base.base) || bo->base.pages)
res |= DRM_GEM_OBJECT_RESIDENT;
return res;
@@ -169,7 +217,7 @@ static const struct drm_gem_object_funcs panthor_gem_funcs = {
.get_sg_table = drm_gem_shmem_object_get_sg_table,
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
- .mmap = panthor_gem_mmap,
+ .mmap = drm_gem_shmem_object_mmap,
.status = panthor_gem_status,
.export = panthor_gem_prime_export,
.vm_ops = &drm_gem_shmem_vm_ops,
@@ -196,6 +244,9 @@ struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t
obj->base.map_wc = !ptdev->coherent;
mutex_init(&obj->gpuva_list_lock);
drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock);
+ mutex_init(&obj->label.lock);
+
+ panthor_gem_debugfs_bo_add(ptdev, obj);
return &obj->base.base;
}
@@ -245,5 +296,153 @@ panthor_gem_create_with_handle(struct drm_file *file,
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put(&shmem->base);
+ /*
+ * No explicit flags are needed in the call below, since the
+ * function internally sets the INITIALIZED bit for us.
+ */
+ panthor_gem_debugfs_set_usage_flags(bo, 0);
+
return ret;
}
+
+void
+panthor_gem_bo_set_label(struct drm_gem_object *obj, const char *label)
+{
+ struct panthor_gem_object *bo = to_panthor_bo(obj);
+ const char *old_label;
+
+ scoped_guard(mutex, &bo->label.lock) {
+ old_label = bo->label.str;
+ bo->label.str = label;
+ }
+
+ kfree_const(old_label);
+}
+
+void
+panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label)
+{
+ const char *str;
+
+ /* We should never attempt labelling a UM-exposed GEM object */
+ if (drm_WARN_ON(bo->obj->dev, bo->obj->handle_count > 0))
+ return;
+
+ if (!label)
+ return;
+
+ str = kstrdup_const(label, GFP_KERNEL);
+ if (!str) {
+ /* Failing to allocate memory for a label isn't a fatal condition */
+ drm_warn(bo->obj->dev, "Not enough memory to allocate BO label\n");
+ return;
+ }
+
+ panthor_gem_bo_set_label(bo->obj, str);
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct gem_size_totals {
+ size_t size;
+ size_t resident;
+ size_t reclaimable;
+};
+
+static void panthor_gem_debugfs_print_flag_names(struct seq_file *m)
+{
+ int len;
+ int i;
+
+ static const char * const gem_state_flags_names[] = {
+ [PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT] = "imported",
+ [PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT] = "exported",
+ };
+
+ static const char * const gem_usage_flags_names[] = {
+ [PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT] = "kernel",
+ [PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT] = "fw-mapped",
+ };
+
+ seq_puts(m, "GEM state flags: ");
+ for (i = 0, len = ARRAY_SIZE(gem_state_flags_names); i < len; i++) {
+ if (!gem_state_flags_names[i])
+ continue;
+ seq_printf(m, "%s (0x%x)%s", gem_state_flags_names[i],
+ (u32)BIT(i), (i < len - 1) ? ", " : "\n");
+ }
+
+ seq_puts(m, "GEM usage flags: ");
+ for (i = 0, len = ARRAY_SIZE(gem_usage_flags_names); i < len; i++) {
+ if (!gem_usage_flags_names[i])
+ continue;
+ seq_printf(m, "%s (0x%x)%s", gem_usage_flags_names[i],
+ (u32)BIT(i), (i < len - 1) ? ", " : "\n\n");
+ }
+}
+
+static void panthor_gem_debugfs_bo_print(struct panthor_gem_object *bo,
+ struct seq_file *m,
+ struct gem_size_totals *totals)
+{
+ unsigned int refcount = kref_read(&bo->base.base.refcount);
+ char creator_info[32] = {};
+ size_t resident_size;
+ u32 gem_usage_flags = bo->debugfs.flags & (u32)~PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED;
+ u32 gem_state_flags = 0;
+
+ /* Skip BOs being destroyed. */
+ if (!refcount)
+ return;
+
+ resident_size = bo->base.pages ? bo->base.base.size : 0;
+
+ snprintf(creator_info, sizeof(creator_info),
+ "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
+ seq_printf(m, "%-32s%-16d%-16d%-16zd%-16zd0x%-16lx",
+ creator_info,
+ bo->base.base.name,
+ refcount,
+ bo->base.base.size,
+ resident_size,
+ drm_vma_node_start(&bo->base.base.vma_node));
+
+ if (bo->base.base.import_attach)
+ gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED;
+ if (bo->base.base.dma_buf)
+ gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED;
+
+ seq_printf(m, "0x%-8x 0x%-10x", gem_state_flags, gem_usage_flags);
+
+ scoped_guard(mutex, &bo->label.lock) {
+ seq_printf(m, "%s\n", bo->label.str ? : "");
+ }
+
+ totals->size += bo->base.base.size;
+ totals->resident += resident_size;
+ if (bo->base.madv > 0)
+ totals->reclaimable += resident_size;
+}
+
+void panthor_gem_debugfs_print_bos(struct panthor_device *ptdev,
+ struct seq_file *m)
+{
+ struct gem_size_totals totals = {0};
+ struct panthor_gem_object *bo;
+
+ panthor_gem_debugfs_print_flag_names(m);
+
+ seq_puts(m, "created-by global-name refcount size resident-size file-offset state usage label\n");
+ seq_puts(m, "----------------------------------------------------------------------------------------------------------------------------------------------\n");
+
+ scoped_guard(mutex, &ptdev->gems.lock) {
+ list_for_each_entry(bo, &ptdev->gems.node, debugfs.node) {
+ if (bo->debugfs.flags & PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED)
+ panthor_gem_debugfs_bo_print(bo, m, &totals);
+ }
+ }
+
+ seq_puts(m, "==============================================================================================================================================\n");
+ seq_printf(m, "Total size: %zd, Total resident: %zd, Total reclaimable: %zd\n",
+ totals.size, totals.resident, totals.reclaimable);
+}
+#endif
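The free path above relies on the kstrdup_const()/kfree_const() pairing from the core kernel: for strings living in .rodata (such as the literal names passed by panthor_kernel_bo_create() callers), kstrdup_const() can return the original pointer without allocating, and kfree_const() only kfree()s pointers that are real copies. A minimal sketch; user_supplied is a placeholder for a heap/user-derived string:

	const char *a = kstrdup_const("FW section", GFP_KERNEL);	/* may alias .rodata */
	const char *b = kstrdup_const(user_supplied, GFP_KERNEL);	/* real copy */

	kfree_const(a);	/* no-op if @a aliases .rodata */
	kfree_const(b);	/* kfree()s the copy */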
diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
index 5749ef2ebe03..4dd732dcd59f 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.h
+++ b/drivers/gpu/drm/panthor/panthor_gem.h
@@ -13,6 +13,56 @@
struct panthor_vm;
+#define PANTHOR_BO_LABEL_MAXLEN 4096
+
+enum panthor_debugfs_gem_state_flags {
+ PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT = 0,
+ PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT = 1,
+
+ /** @PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED: GEM BO is PRIME imported. */
+ PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED = BIT(PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT),
+
+ /** @PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED: GEM BO is PRIME exported. */
+ PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED = BIT(PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT),
+};
+
+enum panthor_debugfs_gem_usage_flags {
+ PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT = 0,
+ PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT = 1,
+
+ /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL: BO is for kernel use only. */
+ PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL = BIT(PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT),
+
+ /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED: BO is mapped on the FW VM. */
+ PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED = BIT(PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT),
+
+ /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED: BO is ready for DebugFS display. */
+ PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED = BIT(31),
+};
+
+/**
+ * struct panthor_gem_debugfs - GEM object's DebugFS list information
+ */
+struct panthor_gem_debugfs {
+ /**
+ * @node: Node used to insert the object in the device-wide list of
+ * GEM objects, to display information about it through a DebugFS file.
+ */
+ struct list_head node;
+
+ /** @creator: Information about the UM process which created the GEM. */
+ struct {
+ /** @creator.process_name: Comm name of the creating process's group leader. */
+ char process_name[TASK_COMM_LEN];
+
+ /** @creator.tgid: TGID (group leader PID) of the creating process. */
+ pid_t tgid;
+ } creator;
+
+ /** @flags: Combination of panthor_debugfs_gem_usage_flags flags */
+ u32 flags;
+};
+
/**
* struct panthor_gem_object - Driver specific GEM object.
*/
@@ -46,6 +96,24 @@ struct panthor_gem_object {
/** @flags: Combination of drm_panthor_bo_flags flags. */
u32 flags;
+
+ /**
+ * @label: BO tagging fields. The label can be assigned within the
+ * driver itself or through a specific IOCTL.
+ */
+ struct {
+ /**
+ * @label.str: Pointer to a NULL-terminated label string.
+ */
+ const char *str;
+
+ /** @label.lock: Protects access to the @label.str field. */
+ struct mutex lock;
+ } label;
+
+#ifdef CONFIG_DEBUG_FS
+ struct panthor_gem_debugfs debugfs;
+#endif
};
/**
@@ -91,6 +159,9 @@ panthor_gem_create_with_handle(struct drm_file *file,
struct panthor_vm *exclusive_vm,
u64 *size, u32 flags, uint32_t *handle);
+void panthor_gem_bo_set_label(struct drm_gem_object *obj, const char *label);
+void panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label);
+
static inline u64
panthor_kernel_bo_gpuva(struct panthor_kernel_bo *bo)
{
@@ -112,7 +183,7 @@ panthor_kernel_bo_vmap(struct panthor_kernel_bo *bo)
if (bo->kmap)
return 0;
- ret = drm_gem_vmap_unlocked(bo->obj, &map);
+ ret = drm_gem_vmap(bo->obj, &map);
if (ret)
return ret;
@@ -126,7 +197,7 @@ panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo)
if (bo->kmap) {
struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->kmap);
- drm_gem_vunmap_unlocked(bo->obj, &map);
+ drm_gem_vunmap(bo->obj, &map);
bo->kmap = NULL;
}
}
@@ -134,8 +205,13 @@ panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo)
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
size_t size, u32 bo_flags, u32 vm_map_flags,
- u64 gpu_va);
+ u64 gpu_va, const char *name);
void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo);
+#ifdef CONFIG_DEBUG_FS
+void panthor_gem_debugfs_print_bos(struct panthor_device *ptdev,
+ struct seq_file *m);
+#endif
+
#endif /* __PANTHOR_GEM_H__ */
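panthor_gem.c above uses scoped_guard() from <linux/cleanup.h>: the mutex is held exactly for the attached block and released automatically on every exit path. A minimal sketch of the pattern; do_something_with() is a placeholder:

	scoped_guard(mutex, &bo->label.lock) {
		/* bo->label.lock is held here and dropped when the block
		 * exits, including via break or return. */
		do_something_with(bo->label.str);
	}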
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c
index 671049020afa..32d678a0114e 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.c
+++ b/drivers/gpu/drm/panthor/panthor_gpu.c
@@ -150,6 +150,8 @@ static void panthor_gpu_init_info(struct panthor_device *ptdev)
static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
{
+ gpu_write(ptdev, GPU_INT_CLEAR, status);
+
if (status & GPU_IRQ_FAULT) {
u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS);
u64 address = ((u64)gpu_read(ptdev, GPU_FAULT_ADDR_HI) << 32) |
diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c
index 3bdf61c14264..d236e9ceade4 100644
--- a/drivers/gpu/drm/panthor/panthor_heap.c
+++ b/drivers/gpu/drm/panthor/panthor_heap.c
@@ -151,7 +151,8 @@ static int panthor_alloc_heap_chunk(struct panthor_heap_pool *pool,
chunk->bo = panthor_kernel_bo_create(pool->ptdev, pool->vm, heap->chunk_size,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Tiler heap chunk");
if (IS_ERR(chunk->bo)) {
ret = PTR_ERR(chunk->bo);
goto err_free_chunk;
@@ -555,7 +556,8 @@ panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm)
pool->gpu_contexts = panthor_kernel_bo_create(ptdev, vm, bosize,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Heap pool");
if (IS_ERR(pool->gpu_contexts)) {
ret = PTR_ERR(pool->gpu_contexts);
goto err_destroy_pool;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 12a02e28f50f..6ca9a2642a4e 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -781,6 +781,7 @@ out_enable_as:
if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) {
gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as));
ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as);
+ ptdev->mmu->irq.mask |= panthor_mmu_as_fault_mask(ptdev, as);
gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask);
}
@@ -1103,7 +1104,7 @@ static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
/* If the vm_bo object was destroyed, release the pin reference that
* was hold by this object.
*/
- if (unpin && !bo->base.base.import_attach)
+ if (unpin && !drm_gem_is_imported(&bo->base.base))
drm_gem_shmem_unpin(&bo->base);
drm_gpuvm_put(vm);
@@ -1234,7 +1235,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
if (ret)
goto err_cleanup;
- if (!bo->base.base.import_attach) {
+ if (!drm_gem_is_imported(&bo->base.base)) {
/* Pre-reserve the BO pages, so the map operation doesn't have to
* allocate.
*/
@@ -1245,7 +1246,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
if (IS_ERR(sgt)) {
- if (!bo->base.base.import_attach)
+ if (!drm_gem_is_imported(&bo->base.base))
drm_gem_shmem_unpin(&bo->base);
ret = PTR_ERR(sgt);
@@ -1256,7 +1257,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
preallocated_vm_bo = drm_gpuvm_bo_create(&vm->base, &bo->base.base);
if (!preallocated_vm_bo) {
- if (!bo->base.base.import_attach)
+ if (!drm_gem_is_imported(&bo->base.base))
drm_gem_shmem_unpin(&bo->base);
ret = -ENOMEM;
@@ -1282,7 +1283,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
* which will be released in panthor_vm_bo_put().
*/
if (preallocated_vm_bo != op_ctx->map.vm_bo &&
- !bo->base.base.import_attach)
+ !drm_gem_is_imported(&bo->base.base))
drm_gem_shmem_unpin(&bo->base);
op_ctx->map.bo_offset = offset;
@@ -1709,11 +1710,17 @@ static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status)
access_type, access_type_name(ptdev, fault_status),
source_id);
+ /* We don't handle VM faults at the moment, so let's just clear the
+ * interrupt and let the writer/reader crash.
+ * Note that COMPLETED irqs are never cleared, but this is fine
+ * because they are always masked.
+ */
+ gpu_write(ptdev, MMU_INT_CLEAR, mask);
+
/* Ignore MMU interrupts on this AS until it's been
* re-enabled.
*/
ptdev->mmu->irq.mask = new_int_mask;
- gpu_write(ptdev, MMU_INT_MASK, new_int_mask);
if (ptdev->mmu->as.slots[as].vm)
ptdev->mmu->as.slots[as].vm->unhandled_fault = true;
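Taken together with the first panthor_mmu.c hunk, the fault flow is: acknowledge and mask on fault, unmask when the address space is reassigned. A condensed sketch of the two sides, mirroring the hunks above rather than adding new logic:

	/* fault path: ack the IRQ, then mask this AS until re-enabled */
	gpu_write(ptdev, MMU_INT_CLEAR, mask);
	ptdev->mmu->irq.mask = new_int_mask;

	/* re-enable path: restore the fault bit in the driver-side mask */
	ptdev->mmu->irq.mask |= panthor_mmu_as_fault_mask(ptdev, as);
	gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask);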
diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h
index b7b3b3add166..a7a323dc5cf9 100644
--- a/drivers/gpu/drm/panthor/panthor_regs.h
+++ b/drivers/gpu/drm/panthor/panthor_regs.h
@@ -133,8 +133,8 @@
#define GPU_COHERENCY_PROT_BIT(name) BIT(GPU_COHERENCY_ ## name)
#define GPU_COHERENCY_PROTOCOL 0x304
-#define GPU_COHERENCY_ACE 0
-#define GPU_COHERENCY_ACE_LITE 1
+#define GPU_COHERENCY_ACE_LITE 0
+#define GPU_COHERENCY_ACE 1
#define GPU_COHERENCY_NONE 31
#define MCU_CONTROL 0x700
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 4d31d1967716..43ee57728de5 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -840,7 +840,7 @@ panthor_queue_put_syncwait_obj(struct panthor_queue *queue)
if (queue->syncwait.kmap) {
struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap);
- drm_gem_vunmap_unlocked(queue->syncwait.obj, &map);
+ drm_gem_vunmap(queue->syncwait.obj, &map);
queue->syncwait.kmap = NULL;
}
@@ -866,7 +866,7 @@ panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue
goto err_put_syncwait_obj;
queue->syncwait.obj = &bo->base.base;
- ret = drm_gem_vmap_unlocked(queue->syncwait.obj, &map);
+ ret = drm_gem_vmap(queue->syncwait.obj, &map);
if (drm_WARN_ON(&ptdev->base, ret))
goto err_put_syncwait_obj;
@@ -3332,7 +3332,8 @@ group_create_queue(struct panthor_group *group,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "CS ring buffer");
if (IS_ERR(queue->ringbuf)) {
ret = PTR_ERR(queue->ringbuf);
goto err_free_queue;
@@ -3362,7 +3363,8 @@ group_create_queue(struct panthor_group *group,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Group job stats");
if (IS_ERR(queue->profiling.slots)) {
ret = PTR_ERR(queue->profiling.slots);
@@ -3493,7 +3495,8 @@ int panthor_group_create(struct panthor_file *pfile,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Group sync objects");
if (IS_ERR(group->syncobjs)) {
ret = PTR_ERR(group->syncobjs);
goto err_put_group;
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 1e4b28d03f4d..5f460b296c0c 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -501,7 +501,7 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
* if we find it, it will take precedence. This is on the Integrator/AP
* which only has this option for PL110 graphics.
*/
- if (versatile_clcd_type == INTEGRATOR_CLCD_CM) {
+ if (versatile_clcd_type == INTEGRATOR_CLCD_CM) {
np = of_find_matching_node_and_match(NULL, impd1_clcd_of_match,
&clcd_id);
if (np)
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 2db40789235c..a7caac5b8ac8 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -3825,8 +3825,7 @@ typedef struct _ATOM_DPCD_INFO
// note2: From RV770, the memory is more than 32bit addressable, so we will change
// ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains
// exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
-// (in offset to start of memory address) is KB aligned instead of byte aligend.
-/***********************************************************************************/
+// (in offset to start of memory address) is KB aligned instead of byte aligned.
// Note3:
/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged constant across VGA or non VGA adapter,
for CAIL, The size of FB access area is known, only thing missing is the Offset of FB Access area, so we can have:
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index fa78824931cc..3f3c360dce4b 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -501,8 +501,8 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
u8 link_status[DP_LINK_STATUS_SIZE];
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
- if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux, link_status)
- <= 0)
+ if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux,
+ link_status) < 0)
return false;
if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
@@ -678,7 +678,7 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
@@ -741,7 +741,7 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
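These `<= 0` to `< 0` conversions track a change in the drm_dp_dpcd_read_link_status() return convention: instead of returning the number of bytes read (where 0 also meant failure), it now appears to return 0 on success and a negative errno on failure. Callers therefore treat only negative values as errors; a hedged sketch:

	ret = drm_dp_dpcd_read_link_status(aux, link_status);
	if (ret < 0)		/* 0 now means success, not "no data" */
		return false;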
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 11a492f21157..51a3e0fc2f56 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -8548,7 +8548,7 @@ int cik_suspend(struct radeon_device *rdev)
*/
int cik_init(struct radeon_device *rdev)
{
- struct radeon_ring *ring;
+ struct radeon_ring *ring, *ring_cp1, *ring_cp2;
int r;
/* Read BIOS */
@@ -8623,19 +8623,22 @@ int cik_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
- ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 1024 * 1024);
- r = radeon_doorbell_get(rdev, &ring->doorbell_index);
+ ring_cp1 = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+ ring_cp2 = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+ ring_cp1->ring_obj = NULL;
+ ring_cp2->ring_obj = NULL;
+ ring_cp1->doorbell_index = RADEON_MAX_DOORBELLS;
+ ring_cp2->doorbell_index = RADEON_MAX_DOORBELLS;
+
+ r600_ring_init(rdev, ring_cp1, 1024 * 1024);
+ r = radeon_doorbell_get(rdev, &ring_cp1->doorbell_index);
if (r)
return r;
- ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 1024 * 1024);
- r = radeon_doorbell_get(rdev, &ring->doorbell_index);
+ r600_ring_init(rdev, ring_cp2, 1024 * 1024);
+ r = radeon_doorbell_get(rdev, &ring_cp2->doorbell_index);
if (r)
- return r;
+ goto out;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
ring->ring_obj = NULL;
@@ -8653,12 +8656,16 @@ int cik_init(struct radeon_device *rdev)
r = r600_pcie_gart_init(rdev);
if (r)
- return r;
+ goto out;
rdev->accel_working = true;
r = cik_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
+ radeon_doorbell_free(rdev, ring_cp1->doorbell_index);
+ radeon_doorbell_free(rdev, ring_cp2->doorbell_index);
+ ring_cp1->doorbell_index = RADEON_MAX_DOORBELLS;
+ ring_cp2->doorbell_index = RADEON_MAX_DOORBELLS;
cik_cp_fini(rdev);
cik_sdma_fini(rdev);
cik_irq_fini(rdev);
@@ -8678,10 +8685,16 @@ int cik_init(struct radeon_device *rdev)
*/
if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
DRM_ERROR("radeon: MC ucode required for NI+.\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
return 0;
+
+out:
+ radeon_doorbell_free(rdev, ring_cp1->doorbell_index);
+ radeon_doorbell_free(rdev, ring_cp2->doorbell_index);
+ return r;
}
/**
@@ -8695,6 +8708,7 @@ int cik_init(struct radeon_device *rdev)
*/
void cik_fini(struct radeon_device *rdev)
{
+ struct radeon_ring *ring;
radeon_pm_fini(rdev);
cik_cp_fini(rdev);
cik_sdma_fini(rdev);
@@ -8708,6 +8722,10 @@ void cik_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
uvd_v1_0_fini(rdev);
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+ radeon_doorbell_free(rdev, ring->doorbell_index);
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+ radeon_doorbell_free(rdev, ring->doorbell_index);
radeon_uvd_fini(rdev);
radeon_vce_fini(rdev);
cik_pcie_gart_fini(rdev);
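The cik.c rework leans on RADEON_MAX_DOORBELLS as an "unallocated" sentinel: both compute rings start out with that index, so the shared out: error path can call radeon_doorbell_free() unconditionally, assuming (as the radeon doorbell code appears to) that out-of-range indices are ignored. Sketch of the pattern:

	ring_cp1->doorbell_index = RADEON_MAX_DOORBELLS;	/* "not allocated" */

	/* ... radeon_doorbell_get() may or may not succeed ... */

	/* safe on both allocated and sentinel indices */
	radeon_doorbell_free(rdev, ring_cp1->doorbell_index);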
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 661f374f5f27..9758f3a9df75 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -290,28 +290,6 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
return result;
}
-/*
- * write the audio workaround status to the hardware
- */
-void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- uint32_t offset = dig->afmt->offset;
- bool hdmi_audio_workaround = false; /* FIXME */
- u32 value;
-
- if (!hdmi_audio_workaround ||
- r600_hdmi_is_audio_buffer_filled(encoder))
- value = 0; /* disable workaround */
- else
- value = HDMI0_AUDIO_TEST_EN; /* enable workaround */
- WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
- value, ~HDMI0_AUDIO_TEST_EN);
-}
-
void r600_hdmi_audio_set_dto(struct radeon_device *rdev,
struct radeon_crtc *crtc, unsigned int clock)
{
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8605c074d9f7..63c47585afbc 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -394,9 +394,6 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool interruptible, l
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_any(struct radeon_device *rdev,
- struct radeon_fence **fences,
- bool intr);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 8f5e07834fcc..9e697f10f9ca 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -401,7 +401,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock);
void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
size_t size);
void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock);
-void r600_hdmi_audio_workaround(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
u32 r600_get_xclk(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 64b26bfeafc9..b8e6202f1d5b 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -409,7 +409,6 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a,
* radeon_cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
* @error: error number
- * @backoff: indicator to backoff the reservation
*
* If error is set than unvalidate buffer, otherwise just free memory
* used by parsing context.
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 8ff4f18b51a9..5b5b54e876d4 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -575,48 +575,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
}
/**
- * radeon_fence_wait_any - wait for a fence to signal on any ring
- *
- * @rdev: radeon device pointer
- * @fences: radeon fence object(s)
- * @intr: use interruptable sleep
- *
- * Wait for any requested fence to signal (all asics). Fence
- * array is indexed by ring id. @intr selects whether to use
- * interruptable (true) or non-interruptable (false) sleep when
- * waiting for the fences. Used by the suballocator.
- * Returns 0 if any fence has passed, error for all other cases.
- */
-int radeon_fence_wait_any(struct radeon_device *rdev,
- struct radeon_fence **fences,
- bool intr)
-{
- uint64_t seq[RADEON_NUM_RINGS];
- unsigned int i, num_rings = 0;
- long r;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- seq[i] = 0;
-
- if (!fences[i])
- continue;
-
- seq[i] = fences[i]->seq;
- ++num_rings;
- }
-
- /* nothing to wait for ? */
- if (num_rings == 0)
- return -ENOENT;
-
- r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
- if (r < 0)
- return r;
-
- return 0;
-}
-
-/**
* radeon_fence_wait_next - wait for the next fence to signal
*
* @rdev: radeon device pointer
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 65a911ddd509..f9267b026f8d 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1109,7 +1109,7 @@
#define MIN_POWER_SHIFT 0
#define MAX_POWER(x) ((x) << 16)
#define MAX_POWER_MASK (0x3fff << 16)
-#define MAX_POWER_SHIFT 0
+#define MAX_POWER_SHIFT 16
#define SQ_POWER_THROTTLE2 0x8e5c
#define MAX_POWER_DELTA(x) ((x) << 0)
#define MAX_POWER_DELTA_MASK (0x3fff << 0)
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c b/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
index 79b67c406bd6..93ba115d654f 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
@@ -32,11 +32,6 @@ struct rcar_cmm {
} lut;
};
-static inline int rcar_cmm_read(struct rcar_cmm *rcmm, u32 reg)
-{
- return ioread32(rcmm->base + reg);
-}
-
static inline void rcar_cmm_write(struct rcar_cmm *rcmm, u32 reg, u32 data)
{
iowrite32(data, rcmm->base + reg);
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
index 70d8ad065bfa..4c8fe83dd610 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
@@ -705,7 +705,7 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
ret = of_parse_phandle_with_fixed_args(np, vsps_prop_name,
cells, i, &args);
if (ret < 0)
- goto error;
+ goto done;
/*
* Add the VSP to the list or update the corresponding existing
@@ -743,13 +743,11 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
vsp->dev = rcdu;
ret = rcar_du_vsp_init(vsp, vsps[i].np, vsps[i].crtcs_mask);
- if (ret < 0)
- goto error;
+ if (ret)
+ goto done;
}
- return 0;
-
-error:
+done:
for (i = 0; i < ARRAY_SIZE(vsps); ++i)
of_node_put(vsps[i].np);
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
index 380a855b832a..a9145253294f 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
@@ -634,6 +634,7 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
}
static int rcar_lvds_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
@@ -641,7 +642,7 @@ static int rcar_lvds_attach(struct drm_bridge *bridge,
if (!lvds->next_bridge)
return 0;
- return drm_bridge_attach(bridge->encoder, lvds->next_bridge, bridge,
+ return drm_bridge_attach(encoder, lvds->next_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index d1e626068065..7ab8be46c7f6 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -799,11 +799,12 @@ static void rcar_mipi_dsi_stop_video(struct rcar_mipi_dsi *dsi)
*/
static int rcar_mipi_dsi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->next_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig
index 7c1817240846..e57536fd6f4d 100644
--- a/drivers/gpu/drm/renesas/rz-du/Kconfig
+++ b/drivers/gpu/drm/renesas/rz-du/Kconfig
@@ -14,10 +14,15 @@ config DRM_RZG2L_DU
Choose this option if you have an RZ/G2L alike chipset.
If M is selected the module will be called rzg2l-du-drm.
-config DRM_RZG2L_MIPI_DSI
- tristate "RZ/G2L MIPI DSI Encoder Support"
- depends on DRM && DRM_BRIDGE && OF
- depends on ARCH_RENESAS || COMPILE_TEST
- select DRM_MIPI_DSI
+config DRM_RZG2L_USE_MIPI_DSI
+ bool "RZ/G2L MIPI DSI Encoder Support"
+ depends on DRM_BRIDGE && OF
+ depends on DRM_RZG2L_DU || COMPILE_TEST
+ default DRM_RZG2L_DU
help
Enable support for the RZ/G2L Display Unit embedded MIPI DSI encoders.
+
+config DRM_RZG2L_MIPI_DSI
+ def_tristate DRM_RZG2L_DU
+ depends on DRM_RZG2L_USE_MIPI_DSI
+ select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
index cbd9b9841267..5e40f0c1e7b0 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
@@ -79,7 +79,7 @@ DEFINE_DRM_GEM_DMA_FOPS(rzg2l_du_fops);
static const struct drm_driver rzg2l_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- .dumb_create = rzg2l_du_dumb_create,
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(rzg2l_du_dumb_create),
DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &rzg2l_du_fops,
.name = "rzg2l-du",
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
index 90c6269ccd29..55a97691e9b2 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
@@ -36,23 +36,129 @@
static const struct rzg2l_du_format_info rzg2l_du_format_infos[] = {
{
- .fourcc = DRM_FORMAT_XRGB8888,
- .v4l2 = V4L2_PIX_FMT_XBGR32,
- .bpp = 32,
+ .fourcc = DRM_FORMAT_RGB332,
+ .v4l2 = V4L2_PIX_FMT_RGB332,
.planes = 1,
.hsub = 1,
}, {
- .fourcc = DRM_FORMAT_ARGB8888,
- .v4l2 = V4L2_PIX_FMT_ABGR32,
- .bpp = 32,
+ .fourcc = DRM_FORMAT_ARGB4444,
+ .v4l2 = V4L2_PIX_FMT_ARGB444,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XRGB4444,
+ .v4l2 = V4L2_PIX_FMT_XRGB444,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_ARGB1555,
+ .v4l2 = V4L2_PIX_FMT_ARGB555,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XRGB1555,
+ .v4l2 = V4L2_PIX_FMT_XRGB555,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_RGB565,
+ .v4l2 = V4L2_PIX_FMT_RGB565,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGR888,
+ .v4l2 = V4L2_PIX_FMT_RGB24,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGB888,
.v4l2 = V4L2_PIX_FMT_BGR24,
- .bpp = 24,
.planes = 1,
.hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGRA8888,
+ .v4l2 = V4L2_PIX_FMT_ARGB32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGRX8888,
+ .v4l2 = V4L2_PIX_FMT_XRGB32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_ARGB8888,
+ .v4l2 = V4L2_PIX_FMT_ABGR32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XRGB8888,
+ .v4l2 = V4L2_PIX_FMT_XBGR32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_UYVY,
+ .v4l2 = V4L2_PIX_FMT_UYVY,
+ .planes = 1,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUYV,
+ .v4l2 = V4L2_PIX_FMT_YUYV,
+ .planes = 1,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YVYU,
+ .v4l2 = V4L2_PIX_FMT_YVYU,
+ .planes = 1,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV12,
+ .v4l2 = V4L2_PIX_FMT_NV12M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV21,
+ .v4l2 = V4L2_PIX_FMT_NV21M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV16,
+ .v4l2 = V4L2_PIX_FMT_NV16M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV61,
+ .v4l2 = V4L2_PIX_FMT_NV61M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUV420,
+ .v4l2 = V4L2_PIX_FMT_YUV420M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YVU420,
+ .v4l2 = V4L2_PIX_FMT_YVU420M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUV422,
+ .v4l2 = V4L2_PIX_FMT_YUV422M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YVU422,
+ .v4l2 = V4L2_PIX_FMT_YVU422M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUV444,
+ .v4l2 = V4L2_PIX_FMT_YUV444M,
+ .planes = 3,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_YVU444,
+ .v4l2 = V4L2_PIX_FMT_YVU444M,
+ .planes = 3,
+ .hsub = 1,
}
};
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
index 876e97cfbf45..e2c599f115c6 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
@@ -23,7 +23,6 @@ struct sg_table;
struct rzg2l_du_format_info {
u32 fourcc;
u32 v4l2;
- unsigned int bpp;
unsigned int planes;
unsigned int hsub;
};
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
index 8643ff2eec46..040d4e4aff00 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
@@ -340,6 +340,15 @@ int rzg2l_du_vsp_init(struct rzg2l_du_vsp *vsp, struct device_node *np,
drm_plane_helper_add(&plane->plane,
&rzg2l_du_vsp_plane_helper_funcs);
+
+ drm_plane_create_alpha_property(&plane->plane);
+ drm_plane_create_zpos_property(&plane->plane, i, 0,
+ num_planes - 1);
+
+ drm_plane_create_blend_mode_property(&plane->plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
}
return 0;
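With alpha, zpos and pixel-blend-mode properties now exposed, compositors can reorder and blend these VSP planes through the atomic API. A hedged userspace sketch in libdrm C; zpos_prop_id is assumed to have been looked up beforehand via drmModeObjectGetProperties():

	/* move this plane above zpos 0 in the next atomic commit */
	drmModeAtomicAddProperty(req, plane_id, zpos_prop_id, 1);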
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
index 4550c6d84796..dc6ab012cdb6 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
@@ -479,7 +479,7 @@ static int rzg2l_mipi_dsi_start_video(struct rzg2l_mipi_dsi *dsi)
u32 status;
int ret;
- /* Configuration for Blanking sequence and start video input*/
+ /* Configuration for Blanking sequence and start video input */
vich1set0r = VICH1SET0R_HFPNOLP | VICH1SET0R_HBPNOLP |
VICH1SET0R_HSANOLP | VICH1SET0R_VSTART;
rzg2l_mipi_dsi_link_write(dsi, VICH1SET0R, vich1set0r);
@@ -523,11 +523,12 @@ err:
*/
static int rzg2l_mipi_dsi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->next_bridge, bridge,
flags);
}
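The rcar_lvds, rcar_mipi_dsi and rzg2l_mipi_dsi hunks all follow the same drm_bridge_funcs.attach signature change in this series: the encoder is passed in explicitly rather than read from bridge->encoder. The resulting shape, with next_bridge a placeholder for the driver's downstream bridge:

	static int my_bridge_attach(struct drm_bridge *bridge,
				    struct drm_encoder *encoder,
				    enum drm_bridge_attach_flags flags)
	{
		return drm_bridge_attach(encoder, next_bridge, bridge, flags);
	}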
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 26c4410b2407..ab525668939a 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -2,12 +2,14 @@
config DRM_ROCKCHIP
tristate "DRM Support for Rockchip"
depends on DRM && ROCKCHIP_IOMMU
+ depends on OF
select DRM_CLIENT_SELECTION
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select VIDEOMODE_HELPERS
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
+ select DRM_DISPLAY_DP_AUX_BUS if ROCKCHIP_ANALOGIX_DP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
select DRM_DW_HDMI_QP if ROCKCHIP_DW_HDMI_QP
select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index a8265a1bf9ff..d30f0983a53a 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -21,6 +21,7 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
+#include <drm/display/drm_dp_aux_bus.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -51,11 +52,13 @@ struct rockchip_grf_reg_field {
/**
* struct rockchip_dp_chip_data - splite the grf setting of kind of chips
* @lcdc_sel: grf register field of lcdc_sel
+ * @edp_mode: grf register field of edp_mode
* @chip_type: specific chip type
* @reg: register base address
*/
struct rockchip_dp_chip_data {
const struct rockchip_grf_reg_field lcdc_sel;
+ const struct rockchip_grf_reg_field edp_mode;
u32 chip_type;
u32 reg;
};
@@ -70,6 +73,7 @@ struct rockchip_dp_device {
struct clk *grfclk;
struct regmap *grf;
struct reset_control *rst;
+ struct reset_control *apbrst;
const struct rockchip_dp_chip_data *data;
@@ -115,6 +119,10 @@ static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
usleep_range(10, 20);
reset_control_deassert(dp->rst);
+ reset_control_assert(dp->apbrst);
+ usleep_range(10, 20);
+ reset_control_deassert(dp->apbrst);
+
return 0;
}
@@ -136,12 +144,21 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
return ret;
}
+ ret = rockchip_grf_field_write(dp->grf, &dp->data->edp_mode, 1);
+ if (ret != 0)
+ DRM_DEV_ERROR(dp->dev, "failed to set edp mode %d\n", ret);
+
return ret;
}
static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
{
struct rockchip_dp_device *dp = pdata_encoder_to_dp(plat_data);
+ int ret;
+
+ ret = rockchip_grf_field_write(dp->grf, &dp->data->edp_mode, 0);
+ if (ret != 0)
+ DRM_DEV_ERROR(dp->dev, "failed to set edp mode %d\n", ret);
clk_disable_unprepare(dp->pclk);
@@ -205,6 +222,10 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder,
struct rockchip_dp_device *dp = encoder_to_dp(encoder);
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
+ struct of_endpoint endpoint;
+ struct device_node *remote_port, *remote_port_parent;
+ char name[32];
+ u32 port_id;
int ret;
crtc = rockchip_dp_drm_get_new_crtc(encoder, state);
@@ -222,13 +243,27 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder,
return;
}
- ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
+ ret = drm_of_encoder_active_endpoint(dp->dev->of_node, encoder, &endpoint);
if (ret < 0)
return;
- DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
+ remote_port_parent = of_graph_get_remote_port_parent(endpoint.local_node);
+ if (remote_port_parent) {
+ if (of_get_child_by_name(remote_port_parent, "ports")) {
+ remote_port = of_graph_get_remote_port(endpoint.local_node);
+ of_property_read_u32(remote_port, "reg", &port_id);
+ of_node_put(remote_port);
+ sprintf(name, "%s vp%d", remote_port_parent->full_name, port_id);
+ } else {
+ sprintf(name, "%s %s",
+ remote_port_parent->full_name, endpoint.id ? "vopl" : "vopb");
+ }
+ of_node_put(remote_port_parent);
+
+ DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", name);
+ }
- ret = rockchip_grf_field_write(dp->grf, &dp->data->lcdc_sel, ret);
+ ret = rockchip_grf_field_write(dp->grf, &dp->data->lcdc_sel, endpoint.id);
if (ret != 0)
DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
@@ -322,6 +357,12 @@ static int rockchip_dp_of_probe(struct rockchip_dp_device *dp)
return PTR_ERR(dp->rst);
}
+ dp->apbrst = devm_reset_control_get_optional(dev, "apb");
+ if (IS_ERR(dp->apbrst)) {
+ DRM_DEV_ERROR(dev, "failed to get apb reset control\n");
+ return PTR_ERR(dp->apbrst);
+ }
+
return 0;
}
@@ -392,11 +433,28 @@ static const struct component_ops rockchip_dp_component_ops = {
.unbind = rockchip_dp_unbind,
};
+static int rockchip_dp_link_panel(struct drm_dp_aux *aux)
+{
+ struct analogix_dp_plat_data *plat_data = analogix_dp_aux_to_plat_data(aux);
+ struct rockchip_dp_device *dp = pdata_encoder_to_dp(plat_data);
+ int ret;
+
+ /*
+ * If drm_of_find_panel_or_bridge() returns -ENODEV, there is no panel or
+ * bridge node at all. Keep probing anyway, to support bridges that need
+ * no driver as well as plain DP (non-eDP) use cases.
+ */
+ ret = drm_of_find_panel_or_bridge(dp->dev->of_node, 1, 0, &plat_data->panel, NULL);
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ return component_add(dp->dev, &rockchip_dp_component_ops);
+}
+
static int rockchip_dp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct rockchip_dp_chip_data *dp_data;
- struct drm_panel *panel = NULL;
struct rockchip_dp_device *dp;
struct resource *res;
int i;
@@ -406,10 +464,6 @@ static int rockchip_dp_probe(struct platform_device *pdev)
if (!dp_data)
return -ENODEV;
- ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
- if (ret < 0 && ret != -ENODEV)
- return ret;
-
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
if (!dp)
return -ENOMEM;
@@ -432,7 +486,6 @@ static int rockchip_dp_probe(struct platform_device *pdev)
dp->dev = dev;
dp->adp = ERR_PTR(-ENODEV);
- dp->plat_data.panel = panel;
dp->plat_data.dev_type = dp->data->chip_type;
dp->plat_data.power_on = rockchip_dp_poweron;
dp->plat_data.power_off = rockchip_dp_powerdown;
@@ -448,9 +501,20 @@ static int rockchip_dp_probe(struct platform_device *pdev)
if (IS_ERR(dp->adp))
return PTR_ERR(dp->adp);
- ret = component_add(dev, &rockchip_dp_component_ops);
- if (ret)
- return ret;
+ ret = devm_of_dp_aux_populate_bus(analogix_dp_get_aux(dp->adp), rockchip_dp_link_panel);
+ if (ret) {
+ /*
+ * If devm_of_dp_aux_populate_bus() returns -ENODEV, done_probing() will
+ * not be called because there are no EP devices. In that case, call
+ * rockchip_dp_link_panel() directly to support the other valid DT
+ * configurations.
+ *
+ * NOTE: devm_of_dp_aux_populate_bus() is allowed to return -EPROBE_DEFER.
+ */
+ if (ret != -ENODEV)
+ return dev_err_probe(dp->dev, ret, "failed to populate aux bus\n");
+
+ return rockchip_dp_link_panel(analogix_dp_get_aux(dp->adp));
+ }
return 0;
}
@@ -501,9 +565,24 @@ static const struct rockchip_dp_chip_data rk3288_dp[] = {
{ /* sentinel */ }
};
+static const struct rockchip_dp_chip_data rk3588_edp[] = {
+ {
+ .edp_mode = GRF_REG_FIELD(0x0000, 0, 0),
+ .chip_type = RK3588_EDP,
+ .reg = 0xfdec0000,
+ },
+ {
+ .edp_mode = GRF_REG_FIELD(0x0004, 0, 0),
+ .chip_type = RK3588_EDP,
+ .reg = 0xfded0000,
+ },
+ { /* sentinel */ }
+};
+
static const struct of_device_id rockchip_dp_dt_ids[] = {
{.compatible = "rockchip,rk3288-dp", .data = &rk3288_dp },
{.compatible = "rockchip,rk3399-edp", .data = &rk3399_edp },
+ {.compatible = "rockchip,rk3588-edp", .data = &rk3588_edp },
{}
};
MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids);
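The probe flow after the AUX-bus conversion, condensed from the hunks above (comments only, no new logic):

	/*
	 * rockchip_dp_probe()
	 *   -> devm_of_dp_aux_populate_bus(aux, rockchip_dp_link_panel)
	 *        children on the AUX bus probe first, then the
	 *        done_probing() callback runs:
	 *          rockchip_dp_link_panel()
	 *            -> drm_of_find_panel_or_bridge()
	 *            -> component_add()
	 *   -> on -ENODEV (no AUX-bus children in DT), call
	 *      rockchip_dp_link_panel() directly instead.
	 */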
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 483ecfeaebb0..db4b4038e51d 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -10,10 +10,12 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/hdmi.h>
+#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -29,8 +31,19 @@
#include "inno_hdmi.h"
+#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
+
#define INNO_HDMI_MIN_TMDS_CLOCK 25000000U
+#define RK3036_GRF_SOC_CON2 0x148
+#define RK3036_HDMI_PHSYNC BIT(4)
+#define RK3036_HDMI_PVSYNC BIT(5)
+
+enum inno_hdmi_dev_type {
+ RK3036_HDMI,
+ RK3128_HDMI,
+};
+
struct inno_hdmi_phy_config {
unsigned long pixelclock;
u8 pre_emphasis;
@@ -38,6 +51,7 @@ struct inno_hdmi_phy_config {
};
struct inno_hdmi_variant {
+ enum inno_hdmi_dev_type dev_type;
struct inno_hdmi_phy_config *phy_configs;
struct inno_hdmi_phy_config *default_phy_config;
};
@@ -58,6 +72,7 @@ struct inno_hdmi {
struct clk *pclk;
struct clk *refclk;
void __iomem *regs;
+ struct regmap *grf;
struct drm_connector connector;
struct rockchip_encoder encoder;
@@ -374,7 +389,15 @@ static int inno_hdmi_config_video_csc(struct inno_hdmi *hdmi)
static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
struct drm_display_mode *mode)
{
- int value;
+ int value, psync;
+
+ if (hdmi->variant->dev_type == RK3036_HDMI) {
+ psync = mode->flags & DRM_MODE_FLAG_PHSYNC ? RK3036_HDMI_PHSYNC : 0;
+ value = HIWORD_UPDATE(psync, RK3036_HDMI_PHSYNC);
+ psync = mode->flags & DRM_MODE_FLAG_PVSYNC ? RK3036_HDMI_PVSYNC : 0;
+ value |= HIWORD_UPDATE(psync, RK3036_HDMI_PVSYNC);
+ regmap_write(hdmi->grf, RK3036_GRF_SOC_CON2, value);
+ }
/* Set detail external video timing polarity and interlace mode */
value = v_EXTERANL_VIDEO(1);
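The HIWORD_UPDATE() write above relies on the Rockchip GRF convention: the top 16 bits of a register word act as a write-enable mask for the bottom 16 bits, so selected fields can be changed in a single write without a read-modify-write cycle. A small illustration of the idiom, reusing the macros defined in this patch:

	/*
	 * Enable PHSYNC and clear PVSYNC in one GRF write; only the two bits
	 * named in the high-word mask are modified by the hardware.
	 */
	u32 val = HIWORD_UPDATE(RK3036_HDMI_PHSYNC, RK3036_HDMI_PHSYNC) |
		  HIWORD_UPDATE(0, RK3036_HDMI_PVSYNC);

	regmap_write(hdmi->grf, RK3036_GRF_SOC_CON2, val);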
@@ -885,32 +908,34 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
return PTR_ERR(hdmi->regs);
hdmi->pclk = devm_clk_get(hdmi->dev, "pclk");
- if (IS_ERR(hdmi->pclk)) {
- DRM_DEV_ERROR(hdmi->dev, "Unable to get HDMI pclk clk\n");
- return PTR_ERR(hdmi->pclk);
- }
+ if (IS_ERR(hdmi->pclk))
+ return dev_err_probe(dev, PTR_ERR(hdmi->pclk), "Unable to get HDMI pclk\n");
ret = clk_prepare_enable(hdmi->pclk);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev,
- "Cannot enable HDMI pclk clock: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot enable HDMI pclk: %d\n", ret);
hdmi->refclk = devm_clk_get_optional(hdmi->dev, "ref");
if (IS_ERR(hdmi->refclk)) {
- DRM_DEV_ERROR(hdmi->dev, "Unable to get HDMI reference clock\n");
- ret = PTR_ERR(hdmi->refclk);
+ ret = dev_err_probe(dev, PTR_ERR(hdmi->refclk), "Unable to get HDMI refclk\n");
goto err_disable_pclk;
}
ret = clk_prepare_enable(hdmi->refclk);
if (ret) {
- DRM_DEV_ERROR(hdmi->dev,
- "Cannot enable HDMI reference clock: %d\n", ret);
+ ret = dev_err_probe(dev, ret, "Cannot enable HDMI refclk: %d\n", ret);
goto err_disable_pclk;
}
+ if (hdmi->variant->dev_type == RK3036_HDMI) {
+ hdmi->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+ if (IS_ERR(hdmi->grf)) {
+ ret = dev_err_probe(dev, PTR_ERR(hdmi->grf),
+ "Unable to get rockchip,grf\n");
+ goto err_disable_clk;
+ }
+ }
+
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
@@ -995,11 +1020,13 @@ static void inno_hdmi_remove(struct platform_device *pdev)
}
static const struct inno_hdmi_variant rk3036_inno_hdmi_variant = {
+ .dev_type = RK3036_HDMI,
.phy_configs = rk3036_hdmi_phy_configs,
.default_phy_config = &rk3036_hdmi_phy_configs[1],
};
static const struct inno_hdmi_variant rk3128_inno_hdmi_variant = {
+ .dev_type = RK3128_HDMI,
.phy_configs = rk3128_hdmi_phy_configs,
.default_phy_config = &rk3128_hdmi_phy_configs[1],
};
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index f7a460190313..e7875b52f298 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -5,6 +5,9 @@
*/
#include <drm/drm_atomic.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -46,27 +49,20 @@ struct rk3066_hdmi {
struct clk *hclk;
void __iomem *regs;
- struct drm_connector connector;
+ struct drm_bridge bridge;
+ struct drm_connector *connector;
struct rockchip_encoder encoder;
struct rk3066_hdmi_i2c *i2c;
- struct i2c_adapter *ddc;
unsigned int tmdsclk;
struct hdmi_data_info hdmi_data;
};
-static struct rk3066_hdmi *encoder_to_rk3066_hdmi(struct drm_encoder *encoder)
+static struct rk3066_hdmi *bridge_to_rk3066_hdmi(struct drm_bridge *bridge)
{
- struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
-
- return container_of(rkencoder, struct rk3066_hdmi, encoder);
-}
-
-static struct rk3066_hdmi *connector_to_rk3066_hdmi(struct drm_connector *connector)
-{
- return container_of(connector, struct rk3066_hdmi, connector);
+ return container_of(bridge, struct rk3066_hdmi, bridge);
}
static inline u8 hdmi_readb(struct rk3066_hdmi *hdmi, u16 offset)
@@ -161,57 +157,40 @@ static void rk3066_hdmi_set_power_mode(struct rk3066_hdmi *hdmi, int mode)
hdmi->tmdsclk = DEFAULT_PLLA_RATE;
}
-static int
-rk3066_hdmi_upload_frame(struct rk3066_hdmi *hdmi, int setup_rc,
- union hdmi_infoframe *frame, u32 frame_index,
- u32 mask, u32 disable, u32 enable)
+static int rk3066_hdmi_bridge_clear_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type)
{
- if (mask)
- hdmi_modb(hdmi, HDMI_CP_AUTO_SEND_CTRL, mask, disable);
-
- hdmi_writeb(hdmi, HDMI_CP_BUF_INDEX, frame_index);
-
- if (setup_rc >= 0) {
- u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE];
- ssize_t rc, i;
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
- rc = hdmi_infoframe_pack(frame, packed_frame,
- sizeof(packed_frame));
- if (rc < 0)
- return rc;
-
- for (i = 0; i < rc; i++)
- hdmi_writeb(hdmi, HDMI_CP_BUF_ACC_HB0 + i * 4,
- packed_frame[i]);
-
- if (mask)
- hdmi_modb(hdmi, HDMI_CP_AUTO_SEND_CTRL, mask, enable);
+ if (type != HDMI_INFOFRAME_TYPE_AVI) {
+ drm_err(bridge->dev, "Unsupported infoframe type: %u\n", type);
+ return 0;
}
- return setup_rc;
+ hdmi_writeb(hdmi, HDMI_CP_BUF_INDEX, HDMI_INFOFRAME_AVI);
+
+ return 0;
}
-static int rk3066_hdmi_config_avi(struct rk3066_hdmi *hdmi,
- struct drm_display_mode *mode)
+static int
+rk3066_hdmi_bridge_write_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len)
{
- union hdmi_infoframe frame;
- int rc;
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
+ ssize_t i;
- rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- &hdmi->connector, mode);
+ if (type != HDMI_INFOFRAME_TYPE_AVI) {
+ drm_err(bridge->dev, "Unsupported infoframe type: %u\n", type);
+ return 0;
+ }
- if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444)
- frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
- else if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV422)
- frame.avi.colorspace = HDMI_COLORSPACE_YUV422;
- else
- frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+ rk3066_hdmi_bridge_clear_infoframe(bridge, type);
- frame.avi.colorimetry = hdmi->hdmi_data.colorimetry;
- frame.avi.scan_mode = HDMI_SCAN_MODE_NONE;
+ for (i = 0; i < len; i++)
+ hdmi_writeb(hdmi, HDMI_CP_BUF_ACC_HB0 + i * 4, buffer[i]);
- return rk3066_hdmi_upload_frame(hdmi, rc, &frame,
- HDMI_INFOFRAME_AVI, 0, 0, 0);
+ return 0;
}
static int rk3066_hdmi_config_video_timing(struct rk3066_hdmi *hdmi,
@@ -324,9 +303,27 @@ static void rk3066_hdmi_config_phy(struct rk3066_hdmi *hdmi)
}
static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
- struct drm_display_mode *mode)
+ struct drm_atomic_state *state)
{
- struct drm_display_info *display = &hdmi->connector.display_info;
+ struct drm_bridge *bridge = &hdmi->bridge;
+ struct drm_connector *connector;
+ struct drm_display_info *display;
+ struct drm_display_mode *mode;
+ struct drm_connector_state *new_conn_state;
+ struct drm_crtc_state *new_crtc_state;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+
+ new_conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!new_conn_state))
+ return -EINVAL;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+ if (WARN_ON(!new_crtc_state))
+ return -EINVAL;
+
+ display = &connector->display_info;
+ mode = &new_crtc_state->adjusted_mode;
hdmi->hdmi_data.vic = drm_match_cea_mode(mode);
hdmi->hdmi_data.enc_out_format = HDMI_COLORSPACE_RGB;
@@ -363,7 +360,7 @@ static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
if (display->is_hdmi) {
hdmi_modb(hdmi, HDMI_HDCP_CTRL, HDMI_VIDEO_MODE_MASK,
HDMI_VIDEO_MODE_HDMI);
- rk3066_hdmi_config_avi(hdmi, mode);
+ drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
} else {
hdmi_modb(hdmi, HDMI_HDCP_CTRL, HDMI_VIDEO_MODE_MASK, 0);
}
@@ -386,15 +383,15 @@ static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
return 0;
}
-static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
+static void rk3066_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
- struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
int mux, val;
- conn_state = drm_atomic_get_new_connector_state(state, &hdmi->connector);
+ conn_state = drm_atomic_get_new_connector_state(state, hdmi->connector);
if (WARN_ON(!conn_state))
return;
@@ -402,7 +399,7 @@ static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder,
if (WARN_ON(!crtc_state))
return;
- mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
+ mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, &hdmi->encoder.encoder);
if (mux)
val = (HDMI_VIDEO_SEL << 16) | HDMI_VIDEO_SEL;
else
@@ -413,13 +410,13 @@ static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder,
DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder enable select: vop%s\n",
(mux) ? "1" : "0");
- rk3066_hdmi_setup(hdmi, &crtc_state->adjusted_mode);
+ rk3066_hdmi_setup(hdmi, state);
}
-static void rk3066_hdmi_encoder_disable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
+static void rk3066_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
- struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder disable\n");
@@ -450,39 +447,34 @@ rk3066_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
static const
struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = {
.atomic_check = rk3066_hdmi_encoder_atomic_check,
- .atomic_enable = rk3066_hdmi_encoder_enable,
- .atomic_disable = rk3066_hdmi_encoder_disable,
};
static enum drm_connector_status
-rk3066_hdmi_connector_detect(struct drm_connector *connector, bool force)
+rk3066_hdmi_bridge_detect(struct drm_bridge *bridge)
{
- struct rk3066_hdmi *hdmi = connector_to_rk3066_hdmi(connector);
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
return (hdmi_readb(hdmi, HDMI_HPG_MENS_STA) & HDMI_HPG_IN_STATUS_HIGH) ?
connector_status_connected : connector_status_disconnected;
}
-static int rk3066_hdmi_connector_get_modes(struct drm_connector *connector)
+static const struct drm_edid *
+rk3066_hdmi_bridge_edid_read(struct drm_bridge *bridge, struct drm_connector *connector)
{
- struct rk3066_hdmi *hdmi = connector_to_rk3066_hdmi(connector);
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
const struct drm_edid *drm_edid;
- int ret = 0;
-
- if (!hdmi->ddc)
- return 0;
- drm_edid = drm_edid_read_ddc(connector, hdmi->ddc);
- drm_edid_connector_update(connector, drm_edid);
- ret = drm_edid_connector_add_modes(connector);
- drm_edid_free(drm_edid);
+ drm_edid = drm_edid_read_ddc(connector, bridge->ddc);
+ if (!drm_edid)
+ dev_dbg(hdmi->dev, "failed to get edid\n");
- return ret;
+ return drm_edid;
}
static enum drm_mode_status
-rk3066_hdmi_connector_mode_valid(struct drm_connector *connector,
- const struct drm_display_mode *mode)
+rk3066_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
{
u32 vic = drm_match_cea_mode(mode);
@@ -492,82 +484,19 @@ rk3066_hdmi_connector_mode_valid(struct drm_connector *connector,
return MODE_BAD;
}
-static struct drm_encoder *
-rk3066_hdmi_connector_best_encoder(struct drm_connector *connector)
-{
- struct rk3066_hdmi *hdmi = connector_to_rk3066_hdmi(connector);
-
- return &hdmi->encoder.encoder;
-}
-
-static int
-rk3066_hdmi_probe_single_connector_modes(struct drm_connector *connector,
- uint32_t maxX, uint32_t maxY)
-{
- if (maxX > 1920)
- maxX = 1920;
- if (maxY > 1080)
- maxY = 1080;
-
- return drm_helper_probe_single_connector_modes(connector, maxX, maxY);
-}
-
-static void rk3066_hdmi_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_funcs rk3066_hdmi_connector_funcs = {
- .fill_modes = rk3066_hdmi_probe_single_connector_modes,
- .detect = rk3066_hdmi_connector_detect,
- .destroy = rk3066_hdmi_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const
-struct drm_connector_helper_funcs rk3066_hdmi_connector_helper_funcs = {
- .get_modes = rk3066_hdmi_connector_get_modes,
- .mode_valid = rk3066_hdmi_connector_mode_valid,
- .best_encoder = rk3066_hdmi_connector_best_encoder,
+static const struct drm_bridge_funcs rk3066_hdmi_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_enable = rk3066_hdmi_bridge_atomic_enable,
+ .atomic_disable = rk3066_hdmi_bridge_atomic_disable,
+ .detect = rk3066_hdmi_bridge_detect,
+ .edid_read = rk3066_hdmi_bridge_edid_read,
+ .hdmi_clear_infoframe = rk3066_hdmi_bridge_clear_infoframe,
+ .hdmi_write_infoframe = rk3066_hdmi_bridge_write_infoframe,
+ .mode_valid = rk3066_hdmi_bridge_mode_valid,
};
-static int
-rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
-{
- struct drm_encoder *encoder = &hdmi->encoder.encoder;
- struct device *dev = hdmi->dev;
-
- encoder->possible_crtcs =
- drm_of_find_possible_crtcs(drm, dev->of_node);
-
- /*
- * If we failed to find the CRTC(s) which this encoder is
- * supposed to be connected to, it's because the CRTC has
- * not been registered yet. Defer probing, and hope that
- * the required CRTC is added later.
- */
- if (encoder->possible_crtcs == 0)
- return -EPROBE_DEFER;
-
- drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs);
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
-
- hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
-
- drm_connector_helper_add(&hdmi->connector,
- &rk3066_hdmi_connector_helper_funcs);
- drm_connector_init_with_ddc(drm, &hdmi->connector,
- &rk3066_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA,
- hdmi->ddc);
-
- drm_connector_attach_encoder(&hdmi->connector, encoder);
-
- return 0;
-}
static irqreturn_t rk3066_hdmi_hardirq(int irq, void *dev_id)
{
@@ -597,7 +526,7 @@ static irqreturn_t rk3066_hdmi_irq(int irq, void *dev_id)
{
struct rk3066_hdmi *hdmi = dev_id;
- drm_helper_hpd_irq_event(hdmi->connector.dev);
+ drm_helper_hpd_irq_event(hdmi->connector->dev);
return IRQ_HANDLED;
}
@@ -720,7 +649,7 @@ static struct i2c_adapter *rk3066_hdmi_i2c_adapter(struct rk3066_hdmi *hdmi)
strscpy(adap->name, "RK3066 HDMI", sizeof(adap->name));
i2c_set_adapdata(adap, hdmi);
- ret = i2c_add_adapter(adap);
+ ret = devm_i2c_add_adapter(hdmi->dev, adap);
if (ret) {
DRM_DEV_ERROR(hdmi->dev, "cannot add %s I2C adapter\n",
adap->name);
@@ -735,6 +664,66 @@ static struct i2c_adapter *rk3066_hdmi_i2c_adapter(struct rk3066_hdmi *hdmi)
return adap;
}
+static int
+rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
+{
+ struct drm_encoder *encoder = &hdmi->encoder.encoder;
+ struct device *dev = hdmi->dev;
+ int ret;
+
+ encoder->possible_crtcs =
+ drm_of_find_possible_crtcs(drm, dev->of_node);
+
+ /*
+ * If we failed to find the CRTC(s) which this encoder is
+ * supposed to be connected to, it's because the CRTC has
+ * not been registered yet. Defer probing, and hope that
+ * the required CRTC is added later.
+ */
+ if (encoder->possible_crtcs == 0)
+ return -EPROBE_DEFER;
+
+ drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
+
+ hdmi->bridge.driver_private = hdmi;
+ hdmi->bridge.funcs = &rk3066_hdmi_bridge_funcs;
+ hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID |
+ DRM_BRIDGE_OP_HDMI |
+ DRM_BRIDGE_OP_HPD;
+ hdmi->bridge.of_node = hdmi->dev->of_node;
+ hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ hdmi->bridge.vendor = "Rockchip";
+ hdmi->bridge.product = "RK3066 HDMI";
+
+ hdmi->bridge.ddc = rk3066_hdmi_i2c_adapter(hdmi);
+ if (IS_ERR(hdmi->bridge.ddc))
+ return PTR_ERR(hdmi->bridge.ddc);
+
+ ret = devm_drm_bridge_add(dev, &hdmi->bridge);
+ if (ret)
+ return ret;
+
+ ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ return ret;
+
+ hdmi->connector = drm_bridge_connector_init(drm, encoder);
+ if (IS_ERR(hdmi->connector)) {
+ ret = PTR_ERR(hdmi->connector);
+ dev_err(hdmi->dev, "failed to init bridge connector: %d\n", ret);
+ return ret;
+ }
+
+ drm_connector_attach_encoder(hdmi->connector, encoder);
+
+ return 0;
+}
+
static int rk3066_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
@@ -781,13 +770,6 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
/* internal hclk = hdmi_hclk / 25 */
hdmi_writeb(hdmi, HDMI_INTERNAL_CLK_DIVIDER, 25);
- hdmi->ddc = rk3066_hdmi_i2c_adapter(hdmi);
- if (IS_ERR(hdmi->ddc)) {
- ret = PTR_ERR(hdmi->ddc);
- hdmi->ddc = NULL;
- goto err_disable_hclk;
- }
-
rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_B);
usleep_range(999, 1000);
hdmi_writeb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_HOTPLUG);
@@ -798,7 +780,7 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
ret = rk3066_hdmi_register(drm, hdmi);
if (ret)
- goto err_disable_i2c;
+ goto err_disable_hclk;
dev_set_drvdata(dev, hdmi);
@@ -813,10 +795,7 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
return 0;
err_cleanup_hdmi:
- hdmi->connector.funcs->destroy(&hdmi->connector);
hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
-err_disable_i2c:
- i2c_put_adapter(hdmi->ddc);
err_disable_hclk:
clk_disable_unprepare(hdmi->hclk);
@@ -828,10 +807,8 @@ static void rk3066_hdmi_unbind(struct device *dev, struct device *master,
{
struct rk3066_hdmi *hdmi = dev_get_drvdata(dev);
- hdmi->connector.funcs->destroy(&hdmi->connector);
hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
- i2c_put_adapter(hdmi->ddc);
clk_disable_unprepare(hdmi->hclk);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index e3596e2b557d..ba6b0528d1e5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -733,11 +733,10 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
WARN_ON(vop->event);
- if (crtc->state->self_refresh_active)
+ if (crtc->state->self_refresh_active) {
rockchip_drm_set_win_enabled(crtc, false);
-
- if (crtc->state->self_refresh_active)
goto out;
+ }
mutex_lock(&vop->vop_lock);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
index 680bedbb770e..fc3ecb9fcd95 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
@@ -710,6 +710,7 @@ enum dst_factor_mode {
#define VOP2_COLOR_KEY_MASK BIT(31)
+#define RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL GENMASK(31, 30)
#define RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD BIT(28)
#define RK3568_OVL_CTRL__YUV_MODE(vp) BIT(vp)
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 0a2840cbe8e2..32c4ed685739 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -2070,7 +2070,10 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state);
ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL);
- ovl_ctrl |= RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD;
+ ovl_ctrl &= ~RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD;
+ ovl_ctrl &= ~RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL;
+ ovl_ctrl |= FIELD_PREP(RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL, vp->id);
+
if (vcstate->yuv_overlay)
ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id);
else
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 4e2099d86517..d1f788763318 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -906,21 +906,21 @@ static const struct vop_data rk3366_vop = {
static const struct vop_output rk3399_output = {
.dp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19),
- .rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19),
- .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23),
- .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27),
- .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31),
+ .rgb_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 31),
.dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 16),
- .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16),
- .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20),
- .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24),
- .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28),
+ .rgb_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 28),
.dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11),
- .rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
- .hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
- .edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
- .mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
- .mipi_dual_channel_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 3),
+ .rgb_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 12),
+ .hdmi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 13),
+ .edp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 14),
+ .mipi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 15),
+ .mipi_dual_channel_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 3),
};
static const struct vop_common rk3399_common = {
@@ -975,23 +975,23 @@ static const struct vop_win_phy rk3399_win0_data = {
.data_formats = formats_win_full_10,
.nformats = ARRAY_SIZE(formats_win_full_10),
.format_modifiers = format_modifiers_win_full_afbc,
- .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
- .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
- .fmt_10 = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 4),
- .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
- .uv_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 15),
- .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21),
- .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
- .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
- .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
- .dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
- .yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0),
- .uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
- .yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
- .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
- .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
- .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
- .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
+ .enable = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 0),
+ .format = VOP_REG(RK3399_WIN0_CTRL0, 0x7, 1),
+ .fmt_10 = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 4),
+ .rb_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 12),
+ .uv_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 15),
+ .x_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 21),
+ .y_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 22),
+ .act_info = VOP_REG(RK3399_WIN0_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(RK3399_WIN0_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(RK3399_WIN0_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(RK3399_WIN0_YRGB_MST, 0xffffffff, 0),
+ .uv_mst = VOP_REG(RK3399_WIN0_CBR_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 0),
+ .uv_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 16),
+ .src_alpha_ctl = VOP_REG(RK3399_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(RK3399_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+ .channel = VOP_REG(RK3399_WIN0_CTRL2, 0xff, 0),
};
static const struct vop_win_phy rk3399_win1_data = {
@@ -999,23 +999,23 @@ static const struct vop_win_phy rk3399_win1_data = {
.data_formats = formats_win_full_10,
.nformats = ARRAY_SIZE(formats_win_full_10),
.format_modifiers = format_modifiers_win_full,
- .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
- .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
- .fmt_10 = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 4),
- .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
- .uv_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 15),
- .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21),
- .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
- .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
- .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
- .dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
- .yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0),
- .uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
- .yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
- .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
- .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
- .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
- .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
+ .enable = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 0),
+ .format = VOP_REG(RK3399_WIN0_CTRL0, 0x7, 1),
+ .fmt_10 = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 4),
+ .rb_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 12),
+ .uv_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 15),
+ .x_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 21),
+ .y_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 22),
+ .act_info = VOP_REG(RK3399_WIN0_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(RK3399_WIN0_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(RK3399_WIN0_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(RK3399_WIN0_YRGB_MST, 0xffffffff, 0),
+ .uv_mst = VOP_REG(RK3399_WIN0_CBR_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 0),
+ .uv_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 16),
+ .src_alpha_ctl = VOP_REG(RK3399_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(RK3399_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+ .channel = VOP_REG(RK3399_WIN0_CTRL2, 0xff, 0),
};
/*
diff --git a/drivers/gpu/drm/scheduler/.kunitconfig b/drivers/gpu/drm/scheduler/.kunitconfig
new file mode 100644
index 000000000000..cece53609fcf
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/.kunitconfig
@@ -0,0 +1,12 @@
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_SCHED_KUNIT_TEST=y
+CONFIG_EXPERT=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_LOCKDEP=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_LIST=y
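With this .kunitconfig in place, the scheduler suites can be run under the KUnit wrapper; one plausible invocation from an in-tree checkout is ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/scheduler (the --kunitconfig option points the tool at this directory's config).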
diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile
index 53863621829f..6e13e4c63e9d 100644
--- a/drivers/gpu/drm/scheduler/Makefile
+++ b/drivers/gpu/drm/scheduler/Makefile
@@ -23,3 +23,5 @@
gpu-sched-y := sched_main.o sched_fence.o sched_entity.o
obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
+
+obj-$(CONFIG_DRM_SCHED_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index bfea608a7106..829579c41c6b 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -828,11 +828,15 @@ EXPORT_SYMBOL(drm_sched_job_init);
*
* This arms a scheduler job for execution. Specifically it initializes the
* &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
- * or other places that need to track the completion of this job.
+ * or other places that need to track the completion of this job. It also
+ * initializes sequence numbers, which are fundamental for fence ordering.
*
* Refer to drm_sched_entity_push_job() documentation for locking
* considerations.
*
+ * Once this function has been called, you *must* submit @job with
+ * drm_sched_entity_push_job().
+ *
* This can only be called if drm_sched_job_init() succeeded.
*/
void drm_sched_job_arm(struct drm_sched_job *job)
@@ -1015,13 +1019,14 @@ EXPORT_SYMBOL(drm_sched_job_has_dependency);
* Cleans up the resources allocated with drm_sched_job_init().
*
* Drivers should call this from their error unwind code if @job is aborted
- * before it was submitted to an entity with drm_sched_entity_push_job().
+ * before drm_sched_job_arm() is called.
*
- * Since calling drm_sched_job_arm() causes the job's fences to be initialized,
- * it is up to the driver to ensure that fences that were exposed to external
- * parties get signaled. drm_sched_job_cleanup() does not ensure this.
+ * drm_sched_job_arm() is a point of no return since it initializes the fences
+ * and their sequence number etc. Once that function has been called, you *must*
+ * submit it with drm_sched_entity_push_job() and cannot simply abort it by
+ * calling drm_sched_job_cleanup().
*
- * This function must also be called in &struct drm_sched_backend_ops.free_job
+ * This function should be called in the &drm_sched_backend_ops.free_job callback.
*/
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
@@ -1029,10 +1034,15 @@ void drm_sched_job_cleanup(struct drm_sched_job *job)
unsigned long index;
if (kref_read(&job->s_fence->finished.refcount)) {
- /* drm_sched_job_arm() has been called */
+ /* The job has been processed by the scheduler, i.e.,
+ * drm_sched_job_arm() and drm_sched_entity_push_job() have
+ * been called.
+ */
dma_fence_put(&job->s_fence->finished);
} else {
- /* aborted job before arming */
+ /* The job was aborted before it has been committed to be run;
+ * notably, drm_sched_job_arm() has not been called.
+ */
drm_sched_fence_free(job->s_fence);
}
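The two documentation updates above jointly define the job lifecycle: drm_sched_job_cleanup() is a valid unwind path only before drm_sched_job_arm(), and arming commits the job to drm_sched_entity_push_job(). A hedged sketch of a driver submit path obeying those rules; struct example_job and example_prepare() are hypothetical:

	static int example_submit(struct drm_sched_entity *entity,
				  struct example_job *ejob)
	{
		int ret;

		ret = drm_sched_job_init(&ejob->base, entity, 1, NULL);
		if (ret)
			return ret;

		ret = example_prepare(ejob);	/* hypothetical driver setup */
		if (ret) {
			/* Not yet armed: cleanup is the valid unwind path. */
			drm_sched_job_cleanup(&ejob->base);
			return ret;
		}

		/* Point of no return: an armed job must be pushed. */
		drm_sched_job_arm(&ejob->base);
		drm_sched_entity_push_job(&ejob->base);

		return 0;
	}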
@@ -1220,20 +1230,23 @@ static void drm_sched_run_job_work(struct work_struct *w)
drm_sched_job_begin(sched_job);
trace_drm_run_job(sched_job, entity);
+ /*
+ * By contract, the run_job() callback returns a fence whose refcount
+ * has already been incremented for the scheduler.
+ */
fence = sched->ops->run_job(sched_job);
complete_all(&entity->entity_idle);
drm_sched_fence_scheduled(s_fence, fence);
if (!IS_ERR_OR_NULL(fence)) {
- /* Drop for original kref_init of the fence */
- dma_fence_put(fence);
-
r = dma_fence_add_callback(fence, &sched_job->cb,
drm_sched_job_done_cb);
if (r == -ENOENT)
drm_sched_job_done(sched_job, fence->error);
else if (r)
DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
+
+ dma_fence_put(fence);
} else {
drm_sched_job_done(sched_job, IS_ERR(fence) ?
PTR_ERR(fence) : 0);
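The comment added above pins down the fence ownership rule: run_job() hands the scheduler a fence with a reference already taken, and the scheduler now drops that reference itself after installing the done callback. A hedged sketch of a conforming backend callback; the example_* names are hypothetical:

	static struct dma_fence *example_run_job(struct drm_sched_job *sched_job)
	{
		struct example_job *ejob = to_example_job(sched_job);

		/*
		 * This reference belongs to the scheduler; it is released with
		 * dma_fence_put() in drm_sched_run_job_work() above.
		 */
		dma_fence_get(&ejob->hw_fence);

		example_hw_submit(ejob);	/* kick the hardware queue */

		return &ejob->hw_fence;
	}

The mock scheduler below follows the same rule: mock_sched_run_job() initializes the hardware fence (dma_fence_init() starts the refcount at one, which becomes the scheduler's reference) and then takes one extra reference for its internal job_list.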
diff --git a/drivers/gpu/drm/scheduler/tests/Makefile b/drivers/gpu/drm/scheduler/tests/Makefile
new file mode 100644
index 000000000000..5bf707bad373
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+drm-sched-tests-y := \
+ mock_scheduler.o \
+ tests_basic.o
+
+obj-$(CONFIG_DRM_SCHED_KUNIT_TEST) += drm-sched-tests.o
diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
new file mode 100644
index 000000000000..f999c8859cf7
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Valve Corporation */
+
+#include "sched_tests.h"
+
+/*
+ * Here we implement the mock "GPU" (or the scheduler backend) which is used by
+ * the DRM scheduler unit tests in order to exercise the core functionality.
+ *
+ * Test cases are implemented in a separate file.
+ */
+
+/**
+ * drm_mock_sched_entity_new - Create a new mock scheduler entity
+ *
+ * @test: KUnit test owning the entity
+ * @priority: Scheduling priority
+ * @sched: Mock scheduler on which the entity can be scheduled
+ *
+ * Returns: New mock scheduler entity with allocation managed by the test
+ */
+struct drm_mock_sched_entity *
+drm_mock_sched_entity_new(struct kunit *test,
+ enum drm_sched_priority priority,
+ struct drm_mock_scheduler *sched)
+{
+ struct drm_mock_sched_entity *entity;
+ struct drm_gpu_scheduler *drm_sched;
+ int ret;
+
+ entity = kunit_kzalloc(test, sizeof(*entity), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, entity);
+
+ drm_sched = &sched->base;
+ ret = drm_sched_entity_init(&entity->base,
+ priority,
+ &drm_sched, 1,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ entity->test = test;
+
+ return entity;
+}
+
+/**
+ * drm_mock_sched_entity_free - Destroys a mock scheduler entity
+ *
+ * @entity: Entity to destroy
+ *
+ * To be used from the test cases once done with the entity.
+ */
+void drm_mock_sched_entity_free(struct drm_mock_sched_entity *entity)
+{
+ drm_sched_entity_destroy(&entity->base);
+}
+
+static void drm_mock_sched_job_complete(struct drm_mock_sched_job *job)
+{
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(job->base.sched);
+
+ lockdep_assert_held(&sched->lock);
+
+ job->flags |= DRM_MOCK_SCHED_JOB_DONE;
+ list_move_tail(&job->link, &sched->done_list);
+ dma_fence_signal(&job->hw_fence);
+ complete(&job->done);
+}
+
+static enum hrtimer_restart
+drm_mock_sched_job_signal_timer(struct hrtimer *hrtimer)
+{
+ struct drm_mock_sched_job *job =
+ container_of(hrtimer, typeof(*job), timer);
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(job->base.sched);
+ struct drm_mock_sched_job *next;
+ ktime_t now = ktime_get();
+ unsigned long flags;
+ LIST_HEAD(signal);
+
+ spin_lock_irqsave(&sched->lock, flags);
+ list_for_each_entry_safe(job, next, &sched->job_list, link) {
+ if (!job->duration_us)
+ break;
+
+ if (ktime_before(now, job->finish_at))
+ break;
+
+ sched->hw_timeline.cur_seqno = job->hw_fence.seqno;
+ drm_mock_sched_job_complete(job);
+ }
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * drm_mock_sched_job_new - Create a new mock scheduler job
+ *
+ * @test: KUnit test owning the job
+ * @entity: Scheduler entity of the job
+ *
+ * Returns: New mock scheduler job with allocation managed by the test
+ */
+struct drm_mock_sched_job *
+drm_mock_sched_job_new(struct kunit *test,
+ struct drm_mock_sched_entity *entity)
+{
+ struct drm_mock_sched_job *job;
+ int ret;
+
+ job = kunit_kzalloc(test, sizeof(*job), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, job);
+
+ ret = drm_sched_job_init(&job->base,
+ &entity->base,
+ 1,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ job->test = test;
+
+ init_completion(&job->done);
+ spin_lock_init(&job->lock);
+ INIT_LIST_HEAD(&job->link);
+ hrtimer_setup(&job->timer, drm_mock_sched_job_signal_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+
+ return job;
+}
+
+static const char *drm_mock_sched_hw_fence_driver_name(struct dma_fence *fence)
+{
+ return "drm_mock_sched";
+}
+
+static const char *
+drm_mock_sched_hw_fence_timeline_name(struct dma_fence *fence)
+{
+ struct drm_mock_sched_job *job =
+ container_of(fence, typeof(*job), hw_fence);
+
+ return (const char *)job->base.sched->name;
+}
+
+static void drm_mock_sched_hw_fence_release(struct dma_fence *fence)
+{
+ struct drm_mock_sched_job *job =
+ container_of(fence, typeof(*job), hw_fence);
+
+ hrtimer_cancel(&job->timer);
+
+ /* Containing job is freed by the kunit framework */
+}
+
+static const struct dma_fence_ops drm_mock_sched_hw_fence_ops = {
+ .get_driver_name = drm_mock_sched_hw_fence_driver_name,
+ .get_timeline_name = drm_mock_sched_hw_fence_timeline_name,
+ .release = drm_mock_sched_hw_fence_release,
+};
+
+static struct dma_fence *mock_sched_run_job(struct drm_sched_job *sched_job)
+{
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(sched_job->sched);
+ struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+
+ dma_fence_init(&job->hw_fence,
+ &drm_mock_sched_hw_fence_ops,
+ &job->lock,
+ sched->hw_timeline.context,
+ atomic_inc_return(&sched->hw_timeline.next_seqno));
+
+ dma_fence_get(&job->hw_fence); /* Reference for the job_list */
+
+ spin_lock_irq(&sched->lock);
+ if (job->duration_us) {
+ ktime_t prev_finish_at = 0;
+
+ if (!list_empty(&sched->job_list)) {
+ struct drm_mock_sched_job *prev =
+ list_last_entry(&sched->job_list, typeof(*prev),
+ link);
+
+ prev_finish_at = prev->finish_at;
+ }
+
+ if (!prev_finish_at)
+ prev_finish_at = ktime_get();
+
+ job->finish_at = ktime_add_us(prev_finish_at, job->duration_us);
+ }
+ list_add_tail(&job->link, &sched->job_list);
+ if (job->finish_at)
+ hrtimer_start(&job->timer, job->finish_at, HRTIMER_MODE_ABS);
+ spin_unlock_irq(&sched->lock);
+
+ return &job->hw_fence;
+}
+
+static enum drm_gpu_sched_stat
+mock_sched_timedout_job(struct drm_sched_job *sched_job)
+{
+ struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+
+ job->flags |= DRM_MOCK_SCHED_JOB_TIMEDOUT;
+
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+}
+
+static void mock_sched_free_job(struct drm_sched_job *sched_job)
+{
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(sched_job->sched);
+ struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+ unsigned long flags;
+
+ /* Remove from the scheduler done list. */
+ spin_lock_irqsave(&sched->lock, flags);
+ list_del(&job->link);
+ spin_unlock_irqrestore(&sched->lock, flags);
+ dma_fence_put(&job->hw_fence);
+
+ drm_sched_job_cleanup(sched_job);
+
+ /* Mock job itself is freed by the kunit framework. */
+}
+
+static const struct drm_sched_backend_ops drm_mock_scheduler_ops = {
+ .run_job = mock_sched_run_job,
+ .timedout_job = mock_sched_timedout_job,
+ .free_job = mock_sched_free_job
+};
+
+/**
+ * drm_mock_sched_new - Create a new mock scheduler
+ *
+ * @test: KUnit test owning the scheduler
+ * @timeout: Job timeout to set
+ *
+ * Returns: New mock scheduler with allocation managed by the test
+ */
+struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
+{
+ struct drm_sched_init_args args = {
+ .ops = &drm_mock_scheduler_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = U32_MAX,
+ .hang_limit = 1,
+ .timeout = timeout,
+ .name = "drm-mock-scheduler",
+ };
+ struct drm_mock_scheduler *sched;
+ int ret;
+
+ sched = kunit_kzalloc(test, sizeof(*sched), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, sched);
+
+ ret = drm_sched_init(&sched->base, &args);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ sched->test = test;
+ sched->hw_timeline.context = dma_fence_context_alloc(1);
+ atomic_set(&sched->hw_timeline.next_seqno, 0);
+ INIT_LIST_HEAD(&sched->job_list);
+ INIT_LIST_HEAD(&sched->done_list);
+ spin_lock_init(&sched->lock);
+
+ return sched;
+}
+
+/**
+ * drm_mock_sched_fini - Destroys a mock scheduler
+ *
+ * @sched: Scheduler to destroy
+ *
+ * To be used from the test cases once done with the scheduler.
+ */
+void drm_mock_sched_fini(struct drm_mock_scheduler *sched)
+{
+ struct drm_mock_sched_job *job, *next;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ drm_sched_wqueue_stop(&sched->base);
+
+ /* Force complete all unfinished jobs. */
+ spin_lock_irqsave(&sched->lock, flags);
+ list_for_each_entry_safe(job, next, &sched->job_list, link)
+ list_move_tail(&job->link, &list);
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ list_for_each_entry(job, &list, link)
+ hrtimer_cancel(&job->timer);
+
+ spin_lock_irqsave(&sched->lock, flags);
+ list_for_each_entry_safe(job, next, &list, link)
+ drm_mock_sched_job_complete(job);
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ /*
+ * Free completed jobs and jobs not yet processed by the DRM scheduler
+ * free worker.
+ */
+ spin_lock_irqsave(&sched->lock, flags);
+ list_for_each_entry_safe(job, next, &sched->done_list, link)
+ list_move_tail(&job->link, &list);
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ list_for_each_entry_safe(job, next, &list, link)
+ mock_sched_free_job(&job->base);
+
+ drm_sched_fini(&sched->base);
+}
+
+/**
+ * drm_mock_sched_advance - Advances the mock scheduler timeline
+ *
+ * @sched: Scheduler timeline to advance
+ * @num: By how many seqnos to advance
+ *
+ * Advancing the scheduler timeline by a number of seqnos will trigger
+ * signalling of the hardware fences and unlinking the jobs from the internal
+ * scheduler tracking.
+ *
+ * This can be used from test cases which want complete control of the simulated
+ * job execution timing. For example, a single job submitted with no set
+ * duration would never complete before the test case advances the timeline
+ * by one.
+ */
+unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched,
+ unsigned int num)
+{
+ struct drm_mock_sched_job *job, *next;
+ unsigned int found = 0;
+ unsigned long flags;
+ LIST_HEAD(signal);
+
+ spin_lock_irqsave(&sched->lock, flags);
+ if (WARN_ON_ONCE(sched->hw_timeline.cur_seqno + num <
+ sched->hw_timeline.cur_seqno))
+ goto unlock;
+ sched->hw_timeline.cur_seqno += num;
+ list_for_each_entry_safe(job, next, &sched->job_list, link) {
+ if (sched->hw_timeline.cur_seqno < job->hw_fence.seqno)
+ break;
+
+ drm_mock_sched_job_complete(job);
+ found++;
+ }
+unlock:
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ return found;
+}
+
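As the kerneldoc above spells out, manual advance pairs with jobs submitted without a duration. A minimal hedged usage, mirroring the drm_sched_basic_submit test in tests_basic.c below:

	/* Complete (at most) one queued, durationless job by hand. */
	unsigned int done = drm_mock_sched_advance(sched, 1);
	/* done == 1 when a job was waiting at the new seqno. */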
+MODULE_DESCRIPTION("DRM mock scheduler and tests");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h
new file mode 100644
index 000000000000..27caf8285fb7
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Valve Corporation */
+
+#ifndef _SCHED_TESTS_H_
+#define _SCHED_TESTS_H_
+
+#include <kunit/test.h>
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/dma-fence.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include <drm/gpu_scheduler.h>
+
+/*
+ * DOC: Mock DRM scheduler data structures
+ *
+ * drm_mock_* data structures are used to implement a mock "GPU".
+ *
+ * They subclass the core DRM scheduler objects and add their data on top, which
+ * enables tracking the submitted jobs and simulating their execution with the
+ * attributes as specified by the test case.
+ */
+
+/**
+ * struct drm_mock_scheduler - implements a trivial mock GPU execution engine
+ *
+ * @base: DRM scheduler base class
+ * @test: Backpointer to the owning kunit test case
+ * @lock: Lock to protect the simulated @hw_timeline, @job_list and @done_list
+ * @job_list: List of jobs submitted to the mock GPU
+ * @done_list: List of jobs completed by the mock GPU
+ * @hw_timeline: Simulated hardware timeline with a @context, @next_seqno and
+ * @cur_seqno, used to implement struct dma_fence signaling of
+ * simulated job completion.
+ *
+ * Trivial mock GPU execution engine tracks submitted jobs and enables
+ * completing them strictly in submission order.
+ */
+struct drm_mock_scheduler {
+ struct drm_gpu_scheduler base;
+
+ struct kunit *test;
+
+ spinlock_t lock;
+ struct list_head job_list;
+ struct list_head done_list;
+
+ struct {
+ u64 context;
+ atomic_t next_seqno;
+ unsigned int cur_seqno;
+ } hw_timeline;
+};
+
+/**
+ * struct drm_mock_sched_entity - implements a mock GPU sched entity
+ *
+ * @base: DRM scheduler entity base class
+ * @test: Backpointer to the owning kunit test case
+ *
+ * Mock GPU sched entity is used by the test cases to submit jobs to the mock
+ * scheduler.
+ */
+struct drm_mock_sched_entity {
+ struct drm_sched_entity base;
+
+ struct kunit *test;
+};
+
+/**
+ * struct drm_mock_sched_job - implements a mock GPU job
+ *
+ * @base: DRM sched job base class
+ * @done: Completion signaling job completion.
+ * @flags: Flags designating job state.
+ * @link: List head element used by job tracking by the drm_mock_scheduler
+ * @timer: Timer used for simulating job execution duration
+ * @duration_us: Simulated job duration in microseconds, or zero if in manual
+ * timeline advance mode
+ * @finish_at: Absolute time when the jobs with set duration will complete
+ * @lock: Lock used for @hw_fence
+ * @hw_fence: Fence returned to DRM scheduler as the hardware fence
+ * @test: Backpointer to the owning kunit test case
+ *
+ * Mock GPU sched job is used by the test cases to submit jobs to the mock
+ * scheduler.
+ */
+struct drm_mock_sched_job {
+ struct drm_sched_job base;
+
+ struct completion done;
+
+#define DRM_MOCK_SCHED_JOB_DONE 0x1
+#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2
+ unsigned long flags;
+
+ struct list_head link;
+ struct hrtimer timer;
+
+ unsigned int duration_us;
+ ktime_t finish_at;
+
+ spinlock_t lock;
+ struct dma_fence hw_fence;
+
+ struct kunit *test;
+};
+
+static inline struct drm_mock_scheduler *
+drm_sched_to_mock_sched(struct drm_gpu_scheduler *sched)
+{
+ return container_of(sched, struct drm_mock_scheduler, base);
+}
+
+static inline struct drm_mock_sched_entity *
+drm_sched_entity_to_mock_entity(struct drm_sched_entity *sched_entity)
+{
+ return container_of(sched_entity, struct drm_mock_sched_entity, base);
+}
+
+static inline struct drm_mock_sched_job *
+drm_sched_job_to_mock_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct drm_mock_sched_job, base);
+}
+
+struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test,
+ long timeout);
+void drm_mock_sched_fini(struct drm_mock_scheduler *sched);
+unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched,
+ unsigned int num);
+
+struct drm_mock_sched_entity *
+drm_mock_sched_entity_new(struct kunit *test,
+ enum drm_sched_priority priority,
+ struct drm_mock_scheduler *sched);
+void drm_mock_sched_entity_free(struct drm_mock_sched_entity *entity);
+
+struct drm_mock_sched_job *
+drm_mock_sched_job_new(struct kunit *test,
+ struct drm_mock_sched_entity *entity);
+
+/**
+ * drm_mock_sched_job_submit - Arm and submit a job in one go
+ *
+ * @job: Job to arm and submit
+ */
+static inline void drm_mock_sched_job_submit(struct drm_mock_sched_job *job)
+{
+ drm_sched_job_arm(&job->base);
+ drm_sched_entity_push_job(&job->base);
+}
+
+/**
+ * drm_mock_sched_job_set_duration_us - Set a job duration
+ *
+ * @job: Job to set the duration for
+ * @duration_us: Duration in microseconds
+ *
+ * Jobs with duration set will be automatically completed by the mock scheduler
+ * as the timeline progresses, unless a job without a set duration is
+ * encountered in the timeline, in which case drm_mock_sched_advance() must
+ * be called to bump the timeline.
+ */
+static inline void
+drm_mock_sched_job_set_duration_us(struct drm_mock_sched_job *job,
+ unsigned int duration_us)
+{
+ job->duration_us = duration_us;
+}
+
+/**
+ * drm_mock_sched_job_is_finished - Check if a job is finished
+ *
+ * @job: Job to check
+ *
+ * Returns: true if finished
+ */
+static inline bool
+drm_mock_sched_job_is_finished(struct drm_mock_sched_job *job)
+{
+ return job->flags & DRM_MOCK_SCHED_JOB_DONE;
+}
+
+/**
+ * drm_mock_sched_job_wait_finished - Wait until a job is finished
+ *
+ * @job: Job to wait for
+ * @timeout: Wait time in jiffies
+ *
+ * Returns: true if finished within the timeout provided, otherwise false
+ */
+static inline bool
+drm_mock_sched_job_wait_finished(struct drm_mock_sched_job *job, long timeout)
+{
+ if (job->flags & DRM_MOCK_SCHED_JOB_DONE)
+ return true;
+
+ return wait_for_completion_timeout(&job->done, timeout) != 0;
+}
+
+/**
+ * drm_mock_sched_job_wait_scheduled - Wait until a job is scheduled
+ *
+ * @job: Job to wait for
+ * @timeout: Wait time in jiffies
+ *
+ * Returns: true if scheduled within the timeout provided, otherwise false
+ */
+static inline bool
+drm_mock_sched_job_wait_scheduled(struct drm_mock_sched_job *job, long timeout)
+{
+ KUNIT_ASSERT_EQ(job->test, job->flags & DRM_MOCK_SCHED_JOB_DONE, 0);
+
+ return dma_fence_wait_timeout(&job->base.s_fence->scheduled,
+ false,
+ timeout) != 0;
+}
+
+#endif
diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c
new file mode 100644
index 000000000000..7230057e0594
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Valve Corporation */
+
+#include <linux/delay.h>
+
+#include "sched_tests.h"
+
+/*
+ * DRM scheduler basic tests should check the basic functional correctness of
+ * the scheduler, including some very light smoke testing. More targeted tests,
+ * for example focusing on testing specific bugs and other more complicated test
+ * scenarios, should be implemented in separate source units.
+ */
+
+static int drm_sched_basic_init(struct kunit *test)
+{
+ test->priv = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+
+ return 0;
+}
+
+static void drm_sched_basic_exit(struct kunit *test)
+{
+ struct drm_mock_scheduler *sched = test->priv;
+
+ drm_mock_sched_fini(sched);
+}
+
+static int drm_sched_timeout_init(struct kunit *test)
+{
+ test->priv = drm_mock_sched_new(test, HZ);
+
+ return 0;
+}
+
+static void drm_sched_basic_submit(struct kunit *test)
+{
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_sched_job *job;
+ unsigned int i;
+ bool done;
+
+ /*
+ * Submit one job to the scheduler and verify that it gets scheduled
+ * and completed only when the mock hw backend processes it.
+ */
+
+ entity = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+ job = drm_mock_sched_job_new(test, entity);
+
+ drm_mock_sched_job_submit(job);
+
+ done = drm_mock_sched_job_wait_scheduled(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ / 2);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ i = drm_mock_sched_advance(sched, 1);
+ KUNIT_ASSERT_EQ(test, i, 1);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ drm_mock_sched_entity_free(entity);
+}
+
+struct drm_sched_basic_params {
+ const char *description;
+ unsigned int queue_depth;
+ unsigned int num_entities;
+ unsigned int job_us;
+ bool dep_chain;
+};
+
+static const struct drm_sched_basic_params drm_sched_basic_cases[] = {
+ {
+ .description = "A queue of jobs in a single entity",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 1,
+ },
+ {
+ .description = "A chain of dependent jobs across multiple entities",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 1,
+ .dep_chain = true,
+ },
+ {
+ .description = "Multiple independent job queues",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 4,
+ },
+ {
+ .description = "Multiple inter-dependent job queues",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 4,
+ .dep_chain = true,
+ },
+};
+
+static void
+drm_sched_basic_desc(const struct drm_sched_basic_params *params, char *desc)
+{
+ strscpy(desc, params->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(drm_sched_basic, drm_sched_basic_cases, drm_sched_basic_desc);
+
+static void drm_sched_basic_test(struct kunit *test)
+{
+ const struct drm_sched_basic_params *params = test->param_value;
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_job *job, *prev = NULL;
+ struct drm_mock_sched_entity **entity;
+ unsigned int i, cur_ent = 0;
+ bool done;
+
+ entity = kunit_kcalloc(test, params->num_entities, sizeof(*entity),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, entity);
+
+ for (i = 0; i < params->num_entities; i++)
+ entity[i] = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ for (i = 0; i < params->queue_depth; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= params->num_entities;
+ drm_mock_sched_job_set_duration_us(job, params->job_us);
+ if (params->dep_chain && prev)
+ drm_sched_job_add_dependency(&job->base,
+ dma_fence_get(&prev->base.s_fence->finished));
+ drm_mock_sched_job_submit(job);
+ prev = job;
+ }
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ for (i = 0; i < params->num_entities; i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static void drm_sched_basic_entity_cleanup(struct kunit *test)
+{
+ struct drm_mock_sched_job *job, *mid, *prev = NULL;
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_entity *entity[4];
+ const unsigned int qd = 100;
+ unsigned int i, cur_ent = 0;
+ bool done;
+
+ /*
+ * Submit a queue of jobs across different entities with an explicit
+ * chain of dependencies between them and trigger entity cleanup while
+ * the queue is still being processed.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ entity[i] = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ if (prev)
+ drm_sched_job_add_dependency(&job->base,
+ dma_fence_get(&prev->base.s_fence->finished));
+ drm_mock_sched_job_submit(job);
+ if (i == qd / 2)
+ mid = job;
+ prev = job;
+ }
+
+ done = drm_mock_sched_job_wait_finished(mid, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ /* Exit with half of the queue still pending to be executed. */
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static struct kunit_case drm_sched_basic_tests[] = {
+ KUNIT_CASE(drm_sched_basic_submit),
+ KUNIT_CASE_PARAM(drm_sched_basic_test, drm_sched_basic_gen_params),
+ KUNIT_CASE(drm_sched_basic_entity_cleanup),
+ {}
+};
+
+static struct kunit_suite drm_sched_basic = {
+ .name = "drm_sched_basic_tests",
+ .init = drm_sched_basic_init,
+ .exit = drm_sched_basic_exit,
+ .test_cases = drm_sched_basic_tests,
+};
+
+static void drm_sched_basic_timeout(struct kunit *test)
+{
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_sched_job *job;
+ bool done;
+
+ /*
+ * Submit a single job against a scheduler with the timeout configured
+ * and verify that the timeout handling will run if the backend fails
+ * to complete it in time.
+ */
+
+ entity = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+ job = drm_mock_sched_job_new(test, entity);
+
+ drm_mock_sched_job_submit(job);
+
+ done = drm_mock_sched_job_wait_scheduled(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ / 2);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ KUNIT_ASSERT_EQ(test,
+ job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT,
+ 0);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ KUNIT_ASSERT_EQ(test,
+ job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT,
+ DRM_MOCK_SCHED_JOB_TIMEDOUT);
+
+ drm_mock_sched_entity_free(entity);
+}
+
+static struct kunit_case drm_sched_timeout_tests[] = {
+ KUNIT_CASE(drm_sched_basic_timeout),
+ {}
+};
+
+static struct kunit_suite drm_sched_timeout = {
+ .name = "drm_sched_basic_timeout_tests",
+ .init = drm_sched_timeout_init,
+ .exit = drm_sched_basic_exit,
+ .test_cases = drm_sched_timeout_tests,
+};
+
+static void drm_sched_priorities(struct kunit *test)
+{
+ struct drm_mock_sched_entity *entity[DRM_SCHED_PRIORITY_COUNT];
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_job *job;
+ const unsigned int qd = 100;
+ unsigned int i, cur_ent = 0;
+ enum drm_sched_priority p;
+ bool done;
+
+ /*
+ * Submit a bunch of jobs against entities configured with different
+ * priorities.
+ */
+
+ BUILD_BUG_ON(DRM_SCHED_PRIORITY_KERNEL > DRM_SCHED_PRIORITY_LOW);
+ BUILD_BUG_ON(ARRAY_SIZE(entity) != DRM_SCHED_PRIORITY_COUNT);
+
+ for (p = DRM_SCHED_PRIORITY_KERNEL; p <= DRM_SCHED_PRIORITY_LOW; p++)
+ entity[p] = drm_mock_sched_entity_new(test, p, sched);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ drm_mock_sched_job_submit(job);
+ }
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static void drm_sched_change_priority(struct kunit *test)
+{
+ struct drm_mock_sched_entity *entity[DRM_SCHED_PRIORITY_COUNT];
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_job *job;
+ const unsigned int qd = 1000;
+ unsigned int i, cur_ent = 0;
+ enum drm_sched_priority p;
+
+ /*
+ * Submit a bunch of jobs against entities configured with different
+ * priorities and while waiting for them to complete, periodically keep
+ * changing their priorities.
+ *
+ * We set up the queue-depth (qd) and job duration so the priority
+ * changing loop has some time to interact with submissions to the
+ * backend and job completions as they progress.
+ */
+
+ for (p = DRM_SCHED_PRIORITY_KERNEL; p <= DRM_SCHED_PRIORITY_LOW; p++)
+ entity[p] = drm_mock_sched_entity_new(test, p, sched);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ drm_mock_sched_job_submit(job);
+ }
+
+ do {
+ drm_sched_entity_set_priority(&entity[cur_ent]->base,
+ (entity[cur_ent]->base.priority + 1) %
+ DRM_SCHED_PRIORITY_COUNT);
+ cur_ent++;
+ cur_ent %= ARRAY_SIZE(entity);
+ usleep_range(200, 500);
+ } while (!drm_mock_sched_job_is_finished(job));
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static struct kunit_case drm_sched_priority_tests[] = {
+ KUNIT_CASE(drm_sched_priorities),
+ KUNIT_CASE(drm_sched_change_priority),
+ {}
+};
+
+static struct kunit_suite drm_sched_priority = {
+ .name = "drm_sched_basic_priority_tests",
+ .init = drm_sched_basic_init,
+ .exit = drm_sched_basic_exit,
+ .test_cases = drm_sched_priority_tests,
+};
+
+static void drm_sched_test_modify_sched(struct kunit *test)
+{
+ unsigned int i, cur_ent = 0, cur_sched = 0;
+ struct drm_mock_sched_entity *entity[13];
+ struct drm_mock_scheduler *sched[3];
+ struct drm_mock_sched_job *job;
+ const unsigned int qd = 1000;
+
+ /*
+ * Submit a bunch of jobs against entities configured with different
+ * schedulers and while waiting for them to complete, periodically keep
+ * changing schedulers associated with each entity.
+ *
+ * We set up the queue-depth (qd) and job duration so the sched modify
+ * loop has some time to interact with submissions to the backend and
+ * job completions as they progress.
+ *
+ * For the number of schedulers and entities we use primes in order to
+ * perturb the entity->sched assignments with less of a regular pattern.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(sched); i++)
+ sched[i] = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ entity[i] = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched[i % ARRAY_SIZE(sched)]);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ drm_mock_sched_job_submit(job);
+ }
+
+ do {
+ struct drm_gpu_scheduler *modify;
+
+ usleep_range(200, 500);
+ cur_ent++;
+ cur_ent %= ARRAY_SIZE(entity);
+ cur_sched++;
+ cur_sched %= ARRAY_SIZE(sched);
+ modify = &sched[cur_sched]->base;
+ drm_sched_entity_modify_sched(&entity[cur_ent]->base, &modify,
+ 1);
+ } while (!drm_mock_sched_job_is_finished(job));
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+
+ for (i = 0; i < ARRAY_SIZE(sched); i++)
+ drm_mock_sched_fini(sched[i]);
+}
+
+static struct kunit_case drm_sched_modify_sched_tests[] = {
+ KUNIT_CASE(drm_sched_test_modify_sched),
+ {}
+};
+
+static struct kunit_suite drm_sched_modify_sched = {
+ .name = "drm_sched_basic_modify_sched_tests",
+ .test_cases = drm_sched_modify_sched_tests,
+};
+
+static void drm_sched_test_credits(struct kunit *test)
+{
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_scheduler *sched;
+ struct drm_mock_sched_job *job[2];
+ bool done;
+ int i;
+
+ /*
+ * Check that the configured credit limit is respected.
+ */
+
+ sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+ sched->base.credit_limit = 1;
+
+ entity = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ job[0] = drm_mock_sched_job_new(test, entity);
+ job[1] = drm_mock_sched_job_new(test, entity);
+
+ drm_mock_sched_job_submit(job[0]);
+ drm_mock_sched_job_submit(job[1]);
+
+ done = drm_mock_sched_job_wait_scheduled(job[0], HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ done = drm_mock_sched_job_wait_scheduled(job[1], HZ);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ i = drm_mock_sched_advance(sched, 1);
+ KUNIT_ASSERT_EQ(test, i, 1);
+
+ done = drm_mock_sched_job_wait_scheduled(job[1], HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ i = drm_mock_sched_advance(sched, 1);
+ KUNIT_ASSERT_EQ(test, i, 1);
+
+ done = drm_mock_sched_job_wait_finished(job[1], HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ drm_mock_sched_entity_free(entity);
+ drm_mock_sched_fini(sched);
+}
+
+static struct kunit_case drm_sched_credits_tests[] = {
+ KUNIT_CASE(drm_sched_test_credits),
+ {}
+};
+
+static struct kunit_suite drm_sched_credits = {
+ .name = "drm_sched_basic_credits_tests",
+ .test_cases = drm_sched_credits_tests,
+};
+
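+/*
+ * Illustrative only: with a kunitconfig that enables these suites they can
+ * be run through the KUnit wrapper, e.g.
+ *
+ *   ./tools/testing/kunit/kunit.py run --kunitconfig=<path to these tests>
+ *
+ * where the kunitconfig path is left unspecified here.
+ */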
+kunit_test_suites(&drm_sched_basic,
+ &drm_sched_timeout,
+ &drm_sched_priority,
+ &drm_sched_modify_sched,
+ &drm_sched_credits);
diff --git a/drivers/gpu/drm/sitronix/Kconfig b/drivers/gpu/drm/sitronix/Kconfig
new file mode 100644
index 000000000000..c069d0d41775
--- /dev/null
+++ b/drivers/gpu/drm/sitronix/Kconfig
@@ -0,0 +1,51 @@
+config DRM_ST7571_I2C
+ tristate "DRM support for Sitronix ST7571 display panels (I2C)"
+ depends on DRM && I2C && MMU
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ help
+ DRM driver for Sitronix ST7571 panels controlled over I2C.
+
+	  If M is selected the module will be called st7571-i2c.
+
+config TINYDRM_ST7586
+ tristate
+ default n
+
+config DRM_ST7586
+ tristate "DRM support for Sitronix ST7586 display panels"
+ depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
+ select DRM_KMS_HELPER
+ select DRM_GEM_DMA_HELPER
+ select DRM_MIPI_DBI
+ default TINYDRM_ST7586
+ help
+ DRM driver for the following Sitronix ST7586 panels:
+ * LEGO MINDSTORMS EV3
+
+ If M is selected the module will be called st7586.
+
+config TINYDRM_ST7735R
+ tristate
+ default n
+
+config DRM_ST7735R
+ tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
+ depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
+ select DRM_KMS_HELPER
+ select DRM_GEM_DMA_HELPER
+ select DRM_MIPI_DBI
+ select BACKLIGHT_CLASS_DEVICE
+ default TINYDRM_ST7735R
+ help
+ DRM driver for Sitronix ST7715R/ST7735R with one of the following
+ LCDs:
+ * Jianda JD-T18003-T01 1.8" 128x160 TFT
+ * Okaya RH128128T 1.44" 128x128 TFT
+
+ If M is selected the module will be called st7735r.
diff --git a/drivers/gpu/drm/sitronix/Makefile b/drivers/gpu/drm/sitronix/Makefile
new file mode 100644
index 000000000000..bd139e5a6995
--- /dev/null
+++ b/drivers/gpu/drm/sitronix/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_DRM_ST7571_I2C) += st7571-i2c.o
+obj-$(CONFIG_DRM_ST7586) += st7586.o
+obj-$(CONFIG_DRM_ST7735R) += st7735r.o
diff --git a/drivers/gpu/drm/sitronix/st7571-i2c.c b/drivers/gpu/drm/sitronix/st7571-i2c.c
new file mode 100644
index 000000000000..eec846892962
--- /dev/null
+++ b/drivers/gpu/drm/sitronix/st7571-i2c.c
@@ -0,0 +1,1000 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Sitronix ST7571, a 4 level gray scale dot matrix LCD controller
+ *
+ * Copyright (C) 2025 Marcus Folkesson <marcus.folkesson@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_shmem.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_module.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+
+#define ST7571_COMMAND_MODE (0x00)
+#define ST7571_DATA_MODE (0x40)
+
+/* Normal mode command set */
+#define ST7571_DISPLAY_OFF (0xae)
+#define ST7571_DISPLAY_ON (0xaf)
+#define ST7571_OSC_ON (0xab)
+#define ST7571_SET_COLUMN_LSB(c) (0x00 | FIELD_PREP(GENMASK(3, 0), (c)))
+#define ST7571_SET_COLUMN_MSB(c) (0x10 | FIELD_PREP(GENMASK(2, 0), (c) >> 4))
+#define ST7571_SET_COM0_LSB(x) (FIELD_PREP(GENMASK(6, 0), (x)))
+#define ST7571_SET_COM0_MSB (0x44)
+#define ST7571_SET_COM_SCAN_DIR(d) (0xc0 | FIELD_PREP(GENMASK(3, 3), (d)))
+#define ST7571_SET_CONTRAST_LSB(c) (FIELD_PREP(GENMASK(5, 0), (c)))
+#define ST7571_SET_CONTRAST_MSB (0x81)
+#define ST7571_SET_DISPLAY_DUTY_LSB(d) (FIELD_PREP(GENMASK(7, 0), (d)))
+#define ST7571_SET_DISPLAY_DUTY_MSB (0x48)
+#define ST7571_SET_ENTIRE_DISPLAY_ON(p) (0xa4 | FIELD_PREP(GENMASK(0, 0), (p)))
+#define ST7571_SET_LCD_BIAS(b) (0x50 | FIELD_PREP(GENMASK(2, 0), (b)))
+#define ST7571_SET_MODE_LSB(m) (FIELD_PREP(GENMASK(7, 2), (m)))
+#define ST7571_SET_MODE_MSB (0x38)
+#define ST7571_SET_PAGE(p) (0xb0 | FIELD_PREP(GENMASK(3, 0), (p)))
+#define ST7571_SET_POWER(p) (0x28 | FIELD_PREP(GENMASK(2, 0), (p)))
+#define ST7571_SET_REGULATOR_REG(r) (0x20 | FIELD_PREP(GENMASK(2, 0), (r)))
+#define ST7571_SET_REVERSE(r) (0xa6 | FIELD_PREP(GENMASK(0, 0), (r)))
+#define ST7571_SET_SEG_SCAN_DIR(d) (0xa0 | FIELD_PREP(GENMASK(0, 0), (d)))
+#define ST7571_SET_START_LINE_LSB(l) (FIELD_PREP(GENMASK(6, 0), (l)))
+#define ST7571_SET_START_LINE_MSB (0x40)
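+
+/*
+ * Example (illustrative): column 0x5a is addressed with a two-command
+ * sequence, ST7571_SET_COLUMN_LSB(0x5a) == 0x0a followed by
+ * ST7571_SET_COLUMN_MSB(0x5a) == 0x15, i.e. the low nibble and the high
+ * three bits of the column address travel in separate commands.
+ */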
+
+/* Extension command set 3 */
+#define ST7571_COMMAND_SET_3 (0x7b)
+#define ST7571_SET_COLOR_MODE(c) (0x10 | FIELD_PREP(GENMASK(0, 0), (c)))
+#define ST7571_COMMAND_SET_NORMAL (0x00)
+
+#define ST7571_PAGE_HEIGHT 8
+
+#define DRIVER_NAME "st7571"
+#define DRIVER_DESC "ST7571 DRM driver"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+enum st7571_color_mode {
+ ST7571_COLOR_MODE_GRAY = 0,
+ ST7571_COLOR_MODE_BLACKWHITE = 1,
+};
+
+struct st7571_device;
+
+struct st7571_panel_constraints {
+ u32 min_nlines;
+ u32 max_nlines;
+ u32 min_ncols;
+ u32 max_ncols;
+ bool support_grayscale;
+};
+
+struct st7571_panel_data {
+ int (*init)(struct st7571_device *st7571);
+ struct st7571_panel_constraints constraints;
+};
+
+struct st7571_panel_format {
+ void (*prepare_buffer)(struct st7571_device *st7571,
+ const struct iosys_map *vmap,
+ struct drm_framebuffer *fb,
+ struct drm_rect *rect,
+ struct drm_format_conv_state *fmtcnv_state);
+ int (*update_rect)(struct drm_framebuffer *fb, struct drm_rect *rect);
+ enum st7571_color_mode mode;
+ const u8 nformats;
+ const u32 formats[];
+};
+
+struct st7571_device {
+ struct drm_device dev;
+
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+
+ struct drm_display_mode mode;
+
+ const struct st7571_panel_format *pformat;
+ const struct st7571_panel_data *pdata;
+ struct i2c_client *client;
+ struct gpio_desc *reset;
+ struct regmap *regmap;
+
+ /*
+ * Depending on the hardware design, the acknowledge signal may be hard to
+ * recognize as a valid logic "0" level.
+	 * Therefore, ignore NAK if possible to stay compatible with most hardware designs
+ * and off-the-shelf panels out there.
+ *
+	 * From section 6.4, MICROPROCESSOR INTERFACE, in the datasheet:
+ *
+ * "By connecting SDA_OUT to SDA_IN externally, the SDA line becomes fully
+ * I2C interface compatible.
+ * Separating acknowledge-output from serial data
+ * input is advantageous for chip-on-glass (COG) applications. In COG
+ * applications, the ITO resistance and the pull-up resistor will form a
+ * voltage divider, which affects acknowledge-signal level. Larger ITO
+ * resistance will raise the acknowledged-signal level and system cannot
+ * recognize this level as a valid logic “0” level. By separating SDA_IN from
+ * SDA_OUT, the IC can be used in a mode that ignores the acknowledge-bit.
+ * For applications which check acknowledge-bit, it is necessary to minimize
+ * the ITO resistance of the SDA_OUT trace to guarantee a valid low level."
+	 */
+ bool ignore_nak;
+
+ bool grayscale;
+ u32 height_mm;
+ u32 width_mm;
+ u32 startline;
+ u32 nlines;
+ u32 ncols;
+ u32 bpp;
+
+	/* Intermediate buffer in LCD-friendly format */
+ u8 *hwbuf;
+
+ /* Row of (transformed) pixels ready to be written to the display */
+ u8 *row;
+};
+
+static inline struct st7571_device *drm_to_st7571(struct drm_device *dev)
+{
+ return container_of(dev, struct st7571_device, dev);
+}
+
+static int st7571_regmap_write(void *context, const void *data, size_t count)
+{
+ struct i2c_client *client = context;
+ struct st7571_device *st7571 = i2c_get_clientdata(client);
+ int ret;
+
+ struct i2c_msg msg = {
+ .addr = st7571->client->addr,
+ .flags = st7571->ignore_nak ? I2C_M_IGNORE_NAK : 0,
+ .len = count,
+ .buf = (u8 *)data
+ };
+
+ ret = i2c_transfer(st7571->client->adapter, &msg, 1);
+
+ /*
+ * Unfortunately, there is no way to check if the transfer failed because of
+ * a NAK or something else as I2C bus drivers use different return values for NAK.
+ *
+ * However, if the transfer fails and ignore_nak is set, we know it is an error.
+ */
+ if (ret < 0 && st7571->ignore_nak)
+ return ret;
+
+ return 0;
+}
+
+/* The st7571 driver does not read registers but regmap expects a .read */
+static int st7571_regmap_read(void *context, const void *reg_buf,
+ size_t reg_size, void *val_buf, size_t val_size)
+{
+ return -EOPNOTSUPP;
+}
+
+static int st7571_send_command_list(struct st7571_device *st7571,
+ const u8 *cmd_list, size_t len)
+{
+ int ret;
+
+ for (int i = 0; i < len; i++) {
+ ret = regmap_write(st7571->regmap, ST7571_COMMAND_MODE, cmd_list[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+	return 0;
+}
+
+static inline u8 st7571_transform_xy(const char *p, int x, int y)
+{
+ int xrest = x % 8;
+ u8 result = 0;
+
+ /*
+ * Transforms an (x, y) pixel coordinate into a vertical 8-bit
+ * column from the framebuffer. It calculates the corresponding byte in the
+ * framebuffer, extracts the bit at the given x position across 8 consecutive
+ * rows, and packs those bits into a single byte.
+ *
+ * Return an 8-bit value representing a vertical column of pixels.
+ */
+ x = x / 8;
+ y = (y / 8) * 8;
+
+ for (int i = 0; i < 8; i++) {
+ int row_idx = y + i;
+ u8 byte = p[row_idx * 16 + x];
+ u8 bit = (byte >> xrest) & 1;
+
+ result |= (bit << i);
+ }
+
+ return result;
+}
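+
+/*
+ * Worked example (illustrative): with the 16-byte row pitch assumed above
+ * (128 monochrome columns), st7571_transform_xy(p, 10, 3) collects bit 2
+ * (10 % 8) of p[row * 16 + 1] for rows 0..7 and packs the bits LSB-first,
+ * yielding the 8-pixel vertical column that one page write consumes.
+ */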
+
+static int st7571_set_position(struct st7571_device *st7571, int x, int y)
+{
+ u8 cmd_list[] = {
+ ST7571_SET_COLUMN_LSB(x),
+ ST7571_SET_COLUMN_MSB(x),
+ ST7571_SET_PAGE(y / ST7571_PAGE_HEIGHT),
+ };
+
+ return st7571_send_command_list(st7571, cmd_list, ARRAY_SIZE(cmd_list));
+}
+
+static int st7571_fb_clear_screen(struct st7571_device *st7571)
+{
+ u32 npixels = st7571->ncols * round_up(st7571->nlines, ST7571_PAGE_HEIGHT) * st7571->bpp;
+ char pixelvalue = 0x00;
+
+ for (int i = 0; i < npixels; i++)
+ regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, &pixelvalue, 1);
+
+ return 0;
+}
+
+static void st7571_prepare_buffer_monochrome(struct st7571_device *st7571,
+ const struct iosys_map *vmap,
+ struct drm_framebuffer *fb,
+ struct drm_rect *rect,
+ struct drm_format_conv_state *fmtcnv_state)
+{
+ unsigned int dst_pitch;
+ struct iosys_map dst;
+ u32 size;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_XRGB8888:
+ dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8);
+ iosys_map_set_vaddr(&dst, st7571->hwbuf);
+
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
+ break;
+
+ case DRM_FORMAT_R1:
+ size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 8;
+ memcpy(st7571->hwbuf, vmap->vaddr, size);
+ break;
+ }
+}
+
+static void st7571_prepare_buffer_grayscale(struct st7571_device *st7571,
+ const struct iosys_map *vmap,
+ struct drm_framebuffer *fb,
+ struct drm_rect *rect,
+ struct drm_format_conv_state *fmtcnv_state)
+{
+	u32 size;
+ unsigned int dst_pitch;
+ struct iosys_map dst;
+
+ switch (fb->format->format) {
+	case DRM_FORMAT_XRGB8888: /* Converted to monochrome (R1) */
+ dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8);
+ iosys_map_set_vaddr(&dst, st7571->hwbuf);
+
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
+ break;
+
+ case DRM_FORMAT_R1:
+ size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 8;
+ memcpy(st7571->hwbuf, vmap->vaddr, size);
+ break;
+
+ case DRM_FORMAT_R2:
+ size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 4;
+ memcpy(st7571->hwbuf, vmap->vaddr, size);
+ break;
+	}
+}
+
+static int st7571_fb_update_rect_monochrome(struct drm_framebuffer *fb, struct drm_rect *rect)
+{
+ struct st7571_device *st7571 = drm_to_st7571(fb->dev);
+ char *row = st7571->row;
+
+ /* Align y to display page boundaries */
+ rect->y1 = round_down(rect->y1, ST7571_PAGE_HEIGHT);
+ rect->y2 = min_t(unsigned int, round_up(rect->y2, ST7571_PAGE_HEIGHT), st7571->nlines);
+
+ for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) {
+ for (int x = rect->x1; x < rect->x2; x++)
+ row[x] = st7571_transform_xy(st7571->hwbuf, x, y);
+
+ st7571_set_position(st7571, rect->x1, y);
+
+		/* TODO: Investigate why we can't write multiple bytes at once */
+ for (int x = rect->x1; x < rect->x2; x++)
+ regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1);
+ }
+
+ return 0;
+}
+
+static int st7571_fb_update_rect_grayscale(struct drm_framebuffer *fb, struct drm_rect *rect)
+{
+ struct st7571_device *st7571 = drm_to_st7571(fb->dev);
+ u32 format = fb->format->format;
+ char *row = st7571->row;
+ int x1;
+ int x2;
+
+ /* Align y to display page boundaries */
+ rect->y1 = round_down(rect->y1, ST7571_PAGE_HEIGHT);
+ rect->y2 = min_t(unsigned int, round_up(rect->y2, ST7571_PAGE_HEIGHT), st7571->nlines);
+
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+		/* Treated as monochrome (R1) */
+ fallthrough;
+ case DRM_FORMAT_R1:
+ x1 = rect->x1;
+ x2 = rect->x2;
+ break;
+ case DRM_FORMAT_R2:
+ x1 = rect->x1 * 2;
+ x2 = rect->x2 * 2;
+ break;
+ }
+
+ for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) {
+ for (int x = x1; x < x2; x++)
+ row[x] = st7571_transform_xy(st7571->hwbuf, x, y);
+
+ st7571_set_position(st7571, rect->x1, y);
+
+		/* TODO: Investigate why we can't write multiple bytes at once */
+ for (int x = x1; x < x2; x++) {
+ regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1);
+
+ /*
+ * As the display supports grayscale, all pixels must be written as two bits
+ * even if the format is monochrome.
+ *
+			 * The bit values map to the following grayscale levels:
+ * 0 0 = White
+ * 0 1 = Light gray
+ * 1 0 = Dark gray
+ * 1 1 = Black
+ *
+ * For monochrome formats, write the same value twice to get
+ * either a black or white pixel.
+ */
+ if (format == DRM_FORMAT_R1 || format == DRM_FORMAT_XRGB8888)
+ regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1);
+ }
+ }
+
+ return 0;
+}
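+
+/*
+ * Worked example (illustrative): for DRM_FORMAT_R1 a packed column byte
+ * such as 0xb2 is written twice, so every 1-bit source pixel becomes the
+ * 2-bit code 11 (black) and every 0-bit pixel becomes 00 (white).
+ */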
+
+static int st7571_connector_get_modes(struct drm_connector *conn)
+{
+ struct st7571_device *st7571 = drm_to_st7571(conn->dev);
+
+ return drm_connector_helper_get_modes_fixed(conn, &st7571->mode);
+}
+
+static const struct drm_connector_helper_funcs st7571_connector_helper_funcs = {
+ .get_modes = st7571_connector_get_modes,
+};
+
+static const struct st7571_panel_format st7571_monochrome = {
+ .prepare_buffer = st7571_prepare_buffer_monochrome,
+ .update_rect = st7571_fb_update_rect_monochrome,
+ .mode = ST7571_COLOR_MODE_BLACKWHITE,
+ .formats = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_R1,
+ },
+ .nformats = 2,
+};
+
+static const struct st7571_panel_format st7571_grayscale = {
+ .prepare_buffer = st7571_prepare_buffer_grayscale,
+ .update_rect = st7571_fb_update_rect_grayscale,
+ .mode = ST7571_COLOR_MODE_GRAY,
+ .formats = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_R1,
+ DRM_FORMAT_R2,
+ },
+ .nformats = 3,
+};
+
+static const u64 st7571_primary_plane_fmtmods[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static int st7571_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+}
+
+static void st7571_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_device *dev = plane->dev;
+ struct drm_rect damage;
+ struct st7571_device *st7571 = drm_to_st7571(plane->dev);
+ int ret, idx;
+
+ if (!fb)
+ return; /* no framebuffer; plane is disabled */
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ return;
+
+ if (!drm_dev_enter(dev, &idx))
+ goto out_drm_gem_fb_end_cpu_access;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ st7571->pformat->prepare_buffer(st7571,
+ &shadow_plane_state->data[0],
+ fb, &damage,
+ &shadow_plane_state->fmtcnv_state);
+
+ st7571->pformat->update_rect(fb, &damage);
+ }
+
+ drm_dev_exit(idx);
+
+out_drm_gem_fb_end_cpu_access:
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+}
+
+static void st7571_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct st7571_device *st7571 = drm_to_st7571(plane->dev);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
+ st7571_fb_clear_screen(st7571);
+ drm_dev_exit(idx);
+}
+
+static const struct drm_plane_helper_funcs st7571_primary_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = st7571_primary_plane_helper_atomic_check,
+ .atomic_update = st7571_primary_plane_helper_atomic_update,
+ .atomic_disable = st7571_primary_plane_helper_atomic_disable,
+};
+
+static const struct drm_plane_funcs st7571_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
+};
+
+/*
+ * CRTC
+ */
+
+static enum drm_mode_status st7571_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct st7571_device *st7571 = drm_to_st7571(crtc->dev);
+
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, &st7571->mode);
+}
+
+static const struct drm_crtc_helper_funcs st7571_crtc_helper_funcs = {
+ .atomic_check = drm_crtc_helper_atomic_check,
+ .mode_valid = st7571_crtc_mode_valid,
+};
+
+static const struct drm_crtc_funcs st7571_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+/*
+ * Encoder
+ */
+
+static void st7571_encoder_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = encoder->dev;
+ struct st7571_device *st7571 = drm_to_st7571(drm);
+ u8 command = ST7571_DISPLAY_ON;
+ int ret;
+
+ ret = st7571->pdata->init(st7571);
+ if (ret)
+ return;
+
+ st7571_send_command_list(st7571, &command, 1);
+}
+
+static void st7571_encoder_atomic_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = encoder->dev;
+ struct st7571_device *st7571 = drm_to_st7571(drm);
+ u8 command = ST7571_DISPLAY_OFF;
+
+ st7571_send_command_list(st7571, &command, 1);
+}
+
+static const struct drm_encoder_funcs st7571_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_encoder_helper_funcs st7571_encoder_helper_funcs = {
+	.atomic_enable = st7571_encoder_atomic_enable,
+	.atomic_disable = st7571_encoder_atomic_disable,
+};
+
+/*
+ * Connector
+ */
+
+static const struct drm_connector_funcs st7571_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_mode_config_funcs st7571_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static struct drm_display_mode st7571_mode(struct st7571_device *st7571)
+{
+ struct drm_display_mode mode = {
+ DRM_SIMPLE_MODE(st7571->ncols, st7571->nlines,
+ st7571->width_mm, st7571->height_mm),
+ };
+
+ return mode;
+}
+
+static int st7571_mode_config_init(struct st7571_device *st7571)
+{
+ struct drm_device *dev = &st7571->dev;
+ const struct st7571_panel_constraints *constraints = &st7571->pdata->constraints;
+ int ret;
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
+
+ dev->mode_config.min_width = constraints->min_ncols;
+ dev->mode_config.min_height = constraints->min_nlines;
+ dev->mode_config.max_width = constraints->max_ncols;
+ dev->mode_config.max_height = constraints->max_nlines;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.funcs = &st7571_mode_config_funcs;
+
+ return 0;
+}
+
+static int st7571_plane_init(struct st7571_device *st7571,
+ const struct st7571_panel_format *pformat)
+{
+ struct drm_plane *primary_plane = &st7571->primary_plane;
+ struct drm_device *dev = &st7571->dev;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &st7571_primary_plane_funcs,
+ pformat->formats,
+ pformat->nformats,
+ st7571_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+
+ drm_plane_helper_add(primary_plane, &st7571_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ return 0;
+}
+
+static int st7571_crtc_init(struct st7571_device *st7571)
+{
+ struct drm_plane *primary_plane = &st7571->primary_plane;
+ struct drm_crtc *crtc = &st7571->crtc;
+ struct drm_device *dev = &st7571->dev;
+ int ret;
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &st7571_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(crtc, &st7571_crtc_helper_funcs);
+
+ return 0;
+}
+
+static int st7571_encoder_init(struct st7571_device *st7571)
+{
+ struct drm_encoder *encoder = &st7571->encoder;
+ struct drm_crtc *crtc = &st7571->crtc;
+ struct drm_device *dev = &st7571->dev;
+ int ret;
+
+ ret = drm_encoder_init(dev, encoder, &st7571_encoder_funcs, DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
+
+ drm_encoder_helper_add(encoder, &st7571_encoder_helper_funcs);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ return 0;
+}
+
+static int st7571_connector_init(struct st7571_device *st7571)
+{
+ struct drm_connector *connector = &st7571->connector;
+ struct drm_encoder *encoder = &st7571->encoder;
+ struct drm_device *dev = &st7571->dev;
+ int ret;
+
+ ret = drm_connector_init(dev, connector, &st7571_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(connector, &st7571_connector_helper_funcs);
+
+ return drm_connector_attach_encoder(connector, encoder);
+}
+
+DEFINE_DRM_GEM_FOPS(st7571_fops);
+
+static const struct drm_driver st7571_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+
+ .fops = &st7571_fops,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
+};
+
+static const struct regmap_bus st7571_regmap_bus = {
+ .read = st7571_regmap_read,
+ .write = st7571_regmap_write,
+};
+
+static const struct regmap_config st7571_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .use_single_write = true,
+};
+
+static int st7571_validate_parameters(struct st7571_device *st7571)
+{
+ struct device *dev = st7571->dev.dev;
+ const struct st7571_panel_constraints *constraints = &st7571->pdata->constraints;
+
+ if (st7571->width_mm == 0) {
+ dev_err(dev, "Invalid panel width\n");
+ return -EINVAL;
+ }
+
+ if (st7571->height_mm == 0) {
+ dev_err(dev, "Invalid panel height\n");
+ return -EINVAL;
+ }
+
+ if (st7571->nlines < constraints->min_nlines ||
+ st7571->nlines > constraints->max_nlines) {
+ dev_err(dev, "Invalid timing configuration.\n");
+ return -EINVAL;
+ }
+
+ if (st7571->startline + st7571->nlines > constraints->max_nlines) {
+ dev_err(dev, "Invalid timing configuration.\n");
+ return -EINVAL;
+ }
+
+ if (st7571->ncols < constraints->min_ncols ||
+ st7571->ncols > constraints->max_ncols) {
+ dev_err(dev, "Invalid timing configuration.\n");
+ return -EINVAL;
+ }
+
+ if (st7571->grayscale && !constraints->support_grayscale) {
+ dev_err(dev, "Grayscale not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int st7571_parse_dt(struct st7571_device *st7571)
+{
+ struct device *dev = &st7571->client->dev;
+ struct device_node *np = dev->of_node;
+ struct display_timing dt;
+ int ret;
+
+ ret = of_get_display_timing(np, "panel-timing", &dt);
+ if (ret) {
+ dev_err(dev, "Failed to get display timing from DT\n");
+ return ret;
+ }
+
+ of_property_read_u32(np, "width-mm", &st7571->width_mm);
+ of_property_read_u32(np, "height-mm", &st7571->height_mm);
+ st7571->grayscale = of_property_read_bool(np, "sitronix,grayscale");
+
+ if (st7571->grayscale) {
+ st7571->pformat = &st7571_grayscale;
+ st7571->bpp = 2;
+ } else {
+ st7571->pformat = &st7571_monochrome;
+ st7571->bpp = 1;
+ }
+
+ st7571->startline = dt.vfront_porch.typ;
+ st7571->nlines = dt.vactive.typ;
+ st7571->ncols = dt.hactive.typ;
+
+ st7571->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(st7571->reset))
+ return PTR_ERR(st7571->reset);
+
+ return 0;
+}
+
+static void st7571_reset(struct st7571_device *st7571)
+{
+ gpiod_set_value_cansleep(st7571->reset, 1);
+ fsleep(20);
+ gpiod_set_value_cansleep(st7571->reset, 0);
+}
+
+static int st7571_lcd_init(struct st7571_device *st7571)
+{
+ /*
+ * Most of the initialization sequence is taken directly from the
+	 * reference initialization code in the ST7571 datasheet.
+ */
+ u8 commands[] = {
+ ST7571_DISPLAY_OFF,
+
+ ST7571_SET_MODE_MSB,
+ ST7571_SET_MODE_LSB(0x2e),
+
+ ST7571_SET_SEG_SCAN_DIR(0),
+ ST7571_SET_COM_SCAN_DIR(1),
+
+ ST7571_SET_COM0_MSB,
+ ST7571_SET_COM0_LSB(0x00),
+
+ ST7571_SET_START_LINE_MSB,
+ ST7571_SET_START_LINE_LSB(st7571->startline),
+
+ ST7571_OSC_ON,
+ ST7571_SET_REGULATOR_REG(5),
+ ST7571_SET_CONTRAST_MSB,
+ ST7571_SET_CONTRAST_LSB(0x33),
+ ST7571_SET_LCD_BIAS(0x04),
+ ST7571_SET_DISPLAY_DUTY_MSB,
+ ST7571_SET_DISPLAY_DUTY_LSB(st7571->nlines),
+
+ ST7571_SET_POWER(0x4), /* Power Control, VC: ON, VR: OFF, VF: OFF */
+ ST7571_SET_POWER(0x6), /* Power Control, VC: ON, VR: ON, VF: OFF */
+ ST7571_SET_POWER(0x7), /* Power Control, VC: ON, VR: ON, VF: ON */
+
+ ST7571_COMMAND_SET_3,
+ ST7571_SET_COLOR_MODE(st7571->pformat->mode),
+ ST7571_COMMAND_SET_NORMAL,
+
+ ST7571_SET_REVERSE(0),
+ ST7571_SET_ENTIRE_DISPLAY_ON(0),
+ };
+
+ /* Perform a reset before initializing the controller */
+ st7571_reset(st7571);
+
+ return st7571_send_command_list(st7571, commands, ARRAY_SIZE(commands));
+}
+
+static int st7571_probe(struct i2c_client *client)
+{
+ struct st7571_device *st7571;
+ struct drm_device *dev;
+ int ret;
+
+ st7571 = devm_drm_dev_alloc(&client->dev, &st7571_driver,
+ struct st7571_device, dev);
+ if (IS_ERR(st7571))
+ return PTR_ERR(st7571);
+
+ dev = &st7571->dev;
+ st7571->client = client;
+ i2c_set_clientdata(client, st7571);
+ st7571->pdata = device_get_match_data(&client->dev);
+
+ ret = st7571_parse_dt(st7571);
+ if (ret)
+ return ret;
+
+ ret = st7571_validate_parameters(st7571);
+ if (ret)
+ return ret;
+
+ st7571->mode = st7571_mode(st7571);
+
+ /*
+ * The hardware design could make it hard to detect a NAK on the I2C bus.
+	 * If the adapter does not support protocol mangling, do not set the
+	 * I2C_M_IGNORE_NAK flag, at the expense of possible cruft in the logs.
+ */
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_PROTOCOL_MANGLING))
+ st7571->ignore_nak = true;
+
+ st7571->regmap = devm_regmap_init(&client->dev, &st7571_regmap_bus,
+ client, &st7571_regmap_config);
+ if (IS_ERR(st7571->regmap)) {
+ return dev_err_probe(&client->dev, PTR_ERR(st7571->regmap),
+ "Failed to initialize regmap\n");
+ }
+
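+	/*
+	 * Illustrative sizing: a 128x128 grayscale panel (bpp == 2) needs
+	 * 128 * 128 * 2 / 8 == 4096 bytes of intermediate buffer and
+	 * 128 * 2 == 256 bytes for one transformed row.
+	 */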
+ st7571->hwbuf = devm_kzalloc(&client->dev,
+ (st7571->nlines * st7571->ncols * st7571->bpp) / 8,
+ GFP_KERNEL);
+ if (!st7571->hwbuf)
+ return -ENOMEM;
+
+ st7571->row = devm_kzalloc(&client->dev,
+ (st7571->ncols * st7571->bpp),
+ GFP_KERNEL);
+ if (!st7571->row)
+ return -ENOMEM;
+
+ ret = st7571_mode_config_init(st7571);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize mode config\n");
+
+ ret = st7571_plane_init(st7571, st7571->pformat);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize primary plane\n");
+
+ ret = st7571_crtc_init(st7571);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize CRTC\n");
+
+ ret = st7571_encoder_init(st7571);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize encoder\n");
+
+ ret = st7571_connector_init(st7571);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize connector\n");
+
+ drm_mode_config_reset(dev);
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to register DRM device\n");
+
+ drm_client_setup(dev, NULL);
+ return 0;
+}
+
+static void st7571_remove(struct i2c_client *client)
+{
+ struct st7571_device *st7571 = i2c_get_clientdata(client);
+
+ drm_dev_unplug(&st7571->dev);
+}
+
+static const struct st7571_panel_data st7571_config = {
+ .init = st7571_lcd_init,
+ .constraints = {
+ .min_nlines = 1,
+ .max_nlines = 128,
+ .min_ncols = 128,
+ .max_ncols = 128,
+ .support_grayscale = true,
+ },
+};
+
+static const struct of_device_id st7571_of_match[] = {
+ { .compatible = "sitronix,st7571", .data = &st7571_config },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st7571_of_match);
+
+static const struct i2c_device_id st7571_id[] = {
+ { "st7571", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, st7571_id);
+
+static struct i2c_driver st7571_i2c_driver = {
+ .driver = {
+ .name = "st7571",
+ .of_match_table = st7571_of_match,
+ },
+ .probe = st7571_probe,
+ .remove = st7571_remove,
+ .id_table = st7571_id,
+};
+
+module_i2c_driver(st7571_i2c_driver);
+
+MODULE_AUTHOR("Marcus Folkesson <marcus.folkesson@gmail.com>");
+MODULE_DESCRIPTION("DRM Driver for Sitronix ST7571 LCD controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/sitronix/st7586.c
index a29672d84ede..a29672d84ede 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/sitronix/st7586.c
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/sitronix/st7735r.c
index 1d60f6e5b3bc..1d60f6e5b3bc 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/sitronix/st7735r.c
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c
index cb2816985305..a3447622a33c 100644
--- a/drivers/gpu/drm/sprd/sprd_dpu.c
+++ b/drivers/gpu/drm/sprd/sprd_dpu.c
@@ -784,19 +784,12 @@ static int sprd_dpu_context_init(struct sprd_dpu *dpu,
{
struct platform_device *pdev = to_platform_device(dev);
struct dpu_context *ctx = &dpu->ctx;
- struct resource *res;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get I/O resource\n");
- return -EINVAL;
- }
-
- ctx->base = devm_ioremap(dev, res->start, resource_size(res));
- if (!ctx->base) {
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctx->base)) {
dev_err(dev, "failed to map dpu registers\n");
- return -EFAULT;
+ return PTR_ERR(ctx->base);
}
ctx->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c
index 8fc26479bb6b..23b0e1dc547a 100644
--- a/drivers/gpu/drm/sprd/sprd_dsi.c
+++ b/drivers/gpu/drm/sprd/sprd_dsi.c
@@ -901,18 +901,11 @@ static int sprd_dsi_context_init(struct sprd_dsi *dsi,
{
struct platform_device *pdev = to_platform_device(dev);
struct dsi_context *ctx = &dsi->ctx;
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get I/O resource\n");
- return -EINVAL;
- }
-
- ctx->base = devm_ioremap(dev, res->start, resource_size(res));
- if (!ctx->base) {
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctx->base)) {
drm_err(dsi->drm, "failed to map dsi host registers\n");
- return -ENXIO;
+ return PTR_ERR(ctx->base);
}
ctx->regmap = devm_regmap_init(dev, &regmap_tst_io, dsi, &byte_config);
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 063f82d23d80..8c529b0cca8b 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -177,7 +177,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct device_node *vtg_np;
struct sti_compositor *compo;
- struct resource *res;
unsigned int i;
compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
@@ -194,17 +193,10 @@ static int sti_compositor_probe(struct platform_device *pdev)
memcpy(&compo->data, of_match_node(compositor_of_match, np)->data,
sizeof(struct sti_compositor_data));
-
- /* Get Memory ressources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- DRM_ERROR("Get memory resource failed\n");
- return -ENXIO;
- }
- compo->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (compo->regs == NULL) {
+ compo->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(compo->regs)) {
DRM_ERROR("Register mapping failed\n");
- return -ENXIO;
+ return PTR_ERR(compo->regs);
}
/* Get clock resources */
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 4dcddd02629b..74a1eef4674e 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -511,7 +511,6 @@ static int sti_dvo_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sti_dvo *dvo;
- struct resource *res;
struct device_node *np = dev->of_node;
DRM_INFO("%s\n", __func__);
@@ -523,16 +522,9 @@ static int sti_dvo_probe(struct platform_device *pdev)
}
dvo->dev = pdev->dev;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dvo-reg");
- if (!res) {
- DRM_ERROR("Invalid dvo resource\n");
- return -ENOMEM;
- }
- dvo->regs = devm_ioremap(dev, res->start,
- resource_size(res));
- if (!dvo->regs)
- return -ENOMEM;
+ dvo->regs = devm_platform_ioremap_resource_byname(pdev, "dvo-reg");
+ if (IS_ERR(dvo->regs))
+ return PTR_ERR(dvo->regs);
dvo->clk_pix = devm_clk_get(dev, "dvo_pix");
if (IS_ERR(dvo->clk_pix)) {
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 14fdc00d2ba0..d202b6c1eb8f 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -693,7 +693,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
connector->hda = hda;
- bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
+ bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge)
return -ENOMEM;
@@ -750,16 +750,9 @@ static int sti_hda_probe(struct platform_device *pdev)
return -ENOMEM;
hda->dev = pdev->dev;
-
- /* Get resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hda-reg");
- if (!res) {
- DRM_ERROR("Invalid hda resource\n");
- return -ENOMEM;
- }
- hda->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!hda->regs)
- return -ENOMEM;
+ hda->regs = devm_platform_ioremap_resource_byname(pdev, "hda-reg");
+ if (IS_ERR(hda->regs))
+ return PTR_ERR(hda->regs);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"video-dacs-ctrl");
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 164a34d793d8..37b8d619066e 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1380,7 +1380,6 @@ static int sti_hdmi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sti_hdmi *hdmi;
struct device_node *np = dev->of_node;
- struct resource *res;
struct device_node *ddc;
int ret;
@@ -1399,17 +1398,9 @@ static int sti_hdmi_probe(struct platform_device *pdev)
}
hdmi->dev = pdev->dev;
-
- /* Get resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi-reg");
- if (!res) {
- DRM_ERROR("Invalid hdmi resource\n");
- ret = -ENOMEM;
- goto release_adapter;
- }
- hdmi->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!hdmi->regs) {
- ret = -ENOMEM;
+ hdmi->regs = devm_platform_ioremap_resource_byname(pdev, "hdmi-reg");
+ if (IS_ERR(hdmi->regs)) {
+ ret = PTR_ERR(hdmi->regs);
goto release_adapter;
}
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 0f658709c9d0..03684062309b 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1356,7 +1356,6 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *vtg_np;
struct sti_hqvdp *hqvdp;
- struct resource *res;
DRM_DEBUG_DRIVER("\n");
@@ -1367,17 +1366,10 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
}
hqvdp->dev = dev;
-
- /* Get Memory resources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("Get memory resource failed\n");
- return -ENXIO;
- }
- hqvdp->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!hqvdp->regs) {
+ hqvdp->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hqvdp->regs)) {
DRM_ERROR("Register mapping failed\n");
- return -ENXIO;
+ return PTR_ERR(hqvdp->regs);
}
/* Get clock resources */
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index af6c06f448c4..6a464b035de8 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -838,7 +838,6 @@ static int sti_tvout_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct sti_tvout *tvout;
- struct resource *res;
DRM_INFO("%s\n", __func__);
@@ -850,16 +849,9 @@ static int sti_tvout_probe(struct platform_device *pdev)
return -ENOMEM;
tvout->dev = dev;
-
- /* get memory resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tvout-reg");
- if (!res) {
- DRM_ERROR("Invalid glue resource\n");
- return -ENOMEM;
- }
- tvout->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!tvout->regs)
- return -ENOMEM;
+ tvout->regs = devm_platform_ioremap_resource_byname(pdev, "tvout-reg");
+ if (IS_ERR(tvout->regs))
+ return PTR_ERR(tvout->regs);
/* get reset resources */
tvout->reset = devm_reset_control_get(dev, "tvout");
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 5ba469b711b5..ee81691b3203 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -380,23 +380,15 @@ static int vtg_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sti_vtg *vtg;
- struct resource *res;
int ret;
vtg = devm_kzalloc(dev, sizeof(*vtg), GFP_KERNEL);
if (!vtg)
return -ENOMEM;
-
- /* Get Memory ressources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("Get memory resource failed\n");
- return -ENOMEM;
- }
- vtg->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!vtg->regs) {
+ vtg->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vtg->regs)) {
DRM_ERROR("failed to remap I/O memory\n");
- return -ENOMEM;
+ return PTR_ERR(vtg->regs);
}
vtg->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c
index 4613e8e3b8fd..a3ae9a93ce66 100644
--- a/drivers/gpu/drm/stm/lvds.c
+++ b/drivers/gpu/drm/stm/lvds.c
@@ -934,28 +934,27 @@ static const struct drm_connector_funcs lvds_conn_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int lvds_attach(struct drm_bridge *bridge,
+static int lvds_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct stm_lvds *lvds = bridge_to_stm_lvds(bridge);
struct drm_connector *connector = &lvds->connector;
- struct drm_encoder *encoder = bridge->encoder;
int ret;
- if (!bridge->encoder) {
+ if (!encoder) {
drm_err(bridge->dev, "Parent encoder object not found\n");
return -ENODEV;
}
/* Set the encoder type as caller does not know it */
- bridge->encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
/* No cloning support */
- bridge->encoder->possible_clones = 0;
+ encoder->possible_clones = 0;
/* If we have a next bridge just attach it. */
if (lvds->next_bridge)
- return drm_bridge_attach(bridge->encoder, lvds->next_bridge,
+ return drm_bridge_attach(encoder, lvds->next_bridge,
bridge, flags);
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
diff --git a/drivers/gpu/drm/sysfb/Kconfig b/drivers/gpu/drm/sysfb/Kconfig
new file mode 100644
index 000000000000..9c9884c7efc6
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/Kconfig
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+menu "Drivers for system framebuffers"
+ depends on DRM
+
+config DRM_SYSFB_HELPER
+ tristate
+ depends on DRM
+
+config DRM_EFIDRM
+ tristate "EFI framebuffer driver"
+ depends on DRM && MMU && EFI && (!SYSFB_SIMPLEFB || COMPILE_TEST)
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ select SYSFB
+ help
+ DRM driver for EFI framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the firmware or bootloader before the kernel boots. Scanout
+ buffer, size, and display format must be provided via EFI interfaces.
+
+config DRM_OFDRM
+ tristate "Open Firmware display driver"
+ depends on DRM && MMU && OF && (PPC || COMPILE_TEST)
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ help
+ DRM driver for Open Firmware framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the Open Firmware before the kernel boots. Scanout buffer, size,
+ and display format must be provided via device tree.
+
+config DRM_SIMPLEDRM
+ tristate "Simple framebuffer driver"
+ depends on DRM && MMU
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ help
+ DRM driver for simple platform-provided framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the firmware or bootloader before the kernel boots. Scanout
+ buffer, size, and display format must be provided via device tree,
+ UEFI, VESA, etc.
+
+ On x86 BIOS or UEFI systems, you should also select SYSFB_SIMPLEFB
+ to use UEFI and VESA framebuffers.
+
+config DRM_VESADRM
+ tristate "VESA framebuffer driver"
+ depends on DRM && MMU && X86 && (!SYSFB_SIMPLEFB || COMPILE_TEST)
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ select SYSFB
+ help
+ DRM driver for VESA framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the firmware or bootloader before the kernel boots. Scanout
+ buffer, size, and display format must be provided via VBE interfaces.
+
+endmenu
diff --git a/drivers/gpu/drm/sysfb/Makefile b/drivers/gpu/drm/sysfb/Makefile
new file mode 100644
index 000000000000..a156c496413d
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+drm_sysfb_helper-y := \
+ drm_sysfb.o \
+ drm_sysfb_modeset.o
+drm_sysfb_helper-$(CONFIG_SCREEN_INFO) += drm_sysfb_screen_info.o
+obj-$(CONFIG_DRM_SYSFB_HELPER) += drm_sysfb_helper.o
+
+obj-$(CONFIG_DRM_EFIDRM) += efidrm.o
+obj-$(CONFIG_DRM_OFDRM) += ofdrm.o
+obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o
+obj-$(CONFIG_DRM_VESADRM) += vesadrm.o
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb.c b/drivers/gpu/drm/sysfb/drm_sysfb.c
new file mode 100644
index 000000000000..308f82153b15
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/drm_sysfb.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/export.h>
+#include <linux/limits.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+
+#include <drm/drm_print.h>
+
+#include "drm_sysfb_helper.h"
+
+int drm_sysfb_get_validated_int(struct drm_device *dev, const char *name,
+ u64 value, u32 max)
+{
+ if (value > min(max, INT_MAX)) {
+ drm_warn(dev, "%s of %llu exceeds maximum of %u\n", name, value, max);
+ return -EINVAL;
+ }
+ return value;
+}
+EXPORT_SYMBOL(drm_sysfb_get_validated_int);
+
+int drm_sysfb_get_validated_int0(struct drm_device *dev, const char *name,
+ u64 value, u32 max)
+{
+ if (!value) {
+ drm_warn(dev, "%s of 0 not allowed\n", name);
+ return -EINVAL;
+ }
+ return drm_sysfb_get_validated_int(dev, name, value, max);
+}
+EXPORT_SYMBOL(drm_sysfb_get_validated_int0);
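+
+/*
+ * Example (illustrative): validating a firmware-reported width before
+ * storing it in an int field; fw_width stands in for whatever the
+ * firmware interface reported:
+ *
+ *	int width = drm_sysfb_get_validated_int0(dev, "width", fw_width, U16_MAX);
+ *
+ *	if (width < 0)
+ *		return width;
+ */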
+
+MODULE_DESCRIPTION("Helpers for DRM sysfb drivers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
new file mode 100644
index 000000000000..cb08a88242cc
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef DRM_SYSFB_HELPER_H
+#define DRM_SYSFB_HELPER_H
+
+#include <linux/container_of.h>
+#include <linux/iosys-map.h>
+
+#include <video/pixel_format.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_modes.h>
+
+struct drm_format_info;
+struct drm_scanout_buffer;
+struct screen_info;
+
+/*
+ * Input parsing
+ */
+
+struct drm_sysfb_format {
+ struct pixel_format pixel;
+ u32 fourcc;
+};
+
+int drm_sysfb_get_validated_int(struct drm_device *dev, const char *name,
+ u64 value, u32 max);
+int drm_sysfb_get_validated_int0(struct drm_device *dev, const char *name,
+ u64 value, u32 max);
+
+#if defined(CONFIG_SCREEN_INFO)
+int drm_sysfb_get_width_si(struct drm_device *dev, const struct screen_info *si);
+int drm_sysfb_get_height_si(struct drm_device *dev, const struct screen_info *si);
+struct resource *drm_sysfb_get_memory_si(struct drm_device *dev,
+ const struct screen_info *si,
+ struct resource *res);
+int drm_sysfb_get_stride_si(struct drm_device *dev, const struct screen_info *si,
+ const struct drm_format_info *format,
+ unsigned int width, unsigned int height, u64 size);
+u64 drm_sysfb_get_visible_size_si(struct drm_device *dev, const struct screen_info *si,
+ unsigned int height, unsigned int stride, u64 size);
+const struct drm_format_info *drm_sysfb_get_format_si(struct drm_device *dev,
+ const struct drm_sysfb_format *formats,
+ size_t nformats,
+ const struct screen_info *si);
+#endif
+
+/*
+ * Display modes
+ */
+
+struct drm_display_mode drm_sysfb_mode(unsigned int width,
+ unsigned int height,
+ unsigned int width_mm,
+ unsigned int height_mm);
+
+/*
+ * Device
+ */
+
+struct drm_sysfb_device {
+ struct drm_device dev;
+
+ const u8 *edid; /* can be NULL */
+
+ /* hardware settings */
+ struct drm_display_mode fb_mode;
+ const struct drm_format_info *fb_format;
+ unsigned int fb_pitch;
+ unsigned int fb_gamma_lut_size;
+
+ /* hardware-framebuffer kernel address */
+ struct iosys_map fb_addr;
+};
+
+static inline struct drm_sysfb_device *to_drm_sysfb_device(struct drm_device *dev)
+{
+ return container_of(dev, struct drm_sysfb_device, dev);
+}
+
+/*
+ * Plane
+ */
+
+int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *new_state);
+void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+void drm_sysfb_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb);
+
+#define DRM_SYSFB_PLANE_NFORMATS(_num_native) \
+ ((_num_native) + 1)
+
+#define DRM_SYSFB_PLANE_FORMAT_MODIFIERS \
+ DRM_FORMAT_MOD_LINEAR, \
+ DRM_FORMAT_MOD_INVALID
+
+#define DRM_SYSFB_PLANE_HELPER_FUNCS \
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \
+ .atomic_check = drm_sysfb_plane_helper_atomic_check, \
+ .atomic_update = drm_sysfb_plane_helper_atomic_update, \
+ .atomic_disable = drm_sysfb_plane_helper_atomic_disable, \
+ .get_scanout_buffer = drm_sysfb_plane_helper_get_scanout_buffer
+
+#define DRM_SYSFB_PLANE_FUNCS \
+ .update_plane = drm_atomic_helper_update_plane, \
+ .disable_plane = drm_atomic_helper_disable_plane, \
+ DRM_GEM_SHADOW_PLANE_FUNCS
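+
+/*
+ * Example (illustrative, my_* names are placeholders): a sysfb driver can
+ * assemble its plane vtables entirely from these macros:
+ *
+ *	static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
+ *		DRM_SYSFB_PLANE_HELPER_FUNCS,
+ *	};
+ *
+ *	static const struct drm_plane_funcs my_plane_funcs = {
+ *		DRM_SYSFB_PLANE_FUNCS,
+ *		.destroy = drm_plane_cleanup,
+ *	};
+ */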
+
+/*
+ * CRTC
+ */
+
+struct drm_sysfb_crtc_state {
+ struct drm_crtc_state base;
+
+ /* Primary-plane format; required for color mgmt. */
+ const struct drm_format_info *format;
+};
+
+static inline struct drm_sysfb_crtc_state *
+to_drm_sysfb_crtc_state(struct drm_crtc_state *base)
+{
+ return container_of(base, struct drm_sysfb_crtc_state, base);
+}
+
+enum drm_mode_status drm_sysfb_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+int drm_sysfb_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state);
+
+#define DRM_SYSFB_CRTC_HELPER_FUNCS \
+ .mode_valid = drm_sysfb_crtc_helper_mode_valid, \
+ .atomic_check = drm_sysfb_crtc_helper_atomic_check
+
+void drm_sysfb_crtc_reset(struct drm_crtc *crtc);
+struct drm_crtc_state *drm_sysfb_crtc_atomic_duplicate_state(struct drm_crtc *crtc);
+void drm_sysfb_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state);
+
+#define DRM_SYSFB_CRTC_FUNCS \
+ .reset = drm_sysfb_crtc_reset, \
+ .set_config = drm_atomic_helper_set_config, \
+ .page_flip = drm_atomic_helper_page_flip, \
+ .atomic_duplicate_state = drm_sysfb_crtc_atomic_duplicate_state, \
+ .atomic_destroy_state = drm_sysfb_crtc_atomic_destroy_state
+
+/*
+ * Connector
+ */
+
+int drm_sysfb_connector_helper_get_modes(struct drm_connector *connector);
+
+#define DRM_SYSFB_CONNECTOR_HELPER_FUNCS \
+ .get_modes = drm_sysfb_connector_helper_get_modes
+
+#define DRM_SYSFB_CONNECTOR_FUNCS \
+ .reset = drm_atomic_helper_connector_reset, \
+ .fill_modes = drm_helper_probe_single_connector_modes, \
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, \
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state
+
+/*
+ * Mode config
+ */
+
+#define DRM_SYSFB_MODE_CONFIG_FUNCS \
+ .fb_create = drm_gem_fb_create_with_dirty, \
+ .atomic_check = drm_atomic_helper_check, \
+ .atomic_commit = drm_atomic_helper_commit
+
+#endif
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
new file mode 100644
index 000000000000..ffaa2522ab96
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/export.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_panic.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include "drm_sysfb_helper.h"
+
+struct drm_display_mode drm_sysfb_mode(unsigned int width,
+ unsigned int height,
+ unsigned int width_mm,
+ unsigned int height_mm)
+{
+ /*
+ * Assume a monitor resolution of 96 dpi to
+ * get a somewhat reasonable screen size.
+ */
+ if (!width_mm)
+ width_mm = DRM_MODE_RES_MM(width, 96ul);
+ if (!height_mm)
+ height_mm = DRM_MODE_RES_MM(height, 96ul);
+
+ {
+ const struct drm_display_mode mode = {
+ DRM_MODE_INIT(60, width, height, width_mm, height_mm)
+ };
+
+ return mode;
+ }
+}
+EXPORT_SYMBOL(drm_sysfb_mode);
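+
+/*
+ * Example (illustrative): a driver that probed a 800x600 scanout but no
+ * physical size can call
+ *
+ *	sysfb->fb_mode = drm_sysfb_mode(800, 600, 0, 0);
+ *
+ * and let the helper derive the millimeter dimensions from the assumed
+ * 96 dpi.
+ */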
+
+/*
+ * Plane
+ */
+
+int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *new_state)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(plane->dev);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
+ struct drm_shadow_plane_state *new_shadow_plane_state =
+ to_drm_shadow_plane_state(new_plane_state);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ struct drm_sysfb_crtc_state *new_sysfb_crtc_state;
+ int ret;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+ else if (!new_plane_state->visible)
+ return 0;
+
+ if (new_fb->format != sysfb->fb_format) {
+ void *buf;
+
+ /* format conversion necessary; reserve buffer */
+ buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
+ sysfb->fb_pitch, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
+
+ new_sysfb_crtc_state = to_drm_sysfb_crtc_state(new_crtc_state);
+ new_sysfb_crtc_state->format = new_fb->format;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_check);
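+
+/*
+ * Note on the reservation above: the format helpers convert one
+ * scanline at a time, so a single output line of fb_pitch bytes is
+ * sufficient scratch space for the conversion.
+ */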
+
+void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ unsigned int dst_pitch = sysfb->fb_pitch;
+ const struct drm_format_info *dst_format = sysfb->fb_format;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+ int ret, idx;
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ return;
+
+ if (!drm_dev_enter(dev, &idx))
+ goto out_drm_gem_fb_end_cpu_access;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ struct iosys_map dst = sysfb->fb_addr;
+ struct drm_rect dst_clip = plane_state->dst;
+
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
+
+ iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip));
+ drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb,
+ &damage, &shadow_plane_state->fmtcnv_state);
+ }
+
+ drm_dev_exit(idx);
+out_drm_gem_fb_end_cpu_access:
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_update);
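+
+/*
+ * Only the clip rectangles reported by the damage iterator are blitted
+ * from the shadow plane into the firmware framebuffer, which keeps
+ * per-frame traffic over slow I/O memory to a minimum.
+ */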
+
+void drm_sysfb_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct iosys_map dst = sysfb->fb_addr;
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ void __iomem *dst_vmap = dst.vaddr_iomem; /* TODO: Use mapping abstraction */
+ unsigned int dst_pitch = sysfb->fb_pitch;
+ const struct drm_format_info *dst_format = sysfb->fb_format;
+ struct drm_rect dst_clip;
+ unsigned long lines, linepixels, i;
+ int idx;
+
+ drm_rect_init(&dst_clip,
+ plane_state->src_x >> 16, plane_state->src_y >> 16,
+ plane_state->src_w >> 16, plane_state->src_h >> 16);
+
+ lines = drm_rect_height(&dst_clip);
+ linepixels = drm_rect_width(&dst_clip);
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
+ /* Clear buffer to black if disabled */
+ dst_vmap += drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip);
+ for (i = 0; i < lines; ++i) {
+ memset_io(dst_vmap, 0, linepixels * dst_format->cpp[0]);
+ dst_vmap += dst_pitch;
+ }
+
+ drm_dev_exit(idx);
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_disable);
+
+int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(plane->dev);
+
+ sb->width = sysfb->fb_mode.hdisplay;
+ sb->height = sysfb->fb_mode.vdisplay;
+ sb->format = sysfb->fb_format;
+ sb->pitch[0] = sysfb->fb_pitch;
+ sb->map[0] = sysfb->fb_addr;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_get_scanout_buffer);
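+
+/*
+ * The scanout-buffer description is consumed by DRM's panic handler,
+ * which draws its message directly into the firmware framebuffer when
+ * the kernel crashes.
+ */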
+
+/*
+ * CRTC
+ */
+
+static void drm_sysfb_crtc_state_destroy(struct drm_sysfb_crtc_state *sysfb_crtc_state)
+{
+ __drm_atomic_helper_crtc_destroy_state(&sysfb_crtc_state->base);
+
+ kfree(sysfb_crtc_state);
+}
+
+enum drm_mode_status drm_sysfb_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(crtc->dev);
+
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, &sysfb->fb_mode);
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_helper_mode_valid);
+
+int drm_sysfb_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ int ret;
+
+ if (!new_crtc_state->enable)
+ return 0;
+
+ ret = drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
+ if (ret)
+ return ret;
+
+ if (new_crtc_state->color_mgmt_changed) {
+ const size_t gamma_lut_length =
+ sysfb->fb_gamma_lut_size * sizeof(struct drm_color_lut);
+ const struct drm_property_blob *gamma_lut = new_crtc_state->gamma_lut;
+
+ if (gamma_lut && (gamma_lut->length != gamma_lut_length)) {
+ drm_dbg(dev, "Incorrect gamma_lut length %zu\n", gamma_lut->length);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_helper_atomic_check);
+
+void drm_sysfb_crtc_reset(struct drm_crtc *crtc)
+{
+ struct drm_sysfb_crtc_state *sysfb_crtc_state;
+
+ if (crtc->state)
+ drm_sysfb_crtc_state_destroy(to_drm_sysfb_crtc_state(crtc->state));
+
+ sysfb_crtc_state = kzalloc(sizeof(*sysfb_crtc_state), GFP_KERNEL);
+ if (sysfb_crtc_state)
+ __drm_atomic_helper_crtc_reset(crtc, &sysfb_crtc_state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_reset);
+
+struct drm_crtc_state *drm_sysfb_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_sysfb_crtc_state *new_sysfb_crtc_state;
+ struct drm_sysfb_crtc_state *sysfb_crtc_state;
+
+ if (drm_WARN_ON(dev, !crtc_state))
+ return NULL;
+
+ new_sysfb_crtc_state = kzalloc(sizeof(*new_sysfb_crtc_state), GFP_KERNEL);
+ if (!new_sysfb_crtc_state)
+ return NULL;
+
+ sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &new_sysfb_crtc_state->base);
+ new_sysfb_crtc_state->format = sysfb_crtc_state->format;
+
+ return &new_sysfb_crtc_state->base;
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_atomic_duplicate_state);
+
+void drm_sysfb_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state)
+{
+ drm_sysfb_crtc_state_destroy(to_drm_sysfb_crtc_state(crtc_state));
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_atomic_destroy_state);
+
+/*
+ * Connector
+ */
+
+static int drm_sysfb_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
+{
+ struct drm_sysfb_device *sysfb = data;
+ const u8 *edid = sysfb->edid;
+ size_t off = block * EDID_LENGTH;
+ size_t end = off + len;
+
+ if (!edid)
+ return -EINVAL;
+ if (end > EDID_LENGTH)
+ return -EINVAL;
+ memcpy(buf, &edid[off], len);
+
+ /*
+ * We don't have EDID extensions available and reporting them
+ * will upset DRM helpers. Thus clear the extension field and
+ * update the checksum. Adding the extension flag to the checksum
+ * does this.
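+ * (A valid EDID block sums to 0 modulo 256 over its 128 bytes; adding
+ * the old extension count to the checksum byte before clearing it
+ * leaves that sum, and thus the block's validity, unchanged.)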
+ */
+ buf[127] += buf[126];
+ buf[126] = 0;
+
+ return 0;
+}
+
+int drm_sysfb_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(connector->dev);
+ const struct drm_edid *drm_edid;
+
+ if (sysfb->edid) {
+ drm_edid = drm_edid_read_custom(connector, drm_sysfb_get_edid_block, sysfb);
+ drm_edid_connector_update(connector, drm_edid);
+ drm_edid_free(drm_edid);
+ }
+
+	/* Return the fixed mode even when EDID data is present */
+ return drm_connector_helper_get_modes_fixed(connector, &sysfb->fb_mode);
+}
+EXPORT_SYMBOL(drm_sysfb_connector_helper_get_modes);
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c b/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c
new file mode 100644
index 000000000000..0b3fb874a51f
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/export.h>
+#include <linux/limits.h>
+#include <linux/minmax.h>
+#include <linux/screen_info.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
+
+#include "drm_sysfb_helper.h"
+
+static s64 drm_sysfb_get_validated_size0(struct drm_device *dev, const char *name,
+ u64 value, u64 max)
+{
+ if (!value) {
+ drm_warn(dev, "%s of 0 not allowed\n", name);
+ return -EINVAL;
+ } else if (value > min(max, S64_MAX)) {
+ drm_warn(dev, "%s of %llu exceeds maximum of %llu\n", name, value, max);
+ return -EINVAL;
+ }
+ return value;
+}
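+
+/*
+ * Clamping against S64_MAX keeps the returned value representable in
+ * the s64 return type, so callers can tell valid sizes from -EINVAL.
+ */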
+
+int drm_sysfb_get_width_si(struct drm_device *dev, const struct screen_info *si)
+{
+ return drm_sysfb_get_validated_int0(dev, "width", si->lfb_width, U16_MAX);
+}
+EXPORT_SYMBOL(drm_sysfb_get_width_si);
+
+int drm_sysfb_get_height_si(struct drm_device *dev, const struct screen_info *si)
+{
+ return drm_sysfb_get_validated_int0(dev, "height", si->lfb_height, U16_MAX);
+}
+EXPORT_SYMBOL(drm_sysfb_get_height_si);
+
+struct resource *drm_sysfb_get_memory_si(struct drm_device *dev,
+ const struct screen_info *si,
+ struct resource *res)
+{
+ ssize_t num;
+
+ num = screen_info_resources(si, res, 1);
+ if (!num) {
+ drm_warn(dev, "memory resource not found\n");
+ return NULL;
+ }
+
+ return res;
+}
+EXPORT_SYMBOL(drm_sysfb_get_memory_si);
+
+int drm_sysfb_get_stride_si(struct drm_device *dev, const struct screen_info *si,
+ const struct drm_format_info *format,
+ unsigned int width, unsigned int height, u64 size)
+{
+ u64 lfb_linelength = si->lfb_linelength;
+
+ if (!lfb_linelength)
+ lfb_linelength = drm_format_info_min_pitch(format, 0, width);
+
+ return drm_sysfb_get_validated_int0(dev, "stride", lfb_linelength, div64_u64(size, height));
+}
+EXPORT_SYMBOL(drm_sysfb_get_stride_si);
+
+u64 drm_sysfb_get_visible_size_si(struct drm_device *dev, const struct screen_info *si,
+ unsigned int height, unsigned int stride, u64 size)
+{
+ u64 vsize = PAGE_ALIGN(height * stride);
+
+ return drm_sysfb_get_validated_size0(dev, "visible size", vsize, size);
+}
+EXPORT_SYMBOL(drm_sysfb_get_visible_size_si);
+
+const struct drm_format_info *drm_sysfb_get_format_si(struct drm_device *dev,
+ const struct drm_sysfb_format *formats,
+ size_t nformats,
+ const struct screen_info *si)
+{
+ const struct drm_format_info *format = NULL;
+ u32 bits_per_pixel;
+ size_t i;
+
+ bits_per_pixel = __screen_info_lfb_bits_per_pixel(si);
+
+ for (i = 0; i < nformats; ++i) {
+ const struct pixel_format *f = &formats[i].pixel;
+
+ if (bits_per_pixel == f->bits_per_pixel &&
+ si->red_size == f->red.length &&
+ si->red_pos == f->red.offset &&
+ si->green_size == f->green.length &&
+ si->green_pos == f->green.offset &&
+ si->blue_size == f->blue.length &&
+ si->blue_pos == f->blue.offset) {
+ format = drm_format_info(formats[i].fourcc);
+ break;
+ }
+ }
+
+ if (!format)
+ drm_warn(dev, "No compatible color format found\n");
+
+ return format;
+}
+EXPORT_SYMBOL(drm_sysfb_get_format_si);
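+
+/*
+ * Matching example (illustrative): a firmware mode reported as 32 bits
+ * per pixel with 8-bit channels at red offset 16, green offset 8 and
+ * blue offset 0 matches PIXEL_FORMAT_XRGB8888 and resolves to
+ * DRM_FORMAT_XRGB8888.
+ */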
diff --git a/drivers/gpu/drm/sysfb/efidrm.c b/drivers/gpu/drm/sysfb/efidrm.c
new file mode 100644
index 000000000000..46912924636a
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/efidrm.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/aperture.h>
+#include <linux/efi.h>
+#include <linux/limits.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fbdev_shmem.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/edid.h>
+#include <video/pixel_format.h>
+
+#include "drm_sysfb_helper.h"
+
+#define DRIVER_NAME "efidrm"
+#define DRIVER_DESC "DRM driver for EFI platform devices"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static const struct drm_format_info *efidrm_get_format_si(struct drm_device *dev,
+ const struct screen_info *si)
+{
+ static const struct drm_sysfb_format formats[] = {
+ { PIXEL_FORMAT_XRGB1555, DRM_FORMAT_XRGB1555, },
+ { PIXEL_FORMAT_RGB565, DRM_FORMAT_RGB565, },
+ { PIXEL_FORMAT_RGB888, DRM_FORMAT_RGB888, },
+ { PIXEL_FORMAT_XRGB8888, DRM_FORMAT_XRGB8888, },
+ { PIXEL_FORMAT_XBGR8888, DRM_FORMAT_XBGR8888, },
+ { PIXEL_FORMAT_XRGB2101010, DRM_FORMAT_XRGB2101010, },
+ };
+
+ return drm_sysfb_get_format_si(dev, formats, ARRAY_SIZE(formats), si);
+}
+
+static u64 efidrm_get_mem_flags(struct drm_device *dev, resource_size_t start,
+ resource_size_t len)
+{
+ u64 attribute = EFI_MEMORY_UC | EFI_MEMORY_WC |
+ EFI_MEMORY_WT | EFI_MEMORY_WB;
+ u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
+ resource_size_t end = start + len;
+ efi_memory_desc_t md;
+ u64 md_end;
+
+ if (!efi_enabled(EFI_MEMMAP) || efi_mem_desc_lookup(start, &md))
+ goto out;
+
+ md_end = md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT);
+ if (end > md_end)
+ goto out;
+
+ attribute &= md.attribute;
+ if (attribute) {
+ mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
+ mem_flags &= attribute;
+ }
+
+out:
+ return mem_flags;
+}
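+
+/*
+ * If the framebuffer is not fully covered by a single EFI memory
+ * descriptor, the UC|WC default above is kept and the probe code
+ * falls back to a write-combined (or uncached) mapping.
+ */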
+
+/*
+ * EFI device
+ */
+
+struct efidrm_device {
+ struct drm_sysfb_device sysfb;
+
+ /* modesetting */
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+/*
+ * Modesetting
+ */
+
+static const u64 efidrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
+};
+
+static const struct drm_plane_helper_funcs efidrm_primary_plane_helper_funcs = {
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs efidrm_primary_plane_funcs = {
+ DRM_SYSFB_PLANE_FUNCS,
+ .destroy = drm_plane_cleanup,
+};
+
+static const struct drm_crtc_helper_funcs efidrm_crtc_helper_funcs = {
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
+};
+
+static const struct drm_crtc_funcs efidrm_crtc_funcs = {
+ DRM_SYSFB_CRTC_FUNCS,
+ .destroy = drm_crtc_cleanup,
+};
+
+static const struct drm_encoder_funcs efidrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_connector_helper_funcs efidrm_connector_helper_funcs = {
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs efidrm_connector_funcs = {
+ DRM_SYSFB_CONNECTOR_FUNCS,
+ .destroy = drm_connector_cleanup,
+};
+
+static const struct drm_mode_config_funcs efidrm_mode_config_funcs = {
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
+};
+
+/*
+ * Init / Cleanup
+ */
+
+static struct efidrm_device *efidrm_device_create(struct drm_driver *drv,
+ struct platform_device *pdev)
+{
+ const struct screen_info *si;
+ const struct drm_format_info *format;
+ int width, height, stride;
+ u64 vsize, mem_flags;
+ struct resource resbuf;
+ struct resource *res;
+ struct efidrm_device *efi;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ struct resource *mem = NULL;
+ void __iomem *screen_base = NULL;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ unsigned long max_width, max_height;
+ size_t nformats;
+ int ret;
+
+ si = dev_get_platdata(&pdev->dev);
+ if (!si)
+ return ERR_PTR(-ENODEV);
+ if (screen_info_video_type(si) != VIDEO_TYPE_EFI)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * EFI DRM driver
+ */
+
+ efi = devm_drm_dev_alloc(&pdev->dev, drv, struct efidrm_device, sysfb.dev);
+ if (IS_ERR(efi))
+ return ERR_CAST(efi);
+ sysfb = &efi->sysfb;
+ dev = &sysfb->dev;
+ platform_set_drvdata(pdev, dev);
+
+ /*
+ * Hardware settings
+ */
+
+ format = efidrm_get_format_si(dev, si);
+ if (!format)
+ return ERR_PTR(-EINVAL);
+ width = drm_sysfb_get_width_si(dev, si);
+ if (width < 0)
+ return ERR_PTR(width);
+ height = drm_sysfb_get_height_si(dev, si);
+ if (height < 0)
+ return ERR_PTR(height);
+ res = drm_sysfb_get_memory_si(dev, si, &resbuf);
+ if (!res)
+ return ERR_PTR(-EINVAL);
+ stride = drm_sysfb_get_stride_si(dev, si, format, width, height, resource_size(res));
+ if (stride < 0)
+ return ERR_PTR(stride);
+ vsize = drm_sysfb_get_visible_size_si(dev, si, height, stride, resource_size(res));
+ if (!vsize)
+ return ERR_PTR(-EINVAL);
+
+ drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d bytes\n",
+ &format->format, width, height, stride);
+
+#ifdef CONFIG_X86
+ if (drm_edid_header_is_valid(edid_info.dummy) == 8)
+ sysfb->edid = edid_info.dummy;
+#endif
+ sysfb->fb_mode = drm_sysfb_mode(width, height, 0, 0);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = stride;
+
+ /*
+ * Memory management
+ */
+
+ ret = devm_aperture_acquire_for_platform_device(pdev, res->start, vsize);
+ if (ret) {
+ drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_dbg(dev, "using I/O memory framebuffer at %pr\n", res);
+
+ mem = devm_request_mem_region(&pdev->dev, res->start, vsize, drv->name);
+ if (!mem) {
+ /*
+ * We cannot make this fatal. Sometimes this comes from magic
+ * spaces our resource handlers simply don't know about. Use
+ * the I/O-memory resource as-is and try to map that instead.
+ */
+ drm_warn(dev, "could not acquire memory region %pr\n", res);
+ mem = res;
+ }
+
+ mem_flags = efidrm_get_mem_flags(dev, res->start, vsize);
+
+ if (mem_flags & EFI_MEMORY_WC)
+ screen_base = devm_ioremap_wc(&pdev->dev, mem->start, resource_size(mem));
+ else if (mem_flags & EFI_MEMORY_UC)
+ screen_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ else if (mem_flags & EFI_MEMORY_WT)
+ screen_base = devm_memremap(&pdev->dev, mem->start, resource_size(mem),
+ MEMREMAP_WT);
+ else if (mem_flags & EFI_MEMORY_WB)
+ screen_base = devm_memremap(&pdev->dev, mem->start, resource_size(mem),
+ MEMREMAP_WB);
+ else
+ drm_err(dev, "invalid mem_flags: 0x%llx\n", mem_flags);
+ if (!screen_base)
+ return ERR_PTR(-ENOMEM);
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
+
+ /*
+ * Modesetting
+ */
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ max_width = max_t(unsigned long, width, DRM_SHADOW_PLANE_MAX_WIDTH);
+ max_height = max_t(unsigned long, height, DRM_SHADOW_PLANE_MAX_HEIGHT);
+
+ dev->mode_config.min_width = width;
+ dev->mode_config.max_width = max_width;
+ dev->mode_config.min_height = height;
+ dev->mode_config.max_height = max_height;
+ dev->mode_config.preferred_depth = format->depth;
+ dev->mode_config.funcs = &efidrm_mode_config_funcs;
+
+ /* Primary plane */
+
+ nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
+ efi->formats, ARRAY_SIZE(efi->formats));
+
+ primary_plane = &efi->primary_plane;
+ ret = drm_universal_plane_init(dev, primary_plane, 0, &efidrm_primary_plane_funcs,
+ efi->formats, nformats,
+ efidrm_primary_plane_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_plane_helper_add(primary_plane, &efidrm_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ /* CRTC */
+
+ crtc = &efi->crtc;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &efidrm_crtc_funcs, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_crtc_helper_add(crtc, &efidrm_crtc_helper_funcs);
+
+ /* Encoder */
+
+ encoder = &efi->encoder;
+ ret = drm_encoder_init(dev, encoder, &efidrm_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /* Connector */
+
+ connector = &efi->connector;
+ ret = drm_connector_init(dev, connector, &efidrm_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_connector_helper_add(connector, &efidrm_connector_helper_funcs);
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ width, height);
+ if (sysfb->edid)
+ drm_connector_attach_edid_property(connector);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
+ return efi;
+}
+
+/*
+ * DRM driver
+ */
+
+DEFINE_DRM_GEM_FOPS(efidrm_fops);
+
+static struct drm_driver efidrm_driver = {
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
+ .fops = &efidrm_fops,
+};
+
+/*
+ * Platform driver
+ */
+
+static int efidrm_probe(struct platform_device *pdev)
+{
+ struct efidrm_device *efi;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ int ret;
+
+ efi = efidrm_device_create(&efidrm_driver, pdev);
+ if (IS_ERR(efi))
+ return PTR_ERR(efi);
+ sysfb = &efi->sysfb;
+ dev = &sysfb->dev;
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ return ret;
+
+ drm_client_setup(dev, sysfb->fb_format);
+
+ return 0;
+}
+
+static void efidrm_remove(struct platform_device *pdev)
+{
+ struct drm_device *dev = platform_get_drvdata(pdev);
+
+ drm_dev_unplug(dev);
+}
+
+static struct platform_driver efidrm_platform_driver = {
+ .driver = {
+ .name = "efi-framebuffer",
+ },
+ .probe = efidrm_probe,
+ .remove = efidrm_remove,
+};
+
+module_platform_driver(efidrm_platform_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/sysfb/ofdrm.c
index 13491c0e704a..fddfe8bea9f7 100644
--- a/drivers/gpu/drm/tiny/ofdrm.c
+++ b/drivers/gpu/drm/sysfb/ofdrm.c
@@ -12,6 +12,7 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
@@ -21,7 +22,8 @@
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
+
+#include "drm_sysfb_helper.h"
#define DRIVER_NAME "ofdrm"
#define DRIVER_DESC "DRM driver for OF platform devices"
@@ -76,20 +78,12 @@ enum ofdrm_model {
static int display_get_validated_int(struct drm_device *dev, const char *name, uint32_t value)
{
- if (value > INT_MAX) {
- drm_err(dev, "invalid framebuffer %s of %u\n", name, value);
- return -EINVAL;
- }
- return (int)value;
+ return drm_sysfb_get_validated_int(dev, name, value, INT_MAX);
}
static int display_get_validated_int0(struct drm_device *dev, const char *name, uint32_t value)
{
- if (!value) {
- drm_err(dev, "invalid framebuffer %s of %u\n", name, value);
- return -EINVAL;
- }
- return display_get_validated_int(dev, name, value);
+ return drm_sysfb_get_validated_int0(dev, name, value, INT_MAX);
}
static const struct drm_format_info *display_get_validated_format(struct drm_device *dev,
@@ -226,6 +220,16 @@ static u64 display_get_address_of(struct drm_device *dev, struct device_node *of
return address;
}
+static const u8 *display_get_edid_of(struct drm_device *dev, struct device_node *of_node,
+ u8 buf[EDID_LENGTH])
+{
+ int ret = of_property_read_u8_array(of_node, "EDID", buf, EDID_LENGTH);
+
+ if (ret)
+ return NULL;
+ return buf;
+}
+
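+/*
+ * Note: "EDID" is the property name used by Open Firmware. Only the
+ * 128-byte base block is read, matching the single-block limit in
+ * drm_sysfb_get_edid_block().
+ */
+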
static bool is_avivo(u32 vendor, u32 device)
{
/* This will match most R5xx */
@@ -290,22 +294,17 @@ struct ofdrm_device_funcs {
};
struct ofdrm_device {
- struct drm_device dev;
- struct platform_device *pdev;
+ struct drm_sysfb_device sysfb;
const struct ofdrm_device_funcs *funcs;
- /* firmware-buffer settings */
- struct iosys_map screen_base;
- struct drm_display_mode mode;
- const struct drm_format_info *format;
- unsigned int pitch;
-
/* colormap */
void __iomem *cmap_base;
+ u8 edid[EDID_LENGTH];
+
/* modesetting */
- uint32_t formats[8];
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
@@ -314,7 +313,7 @@ struct ofdrm_device {
static struct ofdrm_device *ofdrm_device_of_dev(struct drm_device *dev)
{
- return container_of(dev, struct ofdrm_device, dev);
+ return container_of(to_drm_sysfb_device(dev), struct ofdrm_device, sysfb);
}
/*
@@ -354,7 +353,7 @@ static void ofdrm_pci_release(void *data)
static int ofdrm_device_init_pci(struct ofdrm_device *odev)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct pci_dev *pcidev;
@@ -397,7 +396,7 @@ static int ofdrm_device_init_pci(struct ofdrm_device *odev)
static struct resource *ofdrm_find_fb_resource(struct ofdrm_device *odev,
struct resource *fb_res)
{
- struct platform_device *pdev = to_platform_device(odev->dev.dev);
+ struct platform_device *pdev = to_platform_device(odev->sysfb.dev.dev);
struct resource *res, *max_res = NULL;
u32 i;
@@ -423,7 +422,7 @@ static struct resource *ofdrm_find_fb_resource(struct ofdrm_device *odev,
static void __iomem *get_cmap_address_of(struct ofdrm_device *odev, struct device_node *of_node,
int bar_no, unsigned long offset, unsigned long size)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
const __be32 *addr_p;
u64 max_size, address;
unsigned int flags;
@@ -456,7 +455,7 @@ static void __iomem *ofdrm_mach64_cmap_ioremap(struct ofdrm_device *odev,
struct device_node *of_node,
u64 fb_base)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
u64 address;
void __iomem *cmap_base;
@@ -618,7 +617,7 @@ static void __iomem *ofdrm_qemu_cmap_ioremap(struct ofdrm_device *odev,
cpu_to_be32(0x00),
};
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
u64 address;
void __iomem *cmap_base;
@@ -648,7 +647,7 @@ static void ofdrm_qemu_cmap_write(struct ofdrm_device *odev, unsigned char index
static void ofdrm_device_set_gamma_linear(struct ofdrm_device *odev,
const struct drm_format_info *format)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
int i;
switch (format->format) {
@@ -687,7 +686,7 @@ static void ofdrm_device_set_gamma(struct ofdrm_device *odev,
const struct drm_format_info *format,
struct drm_color_lut *lut)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
int i;
switch (format->format) {
@@ -731,205 +730,27 @@ static void ofdrm_device_set_gamma(struct ofdrm_device *odev,
* Modesetting
*/
-struct ofdrm_crtc_state {
- struct drm_crtc_state base;
-
- /* Primary-plane format; required for color mgmt. */
- const struct drm_format_info *format;
+static const u64 ofdrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
};
-static struct ofdrm_crtc_state *to_ofdrm_crtc_state(struct drm_crtc_state *base)
-{
- return container_of(base, struct ofdrm_crtc_state, base);
-}
-
-static void ofdrm_crtc_state_destroy(struct ofdrm_crtc_state *ofdrm_crtc_state)
-{
- __drm_atomic_helper_crtc_destroy_state(&ofdrm_crtc_state->base);
- kfree(ofdrm_crtc_state);
-}
-
-static const uint64_t ofdrm_primary_plane_format_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static int ofdrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *new_state)
-{
- struct drm_device *dev = plane->dev;
- struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
- struct drm_shadow_plane_state *new_shadow_plane_state =
- to_drm_shadow_plane_state(new_plane_state);
- struct drm_framebuffer *new_fb = new_plane_state->fb;
- struct drm_crtc *new_crtc = new_plane_state->crtc;
- struct drm_crtc_state *new_crtc_state = NULL;
- struct ofdrm_crtc_state *new_ofdrm_crtc_state;
- int ret;
-
- if (new_crtc)
- new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- false, false);
- if (ret)
- return ret;
- else if (!new_plane_state->visible)
- return 0;
-
- if (new_fb->format != odev->format) {
- void *buf;
-
- /* format conversion necessary; reserve buffer */
- buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
- odev->pitch, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
-
- new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
-
- new_ofdrm_crtc_state = to_ofdrm_crtc_state(new_crtc_state);
- new_ofdrm_crtc_state->format = new_fb->format;
-
- return 0;
-}
-
-static void ofdrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = plane->dev;
- struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_framebuffer *fb = plane_state->fb;
- unsigned int dst_pitch = odev->pitch;
- const struct drm_format_info *dst_format = odev->format;
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect damage;
- int ret, idx;
-
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- return;
-
- if (!drm_dev_enter(dev, &idx))
- goto out_drm_gem_fb_end_cpu_access;
-
- drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
- drm_atomic_for_each_plane_damage(&iter, &damage) {
- struct iosys_map dst = odev->screen_base;
- struct drm_rect dst_clip = plane_state->dst;
-
- if (!drm_rect_intersect(&dst_clip, &damage))
- continue;
-
- iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip));
- drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb,
- &damage, &shadow_plane_state->fmtcnv_state);
- }
-
- drm_dev_exit(idx);
-out_drm_gem_fb_end_cpu_access:
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-}
-
-static void ofdrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = plane->dev;
- struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
- struct iosys_map dst = odev->screen_base;
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- void __iomem *dst_vmap = dst.vaddr_iomem; /* TODO: Use mapping abstraction */
- unsigned int dst_pitch = odev->pitch;
- const struct drm_format_info *dst_format = odev->format;
- struct drm_rect dst_clip;
- unsigned long lines, linepixels, i;
- int idx;
-
- drm_rect_init(&dst_clip,
- plane_state->src_x >> 16, plane_state->src_y >> 16,
- plane_state->src_w >> 16, plane_state->src_h >> 16);
-
- lines = drm_rect_height(&dst_clip);
- linepixels = drm_rect_width(&dst_clip);
-
- if (!drm_dev_enter(dev, &idx))
- return;
-
- /* Clear buffer to black if disabled */
- dst_vmap += drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip);
- for (i = 0; i < lines; ++i) {
- memset_io(dst_vmap, 0, linepixels * dst_format->cpp[0]);
- dst_vmap += dst_pitch;
- }
-
- drm_dev_exit(idx);
-}
-
static const struct drm_plane_helper_funcs ofdrm_primary_plane_helper_funcs = {
- DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = ofdrm_primary_plane_helper_atomic_check,
- .atomic_update = ofdrm_primary_plane_helper_atomic_update,
- .atomic_disable = ofdrm_primary_plane_helper_atomic_disable,
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
};
static const struct drm_plane_funcs ofdrm_primary_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
+ DRM_SYSFB_PLANE_FUNCS,
.destroy = drm_plane_cleanup,
- DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static enum drm_mode_status ofdrm_crtc_helper_mode_valid(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
-{
- struct ofdrm_device *odev = ofdrm_device_of_dev(crtc->dev);
-
- return drm_crtc_helper_mode_valid_fixed(crtc, mode, &odev->mode);
-}
-
-static int ofdrm_crtc_helper_atomic_check(struct drm_crtc *crtc,
- struct drm_atomic_state *new_state)
-{
- static const size_t gamma_lut_length = OFDRM_GAMMA_LUT_SIZE * sizeof(struct drm_color_lut);
-
- struct drm_device *dev = crtc->dev;
- struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
- int ret;
-
- if (!new_crtc_state->enable)
- return 0;
-
- ret = drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
- if (ret)
- return ret;
-
- if (new_crtc_state->color_mgmt_changed) {
- struct drm_property_blob *gamma_lut = new_crtc_state->gamma_lut;
-
- if (gamma_lut && (gamma_lut->length != gamma_lut_length)) {
- drm_dbg(dev, "Incorrect gamma_lut length %zu\n", gamma_lut->length);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static void ofdrm_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct ofdrm_device *odev = ofdrm_device_of_dev(crtc->dev);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
- struct ofdrm_crtc_state *ofdrm_crtc_state = to_ofdrm_crtc_state(crtc_state);
+ struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
- const struct drm_format_info *format = ofdrm_crtc_state->format;
+ const struct drm_format_info *format = sysfb_crtc_state->format;
if (crtc_state->gamma_lut)
ofdrm_device_set_gamma(odev, format, crtc_state->gamma_lut->data);
@@ -938,91 +759,31 @@ static void ofdrm_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_ato
}
}
-/*
- * The CRTC is always enabled. Screen updates are performed by
- * the primary plane's atomic_update function. Disabling clears
- * the screen in the primary plane's atomic_disable function.
- */
static const struct drm_crtc_helper_funcs ofdrm_crtc_helper_funcs = {
- .mode_valid = ofdrm_crtc_helper_mode_valid,
- .atomic_check = ofdrm_crtc_helper_atomic_check,
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
.atomic_flush = ofdrm_crtc_helper_atomic_flush,
};
-static void ofdrm_crtc_reset(struct drm_crtc *crtc)
-{
- struct ofdrm_crtc_state *ofdrm_crtc_state =
- kzalloc(sizeof(*ofdrm_crtc_state), GFP_KERNEL);
-
- if (crtc->state)
- ofdrm_crtc_state_destroy(to_ofdrm_crtc_state(crtc->state));
-
- if (ofdrm_crtc_state)
- __drm_atomic_helper_crtc_reset(crtc, &ofdrm_crtc_state->base);
- else
- __drm_atomic_helper_crtc_reset(crtc, NULL);
-}
-
-static struct drm_crtc_state *ofdrm_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_crtc_state *crtc_state = crtc->state;
- struct ofdrm_crtc_state *new_ofdrm_crtc_state;
- struct ofdrm_crtc_state *ofdrm_crtc_state;
-
- if (drm_WARN_ON(dev, !crtc_state))
- return NULL;
-
- new_ofdrm_crtc_state = kzalloc(sizeof(*new_ofdrm_crtc_state), GFP_KERNEL);
- if (!new_ofdrm_crtc_state)
- return NULL;
-
- ofdrm_crtc_state = to_ofdrm_crtc_state(crtc_state);
-
- __drm_atomic_helper_crtc_duplicate_state(crtc, &new_ofdrm_crtc_state->base);
- new_ofdrm_crtc_state->format = ofdrm_crtc_state->format;
-
- return &new_ofdrm_crtc_state->base;
-}
-
-static void ofdrm_crtc_atomic_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *crtc_state)
-{
- ofdrm_crtc_state_destroy(to_ofdrm_crtc_state(crtc_state));
-}
-
static const struct drm_crtc_funcs ofdrm_crtc_funcs = {
- .reset = ofdrm_crtc_reset,
+ DRM_SYSFB_CRTC_FUNCS,
.destroy = drm_crtc_cleanup,
- .set_config = drm_atomic_helper_set_config,
- .page_flip = drm_atomic_helper_page_flip,
- .atomic_duplicate_state = ofdrm_crtc_atomic_duplicate_state,
- .atomic_destroy_state = ofdrm_crtc_atomic_destroy_state,
};
-static int ofdrm_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct ofdrm_device *odev = ofdrm_device_of_dev(connector->dev);
-
- return drm_connector_helper_get_modes_fixed(connector, &odev->mode);
-}
+static const struct drm_encoder_funcs ofdrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
static const struct drm_connector_helper_funcs ofdrm_connector_helper_funcs = {
- .get_modes = ofdrm_connector_helper_get_modes,
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
};
static const struct drm_connector_funcs ofdrm_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
+ DRM_SYSFB_CONNECTOR_FUNCS,
.destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_mode_config_funcs ofdrm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
};
/*
@@ -1072,32 +833,19 @@ static const struct ofdrm_device_funcs ofdrm_qemu_device_funcs = {
.cmap_write = ofdrm_qemu_cmap_write,
};
-static struct drm_display_mode ofdrm_mode(unsigned int width, unsigned int height)
-{
- /*
- * Assume a monitor resolution of 96 dpi to
- * get a somewhat reasonable screen size.
- */
- const struct drm_display_mode mode = {
- DRM_MODE_INIT(60, width, height,
- DRM_MODE_RES_MM(width, 96ul),
- DRM_MODE_RES_MM(height, 96ul))
- };
-
- return mode;
-}
-
static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
struct platform_device *pdev)
{
struct device_node *of_node = pdev->dev.of_node;
struct ofdrm_device *odev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
enum ofdrm_model model;
bool big_endian;
int width, height, depth, linebytes;
const struct drm_format_info *format;
u64 address;
+ const u8 *edid;
resource_size_t fb_size, fb_base, fb_pgbase, fb_pgsize;
struct resource *res, *mem;
void __iomem *screen_base;
@@ -1109,10 +857,11 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
size_t nformats;
int ret;
- odev = devm_drm_dev_alloc(&pdev->dev, drv, struct ofdrm_device, dev);
+ odev = devm_drm_dev_alloc(&pdev->dev, drv, struct ofdrm_device, sysfb.dev);
if (IS_ERR(odev))
return ERR_CAST(odev);
- dev = &odev->dev;
+ sysfb = &odev->sysfb;
+ dev = &sysfb->dev;
platform_set_drvdata(pdev, dev);
ret = ofdrm_device_init_pci(odev);
@@ -1246,16 +995,22 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
}
}
+ /* EDID is optional */
+ edid = display_get_edid_of(dev, of_node, odev->edid);
+
/*
* Firmware framebuffer
*/
- iosys_map_set_vaddr_iomem(&odev->screen_base, screen_base);
- odev->mode = ofdrm_mode(width, height);
- odev->format = format;
- odev->pitch = linebytes;
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
+ sysfb->fb_mode = drm_sysfb_mode(width, height, 0, 0);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = linebytes;
+ if (odev->cmap_base)
+ sysfb->fb_gamma_lut_size = OFDRM_GAMMA_LUT_SIZE;
+ sysfb->edid = edid;
- drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&odev->mode));
+ drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sysfb->fb_mode));
drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, linebytes=%d byte\n",
&format->format, width, height, linebytes);
@@ -1302,15 +1057,16 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
return ERR_PTR(ret);
drm_crtc_helper_add(crtc, &ofdrm_crtc_helper_funcs);
- if (odev->cmap_base) {
- drm_mode_crtc_set_gamma_size(crtc, OFDRM_GAMMA_LUT_SIZE);
- drm_crtc_enable_color_mgmt(crtc, 0, false, OFDRM_GAMMA_LUT_SIZE);
+ if (sysfb->fb_gamma_lut_size) {
+ ret = drm_mode_crtc_set_gamma_size(crtc, sysfb->fb_gamma_lut_size);
+ if (!ret)
+ drm_crtc_enable_color_mgmt(crtc, 0, false, sysfb->fb_gamma_lut_size);
}
/* Encoder */
encoder = &odev->encoder;
- ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_NONE);
+ ret = drm_encoder_init(dev, encoder, &ofdrm_encoder_funcs, DRM_MODE_ENCODER_NONE, NULL);
if (ret)
return ERR_PTR(ret);
encoder->possible_crtcs = drm_crtc_mask(crtc);
@@ -1326,6 +1082,8 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
drm_connector_set_panel_orientation_with_quirk(connector,
DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
width, height);
+ if (edid)
+ drm_connector_attach_edid_property(connector);
ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
@@ -1360,19 +1118,21 @@ static struct drm_driver ofdrm_driver = {
static int ofdrm_probe(struct platform_device *pdev)
{
struct ofdrm_device *odev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
int ret;
odev = ofdrm_device_create(&ofdrm_driver, pdev);
if (IS_ERR(odev))
return PTR_ERR(odev);
- dev = &odev->dev;
+ sysfb = &odev->sysfb;
+ dev = &sysfb->dev;
ret = drm_dev_register(dev, 0);
if (ret)
return ret;
- drm_client_setup(dev, odev->format);
+ drm_client_setup(dev, sysfb->fb_format);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/sysfb/simpledrm.c
index 5d9ab8adf800..a1c3119330de 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/sysfb/simpledrm.c
@@ -14,7 +14,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
@@ -26,9 +25,10 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_panic.h>
#include <drm/drm_probe_helper.h>
+#include "drm_sysfb_helper.h"
+
#define DRIVER_NAME "simpledrm"
#define DRIVER_DESC "DRM driver for simple-framebuffer platform devices"
#define DRIVER_MAJOR 1
@@ -42,24 +42,14 @@ static int
simplefb_get_validated_int(struct drm_device *dev, const char *name,
uint32_t value)
{
- if (value > INT_MAX) {
- drm_err(dev, "simplefb: invalid framebuffer %s of %u\n",
- name, value);
- return -EINVAL;
- }
- return (int)value;
+ return drm_sysfb_get_validated_int(dev, name, value, INT_MAX);
}
static int
simplefb_get_validated_int0(struct drm_device *dev, const char *name,
uint32_t value)
{
- if (!value) {
- drm_err(dev, "simplefb: invalid framebuffer %s of %u\n",
- name, value);
- return -EINVAL;
- }
- return simplefb_get_validated_int(dev, name, value);
+ return drm_sysfb_get_validated_int0(dev, name, value, INT_MAX);
}
static const struct drm_format_info *
@@ -217,7 +207,7 @@ simplefb_get_memory_of(struct drm_device *dev, struct device_node *of_node)
*/
struct simpledrm_device {
- struct drm_device dev;
+ struct drm_sysfb_device sysfb;
/* clocks */
#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
@@ -236,28 +226,14 @@ struct simpledrm_device {
struct device_link **pwr_dom_links;
#endif
- /* simplefb settings */
- struct drm_display_mode mode;
- const struct drm_format_info *format;
- unsigned int pitch;
-
- /* memory management */
- struct iosys_map screen_base;
-
/* modesetting */
- uint32_t formats[8];
- size_t nformats;
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
};
-static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev)
-{
- return container_of(dev, struct simpledrm_device, dev);
-}
-
/*
* Hardware
*/
@@ -284,7 +260,7 @@ static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev)
static void simpledrm_device_release_clocks(void *res)
{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(res);
+ struct simpledrm_device *sdev = res;
unsigned int i;
for (i = 0; i < sdev->clk_count; ++i) {
@@ -297,7 +273,7 @@ static void simpledrm_device_release_clocks(void *res)
static int simpledrm_device_init_clocks(struct simpledrm_device *sdev)
{
- struct drm_device *dev = &sdev->dev;
+ struct drm_device *dev = &sdev->sysfb.dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct clk *clock;
@@ -382,7 +358,7 @@ static int simpledrm_device_init_clocks(struct simpledrm_device *sdev)
static void simpledrm_device_release_regulators(void *res)
{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(res);
+ struct simpledrm_device *sdev = res;
unsigned int i;
for (i = 0; i < sdev->regulator_count; ++i) {
@@ -395,7 +371,7 @@ static void simpledrm_device_release_regulators(void *res)
static int simpledrm_device_init_regulators(struct simpledrm_device *sdev)
{
- struct drm_device *dev = &sdev->dev;
+ struct drm_device *dev = &sdev->sysfb.dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct property *prop;
@@ -516,7 +492,7 @@ static void simpledrm_device_detach_genpd(void *res)
static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
{
- struct device *dev = sdev->dev.dev;
+ struct device *dev = sdev->sysfb.dev.dev;
int i;
sdev->pwr_dom_count = of_count_phandle_with_args(dev->of_node, "power-domains",
@@ -548,7 +524,7 @@ static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
simpledrm_device_detach_genpd(sdev);
return ret;
}
- drm_warn(&sdev->dev,
+ drm_warn(&sdev->sysfb.dev,
"pm_domain_attach_by_id(%u) failed: %d\n", i, ret);
continue;
}
@@ -559,7 +535,7 @@ static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!sdev->pwr_dom_links[i])
- drm_warn(&sdev->dev, "failed to link power-domain %d\n", i);
+ drm_warn(&sdev->sysfb.dev, "failed to link power-domain %d\n", i);
}
return devm_add_action_or_reset(dev, simpledrm_device_detach_genpd, sdev);
@@ -575,210 +551,56 @@ static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
* Modesetting
*/
-static const uint64_t simpledrm_primary_plane_format_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
+static const u64 simpledrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
};
-static int simpledrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_shadow_plane_state *new_shadow_plane_state =
- to_drm_shadow_plane_state(new_plane_state);
- struct drm_framebuffer *new_fb = new_plane_state->fb;
- struct drm_crtc *new_crtc = new_plane_state->crtc;
- struct drm_crtc_state *new_crtc_state = NULL;
- struct drm_device *dev = plane->dev;
- struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
- int ret;
-
- if (new_crtc)
- new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- false, false);
- if (ret)
- return ret;
- else if (!new_plane_state->visible)
- return 0;
-
- if (new_fb->format != sdev->format) {
- void *buf;
-
- /* format conversion necessary; reserve buffer */
- buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
- sdev->pitch, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void simpledrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_device *dev = plane->dev;
- struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect damage;
- int ret, idx;
-
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- return;
-
- if (!drm_dev_enter(dev, &idx))
- goto out_drm_gem_fb_end_cpu_access;
-
- drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
- drm_atomic_for_each_plane_damage(&iter, &damage) {
- struct drm_rect dst_clip = plane_state->dst;
- struct iosys_map dst = sdev->screen_base;
-
- if (!drm_rect_intersect(&dst_clip, &damage))
- continue;
-
- iosys_map_incr(&dst, drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip));
- drm_fb_blit(&dst, &sdev->pitch, sdev->format->format, shadow_plane_state->data,
- fb, &damage, &shadow_plane_state->fmtcnv_state);
- }
-
- drm_dev_exit(idx);
-out_drm_gem_fb_end_cpu_access:
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-}
-
-static void simpledrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = plane->dev;
- struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
- int idx;
-
- if (!drm_dev_enter(dev, &idx))
- return;
-
- /* Clear screen to black if disabled */
- iosys_map_memset(&sdev->screen_base, 0, 0, sdev->pitch * sdev->mode.vdisplay);
-
- drm_dev_exit(idx);
-}
-
-static int simpledrm_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
- struct drm_scanout_buffer *sb)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(plane->dev);
-
- sb->width = sdev->mode.hdisplay;
- sb->height = sdev->mode.vdisplay;
- sb->format = sdev->format;
- sb->pitch[0] = sdev->pitch;
- sb->map[0] = sdev->screen_base;
-
- return 0;
-}
-
static const struct drm_plane_helper_funcs simpledrm_primary_plane_helper_funcs = {
- DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = simpledrm_primary_plane_helper_atomic_check,
- .atomic_update = simpledrm_primary_plane_helper_atomic_update,
- .atomic_disable = simpledrm_primary_plane_helper_atomic_disable,
- .get_scanout_buffer = simpledrm_primary_plane_helper_get_scanout_buffer,
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
};
static const struct drm_plane_funcs simpledrm_primary_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
+ DRM_SYSFB_PLANE_FUNCS,
.destroy = drm_plane_cleanup,
- DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static enum drm_mode_status simpledrm_crtc_helper_mode_valid(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(crtc->dev);
-
- return drm_crtc_helper_mode_valid_fixed(crtc, mode, &sdev->mode);
-}
-
-/*
- * The CRTC is always enabled. Screen updates are performed by
- * the primary plane's atomic_update function. Disabling clears
- * the screen in the primary plane's atomic_disable function.
- */
static const struct drm_crtc_helper_funcs simpledrm_crtc_helper_funcs = {
- .mode_valid = simpledrm_crtc_helper_mode_valid,
- .atomic_check = drm_crtc_helper_atomic_check,
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
};
static const struct drm_crtc_funcs simpledrm_crtc_funcs = {
- .reset = drm_atomic_helper_crtc_reset,
+ DRM_SYSFB_CRTC_FUNCS,
.destroy = drm_crtc_cleanup,
- .set_config = drm_atomic_helper_set_config,
- .page_flip = drm_atomic_helper_page_flip,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static const struct drm_encoder_funcs simpledrm_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
-static int simpledrm_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(connector->dev);
-
- return drm_connector_helper_get_modes_fixed(connector, &sdev->mode);
-}
-
static const struct drm_connector_helper_funcs simpledrm_connector_helper_funcs = {
- .get_modes = simpledrm_connector_helper_get_modes,
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
};
static const struct drm_connector_funcs simpledrm_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
+ DRM_SYSFB_CONNECTOR_FUNCS,
.destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_mode_config_funcs simpledrm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
};
/*
* Init / Cleanup
*/
-static struct drm_display_mode simpledrm_mode(unsigned int width,
- unsigned int height,
- unsigned int width_mm,
- unsigned int height_mm)
-{
- const struct drm_display_mode mode = {
- DRM_MODE_INIT(60, width, height, width_mm, height_mm)
- };
-
- return mode;
-}
-
static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
struct platform_device *pdev)
{
const struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct simpledrm_device *sdev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
int width, height, stride;
int width_mm = 0, height_mm = 0;
@@ -793,10 +615,11 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
size_t nformats;
int ret;
- sdev = devm_drm_dev_alloc(&pdev->dev, drv, struct simpledrm_device, dev);
+ sdev = devm_drm_dev_alloc(&pdev->dev, drv, struct simpledrm_device, sysfb.dev);
if (IS_ERR(sdev))
return ERR_CAST(sdev);
- dev = &sdev->dev;
+ sysfb = &sdev->sysfb;
+ dev = &sysfb->dev;
platform_set_drvdata(pdev, sdev);
/*
@@ -858,20 +681,11 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
return ERR_PTR(-EINVAL);
}
- /*
- * Assume a monitor resolution of 96 dpi if physical dimensions
- * are not specified to get a somewhat reasonable screen size.
- */
- if (!width_mm)
- width_mm = DRM_MODE_RES_MM(width, 96ul);
- if (!height_mm)
- height_mm = DRM_MODE_RES_MM(height, 96ul);
-
- sdev->mode = simpledrm_mode(width, height, width_mm, height_mm);
- sdev->format = format;
- sdev->pitch = stride;
+ sysfb->fb_mode = drm_sysfb_mode(width, height, width_mm, height_mm);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = stride;
- drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sdev->mode));
+ drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sysfb->fb_mode));
drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d byte\n",
&format->format, width, height, stride);
@@ -895,7 +709,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
if (IS_ERR(screen_base))
return screen_base;
- iosys_map_set_vaddr(&sdev->screen_base, screen_base);
+ iosys_map_set_vaddr(&sysfb->fb_addr, screen_base);
} else {
void __iomem *screen_base;
@@ -928,7 +742,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
if (!screen_base)
return ERR_PTR(-ENOMEM);
- iosys_map_set_vaddr_iomem(&sdev->screen_base, screen_base);
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
}
/*
@@ -1027,19 +841,21 @@ static struct drm_driver simpledrm_driver = {
static int simpledrm_probe(struct platform_device *pdev)
{
struct simpledrm_device *sdev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
int ret;
sdev = simpledrm_device_create(&simpledrm_driver, pdev);
if (IS_ERR(sdev))
return PTR_ERR(sdev);
- dev = &sdev->dev;
+ sysfb = &sdev->sysfb;
+ dev = &sysfb->dev;
ret = drm_dev_register(dev, 0);
if (ret)
return ret;
- drm_client_setup(dev, sdev->format);
+	drm_client_setup(dev, sysfb->fb_format);
return 0;
}
@@ -1047,7 +863,7 @@ static int simpledrm_probe(struct platform_device *pdev)
static void simpledrm_remove(struct platform_device *pdev)
{
struct simpledrm_device *sdev = platform_get_drvdata(pdev);
- struct drm_device *dev = &sdev->dev;
+ struct drm_device *dev = &sdev->sysfb.dev;
drm_dev_unplug(dev);
}
diff --git a/drivers/gpu/drm/sysfb/vesadrm.c b/drivers/gpu/drm/sysfb/vesadrm.c
new file mode 100644
index 000000000000..4d62c78e7d1e
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/vesadrm.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/aperture.h>
+#include <linux/ioport.h>
+#include <linux/limits.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fbdev_shmem.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/edid.h>
+#include <video/pixel_format.h>
+#include <video/vga.h>
+
+#include "drm_sysfb_helper.h"
+
+#define DRIVER_NAME "vesadrm"
+#define DRIVER_DESC "DRM driver for VESA platform devices"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+#define VESADRM_GAMMA_LUT_SIZE 256
+
+static const struct drm_format_info *vesadrm_get_format_si(struct drm_device *dev,
+ const struct screen_info *si)
+{
+ static const struct drm_sysfb_format formats[] = {
+ { PIXEL_FORMAT_XRGB1555, DRM_FORMAT_XRGB1555, },
+ { PIXEL_FORMAT_RGB565, DRM_FORMAT_RGB565, },
+ { PIXEL_FORMAT_RGB888, DRM_FORMAT_RGB888, },
+ { PIXEL_FORMAT_XRGB8888, DRM_FORMAT_XRGB8888, },
+ { PIXEL_FORMAT_XBGR8888, DRM_FORMAT_XBGR8888, },
+ };
+
+ return drm_sysfb_get_format_si(dev, formats, ARRAY_SIZE(formats), si);
+}
+
+/*
+ * VESA device
+ */
+
+struct vesadrm_device {
+ struct drm_sysfb_device sysfb;
+
+#if defined(CONFIG_X86_32)
+ /* VESA Protected Mode interface */
+ struct {
+ const u8 *PrimaryPalette;
+ } pmi;
+#endif
+
+ void (*cmap_write)(struct vesadrm_device *vesa, unsigned int index,
+ u16 red, u16 green, u16 blue);
+
+ /* modesetting */
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+static struct vesadrm_device *to_vesadrm_device(struct drm_device *dev)
+{
+ return container_of(to_drm_sysfb_device(dev), struct vesadrm_device, sysfb);
+}
+
+/*
+ * Palette
+ */
+
+static void vesadrm_vga_cmap_write(struct vesadrm_device *vesa, unsigned int index,
+ u16 red, u16 green, u16 blue)
+{
+ u8 i8, r8, g8, b8;
+
+ if (index > 255)
+ return;
+
+ i8 = index;
+ r8 = red >> 8;
+ g8 = green >> 8;
+ b8 = blue >> 8;
+
+ outb_p(i8, VGA_PEL_IW);
+ outb_p(r8, VGA_PEL_D);
+ outb_p(g8, VGA_PEL_D);
+ outb_p(b8, VGA_PEL_D);
+}
+
+#if defined(CONFIG_X86_32)
+static void vesadrm_pmi_cmap_write(struct vesadrm_device *vesa, unsigned int index,
+ u16 red, u16 green, u16 blue)
+{
+ u32 i32 = index;
+ struct {
+ u8 b8;
+ u8 g8;
+ u8 r8;
+ u8 x8;
+ } PaletteEntry = {
+ blue >> 8,
+ green >> 8,
+ red >> 8,
+ 0x00,
+ };
+
+ if (index > 255)
+ return;
+
+ __asm__ __volatile__ (
+ "call *(%%esi)"
+ : /* no return value */
+ : "a" (0x4f09),
+ "b" (0),
+ "c" (1),
+ "d" (i32),
+ "D" (&PaletteEntry),
+ "S" (&vesa->pmi.PrimaryPalette));
+}
+#endif
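For readers unfamiliar with the VESA protected-mode interface, the inline assembly above appears to issue VBE function 09h (set/get palette data) through the PMI entry point. An annotated register map, per my reading of the VBE 3.0 specification (treat this as a non-authoritative annotation, not documentation of the call):

/*
 * EAX = 0x4F09  VBE function 09h: set/get palette data
 * EBX = 0       subfunction 00h: set palette data
 * ECX = 1       number of palette entries to update
 * EDX = index   first palette entry to write
 * EDI           pointer to one 4-byte { blue, green, red, pad } entry
 * ESI           address of the PMI palette entry point called through
 */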
+
+static void vesadrm_set_gamma_linear(struct vesadrm_device *vesa,
+ const struct drm_format_info *format)
+{
+ struct drm_device *dev = &vesa->sysfb.dev;
+ size_t i;
+ u16 r16, g16, b16;
+
+ switch (format->format) {
+ case DRM_FORMAT_XRGB1555:
+ for (i = 0; i < 32; ++i) {
+ r16 = i * 8 + i / 4;
+ r16 |= (r16 << 8) | r16;
+ vesa->cmap_write(vesa, i, r16, r16, r16);
+ }
+ break;
+ case DRM_FORMAT_RGB565:
+ for (i = 0; i < 32; ++i) {
+ r16 = i * 8 + i / 4;
+ r16 |= (r16 << 8) | r16;
+ g16 = i * 4 + i / 16;
+ g16 |= (g16 << 8) | g16;
+ b16 = r16;
+ vesa->cmap_write(vesa, i, r16, g16, b16);
+ }
+ for (i = 32; i < 64; ++i) {
+ g16 = i * 4 + i / 16;
+ g16 |= (g16 << 8) | g16;
+ vesa->cmap_write(vesa, i, 0, g16, 0);
+ }
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_BGRX8888:
+ for (i = 0; i < 256; ++i) {
+ r16 = (i << 8) | i;
+ vesa->cmap_write(vesa, i, r16, r16, r16);
+ }
+ break;
+ default:
+ drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
+ }
+}
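The ramp arithmetic above is plain bit replication: for a 5-bit index i, i * 8 + i / 4 equals (i << 3) | (i >> 2), widening 5 bits to 8, and ORing a byte with itself shifted left by 8 widens 8 bits to 16 (the 6-bit green channel uses the analogous (i << 2) | (i >> 4)). A self-contained sketch of the 5-bit case:

static u16 expand5_to_16(u16 c5)
{
	u16 c8 = (c5 << 3) | (c5 >> 2);	/* 5 -> 8 bits; same as c5 * 8 + c5 / 4 */

	return (c8 << 8) | c8;		/* 8 -> 16 bits */
}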
+
+static void vesadrm_set_gamma_lut(struct vesadrm_device *vesa,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
+{
+ struct drm_device *dev = &vesa->sysfb.dev;
+ size_t i;
+ u16 r16, g16, b16;
+
+ switch (format->format) {
+ case DRM_FORMAT_XRGB1555:
+ for (i = 0; i < 32; ++i) {
+ r16 = lut[i * 8 + i / 4].red;
+ g16 = lut[i * 8 + i / 4].green;
+ b16 = lut[i * 8 + i / 4].blue;
+ vesa->cmap_write(vesa, i, r16, g16, b16);
+ }
+ break;
+ case DRM_FORMAT_RGB565:
+ for (i = 0; i < 32; ++i) {
+ r16 = lut[i * 8 + i / 4].red;
+ g16 = lut[i * 4 + i / 16].green;
+ b16 = lut[i * 8 + i / 4].blue;
+ vesa->cmap_write(vesa, i, r16, g16, b16);
+ }
+ for (i = 32; i < 64; ++i)
+ vesa->cmap_write(vesa, i, 0, lut[i * 4 + i / 16].green, 0);
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_BGRX8888:
+ for (i = 0; i < 256; ++i)
+ vesa->cmap_write(vesa, i, lut[i].red, lut[i].green, lut[i].blue);
+ break;
+ default:
+ drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
+ }
+}
+
+/*
+ * Modesetting
+ */
+
+static const u64 vesadrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
+};
+
+static const struct drm_plane_helper_funcs vesadrm_primary_plane_helper_funcs = {
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs vesadrm_primary_plane_funcs = {
+ DRM_SYSFB_PLANE_FUNCS,
+ .destroy = drm_plane_cleanup,
+};
+
+static void vesadrm_crtc_helper_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct vesadrm_device *vesa = to_vesadrm_device(dev);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
+
+ /*
+ * The gamma LUT has to be reloaded after changing the primary
+ * plane's color format.
+ */
+ if (crtc_state->enable && crtc_state->color_mgmt_changed) {
+ if (sysfb_crtc_state->format == sysfb->fb_format) {
+ if (crtc_state->gamma_lut)
+ vesadrm_set_gamma_lut(vesa,
+ sysfb_crtc_state->format,
+ crtc_state->gamma_lut->data);
+ else
+ vesadrm_set_gamma_linear(vesa, sysfb_crtc_state->format);
+ } else {
+ vesadrm_set_gamma_linear(vesa, sysfb_crtc_state->format);
+ }
+ }
+}
+
+static const struct drm_crtc_helper_funcs vesadrm_crtc_helper_funcs = {
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
+ .atomic_flush = vesadrm_crtc_helper_atomic_flush,
+};
+
+static const struct drm_crtc_funcs vesadrm_crtc_funcs = {
+ DRM_SYSFB_CRTC_FUNCS,
+ .destroy = drm_crtc_cleanup,
+};
+
+static const struct drm_encoder_funcs vesadrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_connector_helper_funcs vesadrm_connector_helper_funcs = {
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs vesadrm_connector_funcs = {
+ DRM_SYSFB_CONNECTOR_FUNCS,
+ .destroy = drm_connector_cleanup,
+};
+
+static const struct drm_mode_config_funcs vesadrm_mode_config_funcs = {
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
+};
+
+/*
+ * Init / Cleanup
+ */
+
+static struct vesadrm_device *vesadrm_device_create(struct drm_driver *drv,
+ struct platform_device *pdev)
+{
+ const struct screen_info *si;
+ const struct drm_format_info *format;
+ int width, height, stride;
+ u64 vsize;
+ struct resource resbuf;
+ struct resource *res;
+ struct vesadrm_device *vesa;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ struct resource *mem = NULL;
+ void __iomem *screen_base;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ unsigned long max_width, max_height;
+ size_t nformats;
+ int ret;
+
+ si = dev_get_platdata(&pdev->dev);
+ if (!si)
+ return ERR_PTR(-ENODEV);
+ if (screen_info_video_type(si) != VIDEO_TYPE_VLFB)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * VESA DRM driver
+ */
+
+ vesa = devm_drm_dev_alloc(&pdev->dev, drv, struct vesadrm_device, sysfb.dev);
+ if (IS_ERR(vesa))
+ return ERR_CAST(vesa);
+ sysfb = &vesa->sysfb;
+ dev = &sysfb->dev;
+ platform_set_drvdata(pdev, dev);
+
+ /*
+ * Hardware settings
+ */
+
+ format = vesadrm_get_format_si(dev, si);
+ if (!format)
+ return ERR_PTR(-EINVAL);
+ width = drm_sysfb_get_width_si(dev, si);
+ if (width < 0)
+ return ERR_PTR(width);
+ height = drm_sysfb_get_height_si(dev, si);
+ if (height < 0)
+ return ERR_PTR(height);
+ res = drm_sysfb_get_memory_si(dev, si, &resbuf);
+ if (!res)
+ return ERR_PTR(-EINVAL);
+ stride = drm_sysfb_get_stride_si(dev, si, format, width, height, resource_size(res));
+ if (stride < 0)
+ return ERR_PTR(stride);
+ vsize = drm_sysfb_get_visible_size_si(dev, si, height, stride, resource_size(res));
+ if (!vsize)
+ return ERR_PTR(-EINVAL);
+
+ drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d bytes\n",
+ &format->format, width, height, stride);
+
+ if (!__screen_info_vbe_mode_nonvga(si)) {
+ vesa->cmap_write = vesadrm_vga_cmap_write;
+#if defined(CONFIG_X86_32)
+ } else {
+ phys_addr_t pmi_base = __screen_info_vesapm_info_base(si);
+ const u16 *pmi_addr = phys_to_virt(pmi_base);
+
+ vesa->pmi.PrimaryPalette = (u8 *)pmi_addr + pmi_addr[2];
+ vesa->cmap_write = vesadrm_pmi_cmap_write;
+#endif
+ }
+
+#ifdef CONFIG_X86
+ if (drm_edid_header_is_valid(edid_info.dummy) == 8)
+ sysfb->edid = edid_info.dummy;
+#endif
+ sysfb->fb_mode = drm_sysfb_mode(width, height, 0, 0);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = stride;
+ if (vesa->cmap_write)
+ sysfb->fb_gamma_lut_size = VESADRM_GAMMA_LUT_SIZE;
+
+ /*
+ * Memory management
+ */
+
+ ret = devm_aperture_acquire_for_platform_device(pdev, res->start, vsize);
+ if (ret) {
+ drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_dbg(dev, "using I/O memory framebuffer at %pr\n", res);
+
+ mem = devm_request_mem_region(&pdev->dev, res->start, vsize, drv->name);
+ if (!mem) {
+ /*
+ * We cannot make this fatal. Sometimes this comes from magic
+ * spaces our resource handlers simply don't know about. Use
+ * the I/O-memory resource as-is and try to map that instead.
+ */
+ drm_warn(dev, "could not acquire memory region %pr\n", res);
+ mem = res;
+ }
+
+ screen_base = devm_ioremap_wc(&pdev->dev, mem->start, resource_size(mem));
+ if (!screen_base)
+ return ERR_PTR(-ENOMEM);
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
+
+ /*
+ * Modesetting
+ */
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ max_width = max_t(unsigned long, width, DRM_SHADOW_PLANE_MAX_WIDTH);
+ max_height = max_t(unsigned long, height, DRM_SHADOW_PLANE_MAX_HEIGHT);
+
+ dev->mode_config.min_width = width;
+ dev->mode_config.max_width = max_width;
+ dev->mode_config.min_height = height;
+ dev->mode_config.max_height = max_height;
+ dev->mode_config.preferred_depth = format->depth;
+ dev->mode_config.funcs = &vesadrm_mode_config_funcs;
+
+ /* Primary plane */
+
+ nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
+ vesa->formats, ARRAY_SIZE(vesa->formats));
+
+ primary_plane = &vesa->primary_plane;
+ ret = drm_universal_plane_init(dev, primary_plane, 0, &vesadrm_primary_plane_funcs,
+ vesa->formats, nformats,
+ vesadrm_primary_plane_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_plane_helper_add(primary_plane, &vesadrm_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ /* CRTC */
+
+ crtc = &vesa->crtc;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &vesadrm_crtc_funcs, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_crtc_helper_add(crtc, &vesadrm_crtc_helper_funcs);
+
+ if (sysfb->fb_gamma_lut_size) {
+ ret = drm_mode_crtc_set_gamma_size(crtc, sysfb->fb_gamma_lut_size);
+ if (!ret)
+ drm_crtc_enable_color_mgmt(crtc, 0, false, sysfb->fb_gamma_lut_size);
+ }
+
+ /* Encoder */
+
+ encoder = &vesa->encoder;
+ ret = drm_encoder_init(dev, encoder, &vesadrm_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /* Connector */
+
+ connector = &vesa->connector;
+ ret = drm_connector_init(dev, connector, &vesadrm_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_connector_helper_add(connector, &vesadrm_connector_helper_funcs);
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ width, height);
+ if (sysfb->edid)
+ drm_connector_attach_edid_property(connector);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
+ return vesa;
+}
+
+/*
+ * DRM driver
+ */
+
+DEFINE_DRM_GEM_FOPS(vesadrm_fops);
+
+static struct drm_driver vesadrm_driver = {
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
+ .fops = &vesadrm_fops,
+};
+
+/*
+ * Platform driver
+ */
+
+static int vesadrm_probe(struct platform_device *pdev)
+{
+ struct vesadrm_device *vesa;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ int ret;
+
+ vesa = vesadrm_device_create(&vesadrm_driver, pdev);
+ if (IS_ERR(vesa))
+ return PTR_ERR(vesa);
+ sysfb = &vesa->sysfb;
+ dev = &sysfb->dev;
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ return ret;
+
+ drm_client_setup(dev, sysfb->fb_format);
+
+ return 0;
+}
+
+static void vesadrm_remove(struct platform_device *pdev)
+{
+ struct drm_device *dev = platform_get_drvdata(pdev);
+
+ drm_dev_unplug(dev);
+}
+
+static struct platform_driver vesadrm_platform_driver = {
+ .driver = {
+ .name = "vesa-framebuffer",
+ },
+ .probe = vesadrm_probe,
+ .remove = vesadrm_remove,
+};
+
+module_platform_driver(vesadrm_platform_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
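The driver binds to the "vesa-framebuffer" platform device, whose platform data is the boot-time struct screen_info consumed by dev_get_platdata() in vesadrm_device_create(). A hypothetical sketch of how such a device could be registered; the real registration lives in the kernel's sysfb code, and the function name here is illustrative:

static struct platform_device *register_vesa_fb(const struct screen_info *si)
{
	/* platform_device_register_data() copies *si as platform data */
	return platform_device_register_data(NULL, "vesa-framebuffer",
					     PLATFORM_DEVID_NONE,
					     si, sizeof(*si));
}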
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 798507a8ae56..59d5c1ba145a 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1321,10 +1321,16 @@ static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm,
if (wgrp->dc == dc->pipe) {
for (j = 0; j < wgrp->num_windows; j++) {
unsigned int index = wgrp->windows[j];
+ enum drm_plane_type type;
+
+ if (primary)
+ type = DRM_PLANE_TYPE_OVERLAY;
+ else
+ type = DRM_PLANE_TYPE_PRIMARY;
plane = tegra_shared_plane_create(drm, dc,
wgrp->index,
- index);
+ index, type);
if (IS_ERR(plane))
return plane;
@@ -1332,10 +1338,8 @@ static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm,
* Choose the first shared plane owned by this
* head as the primary plane.
*/
- if (!primary) {
- plane->type = DRM_PLANE_TYPE_PRIMARY;
+ if (!primary)
primary = plane;
- }
}
}
}
@@ -1389,7 +1393,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
if (crtc->state)
tegra_crtc_atomic_destroy_state(crtc, crtc->state);
- __drm_atomic_helper_crtc_reset(crtc, &state->base);
+ if (state)
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
}
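The split call makes the allocation-failure path explicit: when kzalloc() returns NULL, the helper still runs with a NULL state so crtc->state is reset rather than left pointing at freed memory. An equivalent one-liner, if the branch feels heavy (illustrative):

	__drm_atomic_helper_crtc_reset(crtc, state ? &state->base : NULL);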
static struct drm_crtc_state *
diff --git a/drivers/gpu/drm/tegra/dp.c b/drivers/gpu/drm/tegra/dp.c
index 08fbd8f151a1..990e744b0923 100644
--- a/drivers/gpu/drm/tegra/dp.c
+++ b/drivers/gpu/drm/tegra/dp.c
@@ -256,73 +256,6 @@ int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
}
/**
- * drm_dp_link_power_up() - power up a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must exit the
- * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
- * Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
-
- return 0;
-}
-
-/**
- * drm_dp_link_power_down() - power down a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-/**
* drm_dp_link_configure() - configure a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
diff --git a/drivers/gpu/drm/tegra/dp.h b/drivers/gpu/drm/tegra/dp.h
index cb12ed0c54e7..695060cafac0 100644
--- a/drivers/gpu/drm/tegra/dp.h
+++ b/drivers/gpu/drm/tegra/dp.h
@@ -164,8 +164,6 @@ int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate);
void drm_dp_link_update_rates(struct drm_dp_link *link);
int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_choose(struct drm_dp_link *link,
const struct drm_display_mode *mode,
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 2cd8dcb959c0..e5297ac5c0fc 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -501,14 +501,9 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
dpaux->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
if (IS_ERR(dpaux->vdd)) {
- if (PTR_ERR(dpaux->vdd) != -ENODEV) {
- if (PTR_ERR(dpaux->vdd) != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to get VDD supply: %ld\n",
- PTR_ERR(dpaux->vdd));
-
- return PTR_ERR(dpaux->vdd);
- }
+ if (PTR_ERR(dpaux->vdd) != -ENODEV)
+ return dev_err_probe(&pdev->dev, PTR_ERR(dpaux->vdd),
+ "failed to get VDD supply\n");
dpaux->vdd = NULL;
}
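dev_err_probe() folds in the -EPROBE_DEFER special-casing the old code open-coded: it logs only for real errors, records the deferral reason otherwise, and passes the error through. The general shape of the conversion (sketch):

/* before */
if (err != -EPROBE_DEFER)
	dev_err(dev, "failed to get resource: %d\n", err);
return err;

/* after: also records the reason in debugfs' devices_deferred */
return dev_err_probe(dev, err, "failed to get resource\n");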
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 9bb077558167..b5089b772267 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1564,7 +1564,6 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
static int tegra_dsi_probe(struct platform_device *pdev)
{
struct tegra_dsi *dsi;
- struct resource *regs;
int err;
dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
@@ -1636,8 +1635,7 @@ static int tegra_dsi_probe(struct platform_device *pdev)
goto remove;
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
+ dsi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->regs)) {
err = PTR_ERR(dsi->regs);
goto remove;
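devm_platform_ioremap_resource() is shorthand for the two-step lookup the old code performed; a sketch of the equivalence:

/* before */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);

/* after */
base = devm_platform_ioremap_resource(pdev, 0);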
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index c0d85463eb1a..17f616bbcb45 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -30,6 +30,14 @@ int falcon_wait_idle(struct falcon *falcon)
(value == 0), 10, 100000);
}
+static int falcon_dma_wait_not_full(struct falcon *falcon)
+{
+ u32 value;
+
+ return readl_poll_timeout(falcon->regs + FALCON_DMATRFCMD, value,
+ !(value & FALCON_DMATRFCMD_FULL), 10, 100000);
+}
+
static int falcon_dma_wait_idle(struct falcon *falcon)
{
u32 value;
@@ -44,6 +52,7 @@ static int falcon_copy_chunk(struct falcon *falcon,
enum falcon_memory target)
{
u32 cmd = FALCON_DMATRFCMD_SIZE_256B;
+ int err;
if (target == FALCON_MEMORY_IMEM)
cmd |= FALCON_DMATRFCMD_IMEM;
@@ -56,11 +65,15 @@ static int falcon_copy_chunk(struct falcon *falcon,
*/
cmd |= FALCON_DMATRFCMD_DMACTX(1);
+ err = falcon_dma_wait_not_full(falcon);
+ if (err < 0)
+ return err;
+
falcon_writel(falcon, offset, FALCON_DMATRFMOFFS);
falcon_writel(falcon, base, FALCON_DMATRFFBOFFS);
falcon_writel(falcon, cmd, FALCON_DMATRFCMD);
- return falcon_dma_wait_idle(falcon);
+ return 0;
}
static void falcon_copy_firmware_image(struct falcon *falcon,
@@ -191,6 +204,11 @@ int falcon_boot(struct falcon *falcon)
falcon_copy_chunk(falcon, falcon->firmware.code.offset + offset,
offset, FALCON_MEMORY_IMEM);
+ /* wait for DMA to complete */
+ err = falcon_dma_wait_idle(falcon);
+ if (err < 0)
+ return err;
+
/* setup falcon interrupts */
falcon_writel(falcon, FALCON_IRQMSET_EXT(0xff) |
FALCON_IRQMSET_SWGEN1 |
diff --git a/drivers/gpu/drm/tegra/falcon.h b/drivers/gpu/drm/tegra/falcon.h
index 1955cf11a8a6..902bb7e4fd0f 100644
--- a/drivers/gpu/drm/tegra/falcon.h
+++ b/drivers/gpu/drm/tegra/falcon.h
@@ -47,6 +47,7 @@
#define FALCON_DMATRFMOFFS 0x00001114
#define FALCON_DMATRFCMD 0x00001118
+#define FALCON_DMATRFCMD_FULL (1 << 0)
#define FALCON_DMATRFCMD_IDLE (1 << 1)
#define FALCON_DMATRFCMD_IMEM (1 << 4)
#define FALCON_DMATRFCMD_SIZE_256B (6 << 8)
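With the not-full poll moved ahead of each submission and the idle wait hoisted into falcon_boot(), chunk transfers can queue back-to-back instead of draining after every 256-byte copy. The resulting flow, sketched with error handling elided:

/*
 * for each 256-byte chunk:
 *	falcon_dma_wait_not_full(falcon);	- room in the request queue?
 *	falcon_copy_chunk(falcon, ...);		- enqueue without waiting
 * after all chunks have been queued:
 *	falcon_dma_wait_idle(falcon);		- one drain at the very end
 */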
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index ace3e5a805cf..dbc1394f96b8 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -17,7 +17,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
-#include <drm/tegra_drm.h>
#include "drm.h"
#include "gem.h"
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index fa6140fc37fb..8f779f23dc09 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -755,9 +755,9 @@ static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
unsigned int wgrp,
- unsigned int index)
+ unsigned int index,
+ enum drm_plane_type type)
{
- enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
struct tegra_drm *tegra = drm->dev_private;
struct tegra_display_hub *hub = tegra->hub;
struct tegra_shared_plane *plane;
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
index 23c4b2115ed1..a66f18c4facc 100644
--- a/drivers/gpu/drm/tegra/hub.h
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -80,7 +80,8 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub);
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
unsigned int wgrp,
- unsigned int index);
+ unsigned int index,
+ enum drm_plane_type type);
int tegra_display_hub_atomic_check(struct drm_device *drm,
struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 1e8ec50b759e..ff5a749710db 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -200,6 +200,11 @@ static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = {
.atomic_check = tegra_rgb_encoder_atomic_check,
};
+static void tegra_dc_of_node_put(void *data)
+{
+ of_node_put(data);
+}
+
int tegra_dc_rgb_probe(struct tegra_dc *dc)
{
struct device_node *np;
@@ -207,7 +212,14 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
int err;
np = of_get_child_by_name(dc->dev->of_node, "rgb");
- if (!np || !of_device_is_available(np))
+ if (!np)
+ return -ENODEV;
+
+ err = devm_add_action_or_reset(dc->dev, tegra_dc_of_node_put, np);
+ if (err < 0)
+ return err;
+
+ if (!of_device_is_available(np))
return -ENODEV;
rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL);
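Registering of_node_put() as a devm action ties the node reference to the device lifetime, so every later return path (including the !of_device_is_available() bail-out and the devm_kzalloc() failure) drops the reference automatically. The idiom in general form, with illustrative names:

np = of_get_child_by_name(parent, "child");
if (!np)
	return -ENODEV;

/* on failure, runs of_node_put(np) immediately and returns the error */
err = devm_add_action_or_reset(dev, put_node_action, np);
if (err < 0)
	return err;

/* from here on, no explicit of_node_put() is needed on any path */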
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index f98f70eda906..21f3dfdcc5c9 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2666,7 +2666,7 @@ static void tegra_sor_dp_disable(struct drm_encoder *encoder)
* the AUX transactions would just be timing out.
*/
if (output->connector.status != connector_status_disconnected) {
- err = drm_dp_link_power_down(sor->aux, &sor->link);
+ err = drm_dp_link_power_down(sor->aux, sor->link.revision);
if (err < 0)
dev_err(sor->dev, "failed to power down link: %d\n",
err);
@@ -2882,7 +2882,7 @@ static void tegra_sor_dp_enable(struct drm_encoder *encoder)
else
dev_dbg(sor->dev, "link training succeeded\n");
- err = drm_dp_link_power_up(sor->aux, &sor->link);
+ err = drm_dp_link_power_up(sor->aux, sor->link.revision);
if (err < 0)
dev_err(sor->dev, "failed to power up DP link: %d\n", err);
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index 0109bcf7faa5..3afd6587df08 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -4,7 +4,9 @@ obj-$(CONFIG_DRM_KUNIT_TEST_HELPERS) += \
drm_kunit_helpers.o
obj-$(CONFIG_DRM_KUNIT_TEST) += \
+ drm_atomic_test.o \
drm_atomic_state_test.o \
+ drm_bridge_test.o \
drm_buddy_test.o \
drm_cmdline_parser_test.o \
drm_connector_test.o \
diff --git a/drivers/gpu/drm/tests/drm_atomic_test.c b/drivers/gpu/drm/tests/drm_atomic_test.c
new file mode 100644
index 000000000000..ea91bec6569e
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_atomic_test.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit test for drm_atomic functions
+ */
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_kunit_helpers.h>
+#include <drm/drm_modeset_helper_vtables.h>
+
+#include <kunit/test.h>
+
+struct drm_atomic_test_priv {
+ struct drm_device drm;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+static const struct drm_connector_helper_funcs drm_atomic_init_connector_helper_funcs = {
+};
+
+static const struct drm_connector_funcs drm_atomic_init_connector_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .reset = drm_atomic_helper_connector_reset,
+};
+
+static struct drm_atomic_test_priv *create_device(struct kunit *test)
+{
+ struct drm_atomic_test_priv *priv;
+ struct drm_connector *connector;
+ struct drm_encoder *enc;
+ struct drm_device *drm;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct device *dev;
+ int ret;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ if (IS_ERR(dev))
+ return ERR_CAST(dev);
+
+ priv = drm_kunit_helper_alloc_drm_device(test, dev,
+ struct drm_atomic_test_priv, drm,
+ DRIVER_MODESET | DRIVER_ATOMIC);
+ if (IS_ERR(priv))
+ return ERR_CAST(priv);
+
+ drm = &priv->drm;
+ plane = drm_kunit_helper_create_primary_plane(test, drm,
+ NULL,
+ NULL,
+ NULL, 0,
+ NULL);
+ if (IS_ERR(plane))
+ return ERR_CAST(plane);
+ priv->plane = plane;
+
+ crtc = drm_kunit_helper_create_crtc(test, drm,
+ plane, NULL,
+ NULL,
+ NULL);
+ if (IS_ERR(crtc))
+ return ERR_CAST(crtc);
+ priv->crtc = crtc;
+
+ enc = &priv->encoder;
+ ret = drmm_encoder_init(drm, enc, NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ enc->possible_crtcs = drm_crtc_mask(crtc);
+
+ connector = &priv->connector;
+ ret = drmm_connector_init(drm, connector,
+ &drm_atomic_init_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL,
+ NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_connector_helper_add(connector, &drm_atomic_init_connector_helper_funcs);
+
+ drm_connector_attach_encoder(connector, enc);
+
+ drm_mode_config_reset(drm);
+
+ return priv;
+}
+
+static void drm_test_drm_atomic_get_connector_for_encoder(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_test_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_connector *curr_connector;
+ int ret;
+
+ priv = create_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, &priv->drm,
+ priv->crtc, &priv->connector,
+ mode, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_enable;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn:
+ curr_connector = drm_atomic_get_connector_for_encoder(&priv->encoder,
+ &ctx);
+ if (PTR_ERR(curr_connector) == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_conn;
+ }
+ KUNIT_EXPECT_PTR_EQ(test, curr_connector, &priv->connector);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+static struct kunit_case drm_atomic_get_connector_for_encoder_tests[] = {
+ KUNIT_CASE(drm_test_drm_atomic_get_connector_for_encoder),
+ { }
+};
+
+static struct kunit_suite drm_atomic_get_connector_for_encoder_test_suite = {
+ .name = "drm_test_atomic_get_connector_for_encoder",
+ .test_cases = drm_atomic_get_connector_for_encoder_tests,
+};
+
+kunit_test_suite(drm_atomic_get_connector_for_encoder_test_suite);
+
+MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
+MODULE_DESCRIPTION("Kunit test for drm_atomic functions");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_bridge_test.c b/drivers/gpu/drm/tests/drm_bridge_test.c
new file mode 100644
index 000000000000..ff88ec2e911c
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_bridge_test.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit test for drm_bridge functions
+ */
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_bridge_helper.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include <kunit/test.h>
+
+struct drm_bridge_init_priv {
+ struct drm_device drm;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder encoder;
+ struct drm_bridge bridge;
+ struct drm_connector *connector;
+ unsigned int enable_count;
+ unsigned int disable_count;
+};
+
+static void drm_test_bridge_enable(struct drm_bridge *bridge)
+{
+ struct drm_bridge_init_priv *priv =
+ container_of(bridge, struct drm_bridge_init_priv, bridge);
+
+ priv->enable_count++;
+}
+
+static void drm_test_bridge_disable(struct drm_bridge *bridge)
+{
+ struct drm_bridge_init_priv *priv =
+ container_of(bridge, struct drm_bridge_init_priv, bridge);
+
+ priv->disable_count++;
+}
+
+static const struct drm_bridge_funcs drm_test_bridge_legacy_funcs = {
+ .enable = drm_test_bridge_enable,
+ .disable = drm_test_bridge_disable,
+};
+
+static void drm_test_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct drm_bridge_init_priv *priv =
+ container_of(bridge, struct drm_bridge_init_priv, bridge);
+
+ priv->enable_count++;
+}
+
+static void drm_test_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct drm_bridge_init_priv *priv =
+ container_of(bridge, struct drm_bridge_init_priv, bridge);
+
+ priv->disable_count++;
+}
+
+static const struct drm_bridge_funcs drm_test_bridge_atomic_funcs = {
+ .atomic_enable = drm_test_bridge_atomic_enable,
+ .atomic_disable = drm_test_bridge_atomic_disable,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+};
+
+KUNIT_DEFINE_ACTION_WRAPPER(drm_bridge_remove_wrapper,
+ drm_bridge_remove,
+ struct drm_bridge *);
+
+static int drm_kunit_bridge_add(struct kunit *test,
+ struct drm_bridge *bridge)
+{
+ drm_bridge_add(bridge);
+
+ return kunit_add_action_or_reset(test,
+ drm_bridge_remove_wrapper,
+ bridge);
+}
+
+static struct drm_bridge_init_priv *
+drm_test_bridge_init(struct kunit *test, const struct drm_bridge_funcs *funcs)
+{
+ struct drm_bridge_init_priv *priv;
+ struct drm_encoder *enc;
+ struct drm_bridge *bridge;
+ struct drm_device *drm;
+ struct device *dev;
+ int ret;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ if (IS_ERR(dev))
+ return ERR_CAST(dev);
+
+ priv = drm_kunit_helper_alloc_drm_device(test, dev,
+ struct drm_bridge_init_priv, drm,
+ DRIVER_MODESET | DRIVER_ATOMIC);
+ if (IS_ERR(priv))
+ return ERR_CAST(priv);
+
+ drm = &priv->drm;
+ priv->plane = drm_kunit_helper_create_primary_plane(test, drm,
+ NULL,
+ NULL,
+ NULL, 0,
+ NULL);
+ if (IS_ERR(priv->plane))
+ return ERR_CAST(priv->plane);
+
+ priv->crtc = drm_kunit_helper_create_crtc(test, drm,
+ priv->plane, NULL,
+ NULL,
+ NULL);
+ if (IS_ERR(priv->crtc))
+ return ERR_CAST(priv->crtc);
+
+ enc = &priv->encoder;
+ ret = drmm_encoder_init(drm, enc, NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ enc->possible_crtcs = drm_crtc_mask(priv->crtc);
+
+ bridge = &priv->bridge;
+ bridge->type = DRM_MODE_CONNECTOR_VIRTUAL;
+ bridge->funcs = funcs;
+
+ ret = drm_kunit_bridge_add(test, bridge);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drm_bridge_attach(enc, bridge, NULL, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ priv->connector = drm_bridge_connector_init(drm, enc);
+ if (IS_ERR(priv->connector))
+ return ERR_CAST(priv->connector);
+
+ drm_connector_attach_encoder(priv->connector, enc);
+
+ drm_mode_config_reset(drm);
+
+ return priv;
+}
+
+/*
+ * Test that drm_bridge_get_current_state() returns the last committed
+ * state for an atomic bridge.
+ */
+static void drm_test_drm_bridge_get_current_state_atomic(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_bridge_state *curr_bridge_state;
+ struct drm_bridge_state *bridge_state;
+ struct drm_atomic_state *state;
+ struct drm_bridge *bridge;
+ struct drm_device *drm;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ drm = &priv->drm;
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+retry_commit:
+ bridge = &priv->bridge;
+ bridge_state = drm_atomic_get_bridge_state(state, bridge);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bridge_state);
+
+ ret = drm_atomic_commit(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry_commit;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_state:
+ ret = drm_modeset_lock(&bridge->base.lock, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_state;
+ }
+
+ curr_bridge_state = drm_bridge_get_current_state(bridge);
+ KUNIT_EXPECT_PTR_EQ(test, curr_bridge_state, bridge_state);
+
+ drm_modeset_unlock(&bridge->base.lock);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+/*
+ * Test that drm_bridge_get_current_state() returns NULL for a
+ * non-atomic bridge.
+ */
+static void drm_test_drm_bridge_get_current_state_legacy(struct kunit *test)
+{
+ struct drm_bridge_init_priv *priv;
+ struct drm_bridge *bridge;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_legacy_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ /*
+ * NOTE: Strictly speaking, we should take the bridge->base.lock
+ * before calling that function. However, bridge->base is only
+ * initialized if the bridge is atomic, while this test explicitly
+ * creates a bridge that isn't.
+ *
+ * In order to avoid unnecessary warnings, let's skip the
+ * locking. The function would return NULL in all cases anyway,
+ * so we don't really have any concurrency to worry about.
+ */
+ bridge = &priv->bridge;
+ KUNIT_EXPECT_NULL(test, drm_bridge_get_current_state(bridge));
+}
+
+static struct kunit_case drm_bridge_get_current_state_tests[] = {
+ KUNIT_CASE(drm_test_drm_bridge_get_current_state_atomic),
+ KUNIT_CASE(drm_test_drm_bridge_get_current_state_legacy),
+ { }
+};
+
+static struct kunit_suite drm_bridge_get_current_state_test_suite = {
+ .name = "drm_test_bridge_get_current_state",
+ .test_cases = drm_bridge_get_current_state_tests,
+};
+
+/*
+ * Test that an atomic bridge is properly power-cycled when calling
+ * drm_bridge_helper_reset_crtc().
+ */
+static void drm_test_drm_bridge_helper_reset_crtc_atomic(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_bridge *bridge;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_commit:
+ ret = drm_kunit_helper_enable_crtc_connector(test,
+ &priv->drm, priv->crtc,
+ priv->connector,
+ mode,
+ &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_commit;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ bridge = &priv->bridge;
+ KUNIT_ASSERT_EQ(test, priv->enable_count, 1);
+ KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_reset:
+ ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_reset;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ KUNIT_EXPECT_EQ(test, priv->enable_count, 2);
+ KUNIT_EXPECT_EQ(test, priv->disable_count, 1);
+}
+
+/*
+ * Test that calling drm_bridge_helper_reset_crtc() on a disabled atomic
+ * bridge will fail and not call the enable / disable callbacks.
+ */
+static void drm_test_drm_bridge_helper_reset_crtc_atomic_disabled(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_bridge *bridge;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ bridge = &priv->bridge;
+ KUNIT_ASSERT_EQ(test, priv->enable_count, 0);
+ KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_reset:
+ ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_reset;
+ }
+ KUNIT_EXPECT_LT(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ KUNIT_EXPECT_EQ(test, priv->enable_count, 0);
+ KUNIT_EXPECT_EQ(test, priv->disable_count, 0);
+}
+
+/*
+ * Test that a non-atomic bridge is properly power-cycled when calling
+ * drm_bridge_helper_reset_crtc().
+ */
+static void drm_test_drm_bridge_helper_reset_crtc_legacy(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_bridge *bridge;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_legacy_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_commit:
+ ret = drm_kunit_helper_enable_crtc_connector(test,
+ &priv->drm, priv->crtc,
+ priv->connector,
+ mode,
+ &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_commit;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ bridge = &priv->bridge;
+ KUNIT_ASSERT_EQ(test, priv->enable_count, 1);
+ KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_reset:
+ ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_reset;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ KUNIT_EXPECT_EQ(test, priv->enable_count, 2);
+ KUNIT_EXPECT_EQ(test, priv->disable_count, 1);
+}
+
+static struct kunit_case drm_bridge_helper_reset_crtc_tests[] = {
+ KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_atomic),
+ KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_atomic_disabled),
+ KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_legacy),
+ { }
+};
+
+static struct kunit_suite drm_bridge_helper_reset_crtc_test_suite = {
+ .name = "drm_test_bridge_helper_reset_crtc",
+ .test_cases = drm_bridge_helper_reset_crtc_tests,
+};
+
+kunit_test_suites(
+ &drm_bridge_get_current_state_test_suite,
+ &drm_bridge_helper_reset_crtc_test_suite,
+);
+
+MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
+MODULE_DESCRIPTION("Kunit test for drm_bridge functions");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_client_modeset_test.c b/drivers/gpu/drm/tests/drm_client_modeset_test.c
index b2fdb1a774fe..3f44fe5e92e4 100644
--- a/drivers/gpu/drm/tests/drm_client_modeset_test.c
+++ b/drivers/gpu/drm/tests/drm_client_modeset_test.c
@@ -88,7 +88,8 @@ static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test)
struct drm_device *drm = priv->drm;
struct drm_connector *connector = &priv->connector;
struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode;
- struct drm_display_mode *expected_mode, *mode;
+ struct drm_display_mode *expected_mode;
+ const struct drm_display_mode *mode;
const char *cmdline = "1920x1080@60";
int ret;
diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
index 925fbc2cda70..68f2c3162354 100644
--- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c
+++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
@@ -134,7 +134,7 @@ static void drm_gem_shmem_test_pin_pages(struct kunit *test)
shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
KUNIT_EXPECT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0);
ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -142,14 +142,14 @@ static void drm_gem_shmem_test_pin_pages(struct kunit *test)
ret = drm_gem_shmem_pin(shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 1);
for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++)
KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]);
drm_gem_shmem_unpin(shmem);
KUNIT_EXPECT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0);
}
/*
@@ -168,24 +168,24 @@ static void drm_gem_shmem_test_vmap(struct kunit *test)
shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
KUNIT_EXPECT_NULL(test, shmem->vaddr);
- KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);
ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
- ret = drm_gem_shmem_vmap(shmem, &map);
+ ret = drm_gem_shmem_vmap_locked(shmem, &map);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
- KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 1);
iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
for (i = 0; i < TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE);
- drm_gem_shmem_vunmap(shmem, &map);
+ drm_gem_shmem_vunmap_locked(shmem, &map);
KUNIT_EXPECT_NULL(test, shmem->vaddr);
- KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);
}
/*
@@ -254,7 +254,7 @@ static void drm_gem_shmem_test_get_sg_table(struct kunit *test)
sgt = drm_gem_shmem_get_pages_sgt(shmem);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 1);
KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt);
for_each_sgtable_sg(sgt, sg, si) {
@@ -284,17 +284,17 @@ static void drm_gem_shmem_test_madvise(struct kunit *test)
ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
- ret = drm_gem_shmem_madvise(shmem, 1);
+ ret = drm_gem_shmem_madvise_locked(shmem, 1);
KUNIT_EXPECT_TRUE(test, ret);
KUNIT_ASSERT_EQ(test, shmem->madv, 1);
/* Set madv to a negative value */
- ret = drm_gem_shmem_madvise(shmem, -1);
+ ret = drm_gem_shmem_madvise_locked(shmem, -1);
KUNIT_EXPECT_FALSE(test, ret);
KUNIT_ASSERT_EQ(test, shmem->madv, -1);
/* Check that madv cannot be set back to a positive value */
- ret = drm_gem_shmem_madvise(shmem, 0);
+ ret = drm_gem_shmem_madvise_locked(shmem, 0);
KUNIT_EXPECT_FALSE(test, ret);
KUNIT_ASSERT_EQ(test, shmem->madv, -1);
}
@@ -322,7 +322,7 @@ static void drm_gem_shmem_test_purge(struct kunit *test)
ret = drm_gem_shmem_is_purgeable(shmem);
KUNIT_EXPECT_FALSE(test, ret);
- ret = drm_gem_shmem_madvise(shmem, 1);
+ ret = drm_gem_shmem_madvise_locked(shmem, 1);
KUNIT_EXPECT_TRUE(test, ret);
/* The scatter/gather table will be freed by drm_gem_shmem_free */
@@ -332,7 +332,7 @@ static void drm_gem_shmem_test_purge(struct kunit *test)
ret = drm_gem_shmem_is_purgeable(shmem);
KUNIT_EXPECT_TRUE(test, ret);
- drm_gem_shmem_purge(shmem);
+ drm_gem_shmem_purge_locked(shmem);
KUNIT_EXPECT_NULL(test, shmem->pages);
KUNIT_EXPECT_NULL(test, shmem->sgt);
KUNIT_EXPECT_EQ(test, shmem->madv, -1);
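Two API shifts drive these test updates: the use counters became refcount_t, so tests read them via refcount_read(), and several helpers gained a _locked suffix, which (as I read the rename) signals that the caller is expected to hold the GEM object's reservation lock. Sketch of the accessor change:

/* before: plain integer field */
KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);

/* after: refcount_t, read through refcount_read() */
KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0);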
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index e97efd3af9ed..7ffd666753b1 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -55,49 +55,6 @@ static struct drm_display_mode *find_preferred_mode(struct drm_connector *connec
return preferred;
}
-static int light_up_connector(struct kunit *test,
- struct drm_device *drm,
- struct drm_crtc *crtc,
- struct drm_connector *connector,
- struct drm_display_mode *mode,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_atomic_state *state;
- struct drm_connector_state *conn_state;
- struct drm_crtc_state *crtc_state;
- int ret;
-
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
-
-retry:
- conn_state = drm_atomic_get_connector_state(state, connector);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
-
- ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
- if (ret == -EDEADLK) {
- drm_atomic_state_clear(state);
- ret = drm_modeset_backoff(ctx);
- if (!ret)
- goto retry;
- }
- KUNIT_EXPECT_EQ(test, ret, 0);
-
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
-
- ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
- KUNIT_EXPECT_EQ(test, ret, 0);
-
- crtc_state->enable = true;
- crtc_state->active = true;
-
- ret = drm_atomic_commit(state);
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- return 0;
-}
-
static int set_connector_edid(struct kunit *test, struct drm_connector *connector,
const char *edid, size_t edid_len)
{
@@ -298,7 +255,10 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -364,7 +324,10 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -432,7 +395,10 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -489,7 +455,10 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -547,7 +516,10 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -606,7 +578,10 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -666,7 +641,10 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -725,7 +703,10 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -789,7 +770,10 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -865,7 +849,10 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -941,7 +928,10 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -988,7 +978,10 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1037,7 +1030,10 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1086,7 +1082,10 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1134,7 +1133,10 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
/* You shouldn't be doing that at home. */
@@ -1208,7 +1210,10 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1282,7 +1287,10 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1347,7 +1355,10 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1414,7 +1425,10 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1483,7 +1497,10 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1543,7 +1560,10 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1605,7 +1625,10 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1645,7 +1668,10 @@ static void drm_test_check_disable_connector(struct kunit *test)
drm = &priv->drm;
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index 6f6616cf4966..5f7257840d8e 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -2,6 +2,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
@@ -271,6 +272,66 @@ drm_kunit_helper_create_crtc(struct kunit *test,
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_create_crtc);
+/**
+ * drm_kunit_helper_enable_crtc_connector - Enables a CRTC -> Connector output
+ * @test: The test context object
+ * @drm: The DRM device to update
+ * @crtc: The CRTC to enable
+ * @connector: The Connector to enable
+ * @mode: The display mode to configure the CRTC with
+ * @ctx: Locking context
+ *
+ * This function creates an atomic update to enable the route from @crtc
+ * to @connector, with the given @mode.
+ *
+ * Returns:
+ *
+ * 0 on success, or a negative error code otherwise. If the error
+ * returned is -EDEADLK, the entire atomic sequence must be restarted.
+ */
+int drm_kunit_helper_enable_crtc_connector(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_crtc *crtc,
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ conn_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
+ if (ret)
+ return ret;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
+ if (ret)
+ return ret;
+
+ crtc_state->enable = true;
+ crtc_state->active = true;
+
+ ret = drm_atomic_commit(state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_kunit_helper_enable_crtc_connector);
+
static void kunit_action_drm_mode_destroy(void *ptr)
{
struct drm_display_mode *mode = ptr;
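For reference, a minimal sketch of how a converted test drives the new helper, mirroring the call sites above; the fixture type and its fields (priv, conn, preferred) are placeholders, not names from this patch:

static void example_enable_output(struct kunit *test)
{
	struct my_test_priv *priv = test->priv;	/* hypothetical fixture */
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	ret = drm_kunit_helper_enable_crtc_connector(test, &priv->drm,
						     priv->crtc, priv->conn,
						     priv->preferred, &ctx);
	/* A -EDEADLK return would require restarting the whole sequence. */
	KUNIT_ASSERT_EQ(test, ret, 0);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}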
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c
index 17a86bed8054..95b4aeff2775 100644
--- a/drivers/gpu/drm/tidss/tidss_encoder.c
+++ b/drivers/gpu/drm/tidss/tidss_encoder.c
@@ -34,11 +34,12 @@ static inline struct tidss_encoder
}
static int tidss_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tidss_encoder *t_enc = bridge_to_tidss_encoder(bridge);
- return drm_bridge_attach(bridge->encoder, t_enc->next_bridge,
+ return drm_bridge_attach(encoder, t_enc->next_bridge,
bridge, flags);
}
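This follows the drm_bridge core change that hands the encoder to the attach callback instead of having bridges read it from bridge->encoder. Under the updated prototype, a chaining bridge looks roughly like this (hypothetical my_bridge names; only the signature and the forwarded encoder are the point):

static int my_bridge_attach(struct drm_bridge *bridge,
			    struct drm_encoder *encoder,
			    enum drm_bridge_attach_flags flags)
{
	struct my_bridge *mb = container_of(bridge, struct my_bridge, bridge);

	/* Chain to the next bridge, forwarding the encoder explicitly. */
	return drm_bridge_attach(encoder, mb->next_bridge, bridge, flags);
}

static const struct drm_bridge_funcs my_bridge_funcs = {
	.attach = my_bridge_attach,
};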
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 54c84c9801c1..6d1b3e2cb3fb 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -3,6 +3,7 @@
config DRM_APPLETBDRM
tristate "DRM support for Apple Touch Bars"
depends on DRM && USB && MMU
+ depends on X86 || COMPILE_TEST
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
@@ -65,20 +66,6 @@ config DRM_GM12U320
This is a KMS driver for projectors which use the GM12U320 chipset
for video transfer over USB2/3, such as the Acer C120 mini projector.
-config DRM_OFDRM
- tristate "Open Firmware display driver"
- depends on DRM && MMU && OF && (PPC || COMPILE_TEST)
- select APERTURE_HELPERS
- select DRM_CLIENT_SELECTION
- select DRM_GEM_SHMEM_HELPER
- select DRM_KMS_HELPER
- help
- DRM driver for Open Firmware framebuffers.
-
- This driver assumes that the display hardware has been initialized
- by the Open Firmware before the kernel boots. Scanout buffer, size,
- and display format must be provided via device tree.
-
config DRM_PANEL_MIPI_DBI
tristate "DRM support for MIPI DBI compatible panels"
depends on DRM && SPI
@@ -95,24 +82,6 @@ config DRM_PANEL_MIPI_DBI
https://github.com/notro/panel-mipi-dbi/wiki.
To compile this driver as a module, choose M here.
-config DRM_SIMPLEDRM
- tristate "Simple framebuffer driver"
- depends on DRM && MMU
- select APERTURE_HELPERS
- select DRM_CLIENT_SELECTION
- select DRM_GEM_SHMEM_HELPER
- select DRM_KMS_HELPER
- help
- DRM driver for simple platform-provided framebuffers.
-
- This driver assumes that the display hardware has been initialized
- by the firmware or bootloader before the kernel boots. Scanout
- buffer, size, and display format must be provided via device tree,
- UEFI, VESA, etc.
-
- On x86 BIOS or UEFI systems, you should also select SYSFB_SIMPLEFB
- to use UEFI and VESA framebuffers.
-
config TINYDRM_HX8357D
tristate "DRM support for HX8357D display panels"
depends on DRM && SPI
@@ -230,32 +199,3 @@ config TINYDRM_SHARP_MEMORY
* 4.40" Sharp Memory LCD (LS044Q7DH01)
If M is selected the module will be called sharp_memory.
-
-config TINYDRM_ST7586
- tristate "DRM support for Sitronix ST7586 display panels"
- depends on DRM && SPI
- select DRM_CLIENT_SELECTION
- select DRM_KMS_HELPER
- select DRM_GEM_DMA_HELPER
- select DRM_MIPI_DBI
- help
- DRM driver for the following Sitronix ST7586 panels:
- * LEGO MINDSTORMS EV3
-
- If M is selected the module will be called st7586.
-
-config TINYDRM_ST7735R
- tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
- depends on DRM && SPI
- select DRM_CLIENT_SELECTION
- select DRM_KMS_HELPER
- select DRM_GEM_DMA_HELPER
- select DRM_MIPI_DBI
- select BACKLIGHT_CLASS_DEVICE
- help
- DRM driver for Sitronix ST7715R/ST7735R with one of the following
- LCDs:
- * Jianda JD-T18003-T01 1.8" 128x160 TFT
- * Okaya RH128128T 1.44" 128x128 TFT
-
- If M is selected the module will be called st7735r.
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 0a3a7837a58b..4a9ff61ec254 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -5,9 +5,7 @@ obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
obj-$(CONFIG_DRM_BOCHS) += bochs.o
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus-qemu.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
-obj-$(CONFIG_DRM_OFDRM) += ofdrm.o
obj-$(CONFIG_DRM_PANEL_MIPI_DBI) += panel-mipi-dbi.o
-obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9163) += ili9163.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
@@ -16,5 +14,3 @@ obj-$(CONFIG_TINYDRM_ILI9486) += ili9486.o
obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o
obj-$(CONFIG_TINYDRM_SHARP_MEMORY) += sharp-memory.o
-obj-$(CONFIG_TINYDRM_ST7586) += st7586.o
-obj-$(CONFIG_TINYDRM_ST7735R) += st7735r.o
diff --git a/drivers/gpu/drm/tiny/appletbdrm.c b/drivers/gpu/drm/tiny/appletbdrm.c
index 4370ba22dd88..751b05753c94 100644
--- a/drivers/gpu/drm/tiny/appletbdrm.c
+++ b/drivers/gpu/drm/tiny/appletbdrm.c
@@ -45,7 +45,7 @@
#define APPLETBDRM_BULK_MSG_TIMEOUT 1000
#define drm_to_adev(_drm) container_of(_drm, struct appletbdrm_device, drm)
-#define adev_to_udev(adev) interface_to_usbdev(to_usb_interface(adev->dmadev))
+#define adev_to_udev(adev) interface_to_usbdev(to_usb_interface((adev)->drm.dev))
struct appletbdrm_msg_request_header {
__le16 unk_00;
@@ -123,8 +123,6 @@ struct appletbdrm_fb_request_response {
} __packed;
struct appletbdrm_device {
- struct device *dmadev;
-
unsigned int in_ep;
unsigned int out_ep;
@@ -214,7 +212,7 @@ retry:
}
if (response->msg != expected_response) {
- drm_err(drm, "Unexpected response from device (expected %p4cc found %p4cc)\n",
+ drm_err(drm, "Unexpected response from device (expected %p4cl found %p4cl)\n",
&expected_response, &response->msg);
return -EIO;
}
@@ -288,7 +286,7 @@ static int appletbdrm_get_information(struct appletbdrm_device *adev)
}
if (pixel_format != APPLETBDRM_PIXEL_FORMAT) {
- drm_err(drm, "Encountered unknown pixel format (%p4cc)\n", &pixel_format);
+ drm_err(drm, "Encountered unknown pixel format (%p4cl)\n", &pixel_format);
ret = -EINVAL;
goto free_info;
}
@@ -612,22 +610,10 @@ static const struct drm_encoder_funcs appletbdrm_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
-static struct drm_gem_object *appletbdrm_driver_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct appletbdrm_device *adev = drm_to_adev(dev);
-
- if (!adev->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(dev, dma_buf, adev->dmadev);
-}
-
DEFINE_DRM_GEM_FOPS(appletbdrm_drm_fops);
static const struct drm_driver appletbdrm_drm_driver = {
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = appletbdrm_driver_gem_prime_import,
.name = "appletbdrm",
.desc = "Apple Touch Bar DRM Driver",
.major = 1,
@@ -747,6 +733,7 @@ static int appletbdrm_probe(struct usb_interface *intf,
struct device *dev = &intf->dev;
struct appletbdrm_device *adev;
struct drm_device *drm = NULL;
+ struct device *dma_dev;
int ret;
ret = usb_find_common_endpoints(intf->cur_altsetting, &bulk_in, &bulk_out, NULL, NULL);
@@ -761,12 +748,19 @@ static int appletbdrm_probe(struct usb_interface *intf,
adev->in_ep = bulk_in->bEndpointAddress;
adev->out_ep = bulk_out->bEndpointAddress;
- adev->dmadev = dev;
drm = &adev->drm;
usb_set_intfdata(intf, adev);
+ dma_dev = usb_intf_get_dma_device(intf);
+ if (dma_dev) {
+ drm_dev_set_dma_dev(drm, dma_dev);
+ put_device(dma_dev);
+ } else {
+ drm_warn(drm, "buffer sharing not supported"); /* not an error */
+ }
+
ret = appletbdrm_get_information(adev);
if (ret) {
drm_err(drm, "Failed to get display information\n");
diff --git a/drivers/gpu/drm/tiny/cirrus-qemu.c b/drivers/gpu/drm/tiny/cirrus-qemu.c
index 52ec1e4ea9e5..97a93adc5669 100644
--- a/drivers/gpu/drm/tiny/cirrus-qemu.c
+++ b/drivers/gpu/drm/tiny/cirrus-qemu.c
@@ -70,20 +70,6 @@ struct cirrus_device {
#define to_cirrus(_dev) container_of(_dev, struct cirrus_device, dev)
-struct cirrus_primary_plane_state {
- struct drm_shadow_plane_state base;
-
- /* HW scanout buffer */
- const struct drm_format_info *format;
- unsigned int pitch;
-};
-
-static inline struct cirrus_primary_plane_state *
-to_cirrus_primary_plane_state(struct drm_plane_state *plane_state)
-{
- return container_of(plane_state, struct cirrus_primary_plane_state, base.base);
-};
-
/* ------------------------------------------------------------------ */
/*
* The meat of this driver. The core passes us a mode and we have to program
@@ -144,37 +130,6 @@ static void wreg_hdr(struct cirrus_device *cirrus, u8 val)
iowrite8(val, cirrus->mmio + VGA_DAC_MASK);
}
-static const struct drm_format_info *cirrus_convert_to(struct drm_framebuffer *fb)
-{
- if (fb->format->format == DRM_FORMAT_XRGB8888 && fb->pitches[0] > CIRRUS_MAX_PITCH) {
- if (fb->width * 3 <= CIRRUS_MAX_PITCH)
- /* convert from XR24 to RG24 */
- return drm_format_info(DRM_FORMAT_RGB888);
- else
- /* convert from XR24 to RG16 */
- return drm_format_info(DRM_FORMAT_RGB565);
- }
- return NULL;
-}
-
-static const struct drm_format_info *cirrus_format(struct drm_framebuffer *fb)
-{
- const struct drm_format_info *format = cirrus_convert_to(fb);
-
- if (format)
- return format;
- return fb->format;
-}
-
-static int cirrus_pitch(struct drm_framebuffer *fb)
-{
- const struct drm_format_info *format = cirrus_convert_to(fb);
-
- if (format)
- return drm_format_info_min_pitch(format, 0, fb->width);
- return fb->pitches[0];
-}
-
static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
{
u32 addr;
@@ -318,7 +273,6 @@ static void cirrus_pitch_set(struct cirrus_device *cirrus, unsigned int pitch)
/* Enable extended blanking and pitch bits, and enable full memory */
cr1b = 0x22;
cr1b |= (pitch >> 7) & 0x10;
- cr1b |= (pitch >> 6) & 0x40;
wreg_crt(cirrus, 0x1b, cr1b);
cirrus_set_start_address(cirrus, 0);
@@ -342,13 +296,10 @@ static int cirrus_primary_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct cirrus_primary_plane_state *new_primary_plane_state =
- to_cirrus_primary_plane_state(new_plane_state);
struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc *new_crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state = NULL;
int ret;
- unsigned int pitch;
if (new_crtc)
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
@@ -362,17 +313,12 @@ static int cirrus_primary_plane_helper_atomic_check(struct drm_plane *plane,
else if (!new_plane_state->visible)
return 0;
- pitch = cirrus_pitch(fb);
-
/* validate size constraints */
- if (pitch > CIRRUS_MAX_PITCH)
+ if (fb->pitches[0] > CIRRUS_MAX_PITCH)
return -EINVAL;
- else if (pitch * fb->height > CIRRUS_VRAM_SIZE)
+ else if (fb->pitches[0] > CIRRUS_VRAM_SIZE / fb->height)
return -EINVAL;
- new_primary_plane_state->format = cirrus_format(fb);
- new_primary_plane_state->pitch = pitch;
-
return 0;
}
@@ -381,15 +327,10 @@ static void cirrus_primary_plane_helper_atomic_update(struct drm_plane *plane,
{
struct cirrus_device *cirrus = to_cirrus(plane->dev);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct cirrus_primary_plane_state *primary_plane_state =
- to_cirrus_primary_plane_state(plane_state);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
- const struct drm_format_info *format = primary_plane_state->format;
- unsigned int pitch = primary_plane_state->pitch;
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct cirrus_primary_plane_state *old_primary_plane_state =
- to_cirrus_primary_plane_state(old_plane_state);
+ struct drm_framebuffer *old_fb = old_plane_state->fb;
struct iosys_map vaddr = IOSYS_MAP_INIT_VADDR_IOMEM(cirrus->vram);
struct drm_atomic_helper_damage_iter iter;
struct drm_rect damage;
@@ -401,18 +342,17 @@ static void cirrus_primary_plane_helper_atomic_update(struct drm_plane *plane,
if (!drm_dev_enter(&cirrus->dev, &idx))
return;
- if (old_primary_plane_state->format != format)
- cirrus_format_set(cirrus, format);
- if (old_primary_plane_state->pitch != pitch)
- cirrus_pitch_set(cirrus, pitch);
+ if (!old_fb || old_fb->format != fb->format)
+ cirrus_format_set(cirrus, fb->format);
+ if (!old_fb || old_fb->pitches[0] != fb->pitches[0])
+ cirrus_pitch_set(cirrus, fb->pitches[0]);
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
- unsigned int offset = drm_fb_clip_offset(pitch, format, &damage);
+ unsigned int offset = drm_fb_clip_offset(fb->pitches[0], fb->format, &damage);
struct iosys_map dst = IOSYS_MAP_INIT_OFFSET(&vaddr, offset);
- drm_fb_blit(&dst, &pitch, format->format, shadow_plane_state->data, fb,
- &damage, &shadow_plane_state->fmtcnv_state);
+ drm_fb_memcpy(&dst, fb->pitches, shadow_plane_state->data, fb, &damage);
}
drm_dev_exit(idx);
@@ -424,62 +364,11 @@ static const struct drm_plane_helper_funcs cirrus_primary_plane_helper_funcs = {
.atomic_update = cirrus_primary_plane_helper_atomic_update,
};
-static struct drm_plane_state *
-cirrus_primary_plane_atomic_duplicate_state(struct drm_plane *plane)
-{
- struct drm_plane_state *plane_state = plane->state;
- struct cirrus_primary_plane_state *primary_plane_state =
- to_cirrus_primary_plane_state(plane_state);
- struct cirrus_primary_plane_state *new_primary_plane_state;
- struct drm_shadow_plane_state *new_shadow_plane_state;
-
- if (!plane_state)
- return NULL;
-
- new_primary_plane_state = kzalloc(sizeof(*new_primary_plane_state), GFP_KERNEL);
- if (!new_primary_plane_state)
- return NULL;
- new_shadow_plane_state = &new_primary_plane_state->base;
-
- __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
- new_primary_plane_state->format = primary_plane_state->format;
- new_primary_plane_state->pitch = primary_plane_state->pitch;
-
- return &new_shadow_plane_state->base;
-}
-
-static void cirrus_primary_plane_atomic_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *plane_state)
-{
- struct cirrus_primary_plane_state *primary_plane_state =
- to_cirrus_primary_plane_state(plane_state);
-
- __drm_gem_destroy_shadow_plane_state(&primary_plane_state->base);
- kfree(primary_plane_state);
-}
-
-static void cirrus_reset_primary_plane(struct drm_plane *plane)
-{
- struct cirrus_primary_plane_state *primary_plane_state;
-
- if (plane->state) {
- cirrus_primary_plane_atomic_destroy_state(plane, plane->state);
- plane->state = NULL; /* must be set to NULL here */
- }
-
- primary_plane_state = kzalloc(sizeof(*primary_plane_state), GFP_KERNEL);
- if (!primary_plane_state)
- return;
- __drm_gem_reset_shadow_plane(plane, &primary_plane_state->base);
-}
-
static const struct drm_plane_funcs cirrus_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
- .reset = cirrus_reset_primary_plane,
- .atomic_duplicate_state = cirrus_primary_plane_atomic_duplicate_state,
- .atomic_destroy_state = cirrus_primary_plane_atomic_destroy_state,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
};
static int cirrus_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
@@ -614,9 +503,17 @@ static enum drm_mode_status cirrus_mode_config_mode_valid(struct drm_device *dev
const struct drm_display_mode *mode)
{
const struct drm_format_info *format = drm_format_info(DRM_FORMAT_XRGB8888);
- uint64_t pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay);
+ u64 pitch;
- if (pitch * mode->vdisplay > CIRRUS_VRAM_SIZE)
+ if (drm_WARN_ON_ONCE(dev, !format))
+ return MODE_ERROR; /* driver bug */
+
+ pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay);
+ if (!pitch)
+ return MODE_BAD_WIDTH;
+ if (pitch > CIRRUS_MAX_PITCH)
+ return MODE_BAD_WIDTH; /* maximum programmable pitch */
+ if (pitch > CIRRUS_VRAM_SIZE / mode->vdisplay)
return MODE_MEM;
return MODE_OK;
@@ -681,7 +578,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- ret = pci_request_regions(pdev, DRIVER_NAME);
+ ret = pcim_request_all_regions(pdev, DRIVER_NAME);
if (ret)
return ret;
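Both new size checks compare against a division instead of multiplying pitch by height, presumably so that a bogus framebuffer cannot overflow 32-bit arithmetic before the comparison. A standalone illustration (the 4 MiB VRAM size is an assumption for the demo, not taken from this diff):

#include <stdbool.h>
#include <stdio.h>

#define VRAM_SIZE (4u * 1024 * 1024)	/* assumed VRAM size for the demo */

static bool fb_fits(unsigned int pitch, unsigned int height)
{
	/* pitch * height could wrap; pitch > VRAM_SIZE / height cannot. */
	return pitch <= VRAM_SIZE / height;
}

int main(void)
{
	printf("%d\n", fb_fits(4096, 1024));	/* 1: exactly fills VRAM */
	printf("%d\n", fb_fits(70000, 70000));	/* 0: product would wrap 32 bits */
	return 0;
}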
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 41e9bfb2e2ff..fb0004166f4a 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -86,7 +86,6 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
struct gm12u320_device {
struct drm_device dev;
- struct device *dmadev;
struct drm_simple_display_pipe pipe;
struct drm_connector conn;
unsigned char *cmd_buf;
@@ -602,22 +601,6 @@ static const uint64_t gm12u320_pipe_modifiers[] = {
DRM_FORMAT_MOD_INVALID
};
-/*
- * FIXME: Dma-buf sharing requires DMA support by the importing device.
- * This function is a workaround to make USB devices work as well.
- * See todo.rst for how to fix the issue in the dma-buf framework.
- */
-static struct drm_gem_object *gm12u320_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct gm12u320_device *gm12u320 = to_gm12u320(dev);
-
- if (!gm12u320->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(dev, dma_buf, gm12u320->dmadev);
-}
-
DEFINE_DRM_GEM_FOPS(gm12u320_fops);
static const struct drm_driver gm12u320_drm_driver = {
@@ -630,7 +613,6 @@ static const struct drm_driver gm12u320_drm_driver = {
.fops = &gm12u320_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = gm12u320_gem_prime_import,
DRM_FBDEV_SHMEM_DRIVER_OPS,
};
@@ -645,6 +627,7 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
{
struct gm12u320_device *gm12u320;
struct drm_device *dev;
+ struct device *dma_dev;
int ret;
/*
@@ -660,16 +643,20 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
return PTR_ERR(gm12u320);
dev = &gm12u320->dev;
- gm12u320->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
- if (!gm12u320->dmadev)
+ dma_dev = usb_intf_get_dma_device(interface);
+ if (dma_dev) {
+ drm_dev_set_dma_dev(dev, dma_dev);
+ put_device(dma_dev);
+ } else {
drm_warn(dev, "buffer sharing not supported"); /* not an error */
+ }
INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
mutex_init(&gm12u320->fb_update.lock);
ret = drmm_mode_config_init(dev);
if (ret)
- goto err_put_device;
+ return ret;
dev->mode_config.min_width = GM12U320_USER_WIDTH;
dev->mode_config.max_width = GM12U320_USER_WIDTH;
@@ -679,15 +666,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
ret = gm12u320_usb_alloc(gm12u320);
if (ret)
- goto err_put_device;
+ return ret;
ret = gm12u320_set_ecomode(gm12u320);
if (ret)
- goto err_put_device;
+ return ret;
ret = gm12u320_conn_init(gm12u320);
if (ret)
- goto err_put_device;
+ return ret;
ret = drm_simple_display_pipe_init(&gm12u320->dev,
&gm12u320->pipe,
@@ -697,31 +684,24 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
gm12u320_pipe_modifiers,
&gm12u320->conn);
if (ret)
- goto err_put_device;
+ return ret;
drm_mode_config_reset(dev);
usb_set_intfdata(interface, dev);
ret = drm_dev_register(dev, 0);
if (ret)
- goto err_put_device;
+ return ret;
drm_client_setup(dev, NULL);
return 0;
-
-err_put_device:
- put_device(gm12u320->dmadev);
- return ret;
}
static void gm12u320_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
- struct gm12u320_device *gm12u320 = to_gm12u320(dev);
- put_device(gm12u320->dmadev);
- gm12u320->dmadev = NULL;
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
}
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
index f8f20d2f6174..560d12e50e9e 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -201,7 +201,7 @@ static int threaded_ttm_bo_reserve(void *arg)
err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
timer_delete_sync(&s_timer.timer);
- destroy_timer_on_stack(&s_timer.timer);
+ timer_destroy_on_stack(&s_timer.timer);
ww_acquire_fini(&ctx);
@@ -340,7 +340,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
resv = kunit_kzalloc(test, sizeof(*resv), GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+ KUNIT_ASSERT_NOT_NULL(test, resv);
err = ttm_device_kunit_init(priv, ttm_dev, false, false);
KUNIT_ASSERT_EQ(test, err, 0);
diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c
index 9e2d72c447ee..ffaab68bd5dd 100644
--- a/drivers/gpu/drm/ttm/ttm_backup.c
+++ b/drivers/gpu/drm/ttm/ttm_backup.c
@@ -120,13 +120,13 @@ ttm_backup_backup_page(struct file *backup, struct page *page,
.for_reclaim = 1,
};
folio_set_reclaim(to_folio);
- ret = mapping->a_ops->writepage(folio_file_page(to_folio, idx), &wbc);
+ ret = shmem_writeout(to_folio, &wbc);
if (!folio_test_writeback(to_folio))
folio_clear_reclaim(to_folio);
/*
- * If writepage succeeds, it unlocks the folio.
- * writepage() errors are otherwise dropped, since writepage()
- * is only best effort here.
+ * If writeout succeeds, it unlocks the folio. Errors are
+ * otherwise dropped, since writeout is only best effort
+ * here.
*/
if (ret)
folio_unlock(to_folio);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5bf3c969907c..08a23ab037cb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -235,7 +235,7 @@ static void ttm_bo_delayed_delete(struct work_struct *work)
bo = container_of(work, typeof(*bo), delayed_delete);
- dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
+ dma_resv_wait_timeout(&bo->base._resv, DMA_RESV_USAGE_BOOKKEEP, false,
MAX_SCHEDULE_TIMEOUT);
dma_resv_lock(bo->base.resv, NULL);
ttm_bo_cleanup_memtype_use(bo);
@@ -270,7 +270,7 @@ static void ttm_bo_release(struct kref *kref)
drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
ttm_mem_io_free(bdev, bo->resource);
- if (!dma_resv_test_signaled(bo->base.resv,
+ if (!dma_resv_test_signaled(&bo->base._resv,
DMA_RESV_USAGE_BOOKKEEP) ||
(want_init_on_free() && (bo->ttm != NULL)) ||
bo->type == ttm_bo_type_sg ||
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index a194db83421d..bdfa6ecfef05 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -220,7 +220,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
- .force_alloc = true
};
ttm = bo->ttm;
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 7e5a60c55813..769b0ca9be47 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -548,7 +548,6 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
- .force_alloc = true
};
struct dma_fence *fence;
int ret;
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 05b3a152cc33..1922988625eb 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -22,13 +22,14 @@ static int udl_usb_suspend(struct usb_interface *interface,
pm_message_t message)
{
struct drm_device *dev = usb_get_intfdata(interface);
+ struct udl_device *udl = to_udl(dev);
int ret;
ret = drm_mode_config_helper_suspend(dev);
if (ret)
return ret;
- udl_sync_pending_urbs(dev);
+ udl_sync_pending_urbs(udl);
return 0;
}
@@ -49,22 +50,6 @@ static int udl_usb_reset_resume(struct usb_interface *interface)
return drm_mode_config_helper_resume(dev);
}
-/*
- * FIXME: Dma-buf sharing requires DMA support by the importing device.
- * This function is a workaround to make USB devices work as well.
- * See todo.rst for how to fix the issue in the dma-buf framework.
- */
-static struct drm_gem_object *udl_driver_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct udl_device *udl = to_udl(dev);
-
- if (!udl->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(dev, dma_buf, udl->dmadev);
-}
-
DEFINE_DRM_GEM_FOPS(udl_driver_fops);
static const struct drm_driver driver = {
@@ -73,7 +58,6 @@ static const struct drm_driver driver = {
/* GEM hooks */
.fops = &udl_driver_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = udl_driver_gem_prime_import,
DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
@@ -126,10 +110,10 @@ static int udl_usb_probe(struct usb_interface *interface,
static void udl_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
+ struct udl_device *udl = to_udl(dev);
- drm_kms_helper_poll_fini(dev);
- udl_drop_usb(dev);
drm_dev_unplug(dev);
+ udl_drop_usb(udl);
}
/*
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index be00dc1d87a1..145bb95ccc48 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -50,18 +50,14 @@ struct urb_list {
struct udl_device {
struct drm_device drm;
- struct device *dev;
- struct device *dmadev;
+
+ unsigned long sku_pixel_limit;
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
- struct mutex gem_lock;
-
- int sku_pixel_limit;
-
struct urb_list urbs;
};
@@ -73,22 +69,22 @@ static inline struct usb_device *udl_to_usb_device(struct udl_device *udl)
}
/* modeset */
-int udl_modeset_init(struct drm_device *dev);
+int udl_modeset_init(struct udl_device *udl);
struct drm_connector *udl_connector_init(struct drm_device *dev);
-struct urb *udl_get_urb(struct drm_device *dev);
+struct urb *udl_get_urb(struct udl_device *udl);
-int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
-void udl_sync_pending_urbs(struct drm_device *dev);
+int udl_submit_urb(struct udl_device *udl, struct urb *urb, size_t len);
+void udl_sync_pending_urbs(struct udl_device *udl);
void udl_urb_completion(struct urb *urb);
int udl_init(struct udl_device *udl);
-int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
+int udl_render_hline(struct udl_device *udl, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset, u32 byte_width);
-int udl_drop_usb(struct drm_device *dev);
+int udl_drop_usb(struct udl_device *udl);
int udl_select_std_channel(struct udl_device *udl);
#endif
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 3ebe2ce55dfd..bc58991a6f14 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -8,6 +8,8 @@
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*/
+#include <linux/unaligned.h>
+
#include <drm/drm.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -23,72 +25,99 @@
#define WRITES_IN_FLIGHT (20)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256
+#define UDL_SKU_PIXEL_LIMIT_DEFAULT 2080000
+
static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout);
+/*
+ * Try to make sense of whatever we parse. Therefore return @end on
+ * errors, but don't fail hard.
+ */
+static const u8 *udl_parse_key_value_pair(struct udl_device *udl, const u8 *pos, const u8 *end)
+{
+ u16 key;
+ u8 len;
+
+ /* read key */
+ if (pos >= end - 2)
+ return end;
+ key = get_unaligned_le16(pos);
+ pos += 2;
+
+ /* read value length */
+ if (pos >= end - 1)
+ return end;
+ len = *pos++;
+
+ /* read value */
+ if (pos >= end - len)
+ return end;
+ switch (key) {
+ case 0x0200: { /* maximum number of pixels */
+ unsigned int sku_pixel_limit;
+
+ if (len < sizeof(__le32))
+ break;
+ sku_pixel_limit = get_unaligned_le32(pos);
+ if (sku_pixel_limit >= 16 * UDL_SKU_PIXEL_LIMIT_DEFAULT)
+ break; /* almost 100 MiB, so probably bogus */
+ udl->sku_pixel_limit = sku_pixel_limit;
+ break;
+ }
+ default:
+ break;
+ }
+ pos += len;
+
+ return pos;
+}
+
static int udl_parse_vendor_descriptor(struct udl_device *udl)
{
+ struct drm_device *dev = &udl->drm;
struct usb_device *udev = udl_to_usb_device(udl);
- char *desc;
- char *buf;
- char *desc_end;
-
- u8 total_len = 0;
+ bool detected = false;
+ void *buf;
+ int ret;
+ unsigned int len;
+ const u8 *desc;
+ const u8 *desc_end;
buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
if (!buf)
- return false;
+ return -ENOMEM;
+
+ ret = usb_get_descriptor(udev, 0x5f, /* vendor specific */
+ 0, buf, MAX_VENDOR_DESCRIPTOR_SIZE);
+ if (ret < 0)
+ goto out;
+ len = ret;
+
+ if (len < 5)
+ goto out;
+
desc = buf;
+ desc_end = desc + len;
- total_len = usb_get_descriptor(udev, 0x5f, /* vendor specific */
- 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
- if (total_len > 5) {
- DRM_INFO("vendor descriptor length:%x data:%11ph\n",
- total_len, desc);
-
- if ((desc[0] != total_len) || /* descriptor length */
- (desc[1] != 0x5f) || /* vendor descriptor type */
- (desc[2] != 0x01) || /* version (2 bytes) */
- (desc[3] != 0x00) ||
- (desc[4] != total_len - 2)) /* length after type */
- goto unrecognized;
-
- desc_end = desc + total_len;
- desc += 5; /* the fixed header we've already parsed */
-
- while (desc < desc_end) {
- u8 length;
- u16 key;
-
- key = le16_to_cpu(*((u16 *) desc));
- desc += sizeof(u16);
- length = *desc;
- desc++;
-
- switch (key) {
- case 0x0200: { /* max_area */
- u32 max_area;
- max_area = le32_to_cpu(*((u32 *)desc));
- DRM_DEBUG("DL chip limited to %d pixel modes\n",
- max_area);
- udl->sku_pixel_limit = max_area;
- break;
- }
- default:
- break;
- }
- desc += length;
- }
- }
+ if ((desc[0] != len) || /* descriptor length */
+ (desc[1] != 0x5f) || /* vendor descriptor type */
+ (desc[2] != 0x01) || /* version (2 bytes) */
+ (desc[3] != 0x00) ||
+ (desc[4] != len - 2)) /* length after type */
+ goto out;
+ desc += 5;
- goto success;
+ detected = true;
-unrecognized:
- /* allow udlfb to load for now even if firmware unrecognized */
- DRM_ERROR("Unrecognized vendor firmware descriptor\n");
+ while (desc < desc_end)
+ desc = udl_parse_key_value_pair(udl, desc, desc_end);
-success:
+out:
+ if (!detected)
+ drm_warn(dev, "Unrecognized vendor firmware descriptor\n");
kfree(buf);
- return true;
+
+ return 0;
}
/*
@@ -145,9 +174,8 @@ void udl_urb_completion(struct urb *urb)
wake_up(&udl->urbs.sleep);
}
-static void udl_free_urb_list(struct drm_device *dev)
+static void udl_free_urb_list(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
struct urb_node *unode;
struct urb *urb;
@@ -172,9 +200,8 @@ static void udl_free_urb_list(struct drm_device *dev)
wake_up_all(&udl->urbs.sleep);
}
-static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
+static int udl_alloc_urb_list(struct udl_device *udl, int count, size_t size)
{
- struct udl_device *udl = to_udl(dev);
struct urb *urb;
struct urb_node *unode;
char *buf;
@@ -210,7 +237,7 @@ retry:
usb_free_urb(urb);
if (size > PAGE_SIZE) {
size /= 2;
- udl_free_urb_list(dev);
+ udl_free_urb_list(udl);
goto retry;
}
break;
@@ -259,9 +286,8 @@ static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout)
}
#define GET_URB_TIMEOUT HZ
-struct urb *udl_get_urb(struct drm_device *dev)
+struct urb *udl_get_urb(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
struct urb *urb;
spin_lock_irq(&udl->urbs.lock);
@@ -270,9 +296,8 @@ struct urb *udl_get_urb(struct drm_device *dev)
return urb;
}
-int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
+int udl_submit_urb(struct udl_device *udl, struct urb *urb, size_t len)
{
- struct udl_device *udl = to_udl(dev);
int ret;
if (WARN_ON(len > udl->urbs.size)) {
@@ -290,9 +315,9 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
}
/* wait until all pending URBs have been processed */
-void udl_sync_pending_urbs(struct drm_device *dev)
+void udl_sync_pending_urbs(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
+ struct drm_device *dev = &udl->drm;
spin_lock_irq(&udl->urbs.lock);
/* 2 seconds as a sane timeout */
@@ -308,53 +333,55 @@ int udl_init(struct udl_device *udl)
{
struct drm_device *dev = &udl->drm;
int ret = -ENOMEM;
+ struct device *dma_dev;
DRM_DEBUG("\n");
- udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
- if (!udl->dmadev)
+ dma_dev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
+ if (dma_dev) {
+ drm_dev_set_dma_dev(dev, dma_dev);
+ put_device(dma_dev);
+ } else {
drm_warn(dev, "buffer sharing not supported"); /* not an error */
+ }
- mutex_init(&udl->gem_lock);
+ /*
+ * Not all devices provide a vendor descriptor with device
+ * information. Initialize to the default limit of real-world
+ * devices, which is just enough pixels for a Full HD mode.
+ */
+ udl->sku_pixel_limit = UDL_SKU_PIXEL_LIMIT_DEFAULT;
- if (!udl_parse_vendor_descriptor(udl)) {
- ret = -ENODEV;
- DRM_ERROR("firmware not recognized. Assume incompatible device\n");
+ ret = udl_parse_vendor_descriptor(udl);
+ if (ret)
goto err;
- }
if (udl_select_std_channel(udl))
DRM_ERROR("Selecting channel failed\n");
- if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
+ if (!udl_alloc_urb_list(udl, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
DRM_ERROR("udl_alloc_urb_list failed\n");
+ ret = -ENOMEM;
goto err;
}
DRM_DEBUG("\n");
- ret = udl_modeset_init(dev);
+ ret = udl_modeset_init(udl);
if (ret)
goto err;
- drm_kms_helper_poll_init(dev);
-
return 0;
err:
if (udl->urbs.count)
- udl_free_urb_list(dev);
- put_device(udl->dmadev);
+ udl_free_urb_list(udl);
DRM_ERROR("%d\n", ret);
return ret;
}
-int udl_drop_usb(struct drm_device *dev)
+int udl_drop_usb(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
-
- udl_free_urb_list(dev);
- put_device(udl->dmadev);
- udl->dmadev = NULL;
+ udl_free_urb_list(udl);
return 0;
}
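As an illustration of the layout udl_parse_key_value_pair() above walks: a 5-byte fixed header followed by key/length/value records, of which only key 0x0200 is interpreted. The bytes below are hypothetical, not captured from real hardware:

static const unsigned char example_desc[] = {
	0x0d,			/* desc[0]: total descriptor length (13) */
	0x5f,			/* desc[1]: vendor descriptor type */
	0x01, 0x00,		/* desc[2..3]: version */
	0x0b,			/* desc[4]: length after type (total - 2) */
	0x00, 0x02,		/* key 0x0200, little endian: pixel limit */
	0x04,			/* value length */
	0x00, 0xbd, 0x1f, 0x00,	/* value 2080000 (0x001fbd40), little endian */
	0x00,			/* trailing pad; a value reaching the exact
				 * buffer end is skipped by the bounds check */
};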
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index bbb04f98886a..231e829bd709 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -205,6 +205,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
struct drm_device *dev = fb->dev;
+ struct udl_device *udl = to_udl(dev);
void *vaddr = map->vaddr; /* TODO: Use mapping abstraction properly */
int i, ret;
char *cmd;
@@ -216,7 +217,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
return ret;
log_bpp = ret;
- urb = udl_get_urb(dev);
+ urb = udl_get_urb(udl);
if (!urb)
return -ENOMEM;
cmd = urb->transfer_buffer;
@@ -226,7 +227,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
const int byte_offset = line_offset + (clip->x1 << log_bpp);
const int dev_byte_offset = (fb->width * i + clip->x1) << log_bpp;
const int byte_width = drm_rect_width(clip) << log_bpp;
- ret = udl_render_hline(dev, log_bpp, &urb, (char *)vaddr,
+ ret = udl_render_hline(udl, log_bpp, &urb, (char *)vaddr,
&cmd, byte_offset, dev_byte_offset,
byte_width);
if (ret)
@@ -239,7 +240,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
if (cmd < (char *)urb->transfer_buffer + urb->transfer_buffer_length)
*cmd++ = UDL_MSG_BULK;
len = cmd - (char *)urb->transfer_buffer;
- ret = udl_submit_urb(dev, urb, len);
+ ret = udl_submit_urb(udl, urb, len);
} else {
udl_urb_completion(urb);
}
@@ -330,6 +331,7 @@ static const struct drm_plane_funcs udl_primary_plane_funcs = {
static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
+ struct udl_device *udl = to_udl(dev);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct drm_display_mode *mode = &crtc_state->mode;
struct urb *urb;
@@ -339,7 +341,7 @@ static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atom
if (!drm_dev_enter(dev, &idx))
return;
- urb = udl_get_urb(dev);
+ urb = udl_get_urb(udl);
if (!urb)
goto out;
@@ -355,7 +357,7 @@ static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atom
buf = udl_vidreg_unlock(buf);
buf = udl_dummy_render(buf);
- udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
+ udl_submit_urb(udl, urb, buf - (char *)urb->transfer_buffer);
out:
drm_dev_exit(idx);
@@ -364,6 +366,7 @@ out:
static void udl_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
+ struct udl_device *udl = to_udl(dev);
struct urb *urb;
char *buf;
int idx;
@@ -371,7 +374,7 @@ static void udl_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_ato
if (!drm_dev_enter(dev, &idx))
return;
- urb = udl_get_urb(dev);
+ urb = udl_get_urb(udl);
if (!urb)
goto out;
@@ -381,7 +384,7 @@ static void udl_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_ato
buf = udl_vidreg_unlock(buf);
buf = udl_dummy_render(buf);
- udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
+ udl_submit_urb(udl, urb, buf - (char *)urb->transfer_buffer);
out:
drm_dev_exit(idx);
@@ -476,9 +479,9 @@ static const struct drm_mode_config_funcs udl_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-int udl_modeset_init(struct drm_device *dev)
+int udl_modeset_init(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
+ struct drm_device *dev = &udl->drm;
struct drm_plane *primary_plane;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
@@ -535,6 +538,7 @@ int udl_modeset_init(struct drm_device *dev)
return ret;
drm_mode_config_reset(dev);
+ drmm_kms_helper_poll_init(dev);
return 0;
}
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 62224992988f..7d670b3a5293 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -170,7 +170,7 @@ static void udl_compress_hline16(
* (that we can only write to, slowly, and can never read), and (optionally)
* our shadow copy that tracks what's been sent to that hardware buffer.
*/
-int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
+int udl_render_hline(struct udl_device *udl, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset,
u32 byte_width)
@@ -199,10 +199,10 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
if (cmd >= cmd_end) {
int len = cmd - (u8 *) urb->transfer_buffer;
- int ret = udl_submit_urb(dev, urb, len);
+ int ret = udl_submit_urb(udl, urb, len);
if (ret)
return ret;
- urb = udl_get_urb(dev);
+ urb = udl_get_urb(udl);
if (!urb)
return -EAGAIN;
*urb_ptr = urb;
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 76816f2551c1..7e789e181af0 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -21,74 +21,74 @@ struct v3d_reg_def {
};
static const struct v3d_reg_def v3d_hub_reg_defs[] = {
- REGDEF(33, 42, V3D_HUB_AXICFG),
- REGDEF(33, 71, V3D_HUB_UIFCFG),
- REGDEF(33, 71, V3D_HUB_IDENT0),
- REGDEF(33, 71, V3D_HUB_IDENT1),
- REGDEF(33, 71, V3D_HUB_IDENT2),
- REGDEF(33, 71, V3D_HUB_IDENT3),
- REGDEF(33, 71, V3D_HUB_INT_STS),
- REGDEF(33, 71, V3D_HUB_INT_MSK_STS),
-
- REGDEF(33, 71, V3D_MMU_CTL),
- REGDEF(33, 71, V3D_MMU_VIO_ADDR),
- REGDEF(33, 71, V3D_MMU_VIO_ID),
- REGDEF(33, 71, V3D_MMU_DEBUG_INFO),
-
- REGDEF(71, 71, V3D_GMP_STATUS(71)),
- REGDEF(71, 71, V3D_GMP_CFG(71)),
- REGDEF(71, 71, V3D_GMP_VIO_ADDR(71)),
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_HUB_AXICFG),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_UIFCFG),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT0),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT1),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT2),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT3),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_STS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_MSK_STS),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_CTL),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ADDR),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ID),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_DEBUG_INFO),
+
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_STATUS(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_CFG(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_VIO_ADDR(71)),
};
static const struct v3d_reg_def v3d_gca_reg_defs[] = {
- REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN),
- REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN_ACK),
+ REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN),
+ REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN_ACK),
};
static const struct v3d_reg_def v3d_core_reg_defs[] = {
- REGDEF(33, 71, V3D_CTL_IDENT0),
- REGDEF(33, 71, V3D_CTL_IDENT1),
- REGDEF(33, 71, V3D_CTL_IDENT2),
- REGDEF(33, 71, V3D_CTL_MISCCFG),
- REGDEF(33, 71, V3D_CTL_INT_STS),
- REGDEF(33, 71, V3D_CTL_INT_MSK_STS),
- REGDEF(33, 71, V3D_CLE_CT0CS),
- REGDEF(33, 71, V3D_CLE_CT0CA),
- REGDEF(33, 71, V3D_CLE_CT0EA),
- REGDEF(33, 71, V3D_CLE_CT1CS),
- REGDEF(33, 71, V3D_CLE_CT1CA),
- REGDEF(33, 71, V3D_CLE_CT1EA),
-
- REGDEF(33, 71, V3D_PTB_BPCA),
- REGDEF(33, 71, V3D_PTB_BPCS),
-
- REGDEF(33, 42, V3D_GMP_STATUS(33)),
- REGDEF(33, 42, V3D_GMP_CFG(33)),
- REGDEF(33, 42, V3D_GMP_VIO_ADDR(33)),
-
- REGDEF(33, 71, V3D_ERR_FDBGO),
- REGDEF(33, 71, V3D_ERR_FDBGB),
- REGDEF(33, 71, V3D_ERR_FDBGS),
- REGDEF(33, 71, V3D_ERR_STAT),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT0),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT1),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT2),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_MISCCFG),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_STS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_MSK_STS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0EA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1EA),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCS),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_STATUS(33)),
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_CFG(33)),
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_VIO_ADDR(33)),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGO),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGB),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_STAT),
};
static const struct v3d_reg_def v3d_csd_reg_defs[] = {
- REGDEF(41, 71, V3D_CSD_STATUS),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG0(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG1(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG2(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG3(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG4(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG5(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG6(41)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG0(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG1(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG2(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG3(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG4(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG5(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG6(71)),
- REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG7),
+ REGDEF(V3D_GEN_41, V3D_GEN_71, V3D_CSD_STATUS),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG0(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG1(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG2(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG3(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG4(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG5(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG6(41)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG0(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG1(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG2(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG3(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG4(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG5(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG6(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_CSD_CURRENT_CFG7),
};
static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
@@ -164,7 +164,7 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
str_yes_no(ident2 & V3D_HUB_IDENT2_WITH_MMU));
seq_printf(m, "TFU: %s\n",
str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TFU));
- if (v3d->ver <= 42) {
+ if (v3d->ver <= V3D_GEN_42) {
seq_printf(m, "TSY: %s\n",
str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY));
}
@@ -196,11 +196,11 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
seq_printf(m, " QPUs: %d\n", nslc * qups);
seq_printf(m, " Semaphores: %d\n",
V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM));
- if (v3d->ver <= 42) {
+ if (v3d->ver <= V3D_GEN_42) {
seq_printf(m, " BCG int: %d\n",
(ident2 & V3D_IDENT2_BCG_INT) != 0);
}
- if (v3d->ver < 40) {
+ if (v3d->ver < V3D_GEN_41) {
seq_printf(m, " Override TMU: %d\n",
(misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
}
@@ -234,7 +234,7 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
int core = 0;
int measure_ms = 1000;
- if (v3d->ver >= 40) {
+ if (v3d->ver >= V3D_GEN_41) {
int cycle_count_reg = V3D_PCTR_CYCLE_COUNT(v3d->ver);
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
V3D_SET_FIELD_VER(cycle_count_reg,
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 852015214e97..5e997ae8bc9c 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -17,6 +17,7 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched/clock.h>
@@ -92,7 +93,7 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
args->value = 1;
return 0;
case DRM_V3D_PARAM_SUPPORTS_PERFMON:
- args->value = (v3d->ver >= 40);
+ args->value = (v3d->ver >= V3D_GEN_41);
return 0;
case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT:
args->value = 1;
@@ -254,14 +255,44 @@ static const struct drm_driver v3d_drm_driver = {
};
static const struct of_device_id v3d_of_match[] = {
- { .compatible = "brcm,2711-v3d" },
- { .compatible = "brcm,2712-v3d" },
- { .compatible = "brcm,7268-v3d" },
- { .compatible = "brcm,7278-v3d" },
+ { .compatible = "brcm,2711-v3d", .data = (void *)V3D_GEN_42 },
+ { .compatible = "brcm,2712-v3d", .data = (void *)V3D_GEN_71 },
+ { .compatible = "brcm,7268-v3d", .data = (void *)V3D_GEN_33 },
+ { .compatible = "brcm,7278-v3d", .data = (void *)V3D_GEN_41 },
{},
};
MODULE_DEVICE_TABLE(of, v3d_of_match);
+static void
+v3d_idle_sms(struct v3d_dev *v3d)
+{
+ if (v3d->ver < V3D_GEN_71)
+ return;
+
+ V3D_SMS_WRITE(V3D_SMS_TEE_CS, V3D_SMS_CLEAR_POWER_OFF);
+
+ if (wait_for((V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_TEE_CS),
+ V3D_SMS_STATE) == V3D_SMS_IDLE), 100)) {
+ DRM_ERROR("Failed to power up SMS\n");
+ }
+
+ v3d_reset_sms(v3d);
+}
+
+static void
+v3d_power_off_sms(struct v3d_dev *v3d)
+{
+ if (v3d->ver < V3D_GEN_71)
+ return;
+
+ V3D_SMS_WRITE(V3D_SMS_TEE_CS, V3D_SMS_POWER_OFF);
+
+ if (wait_for((V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_TEE_CS),
+ V3D_SMS_STATE) == V3D_SMS_POWER_OFF_STATE), 100)) {
+ DRM_ERROR("Failed to power off SMS\n");
+ }
+}
+
static int
map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name)
{
@@ -274,6 +305,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct drm_device *drm;
struct v3d_dev *v3d;
+ enum v3d_gen gen;
int ret;
u32 mmu_debug;
u32 ident1, ident3;
@@ -287,6 +319,9 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drm);
+ gen = (uintptr_t)of_device_get_match_data(dev);
+ v3d->ver = gen;
+
ret = map_regs(v3d, &v3d->hub_regs, "hub");
if (ret)
return ret;
@@ -295,6 +330,12 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (v3d->ver >= V3D_GEN_71) {
+ ret = map_regs(v3d, &v3d->sms_regs, "sms");
+ if (ret)
+ return ret;
+ }
+
v3d->clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(v3d->clk))
return dev_err_probe(dev, PTR_ERR(v3d->clk), "Failed to get V3D clock\n");
@@ -305,6 +346,8 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
return ret;
}
+ v3d_idle_sms(v3d);
+
mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
ret = dma_set_mask_and_coherent(dev, mask);
@@ -316,6 +359,11 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
ident1 = V3D_READ(V3D_HUB_IDENT1);
v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 +
V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV));
+ /*
+ * Make sure that the V3D tech version retrieved from the HW is
+ * equal to the one advertised by the device tree.
+ */
+ WARN_ON(v3d->ver != gen);
+
v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
WARN_ON(v3d->cores > 1); /* multicore not yet implemented */
@@ -340,7 +388,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
}
}
- if (v3d->ver < 41) {
+ if (v3d->ver < V3D_GEN_41) {
ret = map_regs(v3d, &v3d->gca_regs, "gca");
if (ret)
goto clk_disable;
@@ -400,6 +448,8 @@ static void v3d_platform_drm_remove(struct platform_device *pdev)
dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
v3d->mmu_scratch_paddr);
+ v3d_power_off_sms(v3d);
+
clk_disable_unprepare(v3d->clk);
}
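The generation now travels in the match table's .data pointer and is recovered in probe with a uintptr_t cast. The round-trip in isolation (the example_* names are placeholders):

static const struct of_device_id example_match[] = {
	{ .compatible = "brcm,2712-v3d", .data = (void *)V3D_GEN_71 },
	{ /* sentinel */ },
};

static int example_probe(struct platform_device *pdev)
{
	/* Recover the enum stashed in the matched entry's .data. */
	enum v3d_gen gen = (uintptr_t)of_device_get_match_data(&pdev->dev);

	return gen ? 0 : -ENODEV;
}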
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 9deaefa0f95b..b51f0b648a08 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -94,11 +94,18 @@ struct v3d_perfmon {
u64 values[] __counted_by(ncounters);
};
+enum v3d_gen {
+ V3D_GEN_33 = 33,
+ V3D_GEN_41 = 41,
+ V3D_GEN_42 = 42,
+ V3D_GEN_71 = 71,
+};
+
struct v3d_dev {
struct drm_device drm;
/* Short representation (e.g. 33, 41) of the V3D tech version */
- int ver;
+ enum v3d_gen ver;
/* Short representation (e.g. 5, 6) of the V3D tech revision */
int rev;
@@ -111,6 +118,7 @@ struct v3d_dev {
void __iomem *core_regs[3];
void __iomem *bridge_regs;
void __iomem *gca_regs;
+ void __iomem *sms_regs;
struct clk *clk;
struct reset_control *reset;
@@ -199,7 +207,7 @@ to_v3d_dev(struct drm_device *dev)
static inline bool
v3d_has_csd(struct v3d_dev *v3d)
{
- return v3d->ver >= 41;
+ return v3d->ver >= V3D_GEN_41;
}
#define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev)
@@ -261,6 +269,15 @@ to_v3d_fence(struct dma_fence *fence)
#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset)
#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset)
+#define V3D_SMS_IDLE 0x0
+#define V3D_SMS_ISOLATING_FOR_RESET 0xa
+#define V3D_SMS_RESETTING 0xb
+#define V3D_SMS_ISOLATING_FOR_POWER_OFF 0xc
+#define V3D_SMS_POWER_OFF_STATE 0xd
+
+#define V3D_SMS_READ(offset) readl(v3d->sms_regs + (offset))
+#define V3D_SMS_WRITE(offset, val) writel(val, v3d->sms_regs + (offset))
+
#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset)
#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset)
@@ -539,6 +556,7 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);
/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
+void v3d_reset_sms(struct v3d_dev *v3d);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
void v3d_clean_caches(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index b1e681630ded..d7d16da78db3 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -25,7 +25,7 @@ v3d_init_core(struct v3d_dev *v3d, int core)
* type. If you want the default behavior, you can still put
* "2" in the indirect texture state's output_type field.
*/
- if (v3d->ver < 40)
+ if (v3d->ver < V3D_GEN_41)
V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
/* Whenever we flush the L2T cache, we always want to flush
@@ -58,7 +58,7 @@ v3d_idle_axi(struct v3d_dev *v3d, int core)
static void
v3d_idle_gca(struct v3d_dev *v3d)
{
- if (v3d->ver >= 41)
+ if (v3d->ver >= V3D_GEN_41)
return;
V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);
@@ -105,6 +105,22 @@ v3d_reset_v3d(struct v3d_dev *v3d)
}
void
+v3d_reset_sms(struct v3d_dev *v3d)
+{
+ if (v3d->ver < V3D_GEN_71)
+ return;
+
+ V3D_SMS_WRITE(V3D_SMS_REE_CS, V3D_SET_FIELD(0x4, V3D_SMS_STATE));
+
+ if (wait_for(!(V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_REE_CS),
+ V3D_SMS_STATE) == V3D_SMS_ISOLATING_FOR_RESET) &&
+ !(V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_REE_CS),
+ V3D_SMS_STATE) == V3D_SMS_RESETTING), 100)) {
+ DRM_ERROR("Failed to wait for SMS reset\n");
+ }
+}
+
+void
v3d_reset(struct v3d_dev *v3d)
{
struct drm_device *dev = &v3d->drm;
@@ -119,6 +135,7 @@ v3d_reset(struct v3d_dev *v3d)
v3d_idle_axi(v3d, 0);
v3d_idle_gca(v3d);
+ v3d_reset_sms(v3d);
v3d_reset_v3d(v3d);
v3d_mmu_set_page_table(v3d);
@@ -132,13 +149,13 @@ v3d_reset(struct v3d_dev *v3d)
static void
v3d_flush_l3(struct v3d_dev *v3d)
{
- if (v3d->ver < 41) {
+ if (v3d->ver < V3D_GEN_41) {
u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);
V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);
- if (v3d->ver < 33) {
+ if (v3d->ver < V3D_GEN_33) {
V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
}
@@ -151,7 +168,7 @@ v3d_flush_l3(struct v3d_dev *v3d)
static void
v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
{
- if (v3d->ver > 32)
+ if (v3d->ver >= V3D_GEN_33)
return;
V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 72b6a119412f..2cca5d3a26a2 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -143,7 +143,7 @@ v3d_irq(int irq, void *arg)
/* We shouldn't be triggering these if we have GMP in
* always-allowed mode.
*/
- if (v3d->ver < 71 && (intsts & V3D_INT_GMPV))
+ if (v3d->ver < V3D_GEN_71 && (intsts & V3D_INT_GMPV))
dev_err(v3d->drm.dev, "GMP violation\n");
/* V3D 4.2 wires the hub and core IRQs together, so if we &
@@ -186,27 +186,59 @@ v3d_hub_irq(int irq, void *arg)
u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
u64 vio_addr = ((u64)V3D_READ(V3D_MMU_VIO_ADDR) <<
(v3d->va_width - 32));
- static const char *const v3d41_axi_ids[] = {
- "L2T",
- "PTB",
- "PSE",
- "TLB",
- "CLE",
- "TFU",
- "MMU",
- "GMP",
+ static const struct {
+ u32 begin;
+ u32 end;
+ const char *client;
+ } v3d41_axi_ids[] = {
+ {0x00, 0x20, "L2T"},
+ {0x20, 0x21, "PTB"},
+ {0x40, 0x41, "PSE"},
+ {0x60, 0x80, "TLB"},
+ {0x80, 0x88, "CLE"},
+ {0xA0, 0xA1, "TFU"},
+ {0xC0, 0xE0, "MMU"},
+ {0xE0, 0xE1, "GMP"},
+ }, v3d71_axi_ids[] = {
+ {0x00, 0x30, "L2T"},
+ {0x30, 0x38, "CLE"},
+ {0x38, 0x39, "PTB"},
+ {0x39, 0x3A, "PSE"},
+ {0x3A, 0x3B, "CSD"},
+ {0x40, 0x60, "TLB"},
+ {0x60, 0x70, "MMU"},
+ {0x7C, 0x7E, "TFU"},
+ {0x7F, 0x80, "GMP"},
};
const char *client = "?";
V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL));
- if (v3d->ver >= 41) {
- axi_id = axi_id >> 5;
- if (axi_id < ARRAY_SIZE(v3d41_axi_ids))
- client = v3d41_axi_ids[axi_id];
+ if (v3d->ver >= V3D_GEN_71) {
+ size_t i;
+
+ axi_id = axi_id & 0x7F;
+ for (i = 0; i < ARRAY_SIZE(v3d71_axi_ids); i++) {
+ if (axi_id >= v3d71_axi_ids[i].begin &&
+ axi_id < v3d71_axi_ids[i].end) {
+ client = v3d71_axi_ids[i].client;
+ break;
+ }
+ }
+ } else if (v3d->ver >= V3D_GEN_41) {
+ size_t i;
+
+ axi_id = axi_id & 0xFF;
+ for (i = 0; i < ARRAY_SIZE(v3d41_axi_ids); i++) {
+ if (axi_id >= v3d41_axi_ids[i].begin &&
+ axi_id < v3d41_axi_ids[i].end) {
+ client = v3d41_axi_ids[i].client;
+ break;
+ }
+ }
}
- dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
+ dev_err(v3d->drm.dev, "MMU error from client %s (0x%x) at 0x%llx%s%s%s\n",
client, axi_id, (long long)vio_addr,
((intsts & V3D_HUB_INT_MMU_WRV) ?
", write violation" : ""),
@@ -217,7 +249,7 @@ v3d_hub_irq(int irq, void *arg)
status = IRQ_HANDLED;
}
- if (v3d->ver >= 71 && (intsts & V3D_V7_HUB_INT_GMPV)) {
+ if (v3d->ver >= V3D_GEN_71 && (intsts & V3D_V7_HUB_INT_GMPV)) {
dev_err(v3d->drm.dev, "GMP Violation\n");
status = IRQ_HANDLED;
}
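The range tables replace the old shift-by-5 index because the per-client AXI ID ranges are no longer uniform 32-entry blocks. The decode reduces to a small standalone program (table copied from the hunk above; the sample ID is arbitrary):

#include <stddef.h>
#include <stdio.h>

struct axi_range { unsigned int begin, end; const char *client; };

static const struct axi_range v3d71_axi_ids[] = {
	{0x00, 0x30, "L2T"}, {0x30, 0x38, "CLE"}, {0x38, 0x39, "PTB"},
	{0x39, 0x3A, "PSE"}, {0x3A, 0x3B, "CSD"}, {0x40, 0x60, "TLB"},
	{0x60, 0x70, "MMU"}, {0x7C, 0x7E, "TFU"}, {0x7F, 0x80, "GMP"},
};

int main(void)
{
	unsigned int axi_id = 0x65 & 0x7F;	/* e.g. raw V3D_MMU_VIO_ID value */
	const char *client = "?";
	size_t i;

	for (i = 0; i < sizeof(v3d71_axi_ids) / sizeof(v3d71_axi_ids[0]); i++) {
		if (axi_id >= v3d71_axi_ids[i].begin &&
		    axi_id < v3d71_axi_ids[i].end) {
			client = v3d71_axi_ids[i].client;
			break;
		}
	}

	printf("AXI ID 0x%x -> %s\n", axi_id, client);	/* prints MMU */
	return 0;
}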
diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index 3ebda2fa46fc..9a3fe5255874 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -200,10 +200,10 @@ void v3d_perfmon_init(struct v3d_dev *v3d)
const struct v3d_perf_counter_desc *counters = NULL;
unsigned int max = 0;
- if (v3d->ver >= 71) {
+ if (v3d->ver >= V3D_GEN_71) {
counters = v3d_v71_performance_counters;
max = ARRAY_SIZE(v3d_v71_performance_counters);
- } else if (v3d->ver >= 42) {
+ } else if (v3d->ver >= V3D_GEN_42) {
counters = v3d_v42_performance_counters;
max = ARRAY_SIZE(v3d_v42_performance_counters);
}
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 6da3c69082bd..c1870265eaee 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -515,4 +515,30 @@
# define V3D_ERR_VPAERGS BIT(1)
# define V3D_ERR_VPAEABB BIT(0)
+#define V3D_SMS_REE_CS 0x00000
+#define V3D_SMS_TEE_CS 0x00400
+# define V3D_SMS_INTERRUPT BIT(31)
+# define V3D_SMS_POWER_OFF BIT(30)
+# define V3D_SMS_CLEAR_POWER_OFF BIT(29)
+# define V3D_SMS_LOCK BIT(28)
+# define V3D_SMS_CLEAR_LOCK BIT(27)
+# define V3D_SMS_SVP_MODE_EXIT BIT(26)
+# define V3D_SMS_CLEAR_SVP_MODE_EXIT BIT(25)
+# define V3D_SMS_SVP_MODE_ENTER BIT(24)
+# define V3D_SMS_CLEAR_SVP_MODE_ENTER BIT(23)
+# define V3D_SMS_THEIR_MODE_EXIT BIT(22)
+# define V3D_SMS_THEIR_MODE_ENTER BIT(21)
+# define V3D_SMS_OUR_MODE_EXIT BIT(20)
+# define V3D_SMS_CLEAR_OUR_MODE_EXIT BIT(19)
+# define V3D_SMS_SEQ_PC_MASK V3D_MASK(16, 10)
+# define V3D_SMS_SEQ_PC_SHIFT 10
+# define V3D_SMS_HUBCORE_STATUS_MASK V3D_MASK(9, 8)
+# define V3D_SMS_HUBCORE_STATUS_SHIFT 8
+# define V3D_SMS_NEW_MODE_MASK V3D_MASK(7, 6)
+# define V3D_SMS_NEW_MODE_SHIFT 6
+# define V3D_SMS_OLD_MODE_MASK V3D_MASK(5, 4)
+# define V3D_SMS_OLD_MODE_SHIFT 4
+# define V3D_SMS_STATE_MASK V3D_MASK(3, 0)
+# define V3D_SMS_STATE_SHIFT 0
+
#endif /* V3D_REGS_H */
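The new SMS mask/shift pairs follow the header's usual field convention; a hypothetical accessor (assumed, not part of the patch) showing how one of the fields decodes:

/* Extract the SMS state-machine field from a REE/TEE CS register value. */
static inline u32 v3d_sms_state(u32 cs)
{
	return (cs & V3D_SMS_STATE_MASK) >> V3D_SMS_STATE_SHIFT;
}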
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index eb35482f6fb5..35f131a46d07 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -357,11 +357,11 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
V3D_WRITE(V3D_TFU_ICA(v3d->ver), job->args.ica);
V3D_WRITE(V3D_TFU_IUA(v3d->ver), job->args.iua);
V3D_WRITE(V3D_TFU_IOA(v3d->ver), job->args.ioa);
- if (v3d->ver >= 71)
+ if (v3d->ver >= V3D_GEN_71)
V3D_WRITE(V3D_V7_TFU_IOC, job->args.v71.ioc);
V3D_WRITE(V3D_TFU_IOS(v3d->ver), job->args.ios);
V3D_WRITE(V3D_TFU_COEF0(v3d->ver), job->args.coef[0]);
- if (v3d->ver >= 71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
+ if (v3d->ver >= V3D_GEN_71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
V3D_WRITE(V3D_TFU_COEF1(v3d->ver), job->args.coef[1]);
V3D_WRITE(V3D_TFU_COEF2(v3d->ver), job->args.coef[2]);
V3D_WRITE(V3D_TFU_COEF3(v3d->ver), job->args.coef[3]);
@@ -412,7 +412,7 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
*
* XXX: Set the CFG7 register
*/
- if (v3d->ver >= 71)
+ if (v3d->ver >= V3D_GEN_71)
V3D_CORE_WRITE(0, V3D_V7_CSD_QUEUED_CFG7, 0);
/* CFG0 write kicks off the job. */
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
index e70d7c3076ac..577d9a956369 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
@@ -61,6 +61,19 @@ static const struct drm_display_mode default_mode = {
DRM_SIMPLE_MODE(640, 480, 64, 48)
};
+/**
+ * vc4_mock_atomic_add_output() - Enables an output in a state
+ * @test: The test context object
+ * @state: Atomic state to enable the output in.
+ * @type: Type of the output encoder
+ *
+ * Adds an output CRTC and connector to a state, and enables them.
+ *
+ * Returns:
+ * 0 on success, a negative error code on failure. If the error is
+ * EDEADLK, the entire atomic sequence must be restarted. All other
+ * errors are fatal.
+ */
int vc4_mock_atomic_add_output(struct kunit *test,
struct drm_atomic_state *state,
enum vc4_encoder_type type)
@@ -75,30 +88,49 @@ int vc4_mock_atomic_add_output(struct kunit *test,
int ret;
encoder = vc4_find_encoder_by_type(drm, type);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);
+ if (!encoder)
+ return -ENODEV;
crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
+ if (!crtc)
+ return -ENODEV;
output = encoder_to_vc4_dummy_output(encoder);
conn = &output->connector;
conn_state = drm_atomic_get_connector_state(state, conn);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
- KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
ret = drm_atomic_set_mode_for_crtc(crtc_state, &default_mode);
- KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
crtc_state->active = true;
return 0;
}
+/**
+ * vc4_mock_atomic_del_output() - Disables an output in a state
+ * @test: The test context object
+ * @state: Atomic state to disable the output in.
+ * @type: Type of the output encoder
+ *
+ * Adds an output CRTC and connector to a state, and disables them.
+ *
+ * Returns:
+ * 0 on success, a negative error code on failure. If the error is
+ * EDEADLK, the entire atomic sequence must be restarted. All other
+ * errors are fatal.
+ */
int vc4_mock_atomic_del_output(struct kunit *test,
struct drm_atomic_state *state,
enum vc4_encoder_type type)
@@ -113,26 +145,32 @@ int vc4_mock_atomic_del_output(struct kunit *test,
int ret;
encoder = vc4_find_encoder_by_type(drm, type);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);
+ if (!encoder)
+ return -ENODEV;
crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
+ if (!crtc)
+ return -ENODEV;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
crtc_state->active = false;
ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
- KUNIT_ASSERT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
output = encoder_to_vc4_dummy_output(encoder);
conn = &output->connector;
conn_state = drm_atomic_get_connector_state(state, conn);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
- KUNIT_ASSERT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
return 0;
}
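Both helpers now return -EDEADLK to the caller instead of asserting, so every test has to implement the restart contract from the kernel-doc above. A caller-side sketch of that contract (hypothetical test body; the pv-muxing tests below follow the same shape):

static void example_add_output(struct kunit *test, struct drm_device *drm)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);

	ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
	if (ret == -EDEADLK) {
		/* Drop contended locks and restart the whole sequence. */
		drm_atomic_state_clear(state);
		if (!drm_modeset_backoff(&ctx))
			goto retry;
	}
	KUNIT_ASSERT_EQ(test, ret, 0);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}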
diff --git a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
index 992e8f5c5c6e..d1f694029169 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
@@ -20,7 +20,6 @@
struct pv_muxing_priv {
struct vc4_dev *vc4;
- struct drm_atomic_state *state;
};
static bool check_fifo_conflict(struct kunit *test,
@@ -677,18 +676,41 @@ static void drm_vc4_test_pv_muxing(struct kunit *test)
{
const struct pv_muxing_param *params = test->param_value;
const struct pv_muxing_priv *priv = test->priv;
- struct drm_atomic_state *state = priv->state;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct drm_device *drm;
+ struct vc4_dev *vc4;
unsigned int i;
int ret;
+ drm_modeset_acquire_init(&ctx, 0);
+
+ vc4 = priv->vc4;
+ drm = &vc4->base;
+
+retry:
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
for (i = 0; i < params->nencoders; i++) {
enum vc4_encoder_type enc_type = params->encoders[i];
ret = vc4_mock_atomic_add_output(test, state, enc_type);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
}
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
KUNIT_EXPECT_TRUE(test,
@@ -700,33 +722,61 @@ static void drm_vc4_test_pv_muxing(struct kunit *test)
KUNIT_EXPECT_TRUE(test, check_channel_for_encoder(test, state, enc_type,
params->check_fn));
}
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
static void drm_vc4_test_pv_muxing_invalid(struct kunit *test)
{
const struct pv_muxing_param *params = test->param_value;
const struct pv_muxing_priv *priv = test->priv;
- struct drm_atomic_state *state = priv->state;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct drm_device *drm;
+ struct vc4_dev *vc4;
unsigned int i;
int ret;
+ drm_modeset_acquire_init(&ctx, 0);
+
+ vc4 = priv->vc4;
+ drm = &vc4->base;
+
+retry:
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
for (i = 0; i < params->nencoders; i++) {
enum vc4_encoder_type enc_type = params->encoders[i];
ret = vc4_mock_atomic_add_output(test, state, enc_type);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
}
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_EXPECT_LT(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
static int vc4_pv_muxing_test_init(struct kunit *test)
{
const struct pv_muxing_param *params = test->param_value;
- struct drm_modeset_acquire_ctx ctx;
struct pv_muxing_priv *priv;
- struct drm_device *drm;
struct vc4_dev *vc4;
priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
@@ -737,15 +787,6 @@ static int vc4_pv_muxing_test_init(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
priv->vc4 = vc4;
- drm_modeset_acquire_init(&ctx, 0);
-
- drm = &vc4->base;
- priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->state);
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
return 0;
}
@@ -800,13 +841,26 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
+retry_first:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -823,13 +877,26 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
+retry_second:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -874,16 +941,35 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
+retry_first:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -908,13 +994,26 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
+retry_second:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_del_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -968,25 +1067,50 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku
drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
+retry_first:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
-
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
+retry_second:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 779b22efe27b..458e5d987964 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -552,8 +552,6 @@ struct vc4_dsi {
struct vc4_encoder encoder;
struct mipi_dsi_host dsi_host;
- struct kref kref;
-
struct platform_device *pdev;
struct drm_bridge *out_bridge;
@@ -1160,12 +1158,13 @@ static void vc4_dsi_bridge_enable(struct drm_bridge *bridge,
}
static int vc4_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
/* Attach the panel or bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->out_bridge,
+ return drm_bridge_attach(encoder, dsi->out_bridge,
&dsi->bridge, flags);
}
@@ -1621,29 +1620,11 @@ static void vc4_dsi_dma_chan_release(void *ptr)
dsi->reg_dma_chan = NULL;
}
-static void vc4_dsi_release(struct kref *kref)
-{
- struct vc4_dsi *dsi =
- container_of(kref, struct vc4_dsi, kref);
-
- kfree(dsi);
-}
-
-static void vc4_dsi_get(struct vc4_dsi *dsi)
-{
- kref_get(&dsi->kref);
-}
-
-static void vc4_dsi_put(struct vc4_dsi *dsi)
-{
- kref_put(&dsi->kref, &vc4_dsi_release);
-}
-
static void vc4_dsi_release_action(struct drm_device *drm, void *ptr)
{
struct vc4_dsi *dsi = ptr;
- vc4_dsi_put(dsi);
+ drm_bridge_put(&dsi->bridge);
}
static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
@@ -1654,7 +1635,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
struct drm_encoder *encoder = &dsi->encoder.base;
int ret;
- vc4_dsi_get(dsi);
+ drm_bridge_get(&dsi->bridge);
ret = drmm_add_action_or_reset(drm, vc4_dsi_release_action, dsi);
if (ret)
@@ -1809,15 +1790,12 @@ static int vc4_dsi_dev_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct vc4_dsi *dsi;
- dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(&pdev->dev, struct vc4_dsi, bridge, &vc4_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
dev_set_drvdata(dev, dsi);
- kref_init(&dsi->kref);
-
dsi->pdev = pdev;
- dsi->bridge.funcs = &vc4_dsi_bridge_funcs;
#ifdef CONFIG_OF
dsi->bridge.of_node = dev->of_node;
#endif
@@ -1835,7 +1813,6 @@ static void vc4_dsi_dev_remove(struct platform_device *pdev)
struct vc4_dsi *dsi = dev_get_drvdata(dev);
mipi_dsi_host_unregister(&dsi->dsi_host);
- vc4_dsi_put(dsi);
}
struct platform_driver vc4_dsi_driver = {
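The conversion drops the private kref in favor of the bridge's own refcount: devm_drm_bridge_alloc() allocates the containing structure together with an initialized bridge, and users pin it with drm_bridge_get()/drm_bridge_put(). A stripped-down sketch of the resulting ownership model (hypothetical driver, assuming the helper semantics used above):

static const struct drm_bridge_funcs foo_bridge_funcs;

struct foo_dsi {
	struct drm_bridge bridge;	/* refcount that owns the allocation */
	struct platform_device *pdev;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_dsi *dsi;

	dsi = devm_drm_bridge_alloc(&pdev->dev, struct foo_dsi, bridge,
				    &foo_bridge_funcs);
	if (IS_ERR(dsi))
		return PTR_ERR(dsi);

	/* A second user pins the object through the bridge... */
	drm_bridge_get(&dsi->bridge);
	/* ...and releases it the same way; the final put frees foo_dsi. */
	drm_bridge_put(&dsi->bridge);

	return 0;
}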
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 37238a12baa5..a29a6ef266f9 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -51,6 +51,7 @@
#include <linux/reset.h>
#include <sound/dmaengine_pcm.h>
#include <sound/hdmi-codec.h>
+#include <sound/jack.h>
#include <sound/pcm_drm_eld.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -372,13 +373,13 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
* the lock for now.
*/
+ drm_atomic_helper_connector_hdmi_hotplug(connector, status);
+
if (status == connector_status_disconnected) {
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
return;
}
- drm_atomic_helper_connector_hdmi_hotplug(connector, status);
-
cec_s_phys_addr(vc4_hdmi->cec_adap,
connector->display_info.source_physical_address, false);
@@ -2175,6 +2176,22 @@ static const struct drm_connector_hdmi_audio_funcs vc4_hdmi_audio_funcs = {
.shutdown = vc4_hdmi_audio_shutdown,
};
+static int vc4_hdmi_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct vc4_hdmi *vc4_hdmi = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_component *component = snd_soc_rtd_to_codec(rtd, 0)->component;
+ int ret;
+
+ ret = snd_soc_card_jack_new(rtd->card, "HDMI Jack", SND_JACK_LINEOUT,
+ &vc4_hdmi->hdmi_jack);
+ if (ret) {
+ dev_err(rtd->dev, "HDMI Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ return snd_soc_component_set_jack(component, &vc4_hdmi->hdmi_jack, NULL);
+}
+
static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
{
const struct vc4_hdmi_register *mai_data =
@@ -2288,6 +2305,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
dai_link->cpus->dai_name = dev_name(dev);
dai_link->codecs->name = dev_name(&vc4_hdmi->connector.hdmi_audio.codec_pdev->dev);
dai_link->platforms->name = dev_name(dev);
+ dai_link->init = vc4_hdmi_codec_init;
card->dai_link = dai_link;
card->num_links = 1;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index e3d989ca302b..a31157c99bee 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -4,6 +4,7 @@
#include <drm/drm_connector.h>
#include <media/cec.h>
#include <sound/dmaengine_pcm.h>
+#include <sound/hdmi-codec.h>
#include <sound/soc.h>
#include "vc4_drv.h"
@@ -211,6 +212,12 @@ struct vc4_hdmi {
* KMS hooks. Protected by @mutex.
*/
enum hdmi_colorspace output_format;
+
+ /**
+ * @hdmi_jack: Represents the connection state of the HDMI plug, for
+ * ALSA jack detection.
+ */
+ struct snd_soc_jack hdmi_jack;
};
#define connector_to_vc4_hdmi(_connector) \
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index c5e84d3494d2..056d344c5411 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -2080,7 +2080,7 @@ static int vc6_plane_mode_set(struct drm_plane *plane,
/* HPPF plane 1 */
vc4_dlist_write(vc4_state, kernel);
/* VPPF plane 1 */
- vc4_dlist_write(vc4_state, kernel);
+ vc4_dlist_write(vc4_state, kernel);
}
}
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 37bb1fb58cf9..b611c7c8ca2d 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -53,25 +53,10 @@ static void vgem_fence_release(struct dma_fence *base)
dma_fence_free(&fence->base);
}
-static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size)
-{
- snprintf(str, size, "%llu", fence->seqno);
-}
-
-static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str,
- int size)
-{
- snprintf(str, size, "%llu",
- dma_fence_is_signaled(fence) ? fence->seqno : 0);
-}
-
static const struct dma_fence_ops vgem_fence_ops = {
.get_driver_name = vgem_fence_get_driver_name,
.get_timeline_name = vgem_fence_get_timeline_name,
.release = vgem_fence_release,
-
- .fence_value_str = vgem_fence_value_str,
- .timeline_value_str = vgem_fence_timeline_value_str,
};
static void vgem_fence_timeout(struct timer_list *t)
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f28357dbde35..44c1d8ef3c4d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -49,26 +49,10 @@ static bool virtio_gpu_fence_signaled(struct dma_fence *f)
return false;
}
-static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size)
-{
- snprintf(str, size, "[%llu, %llu]", f->context, f->seqno);
-}
-
-static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str,
- int size)
-{
- struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
-
- snprintf(str, size, "%llu",
- (u64)atomic64_read(&fence->drv->last_fence_id));
-}
-
static const struct dma_fence_ops virtio_gpu_fence_ops = {
.get_driver_name = virtio_gpu_get_driver_name,
.get_timeline_name = virtio_gpu_get_timeline_name,
.signaled = virtio_gpu_fence_signaled,
- .fence_value_str = virtio_gpu_fence_value_str,
- .timeline_value_str = virtio_gpu_timeline_value_str,
};
struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 87e584add042..698ea7adb951 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -366,7 +366,7 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
return 0;
obj = new_state->fb->obj[0];
- if (bo->dumb || obj->import_attach) {
+ if (bo->dumb || drm_gem_is_imported(obj)) {
vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
vgdev->fence_drv.context,
0);
@@ -374,7 +374,7 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
return -ENOMEM;
}
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
if (ret)
goto err_fence;
@@ -417,7 +417,7 @@ static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
}
obj = state->fb->obj[0];
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
virtio_gpu_cleanup_imported_obj(obj);
}
@@ -508,11 +508,19 @@ static int virtio_drm_get_scanout_buffer(struct drm_plane *plane,
bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
- /* Only support mapped shmem bo */
- if (virtio_gpu_is_vram(bo) || bo->base.base.import_attach || !bo->base.vaddr)
+ if (virtio_gpu_is_vram(bo) || drm_gem_is_imported(&bo->base.base))
return -ENODEV;
- iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+ if (bo->base.vaddr) {
+ iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+ } else {
+ struct drm_gem_shmem_object *shmem = &bo->base;
+
+ if (!shmem->pages)
+ return -ENODEV;
+ /* map scanout buffer later */
+ sb->pages = shmem->pages;
+ }
sb->format = plane->state->fb->format;
sb->height = plane->state->fb->height;
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 4de2a63ccd18..1118a0250279 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -75,7 +75,6 @@ static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
.ops = {
- .cache_sgt_mapping = true,
.attach = virtio_dma_buf_attach,
.detach = drm_gem_map_detach,
.map_dma_buf = virtgpu_gem_map_dma_buf,
@@ -205,16 +204,15 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
{
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
- struct dma_buf_attachment *attach = obj->import_attach;
- if (attach) {
- struct dma_buf *dmabuf = attach->dmabuf;
+ if (drm_gem_is_imported(obj)) {
+ struct dma_buf *dmabuf = obj->dma_buf;
dma_resv_lock(dmabuf->resv, NULL);
virtgpu_dma_buf_unmap(bo);
dma_resv_unlock(dmabuf->resv);
- dma_buf_detach(dmabuf, attach);
+ dma_buf_detach(dmabuf, obj->import_attach);
dma_buf_put(dmabuf);
}
diff --git a/drivers/gpu/drm/vkms/Kconfig b/drivers/gpu/drm/vkms/Kconfig
index 9def079f685b..3c02f928ffe6 100644
--- a/drivers/gpu/drm/vkms/Kconfig
+++ b/drivers/gpu/drm/vkms/Kconfig
@@ -14,3 +14,18 @@ config DRM_VKMS
a VKMS.
If M is selected the module will be called vkms.
+
+config DRM_VKMS_KUNIT_TEST
+ tristate "KUnit tests for VKMS" if !KUNIT_ALL_TESTS
+ depends on DRM_VKMS && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for VKMS. This option is not useful for
+ distributions or general kernels, but only for kernel
+ developers working on VKMS.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+
+ If in doubt, say "N".
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
index 1b28a6a32948..d657865e573f 100644
--- a/drivers/gpu/drm/vkms/Makefile
+++ b/drivers/gpu/drm/vkms/Makefile
@@ -6,6 +6,9 @@ vkms-y := \
vkms_formats.o \
vkms_crtc.o \
vkms_composer.o \
- vkms_writeback.o
+ vkms_writeback.o \
+ vkms_connector.o \
+ vkms_config.o
obj-$(CONFIG_DRM_VKMS) += vkms.o
+obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/vkms/tests/.kunitconfig b/drivers/gpu/drm/vkms/tests/.kunitconfig
new file mode 100644
index 000000000000..6a2d87068edc
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/.kunitconfig
@@ -0,0 +1,4 @@
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_VKMS=y
+CONFIG_DRM_VKMS_KUNIT_TEST=y
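With this .kunitconfig in place, the suite can typically be run from a kernel tree with the KUnit wrapper, e.g. ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/vkms/tests/.kunitconfig (path and flag as of current kunit.py; adjust for your tree).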
diff --git a/drivers/gpu/drm/vkms/tests/Makefile b/drivers/gpu/drm/vkms/tests/Makefile
new file mode 100644
index 000000000000..9ded37b67a46
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += vkms_config_test.o
diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c
new file mode 100644
index 000000000000..ff4566cf9925
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c
@@ -0,0 +1,951 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <kunit/test.h>
+
+#include "../vkms_config.h"
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+static size_t vkms_config_get_num_planes(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg;
+ size_t count = 0;
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ count++;
+
+ return count;
+}
+
+static size_t vkms_config_get_num_encoders(struct vkms_config *config)
+{
+ struct vkms_config_encoder *encoder_cfg;
+ size_t count = 0;
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ count++;
+
+ return count;
+}
+
+static size_t vkms_config_get_num_connectors(struct vkms_config *config)
+{
+ struct vkms_config_connector *connector_cfg;
+ size_t count = 0;
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ count++;
+
+ return count;
+}
+
+static struct vkms_config_plane *get_first_plane(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg;
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ return plane_cfg;
+
+ return NULL;
+}
+
+static struct vkms_config_crtc *get_first_crtc(struct vkms_config *config)
+{
+ struct vkms_config_crtc *crtc_cfg;
+
+ vkms_config_for_each_crtc(config, crtc_cfg)
+ return crtc_cfg;
+
+ return NULL;
+}
+
+static struct vkms_config_encoder *get_first_encoder(struct vkms_config *config)
+{
+ struct vkms_config_encoder *encoder_cfg;
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ return encoder_cfg;
+
+ return NULL;
+}
+
+static struct vkms_config_connector *get_first_connector(struct vkms_config *config)
+{
+ struct vkms_config_connector *connector_cfg;
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ return connector_cfg;
+
+ return NULL;
+}
+
+struct default_config_case {
+ bool enable_cursor;
+ bool enable_writeback;
+ bool enable_overlay;
+};
+
+static void vkms_config_test_empty_config(struct kunit *test)
+{
+ struct vkms_config *config;
+ const char *dev_name = "test";
+
+ config = vkms_config_create(dev_name);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* The dev_name string and the config have different lifetimes */
+ dev_name = NULL;
+ KUNIT_EXPECT_STREQ(test, vkms_config_get_device_name(config), "test");
+
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_planes(config), 0);
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_crtcs(config), 0);
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_encoders(config), 0);
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_connectors(config), 0);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static struct default_config_case default_config_cases[] = {
+ { false, false, false },
+ { true, false, false },
+ { true, true, false },
+ { true, false, true },
+ { false, true, false },
+ { false, true, true },
+ { false, false, true },
+ { true, true, true },
+};
+
+KUNIT_ARRAY_PARAM(default_config, default_config_cases, NULL);
+
+static void vkms_config_test_default_config(struct kunit *test)
+{
+ const struct default_config_case *params = test->param_value;
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ int n_primaries = 0;
+ int n_cursors = 0;
+ int n_overlays = 0;
+
+ config = vkms_config_default_create(params->enable_cursor,
+ params->enable_writeback,
+ params->enable_overlay);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Planes */
+ vkms_config_for_each_plane(config, plane_cfg) {
+ switch (vkms_config_plane_get_type(plane_cfg)) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ n_primaries++;
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ n_cursors++;
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ n_overlays++;
+ break;
+ default:
+ KUNIT_FAIL_AND_ABORT(test, "Unknown plane type");
+ }
+ }
+ KUNIT_EXPECT_EQ(test, n_primaries, 1);
+ KUNIT_EXPECT_EQ(test, n_cursors, params->enable_cursor ? 1 : 0);
+ KUNIT_EXPECT_EQ(test, n_overlays, params->enable_overlay ? 8 : 0);
+
+ /* CRTCs */
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_crtcs(config), 1);
+
+ crtc_cfg = get_first_crtc(config);
+ KUNIT_EXPECT_EQ(test, vkms_config_crtc_get_writeback(crtc_cfg),
+ params->enable_writeback);
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ int n_possible_crtcs = 0;
+ unsigned long idx = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ KUNIT_EXPECT_PTR_EQ(test, crtc_cfg, possible_crtc);
+ n_possible_crtcs++;
+ }
+ KUNIT_EXPECT_EQ(test, n_possible_crtcs, 1);
+ }
+
+ /* Encoders */
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_encoders(config), 1);
+
+ /* Connectors */
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_connectors(config), 1);
+
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_planes(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_plane *plane_cfg1, *plane_cfg2;
+ int n_planes = 0;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ n_planes++;
+ KUNIT_ASSERT_EQ(test, n_planes, 0);
+
+ plane_cfg1 = vkms_config_create_plane(config);
+ vkms_config_for_each_plane(config, plane_cfg) {
+ n_planes++;
+ if (plane_cfg != plane_cfg1)
+ KUNIT_FAIL(test, "Unexpected plane");
+ }
+ KUNIT_ASSERT_EQ(test, n_planes, 1);
+ n_planes = 0;
+
+ plane_cfg2 = vkms_config_create_plane(config);
+ vkms_config_for_each_plane(config, plane_cfg) {
+ n_planes++;
+ if (plane_cfg != plane_cfg1 && plane_cfg != plane_cfg2)
+ KUNIT_FAIL(test, "Unexpected plane");
+ }
+ KUNIT_ASSERT_EQ(test, n_planes, 2);
+ n_planes = 0;
+
+ vkms_config_destroy_plane(plane_cfg1);
+ vkms_config_for_each_plane(config, plane_cfg) {
+ n_planes++;
+ if (plane_cfg != plane_cfg2)
+ KUNIT_FAIL(test, "Unexpected plane");
+ }
+ KUNIT_ASSERT_EQ(test, n_planes, 1);
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 0);
+ vkms_config_for_each_crtc(config, crtc_cfg)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+
+ crtc_cfg1 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 1);
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (crtc_cfg != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+ }
+
+ crtc_cfg2 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 2);
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (crtc_cfg != crtc_cfg1 && crtc_cfg != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+ }
+
+ vkms_config_destroy_crtc(config, crtc_cfg2);
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 1);
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (crtc_cfg != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+ }
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_encoders(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ int n_encoders = 0;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ n_encoders++;
+ KUNIT_ASSERT_EQ(test, n_encoders, 0);
+
+ encoder_cfg1 = vkms_config_create_encoder(config);
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ n_encoders++;
+ if (encoder_cfg != encoder_cfg1)
+ KUNIT_FAIL(test, "Unexpected encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+ n_encoders = 0;
+
+ encoder_cfg2 = vkms_config_create_encoder(config);
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ n_encoders++;
+ if (encoder_cfg != encoder_cfg1 && encoder_cfg != encoder_cfg2)
+ KUNIT_FAIL(test, "Unexpected encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 2);
+ n_encoders = 0;
+
+ vkms_config_destroy_encoder(config, encoder_cfg2);
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ n_encoders++;
+ if (encoder_cfg != encoder_cfg1)
+ KUNIT_FAIL(test, "Unexpected encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+ n_encoders = 0;
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_connectors(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_connector *connector_cfg;
+ struct vkms_config_connector *connector_cfg1, *connector_cfg2;
+ int n_connectors = 0;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ n_connectors++;
+ KUNIT_ASSERT_EQ(test, n_connectors, 0);
+
+ connector_cfg1 = vkms_config_create_connector(config);
+ vkms_config_for_each_connector(config, connector_cfg) {
+ n_connectors++;
+ if (connector_cfg != connector_cfg1)
+ KUNIT_FAIL(test, "Unexpected connector");
+ }
+ KUNIT_ASSERT_EQ(test, n_connectors, 1);
+ n_connectors = 0;
+
+ connector_cfg2 = vkms_config_create_connector(config);
+ vkms_config_for_each_connector(config, connector_cfg) {
+ n_connectors++;
+ if (connector_cfg != connector_cfg1 &&
+ connector_cfg != connector_cfg2)
+ KUNIT_FAIL(test, "Unexpected connector");
+ }
+ KUNIT_ASSERT_EQ(test, n_connectors, 2);
+ n_connectors = 0;
+
+ vkms_config_destroy_connector(connector_cfg2);
+ vkms_config_for_each_connector(config, connector_cfg) {
+ n_connectors++;
+ if (connector_cfg != connector_cfg1)
+ KUNIT_FAIL(test, "Unexpected connector");
+ }
+ KUNIT_ASSERT_EQ(test, n_connectors, 1);
+ n_connectors = 0;
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_plane_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No planes */
+ plane_cfg = get_first_plane(config);
+ vkms_config_destroy_plane(plane_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many planes */
+ for (n = 0; n <= 32; n++)
+ vkms_config_create_plane(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_plane_type(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ int err;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ plane_cfg = get_first_plane(config);
+ vkms_config_destroy_plane(plane_cfg);
+
+ crtc_cfg = get_first_crtc(config);
+
+ /* Invalid: No primary plane */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Multiple primary planes */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: One primary plane */
+ vkms_config_destroy_plane(plane_cfg);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Multiple cursor planes */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: One primary and one cursor plane */
+ vkms_config_destroy_plane(plane_cfg);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Second CRTC without primary plane */
+ crtc_cfg = vkms_config_create_crtc(config);
+ encoder_cfg = vkms_config_create_encoder(config);
+ err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: Second CRTC with a primary plane */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_plane_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ plane_cfg = get_first_plane(config);
+ crtc_cfg = get_first_crtc(config);
+
+ /* Invalid: Primary plane without a possible CRTC */
+ vkms_config_plane_detach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_crtc_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_crtc *crtc_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No CRTCs */
+ crtc_cfg = get_first_crtc(config);
+ vkms_config_destroy_crtc(config, crtc_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many CRTCs */
+ for (n = 0; n <= 32; n++)
+ vkms_config_create_crtc(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_encoder_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No encoders */
+ encoder_cfg = get_first_encoder(config);
+ vkms_config_destroy_encoder(config, encoder_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many encoders */
+ for (n = 0; n <= 32; n++)
+ vkms_config_create_encoder(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_encoder_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_encoder *encoder_cfg;
+ int err;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ crtc_cfg1 = get_first_crtc(config);
+
+ /* Invalid: Encoder without a possible CRTC */
+ encoder_cfg = vkms_config_create_encoder(config);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: Second CRTC with shared encoder */
+ crtc_cfg2 = vkms_config_create_crtc(config);
+
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Second CRTC without encoders */
+ vkms_config_encoder_detach_crtc(encoder_cfg, crtc_cfg2);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: First CRTC with 2 possible encoders */
+ vkms_config_destroy_plane(plane_cfg);
+ vkms_config_destroy_crtc(config, crtc_cfg2);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_connector_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_connector *connector_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No connectors */
+ connector_cfg = get_first_connector(config);
+ vkms_config_destroy_connector(connector_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many connectors */
+ for (n = 0; n <= 32; n++)
+ connector_cfg = vkms_config_create_connector(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_connector_possible_encoders(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ encoder_cfg = get_first_encoder(config);
+ connector_cfg = get_first_connector(config);
+
+ /* Invalid: Connector without a possible encoder */
+ vkms_config_connector_detach_encoder(connector_cfg, encoder_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_attach_different_configs(struct kunit *test)
+{
+ struct vkms_config *config1, *config2;
+ struct vkms_config_plane *plane_cfg1, *plane_cfg2;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ struct vkms_config_connector *connector_cfg1, *connector_cfg2;
+ int err;
+
+ config1 = vkms_config_create("test1");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config1);
+
+ config2 = vkms_config_create("test2");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config2);
+
+ plane_cfg1 = vkms_config_create_plane(config1);
+ crtc_cfg1 = vkms_config_create_crtc(config1);
+ encoder_cfg1 = vkms_config_create_encoder(config1);
+ connector_cfg1 = vkms_config_create_connector(config1);
+
+ plane_cfg2 = vkms_config_create_plane(config2);
+ crtc_cfg2 = vkms_config_create_crtc(config2);
+ encoder_cfg2 = vkms_config_create_encoder(config2);
+ connector_cfg2 = vkms_config_create_connector(config2);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg2);
+
+ err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_NE(test, err, 0);
+ err = vkms_config_plane_attach_crtc(plane_cfg2, crtc_cfg1);
+ KUNIT_EXPECT_NE(test, err, 0);
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_NE(test, err, 0);
+ err = vkms_config_encoder_attach_crtc(encoder_cfg2, crtc_cfg1);
+ KUNIT_EXPECT_NE(test, err, 0);
+
+ err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg2);
+ KUNIT_EXPECT_NE(test, err, 0);
+ err = vkms_config_connector_attach_encoder(connector_cfg2, encoder_cfg1);
+ KUNIT_EXPECT_NE(test, err, 0);
+
+ vkms_config_destroy(config1);
+ vkms_config_destroy(config2);
+}
+
+static void vkms_config_test_plane_attach_crtc(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *overlay_cfg;
+ struct vkms_config_plane *primary_cfg;
+ struct vkms_config_plane *cursor_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ overlay_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(overlay_cfg, DRM_PLANE_TYPE_OVERLAY);
+ primary_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(primary_cfg, DRM_PLANE_TYPE_PRIMARY);
+ cursor_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(cursor_cfg, DRM_PLANE_TYPE_CURSOR);
+
+ crtc_cfg = vkms_config_create_crtc(config);
+
+ /* No primary or cursor planes */
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg));
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ /* Overlay plane, but no primary or cursor planes */
+ err = vkms_config_plane_attach_crtc(overlay_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg));
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ /* Primary plane, attaching it twice must fail */
+ err = vkms_config_plane_attach_crtc(primary_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_plane_attach_crtc(primary_cfg, crtc_cfg);
+ KUNIT_EXPECT_NE(test, err, 0);
+ KUNIT_EXPECT_PTR_EQ(test,
+ vkms_config_crtc_primary_plane(config, crtc_cfg),
+ primary_cfg);
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ /* Primary and cursor planes */
+ err = vkms_config_plane_attach_crtc(cursor_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_PTR_EQ(test,
+ vkms_config_crtc_primary_plane(config, crtc_cfg),
+ primary_cfg);
+ KUNIT_EXPECT_PTR_EQ(test,
+ vkms_config_crtc_cursor_plane(config, crtc_cfg),
+ cursor_cfg);
+
+ /* Detach primary and destroy cursor plane */
+ vkms_config_plane_detach_crtc(overlay_cfg, crtc_cfg);
+ vkms_config_plane_detach_crtc(primary_cfg, crtc_cfg);
+ vkms_config_destroy_plane(cursor_cfg);
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg));
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_plane_get_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg1, *plane_cfg2;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ int n_crtcs = 0;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ plane_cfg1 = vkms_config_create_plane(config);
+ plane_cfg2 = vkms_config_create_plane(config);
+ crtc_cfg1 = vkms_config_create_crtc(config);
+ crtc_cfg2 = vkms_config_create_crtc(config);
+
+ /* No possible CRTCs */
+ vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Plane 1 attached to CRTC 1 and 2 */
+ err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1 && possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 2);
+ n_crtcs = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Plane 1 attached to CRTC 1 and plane 2 to CRTC 2 */
+ vkms_config_plane_detach_crtc(plane_cfg1, crtc_cfg2);
+ vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+ n_crtcs = 0;
+
+ err = vkms_config_plane_attach_crtc(plane_cfg2, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_encoder_get_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ int n_crtcs = 0;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ encoder_cfg1 = vkms_config_create_encoder(config);
+ encoder_cfg2 = vkms_config_create_encoder(config);
+ crtc_cfg1 = vkms_config_create_crtc(config);
+ crtc_cfg2 = vkms_config_create_crtc(config);
+
+ /* No possible CRTCs */
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Encoder 1 attached to CRTC 1 and 2 */
+ err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1 && possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 2);
+ n_crtcs = 0;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Encoder 1 attached to CRTC 1 and encoder 2 to CRTC 2 */
+ vkms_config_encoder_detach_crtc(encoder_cfg1, crtc_cfg2);
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+ n_crtcs = 0;
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg2, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_connector_get_possible_encoders(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_connector *connector_cfg1, *connector_cfg2;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+ int n_encoders = 0;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ connector_cfg1 = vkms_config_create_connector(config);
+ connector_cfg2 = vkms_config_create_connector(config);
+ encoder_cfg1 = vkms_config_create_encoder(config);
+ encoder_cfg2 = vkms_config_create_encoder(config);
+
+ /* No possible encoders */
+ vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx,
+ possible_encoder)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx,
+ possible_encoder)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+
+ /* Connector 1 attached to encoders 1 and 2 */
+ err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx,
+ possible_encoder) {
+ n_encoders++;
+ if (possible_encoder != encoder_cfg1 &&
+ possible_encoder != encoder_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 2);
+ n_encoders = 0;
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx,
+ possible_encoder)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+
+ /* Connector 1 attached to encoder 1 and connector 2 to encoder 2 */
+ vkms_config_connector_detach_encoder(connector_cfg1, encoder_cfg2);
+ vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx,
+ possible_encoder) {
+ n_encoders++;
+ if (possible_encoder != encoder_cfg1)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+ n_encoders = 0;
+
+ err = vkms_config_connector_attach_encoder(connector_cfg2, encoder_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx,
+ possible_encoder) {
+ n_encoders++;
+ if (possible_encoder != encoder_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+
+ vkms_config_destroy(config);
+}
+
+static struct kunit_case vkms_config_test_cases[] = {
+ KUNIT_CASE(vkms_config_test_empty_config),
+ KUNIT_CASE_PARAM(vkms_config_test_default_config,
+ default_config_gen_params),
+ KUNIT_CASE(vkms_config_test_get_planes),
+ KUNIT_CASE(vkms_config_test_get_crtcs),
+ KUNIT_CASE(vkms_config_test_get_encoders),
+ KUNIT_CASE(vkms_config_test_get_connectors),
+ KUNIT_CASE(vkms_config_test_invalid_plane_number),
+ KUNIT_CASE(vkms_config_test_valid_plane_type),
+ KUNIT_CASE(vkms_config_test_valid_plane_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_invalid_crtc_number),
+ KUNIT_CASE(vkms_config_test_invalid_encoder_number),
+ KUNIT_CASE(vkms_config_test_valid_encoder_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_invalid_connector_number),
+ KUNIT_CASE(vkms_config_test_valid_connector_possible_encoders),
+ KUNIT_CASE(vkms_config_test_attach_different_configs),
+ KUNIT_CASE(vkms_config_test_plane_attach_crtc),
+ KUNIT_CASE(vkms_config_test_plane_get_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_encoder_get_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_connector_get_possible_encoders),
+ {}
+};
+
+static struct kunit_suite vkms_config_test_suite = {
+ .name = "vkms-config",
+ .test_cases = vkms_config_test_cases,
+};
+
+kunit_test_suite(vkms_config_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Kunit test for vkms config utility");
diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c
new file mode 100644
index 000000000000..a1df5659b0fb
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_config.c
@@ -0,0 +1,640 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/slab.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_debugfs.h>
+#include <kunit/visibility.h>
+
+#include "vkms_config.h"
+
+struct vkms_config *vkms_config_create(const char *dev_name)
+{
+ struct vkms_config *config;
+
+ config = kzalloc(sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return ERR_PTR(-ENOMEM);
+
+ config->dev_name = kstrdup_const(dev_name, GFP_KERNEL);
+ if (!config->dev_name) {
+ kfree(config);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_LIST_HEAD(&config->planes);
+ INIT_LIST_HEAD(&config->crtcs);
+ INIT_LIST_HEAD(&config->encoders);
+ INIT_LIST_HEAD(&config->connectors);
+
+ return config;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create);
+
+struct vkms_config *vkms_config_default_create(bool enable_cursor,
+ bool enable_writeback,
+ bool enable_overlay)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
+ int n;
+
+ config = vkms_config_create(DEFAULT_DEVICE_NAME);
+ if (IS_ERR(config))
+ return config;
+
+ plane_cfg = vkms_config_create_plane(config);
+ if (IS_ERR(plane_cfg))
+ goto err_alloc;
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+
+ crtc_cfg = vkms_config_create_crtc(config);
+ if (IS_ERR(crtc_cfg))
+ goto err_alloc;
+ vkms_config_crtc_set_writeback(crtc_cfg, enable_writeback);
+
+ if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg))
+ goto err_alloc;
+
+ if (enable_overlay) {
+ for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
+ plane_cfg = vkms_config_create_plane(config);
+ if (IS_ERR(plane_cfg))
+ goto err_alloc;
+
+ vkms_config_plane_set_type(plane_cfg,
+ DRM_PLANE_TYPE_OVERLAY);
+
+ if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg))
+ goto err_alloc;
+ }
+ }
+
+ if (enable_cursor) {
+ plane_cfg = vkms_config_create_plane(config);
+ if (IS_ERR(plane_cfg))
+ goto err_alloc;
+
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
+
+ if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg))
+ goto err_alloc;
+ }
+
+ encoder_cfg = vkms_config_create_encoder(config);
+ if (IS_ERR(encoder_cfg))
+ goto err_alloc;
+
+ if (vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg))
+ goto err_alloc;
+
+ connector_cfg = vkms_config_create_connector(config);
+ if (IS_ERR(connector_cfg))
+ goto err_alloc;
+
+ if (vkms_config_connector_attach_encoder(connector_cfg, encoder_cfg))
+ goto err_alloc;
+
+ return config;
+
+err_alloc:
+ vkms_config_destroy(config);
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_default_create);
+
+void vkms_config_destroy(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg, *plane_tmp;
+ struct vkms_config_crtc *crtc_cfg, *crtc_tmp;
+ struct vkms_config_encoder *encoder_cfg, *encoder_tmp;
+ struct vkms_config_connector *connector_cfg, *connector_tmp;
+
+ list_for_each_entry_safe(plane_cfg, plane_tmp, &config->planes, link)
+ vkms_config_destroy_plane(plane_cfg);
+
+ list_for_each_entry_safe(crtc_cfg, crtc_tmp, &config->crtcs, link)
+ vkms_config_destroy_crtc(config, crtc_cfg);
+
+ list_for_each_entry_safe(encoder_cfg, encoder_tmp, &config->encoders, link)
+ vkms_config_destroy_encoder(config, encoder_cfg);
+
+ list_for_each_entry_safe(connector_cfg, connector_tmp, &config->connectors, link)
+ vkms_config_destroy_connector(connector_cfg);
+
+ kfree_const(config->dev_name);
+ kfree(config);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy);
+
+static bool valid_plane_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_planes;
+
+ n_planes = list_count_nodes((struct list_head *)&config->planes);
+ if (n_planes <= 0 || n_planes >= 32) {
+ drm_info(dev, "The number of planes must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_planes_for_crtc(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_plane *plane_cfg;
+ bool has_primary_plane = false;
+ bool has_cursor_plane = false;
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ enum drm_plane_type type;
+
+ type = vkms_config_plane_get_type(plane_cfg);
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc != crtc_cfg)
+ continue;
+
+ if (type == DRM_PLANE_TYPE_PRIMARY) {
+ if (has_primary_plane) {
+ drm_info(dev, "Multiple primary planes\n");
+ return false;
+ }
+
+ has_primary_plane = true;
+ } else if (type == DRM_PLANE_TYPE_CURSOR) {
+ if (has_cursor_plane) {
+ drm_info(dev, "Multiple cursor planes\n");
+ return false;
+ }
+
+ has_cursor_plane = true;
+ }
+ }
+ }
+
+ if (!has_primary_plane) {
+ drm_info(dev, "Primary plane not found\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_plane_possible_crtcs(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_plane *plane_cfg;
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ if (xa_empty(&plane_cfg->possible_crtcs)) {
+ drm_info(dev, "All planes must have at least one possible CRTC\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool valid_crtc_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_crtcs;
+
+ n_crtcs = list_count_nodes((struct list_head *)&config->crtcs);
+ if (n_crtcs <= 0 || n_crtcs >= 32) {
+ drm_info(dev, "The number of CRTCs must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_encoder_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_encoders;
+
+ n_encoders = list_count_nodes((struct list_head *)&config->encoders);
+ if (n_encoders <= 0 || n_encoders >= 32) {
+ drm_info(dev, "The number of encoders must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_encoder_possible_crtcs(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ if (xa_empty(&encoder_cfg->possible_crtcs)) {
+ drm_info(dev, "All encoders must have at least one possible CRTC\n");
+ return false;
+ }
+ }
+
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ bool crtc_has_encoder = false;
+
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg,
+ idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ crtc_has_encoder = true;
+ }
+ }
+
+ if (!crtc_has_encoder) {
+ drm_info(dev, "All CRTCs must have at least one possible encoder\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool valid_connector_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_connectors;
+
+ n_connectors = list_count_nodes((struct list_head *)&config->connectors);
+ if (n_connectors <= 0 || n_connectors >= 32) {
+ drm_info(dev, "The number of connectors must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_connector_possible_encoders(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_connector *connector_cfg;
+
+ vkms_config_for_each_connector(config, connector_cfg) {
+ if (xa_empty(&connector_cfg->possible_encoders)) {
+ drm_info(dev,
+ "All connectors must have at least one possible encoder\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool vkms_config_is_valid(const struct vkms_config *config)
+{
+ struct vkms_config_crtc *crtc_cfg;
+
+ if (!valid_plane_number(config))
+ return false;
+
+ if (!valid_crtc_number(config))
+ return false;
+
+ if (!valid_encoder_number(config))
+ return false;
+
+ if (!valid_connector_number(config))
+ return false;
+
+ if (!valid_plane_possible_crtcs(config))
+ return false;
+
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (!valid_planes_for_crtc(config, crtc_cfg))
+ return false;
+ }
+
+ if (!valid_encoder_possible_crtcs(config))
+ return false;
+
+ if (!valid_connector_possible_encoders(config))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_is_valid);
+
+static int vkms_config_show(struct seq_file *m, void *data)
+{
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
+ struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
+ const char *dev_name;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
+
+ dev_name = vkms_config_get_device_name((struct vkms_config *)vkmsdev->config);
+ seq_printf(m, "dev_name=%s\n", dev_name);
+
+ vkms_config_for_each_plane(vkmsdev->config, plane_cfg) {
+ seq_puts(m, "plane:\n");
+ seq_printf(m, "\ttype=%d\n",
+ vkms_config_plane_get_type(plane_cfg));
+ }
+
+ vkms_config_for_each_crtc(vkmsdev->config, crtc_cfg) {
+ seq_puts(m, "crtc:\n");
+ seq_printf(m, "\twriteback=%d\n",
+ vkms_config_crtc_get_writeback(crtc_cfg));
+ }
+
+ vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg)
+ seq_puts(m, "encoder\n");
+
+ vkms_config_for_each_connector(vkmsdev->config, connector_cfg)
+ seq_puts(m, "connector\n");
+
+ return 0;
+}
+
+static const struct drm_debugfs_info vkms_config_debugfs_list[] = {
+ { "vkms_config", vkms_config_show, 0 },
+};
+
+void vkms_config_register_debugfs(struct vkms_device *vkms_device)
+{
+ drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list,
+ ARRAY_SIZE(vkms_config_debugfs_list));
+}
+
+struct vkms_config_plane *vkms_config_create_plane(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg;
+
+ plane_cfg = kzalloc(sizeof(*plane_cfg), GFP_KERNEL);
+ if (!plane_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ plane_cfg->config = config;
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY);
+ xa_init_flags(&plane_cfg->possible_crtcs, XA_FLAGS_ALLOC);
+
+ list_add_tail(&plane_cfg->link, &config->planes);
+
+ return plane_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_plane);
+
+void vkms_config_destroy_plane(struct vkms_config_plane *plane_cfg)
+{
+ xa_destroy(&plane_cfg->possible_crtcs);
+ list_del(&plane_cfg->link);
+ kfree(plane_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_plane);
+
+int __must_check vkms_config_plane_attach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ u32 crtc_idx = 0;
+
+ if (plane_cfg->config != crtc_cfg->config)
+ return -EINVAL;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ return -EEXIST;
+ }
+
+ return xa_alloc(&plane_cfg->possible_crtcs, &crtc_idx, crtc_cfg,
+ xa_limit_32b, GFP_KERNEL);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_plane_attach_crtc);
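+
+/*
+ * Usage sketch (illustrative only, not compiled here): the attach helpers
+ * return -EINVAL when the plane and the CRTC belong to different
+ * configurations and -EEXIST when the pair is already attached, so a caller
+ * that does not care about duplicates can do:
+ *
+ *	int err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ *
+ *	if (err && err != -EEXIST)
+ *		return err;
+ */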
+
+void vkms_config_plane_detach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ xa_erase(&plane_cfg->possible_crtcs, idx);
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_plane_detach_crtc);
+
+struct vkms_config_crtc *vkms_config_create_crtc(struct vkms_config *config)
+{
+ struct vkms_config_crtc *crtc_cfg;
+
+ crtc_cfg = kzalloc(sizeof(*crtc_cfg), GFP_KERNEL);
+ if (!crtc_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ crtc_cfg->config = config;
+ vkms_config_crtc_set_writeback(crtc_cfg, false);
+
+ list_add_tail(&crtc_cfg->link, &config->crtcs);
+
+ return crtc_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_crtc);
+
+void vkms_config_destroy_crtc(struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ vkms_config_plane_detach_crtc(plane_cfg, crtc_cfg);
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ vkms_config_encoder_detach_crtc(encoder_cfg, crtc_cfg);
+
+ list_del(&crtc_cfg->link);
+ kfree(crtc_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_crtc);
+
+/**
+ * vkms_config_crtc_get_plane() - Return the first plane of a given type
+ * attached to a CRTC
+ * @config: Configuration containing the CRTC and the plane
+ * @crtc_cfg: Only find planes attached to this CRTC
+ * @type: Plane type to search
+ *
+ * Returns:
+ * The first plane of type @type attached to @crtc_cfg, or NULL if none is
+ * found.
+ */
+static struct vkms_config_plane *vkms_config_crtc_get_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg,
+ enum drm_plane_type type)
+{
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *possible_crtc;
+ enum drm_plane_type current_type;
+ unsigned long idx = 0;
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ current_type = vkms_config_plane_get_type(plane_cfg);
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg && current_type == type)
+ return plane_cfg;
+ }
+ }
+
+ return NULL;
+}
+
+struct vkms_config_plane *vkms_config_crtc_primary_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ return vkms_config_crtc_get_plane(config, crtc_cfg, DRM_PLANE_TYPE_PRIMARY);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_crtc_primary_plane);
+
+struct vkms_config_plane *vkms_config_crtc_cursor_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ return vkms_config_crtc_get_plane(config, crtc_cfg, DRM_PLANE_TYPE_CURSOR);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_crtc_cursor_plane);
+
+struct vkms_config_encoder *vkms_config_create_encoder(struct vkms_config *config)
+{
+ struct vkms_config_encoder *encoder_cfg;
+
+ encoder_cfg = kzalloc(sizeof(*encoder_cfg), GFP_KERNEL);
+ if (!encoder_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ encoder_cfg->config = config;
+ xa_init_flags(&encoder_cfg->possible_crtcs, XA_FLAGS_ALLOC);
+
+ list_add_tail(&encoder_cfg->link, &config->encoders);
+
+ return encoder_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_encoder);
+
+void vkms_config_destroy_encoder(struct vkms_config *config,
+ struct vkms_config_encoder *encoder_cfg)
+{
+ struct vkms_config_connector *connector_cfg;
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ vkms_config_connector_detach_encoder(connector_cfg, encoder_cfg);
+
+ xa_destroy(&encoder_cfg->possible_crtcs);
+ list_del(&encoder_cfg->link);
+ kfree(encoder_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_encoder);
+
+int __must_check vkms_config_encoder_attach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ u32 crtc_idx = 0;
+
+ if (encoder_cfg->config != crtc_cfg->config)
+ return -EINVAL;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ return -EEXIST;
+ }
+
+ return xa_alloc(&encoder_cfg->possible_crtcs, &crtc_idx, crtc_cfg,
+ xa_limit_32b, GFP_KERNEL);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_encoder_attach_crtc);
+
+void vkms_config_encoder_detach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ xa_erase(&encoder_cfg->possible_crtcs, idx);
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_encoder_detach_crtc);
+
+struct vkms_config_connector *vkms_config_create_connector(struct vkms_config *config)
+{
+ struct vkms_config_connector *connector_cfg;
+
+ connector_cfg = kzalloc(sizeof(*connector_cfg), GFP_KERNEL);
+ if (!connector_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ connector_cfg->config = config;
+ xa_init_flags(&connector_cfg->possible_encoders, XA_FLAGS_ALLOC);
+
+ list_add_tail(&connector_cfg->link, &config->connectors);
+
+ return connector_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_connector);
+
+void vkms_config_destroy_connector(struct vkms_config_connector *connector_cfg)
+{
+ xa_destroy(&connector_cfg->possible_encoders);
+ list_del(&connector_cfg->link);
+ kfree(connector_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_connector);
+
+int __must_check vkms_config_connector_attach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg)
+{
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+ u32 encoder_idx = 0;
+
+ if (connector_cfg->config != encoder_cfg->config)
+ return -EINVAL;
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg, idx,
+ possible_encoder) {
+ if (possible_encoder == encoder_cfg)
+ return -EEXIST;
+ }
+
+ return xa_alloc(&connector_cfg->possible_encoders, &encoder_idx,
+ encoder_cfg, xa_limit_32b, GFP_KERNEL);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_connector_attach_encoder);
+
+void vkms_config_connector_detach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg)
+{
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg, idx,
+ possible_encoder) {
+ if (possible_encoder == encoder_cfg)
+ xa_erase(&connector_cfg->possible_encoders, idx);
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_connector_detach_encoder);
diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h
new file mode 100644
index 000000000000..0118e3f99706
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_config.h
@@ -0,0 +1,437 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _VKMS_CONFIG_H_
+#define _VKMS_CONFIG_H_
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+
+#include "vkms_drv.h"
+
+/**
+ * struct vkms_config - General configuration for VKMS driver
+ *
+ * @dev_name: Name of the device
+ * @planes: List of planes configured for the device
+ * @crtcs: List of CRTCs configured for the device
+ * @encoders: List of encoders configured for the device
+ * @connectors: List of connectors configured for the device
+ * @dev: Used to store the current VKMS device. Only set when the device is instantiated.
+ */
+struct vkms_config {
+ const char *dev_name;
+ struct list_head planes;
+ struct list_head crtcs;
+ struct list_head encoders;
+ struct list_head connectors;
+ struct vkms_device *dev;
+};
+
+/**
+ * struct vkms_config_plane
+ *
+ * @link: Link to the other planes in vkms_config
+ * @config: The vkms_config this plane belongs to
+ * @type: Type of the plane. The creator of the configuration needs to ensure
+ *        that at least one primary plane is present.
+ * @possible_crtcs: Array of CRTCs that can be used with this plane
+ * @plane: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS plane during
+ * device creation. This pointer is not managed by the configuration and
+ * must be managed by other means.
+ */
+struct vkms_config_plane {
+ struct list_head link;
+ struct vkms_config *config;
+
+ enum drm_plane_type type;
+ struct xarray possible_crtcs;
+
+ /* Internal usage */
+ struct vkms_plane *plane;
+};
+
+/**
+ * struct vkms_config_crtc
+ *
+ * @link: Link to the other CRTCs in vkms_config
+ * @config: The vkms_config this CRTC belongs to
+ * @writeback: If true, a writeback buffer can be attached to the CRTC
+ * @crtc: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS CRTC during
+ * device creation. This pointer is not managed by the configuration and
+ * must be managed by other means.
+ */
+struct vkms_config_crtc {
+ struct list_head link;
+ struct vkms_config *config;
+
+ bool writeback;
+
+ /* Internal usage */
+ struct vkms_output *crtc;
+};
+
+/**
+ * struct vkms_config_encoder
+ *
+ * @link: Link to the other encoders in vkms_config
+ * @config: The vkms_config this encoder belongs to
+ * @possible_crtcs: Array of CRTCs that can be used with this encoder
+ * @encoder: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS encoder
+ * during device creation. This pointer is not managed by the
+ * configuration and must be managed by other means.
+ */
+struct vkms_config_encoder {
+ struct list_head link;
+ struct vkms_config *config;
+
+ struct xarray possible_crtcs;
+
+ /* Internal usage */
+ struct drm_encoder *encoder;
+};
+
+/**
+ * struct vkms_config_connector
+ *
+ * @link: Link to the other connectors in vkms_config
+ * @config: The vkms_config this connector belongs to
+ * @possible_encoders: Array of encoders that can be used with this connector
+ * @connector: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS connector
+ * during device creation. This pointer is not managed by the
+ * configuration and must be managed by other means.
+ */
+struct vkms_config_connector {
+ struct list_head link;
+ struct vkms_config *config;
+
+ struct xarray possible_encoders;
+
+ /* Internal usage */
+ struct vkms_connector *connector;
+};
+
+/**
+ * vkms_config_for_each_plane - Iterate over the vkms_config planes
+ * @config: &struct vkms_config pointer
+ * @plane_cfg: &struct vkms_config_plane pointer used as cursor
+ */
+#define vkms_config_for_each_plane(config, plane_cfg) \
+ list_for_each_entry((plane_cfg), &(config)->planes, link)
+
+/**
+ * vkms_config_for_each_crtc - Iterate over the vkms_config CRTCs
+ * @config: &struct vkms_config pointer
+ * @crtc_cfg: &struct vkms_config_crtc pointer used as cursor
+ */
+#define vkms_config_for_each_crtc(config, crtc_cfg) \
+ list_for_each_entry((crtc_cfg), &(config)->crtcs, link)
+
+/**
+ * vkms_config_for_each_encoder - Iterate over the vkms_config encoders
+ * @config: &struct vkms_config pointer
+ * @encoder_cfg: &struct vkms_config_encoder pointer used as cursor
+ */
+#define vkms_config_for_each_encoder(config, encoder_cfg) \
+ list_for_each_entry((encoder_cfg), &(config)->encoders, link)
+
+/**
+ * vkms_config_for_each_connector - Iterate over the vkms_config connectors
+ * @config: &struct vkms_config pointer
+ * @connector_cfg: &struct vkms_config_connector pointer used as cursor
+ */
+#define vkms_config_for_each_connector(config, connector_cfg) \
+ list_for_each_entry((connector_cfg), &(config)->connectors, link)
+
+/**
+ * vkms_config_plane_for_each_possible_crtc - Iterate over the vkms_config_plane
+ * possible CRTCs
+ * @plane_cfg: &struct vkms_config_plane pointer
+ * @idx: Index of the cursor
+ * @possible_crtc: &struct vkms_config_crtc pointer used as cursor
+ */
+#define vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) \
+ xa_for_each(&(plane_cfg)->possible_crtcs, idx, (possible_crtc))
+
+/**
+ * vkms_config_encoder_for_each_possible_crtc - Iterate over the
+ * vkms_config_encoder possible CRTCs
+ * @encoder_cfg: &struct vkms_config_encoder pointer
+ * @idx: Index of the cursor
+ * @possible_crtc: &struct vkms_config_crtc pointer used as cursor
+ */
+#define vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) \
+ xa_for_each(&(encoder_cfg)->possible_crtcs, idx, (possible_crtc))
+
+/**
+ * vkms_config_connector_for_each_possible_encoder - Iterate over the
+ * vkms_config_connector possible encoders
+ * @connector_cfg: &struct vkms_config_connector pointer
+ * @idx: Index of the cursor
+ * @possible_encoder: &struct vkms_config_encoder pointer used as cursor
+ */
+#define vkms_config_connector_for_each_possible_encoder(connector_cfg, idx, possible_encoder) \
+ xa_for_each(&(connector_cfg)->possible_encoders, idx, (possible_encoder))
+
+/**
+ * vkms_config_create() - Create a new VKMS configuration
+ * @dev_name: Name of the device
+ *
+ * Returns:
+ * The new vkms_config or an error. Call vkms_config_destroy() to free the
+ * returned configuration.
+ */
+struct vkms_config *vkms_config_create(const char *dev_name);
+
+/**
+ * vkms_config_default_create() - Create the configuration for the default device
+ * @enable_cursor: Whether to create a cursor plane
+ * @enable_writeback: Whether to create a writeback connector
+ * @enable_overlay: Whether to create overlay planes
+ *
+ * Returns:
+ * The default vkms_config or an error. Call vkms_config_destroy() to free the
+ * returned configuration.
+ */
+struct vkms_config *vkms_config_default_create(bool enable_cursor,
+ bool enable_writeback,
+ bool enable_overlay);
+
+/**
+ * vkms_config_destroy() - Free a VKMS configuration
+ * @config: vkms_config to free
+ */
+void vkms_config_destroy(struct vkms_config *config);
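+
+/*
+ * Lifecycle sketch (illustrative only; error handling elided): configurations
+ * are built incrementally and freed as a whole, e.g.:
+ *
+ *	struct vkms_config *config = vkms_config_create("example");
+ *	struct vkms_config_plane *plane_cfg = vkms_config_create_plane(config);
+ *	struct vkms_config_crtc *crtc_cfg = vkms_config_create_crtc(config);
+ *
+ *	vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ *	err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ *
+ *	An encoder and a connector are still needed for the configuration to
+ *	pass vkms_config_is_valid().
+ *
+ *	vkms_config_destroy(config);
+ */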
+
+/**
+ * vkms_config_get_device_name() - Return the name of the device
+ * @config: Configuration to get the device name from
+ *
+ * Returns:
+ * The device name. Only valid while @config is valid.
+ */
+static inline const char *
+vkms_config_get_device_name(struct vkms_config *config)
+{
+ return config->dev_name;
+}
+
+/**
+ * vkms_config_get_num_crtcs() - Return the number of CRTCs in the configuration
+ * @config: Configuration to get the number of CRTCs from
+ */
+static inline size_t vkms_config_get_num_crtcs(struct vkms_config *config)
+{
+ return list_count_nodes(&config->crtcs);
+}
+
+/**
+ * vkms_config_is_valid() - Validate a configuration
+ * @config: Configuration to validate
+ *
+ * Returns:
+ * Whether the configuration is valid or not.
+ * For example, a configuration without primary planes is not valid.
+ */
+bool vkms_config_is_valid(const struct vkms_config *config);
+
+/**
+ * vkms_config_register_debugfs() - Register a debugfs file to show the device's
+ * configuration
+ * @vkms_device: Device to register
+ */
+void vkms_config_register_debugfs(struct vkms_device *vkms_device);
+
+/**
+ * vkms_config_create_plane() - Add a new plane configuration
+ * @config: Configuration to add the plane to
+ *
+ * Returns:
+ * The new plane configuration or an error. Call vkms_config_destroy_plane() to
+ * free the returned plane configuration.
+ */
+struct vkms_config_plane *vkms_config_create_plane(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_plane() - Remove and free a plane configuration
+ * @plane_cfg: Plane configuration to destroy
+ */
+void vkms_config_destroy_plane(struct vkms_config_plane *plane_cfg);
+
+/**
+ * vkms_config_plane_get_type() - Return the plane type
+ * @plane_cfg: Plane to get the type from
+ */
+static inline enum drm_plane_type
+vkms_config_plane_get_type(struct vkms_config_plane *plane_cfg)
+{
+ return plane_cfg->type;
+}
+
+/**
+ * vkms_config_plane_set_type() - Set the plane type
+ * @plane_cfg: Plane to set the type to
+ * @type: New plane type
+ */
+static inline void
+vkms_config_plane_set_type(struct vkms_config_plane *plane_cfg,
+ enum drm_plane_type type)
+{
+ plane_cfg->type = type;
+}
+
+/**
+ * vkms_config_plane_attach_crtc - Attach a plane to a CRTC
+ * @plane_cfg: Plane to attach
+ * @crtc_cfg: CRTC to attach @plane_cfg to
+ */
+int __must_check vkms_config_plane_attach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_plane_detach_crtc - Detach a plane from a CRTC
+ * @plane_cfg: Plane to detach
+ * @crtc_cfg: CRTC to detach @plane_cfg from
+ */
+void vkms_config_plane_detach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_create_crtc() - Add a new CRTC configuration
+ * @config: Configuration to add the CRTC to
+ *
+ * Returns:
+ * The new CRTC configuration or an error. Call vkms_config_destroy_crtc() to
+ * free the returned CRTC configuration.
+ */
+struct vkms_config_crtc *vkms_config_create_crtc(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_crtc() - Remove and free a CRTC configuration
+ * @config: Configuration to remove the CRTC from
+ * @crtc_cfg: CRTC configuration to destroy
+ */
+void vkms_config_destroy_crtc(struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_crtc_get_writeback() - Return whether a writeback connector will be created
+ * @crtc_cfg: CRTC with or without a writeback connector
+ */
+static inline bool
+vkms_config_crtc_get_writeback(struct vkms_config_crtc *crtc_cfg)
+{
+ return crtc_cfg->writeback;
+}
+
+/**
+ * vkms_config_crtc_set_writeback() - Set whether a writeback connector will be created
+ * @crtc_cfg: Target CRTC
+ * @writeback: Enable or disable the writeback connector
+ */
+static inline void
+vkms_config_crtc_set_writeback(struct vkms_config_crtc *crtc_cfg,
+ bool writeback)
+{
+ crtc_cfg->writeback = writeback;
+}
+
+/**
+ * vkms_config_crtc_primary_plane() - Return the primary plane for a CRTC
+ * @config: Configuration containing the CRTC
+ * @crtc_cfg: Target CRTC
+ *
+ * Note that, if multiple primary planes are found, the first one is returned.
+ * In this case, the configuration will be invalid. See vkms_config_is_valid().
+ *
+ * Returns:
+ * The primary plane or NULL if none is assigned yet.
+ */
+struct vkms_config_plane *vkms_config_crtc_primary_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_crtc_cursor_plane() - Return the cursor plane for a CRTC
+ * @config: Configuration containing the CRTC
+ * @crtc_cfg: Target CRTC
+ *
+ * Note that, if multiple cursor planes are found, the first one is returned.
+ * In this case, the configuration will be invalid. See vkms_config_is_valid().
+ *
+ * Returns:
+ * The cursor plane or NULL if none is assigned yet.
+ */
+struct vkms_config_plane *vkms_config_crtc_cursor_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_create_encoder() - Add a new encoder configuration
+ * @config: Configuration to add the encoder to
+ *
+ * Returns:
+ * The new encoder configuration or an error. Call vkms_config_destroy_encoder()
+ * to free the returned encoder configuration.
+ */
+struct vkms_config_encoder *vkms_config_create_encoder(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_encoder() - Remove and free an encoder configuration
+ * @config: Configuration to remove the encoder from
+ * @encoder_cfg: Encoder configuration to destroy
+ */
+void vkms_config_destroy_encoder(struct vkms_config *config,
+ struct vkms_config_encoder *encoder_cfg);
+
+/**
+ * vkms_config_encoder_attach_crtc - Attach an encoder to a CRTC
+ * @encoder_cfg: Encoder to attach
+ * @crtc_cfg: CRTC to attach @encoder_cfg to
+ */
+int __must_check vkms_config_encoder_attach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_encoder_detach_crtc - Detach an encoder from a CRTC
+ * @encoder_cfg: Encoder to detach
+ * @crtc_cfg: CRTC to detach @encoder_cfg from
+ */
+void vkms_config_encoder_detach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_create_connector() - Add a new connector configuration
+ * @config: Configuration to add the connector to
+ *
+ * Returns:
+ * The new connector configuration or an error. Call
+ * vkms_config_destroy_connector() to free the returned connector configuration.
+ */
+struct vkms_config_connector *vkms_config_create_connector(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_connector() - Remove and free a connector configuration
+ * @connector_cfg: Connector configuration to destroy
+ */
+void vkms_config_destroy_connector(struct vkms_config_connector *connector_cfg);
+
+/**
+ * vkms_config_connector_attach_encoder - Attach a connector to an encoder
+ * @connector_cfg: Connector to attach
+ * @encoder_cfg: Encoder to attach @connector_cfg to
+ */
+int __must_check vkms_config_connector_attach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg);
+
+/**
+ * vkms_config_connector_detach_encoder - Detach a connector from an encoder
+ * @connector_cfg: Connector to detach
+ * @encoder_cfg: Encoder to detach @connector_cfg from
+ */
+void vkms_config_connector_detach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg);
+
+#endif /* _VKMS_CONFIG_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_connector.c b/drivers/gpu/drm/vkms/vkms_connector.c
new file mode 100644
index 000000000000..48b10cba322a
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_connector.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_probe_helper.h>
+
+#include "vkms_connector.h"
+
+static const struct drm_connector_funcs vkms_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int vkms_conn_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ /* Use the default modes list from DRM */
+ count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
+ drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
+
+ return count;
+}
+
+static struct drm_encoder *vkms_conn_best_encoder(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
+
+ return NULL;
+}
+
+static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
+ .get_modes = vkms_conn_get_modes,
+ .best_encoder = vkms_conn_best_encoder,
+};
+
+struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev)
+{
+ struct drm_device *dev = &vkmsdev->drm;
+ struct vkms_connector *connector;
+ int ret;
+
+ connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drmm_connector_init(dev, &connector->base, &vkms_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_connector_helper_add(&connector->base, &vkms_conn_helper_funcs);
+
+ return connector;
+}
diff --git a/drivers/gpu/drm/vkms/vkms_connector.h b/drivers/gpu/drm/vkms/vkms_connector.h
new file mode 100644
index 000000000000..c9149c1b7af0
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_connector.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _VKMS_CONNECTOR_H_
+#define _VKMS_CONNECTOR_H_
+
+#include "vkms_drv.h"
+
+/**
+ * struct vkms_connector - VKMS custom type wrapping around the DRM connector
+ *
+ * @base: Base DRM connector
+ */
+struct vkms_connector {
+ struct drm_connector base;
+};
+
+/**
+ * vkms_connector_init() - Initialize a connector
+ * @vkmsdev: VKMS device containing the connector
+ *
+ * Returns:
+ * The connector or an error on failure.
+ */
+struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev);
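+
+/*
+ * Usage sketch (illustrative only), mirroring vkms_output_init():
+ *
+ *	struct vkms_connector *connector = vkms_connector_init(vkmsdev);
+ *
+ *	if (IS_ERR(connector))
+ *		return PTR_ERR(connector);
+ */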
+
+#endif /* _VKMS_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 12034ec12029..8c9898b9055d 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -194,7 +194,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
i++;
}
- vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL);
+ vkms_state->active_planes = kcalloc(i, sizeof(*vkms_state->active_planes), GFP_KERNEL);
if (!vkms_state->active_planes)
return -ENOMEM;
vkms_state->num_active_planes = i;
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index b6de91134a22..a24d1655f7b8 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -27,11 +27,9 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_vblank.h>
+#include "vkms_config.h"
#include "vkms_drv.h"
-#include <drm/drm_print.h>
-#include <drm/drm_debugfs.h>
-
#define DRIVER_NAME "vkms"
#define DRIVER_DESC "Virtual Kernel Mode Setting"
#define DRIVER_MAJOR 1
@@ -81,23 +79,6 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_cleanup_planes(dev, old_state);
}
-static int vkms_config_show(struct seq_file *m, void *data)
-{
- struct drm_debugfs_entry *entry = m->private;
- struct drm_device *dev = entry->dev;
- struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
-
- seq_printf(m, "writeback=%d\n", vkmsdev->config->writeback);
- seq_printf(m, "cursor=%d\n", vkmsdev->config->cursor);
- seq_printf(m, "overlay=%d\n", vkmsdev->config->overlay);
-
- return 0;
-}
-
-static const struct drm_debugfs_info vkms_config_debugfs_list[] = {
- { "vkms_config", vkms_config_show, 0 },
-};
-
static const struct drm_driver vkms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
.fops = &vkms_driver_fops,
@@ -170,8 +151,10 @@ static int vkms_create(struct vkms_config *config)
int ret;
struct platform_device *pdev;
struct vkms_device *vkms_device;
+ const char *dev_name;
- pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
+ dev_name = vkms_config_get_device_name(config);
+ pdev = platform_device_register_simple(dev_name, -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
@@ -198,7 +181,8 @@ static int vkms_create(struct vkms_config *config)
goto out_devres;
}
- ret = drm_vblank_init(&vkms_device->drm, 1);
+ ret = drm_vblank_init(&vkms_device->drm,
+ vkms_config_get_num_crtcs(config));
if (ret) {
DRM_ERROR("Failed to vblank\n");
goto out_devres;
@@ -208,8 +192,7 @@ static int vkms_create(struct vkms_config *config)
if (ret)
goto out_devres;
- drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list,
- ARRAY_SIZE(vkms_config_debugfs_list));
+ vkms_config_register_debugfs(vkms_device);
ret = drm_dev_register(&vkms_device->drm, 0);
if (ret)
@@ -231,17 +214,13 @@ static int __init vkms_init(void)
int ret;
struct vkms_config *config;
- config = kmalloc(sizeof(*config), GFP_KERNEL);
- if (!config)
- return -ENOMEM;
-
- config->cursor = enable_cursor;
- config->writeback = enable_writeback;
- config->overlay = enable_overlay;
+ config = vkms_config_default_create(enable_cursor, enable_writeback, enable_overlay);
+ if (IS_ERR(config))
+ return PTR_ERR(config);
ret = vkms_create(config);
if (ret) {
- kfree(config);
+ vkms_config_destroy(config);
return ret;
}
@@ -275,7 +254,7 @@ static void __exit vkms_exit(void)
return;
vkms_destroy(default_config);
- kfree(default_config);
+ vkms_config_destroy(default_config);
}
module_init(vkms_init);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index abbb652be2b5..a74a7fc3a056 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -12,6 +12,8 @@
#include <drm/drm_encoder.h>
#include <drm/drm_writeback.h>
+#define DEFAULT_DEVICE_NAME "vkms"
+
#define XRES_MIN 10
#define YRES_MIN 10
@@ -189,20 +191,7 @@ struct vkms_output {
spinlock_t composer_lock;
};
-/**
- * struct vkms_config - General configuration for VKMS driver
- *
- * @writeback: If true, a writeback buffer can be attached to the CRTC
- * @cursor: If true, a cursor plane is created in the VKMS device
- * @overlay: If true, NUM_OVERLAY_PLANES will be created for the VKMS device
- * @dev: Used to store the current VKMS device. Only set when the device is instantiated.
- */
-struct vkms_config {
- bool writeback;
- bool cursor;
- bool overlay;
- struct vkms_device *dev;
-};
+struct vkms_config;
/**
* struct vkms_device - Description of a VKMS device
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 22f0d678af3a..8d7ca0cdd79f 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -1,121 +1,111 @@
// SPDX-License-Identifier: GPL-2.0+
+#include "vkms_config.h"
+#include "vkms_connector.h"
#include "vkms_drv.h"
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_edid.h>
#include <drm/drm_managed.h>
-#include <drm/drm_probe_helper.h>
-
-static const struct drm_connector_funcs vkms_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int vkms_conn_get_modes(struct drm_connector *connector)
-{
- int count;
-
- /* Use the default modes list from DRM */
- count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
- drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
-
- return count;
-}
-
-static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
- .get_modes = vkms_conn_get_modes,
-};
int vkms_output_init(struct vkms_device *vkmsdev)
{
struct drm_device *dev = &vkmsdev->drm;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct vkms_output *output;
- struct vkms_plane *primary, *overlay, *cursor = NULL;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
int ret;
int writeback;
- unsigned int n;
-
- /*
- * Initialize used plane. One primary plane is required to perform the composition.
- *
- * The overlay and cursor planes are not mandatory, but can be used to perform complex
- * composition.
- */
- primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY);
- if (IS_ERR(primary))
- return PTR_ERR(primary);
-
- if (vkmsdev->config->cursor) {
- cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR);
- if (IS_ERR(cursor))
- return PTR_ERR(cursor);
- }
- output = vkms_crtc_init(dev, &primary->base,
- cursor ? &cursor->base : NULL);
- if (IS_ERR(output)) {
- DRM_ERROR("Failed to allocate CRTC\n");
- return PTR_ERR(output);
- }
+ if (!vkms_config_is_valid(vkmsdev->config))
+ return -EINVAL;
- if (vkmsdev->config->overlay) {
- for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
- overlay = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY);
- if (IS_ERR(overlay)) {
- DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n");
- return PTR_ERR(overlay);
- }
- overlay->base.possible_crtcs = drm_crtc_mask(&output->crtc);
+ vkms_config_for_each_plane(vkmsdev->config, plane_cfg) {
+ enum drm_plane_type type;
+
+ type = vkms_config_plane_get_type(plane_cfg);
+
+ plane_cfg->plane = vkms_plane_init(vkmsdev, type);
+ if (IS_ERR(plane_cfg->plane)) {
+ DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n");
+ return PTR_ERR(plane_cfg->plane);
}
}
- connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
- if (!connector) {
- DRM_ERROR("Failed to allocate connector\n");
- return -ENOMEM;
- }
+ vkms_config_for_each_crtc(vkmsdev->config, crtc_cfg) {
+ struct vkms_config_plane *primary, *cursor;
- ret = drmm_connector_init(dev, connector, &vkms_connector_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL, NULL);
- if (ret) {
- DRM_ERROR("Failed to init connector\n");
- return ret;
- }
+ primary = vkms_config_crtc_primary_plane(vkmsdev->config, crtc_cfg);
+ cursor = vkms_config_crtc_cursor_plane(vkmsdev->config, crtc_cfg);
- drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
+ crtc_cfg->crtc = vkms_crtc_init(dev, &primary->plane->base,
+ cursor ? &cursor->plane->base : NULL);
+ if (IS_ERR(crtc_cfg->crtc)) {
+ DRM_ERROR("Failed to allocate CRTC\n");
+ return PTR_ERR(crtc_cfg->crtc);
+ }
- encoder = drmm_kzalloc(dev, sizeof(*encoder), GFP_KERNEL);
- if (!encoder) {
- DRM_ERROR("Failed to allocate encoder\n");
- return -ENOMEM;
+ /* Initialize the writeback component */
+ if (vkms_config_crtc_get_writeback(crtc_cfg)) {
+ writeback = vkms_enable_writeback_connector(vkmsdev, crtc_cfg->crtc);
+ if (writeback)
+ DRM_ERROR("Failed to init writeback connector\n");
+ }
}
- ret = drmm_encoder_init(dev, encoder, NULL,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
- if (ret) {
- DRM_ERROR("Failed to init encoder\n");
- return ret;
+
+ vkms_config_for_each_plane(vkmsdev->config, plane_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ plane_cfg->plane->base.possible_crtcs |=
+ drm_crtc_mask(&possible_crtc->crtc->crtc);
+ }
}
- encoder->possible_crtcs = drm_crtc_mask(&output->crtc);
- /* Attach the encoder and the connector */
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret) {
- DRM_ERROR("Failed to attach connector to encoder\n");
- return ret;
+ vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ encoder_cfg->encoder = drmm_kzalloc(dev, sizeof(*encoder_cfg->encoder), GFP_KERNEL);
+ if (!encoder_cfg->encoder) {
+ DRM_ERROR("Failed to allocate encoder\n");
+ return -ENOMEM;
+ }
+ ret = drmm_encoder_init(dev, encoder_cfg->encoder, NULL,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to init encoder\n");
+ return ret;
+ }
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) {
+ encoder_cfg->encoder->possible_crtcs |=
+ drm_crtc_mask(&possible_crtc->crtc->crtc);
+ }
}
- /* Initialize the writeback component */
- if (vkmsdev->config->writeback) {
- writeback = vkms_enable_writeback_connector(vkmsdev, output);
- if (writeback)
- DRM_ERROR("Failed to init writeback connector\n");
+ vkms_config_for_each_connector(vkmsdev->config, connector_cfg) {
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+
+ connector_cfg->connector = vkms_connector_init(vkmsdev);
+ if (IS_ERR(connector_cfg->connector)) {
+ DRM_ERROR("Failed to init connector\n");
+ return PTR_ERR(connector_cfg->connector);
+ }
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg,
+ idx,
+ possible_encoder) {
+ ret = drm_connector_attach_encoder(&connector_cfg->connector->base,
+ possible_encoder->encoder);
+ if (ret) {
+ DRM_ERROR("Failed to attach connector to encoder\n");
+ return ret;
+ }
+ }
}
drm_mode_config_reset(dev);
- return ret;
+ return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 46a4ab688a7f..b168fd7fe9b3 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -10,6 +10,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \
- vmwgfx_gem.o vmwgfx_vkms.o
+ vmwgfx_gem.o vmwgfx_vkms.o vmwgfx_cursor_plane.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index 64bd7d74854e..fa5841fda659 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -429,7 +429,7 @@ static void *map_external(struct vmw_bo *bo, struct iosys_map *map)
void *ptr = NULL;
int ret;
- if (bo->tbo.base.import_attach) {
+ if (drm_gem_is_imported(&bo->tbo.base)) {
ret = dma_buf_vmap(bo->tbo.base.dma_buf, map);
if (ret) {
drm_dbg_driver(&vmw->drm,
@@ -447,7 +447,7 @@ out:
static void unmap_external(struct vmw_bo *bo, struct iosys_map *map)
{
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
dma_buf_vunmap(bo->tbo.base.dma_buf, map);
else
vmw_bo_unmap(bo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 9b5b8c1f063b..f031a312c783 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -36,8 +36,7 @@ static void vmw_bo_release(struct vmw_bo *vbo)
{
struct vmw_resource *res;
- WARN_ON(vbo->tbo.base.funcs &&
- kref_read(&vbo->tbo.base.refcount) != 0);
+ WARN_ON(kref_read(&vbo->tbo.base.refcount) != 0);
vmw_bo_unmap(vbo);
xa_destroy(&vbo->detached_resources);
@@ -51,11 +50,13 @@ static void vmw_bo_release(struct vmw_bo *vbo)
mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void)vmw_resource_reserve(res, false, true);
vmw_resource_mob_detach(res);
+ if (res->dirty)
+ res->func->dirty_free(res);
if (res->coherent)
vmw_bo_dirty_release(res->guest_memory_bo);
res->guest_memory_bo = NULL;
res->guest_memory_offset = 0;
- vmw_resource_unreserve(res, false, false, false, NULL,
+ vmw_resource_unreserve(res, true, false, false, NULL,
0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
@@ -73,9 +74,9 @@ static void vmw_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_bo *vbo = to_vmw_bo(&bo->base);
- WARN_ON(vbo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
vmw_bo_release(vbo);
+ WARN_ON(vbo->dirty);
kfree(vbo);
}
@@ -467,6 +468,7 @@ int vmw_bo_create(struct vmw_private *vmw,
if (unlikely(ret != 0))
goto out_error;
+ (*p_bo)->tbo.base.funcs = &vmw_gem_object_funcs;
return ret;
out_error:
*p_bo = NULL;
@@ -848,9 +850,9 @@ void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
vmw_bo_placement_set(bo, domain, domain);
}
-void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
+int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
{
- xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL);
+ return xa_err(xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL));
}
void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
@@ -887,3 +889,9 @@ out:
surf = vmw_res_to_srf(res);
return surf;
}
+
+s32 vmw_bo_mobid(struct vmw_bo *vbo)
+{
+ WARN_ON(vbo->tbo.resource->mem_type != VMW_PL_MOB);
+ return (s32)vbo->tbo.resource->start;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 11e330c7c7f5..cf84a163bfcb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -141,7 +141,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
-void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
+int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo);
@@ -204,12 +204,12 @@ static inline void vmw_bo_unreference(struct vmw_bo **buf)
*buf = NULL;
if (tmp_buf)
- ttm_bo_put(&tmp_buf->tbo);
+ drm_gem_object_put(&tmp_buf->tbo.base);
}
static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
{
- ttm_bo_get(&buf->tbo);
+ drm_gem_object_get(&buf->tbo.base);
return buf;
}
@@ -233,4 +233,6 @@ static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
return container_of((gobj), struct vmw_bo, tbo.base);
}
+s32 vmw_bo_mobid(struct vmw_bo *vbo);
+
#endif // VMWGFX_BO_H
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index a7c07692262b..98331c4c0335 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -432,7 +432,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* for the new COTable. Initially pin the buffer object to make sure
* we can use tryreserve without failure.
*/
- ret = vmw_gem_object_create(dev_priv, &bo_params, &buf);
+ ret = vmw_bo_create(dev_priv, &bo_params, &buf);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
goto out_done;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
new file mode 100644
index 000000000000..718832b08d96
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
@@ -0,0 +1,844 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ **************************************************************************/
+#include "vmwgfx_cursor_plane.h"
+
+#include "vmwgfx_bo.h"
+#include "vmwgfx_drv.h"
+#include "vmwgfx_kms.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmw_surface_cache.h"
+
+#include "drm/drm_atomic.h"
+#include "drm/drm_atomic_helper.h"
+#include "drm/drm_plane.h"
+#include <asm/page.h>
+
+#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
+#define VMW_CURSOR_SNOOP_WIDTH 64
+#define VMW_CURSOR_SNOOP_HEIGHT 64
+
+struct vmw_svga_fifo_cmd_define_cursor {
+ u32 cmd;
+ SVGAFifoCmdDefineAlphaCursor cursor;
+};
+
+/**
+ * vmw_send_define_cursor_cmd - queue a define cursor command
+ * @dev_priv: the private driver struct
+ * @image: buffer which holds the cursor image
+ * @width: width of the mouse cursor image
+ * @height: height of the mouse cursor image
+ * @hotspotX: the horizontal position of the mouse hotspot
+ * @hotspotY: the vertical position of the mouse hotspot
+ */
+static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
+ u32 *image, u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
+{
+ struct vmw_svga_fifo_cmd_define_cursor *cmd;
+ const u32 image_size = width * height * sizeof(*image);
+ const u32 cmd_size = sizeof(*cmd) + image_size;
+
+ /*
+ * Try to reserve fifocmd space and swallow any failures;
+ * such reservations cannot be left unconsumed for long
+ * at the risk of clogging other fifocmd users, so
+ * we treat reservations separately from the way we treat
+ * other fallible KMS-atomic resources at prepare_fb
+ */
+ cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
+
+ if (unlikely(!cmd))
+ return;
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ memcpy(&cmd[1], image, image_size);
+
+ cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
+ cmd->cursor.id = 0;
+ cmd->cursor.width = width;
+ cmd->cursor.height = height;
+ cmd->cursor.hotspotX = hotspotX;
+ cmd->cursor.hotspotY = hotspotY;
+
+ vmw_cmd_commit_flush(dev_priv, cmd_size);
+}
+
+static void
+vmw_cursor_plane_update_legacy(struct vmw_private *vmw,
+ struct vmw_plane_state *vps)
+{
+ struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);
+ s32 hotspot_x = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
+ s32 hotspot_y = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;
+
+ if (WARN_ON(!surface || !surface->snooper.image))
+ return;
+
+ if (vps->cursor.legacy.id != surface->snooper.id) {
+ vmw_send_define_cursor_cmd(vmw, surface->snooper.image,
+ vps->base.crtc_w, vps->base.crtc_h,
+ hotspot_x, hotspot_y);
+ vps->cursor.legacy.id = surface->snooper.id;
+ }
+}
+
+static enum vmw_cursor_update_type
+vmw_cursor_update_type(struct vmw_private *vmw, struct vmw_plane_state *vps)
+{
+ struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);
+
+ if (surface && surface->snooper.image)
+ return VMW_CURSOR_UPDATE_LEGACY;
+
+ if (vmw->has_mob) {
+ if ((vmw->capabilities2 & SVGA_CAP2_CURSOR_MOB) != 0)
+ return VMW_CURSOR_UPDATE_MOB;
+ }
+
+ return VMW_CURSOR_UPDATE_NONE;
+}
+
+static void vmw_cursor_update_mob(struct vmw_private *vmw,
+ struct vmw_plane_state *vps)
+{
+ SVGAGBCursorHeader *header;
+ SVGAGBAlphaCursorHeader *alpha_header;
+ struct vmw_bo *bo = vmw_user_object_buffer(&vps->uo);
+ u32 *image = vmw_bo_map_and_cache(bo);
+ const u32 image_size = vps->base.crtc_w * vps->base.crtc_h * sizeof(*image);
+
+ header = vmw_bo_map_and_cache(vps->cursor.mob);
+ alpha_header = &header->header.alphaHeader;
+
+ memset(header, 0, sizeof(*header));
+
+ header->type = SVGA_ALPHA_CURSOR;
+ header->sizeInBytes = image_size;
+
+ alpha_header->hotspotX = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
+ alpha_header->hotspotY = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;
+ alpha_header->width = vps->base.crtc_w;
+ alpha_header->height = vps->base.crtc_h;
+
+ memcpy(header + 1, image, image_size);
+ vmw_write(vmw, SVGA_REG_CURSOR_MOBID, vmw_bo_mobid(vps->cursor.mob));
+
+ vmw_bo_unmap(bo);
+ vmw_bo_unmap(vps->cursor.mob);
+}
+
+static u32 vmw_cursor_mob_size(enum vmw_cursor_update_type update_type,
+ u32 w, u32 h)
+{
+ switch (update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ case VMW_CURSOR_UPDATE_NONE:
+ return 0;
+ case VMW_CURSOR_UPDATE_MOB:
+ return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
+ }
+ return 0;
+}
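As a quick sanity check of the sizing helper, a minimal sketch (the 64x64 dimensions are illustrative, not required by the helper itself):

	/* Illustrative only: bytes needed to back a 64x64 MOB cursor. */
	u32 demo = vmw_cursor_mob_size(VMW_CURSOR_UPDATE_MOB, 64, 64);
	/*
	 * demo == 64 * 64 * sizeof(u32) + sizeof(SVGAGBCursorHeader),
	 * i.e. 16 KiB of ARGB pixels plus the GB cursor header that
	 * vmw_cursor_update_mob() writes in front of them.
	 */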
+
+static void vmw_cursor_mob_destroy(struct vmw_bo **vbo)
+{
+ if (!(*vbo))
+ return;
+
+ ttm_bo_unpin(&(*vbo)->tbo);
+ vmw_bo_unreference(vbo);
+}
+
+/**
+ * vmw_cursor_mob_unmap - Unmaps the cursor mobs.
+ *
+ * @vps: state of the cursor plane
+ *
+ * Returns 0 on success
+ */
+
+static int
+vmw_cursor_mob_unmap(struct vmw_plane_state *vps)
+{
+ int ret = 0;
+ struct vmw_bo *vbo = vps->cursor.mob;
+
+ if (!vbo || !vbo->map.virtual)
+ return 0;
+
+ ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
+ if (likely(ret == 0)) {
+ vmw_bo_unmap(vbo);
+ ttm_bo_unreserve(&vbo->tbo);
+ }
+
+ return ret;
+}
+
+static void vmw_cursor_mob_put(struct vmw_cursor_plane *vcp,
+ struct vmw_plane_state *vps)
+{
+ u32 i;
+
+ if (!vps->cursor.mob)
+ return;
+
+ vmw_cursor_mob_unmap(vps);
+
+ /* Look for a free slot to return this mob to the cache. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (!vcp->cursor_mobs[i]) {
+ vcp->cursor_mobs[i] = vps->cursor.mob;
+ vps->cursor.mob = NULL;
+ return;
+ }
+ }
+
+ /* Cache is full: See if this mob is bigger than an existing mob. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (vcp->cursor_mobs[i]->tbo.base.size <
+ vps->cursor.mob->tbo.base.size) {
+ vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);
+ vcp->cursor_mobs[i] = vps->cursor.mob;
+ vps->cursor.mob = NULL;
+ return;
+ }
+ }
+
+ /* Destroy it if it's not worth caching. */
+ vmw_cursor_mob_destroy(&vps->cursor.mob);
+}
+
+static int vmw_cursor_mob_get(struct vmw_cursor_plane *vcp,
+ struct vmw_plane_state *vps)
+{
+ struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
+ u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
+ vps->base.crtc_w, vps->base.crtc_h);
+ u32 i;
+ u32 cursor_max_dim, mob_max_size;
+ struct vmw_fence_obj *fence = NULL;
+ int ret;
+
+ if (!dev_priv->has_mob ||
+ (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
+ return -EINVAL;
+
+ mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
+ cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
+
+ if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
+ vps->base.crtc_h > cursor_max_dim)
+ return -EINVAL;
+
+ if (vps->cursor.mob) {
+ if (vps->cursor.mob->tbo.base.size >= size)
+ return 0;
+ vmw_cursor_mob_put(vcp, vps);
+ }
+
+ /* Look for an unused mob in the cache. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (vcp->cursor_mobs[i] &&
+ vcp->cursor_mobs[i]->tbo.base.size >= size) {
+ vps->cursor.mob = vcp->cursor_mobs[i];
+ vcp->cursor_mobs[i] = NULL;
+ return 0;
+ }
+ }
+ /* Create a new mob if we can't find an existing one. */
+ ret = vmw_bo_create_and_populate(dev_priv, size, VMW_BO_DOMAIN_MOB,
+ &vps->cursor.mob);
+
+ if (ret != 0)
+ return ret;
+
+ /* Fence the mob creation so we are guaranteed to have the mob */
+ ret = ttm_bo_reserve(&vps->cursor.mob->tbo, false, false, NULL);
+ if (ret != 0)
+ goto teardown;
+
+ ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (ret != 0) {
+ ttm_bo_unreserve(&vps->cursor.mob->tbo);
+ goto teardown;
+ }
+
+ dma_fence_wait(&fence->base, false);
+ dma_fence_put(&fence->base);
+
+ ttm_bo_unreserve(&vps->cursor.mob->tbo);
+
+ return 0;
+
+teardown:
+ vmw_cursor_mob_destroy(&vps->cursor.mob);
+ return ret;
+}
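The uninterruptible dma_fence_wait() above trades a one-off stall when the cursor size grows for the guarantee that the freshly created MOB actually exists on the device side before SVGA_REG_CURSOR_MOBID can ever point at it; presumably MOB setup is itself queued device work, so skipping the wait would let the register write race it.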
+
+static void vmw_cursor_update_position(struct vmw_private *dev_priv,
+ bool show, int x, int y)
+{
+ const u32 svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
+ : SVGA_CURSOR_ON_HIDE;
+ u32 count;
+
+ spin_lock(&dev_priv->cursor_lock);
+ if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
+ } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
+ count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
+ } else {
+ vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
+ }
+ spin_unlock(&dev_priv->cursor_lock);
+}
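A hedged usage sketch: callers deal only in screen coordinates plus a visibility flag, and the helper picks whichever of the three register interfaces (the CURSOR4 registers, the cursor-bypass-3 FIFO, or the legacy registers) the device supports:

	/* Coordinates assumed for illustration: show the cursor at (100, 50). */
	vmw_cursor_update_position(dev_priv, true, 100, 50);
	/* Hide it again without caring where it was. */
	vmw_cursor_update_position(dev_priv, false, 0, 0);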
+
+void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ struct ttm_object_file *tfile,
+ struct ttm_buffer_object *bo,
+ SVGA3dCmdHeader *header)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ SVGA3dCopyBox *box;
+ u32 box_count;
+ void *virtual;
+ bool is_iomem;
+ struct vmw_dma_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA dma;
+ } *cmd;
+ int i, ret;
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+ const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
+
+ cmd = container_of(header, struct vmw_dma_cmd, header);
+
+ /* No snooper installed, nothing to copy */
+ if (!srf->snooper.image)
+ return;
+
+ if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
+ DRM_ERROR("face and mipmap for cursors should never != 0\n");
+ return;
+ }
+
+ if (cmd->header.size < 64) {
+ DRM_ERROR("at least one full copy box must be given\n");
+ return;
+ }
+
+ box = (SVGA3dCopyBox *)&cmd[1];
+ box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
+ sizeof(SVGA3dCopyBox);
+
+ if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+ box->x != 0 || box->y != 0 || box->z != 0 ||
+ box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
+ box->d != 1 || box_count != 1 ||
+ box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
+ /* TODO handle non-page-aligned offsets */
+ /* TODO handle dst and src offsets != 0 */
+ /* TODO handle more than one copy box */
+ DRM_ERROR("Can't snoop dma request for cursor!\n");
+ DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
+ box->srcx, box->srcy, box->srcz,
+ box->x, box->y, box->z,
+ box->w, box->h, box->d, box_count,
+ cmd->dma.guest.ptr.offset);
+ return;
+ }
+
+ kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
+ kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(bo, true, false, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return;
+ }
+
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
+
+ if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
+ memcpy(srf->snooper.image, virtual,
+ VMW_CURSOR_SNOOP_HEIGHT * image_pitch);
+ } else {
+ /* Pitches differ: copy the image one row at a time. */
+ for (i = 0; i < box->h; i++)
+ memcpy(srf->snooper.image + i * image_pitch,
+ virtual + i * cmd->dma.guest.pitch,
+ box->w * desc->pitchBytesPerBlock);
+ }
+ srf->snooper.id++;
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(bo);
+}
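To make the validation above concrete, a sketch of the only copy-box shape the snooper will accept (values are illustrative within the stated limits):

	/* One box, zero src/dst origins, depth 1, page-aligned guest offset. */
	SVGA3dCopyBox accepted = {
		.x = 0, .y = 0, .z = 0,
		.srcx = 0, .srcy = 0, .srcz = 0,
		.w = 64, .h = 64, .d = 1,	/* w and h may be smaller */
	};

Anything else, page-misaligned guest offsets included, is logged and ignored rather than partially snooped.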
+
+void vmw_cursor_plane_destroy(struct drm_plane *plane)
+{
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ u32 i;
+
+ vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
+ vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);
+
+ drm_plane_cleanup(plane);
+}
+
+/**
+ * vmw_cursor_mob_map - Maps the cursor mobs.
+ *
+ * @vps: plane_state
+ *
+ * Returns 0 on success
+ */
+
+static int
+vmw_cursor_mob_map(struct vmw_plane_state *vps)
+{
+ int ret;
+ u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
+ vps->base.crtc_w, vps->base.crtc_h);
+ struct vmw_bo *vbo = vps->cursor.mob;
+
+ if (!vbo)
+ return -EINVAL;
+
+ if (vbo->tbo.base.size < size)
+ return -EINVAL;
+
+ if (vbo->map.virtual)
+ return 0;
+
+ ret = ttm_bo_reserve(&vbo->tbo, false, false, NULL);
+ if (unlikely(ret != 0))
+ return -ENOMEM;
+
+ vmw_bo_map_and_cache(vbo);
+
+ ttm_bo_unreserve(&vbo->tbo);
+
+ return 0;
+}
+
+/**
+ * vmw_cursor_plane_cleanup_fb - Unpins the plane surface
+ *
+ * @plane: cursor plane
+ * @old_state: contains the state to clean up
+ *
+ * Unmaps all cursor bo mappings and unpins the cursor surface
+ *
+ * Returns 0 on success
+ */
+void
+vmw_cursor_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+
+ if (!vmw_user_object_is_null(&vps->uo))
+ vmw_user_object_unmap(&vps->uo);
+
+ vmw_cursor_mob_unmap(vps);
+ vmw_cursor_mob_put(vcp, vps);
+
+ vmw_du_plane_unpin_surf(vps);
+ vmw_user_object_unref(&vps->uo);
+}
+
+static bool
+vmw_cursor_buffer_changed(struct vmw_plane_state *new_vps,
+ struct vmw_plane_state *old_vps)
+{
+ struct vmw_bo *new_bo = vmw_user_object_buffer(&new_vps->uo);
+ struct vmw_bo *old_bo = vmw_user_object_buffer(&old_vps->uo);
+ struct vmw_surface *surf;
+ bool dirty = false;
+ int ret;
+
+ if (new_bo != old_bo)
+ return true;
+
+ if (new_bo) {
+ if (!old_bo) {
+ return true;
+ } else if (new_bo->dirty) {
+ vmw_bo_dirty_scan(new_bo);
+ dirty = vmw_bo_is_dirty(new_bo);
+ if (dirty) {
+ surf = vmw_user_object_surface(&new_vps->uo);
+ if (surf)
+ vmw_bo_dirty_transfer_to_res(&surf->res);
+ else
+ vmw_bo_dirty_clear(new_bo);
+ }
+ return dirty;
+ } else if (new_bo != old_bo) {
+ /*
+ * Currently unreachable because the check at the top exits
+ * right away. In most cases a different buffer means different
+ * contents; for the few percent of cases where that's not true,
+ * the cost of running the memcmp on all the others seems to
+ * outweigh the benefit. The branch is kept so it can be
+ * trivially re-validated by removing the initial
+ * if (new_bo != old_bo) check at the start.
+ */
+ void *old_image;
+ void *new_image;
+ bool changed = false;
+ struct ww_acquire_ctx ctx;
+ const u32 size = new_vps->base.crtc_w *
+ new_vps->base.crtc_h * sizeof(u32);
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ ww_acquire_fini(&ctx);
+ return true;
+ }
+
+ ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ ttm_bo_unreserve(&old_bo->tbo);
+ ww_acquire_fini(&ctx);
+ return true;
+ }
+
+ old_image = vmw_bo_map_and_cache(old_bo);
+ new_image = vmw_bo_map_and_cache(new_bo);
+
+ if (old_image && new_image && old_image != new_image)
+ changed = memcmp(old_image, new_image, size) != 0;
+
+ ttm_bo_unreserve(&new_bo->tbo);
+ ttm_bo_unreserve(&old_bo->tbo);
+
+ ww_acquire_fini(&ctx);
+
+ return changed;
+ }
+ return false;
+ }
+
+ return false;
+}
+
+static bool
+vmw_cursor_plane_changed(struct vmw_plane_state *new_vps,
+ struct vmw_plane_state *old_vps)
+{
+ if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
+ old_vps->base.crtc_h != new_vps->base.crtc_h)
+ return true;
+
+ if (old_vps->base.hotspot_x != new_vps->base.hotspot_x ||
+ old_vps->base.hotspot_y != new_vps->base.hotspot_y)
+ return true;
+
+ if (old_vps->cursor.legacy.hotspot_x !=
+ new_vps->cursor.legacy.hotspot_x ||
+ old_vps->cursor.legacy.hotspot_y !=
+ new_vps->cursor.legacy.hotspot_y)
+ return true;
+
+ if (old_vps->base.fb != new_vps->base.fb)
+ return true;
+
+ return false;
+}
+
+/**
+ * vmw_cursor_plane_prepare_fb - Readies the cursor by referencing it
+ *
+ * @plane: display plane
+ * @new_state: info on the new plane state, including the FB
+ *
+ * Returns 0 on success
+ */
+int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(plane->state);
+ struct vmw_private *vmw = vmw_priv(plane->dev);
+ struct vmw_bo *bo = NULL;
+ struct vmw_surface *surface;
+ int ret = 0;
+
+ if (!vmw_user_object_is_null(&vps->uo)) {
+ vmw_user_object_unmap(&vps->uo);
+ vmw_user_object_unref(&vps->uo);
+ }
+
+ if (fb) {
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
+ vps->uo.surface = NULL;
+ } else {
+ memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
+ }
+ vmw_user_object_ref(&vps->uo);
+ }
+
+ vps->cursor.update_type = vmw_cursor_update_type(vmw, vps);
+ switch (vps->cursor.update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ surface = vmw_user_object_surface(&vps->uo);
+ if (!surface || vps->cursor.legacy.id == surface->snooper.id)
+ vps->cursor.update_type = VMW_CURSOR_UPDATE_NONE;
+ break;
+ case VMW_CURSOR_UPDATE_MOB: {
+ bo = vmw_user_object_buffer(&vps->uo);
+ if (bo) {
+ struct ttm_operation_ctx ctx = { false, false };
+
+ ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
+ if (ret != 0)
+ return -ENOMEM;
+
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret != 0) {
+ ttm_bo_unreserve(&bo->tbo);
+ return -ENOMEM;
+ }
+
+ /*
+ * vmw_bo_pin_reserved also validates, so to skip
+ * the extra validation use ttm_bo_pin directly
+ */
+ if (!bo->tbo.pin_count)
+ ttm_bo_pin(&bo->tbo);
+
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ const u32 size = new_state->crtc_w *
+ new_state->crtc_h *
+ sizeof(u32);
+
+ (void)vmw_bo_map_and_cache_size(bo, size);
+ } else {
+ vmw_bo_map_and_cache(bo);
+ }
+ ttm_bo_unreserve(&bo->tbo);
+ }
+ if (!vmw_user_object_is_null(&vps->uo)) {
+ if (!vmw_cursor_plane_changed(vps, old_vps) &&
+ !vmw_cursor_buffer_changed(vps, old_vps)) {
+ vps->cursor.update_type =
+ VMW_CURSOR_UPDATE_NONE;
+ } else {
+ vmw_cursor_mob_get(vcp, vps);
+ vmw_cursor_mob_map(vps);
+ }
+ }
+ }
+ break;
+ case VMW_CURSOR_UPDATE_NONE:
+ /* do nothing */
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cursor_plane_atomic_check - check if the new state is okay
+ *
+ * @plane: cursor plane
+ * @state: info on the new plane state
+ *
+ * This is a chance to fail if the new cursor state does not fit
+ * our requirements.
+ *
+ * Returns 0 on success
+ */
+int vmw_cursor_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct vmw_private *vmw = vmw_priv(plane->dev);
+ int ret = 0;
+ struct drm_crtc_state *crtc_state = NULL;
+ struct vmw_surface *surface = NULL;
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ enum vmw_cursor_update_type update_type;
+ struct drm_framebuffer *fb = new_state->fb;
+
+ if (new_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING, true,
+ true);
+ if (ret)
+ return ret;
+
+ /* Turning off */
+ if (!fb)
+ return 0;
+
+ update_type = vmw_cursor_update_type(vmw, vps);
+ if (update_type == VMW_CURSOR_UPDATE_LEGACY) {
+ if (new_state->crtc_w != VMW_CURSOR_SNOOP_WIDTH ||
+ new_state->crtc_h != VMW_CURSOR_SNOOP_HEIGHT) {
+ drm_warn(&vmw->drm,
+ "Invalid cursor dimensions (%d, %d)\n",
+ new_state->crtc_w, new_state->crtc_h);
+ return -EINVAL;
+ }
+ surface = vmw_user_object_surface(&vps->uo);
+ if (!surface || !surface->snooper.image) {
+ drm_warn(&vmw->drm,
+ "surface not suitable for cursor\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
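Worth noting: only the legacy snooper path is pinned to exactly VMW_CURSOR_SNOOP_WIDTH x VMW_CURSOR_SNOOP_HEIGHT (64x64); MOB-backed cursors are instead bounded at prepare_fb time by SVGA_REG_CURSOR_MAX_DIMENSION and SVGA_REG_MOB_MAX_SIZE in vmw_cursor_mob_get().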
+
+void
+vmw_cursor_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_state =
+ drm_atomic_get_old_plane_state(state, plane);
+ struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
+ struct vmw_private *dev_priv = vmw_priv(plane->dev);
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ s32 hotspot_x, hotspot_y, cursor_x, cursor_y;
+
+ /*
+ * Hide the cursor if the new bo is null
+ */
+ if (vmw_user_object_is_null(&vps->uo)) {
+ vmw_cursor_update_position(dev_priv, false, 0, 0);
+ return;
+ }
+
+ switch (vps->cursor.update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ vmw_cursor_plane_update_legacy(dev_priv, vps);
+ break;
+ case VMW_CURSOR_UPDATE_MOB:
+ vmw_cursor_update_mob(dev_priv, vps);
+ break;
+ case VMW_CURSOR_UPDATE_NONE:
+ /* do nothing */
+ break;
+ }
+
+ /*
+ * For all update types update the cursor position
+ */
+ cursor_x = new_state->crtc_x + du->set_gui_x;
+ cursor_y = new_state->crtc_y + du->set_gui_y;
+
+ hotspot_x = vps->cursor.legacy.hotspot_x + new_state->hotspot_x;
+ hotspot_y = vps->cursor.legacy.hotspot_y + new_state->hotspot_y;
+
+ vmw_cursor_update_position(dev_priv, true, cursor_x + hotspot_x,
+ cursor_y + hotspot_y);
+}
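A worked example of the position math, with assumed numbers: a cursor at crtc coordinates (10, 20) on a display unit whose GUI origin is (1920, 0), with a combined hotspot of (4, 4), ends up at device coordinates (1934, 24):

	/* All values assumed for illustration. */
	cursor_x = 10 + 1920;	/* new_state->crtc_x + du->set_gui_x */
	cursor_y = 20 + 0;	/* new_state->crtc_y + du->set_gui_y */
	vmw_cursor_update_position(dev_priv, true,
				   cursor_x + 4, cursor_y + 4);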
+
+int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_cursor_bypass_arg *arg = data;
+ struct vmw_display_unit *du;
+ struct vmw_plane_state *vps;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ du = vmw_crtc_to_du(crtc);
+ vps = vmw_plane_state_to_vps(du->cursor.base.state);
+ vps->cursor.legacy.hotspot_x = arg->xhot;
+ vps->cursor.legacy.hotspot_y = arg->yhot;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return 0;
+ }
+
+ crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
+ if (!crtc) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ du = vmw_crtc_to_du(crtc);
+ vps = vmw_plane_state_to_vps(du->cursor.base.state);
+ vps->cursor.legacy.hotspot_x = arg->xhot;
+ vps->cursor.legacy.hotspot_y = arg->yhot;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+void *vmw_cursor_snooper_create(struct drm_file *file_priv,
+ struct vmw_surface_metadata *metadata)
+{
+ if (!file_priv->atomic && metadata->scanout &&
+ metadata->num_sizes == 1 &&
+ metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
+ metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
+ metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+ const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
+ VMW_CURSOR_SNOOP_HEIGHT *
+ desc->pitchBytesPerBlock;
+ void *image = kzalloc(cursor_size_bytes, GFP_KERNEL);
+
+ if (!image) {
+ DRM_ERROR("Failed to allocate cursor_image\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ return image;
+ }
+ return NULL;
+}
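For scale, assuming the 4-bytes-per-pixel block size of the A8R8G8B8 snoop format, the allocation above is 64 * 64 * 4 = 16 KiB per snooped cursor surface. The function deliberately returns NULL (no snooper, not an error) for atomic clients and for any surface that doesn't exactly match the legacy cursor shape.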
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
new file mode 100644
index 000000000000..40694925a70e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ **************************************************************************/
+
+#ifndef VMWGFX_CURSOR_PLANE_H
+#define VMWGFX_CURSOR_PLANE_H
+
+#include "device_include/svga3d_cmd.h"
+#include "drm/drm_file.h"
+#include "drm/drm_fourcc.h"
+#include "drm/drm_plane.h"
+
+#include <linux/types.h>
+
+struct SVGA3dCmdHeader;
+struct ttm_buffer_object;
+struct vmw_bo;
+struct vmw_cursor;
+struct vmw_private;
+struct vmw_surface;
+struct vmw_user_object;
+
+#define vmw_plane_to_vcp(x) container_of(x, struct vmw_cursor_plane, base)
+
+static const u32 __maybe_unused vmw_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+enum vmw_cursor_update_type {
+ VMW_CURSOR_UPDATE_NONE = 0,
+ VMW_CURSOR_UPDATE_LEGACY,
+ VMW_CURSOR_UPDATE_MOB,
+};
+
+struct vmw_cursor_plane_state {
+ enum vmw_cursor_update_type update_type;
+ bool changed;
+ bool surface_changed;
+ struct vmw_bo *mob;
+ struct {
+ s32 hotspot_x;
+ s32 hotspot_y;
+ u32 id;
+ } legacy;
+};
+
+/**
+ * struct vmw_cursor_plane - Derived class for the cursor plane object
+ *
+ * @base: DRM plane object
+ * @cursor_mobs: Cursor mobs available for re-use
+ */
+struct vmw_cursor_plane {
+ struct drm_plane base;
+
+ struct vmw_bo *cursor_mobs[3];
+};
+
+struct vmw_surface_metadata;
+void *vmw_cursor_snooper_create(struct drm_file *file_priv,
+ struct vmw_surface_metadata *metadata);
+void vmw_cursor_cmd_dma_snoop(SVGA3dCmdHeader *header,
+ struct vmw_surface *srf,
+ struct ttm_buffer_object *bo);
+
+void vmw_cursor_plane_destroy(struct drm_plane *plane);
+
+int vmw_cursor_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+void vmw_cursor_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state);
+void vmw_cursor_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+
+#endif /* VMWGFX_CURSOR_PLANE_H */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0f32471c8533..0695a342b1ef 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,31 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
-
#include "vmwgfx_drv.h"
#include "vmwgfx_bo.h"
@@ -1324,9 +1304,6 @@ static void vmw_master_set(struct drm_device *dev,
static void vmw_master_drop(struct drm_device *dev,
struct drm_file *file_priv)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
-
- vmw_kms_legacy_hotspot_clear(dev_priv);
}
bool vmwgfx_supported(struct vmw_private *vmw)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5275ef632d4b..594af8eb04c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,29 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
#ifndef _VMWGFX_DRV_H_
@@ -58,7 +38,7 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 20
+#define VMWGFX_DRIVER_MINOR 21
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_NUM_DISPLAY_UNITS 8
@@ -100,10 +80,6 @@
#define VMW_RES_SHADER ttm_driver_type4
#define VMW_RES_HT_ORDER 12
-#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
-#define VMW_CURSOR_SNOOP_WIDTH 64
-#define VMW_CURSOR_SNOOP_HEIGHT 64
-
#define MKSSTAT_CAPACITY_LOG2 5U
#define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)
@@ -201,7 +177,7 @@ enum vmw_cmdbuf_res_type {
struct vmw_cmdbuf_res_manager;
struct vmw_cursor_snooper {
- size_t age;
+ size_t id;
uint32_t *image;
};
@@ -846,9 +822,7 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
* GEM related functionality - vmwgfx_gem.c
*/
struct vmw_bo_params;
-int vmw_gem_object_create(struct vmw_private *vmw,
- struct vmw_bo_params *params,
- struct vmw_bo **p_vbo);
+extern const struct drm_gem_object_funcs vmw_gem_object_funcs;
extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
@@ -1050,7 +1024,6 @@ int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
@@ -1067,7 +1040,6 @@ int vmw_kms_present(struct vmw_private *dev_priv,
uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);
@@ -1393,8 +1365,10 @@ int vmw_mksstat_remove_all(struct vmw_private *dev_priv);
DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
/* Resource dirtying - vmwgfx_page_dirty.c */
+bool vmw_bo_is_dirty(struct vmw_bo *vbo);
void vmw_bo_dirty_scan(struct vmw_bo *vbo);
int vmw_bo_dirty_add(struct vmw_bo *vbo);
+void vmw_bo_dirty_clear(struct vmw_bo *vbo);
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
void vmw_bo_dirty_clear_res(struct vmw_resource *res);
void vmw_bo_dirty_release(struct vmw_bo *vbo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 2e52d73eba48..e831e324e737 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,29 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
+
#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
@@ -4086,6 +4068,23 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
return 0;
}
+/*
+ * DMA fence callback to remove a seqno_waiter
+ */
+struct seqno_waiter_rm_context {
+ struct dma_fence_cb base;
+ struct vmw_private *dev_priv;
+};
+
+static void seqno_waiter_rm_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ struct seqno_waiter_rm_context *ctx =
+ container_of(cb, struct seqno_waiter_rm_context, base);
+
+ vmw_seqno_waiter_remove(ctx->dev_priv);
+ kfree(ctx);
+}
+
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands, void *kernel_commands,
@@ -4266,6 +4265,15 @@ int vmw_execbuf_process(struct drm_file *file_priv,
} else {
/* Link the fence with the FD created earlier */
fd_install(out_fence_fd, sync_file->file);
+ struct seqno_waiter_rm_context *ctx =
+ kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx) {
+ ctx->dev_priv = dev_priv;
+ vmw_seqno_waiter_add(dev_priv);
+ if (dma_fence_add_callback(&fence->base, &ctx->base,
+ seqno_waiter_rm_cb) < 0) {
+ vmw_seqno_waiter_remove(dev_priv);
+ kfree(ctx);
+ }
+ }
}
}
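One subtlety worth spelling out: dma_fence_add_callback() fails (with -ENOENT) when the fence has already signaled, and in that case the callback will never run; the error branch above therefore drops the seqno waiter and frees the context inline, so the accounting balances on both the fast and slow paths.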
@@ -4512,8 +4520,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out;
- vmw_kms_cursor_post_execbuf(dev_priv);
-
out:
if (in_fence)
dma_fence_put(in_fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index ed5015ced392..c55382167c1b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -84,11 +84,11 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
int ret;
- if (obj->import_attach) {
- ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
+ if (drm_gem_is_imported(obj)) {
+ ret = dma_buf_vmap(obj->dma_buf, map);
if (!ret) {
if (drm_WARN_ON(obj->dev, map->is_iomem)) {
- dma_buf_vunmap(obj->import_attach->dmabuf, map);
+ dma_buf_vunmap(obj->dma_buf, map);
return -EIO;
}
}
@@ -101,8 +101,8 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
- if (obj->import_attach)
- dma_buf_vunmap(obj->import_attach->dmabuf, map);
+ if (drm_gem_is_imported(obj))
+ dma_buf_vunmap(obj->dma_buf, map);
else
drm_gem_ttm_vunmap(obj, map);
}
@@ -111,7 +111,7 @@ static int vmw_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
int ret;
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
/*
* Reset both vm_ops and vm_private_data, so we don't end up with
* vm_ops pointing to our implementation if the dma-buf backend
@@ -140,7 +140,7 @@ static const struct vm_operations_struct vmw_vm_ops = {
.close = ttm_bo_vm_close,
};
-static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
+const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.free = vmw_gem_object_free,
.open = vmw_gem_object_open,
.close = vmw_gem_object_close,
@@ -154,20 +154,6 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.vm_ops = &vmw_vm_ops,
};
-int vmw_gem_object_create(struct vmw_private *vmw,
- struct vmw_bo_params *params,
- struct vmw_bo **p_vbo)
-{
- int ret = vmw_bo_create(vmw, params, p_vbo);
-
- if (ret != 0)
- goto out_no_bo;
-
- (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
-out_no_bo:
- return ret;
-}
-
int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
@@ -183,7 +169,7 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
.pin = false
};
- ret = vmw_gem_object_create(dev_priv, &params, p_vbo);
+ ret = vmw_bo_create(dev_priv, &params, p_vbo);
if (ret != 0)
goto out_no_bo;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 1912ac1cde6d..05b1c54a070c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,33 +1,15 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
+
#include "vmwgfx_kms.h"
#include "vmwgfx_bo.h"
+#include "vmwgfx_resource_priv.h"
#include "vmwgfx_vkms.h"
#include "vmw_surface_cache.h"
@@ -59,474 +41,6 @@ void vmw_du_cleanup(struct vmw_display_unit *du)
drm_connector_cleanup(&du->connector);
}
-/*
- * Display Unit Cursor functions
- */
-
-static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
-static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY);
-
-struct vmw_svga_fifo_cmd_define_cursor {
- u32 cmd;
- SVGAFifoCmdDefineAlphaCursor cursor;
-};
-
-/**
- * vmw_send_define_cursor_cmd - queue a define cursor command
- * @dev_priv: the private driver struct
- * @image: buffer which holds the cursor image
- * @width: width of the mouse cursor image
- * @height: height of the mouse cursor image
- * @hotspotX: the horizontal position of mouse hotspot
- * @hotspotY: the vertical position of mouse hotspot
- */
-static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- struct vmw_svga_fifo_cmd_define_cursor *cmd;
- const u32 image_size = width * height * sizeof(*image);
- const u32 cmd_size = sizeof(*cmd) + image_size;
-
- /* Try to reserve fifocmd space and swallow any failures;
- such reservations cannot be left unconsumed for long
- under the risk of clogging other fifocmd users, so
- we treat reservations separtely from the way we treat
- other fallible KMS-atomic resources at prepare_fb */
- cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
-
- if (unlikely(!cmd))
- return;
-
- memset(cmd, 0, sizeof(*cmd));
-
- memcpy(&cmd[1], image, image_size);
-
- cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
- cmd->cursor.id = 0;
- cmd->cursor.width = width;
- cmd->cursor.height = height;
- cmd->cursor.hotspotX = hotspotX;
- cmd->cursor.hotspotY = hotspotY;
-
- vmw_cmd_commit_flush(dev_priv, cmd_size);
-}
-
-/**
- * vmw_cursor_update_image - update the cursor image on the provided plane
- * @dev_priv: the private driver struct
- * @vps: the plane state of the cursor plane
- * @image: buffer which holds the cursor image
- * @width: width of the mouse cursor image
- * @height: height of the mouse cursor image
- * @hotspotX: the horizontal position of mouse hotspot
- * @hotspotY: the vertical position of mouse hotspot
- */
-static void vmw_cursor_update_image(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- if (vps->cursor.bo)
- vmw_cursor_update_mob(dev_priv, vps, image,
- vps->base.crtc_w, vps->base.crtc_h,
- hotspotX, hotspotY);
-
- else
- vmw_send_define_cursor_cmd(dev_priv, image, width, height,
- hotspotX, hotspotY);
-}
-
-
-/**
- * vmw_cursor_update_mob - Update cursor vis CursorMob mechanism
- *
- * Called from inside vmw_du_cursor_plane_atomic_update to actually
- * make the cursor-image live.
- *
- * @dev_priv: device to work with
- * @vps: the plane state of the cursor plane
- * @image: cursor source data to fill the MOB with
- * @width: source data width
- * @height: source data height
- * @hotspotX: cursor hotspot x
- * @hotspotY: cursor hotspot Y
- */
-static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- SVGAGBCursorHeader *header;
- SVGAGBAlphaCursorHeader *alpha_header;
- const u32 image_size = width * height * sizeof(*image);
-
- header = vmw_bo_map_and_cache(vps->cursor.bo);
- alpha_header = &header->header.alphaHeader;
-
- memset(header, 0, sizeof(*header));
-
- header->type = SVGA_ALPHA_CURSOR;
- header->sizeInBytes = image_size;
-
- alpha_header->hotspotX = hotspotX;
- alpha_header->hotspotY = hotspotY;
- alpha_header->width = width;
- alpha_header->height = height;
-
- memcpy(header + 1, image, image_size);
- vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
- vps->cursor.bo->tbo.resource->start);
-}
-
-
-static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
-{
- return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
-}
-
-/**
- * vmw_du_cursor_plane_acquire_image -- Acquire the image data
- * @vps: cursor plane state
- */
-static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
-{
- struct vmw_surface *surf;
-
- if (vmw_user_object_is_null(&vps->uo))
- return NULL;
-
- surf = vmw_user_object_surface(&vps->uo);
- if (surf && !vmw_user_object_is_mapped(&vps->uo))
- return surf->snooper.image;
-
- return vmw_user_object_map(&vps->uo);
-}
-
-static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
- struct vmw_plane_state *new_vps)
-{
- void *old_image;
- void *new_image;
- u32 size;
- bool changed;
-
- if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
- old_vps->base.crtc_h != new_vps->base.crtc_h)
- return true;
-
- if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
- old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
- return true;
-
- size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
-
- old_image = vmw_du_cursor_plane_acquire_image(old_vps);
- new_image = vmw_du_cursor_plane_acquire_image(new_vps);
-
- changed = false;
- if (old_image && new_image && old_image != new_image)
- changed = memcmp(old_image, new_image, size) != 0;
-
- return changed;
-}
-
-static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
-{
- if (!(*vbo))
- return;
-
- ttm_bo_unpin(&(*vbo)->tbo);
- vmw_bo_unreference(vbo);
-}
-
-static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
- struct vmw_plane_state *vps)
-{
- u32 i;
-
- if (!vps->cursor.bo)
- return;
-
- vmw_du_cursor_plane_unmap_cm(vps);
-
- /* Look for a free slot to return this mob to the cache. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (!vcp->cursor_mobs[i]) {
- vcp->cursor_mobs[i] = vps->cursor.bo;
- vps->cursor.bo = NULL;
- return;
- }
- }
-
- /* Cache is full: See if this mob is bigger than an existing mob. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (vcp->cursor_mobs[i]->tbo.base.size <
- vps->cursor.bo->tbo.base.size) {
- vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
- vcp->cursor_mobs[i] = vps->cursor.bo;
- vps->cursor.bo = NULL;
- return;
- }
- }
-
- /* Destroy it if it's not worth caching. */
- vmw_du_destroy_cursor_mob(&vps->cursor.bo);
-}
-
-static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
- struct vmw_plane_state *vps)
-{
- struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
- u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
- u32 i;
- u32 cursor_max_dim, mob_max_size;
- struct vmw_fence_obj *fence = NULL;
- int ret;
-
- if (!dev_priv->has_mob ||
- (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
- return -EINVAL;
-
- mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
- cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
-
- if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
- vps->base.crtc_h > cursor_max_dim)
- return -EINVAL;
-
- if (vps->cursor.bo) {
- if (vps->cursor.bo->tbo.base.size >= size)
- return 0;
- vmw_du_put_cursor_mob(vcp, vps);
- }
-
- /* Look for an unused mob in the cache. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (vcp->cursor_mobs[i] &&
- vcp->cursor_mobs[i]->tbo.base.size >= size) {
- vps->cursor.bo = vcp->cursor_mobs[i];
- vcp->cursor_mobs[i] = NULL;
- return 0;
- }
- }
- /* Create a new mob if we can't find an existing one. */
- ret = vmw_bo_create_and_populate(dev_priv, size,
- VMW_BO_DOMAIN_MOB,
- &vps->cursor.bo);
-
- if (ret != 0)
- return ret;
-
- /* Fence the mob creation so we are guarateed to have the mob */
- ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
- if (ret != 0)
- goto teardown;
-
- ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- if (ret != 0) {
- ttm_bo_unreserve(&vps->cursor.bo->tbo);
- goto teardown;
- }
-
- dma_fence_wait(&fence->base, false);
- dma_fence_put(&fence->base);
-
- ttm_bo_unreserve(&vps->cursor.bo->tbo);
- return 0;
-
-teardown:
- vmw_du_destroy_cursor_mob(&vps->cursor.bo);
- return ret;
-}
-
-
-static void vmw_cursor_update_position(struct vmw_private *dev_priv,
- bool show, int x, int y)
-{
- const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
- : SVGA_CURSOR_ON_HIDE;
- uint32_t count;
-
- spin_lock(&dev_priv->cursor_lock);
- if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
- vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
- } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
- count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
- } else {
- vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
- vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
- vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
- }
- spin_unlock(&dev_priv->cursor_lock);
-}
-
-void vmw_kms_cursor_snoop(struct vmw_surface *srf,
- struct ttm_object_file *tfile,
- struct ttm_buffer_object *bo,
- SVGA3dCmdHeader *header)
-{
- struct ttm_bo_kmap_obj map;
- unsigned long kmap_offset;
- unsigned long kmap_num;
- SVGA3dCopyBox *box;
- unsigned box_count;
- void *virtual;
- bool is_iomem;
- struct vmw_dma_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA dma;
- } *cmd;
- int i, ret;
- const struct SVGA3dSurfaceDesc *desc =
- vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
- const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
-
- cmd = container_of(header, struct vmw_dma_cmd, header);
-
- /* No snooper installed, nothing to copy */
- if (!srf->snooper.image)
- return;
-
- if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
- DRM_ERROR("face and mipmap for cursors should never != 0\n");
- return;
- }
-
- if (cmd->header.size < 64) {
- DRM_ERROR("at least one full copy box must be given\n");
- return;
- }
-
- box = (SVGA3dCopyBox *)&cmd[1];
- box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
- sizeof(SVGA3dCopyBox);
-
- if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
- box->x != 0 || box->y != 0 || box->z != 0 ||
- box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
- box->d != 1 || box_count != 1 ||
- box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
- /* TODO handle none page aligned offsets */
- /* TODO handle more dst & src != 0 */
- /* TODO handle more then one copy */
- DRM_ERROR("Can't snoop dma request for cursor!\n");
- DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
- box->srcx, box->srcy, box->srcz,
- box->x, box->y, box->z,
- box->w, box->h, box->d, box_count,
- cmd->dma.guest.ptr.offset);
- return;
- }
-
- kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
- kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
-
- ret = ttm_bo_reserve(bo, true, false, NULL);
- if (unlikely(ret != 0)) {
- DRM_ERROR("reserve failed\n");
- return;
- }
-
- ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0))
- goto err_unreserve;
-
- virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
-
- if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
- memcpy(srf->snooper.image, virtual,
- VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
- } else {
- /* Image is unsigned pointer. */
- for (i = 0; i < box->h; i++)
- memcpy(srf->snooper.image + i * image_pitch,
- virtual + i * cmd->dma.guest.pitch,
- box->w * desc->pitchBytesPerBlock);
- }
-
- srf->snooper.age++;
-
- ttm_bo_kunmap(&map);
-err_unreserve:
- ttm_bo_unreserve(bo);
-}
-
-/**
- * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
- *
- * @dev_priv: Pointer to the device private struct.
- *
- * Clears all legacy hotspots.
- */
-void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
-
- drm_modeset_lock_all(dev);
- drm_for_each_crtc(crtc, dev) {
- du = vmw_crtc_to_du(crtc);
-
- du->hotspot_x = 0;
- du->hotspot_y = 0;
- }
- drm_modeset_unlock_all(dev);
-}
-
-void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
-
- mutex_lock(&dev->mode_config.mutex);
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- du = vmw_crtc_to_du(crtc);
- if (!du->cursor_surface ||
- du->cursor_age == du->cursor_surface->snooper.age ||
- !du->cursor_surface->snooper.image)
- continue;
-
- du->cursor_age = du->cursor_surface->snooper.age;
- vmw_send_define_cursor_cmd(dev_priv,
- du->cursor_surface->snooper.image,
- VMW_CURSOR_SNOOP_WIDTH,
- VMW_CURSOR_SNOOP_HEIGHT,
- du->hotspot_x + du->core_hotspot_x,
- du->hotspot_y + du->core_hotspot_y);
- }
-
- mutex_unlock(&dev->mode_config.mutex);
-}
-
-
-void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
-{
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- u32 i;
-
- vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
-
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
- vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
-
- drm_plane_cleanup(plane);
-}
-
void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
@@ -575,262 +89,6 @@ vmw_du_plane_cleanup_fb(struct drm_plane *plane,
/**
- * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
- *
- * @vps: plane_state
- *
- * Returns 0 on success
- */
-
-static int
-vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
-{
- int ret;
- u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
- struct ttm_buffer_object *bo;
-
- if (!vps->cursor.bo)
- return -EINVAL;
-
- bo = &vps->cursor.bo->tbo;
-
- if (bo->base.size < size)
- return -EINVAL;
-
- if (vps->cursor.bo->map.virtual)
- return 0;
-
- ret = ttm_bo_reserve(bo, false, false, NULL);
- if (unlikely(ret != 0))
- return -ENOMEM;
-
- vmw_bo_map_and_cache(vps->cursor.bo);
-
- ttm_bo_unreserve(bo);
-
- if (unlikely(ret != 0))
- return -ENOMEM;
-
- return 0;
-}
-
-
-/**
- * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
- *
- * @vps: state of the cursor plane
- *
- * Returns 0 on success
- */
-
-static int
-vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
-{
- int ret = 0;
- struct vmw_bo *vbo = vps->cursor.bo;
-
- if (!vbo || !vbo->map.virtual)
- return 0;
-
- ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
- if (likely(ret == 0)) {
- vmw_bo_unmap(vbo);
- ttm_bo_unreserve(&vbo->tbo);
- }
-
- return ret;
-}
-
-
-/**
- * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
- *
- * @plane: cursor plane
- * @old_state: contains the state to clean up
- *
- * Unmaps all cursor bo mappings and unpins the cursor surface
- *
- * Returns 0 on success
- */
-void
-vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
-
- if (!vmw_user_object_is_null(&vps->uo))
- vmw_user_object_unmap(&vps->uo);
-
- vmw_du_cursor_plane_unmap_cm(vps);
- vmw_du_put_cursor_mob(vcp, vps);
-
- vmw_du_plane_unpin_surf(vps);
- vmw_user_object_unref(&vps->uo);
-}
-
-
-/**
- * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
- *
- * @plane: display plane
- * @new_state: info on the new plane state, including the FB
- *
- * Returns 0 on success
- */
-int
-vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct drm_framebuffer *fb = new_state->fb;
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
- struct vmw_bo *bo = NULL;
- int ret = 0;
-
- if (!vmw_user_object_is_null(&vps->uo)) {
- vmw_user_object_unmap(&vps->uo);
- vmw_user_object_unref(&vps->uo);
- }
-
- if (fb) {
- if (vmw_framebuffer_to_vfb(fb)->bo) {
- vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
- vps->uo.surface = NULL;
- } else {
- memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
- }
- vmw_user_object_ref(&vps->uo);
- }
-
- bo = vmw_user_object_buffer(&vps->uo);
- if (bo) {
- struct ttm_operation_ctx ctx = {false, false};
-
- ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
- if (ret != 0)
- return -ENOMEM;
-
- ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (ret != 0)
- return -ENOMEM;
-
- vmw_bo_pin_reserved(bo, true);
- if (vmw_framebuffer_to_vfb(fb)->bo) {
- const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
-
- (void)vmw_bo_map_and_cache_size(bo, size);
- } else {
- vmw_bo_map_and_cache(bo);
- }
- ttm_bo_unreserve(&bo->tbo);
- }
-
- if (!vmw_user_object_is_null(&vps->uo)) {
- vmw_du_get_cursor_mob(vcp, vps);
- vmw_du_cursor_plane_map_cm(vps);
- }
-
- return 0;
-}
-
-
-void
-vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
- plane);
- struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
- struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
- struct vmw_bo *old_bo = NULL;
- struct vmw_bo *new_bo = NULL;
- struct ww_acquire_ctx ctx;
- s32 hotspot_x, hotspot_y;
- int ret;
-
- hotspot_x = du->hotspot_x + new_state->hotspot_x;
- hotspot_y = du->hotspot_y + new_state->hotspot_y;
-
- du->cursor_surface = vmw_user_object_surface(&vps->uo);
-
- if (vmw_user_object_is_null(&vps->uo)) {
- vmw_cursor_update_position(dev_priv, false, 0, 0);
- return;
- }
-
- vps->cursor.hotspot_x = hotspot_x;
- vps->cursor.hotspot_y = hotspot_y;
-
- if (du->cursor_surface)
- du->cursor_age = du->cursor_surface->snooper.age;
-
- ww_acquire_init(&ctx, &reservation_ww_class);
-
- if (!vmw_user_object_is_null(&old_vps->uo)) {
- old_bo = vmw_user_object_buffer(&old_vps->uo);
- ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
- if (ret != 0)
- return;
- }
-
- if (!vmw_user_object_is_null(&vps->uo)) {
- new_bo = vmw_user_object_buffer(&vps->uo);
- if (old_bo != new_bo) {
- ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
- if (ret != 0) {
- if (old_bo) {
- ttm_bo_unreserve(&old_bo->tbo);
- ww_acquire_fini(&ctx);
- }
- return;
- }
- } else {
- new_bo = NULL;
- }
- }
- if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
- /*
- * If it hasn't changed, avoid making the device do extra
- * work by keeping the old cursor active.
- */
- struct vmw_cursor_plane_state tmp = old_vps->cursor;
- old_vps->cursor = vps->cursor;
- vps->cursor = tmp;
- } else {
- void *image = vmw_du_cursor_plane_acquire_image(vps);
- if (image)
- vmw_cursor_update_image(dev_priv, vps, image,
- new_state->crtc_w,
- new_state->crtc_h,
- hotspot_x, hotspot_y);
- }
-
- if (new_bo)
- ttm_bo_unreserve(&new_bo->tbo);
- if (old_bo)
- ttm_bo_unreserve(&old_bo->tbo);
-
- ww_acquire_fini(&ctx);
-
- du->cursor_x = new_state->crtc_x + du->set_gui_x;
- du->cursor_y = new_state->crtc_y + du->set_gui_y;
-
- vmw_cursor_update_position(dev_priv, true,
- du->cursor_x + hotspot_x,
- du->cursor_y + hotspot_y);
-
- du->core_hotspot_x = hotspot_x - du->hotspot_x;
- du->core_hotspot_y = hotspot_y - du->hotspot_y;
-}
-
-
-/**
* vmw_du_primary_plane_atomic_check - check if the new state is okay
*
* @plane: display plane
@@ -873,66 +131,6 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
return ret;
}
-
-/**
- * vmw_du_cursor_plane_atomic_check - check if the new state is okay
- *
- * @plane: cursor plane
- * @state: info on the new plane state
- *
- * This is a chance to fail if the new cursor state does not fit
- * our requirements.
- *
- * Returns 0 on success
- */
-int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- int ret = 0;
- struct drm_crtc_state *crtc_state = NULL;
- struct vmw_surface *surface = NULL;
- struct drm_framebuffer *fb = new_state->fb;
-
- if (new_state->crtc)
- crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
- new_state->crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- true, true);
- if (ret)
- return ret;
-
- /* Turning off */
- if (!fb)
- return 0;
-
- /* A lot of the code assumes this */
- if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
- DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
- new_state->crtc_w, new_state->crtc_h);
- return -EINVAL;
- }
-
- if (!vmw_framebuffer_to_vfb(fb)->bo) {
- surface = vmw_user_object_surface(&vmw_framebuffer_to_vfbs(fb)->uo);
-
- WARN_ON(!surface);
-
- if (!surface ||
- (!surface->snooper.image && !surface->res.guest_memory_bo)) {
- DRM_ERROR("surface not suitable for cursor\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1076,7 +274,7 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
vps->pinned = 0;
vps->cpp = 0;
- memset(&vps->cursor, 0, sizeof(vps->cursor));
+ vps->cursor.mob = NULL;
/* Each ref counted resource needs to be acquired again */
vmw_user_object_ref(&vps->uo);
@@ -1221,7 +419,20 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
+ struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
+ struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);
+ if (bo) {
+ vmw_bo_dirty_release(bo);
+ /*
+ * bo->dirty is reference counted, so it being NULL
+ * means that the surface wasn't coherent to begin
+ * with, and we have to free the dirty tracker in
+ * the vmw_resource ourselves.
+ */
+ if (!bo->dirty && surf && surf->res.dirty)
+ surf->res.func->dirty_free(&surf->res);
+ }
drm_framebuffer_cleanup(framebuffer);
vmw_user_object_unref(&vfbs->uo);
@@ -1375,6 +586,7 @@ static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
+ vmw_bo_dirty_release(vfbd->buffer);
drm_framebuffer_cleanup(framebuffer);
vmw_bo_unreference(&vfbd->buffer);
@@ -1505,6 +717,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_framebuffer *vfb = NULL;
struct vmw_user_object uo = {0};
+ struct vmw_bo *bo;
+ struct vmw_surface *surface;
int ret;
/* returns either a bo or surface */
@@ -1534,6 +748,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
}
err_out:
+ bo = vmw_user_object_buffer(&uo);
+ surface = vmw_user_object_surface(&uo);
/* vmw_user_object_lookup takes one ref so does new_fb */
vmw_user_object_unref(&uo);
@@ -1542,6 +758,14 @@ err_out:
return ERR_PTR(ret);
}
+ ttm_bo_reserve(&bo->tbo, false, false, NULL);
+ ret = vmw_bo_dirty_add(bo);
+ if (!ret && surface && surface->res.func->dirty_alloc) {
+ surface->res.coherent = true;
+ ret = surface->res.func->dirty_alloc(&surface->res);
+ }
+ ttm_bo_unreserve(&bo->tbo);
+
return &vfb->base;
}
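For reference, the dirty-tracking setup added to vmw_kms_fb_create above pairs with the release paths added to the two framebuffer destroy callbacks earlier in this file. A minimal sketch of the combined lifecycle, assuming "bo" is the framebuffer's backing vmw_bo and "surf" its optional surface; the NULL checks and error handling of the real hunks are elided:

    /* create: start CPU page-dirty tracking on the backing bo */
    ttm_bo_reserve(&bo->tbo, false, false, NULL);
    ret = vmw_bo_dirty_add(bo);
    if (!ret && surf && surf->res.func->dirty_alloc) {
            surf->res.coherent = true;      /* surface mirrors bo dirtiness */
            ret = surf->res.func->dirty_alloc(&surf->res);
    }
    ttm_bo_unreserve(&bo->tbo);

    /* destroy: drop the bo tracker, then any surface-side tracker */
    vmw_bo_dirty_release(bo);
    if (!bo->dirty && surf && surf->res.dirty)
            surf->res.func->dirty_free(&surf->res);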
@@ -1974,44 +1198,6 @@ int vmw_kms_close(struct vmw_private *dev_priv)
return ret;
}
-int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_cursor_bypass_arg *arg = data;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- du = vmw_crtc_to_du(crtc);
- du->hotspot_x = arg->xhot;
- du->hotspot_y = arg->yhot;
- }
-
- mutex_unlock(&dev->mode_config.mutex);
- return 0;
- }
-
- crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
- if (!crtc) {
- ret = -ENOENT;
- goto out;
- }
-
- du = vmw_crtc_to_du(crtc);
-
- du->hotspot_x = arg->xhot;
- du->hotspot_y = arg->yhot;
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
-
- return ret;
-}
-
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bpp, unsigned depth)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 4eab581883e2..511e29cdb987 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,40 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
#ifndef VMWGFX_KMS_H_
#define VMWGFX_KMS_H_
+#include "vmwgfx_cursor_plane.h"
+#include "vmwgfx_drv.h"
+
#include <drm/drm_encoder.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
-#include "vmwgfx_drv.h"
-
/**
* struct vmw_du_update_plane - Closure structure for vmw_du_helper_plane_update
* @plane: Plane which is being updated.
@@ -235,16 +216,11 @@ static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
DRM_FORMAT_XRGB1555,
};
-static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
- DRM_FORMAT_ARGB8888,
-};
-
#define vmw_crtc_state_to_vcs(x) container_of(x, struct vmw_crtc_state, base)
#define vmw_plane_state_to_vps(x) container_of(x, struct vmw_plane_state, base)
#define vmw_connector_state_to_vcs(x) \
container_of(x, struct vmw_connector_state, base)
-#define vmw_plane_to_vcp(x) container_of(x, struct vmw_cursor_plane, base)
/**
* Derived class for crtc state object
@@ -255,11 +231,6 @@ struct vmw_crtc_state {
struct drm_crtc_state base;
};
-struct vmw_cursor_plane_state {
- struct vmw_bo *bo;
- s32 hotspot_x;
- s32 hotspot_y;
-};
/**
* Derived class for plane state object
@@ -283,7 +254,6 @@ struct vmw_plane_state {
/* For CPU Blit */
unsigned int cpp;
- bool surf_mapped;
struct vmw_cursor_plane_state cursor;
};
@@ -317,17 +287,6 @@ struct vmw_connector_state {
int gui_y;
};
-/**
- * Derived class for cursor plane object
- *
- * @base DRM plane object
- * @cursor.cursor_mobs Cursor mobs available for re-use
- */
-struct vmw_cursor_plane {
- struct drm_plane base;
-
- struct vmw_bo *cursor_mobs[3];
-};
/**
* Base class display unit.
@@ -343,17 +302,6 @@ struct vmw_display_unit {
struct drm_plane primary;
struct vmw_cursor_plane cursor;
- struct vmw_surface *cursor_surface;
- size_t cursor_age;
-
- int cursor_x;
- int cursor_y;
-
- int hotspot_x;
- int hotspot_y;
- s32 core_hotspot_x;
- s32 core_hotspot_y;
-
unsigned unit;
/*
@@ -403,8 +351,6 @@ struct vmw_display_unit {
*/
void vmw_du_init(struct vmw_display_unit *du);
void vmw_du_cleanup(struct vmw_display_unit *du);
-void vmw_du_crtc_save(struct drm_crtc *crtc);
-void vmw_du_crtc_restore(struct drm_crtc *crtc);
int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
uint32_t size,
@@ -460,19 +406,10 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv);
/* Universal Plane Helpers */
void vmw_du_primary_plane_destroy(struct drm_plane *plane);
-void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
/* Atomic Helpers */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state);
-int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state);
-void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state);
-int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state);
-void vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state);
void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
void vmw_du_plane_reset(struct drm_plane *plane);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index f0b429525467..c23c9195f0dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -372,7 +372,7 @@ static const struct drm_plane_funcs vmw_ldu_plane_funcs = {
static const struct drm_plane_funcs vmw_ldu_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -383,10 +383,10 @@ static const struct drm_plane_funcs vmw_ldu_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_ldu_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 7055cbefc768..d8204d4265d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -282,8 +282,7 @@ out_no_setup:
}
vmw_bo_unpin_unlocked(&batch->otable_bo->tbo);
- ttm_bo_put(&batch->otable_bo->tbo);
- batch->otable_bo = NULL;
+ vmw_bo_unreference(&batch->otable_bo);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 74ff2812d66a..7de20e56082c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -1,27 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2019-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2019-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
#include "vmwgfx_bo.h"
@@ -71,6 +52,11 @@ struct vmw_bo_dirty {
unsigned long bitmap[];
};
+bool vmw_bo_is_dirty(struct vmw_bo *vbo)
+{
+ return vbo->dirty && (vbo->dirty->start < vbo->dirty->end);
+}
+
/**
* vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
* @vbo: The buffer object to scan
@@ -341,6 +327,41 @@ void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
dirty->end = res_start;
}
+void vmw_bo_dirty_clear(struct vmw_bo *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ pgoff_t start, cur, end;
+ unsigned long res_start = 0;
+ unsigned long res_end = vbo->tbo.base.size;
+
+ WARN_ON_ONCE(res_start & ~PAGE_MASK);
+ res_start >>= PAGE_SHIFT;
+ res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);
+
+ if (res_start >= dirty->end || res_end <= dirty->start)
+ return;
+
+ cur = max(res_start, dirty->start);
+ res_end = max(res_end, dirty->end);
+ while (cur < res_end) {
+ unsigned long num;
+
+ start = find_next_bit(&dirty->bitmap[0], res_end, cur);
+ if (start >= res_end)
+ break;
+
+ end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
+ cur = end + 1;
+ num = end - start;
+ bitmap_clear(&dirty->bitmap[0], start, num);
+ }
+
+ if (res_start <= dirty->start && res_end > dirty->start)
+ dirty->start = res_end;
+ if (res_start < dirty->end && res_end >= dirty->end)
+ dirty->end = res_start;
+}
+
/**
* vmw_bo_dirty_clear_res - Clear a resource's dirty region from
* its backing mob.
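The two helpers added above give callers a cheap dirty test and a way to discard accumulated dirt without transferring it to a resource. A hedged usage sketch, assuming "vbo" is a tracked vmw_bo whose reservation is already held:

    if (vmw_bo_is_dirty(vbo)) {
            /*
             * Pages were written through a CPU mapping since the last
             * scan; drop the records instead of flushing them.
             */
            vmw_bo_dirty_clear(vbo);
    }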
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a73af8a355fb..388011696941 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -273,7 +273,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
goto out_bad_resource;
res = converter->base_obj_to_res(base);
- kref_get(&res->kref);
+ vmw_resource_reference(res);
*p_res = res;
ret = 0;
@@ -347,7 +347,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
return 0;
}
- ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
+ ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);
if (unlikely(ret != 0))
goto out_no_bo;
@@ -531,9 +531,9 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
}
INIT_LIST_HEAD(&val_list);
- ttm_bo_get(&res->guest_memory_bo->tbo);
val_buf->bo = &res->guest_memory_bo->tbo;
val_buf->num_shared = 0;
+ drm_gem_object_get(&val_buf->bo->base);
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
@@ -557,7 +557,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
out_no_validate:
ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
- ttm_bo_put(val_buf->bo);
+ drm_gem_object_put(&val_buf->bo->base);
val_buf->bo = NULL;
if (guest_memory_dirty)
vmw_user_bo_unref(&res->guest_memory_bo);
@@ -619,7 +619,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
ttm_eu_backoff_reservation(ticket, &val_list);
- ttm_bo_put(val_buf->bo);
+ drm_gem_object_put(&val_buf->bo->base);
val_buf->bo = NULL;
}
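The substitutions above, and the matching ones in vmwgfx_validation.c below, replace raw TTM refcounting with references on the embedded GEM object, so lookups and validation lists keep the whole GEM/TTM object alive through a single counter. A minimal sketch of the new pattern, assuming "vbo" is a valid vmw_bo:

    struct ttm_buffer_object *tbo = &vbo->tbo;

    drm_gem_object_get(&tbo->base);         /* was: ttm_bo_get(tbo) */
    /* ... put tbo on a validation list and use it ... */
    drm_gem_object_put(&tbo->base);         /* was: ttm_bo_put(tbo) */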
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 32029d80b72b..5f5f5a94301f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -445,7 +445,7 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* resume the overlays, this is preferred to failing to alloc.
*/
vmw_overlay_pause_all(dev_priv);
- ret = vmw_gem_object_create(dev_priv, &bo_params, &vps->uo.buffer);
+ ret = vmw_bo_create(dev_priv, &bo_params, &vps->uo.buffer);
vmw_overlay_resume_all(dev_priv);
if (ret)
return ret;
@@ -764,7 +764,7 @@ static const struct drm_plane_funcs vmw_sou_plane_funcs = {
static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -775,10 +775,10 @@ static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index f5d2ed1b0a72..20aab725e53a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1482,7 +1482,7 @@ static const struct drm_plane_funcs vmw_stdu_plane_funcs = {
static const struct drm_plane_funcs vmw_stdu_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -1494,10 +1494,10 @@ static const struct drm_plane_funcs vmw_stdu_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_stdu_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
@@ -1584,6 +1584,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_plane_helper_add(&cursor->base, &vmw_stdu_cursor_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(&cursor->base);
ret = drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 5721c74da3e0..7e281c3c6bc5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,32 +1,13 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
#include "vmwgfx_bo.h"
+#include "vmwgfx_cursor_plane.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
@@ -658,7 +639,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_user_surface *user_srf =
container_of(srf, struct vmw_user_surface, srf);
- WARN_ON_ONCE(res->dirty);
+ WARN_ON(res->dirty);
if (user_srf->master)
drm_master_put(&user_srf->master);
kfree(srf->offsets);
@@ -689,8 +670,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
* Dumb buffers own the resource and they'll unref the
* resource themselves
*/
- if (res && res->guest_memory_bo && res->guest_memory_bo->is_dumb)
- return;
+ WARN_ON(res && res->guest_memory_bo && res->guest_memory_bo->is_dumb);
vmw_resource_unreference(&res);
}
@@ -818,25 +798,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
}
res->guest_memory_size = cur_bo_offset;
- if (!file_priv->atomic &&
- metadata->scanout &&
- metadata->num_sizes == 1 &&
- metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
- metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
- metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
- const struct SVGA3dSurfaceDesc *desc =
- vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
- const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
- VMW_CURSOR_SNOOP_HEIGHT *
- desc->pitchBytesPerBlock;
- srf->snooper.image = kzalloc(cursor_size_bytes, GFP_KERNEL);
- if (!srf->snooper.image) {
- DRM_ERROR("Failed to allocate cursor_image\n");
- ret = -ENOMEM;
- goto out_no_copy;
- }
- } else {
- srf->snooper.image = NULL;
+
+ srf->snooper.image = vmw_cursor_snooper_create(file_priv, metadata);
+ if (IS_ERR(srf->snooper.image)) {
+ ret = PTR_ERR(srf->snooper.image);
+ goto out_no_copy;
}
if (drm_is_primary_client(file_priv))
@@ -864,14 +830,17 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
.pin = false
};
- ret = vmw_gem_object_create(dev_priv,
- &params,
- &res->guest_memory_bo);
+ ret = vmw_bo_create(dev_priv, &params, &res->guest_memory_bo);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
- vmw_bo_add_detached_resource(res->guest_memory_bo, res);
}
tmp = vmw_resource_reference(&srf->res);
@@ -1670,6 +1639,14 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
+ if (res->guest_memory_bo) {
+ ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+ }
+
tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
VMW_RES_SURFACE,
@@ -1684,7 +1661,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
rep->handle = user_srf->prime.base.handle;
rep->backup_size = res->guest_memory_size;
if (res->guest_memory_bo) {
- vmw_bo_add_detached_resource(res->guest_memory_bo, res);
rep->buffer_map_handle =
drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node);
rep->buffer_size = res->guest_memory_bo->tbo.base.size;
@@ -2358,12 +2334,19 @@ int vmw_dumb_create(struct drm_file *file_priv,
vbo = res->guest_memory_bo;
vbo->is_dumb = true;
vbo->dumb_surface = vmw_res_to_srf(res);
-
+ drm_gem_object_put(&vbo->tbo.base);
+ /*
+	 * Unset the user surface dtor since this is not actually exposed
+	 * to userspace. The surface is owned via the dumb_buffer's GEM handle.
+ */
+ struct vmw_user_surface *usurf = container_of(vbo->dumb_surface,
+ struct vmw_user_surface, srf);
+ usurf->prime.base.refcount_release = NULL;
err:
if (res)
vmw_resource_unreference(&res);
- if (ret)
- ttm_ref_object_base_unref(tfile, arg.rep.handle);
+
+ ttm_ref_object_base_unref(tfile, arg.rep.handle);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index e7625b3f71e0..7ee93e7191c7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -262,9 +262,8 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
bo_node->hash.key);
}
val_buf = &bo_node->base;
- val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo);
- if (!val_buf->bo)
- return -ESRCH;
+ vmw_bo_reference(vbo);
+ val_buf->bo = &vbo->tbo;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &ctx->bo_list);
}
@@ -656,7 +655,7 @@ void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
struct vmw_validation_res_node *val;
list_for_each_entry(entry, &ctx->bo_list, base.head) {
- ttm_bo_put(entry->base.bo);
+ drm_gem_object_put(&entry->base.bo->base);
entry->base.bo = NULL;
}
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 5c2f459a2925..9bce047901b2 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -39,7 +39,6 @@ config DRM_XE
select DRM_TTM_HELPER
select DRM_EXEC
select DRM_GPUVM
- select DRM_GPUSVM if !UML && DEVICE_PRIVATE
select DRM_SCHED
select MMU_NOTIFIER
select WANT_DEV_COREDUMP
@@ -74,9 +73,22 @@ config DRM_XE_DP_TUNNEL
If in doubt say "Y".
+config DRM_XE_GPUSVM
+ bool "Enable CPU to GPU address mirroring"
+ depends on DRM_XE
+ depends on !UML
+ depends on DEVICE_PRIVATE
+ default y
+ select DRM_GPUSVM
+ help
+ Enable this option if you want support for CPU to GPU address
+ mirroring.
+
+	  If in doubt say "Y".
+
config DRM_XE_DEVMEM_MIRROR
bool "Enable device memory mirror"
- depends on DRM_XE
+ depends on DRM_XE_GPUSVM
select GET_FREE_REGION
default y
help
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 9699b08585f7..e4bf484d4121 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -125,12 +125,13 @@ xe-y += xe_bb.o \
xe_wopcm.o
xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o
-xe-$(CONFIG_DRM_GPUSVM) += xe_svm.o
+xe-$(CONFIG_DRM_XE_GPUSVM) += xe_svm.o
# graphics hardware monitoring (HWMON) support
xe-$(CONFIG_HWMON) += xe_hwmon.o
xe-$(CONFIG_PERF_EVENTS) += xe_pmu.o
+xe-$(CONFIG_CONFIGFS_FS) += xe_configfs.o
# graphics virtualization (SR-IOV) support
xe-y += \
@@ -185,7 +186,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
display/intel_fbdev_fb.o \
display/xe_display.o \
display/xe_display_misc.o \
- display/xe_display_rps.o \
+ display/xe_display_rpm.o \
display/xe_display_wa.o \
display/xe_dsb_buffer.o \
display/xe_fb_pin.o \
@@ -196,7 +197,6 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
# SOC code shared with i915
xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-soc/intel_dram.o \
- i915-soc/intel_pch.o \
i915-soc/intel_rom.o
# Display code shared with i915
@@ -271,6 +271,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_panel.o \
i915-display/intel_pfit.o \
i915-display/intel_pmdemand.o \
+ i915-display/intel_pch.o \
i915-display/intel_pps.o \
i915-display/intel_psr.o \
i915-display/intel_qp_tables.o \
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index ec516e838ee8..448afb86e05c 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -141,6 +141,7 @@ enum xe_guc_action {
XE_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
XE_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
XE_GUC_ACTION_SET_DEVICE_ENGINE_ACTIVITY_BUFFER = 0x550C,
+ XE_GUC_ACTION_SET_FUNCTION_ENGINE_ACTIVITY_BUFFER = 0x550D,
XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR = 0x6000,
XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC = 0x6002,
XE_GUC_ACTION_PAGE_FAULT_RES_DESC = 0x6003,
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index d633f1c739e4..7de8f827281f 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -367,6 +367,7 @@ enum xe_guc_klv_ids {
GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE = 0x9008,
GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET = 0x9009,
GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO = 0x900a,
+ GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH = 0x900b,
};
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h b/drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h
deleted file mode 100644
index 21fec9cc837c..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __INTEL_RPS_H__
-#define __INTEL_RPS_H__
-
-#define gen5_rps_irq_handler(x) ({})
-
-#endif /* __INTEL_RPS_H__ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index dfec5108d2c3..9b7572e06f34 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -13,7 +13,6 @@
#include <drm/drm_drv.h>
#include "i915_utils.h"
-#include "intel_runtime_pm.h"
#include "xe_device.h" /* for xe_device_has_flat_ccs() */
#include "xe_device_types.h"
@@ -22,28 +21,12 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
return container_of(dev, struct drm_i915_private, drm);
}
+/* compat platform checks only for soc/ usage */
#define IS_PLATFORM(xe, x) ((xe)->info.platform == x)
-#define INTEL_INFO(dev_priv) (&((dev_priv)->info))
-#define IS_I830(dev_priv) (dev_priv && 0)
-#define IS_I845G(dev_priv) (dev_priv && 0)
-#define IS_I85X(dev_priv) (dev_priv && 0)
-#define IS_I865G(dev_priv) (dev_priv && 0)
#define IS_I915G(dev_priv) (dev_priv && 0)
#define IS_I915GM(dev_priv) (dev_priv && 0)
-#define IS_I945G(dev_priv) (dev_priv && 0)
-#define IS_I945GM(dev_priv) (dev_priv && 0)
-#define IS_I965G(dev_priv) (dev_priv && 0)
-#define IS_I965GM(dev_priv) (dev_priv && 0)
-#define IS_G45(dev_priv) (dev_priv && 0)
-#define IS_GM45(dev_priv) (dev_priv && 0)
-#define IS_G4X(dev_priv) (dev_priv && 0)
#define IS_PINEVIEW(dev_priv) (dev_priv && 0)
-#define IS_G33(dev_priv) (dev_priv && 0)
-#define IS_IRONLAKE(dev_priv) (dev_priv && 0)
-#define IS_IRONLAKE_M(dev_priv) (dev_priv && 0)
-#define IS_SANDYBRIDGE(dev_priv) (dev_priv && 0)
#define IS_IVYBRIDGE(dev_priv) (dev_priv && 0)
-#define IS_IVB_GT1(dev_priv) (dev_priv && 0)
#define IS_VALLEYVIEW(dev_priv) (dev_priv && 0)
#define IS_CHERRYVIEW(dev_priv) (dev_priv && 0)
#define IS_HASWELL(dev_priv) (dev_priv && 0)
@@ -71,39 +54,10 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
#define IS_HASWELL_ULT(dev_priv) (dev_priv && 0)
#define IS_BROADWELL_ULT(dev_priv) (dev_priv && 0)
-#define IS_BROADWELL_ULX(dev_priv) (dev_priv && 0)
#define IS_MOBILE(xe) (xe && 0)
-#define IS_TIGERLAKE_UY(xe) (xe && 0)
-#define IS_COMETLAKE_ULX(xe) (xe && 0)
-#define IS_COFFEELAKE_ULX(xe) (xe && 0)
-#define IS_KABYLAKE_ULX(xe) (xe && 0)
-#define IS_SKYLAKE_ULX(xe) (xe && 0)
-#define IS_HASWELL_ULX(xe) (xe && 0)
-#define IS_COMETLAKE_ULT(xe) (xe && 0)
-#define IS_COFFEELAKE_ULT(xe) (xe && 0)
-#define IS_KABYLAKE_ULT(xe) (xe && 0)
-#define IS_SKYLAKE_ULT(xe) (xe && 0)
-
-#define IS_DG2_G10(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G10)
-#define IS_DG2_G11(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G11)
-#define IS_DG2_G12(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G12)
-#define IS_RAPTORLAKE_U(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_ALDERLAKE_P_RPLU)
-#define IS_ICL_WITH_PORT_F(xe) (xe && 0)
#define HAS_FLAT_CCS(xe) (xe_device_has_flat_ccs(xe))
-
#define HAS_128_BYTE_Y_TILING(xe) (xe || 1)
-#ifdef CONFIG_ARM64
-/*
- * arm64 indirectly includes linux/rtc.h,
- * which defines a irq_lock, so include it
- * here before #define-ing it
- */
-#include <linux/rtc.h>
-#endif
-
-#define irq_lock irq.lock
-
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h
deleted file mode 100644
index 274042bff1be..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __INTEL_RUNTIME_PM_H__
-#define __INTEL_RUNTIME_PM_H__
-
-#include "intel_wakeref.h"
-#include "xe_device_types.h"
-#include "xe_pm.h"
-
-#define intel_runtime_pm xe_runtime_pm
-
-static inline void disable_rpm_wakeref_asserts(void *rpm)
-{
-}
-
-static inline void enable_rpm_wakeref_asserts(void *rpm)
-{
-}
-
-static inline bool
-intel_runtime_pm_suspended(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- return pm_runtime_suspended(xe->drm.dev);
-}
-
-static inline intel_wakeref_t intel_runtime_pm_get(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- return xe_pm_runtime_resume_and_get(xe) ? INTEL_WAKEREF_DEF : NULL;
-}
-
-static inline intel_wakeref_t intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- return xe_pm_runtime_get_if_in_use(xe) ? INTEL_WAKEREF_DEF : NULL;
-}
-
-static inline intel_wakeref_t intel_runtime_pm_get_noresume(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- xe_pm_runtime_get_noresume(xe);
-
- return INTEL_WAKEREF_DEF;
-}
-
-static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- xe_pm_runtime_put(xe);
-}
-
-static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, intel_wakeref_t wakeref)
-{
- if (wakeref)
- intel_runtime_pm_put_unchecked(pm);
-}
-
-#define intel_runtime_pm_get_raw intel_runtime_pm_get
-#define intel_runtime_pm_put_raw intel_runtime_pm_put
-#define assert_rpm_wakelock_held(x) do { } while (0)
-#define assert_rpm_raw_wakeref_held(x) do { } while (0)
-
-#define with_intel_runtime_pm(rpm, wf) \
- for ((wf) = intel_runtime_pm_get(rpm); (wf); \
- intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h
deleted file mode 100644
index 9c46556d33a4..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "../../../i915/soc/intel_pch.h"
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index 3a1e505ff182..e8191562d122 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -45,7 +45,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
XE_BO_FLAG_STOLEN |
- XE_BO_FLAG_GGTT | XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_GGTT);
if (!IS_ERR(obj))
drm_info(&xe->drm, "Allocated fbdev into stolen\n");
else
@@ -56,7 +56,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_FLAG_GGTT | XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_GGTT);
}
if (IS_ERR(obj)) {
@@ -79,11 +79,11 @@ err:
return ERR_CAST(fb);
}
-int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
- struct drm_gem_object *_obj, struct i915_vma *vma)
+int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
+ struct drm_gem_object *_obj, struct i915_vma *vma)
{
struct xe_bo *obj = gem_to_xe_bo(_obj);
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
if (!(obj->flags & XE_BO_FLAG_SYSTEM)) {
if (obj->flags & XE_BO_FLAG_STOLEN)
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 0b0aca7a25af..68f064f33d4b 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -133,9 +133,6 @@ int xe_display_init_early(struct xe_device *xe)
/* Fake uncore lock */
spin_lock_init(&xe->uncore.lock);
- /* This must be called before any calls to HAS_PCH_* */
- intel_detect_pch(xe);
-
intel_display_driver_early_probe(display);
/* Early display init.. */
@@ -147,7 +144,7 @@ int xe_display_init_early(struct xe_device *xe)
*/
intel_dram_detect(xe);
- intel_bw_init_hw(xe);
+ intel_bw_init_hw(display);
intel_display_device_info_runtime_init(display);
@@ -173,7 +170,7 @@ static void xe_display_fini(void *arg)
struct xe_device *xe = arg;
struct intel_display *display = &xe->display;
- intel_hpd_poll_fini(xe);
+ intel_hpd_poll_fini(display);
intel_hdcp_component_fini(display);
intel_audio_deinit(display);
intel_display_driver_remove(display);
@@ -220,11 +217,13 @@ void xe_display_unregister(struct xe_device *xe)
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
if (master_ctl & DISPLAY_IRQ)
- gen11_display_irq_handler(xe);
+ gen11_display_irq_handler(display);
}
void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
@@ -240,19 +239,23 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
void xe_display_irq_reset(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- gen11_display_irq_reset(xe);
+ gen11_display_irq_reset(display);
}
void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
if (gt->info.id == XE_GT0)
- gen11_de_irq_postinstall(xe);
+ gen11_de_irq_postinstall(display);
}
static bool suspend_to_idle(void)
@@ -305,7 +308,7 @@ static void xe_display_enable_d3cold(struct xe_device *xe)
intel_dmc_suspend(display);
if (has_display(xe))
- intel_hpd_poll_enable(xe);
+ intel_hpd_poll_enable(display);
}
static void xe_display_disable_d3cold(struct xe_device *xe)
@@ -322,10 +325,10 @@ static void xe_display_disable_d3cold(struct xe_device *xe)
intel_display_driver_init_hw(display);
- intel_hpd_init(xe);
+ intel_hpd_init(display);
if (has_display(xe))
- intel_hpd_poll_disable(xe);
+ intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -355,7 +358,7 @@ void xe_display_pm_suspend(struct xe_device *xe)
xe_display_flush_cleanup_work(xe);
- intel_hpd_cancel_work(xe);
+ intel_hpd_cancel_work(display);
if (has_display(xe)) {
intel_display_driver_suspend_access(display);
@@ -385,7 +388,7 @@ void xe_display_pm_shutdown(struct xe_device *xe)
xe_display_flush_cleanup_work(xe);
intel_dp_mst_suspend(display);
- intel_hpd_cancel_work(xe);
+ intel_hpd_cancel_work(display);
if (has_display(xe))
intel_display_driver_suspend_access(display);
@@ -400,6 +403,8 @@ void xe_display_pm_shutdown(struct xe_device *xe)
void xe_display_pm_runtime_suspend(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
@@ -408,7 +413,7 @@ void xe_display_pm_runtime_suspend(struct xe_device *xe)
return;
}
- intel_hpd_poll_enable(xe);
+ intel_hpd_poll_enable(display);
}
void xe_display_pm_suspend_late(struct xe_device *xe)
@@ -482,7 +487,7 @@ void xe_display_pm_resume(struct xe_device *xe)
if (has_display(xe))
intel_display_driver_resume_access(display);
- intel_hpd_init(xe);
+ intel_hpd_init(display);
if (has_display(xe)) {
intel_display_driver_resume(display);
@@ -491,7 +496,7 @@ void xe_display_pm_resume(struct xe_device *xe)
}
if (has_display(xe))
- intel_hpd_poll_disable(xe);
+ intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -502,6 +507,8 @@ void xe_display_pm_resume(struct xe_device *xe)
void xe_display_pm_runtime_resume(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
@@ -510,9 +517,9 @@ void xe_display_pm_runtime_resume(struct xe_device *xe)
return;
}
- intel_hpd_init(xe);
- intel_hpd_poll_disable(xe);
- skl_watermark_ipc_update(xe);
+ intel_hpd_init(display);
+ intel_hpd_poll_disable(display);
+ skl_watermark_ipc_update(display);
}
diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.c b/drivers/gpu/drm/xe/display/xe_display_rpm.c
new file mode 100644
index 000000000000..1955153aadba
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_rpm.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include "intel_display_rpm.h"
+#include "xe_device_types.h"
+#include "xe_pm.h"
+
+static struct xe_device *display_to_xe(struct intel_display *display)
+{
+ return container_of(display, struct xe_device, display);
+}
+
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
+{
+ return intel_display_rpm_get(display);
+}
+
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ intel_display_rpm_put(display, wakeref);
+}
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
+{
+ return xe_pm_runtime_resume_and_get(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
+}
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
+{
+ return xe_pm_runtime_get_if_in_use(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
+}
+
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
+{
+ xe_pm_runtime_get_noresume(display_to_xe(display));
+
+ return INTEL_WAKEREF_DEF;
+}
+
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ if (wakeref)
+ xe_pm_runtime_put(display_to_xe(display));
+}
+
+void intel_display_rpm_put_unchecked(struct intel_display *display)
+{
+ xe_pm_runtime_put(display_to_xe(display));
+}
+
+bool intel_display_rpm_suspended(struct intel_display *display)
+{
+ struct xe_device *xe = display_to_xe(display);
+
+ return pm_runtime_suspended(xe->drm.dev);
+}
+
+void assert_display_rpm_held(struct intel_display *display)
+{
+ /* FIXME */
+}
+
+void intel_display_rpm_assert_block(struct intel_display *display)
+{
+ /* FIXME */
+}
+
+void intel_display_rpm_assert_unblock(struct intel_display *display)
+{
+ /* FIXME */
+}
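A hedged usage sketch of the wrappers above; "display" is assumed to be a valid struct intel_display, and a NULL return means the runtime resume failed (or, for the _get_if_in_use variant, that the device was suspended):

    struct ref_tracker *wakeref;

    wakeref = intel_display_rpm_get(display);
    if (wakeref) {
            /* device is runtime-resumed; safe to touch display hw */
            intel_display_rpm_put(display, wakeref);
    }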
diff --git a/drivers/gpu/drm/xe/display/xe_display_rps.c b/drivers/gpu/drm/xe/display/xe_display_rps.c
deleted file mode 100644
index fa616f9688a5..000000000000
--- a/drivers/gpu/drm/xe/display/xe_display_rps.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "intel_display_rps.h"
-
-void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
- struct dma_fence *fence)
-{
-}
-
-void intel_display_rps_mark_interactive(struct intel_display *display,
- struct intel_atomic_state *state,
- bool interactive)
-{
-}
diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c
index 68e3d1959ad6..2933ca97d673 100644
--- a/drivers/gpu/drm/xe/display/xe_display_wa.c
+++ b/drivers/gpu/drm/xe/display/xe_display_wa.c
@@ -10,7 +10,9 @@
#include <generated/xe_wa_oob.h>
-bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915)
+bool intel_display_needs_wa_16023588340(struct intel_display *display)
{
- return XE_WA(xe_root_mmio_gt(i915), 16023588340);
+ struct xe_device *xe = to_xe_device(display->drm);
+
+ return XE_WA(xe_root_mmio_gt(xe), 16023588340);
}
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index 7c02323e9531..b35a6f201d4a 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -9,7 +9,6 @@
#include "abi/gsc_command_header_abi.h"
#include "intel_hdcp_gsc.h"
-#include "intel_hdcp_gsc_message.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
@@ -22,7 +21,8 @@
#define HECI_MEADDRESS_HDCP 18
-struct intel_hdcp_gsc_message {
+struct intel_hdcp_gsc_context {
+ struct xe_device *xe;
struct xe_bo *hdcp_bo;
u64 hdcp_cmd_in;
u64 hdcp_cmd_out;
@@ -30,14 +30,9 @@ struct intel_hdcp_gsc_message {
#define HDCP_GSC_HEADER_SIZE sizeof(struct intel_gsc_mtl_header)
-bool intel_hdcp_gsc_cs_required(struct intel_display *display)
+bool intel_hdcp_gsc_check_status(struct drm_device *drm)
{
- return DISPLAY_VER(display) >= 14;
-}
-
-bool intel_hdcp_gsc_check_status(struct intel_display *display)
-{
- struct xe_device *xe = to_xe_device(display->drm);
+ struct xe_device *xe = to_xe_device(drm);
struct xe_tile *tile = xe_device_get_root_tile(xe);
struct xe_gt *gt = tile->media_gt;
struct xe_gsc *gsc = &gt->uc.gsc;
@@ -69,10 +64,9 @@ out:
}
/* This function allocates memory for the command that we will send to the GSC CS */
-static int intel_hdcp_gsc_initialize_message(struct intel_display *display,
- struct intel_hdcp_gsc_message *hdcp_message)
+static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
+ struct intel_hdcp_gsc_context *gsc_context)
{
- struct xe_device *xe = to_xe_device(display->drm);
struct xe_bo *bo = NULL;
u64 cmd_in, cmd_out;
int ret = 0;
@@ -84,7 +78,7 @@ static int intel_hdcp_gsc_initialize_message(struct intel_display *display,
XE_BO_FLAG_GGTT);
if (IS_ERR(bo)) {
- drm_err(display->drm, "Failed to allocate bo for HDCP streaming command!\n");
+ drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
ret = PTR_ERR(bo);
goto out;
}
@@ -93,104 +87,60 @@ static int intel_hdcp_gsc_initialize_message(struct intel_display *display,
cmd_out = cmd_in + PAGE_SIZE;
xe_map_memset(xe, &bo->vmap, 0, 0, bo->size);
- hdcp_message->hdcp_bo = bo;
- hdcp_message->hdcp_cmd_in = cmd_in;
- hdcp_message->hdcp_cmd_out = cmd_out;
+ gsc_context->hdcp_bo = bo;
+ gsc_context->hdcp_cmd_in = cmd_in;
+ gsc_context->hdcp_cmd_out = cmd_out;
+ gsc_context->xe = xe;
+
out:
return ret;
}
-static int intel_hdcp_gsc_hdcp2_init(struct intel_display *display)
+struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm)
{
- struct intel_hdcp_gsc_message *hdcp_message;
+ struct xe_device *xe = to_xe_device(drm);
+ struct intel_hdcp_gsc_context *gsc_context;
int ret;
- hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);
-
- if (!hdcp_message)
- return -ENOMEM;
+ gsc_context = kzalloc(sizeof(*gsc_context), GFP_KERNEL);
+ if (!gsc_context)
+ return ERR_PTR(-ENOMEM);
/*
* NOTE: No need to lock the comp mutex here as it is already
* going to be taken before this function is called
*/
- ret = intel_hdcp_gsc_initialize_message(display, hdcp_message);
+ ret = intel_hdcp_gsc_initialize_message(xe, gsc_context);
if (ret) {
- drm_err(display->drm, "Could not initialize hdcp_message\n");
- kfree(hdcp_message);
- return ret;
+ drm_err(&xe->drm, "Could not initialize gsc_context\n");
+ kfree(gsc_context);
+ gsc_context = ERR_PTR(ret);
}
- display->hdcp.hdcp_message = hdcp_message;
- return ret;
-}
-
-static const struct i915_hdcp_ops gsc_hdcp_ops = {
- .initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
- .verify_receiver_cert_prepare_km =
- intel_hdcp_gsc_verify_receiver_cert_prepare_km,
- .verify_hprime = intel_hdcp_gsc_verify_hprime,
- .store_pairing_info = intel_hdcp_gsc_store_pairing_info,
- .initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
- .verify_lprime = intel_hdcp_gsc_verify_lprime,
- .get_session_key = intel_hdcp_gsc_get_session_key,
- .repeater_check_flow_prepare_ack =
- intel_hdcp_gsc_repeater_check_flow_prepare_ack,
- .verify_mprime = intel_hdcp_gsc_verify_mprime,
- .enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
- .close_hdcp_session = intel_hdcp_gsc_close_session,
-};
-
-int intel_hdcp_gsc_init(struct intel_display *display)
-{
- struct i915_hdcp_arbiter *data;
- int ret;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- mutex_lock(&display->hdcp.hdcp_mutex);
- display->hdcp.arbiter = data;
- display->hdcp.arbiter->hdcp_dev = display->drm->dev;
- display->hdcp.arbiter->ops = &gsc_hdcp_ops;
- ret = intel_hdcp_gsc_hdcp2_init(display);
- if (ret)
- kfree(data);
-
- mutex_unlock(&display->hdcp.hdcp_mutex);
-
- return ret;
+ return gsc_context;
}
-void intel_hdcp_gsc_fini(struct intel_display *display)
+void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context)
{
- struct intel_hdcp_gsc_message *hdcp_message =
- display->hdcp.hdcp_message;
- struct i915_hdcp_arbiter *arb = display->hdcp.arbiter;
-
- if (hdcp_message) {
- xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo);
- kfree(hdcp_message);
- display->hdcp.hdcp_message = NULL;
- }
+ if (!gsc_context)
+ return;
- kfree(arb);
- display->hdcp.arbiter = NULL;
+ xe_bo_unpin_map_no_vm(gsc_context->hdcp_bo);
+ kfree(gsc_context);
}
static int xe_gsc_send_sync(struct xe_device *xe,
- struct intel_hdcp_gsc_message *hdcp_message,
+ struct intel_hdcp_gsc_context *gsc_context,
u32 msg_size_in, u32 msg_size_out,
u32 addr_out_off)
{
- struct xe_gt *gt = hdcp_message->hdcp_bo->tile->media_gt;
- struct iosys_map *map = &hdcp_message->hdcp_bo->vmap;
+ struct xe_gt *gt = gsc_context->hdcp_bo->tile->media_gt;
+ struct iosys_map *map = &gsc_context->hdcp_bo->vmap;
struct xe_gsc *gsc = &gt->uc.gsc;
int ret;
- ret = xe_gsc_pkt_submit_kernel(gsc, hdcp_message->hdcp_cmd_in, msg_size_in,
- hdcp_message->hdcp_cmd_out, msg_size_out);
+ ret = xe_gsc_pkt_submit_kernel(gsc, gsc_context->hdcp_cmd_in, msg_size_in,
+ gsc_context->hdcp_cmd_out, msg_size_out);
if (ret) {
drm_err(&xe->drm, "failed to send gsc HDCP msg (%d)\n", ret);
return ret;
@@ -205,12 +155,12 @@ static int xe_gsc_send_sync(struct xe_device *xe,
return ret;
}
-ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
- size_t msg_in_len, u8 *msg_out,
- size_t msg_out_len)
+ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len)
{
+ struct xe_device *xe = gsc_context->xe;
const size_t max_msg_size = PAGE_SIZE - HDCP_GSC_HEADER_SIZE;
- struct intel_hdcp_gsc_message *hdcp_message;
u64 host_session_id;
u32 msg_size_in, msg_size_out;
u32 addr_out_off, addr_in_wr_off = 0;
@@ -223,15 +173,14 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
msg_size_in = msg_in_len + HDCP_GSC_HEADER_SIZE;
msg_size_out = msg_out_len + HDCP_GSC_HEADER_SIZE;
- hdcp_message = xe->display.hdcp.hdcp_message;
addr_out_off = PAGE_SIZE;
host_session_id = xe_gsc_create_host_session_id();
xe_pm_runtime_get_noresume(xe);
- addr_in_wr_off = xe_gsc_emit_header(xe, &hdcp_message->hdcp_bo->vmap,
+ addr_in_wr_off = xe_gsc_emit_header(xe, &gsc_context->hdcp_bo->vmap,
addr_in_wr_off, HECI_MEADDRESS_HDCP,
host_session_id, msg_in_len);
- xe_map_memcpy_to(xe, &hdcp_message->hdcp_bo->vmap, addr_in_wr_off,
+ xe_map_memcpy_to(xe, &gsc_context->hdcp_bo->vmap, addr_in_wr_off,
msg_in, msg_in_len);
/*
* Keep sending the request in case the pending bit is set; no need to add
@@ -240,7 +189,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
* 20 times each message 50 ms apart
*/
do {
- ret = xe_gsc_send_sync(xe, hdcp_message, msg_size_in, msg_size_out,
+ ret = xe_gsc_send_sync(xe, gsc_context, msg_size_in, msg_size_out,
addr_out_off);
/* Only try again if gsc says so */
@@ -254,7 +203,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
if (ret)
goto out;
- xe_map_memcpy_from(xe, msg_out, &hdcp_message->hdcp_bo->vmap,
+ xe_map_memcpy_from(xe, msg_out, &gsc_context->hdcp_bo->vmap,
addr_out_off + HDCP_GSC_HEADER_SIZE,
msg_out_len);
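The refactor above moves the per-device hdcp_message state into a self-contained intel_hdcp_gsc_context, so display code no longer reaches into struct xe_device. A hedged sketch of the resulting lifecycle, with "drm", "req", and "resp" as assumed caller-provided values:

    struct intel_hdcp_gsc_context *ctx;
    ssize_t ret;

    ctx = intel_hdcp_gsc_context_alloc(drm);        /* allocates the bo */
    if (IS_ERR(ctx))
            return PTR_ERR(ctx);

    ret = intel_hdcp_gsc_msg_send(ctx, req, req_len, resp, resp_len);

    intel_hdcp_gsc_context_free(ctx);               /* NULL-safe */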
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 4ca0cb571194..6502b8274173 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -83,7 +83,7 @@ initial_plane_bo(struct xe_device *xe,
if (plane_config->size == 0)
return NULL;
- flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
+ flags = XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
base = round_down(plane_config->base, page_size);
if (IS_DGFX(xe)) {
diff --git a/drivers/gpu/drm/xe/instructions/xe_alu_commands.h b/drivers/gpu/drm/xe/instructions/xe_alu_commands.h
new file mode 100644
index 000000000000..2987b10d3e16
--- /dev/null
+++ b/drivers/gpu/drm/xe/instructions/xe_alu_commands.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_ALU_COMMANDS_H_
+#define _XE_ALU_COMMANDS_H_
+
+#include "instructions/xe_instr_defs.h"
+
+/* Instruction Opcodes */
+#define CS_ALU_OPCODE_NOOP 0x000
+#define CS_ALU_OPCODE_FENCE_RD 0x001
+#define CS_ALU_OPCODE_FENCE_WR 0x002
+#define CS_ALU_OPCODE_LOAD 0x080
+#define CS_ALU_OPCODE_LOADINV 0x480
+#define CS_ALU_OPCODE_LOAD0 0x081
+#define CS_ALU_OPCODE_LOAD1 0x481
+#define CS_ALU_OPCODE_LOADIND 0x082
+#define CS_ALU_OPCODE_ADD 0x100
+#define CS_ALU_OPCODE_SUB 0x101
+#define CS_ALU_OPCODE_AND 0x102
+#define CS_ALU_OPCODE_OR 0x103
+#define CS_ALU_OPCODE_XOR 0x104
+#define CS_ALU_OPCODE_SHL 0x105
+#define CS_ALU_OPCODE_SHR 0x106
+#define CS_ALU_OPCODE_SAR 0x107
+#define CS_ALU_OPCODE_STORE 0x180
+#define CS_ALU_OPCODE_STOREINV 0x580
+#define CS_ALU_OPCODE_STOREIND 0x181
+
+/* Instruction Operands */
+#define CS_ALU_OPERAND_REG(n) REG_FIELD_PREP(GENMASK(3, 0), (n))
+#define CS_ALU_OPERAND_REG0 0x0
+#define CS_ALU_OPERAND_REG1 0x1
+#define CS_ALU_OPERAND_REG2 0x2
+#define CS_ALU_OPERAND_REG3 0x3
+#define CS_ALU_OPERAND_REG4 0x4
+#define CS_ALU_OPERAND_REG5 0x5
+#define CS_ALU_OPERAND_REG6 0x6
+#define CS_ALU_OPERAND_REG7 0x7
+#define CS_ALU_OPERAND_REG8 0x8
+#define CS_ALU_OPERAND_REG9 0x9
+#define CS_ALU_OPERAND_REG10 0xa
+#define CS_ALU_OPERAND_REG11 0xb
+#define CS_ALU_OPERAND_REG12 0xc
+#define CS_ALU_OPERAND_REG13 0xd
+#define CS_ALU_OPERAND_REG14 0xe
+#define CS_ALU_OPERAND_REG15 0xf
+#define CS_ALU_OPERAND_SRCA 0x20
+#define CS_ALU_OPERAND_SRCB 0x21
+#define CS_ALU_OPERAND_ACCU 0x31
+#define CS_ALU_OPERAND_ZF 0x32
+#define CS_ALU_OPERAND_CF 0x33
+#define CS_ALU_OPERAND_NA 0 /* N/A operand */
+
+/* Command Streamer ALU Instructions */
+#define CS_ALU_INSTR(opcode, op1, op2) (REG_FIELD_PREP(GENMASK(31, 20), (opcode)) | \
+ REG_FIELD_PREP(GENMASK(19, 10), (op1)) | \
+ REG_FIELD_PREP(GENMASK(9, 0), (op2)))
+
+#define __CS_ALU_INSTR(opcode, op1, op2) CS_ALU_INSTR(CS_ALU_OPCODE_##opcode, \
+ CS_ALU_OPERAND_##op1, \
+ CS_ALU_OPERAND_##op2)
+
+#define CS_ALU_INSTR_NOOP __CS_ALU_INSTR(NOOP, NA, NA)
+#define CS_ALU_INSTR_LOAD(op1, op2) __CS_ALU_INSTR(LOAD, op1, op2)
+#define CS_ALU_INSTR_LOADINV(op1, op2) __CS_ALU_INSTR(LOADINV, op1, op2)
+#define CS_ALU_INSTR_LOAD0(op1) __CS_ALU_INSTR(LOAD0, op1, NA)
+#define CS_ALU_INSTR_LOAD1(op1) __CS_ALU_INSTR(LOAD1, op1, NA)
+#define CS_ALU_INSTR_ADD __CS_ALU_INSTR(ADD, NA, NA)
+#define CS_ALU_INSTR_SUB __CS_ALU_INSTR(SUB, NA, NA)
+#define CS_ALU_INSTR_AND __CS_ALU_INSTR(AND, NA, NA)
+#define CS_ALU_INSTR_OR __CS_ALU_INSTR(OR, NA, NA)
+#define CS_ALU_INSTR_XOR __CS_ALU_INSTR(XOR, NA, NA)
+#define CS_ALU_INSTR_STORE(op1, op2) __CS_ALU_INSTR(STORE, op1, op2)
+#define CS_ALU_INSTR_STOREINV(op1, op2) __CS_ALU_INSTR(STOREINV, op1, op2)
+
+#endif
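These helpers compose into the MI_MATH instruction added to xe_mi_commands.h later in this series. A hedged sketch of a four-instruction ALU program that adds GPR0 and GPR1 into GPR2, with "dw" as an assumed scratch buffer being built for emission:

    u32 dw[5];

    dw[0] = MI_MATH(4);                        /* header + 4 ALU dwords */
    dw[1] = CS_ALU_INSTR_LOAD(SRCA, REG0);     /* SRCA <- GPR0 */
    dw[2] = CS_ALU_INSTR_LOAD(SRCB, REG1);     /* SRCB <- GPR1 */
    dw[3] = CS_ALU_INSTR_ADD;                  /* ACCU <- SRCA + SRCB */
    dw[4] = CS_ALU_INSTR_STORE(REG2, ACCU);    /* GPR2 <- ACCU */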
diff --git a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
index 31d28a67ef6a..457881af8af9 100644
--- a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
@@ -137,6 +137,7 @@
#define CMD_3DSTATE_CLIP_MESH GFXPIPE_3D_CMD(0x0, 0x81)
#define CMD_3DSTATE_SBE_MESH GFXPIPE_3D_CMD(0x0, 0x82)
#define CMD_3DSTATE_CPSIZE_CONTROL_BUFFER GFXPIPE_3D_CMD(0x0, 0x83)
+#define CMD_3DSTATE_COARSE_PIXEL GFXPIPE_3D_CMD(0x0, 0x89)
#define CMD_3DSTATE_DRAWING_RECTANGLE GFXPIPE_3D_CMD(0x1, 0x0)
#define CMD_3DSTATE_CHROMA_KEY GFXPIPE_3D_CMD(0x1, 0x4)
diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
index 5a47991b4b81..e3f5e8bb3ebc 100644
--- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
@@ -32,6 +32,7 @@
#define MI_BATCH_BUFFER_END __MI_INSTR(0xA)
#define MI_TOPOLOGY_FILTER __MI_INSTR(0xD)
#define MI_FORCE_WAKEUP __MI_INSTR(0x1D)
+#define MI_MATH(n) (__MI_INSTR(0x1A) | XE_INSTR_NUM_DW((n) + 1))
#define MI_STORE_DATA_IMM __MI_INSTR(0x20)
#define MI_SDI_GGTT REG_BIT(22)
@@ -65,6 +66,10 @@
#define MI_LOAD_REGISTER_MEM (__MI_INSTR(0x29) | XE_INSTR_NUM_DW(4))
#define MI_LRM_USE_GGTT REG_BIT(22)
+#define MI_LOAD_REGISTER_REG (__MI_INSTR(0x2a) | XE_INSTR_NUM_DW(3))
+#define MI_LRR_DST_CS_MMIO REG_BIT(19)
+#define MI_LRR_SRC_CS_MMIO REG_BIT(18)
+
#define MI_COPY_MEM_MEM (__MI_INSTR(0x2e) | XE_INSTR_NUM_DW(5))
#define MI_COPY_MEM_MEM_SRC_GGTT REG_BIT(22)
#define MI_COPY_MEM_MEM_DST_GGTT REG_BIT(21)
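MI_LOAD_REGISTER_REG copies one register into another from the command streamer. A hedged three-dword sketch, assuming "src" and "dst" are MMIO offsets of per-engine registers and "dw" is an emission buffer:

    dw[0] = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO | MI_LRR_DST_CS_MMIO;
    dw[1] = src;    /* source register offset */
    dw[2] = dst;    /* destination register offset */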
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index 891f928d80ce..7ade41e2b7b3 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -193,6 +193,10 @@
#define PREEMPT_GPGPU_LEVEL_MASK PREEMPT_GPGPU_LEVEL(1, 1)
#define PREEMPT_3D_OBJECT_LEVEL REG_BIT(0)
+#define CS_GPR_DATA(base, n) XE_REG((base) + 0x600 + (n) * 4)
+#define CS_GPR_REG(base, n) CS_GPR_DATA((base), (n) * 2)
+#define CS_GPR_REG_UDW(base, n) CS_GPR_DATA((base), (n) * 2 + 1)
+
#define VDBOX_CGCTL3F08(base) XE_REG((base) + 0x3f08)
#define CG3DDISHRS_CLKGATE_DIS REG_BIT(5)
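The CS_GPR_* macros above address the sixteen 64-bit command-streamer general-purpose registers that ALU programs such as the MI_MATH sketch earlier operate on. A hedged sketch, assuming "base" is an engine's MMIO base and that XE_REG yields a struct xe_reg value:

    struct xe_reg lo = CS_GPR_REG(base, 3);        /* GPR3 bits 31:0 */
    struct xe_reg hi = CS_GPR_REG_UDW(base, 3);    /* GPR3 bits 63:32 */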
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 181913967ac9..5cd5ab8529c5 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -62,7 +62,6 @@
#define LE_SSE_MASK REG_GENMASK(18, 17)
#define LE_SSE(value) REG_FIELD_PREP(LE_SSE_MASK, value)
#define LE_COS_MASK REG_GENMASK(16, 15)
-#define LE_COS(value) REG_FIELD_PREP(LE_COS_MASK)
#define LE_SCF_MASK REG_BIT(14)
#define LE_SCF(value) REG_FIELD_PREP(LE_SCF_MASK, value)
#define LE_PFM_MASK REG_GENMASK(13, 11)
@@ -393,6 +392,18 @@
#define XEHP_L3NODEARBCFG XE_REG_MCR(0xb0b4)
#define XEHP_LNESPARE REG_BIT(19)
+#define LSN_VC_REG2 XE_REG_MCR(0xb0c8)
+#define LSN_LNI_WGT_MASK REG_GENMASK(31, 28)
+#define LSN_LNI_WGT(value) REG_FIELD_PREP(LSN_LNI_WGT_MASK, value)
+#define LSN_LNE_WGT_MASK REG_GENMASK(27, 24)
+#define LSN_LNE_WGT(value) REG_FIELD_PREP(LSN_LNE_WGT_MASK, value)
+#define LSN_DIM_X_WGT_MASK REG_GENMASK(23, 20)
+#define LSN_DIM_X_WGT(value) REG_FIELD_PREP(LSN_DIM_X_WGT_MASK, value)
+#define LSN_DIM_Y_WGT_MASK REG_GENMASK(19, 16)
+#define LSN_DIM_Y_WGT(value) REG_FIELD_PREP(LSN_DIM_Y_WGT_MASK, value)
+#define LSN_DIM_Z_WGT_MASK REG_GENMASK(15, 12)
+#define LSN_DIM_Z_WGT(value) REG_FIELD_PREP(LSN_DIM_Z_WGT_MASK, value)
+
#define L3SQCREG2 XE_REG_MCR(0xb104)
#define COMPMEMRD256BOVRFETCHEN REG_BIT(20)
diff --git a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
index 8846eb9ce2a4..c7d5d782e3f9 100644
--- a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
@@ -21,6 +21,9 @@
#define BMG_PACKAGE_POWER_SKU XE_REG(0x138098)
#define BMG_PACKAGE_POWER_SKU_UNIT XE_REG(0x1380dc)
#define BMG_PACKAGE_ENERGY_STATUS XE_REG(0x138120)
+#define BMG_FAN_1_SPEED XE_REG(0x138140)
+#define BMG_FAN_2_SPEED XE_REG(0x138170)
+#define BMG_FAN_3_SPEED XE_REG(0x1381a0)
#define BMG_VRAM_TEMPERATURE XE_REG(0x1382c0)
#define BMG_PACKAGE_TEMPERATURE XE_REG(0x138434)
#define BMG_PACKAGE_RAPL_LIMIT XE_REG(0x138440)
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 9fde67ca989f..378dcd0fb414 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -60,7 +60,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
}
/* Evict to system. CCS data should be copied. */
- ret = xe_bo_evict(bo, true);
+ ret = xe_bo_evict(bo);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return ret;
@@ -252,7 +252,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
for_each_gt(__gt, xe, id)
xe_gt_sanitize(__gt);
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
/*
* Snapshotting the CTB and copying back a potentially old
* version seems risky, depending on what might have been
@@ -273,7 +273,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
goto cleanup_all;
}
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err) {
KUNIT_FAIL(test, "restore user err=%pe\n", ERR_PTR(err));
goto cleanup_all;
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index cedd3e88a6fb..c53f67ce4b0a 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -65,7 +65,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
* the exporter and the importer should be the same bo.
*/
swap(exported->ttm.base.dma_buf, dmabuf);
- ret = xe_bo_evict(exported, true);
+ ret = xe_bo_evict(exported);
swap(exported->ttm.base.dma_buf, dmabuf);
if (ret) {
if (ret != -EINTR && ret != -ERESTARTSYS)
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index d5fe0ea889ad..4a65e3103f77 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -202,8 +202,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile));
if (IS_ERR(big)) {
KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
goto vunmap;
@@ -211,8 +210,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile));
if (IS_ERR(pt)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
PTR_ERR(pt));
@@ -222,8 +220,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
2 * SZ_4K,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile));
if (IS_ERR(tiny)) {
KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n",
PTR_ERR(tiny));
@@ -512,7 +509,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
dma_fence_put(fence);
kunit_info(test, "Evict vram buffer object\n");
- ret = xe_bo_evict(vram_bo, true);
+ ret = xe_bo_evict(vram_bo);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return;
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 64f9c936eea0..d99d91fe8aa9 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -55,6 +55,8 @@ static struct ttm_placement sys_placement = {
.placement = &sys_placement_flags,
};
+static struct ttm_placement purge_placement;
+
static const struct ttm_place tt_placement_flags[] = {
{
.fpfn = 0,
@@ -189,11 +191,18 @@ static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
static bool force_contiguous(u32 bo_flags)
{
+ if (bo_flags & XE_BO_FLAG_STOLEN)
+ return true; /* users expect this */
+ else if (bo_flags & XE_BO_FLAG_PINNED &&
+ !(bo_flags & XE_BO_FLAG_PINNED_LATE_RESTORE))
+ return true; /* needs vmap */
+
/*
	 * For eviction / restore on suspend / resume, objects pinned in VRAM
	 * must be contiguous; also, only contiguous BOs support xe_bo_vmap.
*/
- return bo_flags & (XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT);
+ return bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS &&
+ bo_flags & XE_BO_FLAG_PINNED;
}
static void add_vram(struct xe_device *xe, struct xe_bo *bo,
@@ -281,6 +290,8 @@ int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
static void xe_evict_flags(struct ttm_buffer_object *tbo,
struct ttm_placement *placement)
{
+ struct xe_device *xe = container_of(tbo->bdev, typeof(*xe), ttm);
+ bool device_unplugged = drm_dev_is_unplugged(&xe->drm);
struct xe_bo *bo;
if (!xe_bo_is_xe_bo(tbo)) {
@@ -290,7 +301,7 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
return;
}
- *placement = sys_placement;
+ *placement = device_unplugged ? purge_placement : sys_placement;
return;
}
@@ -300,6 +311,11 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
return;
}
+ if (device_unplugged && !tbo->base.dma_buf) {
+ *placement = purge_placement;
+ return;
+ }
+
/*
* For xe, sg bos that are evicted to system just triggers a
* rebind of the sg list upon subsequent validation to XE_PL_TT.
@@ -657,11 +673,20 @@ static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
ttm);
struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ bool device_unplugged = drm_dev_is_unplugged(&xe->drm);
struct sg_table *sg;
xe_assert(xe, attach);
xe_assert(xe, ttm_bo->ttm);
+ if (device_unplugged && new_res->mem_type == XE_PL_SYSTEM &&
+ ttm_bo->sg) {
+ dma_resv_wait_timeout(ttm_bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
+ ttm_bo->sg = NULL;
+ }
+
if (new_res->mem_type == XE_PL_SYSTEM)
goto out;
@@ -898,79 +923,44 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
xe_pm_runtime_get_noresume(xe);
}
- if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
- /*
- * Kernel memory that is pinned should only be moved on suspend
- * / resume, some of the pinned memory is required for the
- * device to resume / use the GPU to move other evicted memory
- * (user memory) around. This likely could be optimized a bit
- * further where we find the minimum set of pinned memory
- * required for resume but for simplity doing a memcpy for all
- * pinned memory.
- */
- ret = xe_bo_vmap(bo);
- if (!ret) {
- ret = ttm_bo_move_memcpy(ttm_bo, ctx, new_mem);
-
- /* Create a new VMAP once kernel BO back in VRAM */
- if (!ret && resource_is_vram(new_mem)) {
- struct xe_vram_region *vram = res_to_mem_region(new_mem);
- void __iomem *new_addr = vram->mapping +
- (new_mem->start << PAGE_SHIFT);
+ if (move_lacks_source) {
+ u32 flags = 0;
- if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
- ret = -EINVAL;
- xe_pm_runtime_put(xe);
- goto out;
- }
+ if (mem_type_is_vram(new_mem->mem_type))
+ flags |= XE_MIGRATE_CLEAR_FLAG_FULL;
+ else if (handle_system_ccs)
+ flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;
- xe_assert(xe, new_mem->start ==
- bo->placements->fpfn);
-
- iosys_map_set_vaddr_iomem(&bo->vmap, new_addr);
- }
- }
+ fence = xe_migrate_clear(migrate, bo, new_mem, flags);
} else {
- if (move_lacks_source) {
- u32 flags = 0;
-
- if (mem_type_is_vram(new_mem->mem_type))
- flags |= XE_MIGRATE_CLEAR_FLAG_FULL;
- else if (handle_system_ccs)
- flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;
-
- fence = xe_migrate_clear(migrate, bo, new_mem, flags);
- }
- else
- fence = xe_migrate_copy(migrate, bo, bo, old_mem,
- new_mem, handle_system_ccs);
- if (IS_ERR(fence)) {
- ret = PTR_ERR(fence);
- xe_pm_runtime_put(xe);
- goto out;
- }
- if (!move_lacks_source) {
- ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict,
- true, new_mem);
- if (ret) {
- dma_fence_wait(fence, false);
- ttm_bo_move_null(ttm_bo, new_mem);
- ret = 0;
- }
- } else {
- /*
- * ttm_bo_move_accel_cleanup() may blow up if
- * bo->resource == NULL, so just attach the
- * fence and set the new resource.
- */
- dma_resv_add_fence(ttm_bo->base.resv, fence,
- DMA_RESV_USAGE_KERNEL);
+ fence = xe_migrate_copy(migrate, bo, bo, old_mem, new_mem,
+ handle_system_ccs);
+ }
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ xe_pm_runtime_put(xe);
+ goto out;
+ }
+ if (!move_lacks_source) {
+ ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict, true,
+ new_mem);
+ if (ret) {
+ dma_fence_wait(fence, false);
ttm_bo_move_null(ttm_bo, new_mem);
+ ret = 0;
}
-
- dma_fence_put(fence);
+ } else {
+ /*
+ * ttm_bo_move_accel_cleanup() may blow up if
+ * bo->resource == NULL, so just attach the
+ * fence and set the new resource.
+ */
+ dma_resv_add_fence(ttm_bo->base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ ttm_bo_move_null(ttm_bo, new_mem);
}
+ dma_fence_put(fence);
xe_pm_runtime_put(xe);
out:
@@ -1095,6 +1085,80 @@ out_unref:
}
/**
+ * xe_bo_notifier_prepare_pinned() - Prepare a pinned VRAM object to be backed
+ * up in system memory.
+ * @bo: The buffer object to prepare.
+ *
+ * On successful completion, the object backup pages are allocated. Expectation
+ * is that this is called from the PM notifier, prior to suspend/hibernation.
+ *
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_bo_notifier_prepare_pinned(struct xe_bo *bo)
+{
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_bo *backup;
+ int ret = 0;
+
+ xe_bo_lock(bo, false);
+
+ xe_assert(xe, !bo->backup_obj);
+
+ /*
+ * Since this is called from the PM notifier we might have raced with
+	 * someone unpinning this between dropping the pinned list lock and
+	 * grabbing the above bo lock.
+ */
+ if (!xe_bo_is_pinned(bo))
+ goto out_unlock_bo;
+
+ if (!xe_bo_is_vram(bo))
+ goto out_unlock_bo;
+
+ if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+ goto out_unlock_bo;
+
+ backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size,
+ DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
+ if (IS_ERR(backup)) {
+ ret = PTR_ERR(backup);
+ goto out_unlock_bo;
+ }
+
+ backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+ ttm_bo_pin(&backup->ttm);
+ bo->backup_obj = backup;
+
+out_unlock_bo:
+ xe_bo_unlock(bo);
+ return ret;
+}
+
+/**
+ * xe_bo_notifier_unprepare_pinned() - Undo the previous prepare operation.
+ * @bo: The buffer object to undo the prepare for.
+ *
+ * The backup object is removed, if still present. Expectation is that this is
+ * called from the PM notifier when undoing the prepare step.
+ *
+ * Return: Always returns 0.
+ */
+int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo)
+{
+ xe_bo_lock(bo, false);
+ if (bo->backup_obj) {
+ ttm_bo_unpin(&bo->backup_obj->ttm);
+ xe_bo_put(bo->backup_obj);
+ bo->backup_obj = NULL;
+ }
+ xe_bo_unlock(bo);
+
+ return 0;
+}
+
+/**
* xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
* @bo: The buffer object to move.
*
@@ -1107,59 +1171,99 @@ out_unref:
*/
int xe_bo_evict_pinned(struct xe_bo *bo)
{
- struct ttm_place place = {
- .mem_type = XE_PL_TT,
- };
- struct ttm_placement placement = {
- .placement = &place,
- .num_placement = 1,
- };
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .gfp_retry_mayfail = true,
- };
- struct ttm_resource *new_mem;
- int ret;
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_bo *backup = bo->backup_obj;
+ bool backup_created = false;
+ bool unmap = false;
+ int ret = 0;
- xe_bo_assert_held(bo);
+ xe_bo_lock(bo, false);
- if (WARN_ON(!bo->ttm.resource))
- return -EINVAL;
+ if (WARN_ON(!bo->ttm.resource)) {
+ ret = -EINVAL;
+ goto out_unlock_bo;
+ }
- if (WARN_ON(!xe_bo_is_pinned(bo)))
- return -EINVAL;
+ if (WARN_ON(!xe_bo_is_pinned(bo))) {
+ ret = -EINVAL;
+ goto out_unlock_bo;
+ }
if (!xe_bo_is_vram(bo))
- return 0;
+ goto out_unlock_bo;
+
+ if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+ goto out_unlock_bo;
+
+ if (!backup) {
+ backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size,
+ DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
+ if (IS_ERR(backup)) {
+ ret = PTR_ERR(backup);
+ goto out_unlock_bo;
+ }
+ backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+ backup_created = true;
+ }
- ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
- if (ret)
- return ret;
+ if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
+ struct xe_migrate *migrate;
+ struct dma_fence *fence;
+
+ if (bo->tile)
+ migrate = bo->tile->migrate;
+ else
+ migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
+
+ ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
+ if (ret)
+ goto out_backup;
+
+ ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
+ if (ret)
+ goto out_backup;
- if (!bo->ttm.ttm) {
- bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0);
- if (!bo->ttm.ttm) {
- ret = -ENOMEM;
- goto err_res_free;
+ fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource,
+ backup->ttm.resource, false);
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ goto out_backup;
}
- }
- ret = ttm_bo_populate(&bo->ttm, &ctx);
- if (ret)
- goto err_res_free;
+ dma_resv_add_fence(bo->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_resv_add_fence(backup->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_fence_put(fence);
+ } else {
+ ret = xe_bo_vmap(backup);
+ if (ret)
+ goto out_backup;
- ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
- if (ret)
- goto err_res_free;
+ if (iosys_map_is_null(&bo->vmap)) {
+ ret = xe_bo_vmap(bo);
+ if (ret)
+ goto out_backup;
+ unmap = true;
+ }
- ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
- if (ret)
- goto err_res_free;
+ xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0,
+ bo->size);
+ }
- return 0;
+ if (!bo->backup_obj)
+ bo->backup_obj = backup;
-err_res_free:
- ttm_resource_free(&bo->ttm, &new_mem);
+out_backup:
+ xe_bo_vunmap(backup);
+ if (ret && backup_created)
+ xe_bo_put(backup);
+out_unlock_bo:
+ if (unmap)
+ xe_bo_vunmap(bo);
+ xe_bo_unlock(bo);
return ret;
}
@@ -1180,50 +1284,109 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
.interruptible = false,
.gfp_retry_mayfail = false,
};
- struct ttm_resource *new_mem;
- struct ttm_place *place = &bo->placements[0];
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_bo *backup = bo->backup_obj;
+ bool unmap = false;
int ret;
- xe_bo_assert_held(bo);
+ if (!backup)
+ return 0;
- if (WARN_ON(!bo->ttm.resource))
- return -EINVAL;
+ xe_bo_lock(bo, false);
- if (WARN_ON(!xe_bo_is_pinned(bo)))
- return -EINVAL;
+ if (!xe_bo_is_pinned(backup)) {
+ ret = ttm_bo_validate(&backup->ttm, &backup->placement, &ctx);
+ if (ret)
+ goto out_unlock_bo;
+ }
- if (WARN_ON(xe_bo_is_vram(bo)))
- return -EINVAL;
+ if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
+ struct xe_migrate *migrate;
+ struct dma_fence *fence;
- if (WARN_ON(!bo->ttm.ttm && !xe_bo_is_stolen(bo)))
- return -EINVAL;
+ if (bo->tile)
+ migrate = bo->tile->migrate;
+ else
+ migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
- if (!mem_type_is_vram(place->mem_type))
- return 0;
+ ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
+ if (ret)
+ goto out_unlock_bo;
- ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
- if (ret)
- return ret;
+ ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
+ if (ret)
+ goto out_unlock_bo;
- ret = ttm_bo_populate(&bo->ttm, &ctx);
- if (ret)
- goto err_res_free;
+ fence = xe_migrate_copy(migrate, backup, bo,
+ backup->ttm.resource, bo->ttm.resource,
+ false);
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ goto out_unlock_bo;
+ }
- ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
- if (ret)
- goto err_res_free;
+ dma_resv_add_fence(bo->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_resv_add_fence(backup->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_fence_put(fence);
+ } else {
+ ret = xe_bo_vmap(backup);
+ if (ret)
+ goto out_unlock_bo;
- ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
- if (ret)
- goto err_res_free;
+ if (iosys_map_is_null(&bo->vmap)) {
+ ret = xe_bo_vmap(bo);
+ if (ret)
+ goto out_backup;
+ unmap = true;
+ }
- return 0;
+ xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr,
+ bo->size);
+ }
+
+ bo->backup_obj = NULL;
-err_res_free:
- ttm_resource_free(&bo->ttm, &new_mem);
+out_backup:
+ xe_bo_vunmap(backup);
+ if (!bo->backup_obj) {
+ if (xe_bo_is_pinned(backup))
+ ttm_bo_unpin(&backup->ttm);
+ xe_bo_put(backup);
+ }
+out_unlock_bo:
+ if (unmap)
+ xe_bo_vunmap(bo);
+ xe_bo_unlock(bo);
return ret;
}
+int xe_bo_dma_unmap_pinned(struct xe_bo *bo)
+{
+ struct ttm_buffer_object *ttm_bo = &bo->ttm;
+ struct ttm_tt *tt = ttm_bo->ttm;
+
+ if (tt) {
+ struct xe_ttm_tt *xe_tt = container_of(tt, typeof(*xe_tt), ttm);
+
+ if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
+ dma_buf_unmap_attachment(ttm_bo->base.import_attach,
+ ttm_bo->sg,
+ DMA_BIDIRECTIONAL);
+ ttm_bo->sg = NULL;
+ xe_tt->sg = NULL;
+ } else if (xe_tt->sg) {
+ dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg,
+ DMA_BIDIRECTIONAL, 0);
+ sg_free_table(xe_tt->sg);
+ xe_tt->sg = NULL;
+ }
+ }
+
+ return 0;
+}
+
static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
unsigned long page_offset)
{
@@ -1371,6 +1534,7 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
struct xe_res_cursor cursor;
struct xe_vram_region *vram;
int bytes_left = len;
+ int err = 0;
xe_bo_assert_held(bo);
xe_device_assert_mem_access(xe);
@@ -1378,9 +1542,14 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
if (!mem_type_is_vram(ttm_bo->resource->mem_type))
return -EIO;
- /* FIXME: Use GPU for non-visible VRAM */
- if (!xe_ttm_resource_visible(ttm_bo->resource))
- return -EIO;
+ if (!xe_ttm_resource_visible(ttm_bo->resource) || len >= SZ_16K) {
+ struct xe_migrate *migrate =
+ mem_type_to_migrate(xe, ttm_bo->resource->mem_type);
+
+ err = xe_migrate_access_memory(migrate, bo, offset, buf, len,
+ write);
+ goto out;
+ }
vram = res_to_mem_region(ttm_bo->resource);
xe_res_first(ttm_bo->resource, offset & PAGE_MASK,
@@ -1404,7 +1573,8 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
xe_res_next(&cursor, PAGE_SIZE);
} while (bytes_left);
- return len;
+out:
+ return err ?: len;
}
const struct ttm_device_funcs xe_ttm_funcs = {
@@ -1448,6 +1618,9 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
if (bo->vm && xe_bo_is_user(bo))
xe_vm_put(bo->vm);
+ if (bo->parent_obj)
+ xe_bo_put(bo->parent_obj);
+
mutex_lock(&xe->mem_access.vram_userfault.lock);
if (!list_empty(&bo->vram_userfault_link))
list_del(&bo->vram_userfault_link);
@@ -1947,7 +2120,7 @@ struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
flags |= XE_BO_FLAG_GGTT;
bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
- flags | XE_BO_FLAG_NEEDS_CPU_ACCESS,
+ flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
alignment);
if (IS_ERR(bo))
return bo;
@@ -2049,7 +2222,8 @@ int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, str
struct xe_bo *bo;
u32 dst_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT;
- dst_flags |= (*src)->flags & XE_BO_FLAG_GGTT_INVALIDATE;
+ dst_flags |= (*src)->flags & (XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
xe_assert(xe, IS_DGFX(xe));
xe_assert(xe, !(*src)->vmap.is_iomem);
@@ -2073,10 +2247,16 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
{
struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
- if (res->mem_type == XE_PL_STOLEN)
+ switch (res->mem_type) {
+ case XE_PL_STOLEN:
return xe_ttm_stolen_gpu_offset(xe);
-
- return res_to_mem_region(res)->dpa_base;
+ case XE_PL_TT:
+ case XE_PL_SYSTEM:
+ return 0;
+ default:
+ return res_to_mem_region(res)->dpa_base;
+ }
+ return 0;
}
/**
@@ -2102,12 +2282,9 @@ int xe_bo_pin_external(struct xe_bo *bo)
if (err)
return err;
- if (xe_bo_is_vram(bo)) {
- spin_lock(&xe->pinned.lock);
- list_add_tail(&bo->pinned_link,
- &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- }
+ spin_lock(&xe->pinned.lock);
+ list_add_tail(&bo->pinned_link, &xe->pinned.late.external);
+ spin_unlock(&xe->pinned.lock);
}
ttm_bo_pin(&bo->ttm);
@@ -2149,25 +2326,12 @@ int xe_bo_pin(struct xe_bo *bo)
if (err)
return err;
- /*
- * For pinned objects in on DGFX, which are also in vram, we expect
- * these to be in contiguous VRAM memory. Required eviction / restore
- * during suspend / resume (force restore to same physical address).
- */
- if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
- bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
- if (mem_type_is_vram(place->mem_type)) {
- xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);
-
- place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
- vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
- place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
- }
- }
-
if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
spin_lock(&xe->pinned.lock);
- list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
+ if (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)
+ list_add_tail(&bo->pinned_link, &xe->pinned.late.kernel_bo_present);
+ else
+ list_add_tail(&bo->pinned_link, &xe->pinned.early.kernel_bo_present);
spin_unlock(&xe->pinned.lock);
}
@@ -2231,6 +2395,13 @@ void xe_bo_unpin(struct xe_bo *bo)
xe_assert(xe, !list_empty(&bo->pinned_link));
list_del_init(&bo->pinned_link);
spin_unlock(&xe->pinned.lock);
+
+ if (bo->backup_obj) {
+ if (xe_bo_is_pinned(bo->backup_obj))
+ ttm_bo_unpin(&bo->backup_obj->ttm);
+ xe_bo_put(bo->backup_obj);
+ bo->backup_obj = NULL;
+ }
}
ttm_bo_unpin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
@@ -2398,7 +2569,7 @@ static int gem_create_user_ext_set_property(struct xe_device *xe,
int err;
u32 idx;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -2435,7 +2606,7 @@ static int gem_create_user_extensions(struct xe_device *xe, struct xe_bo *bo,
if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
return -E2BIG;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -2759,19 +2930,17 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
/**
* xe_bo_evict - Evict an object to evict placement
* @bo: The buffer object to migrate.
- * @force_alloc: Set force_alloc in ttm_operation_ctx
*
* On successful completion, the object memory will be moved to evict
* placement. This function blocks until the object has been fully moved.
*
* Return: 0 on success. Negative error code on failure.
*/
-int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
+int xe_bo_evict(struct xe_bo *bo)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
- .force_alloc = force_alloc,
.gfp_retry_mayfail = true,
};
struct ttm_placement placement;
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index ec3e4446d027..02ada1fb8a23 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -39,20 +39,23 @@
#define XE_BO_FLAG_NEEDS_64K BIT(15)
#define XE_BO_FLAG_NEEDS_2M BIT(16)
#define XE_BO_FLAG_GGTT_INVALIDATE BIT(17)
-#define XE_BO_FLAG_GGTT0 BIT(18)
-#define XE_BO_FLAG_GGTT1 BIT(19)
-#define XE_BO_FLAG_GGTT2 BIT(20)
-#define XE_BO_FLAG_GGTT3 BIT(21)
-#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \
- XE_BO_FLAG_GGTT1 | \
- XE_BO_FLAG_GGTT2 | \
- XE_BO_FLAG_GGTT3)
-#define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(22)
+#define XE_BO_FLAG_PINNED_NORESTORE BIT(18)
+#define XE_BO_FLAG_PINNED_LATE_RESTORE BIT(19)
+#define XE_BO_FLAG_GGTT0 BIT(20)
+#define XE_BO_FLAG_GGTT1 BIT(21)
+#define XE_BO_FLAG_GGTT2 BIT(22)
+#define XE_BO_FLAG_GGTT3 BIT(23)
+#define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(24)
 /* this one is triggered internally only */
#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
#define XE_BO_FLAG_INTERNAL_64K BIT(31)
+#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \
+ XE_BO_FLAG_GGTT1 | \
+ XE_BO_FLAG_GGTT2 | \
+ XE_BO_FLAG_GGTT3)
+
#define XE_BO_FLAG_GGTTx(tile) \
(XE_BO_FLAG_GGTT0 << (tile)->id)
@@ -271,11 +274,15 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res);
bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);
int xe_bo_migrate(struct xe_bo *bo, u32 mem_type);
-int xe_bo_evict(struct xe_bo *bo, bool force_alloc);
+int xe_bo_evict(struct xe_bo *bo);
int xe_bo_evict_pinned(struct xe_bo *bo);
+int xe_bo_notifier_prepare_pinned(struct xe_bo *bo);
+int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo);
int xe_bo_restore_pinned(struct xe_bo *bo);
+int xe_bo_dma_unmap_pinned(struct xe_bo *bo);
+
extern const struct ttm_device_funcs xe_ttm_funcs;
extern const char *const xe_mem_type_to_name[];
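The two new pin flags split save/restore behaviour: XE_BO_FLAG_PINNED_NORESTORE marks pinned VRAM objects whose contents need not survive suspend, and XE_BO_FLAG_PINNED_LATE_RESTORE marks objects that may be restored with the blitter after GT init instead of by the early CPU copy. A usage sketch, with illustrative call sites that are not taken from this series:

	/* Kernel BO restored late, via the blitter, after GT init: */
	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_PINNED_LATE_RESTORE);

	/* Scratch BO whose contents may be thrown away across suspend: */
	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_PINNED_NORESTORE);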
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index 6a40eedd9db1..ed3746d32b27 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -10,28 +10,103 @@
#include "xe_ggtt.h"
#include "xe_tile.h"
+typedef int (*xe_pinned_fn)(struct xe_bo *bo);
+
+static int xe_bo_apply_to_pinned(struct xe_device *xe,
+ struct list_head *pinned_list,
+ struct list_head *new_list,
+ const xe_pinned_fn pinned_fn)
+{
+ LIST_HEAD(still_in_list);
+ struct xe_bo *bo;
+ int ret = 0;
+
+ spin_lock(&xe->pinned.lock);
+ while (!ret) {
+ bo = list_first_entry_or_null(pinned_list, typeof(*bo),
+ pinned_link);
+ if (!bo)
+ break;
+ xe_bo_get(bo);
+ list_move_tail(&bo->pinned_link, &still_in_list);
+ spin_unlock(&xe->pinned.lock);
+
+ ret = pinned_fn(bo);
+ if (ret && pinned_list != new_list) {
+ spin_lock(&xe->pinned.lock);
+ /*
+ * We might no longer be pinned, since PM notifier can
+ * call this. If the pinned link is now empty, keep it
+ * that way.
+ */
+ if (!list_empty(&bo->pinned_link))
+ list_move(&bo->pinned_link, pinned_list);
+ spin_unlock(&xe->pinned.lock);
+ }
+ xe_bo_put(bo);
+ spin_lock(&xe->pinned.lock);
+ }
+ list_splice_tail(&still_in_list, new_list);
+ spin_unlock(&xe->pinned.lock);
+
+ return ret;
+}
+
/**
- * xe_bo_evict_all - evict all BOs from VRAM
+ * xe_bo_notifier_prepare_all_pinned() - Pre-allocate the backing pages for all
+ * pinned VRAM objects which need to be saved.
+ * @xe: xe device
+ *
+ * Should be called from PM notifier when preparing for s3/s4.
*
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_bo_notifier_prepare_all_pinned(struct xe_device *xe)
+{
+ int ret;
+
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_notifier_prepare_pinned);
+ if (!ret)
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_notifier_prepare_pinned);
+
+ return ret;
+}
+
+/**
+ * xe_bo_notifier_unprepare_all_pinned() - Remove the backing pages for all
+ * pinned VRAM objects which have been restored.
* @xe: xe device
*
- * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
- * (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU.
- * All eviction magic done via TTM calls.
+ * Should be called from PM notifier after exiting s3/s4 (either on success or
+ * failure).
+ */
+void xe_bo_notifier_unprepare_all_pinned(struct xe_device *xe)
+{
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_notifier_unprepare_pinned);
+
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_notifier_unprepare_pinned);
+}
+
+/**
+ * xe_bo_evict_all_user - evict all non-pinned user BOs from VRAM
+ * @xe: xe device
*
- * Evict == move VRAM BOs to temporary (typically system) memory.
+ * Evict non-pinned user BOs (via GPU).
*
- * This function should be called before the device goes into a suspend state
- * where the VRAM loses power.
+ * Evict == move VRAM BOs to temporary (typically system) memory.
*/
-int xe_bo_evict_all(struct xe_device *xe)
+int xe_bo_evict_all_user(struct xe_device *xe)
{
struct ttm_device *bdev = &xe->ttm;
- struct xe_bo *bo;
- struct xe_tile *tile;
- struct list_head still_in_list;
u32 mem_type;
- u8 id;
int ret;
/* User memory */
@@ -57,34 +132,38 @@ int xe_bo_evict_all(struct xe_device *xe)
}
}
- /* Pinned user memory in VRAM */
- INIT_LIST_HEAD(&still_in_list);
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.external_vram,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- xe_bo_get(bo);
- list_move_tail(&bo->pinned_link, &still_in_list);
- spin_unlock(&xe->pinned.lock);
+ return 0;
+}
- xe_bo_lock(bo, false);
- ret = xe_bo_evict_pinned(bo);
- xe_bo_unlock(bo);
- xe_bo_put(bo);
- if (ret) {
- spin_lock(&xe->pinned.lock);
- list_splice_tail(&still_in_list,
- &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- return ret;
- }
+/**
+ * xe_bo_evict_all - evict all BOs from VRAM
+ * @xe: xe device
+ *
+ * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
+ * (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU.
+ * All eviction magic done via TTM calls.
+ *
+ * Evict == move VRAM BOs to temporary (typically system) memory.
+ *
+ * This function should be called before the device goes into a suspend state
+ * where the VRAM loses power.
+ */
+int xe_bo_evict_all(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ u8 id;
+ int ret;
- spin_lock(&xe->pinned.lock);
- }
- list_splice_tail(&still_in_list, &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
+ ret = xe_bo_evict_all_user(xe);
+ if (ret)
+ return ret;
+
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.evicted, xe_bo_evict_pinned);
+
+ if (!ret)
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.evicted, xe_bo_evict_pinned);
/*
* Wait for all user BO to be evicted as those evictions depend on the
@@ -93,32 +172,49 @@ int xe_bo_evict_all(struct xe_device *xe)
for_each_tile(tile, xe, id)
xe_tile_migrate_wait(tile);
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.kernel_bo_present,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- xe_bo_get(bo);
- list_move_tail(&bo->pinned_link, &xe->pinned.evicted);
- spin_unlock(&xe->pinned.lock);
+ if (ret)
+ return ret;
- xe_bo_lock(bo, false);
- ret = xe_bo_evict_pinned(bo);
- xe_bo_unlock(bo);
- xe_bo_put(bo);
- if (ret)
- return ret;
+ return xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.evicted,
+ xe_bo_evict_pinned);
+}
- spin_lock(&xe->pinned.lock);
+static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ int ret;
+
+ ret = xe_bo_restore_pinned(bo);
+ if (ret)
+ return ret;
+
+ if (bo->flags & XE_BO_FLAG_GGTT) {
+ struct xe_tile *tile;
+ u8 id;
+
+ for_each_tile(tile, xe_bo_device(bo), id) {
+ if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
+ continue;
+
+ mutex_lock(&tile->mem.ggtt->lock);
+ xe_ggtt_map_bo(tile->mem.ggtt, bo);
+ mutex_unlock(&tile->mem.ggtt->lock);
+ }
}
- spin_unlock(&xe->pinned.lock);
+
+ /*
+	 * We expect validate to trigger a move to VRAM and our move code
+ * should setup the iosys map.
+ */
+ xe_assert(xe, !(bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE) ||
+ !iosys_map_is_null(&bo->vmap));
return 0;
}
/**
- * xe_bo_restore_kernel - restore kernel BOs to VRAM
+ * xe_bo_restore_early - restore early phase kernel BOs to VRAM
*
* @xe: xe device
*
@@ -128,111 +224,130 @@ int xe_bo_evict_all(struct xe_device *xe)
* This function should be called early, before trying to init the GT, on device
* resume.
*/
-int xe_bo_restore_kernel(struct xe_device *xe)
+int xe_bo_restore_early(struct xe_device *xe)
{
- struct xe_bo *bo;
- int ret;
+ return xe_bo_apply_to_pinned(xe, &xe->pinned.early.evicted,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_restore_and_map_ggtt);
+}
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.evicted,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- xe_bo_get(bo);
- list_move_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
- spin_unlock(&xe->pinned.lock);
+/**
+ * xe_bo_restore_late - restore pinned late phase BOs
+ *
+ * @xe: xe device
+ *
+ * Move pinned user and kernel BOs which can use the blitter from temporary
+ * (typically system) memory to VRAM. All moves done via TTM calls.
+ *
+ * This function should be called late, after GT init, on device resume.
+ */
+int xe_bo_restore_late(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ int ret, id;
- xe_bo_lock(bo, false);
- ret = xe_bo_restore_pinned(bo);
- xe_bo_unlock(bo);
- if (ret) {
- xe_bo_put(bo);
- return ret;
- }
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.evicted,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_restore_and_map_ggtt);
- if (bo->flags & XE_BO_FLAG_GGTT) {
- struct xe_tile *tile;
- u8 id;
+ for_each_tile(tile, xe, id)
+ xe_tile_migrate_wait(tile);
- for_each_tile(tile, xe, id) {
- if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
- continue;
+ if (ret)
+ return ret;
- mutex_lock(&tile->mem.ggtt->lock);
- xe_ggtt_map_bo(tile->mem.ggtt, bo);
- mutex_unlock(&tile->mem.ggtt->lock);
- }
- }
+ if (!IS_DGFX(xe))
+ return 0;
- /*
- * We expect validate to trigger a move VRAM and our move code
- * should setup the iosys map.
- */
- xe_assert(xe, !iosys_map_is_null(&bo->vmap));
+ /* Pinned user memory in VRAM should be validated on resume */
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external,
+ xe_bo_restore_pinned);
- xe_bo_put(bo);
+ /* Wait for restore to complete */
+ for_each_tile(tile, xe, id)
+ xe_tile_migrate_wait(tile);
- spin_lock(&xe->pinned.lock);
- }
- spin_unlock(&xe->pinned.lock);
+ return ret;
+}
- return 0;
+static void xe_bo_pci_dev_remove_pinned(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ unsigned int id;
+
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external,
+ xe_bo_dma_unmap_pinned);
+ for_each_tile(tile, xe, id)
+ xe_tile_migrate_wait(tile);
}
/**
- * xe_bo_restore_user - restore pinned user BOs to VRAM
- *
- * @xe: xe device
+ * xe_bo_pci_dev_remove_all() - Handle bos when the pci_device is about to be removed
+ * @xe: The xe device.
*
- * Move pinned user BOs from temporary (typically system) memory to VRAM via
- * CPU. All moves done via TTM calls.
+ * On pci_device removal we need to drop all dma mappings and move
+ * the data of exported bos out to system. This includes SVM bos and
+ * exported dma-buf bos. This is done by evicting all bos, but
+ * the evict placement in xe_evict_flags() is chosen such that all
+ * bos except those mentioned are purged, and thus their memory
+ * is released.
*
- * This function should be called late, after GT init, on device resume.
+ * For pinned bos, we unmap their dma mappings.
*/
-int xe_bo_restore_user(struct xe_device *xe)
+void xe_bo_pci_dev_remove_all(struct xe_device *xe)
{
- struct xe_bo *bo;
- struct xe_tile *tile;
- struct list_head still_in_list;
- u8 id;
- int ret;
+ unsigned int mem_type;
- if (!IS_DGFX(xe))
- return 0;
+ /*
+ * Move pagemap bos and exported dma-buf to system, and
+ * purge everything else.
+ */
+ for (mem_type = XE_PL_VRAM1; mem_type >= XE_PL_TT; --mem_type) {
+ struct ttm_resource_manager *man =
+ ttm_manager_type(&xe->ttm, mem_type);
- /* Pinned user memory in VRAM should be validated on resume */
- INIT_LIST_HEAD(&still_in_list);
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.external_vram,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- list_move_tail(&bo->pinned_link, &still_in_list);
- xe_bo_get(bo);
- spin_unlock(&xe->pinned.lock);
+ if (man) {
+ int ret = ttm_resource_manager_evict_all(&xe->ttm, man);
- xe_bo_lock(bo, false);
- ret = xe_bo_restore_pinned(bo);
- xe_bo_unlock(bo);
- xe_bo_put(bo);
- if (ret) {
- spin_lock(&xe->pinned.lock);
- list_splice_tail(&still_in_list,
- &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- return ret;
+ drm_WARN_ON(&xe->drm, ret);
}
-
- spin_lock(&xe->pinned.lock);
}
- list_splice_tail(&still_in_list, &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- /* Wait for restore to complete */
- for_each_tile(tile, xe, id)
- xe_tile_migrate_wait(tile);
+ xe_bo_pci_dev_remove_pinned(xe);
+}
- return 0;
+static void xe_bo_pinned_fini(void *arg)
+{
+ struct xe_device *xe = arg;
+
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_dma_unmap_pinned);
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_dma_unmap_pinned);
+}
+
+/**
+ * xe_bo_pinned_init() - Initialize pinned bo tracking
+ * @xe: The xe device.
+ *
+ * Initializes the lists and locks required for pinned bo
+ * tracking and registers a callback to dma-unmap
+ * any remaining pinned bos on pci device removal.
+ *
+ * Return: %0 on success, negative error code on error.
+ */
+int xe_bo_pinned_init(struct xe_device *xe)
+{
+ spin_lock_init(&xe->pinned.lock);
+ INIT_LIST_HEAD(&xe->pinned.early.kernel_bo_present);
+ INIT_LIST_HEAD(&xe->pinned.early.evicted);
+ INIT_LIST_HEAD(&xe->pinned.late.kernel_bo_present);
+ INIT_LIST_HEAD(&xe->pinned.late.evicted);
+ INIT_LIST_HEAD(&xe->pinned.late.external);
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_bo_pinned_fini, xe);
}
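The early/late lists above are intended to be driven from a PM notifier: backup pages are pre-allocated while allocations can still block, then dropped again after resume (or an aborted suspend). A minimal sketch of such wiring, where the notifier member and its registration are assumptions rather than part of this patch:

	static int xe_pm_notifier_callback(struct notifier_block *nb,
					   unsigned long action, void *data)
	{
		/* Assumed member; illustrative only. */
		struct xe_device *xe = container_of(nb, struct xe_device,
						    pm_notifier);

		switch (action) {
		case PM_HIBERNATION_PREPARE:
		case PM_SUSPEND_PREPARE:
			/* Pre-allocate backup pages while allocations may
			 * still block. */
			return xe_bo_notifier_prepare_all_pinned(xe) ?
				NOTIFY_BAD : NOTIFY_OK;
		case PM_POST_HIBERNATION:
		case PM_POST_SUSPEND:
			/* Drop backups whether suspend succeeded or not. */
			xe_bo_notifier_unprepare_all_pinned(xe);
			return NOTIFY_OK;
		}
		return NOTIFY_DONE;
	}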
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.h b/drivers/gpu/drm/xe/xe_bo_evict.h
index 746894798852..e8385cb7f5e9 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.h
+++ b/drivers/gpu/drm/xe/xe_bo_evict.h
@@ -9,7 +9,13 @@
struct xe_device;
int xe_bo_evict_all(struct xe_device *xe);
-int xe_bo_restore_kernel(struct xe_device *xe);
-int xe_bo_restore_user(struct xe_device *xe);
+int xe_bo_evict_all_user(struct xe_device *xe);
+int xe_bo_notifier_prepare_all_pinned(struct xe_device *xe);
+void xe_bo_notifier_unprepare_all_pinned(struct xe_device *xe);
+int xe_bo_restore_early(struct xe_device *xe);
+int xe_bo_restore_late(struct xe_device *xe);
+void xe_bo_pci_dev_remove_all(struct xe_device *xe);
+
+int xe_bo_pinned_init(struct xe_device *xe);
#endif
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 15a92e3d4898..eb5e83c5f233 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -28,6 +28,10 @@ struct xe_vm;
struct xe_bo {
/** @ttm: TTM base buffer object */
struct ttm_buffer_object ttm;
+ /** @backup_obj: The backup object when pinned and suspended (vram only) */
+ struct xe_bo *backup_obj;
+ /** @parent_obj: Ref to parent bo if this a backup_obj */
+ struct xe_bo *parent_obj;
/** @size: Size of this buffer object */
size_t size;
/** @flags: flags for this buffer object */
diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
new file mode 100644
index 000000000000..cb9f175c89a1
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_configfs.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/configfs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "xe_configfs.h"
+#include "xe_module.h"
+
+/**
+ * DOC: Xe Configfs
+ *
+ * Overview
+ * =========
+ *
+ * Configfs is a filesystem-based manager of kernel objects. XE KMD registers a
+ * configfs subsystem called ``'xe'`` that creates a directory in the mounted
+ * configfs directory. The user can create devices under this directory and
+ * configure them as necessary. See Documentation/filesystems/configfs.rst for
+ * more information about how configfs works.
+ *
+ * Create devices
+ * ===============
+ *
+ * In order to create a device, the user has to create a directory inside ``'xe'``::
+ *
+ * mkdir /sys/kernel/config/xe/0000:03:00.0/
+ *
+ * Every device created is populated by the driver with entries that can be
+ * used to configure it::
+ *
+ * /sys/kernel/config/xe/
+ * .. 0000:03:00.0/
+ * ... survivability_mode
+ *
+ * Configure Attributes
+ * ====================
+ *
+ * Survivability mode:
+ * -------------------
+ *
+ * Enable survivability mode on supported cards. This setting only takes
+ * effect when probing the device. Example to enable it::
+ *
+ * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
+ * # echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind (Enters survivability mode if supported)
+ *
+ * Remove devices
+ * ==============
+ *
+ * The created device directories can be removed using ``rmdir``::
+ *
+ * rmdir /sys/kernel/config/xe/0000:03:00.0/
+ */
+
+struct xe_config_device {
+ struct config_group group;
+
+ bool survivability_mode;
+
+ /* protects attributes */
+ struct mutex lock;
+};
+
+static struct xe_config_device *to_xe_config_device(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct xe_config_device, group);
+}
+
+static ssize_t survivability_mode_show(struct config_item *item, char *page)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+
+ return sprintf(page, "%d\n", dev->survivability_mode);
+}
+
+static ssize_t survivability_mode_store(struct config_item *item, const char *page, size_t len)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+ bool survivability_mode;
+ int ret;
+
+ ret = kstrtobool(page, &survivability_mode);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->lock);
+ dev->survivability_mode = survivability_mode;
+ mutex_unlock(&dev->lock);
+
+ return len;
+}
+
+CONFIGFS_ATTR(, survivability_mode);
+
+static struct configfs_attribute *xe_config_device_attrs[] = {
+ &attr_survivability_mode,
+ NULL,
+};
+
+static void xe_config_device_release(struct config_item *item)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+
+ mutex_destroy(&dev->lock);
+ kfree(dev);
+}
+
+static struct configfs_item_operations xe_config_device_ops = {
+ .release = xe_config_device_release,
+};
+
+static const struct config_item_type xe_config_device_type = {
+ .ct_item_ops = &xe_config_device_ops,
+ .ct_attrs = xe_config_device_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *xe_config_make_device_group(struct config_group *group,
+ const char *name)
+{
+ unsigned int domain, bus, slot, function;
+ struct xe_config_device *dev;
+ struct pci_dev *pdev;
+ int ret;
+
+ ret = sscanf(name, "%04x:%02x:%02x.%x", &domain, &bus, &slot, &function);
+ if (ret != 4)
+ return ERR_PTR(-EINVAL);
+
+	pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
+	if (!pdev)
+		return ERR_PTR(-EINVAL);
+	pci_dev_put(pdev);	/* only checked for existence; drop the ref */
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ config_group_init_type_name(&dev->group, name, &xe_config_device_type);
+
+ mutex_init(&dev->lock);
+
+ return &dev->group;
+}
+
+static struct configfs_group_operations xe_config_device_group_ops = {
+ .make_group = xe_config_make_device_group,
+};
+
+static const struct config_item_type xe_configfs_type = {
+ .ct_group_ops = &xe_config_device_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem xe_configfs = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "xe",
+ .ci_type = &xe_configfs_type,
+ },
+ },
+};
+
+static struct xe_config_device *configfs_find_group(struct pci_dev *pdev)
+{
+ struct config_item *item;
+ char name[64];
+
+ snprintf(name, sizeof(name), "%04x:%02x:%02x.%x", pci_domain_nr(pdev->bus),
+ pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+
+ mutex_lock(&xe_configfs.su_mutex);
+ item = config_group_find_item(&xe_configfs.su_group, name);
+ mutex_unlock(&xe_configfs.su_mutex);
+
+ if (!item)
+ return NULL;
+
+ return to_xe_config_device(item);
+}
+
+/**
+ * xe_configfs_get_survivability_mode - get configfs survivability mode attribute
+ * @pdev: pci device
+ *
+ * Find the configfs group that belongs to the pci device and return
+ * the survivability mode attribute.
+ *
+ * Return: survivability mode if config group is found, false otherwise
+ */
+bool xe_configfs_get_survivability_mode(struct pci_dev *pdev)
+{
+ struct xe_config_device *dev = configfs_find_group(pdev);
+ bool mode;
+
+ if (!dev)
+ return false;
+
+ mode = dev->survivability_mode;
+ config_item_put(&dev->group.cg_item);
+
+ return mode;
+}
+
+/**
+ * xe_configfs_clear_survivability_mode - clear configfs survivability mode attribute
+ * @pdev: pci device
+ *
+ * Find the configfs group that belongs to the pci device and clear the
+ * survivability mode attribute.
+ */
+void xe_configfs_clear_survivability_mode(struct pci_dev *pdev)
+{
+ struct xe_config_device *dev = configfs_find_group(pdev);
+
+ if (!dev)
+ return;
+
+ mutex_lock(&dev->lock);
+ dev->survivability_mode = 0;
+ mutex_unlock(&dev->lock);
+
+ config_item_put(&dev->group.cg_item);
+}
+
+int __init xe_configfs_init(void)
+{
+ struct config_group *root = &xe_configfs.su_group;
+ int ret;
+
+ config_group_init(root);
+ mutex_init(&xe_configfs.su_mutex);
+ ret = configfs_register_subsystem(&xe_configfs);
+ if (ret) {
+ pr_err("Error %d while registering %s subsystem\n",
+ ret, root->cg_item.ci_namebuf);
+ return ret;
+ }
+
+ return 0;
+}
+
+void __exit xe_configfs_exit(void)
+{
+ configfs_unregister_subsystem(&xe_configfs);
+}
+
diff --git a/drivers/gpu/drm/xe/xe_configfs.h b/drivers/gpu/drm/xe/xe_configfs.h
new file mode 100644
index 000000000000..d7d041ec2611
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_configfs.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+#ifndef _XE_CONFIGFS_H_
+#define _XE_CONFIGFS_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
+int xe_configfs_init(void);
+void xe_configfs_exit(void);
+bool xe_configfs_get_survivability_mode(struct pci_dev *pdev);
+void xe_configfs_clear_survivability_mode(struct pci_dev *pdev);
+#else
+static inline int xe_configfs_init(void) { return 0; }
+static inline void xe_configfs_exit(void) { }
+static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; }
+static inline void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) { }
+#endif
+
+#endif
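A sketch of how a probe path might consume these helpers; the call site and the survivability-mode entry function are assumptions, as the real wiring lives in the survivability-mode code rather than in this header:

	/* Illustrative probe-time check only. */
	if (xe_configfs_get_survivability_mode(pdev)) {
		enter_survivability_mode(xe);	/* hypothetical helper */
		xe_configfs_clear_survivability_mode(pdev);
	}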
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 81b9d9bb3f57..7a8af2311318 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -80,7 +80,8 @@ static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q)
return &q->gt->uc.guc;
}
-static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
+static ssize_t __xe_devcoredump_read(char *buffer, ssize_t count,
+ ssize_t start,
struct xe_devcoredump *coredump)
{
struct xe_device *xe;
@@ -94,7 +95,7 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
ss = &coredump->snapshot;
iter.data = buffer;
- iter.start = 0;
+ iter.start = start;
iter.remain = count;
p = drm_coredump_printer(&iter);
@@ -168,12 +169,16 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
ss->vm = NULL;
}
+#define XE_DEVCOREDUMP_CHUNK_MAX (SZ_512M + SZ_1G)
+
static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
{
struct xe_devcoredump *coredump = data;
struct xe_devcoredump_snapshot *ss;
ssize_t byte_copied;
+ u32 chunk_offset;
+ ssize_t new_chunk_position;
if (!coredump)
return -ENODEV;
@@ -183,6 +188,9 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
/* Ensure delayed work is captured before continuing */
flush_work(&ss->work);
+ if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX)
+ xe_pm_runtime_get(gt_to_xe(ss->gt));
+
mutex_lock(&coredump->lock);
if (!ss->read.buffer) {
@@ -195,12 +203,29 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
return 0;
}
+ new_chunk_position = div_u64_rem(offset,
+ XE_DEVCOREDUMP_CHUNK_MAX,
+ &chunk_offset);
+
+ if (offset >= ss->read.chunk_position + XE_DEVCOREDUMP_CHUNK_MAX ||
+ offset < ss->read.chunk_position) {
+ ss->read.chunk_position = new_chunk_position *
+ XE_DEVCOREDUMP_CHUNK_MAX;
+
+ __xe_devcoredump_read(ss->read.buffer,
+ XE_DEVCOREDUMP_CHUNK_MAX,
+ ss->read.chunk_position, coredump);
+ }
+
byte_copied = count < ss->read.size - offset ? count :
ss->read.size - offset;
- memcpy(buffer, ss->read.buffer + offset, byte_copied);
+ memcpy(buffer, ss->read.buffer + chunk_offset, byte_copied);
mutex_unlock(&coredump->lock);
+ if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX)
+ xe_pm_runtime_put(gt_to_xe(ss->gt));
+
return byte_copied;
}
@@ -254,17 +279,32 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);
- xe_pm_runtime_put(xe);
+ ss->read.chunk_position = 0;
/* Calculate devcoredump size */
- ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump);
-
- ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
- if (!ss->read.buffer)
- return;
+ ss->read.size = __xe_devcoredump_read(NULL, LONG_MAX, 0, coredump);
+
+ if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX) {
+ ss->read.buffer = kvmalloc(XE_DEVCOREDUMP_CHUNK_MAX,
+ GFP_USER);
+ if (!ss->read.buffer)
+ goto put_pm;
+
+ __xe_devcoredump_read(ss->read.buffer,
+ XE_DEVCOREDUMP_CHUNK_MAX,
+ 0, coredump);
+ } else {
+ ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
+ if (!ss->read.buffer)
+ goto put_pm;
+
+ __xe_devcoredump_read(ss->read.buffer, ss->read.size, 0,
+ coredump);
+ xe_devcoredump_snapshot_free(ss);
+ }
- __xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump);
- xe_devcoredump_snapshot_free(ss);
+put_pm:
+ xe_pm_runtime_put(xe);
}
static void devcoredump_snapshot(struct xe_devcoredump *coredump,
@@ -425,7 +465,7 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffi
if (offset & 3)
drm_printf(p, "Offset not word aligned: %zu", offset);
- line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_KERNEL);
+ line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_ATOMIC);
if (!line_buff) {
drm_printf(p, "Failed to allocate line buffer\n");
return;
diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h
index 1a1d16a96b2d..a174385a6d83 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump_types.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h
@@ -66,6 +66,8 @@ struct xe_devcoredump_snapshot {
struct {
/** @read.size: size of devcoredump in human readable format */
ssize_t size;
+ /** @read.chunk_position: position of devcoredump chunk */
+ ssize_t chunk_position;
/** @read.buffer: buffer of devcoredump in human readable format */
char *buffer;
} read;
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 00191227bc95..c02c4c4e9412 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -23,8 +23,10 @@
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
+#include "xe_bo_evict.h"
#include "xe_debugfs.h"
#include "xe_devcoredump.h"
+#include "xe_device_sysfs.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_drv.h"
@@ -467,10 +469,9 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
xa_erase(&xe->usm.asid_to_vm, asid);
}
- spin_lock_init(&xe->pinned.lock);
- INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
- INIT_LIST_HEAD(&xe->pinned.external_vram);
- INIT_LIST_HEAD(&xe->pinned.evicted);
+ err = xe_bo_pinned_init(xe);
+ if (err)
+ goto err;
xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq",
WQ_MEM_RECLAIM);
@@ -505,7 +506,15 @@ ALLOW_ERROR_INJECTION(xe_device_create, ERRNO); /* See xe_pci_probe() */
static bool xe_driver_flr_disabled(struct xe_device *xe)
{
- return xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS;
+ if (IS_SRIOV_VF(xe))
+ return true;
+
+ if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
+ drm_info(&xe->drm, "Driver-FLR disabled by BIOS\n");
+ return true;
+ }
+
+ return false;
}
/*
@@ -523,7 +532,7 @@ static bool xe_driver_flr_disabled(struct xe_device *xe)
*/
static void __xe_driver_flr(struct xe_device *xe)
{
- const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */
+ const unsigned int flr_timeout = 3 * USEC_PER_SEC; /* specs recommend a 3s wait */
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
int ret;
@@ -569,10 +578,8 @@ static void __xe_driver_flr(struct xe_device *xe)
static void xe_driver_flr(struct xe_device *xe)
{
- if (xe_driver_flr_disabled(xe)) {
- drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
+ if (xe_driver_flr_disabled(xe))
return;
- }
__xe_driver_flr(xe);
}
@@ -706,7 +713,7 @@ int xe_device_probe_early(struct xe_device *xe)
sriov_update_device_info(xe);
err = xe_pcode_probe_early(xe);
- if (err) {
+ if (err || xe_survivability_mode_is_requested(xe)) {
int save_err = err;
/*
@@ -729,6 +736,7 @@ int xe_device_probe_early(struct xe_device *xe)
return 0;
}
+ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO); /* See xe_pci_probe() */
static int probe_has_flat_ccs(struct xe_device *xe)
{
@@ -908,6 +916,10 @@ int xe_device_probe(struct xe_device *xe)
if (err)
goto err_unregister_display;
+ err = xe_device_sysfs_init(xe);
+ if (err)
+ goto err_unregister_display;
+
xe_debugfs_register(xe);
err = xe_hwmon_register(xe);
@@ -932,6 +944,8 @@ void xe_device_remove(struct xe_device *xe)
xe_display_unregister(xe);
drm_dev_unplug(&xe->drm);
+
+ xe_bo_pci_dev_remove_all(xe);
}
void xe_device_shutdown(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index 7efbd4c52791..2e657692e5b5 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -3,14 +3,16 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
-#include <drm/drm_managed.h>
-
#include "xe_device.h"
#include "xe_device_sysfs.h"
+#include "xe_mmio.h"
+#include "xe_pcode_api.h"
+#include "xe_pcode.h"
#include "xe_pm.h"
/**
@@ -63,11 +65,94 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(vram_d3cold_threshold);
+/**
+ * DOC: PCIe Gen5 Limitations
+ *
+ * Default link speed of discrete GPUs is determined by configuration parameters
+ * stored in their flash memory, which are subject to override through user
+ * initiated firmware updates. It has been observed that devices configured with
+ * PCIe Gen5 as their default link speed can come across link quality issues due
+ * to host or motherboard limitations and may have to auto-downgrade their link
+ * to PCIe Gen4 speed when faced with unstable link at Gen5, which makes
+ * firmware updates rather risky on such setups. It is required to ensure that
+ * the device is capable of auto-downgrading its link to PCIe Gen4 speed before
+ * pushing the firmware image with PCIe Gen5 as default configuration. This can
+ * be done by reading ``auto_link_downgrade_capable`` sysfs entry, which will
+ * denote if the device is capable of auto-downgrading its link to PCIe Gen4
+ * speed with boolean output value of ``0`` or ``1``, meaning `incapable` or
+ * `capable` respectively.
+ *
+ * .. code-block:: shell
+ *
+ * $ cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_capable
+ *
+ * Pushing a firmware image with PCIe Gen5 as the default configuration on an
+ * auto link downgrade incapable device that then faces link instability due
+ * to host or motherboard limitations can result in the driver failing to bind
+ * to the device, making further firmware updates impossible and leaving RMA
+ * as the last resort.
+ *
+ * The link downgrade status of a capable device is available through the
+ * ``auto_link_downgrade_status`` sysfs entry, again as a boolean ``0`` or
+ * ``1``, where ``0`` means no auto-downgrade was required during link
+ * training (the optimal scenario) and ``1`` means the device has
+ * auto-downgraded its link to PCIe Gen4 speed due to an unstable Gen5 link.
+ *
+ * .. code-block:: shell
+ *
+ * $ cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_status
+ */
+
+static ssize_t
+auto_link_downgrade_capable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ u32 cap, val;
+
+ xe_pm_runtime_get(xe);
+ val = xe_mmio_read32(xe_root_tile_mmio(xe), BMG_PCIE_CAP);
+ xe_pm_runtime_put(xe);
+
+ cap = REG_FIELD_GET(LINK_DOWNGRADE, val);
+ return sysfs_emit(buf, "%u\n", cap == DOWNGRADE_CAPABLE ? true : false);
+}
+static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_capable);
+
+static ssize_t
+auto_link_downgrade_status_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ /* default the auto_link_downgrade status to 0 */
+ u32 val = 0;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = xe_pcode_read(xe_device_get_root_tile(xe),
+ PCODE_MBOX(DGFX_PCODE_STATUS, DGFX_GET_INIT_STATUS, 0),
+ &val, NULL);
+ xe_pm_runtime_put(xe);
+
+ return ret ?: sysfs_emit(buf, "%u\n", REG_FIELD_GET(DGFX_LINK_DOWNGRADE_STATUS, val));
+}
+static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_status);
+
+static const struct attribute *auto_link_downgrade_attrs[] = {
+ &dev_attr_auto_link_downgrade_capable.attr,
+ &dev_attr_auto_link_downgrade_status.attr,
+ NULL
+};
+
static void xe_device_sysfs_fini(void *arg)
{
struct xe_device *xe = arg;
- sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+ if (xe->d3cold.capable)
+ sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+
+ if (xe->info.platform == XE_BATTLEMAGE)
+ sysfs_remove_files(&xe->drm.dev->kobj, auto_link_downgrade_attrs);
}
int xe_device_sysfs_init(struct xe_device *xe)
@@ -75,9 +160,17 @@ int xe_device_sysfs_init(struct xe_device *xe)
struct device *dev = xe->drm.dev;
int ret;
- ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
- if (ret)
- return ret;
+ if (xe->d3cold.capable) {
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+ if (ret)
+ return ret;
+ }
+
+ if (xe->info.platform == XE_BATTLEMAGE) {
+ ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs);
+ if (ret)
+ return ret;
+ }
return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe);
}
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 0482f26aa480..c8fa2c011666 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -31,7 +31,6 @@
#endif
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
-#include "soc/intel_pch.h"
#include "intel_display_core.h"
#include "intel_display_device.h"
#endif
@@ -107,6 +106,9 @@ struct xe_vram_region {
resource_size_t actual_physical_size;
/** @mapping: pointer to VRAM mappable space */
void __iomem *mapping;
+ /** @ttm: VRAM TTM manager */
+ struct xe_ttm_vram_mgr ttm;
+#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
/** @pagemap: Used to remap device memory as ZONE_DEVICE */
struct dev_pagemap pagemap;
/**
@@ -120,8 +122,7 @@ struct xe_vram_region {
* This is generated when remap device memory as ZONE_DEVICE
*/
resource_size_t hpa_base;
- /** @ttm: VRAM TTM manager */
- struct xe_ttm_vram_mgr ttm;
+#endif
};
/**
@@ -314,6 +315,8 @@ struct xe_device {
u8 has_atomic_enable_pte_bit:1;
/** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
u8 has_device_atomics_on_smem:1;
+ /** @info.has_fan_control: Device supports fan control */
+ u8 has_fan_control:1;
/** @info.has_flat_ccs: Whether flat CCS metadata is used */
u8 has_flat_ccs:1;
/** @info.has_heci_cscfi: device has heci cscfi */
@@ -334,6 +337,8 @@ struct xe_device {
u8 has_64bit_timestamp:1;
/** @info.is_dgfx: is discrete device */
u8 is_dgfx:1;
+ /** @info.needs_scratch: needs a scratch page for out-of-bounds prefetch to work */
+ u8 needs_scratch:1;
/**
* @info.probe_display: Probe display hardware. If set to
* false, the driver will behave as if there is no display
@@ -420,12 +425,22 @@ struct xe_device {
struct {
/** @pinned.lock: protected pinned BO list state */
spinlock_t lock;
- /** @pinned.kernel_bo_present: pinned kernel BO that are present */
- struct list_head kernel_bo_present;
- /** @pinned.evicted: pinned BO that have been evicted */
- struct list_head evicted;
- /** @pinned.external_vram: pinned external BO in vram*/
- struct list_head external_vram;
+ /** @pinned.early: early pinned lists */
+ struct {
+ /** @pinned.early.kernel_bo_present: pinned kernel BO that are present */
+ struct list_head kernel_bo_present;
+ /** @pinned.early.evicted: pinned BO that have been evicted */
+ struct list_head evicted;
+ } early;
+ /** @pinned.late: late pinned lists */
+ struct {
+ /** @pinned.late.kernel_bo_present: pinned kernel BO that are present */
+ struct list_head kernel_bo_present;
+ /** @pinned.late.evicted: pinned BO that have been evicted */
+ struct list_head evicted;
+ /** @pinned.external: pinned external and dma-buf. */
+ struct list_head external;
+ } late;
} pinned;
/** @ufence_wq: user fence wait queue */
@@ -508,6 +523,9 @@ struct xe_device {
struct mutex lock;
} d3cold;
+ /** @pm_notifier: PM notifier used to perform actions in response to various PM events. */
+ struct notifier_block pm_notifier;
+
/** @pmt: Support the PMT driver callback interface */
struct {
/** @pmt.lock: protect access for telemetry data */
@@ -572,7 +590,6 @@ struct xe_device {
* migrating to the right sub-structs
*/
struct intel_display display;
- enum intel_pch pch_type;
struct dram_info {
bool wm_lv_0_adjust_needed;
@@ -588,6 +605,7 @@ struct xe_device {
INTEL_DRAM_LPDDR5,
INTEL_DRAM_GDDR,
INTEL_DRAM_GDDR_ECC,
+ __INTEL_DRAM_TYPE_MAX,
} type;
u8 num_qgv_points;
u8 num_psf_gv_points;
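[Editor's note: the early/late split of the pinned BO lists in this file pairs with the XE_BO_FLAG_PINNED_LATE_RESTORE flag appearing elsewhere in this series. Judging by the names, early-list BOs are evicted and restored in the first phase of suspend/resume, while late-list BOs (including the new external/dma-buf list) are handled in a later phase. A sketch of the implied bookkeeping, with a hypothetical link-field name:]

	spin_lock(&xe->pinned.lock);
	/* "link" is illustrative; the real BO list-head field may differ */
	list_add_tail(&bo->link, &xe->pinned.late.external);
	spin_unlock(&xe->pinned.lock);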
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index f7a20264ea33..346f857f3837 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -233,7 +233,7 @@ static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
struct drm_gem_object *obj = attach->importer_priv;
struct xe_bo *bo = gem_to_xe_bo(obj);
- XE_WARN_ON(xe_bo_evict(bo, false));
+ XE_WARN_ON(xe_bo_evict(bo));
}
static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
index e2bb156c71fb..96732613b4b7 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.c
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -283,7 +283,7 @@ static int xe_eu_stall_user_ext_set_property(struct xe_device *xe, u64 extension
int err;
u32 idx;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -313,7 +313,7 @@ static int xe_eu_stall_user_extensions(struct xe_device *xe, u64 extension,
if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
return -E2BIG;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index b75adfc99fb7..44364c042ad7 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -176,8 +176,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
if (xe_exec_queue_is_parallel(q)) {
- err = __copy_from_user(addresses, addresses_user, sizeof(u64) *
- q->width);
+ err = copy_from_user(addresses, addresses_user, sizeof(u64) *
+ q->width);
if (err) {
err = -EFAULT;
goto err_syncs;
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index cd9b1c32f30f..ce78cee5dec6 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -479,7 +479,7 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
int err;
u32 idx;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -518,7 +518,7 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue
if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
return -E2BIG;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -618,9 +618,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
return -EINVAL;
- err = __copy_from_user(eci, user_eci,
- sizeof(struct drm_xe_engine_class_instance) *
- len);
+ err = copy_from_user(eci, user_eci,
+ sizeof(struct drm_xe_engine_class_instance) * len);
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
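[Editor's note: the __copy_from_user() to copy_from_user() conversions in this and the surrounding ioctl paths follow the usual hardening rule: the double-underscore variant skips the access_ok() range check and is meant only for callers that have already validated the user pointer. A minimal sketch of the canonical pattern, with an illustrative struct:]

	struct drm_xe_user_extension ext;

	/*
	 * copy_from_user() validates the user range itself and returns
	 * the number of bytes it could not copy, i.e. 0 on success.
	 */
	if (copy_from_user(&ext, u64_to_user_ptr(extension), sizeof(ext)))
		return -EFAULT;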
diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c
index 4f6784e5abf8..8a5cba22b586 100644
--- a/drivers/gpu/drm/xe/xe_force_wake.c
+++ b/drivers/gpu/drm/xe/xe_force_wake.c
@@ -49,9 +49,6 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
fw->gt = gt;
spin_lock_init(&fw->lock);
- /* Assuming gen11+ so assert this assumption is correct */
- xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
-
if (xe->info.graphics_verx100 >= 1270) {
init_domain(fw, XE_FW_DOMAIN_ID_GT,
FORCEWAKE_GT,
@@ -67,9 +64,6 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
{
int i, j;
- /* Assuming gen11+ so assert this assumption is correct */
- xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
-
if (!xe_gt_is_media_type(gt))
init_domain(fw, XE_FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER,
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 5fcb2b4c2c13..7062115909f2 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -365,7 +365,7 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
* scratch entries, rather keep the scratch page in system memory on
* platforms where 64K pages are needed for VRAM.
*/
- flags = XE_BO_FLAG_PINNED;
+ flags = 0;
if (ggtt->flags & XE_GGTT_FLAGS_64K)
flags |= XE_BO_FLAG_SYSTEM;
else
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 66198cf2662c..0e5d243c9451 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -12,8 +12,10 @@
#include <generated/xe_wa_oob.h>
+#include "instructions/xe_alu_commands.h"
#include "instructions/xe_gfxpipe_commands.h"
#include "instructions/xe_mi_commands.h"
+#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
@@ -176,15 +178,6 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
return 0;
}
-/*
- * Convert back from encoded value to type-safe, only to be used when reg.mcr
- * is true
- */
-static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
-{
- return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
-}
-
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
struct xe_reg_sr *sr = &q->hwe->reg_lrc;
@@ -194,6 +187,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
struct xe_bb *bb;
struct dma_fence *fence;
long timeout;
+ int count_rmw = 0;
int count = 0;
if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
@@ -206,30 +200,32 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
if (IS_ERR(bb))
return PTR_ERR(bb);
- xa_for_each(&sr->xa, idx, entry)
- ++count;
+ /* count RMW registers as those will be handled separately */
+ xa_for_each(&sr->xa, idx, entry) {
+ if (entry->reg.masked || entry->clr_bits == ~0)
+ ++count;
+ else
+ ++count_rmw;
+ }
- if (count) {
+ if (count || count_rmw)
xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);
+ if (count) {
+ /* emit a single LRI with all non-RMW regs */
+
bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);
xa_for_each(&sr->xa, idx, entry) {
struct xe_reg reg = entry->reg;
- struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
u32 val;
- /*
- * Skip reading the register if it's not really needed
- */
if (reg.masked)
val = entry->clr_bits << 16;
- else if (entry->clr_bits + 1)
- val = (reg.mcr ?
- xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
- xe_mmio_read32(&gt->mmio, reg)) & (~entry->clr_bits);
- else
+ else if (entry->clr_bits == ~0)
val = 0;
+ else
+ continue;
val |= entry->set_bits;
@@ -239,6 +235,52 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
}
}
+ if (count_rmw) {
+ /* emit MI_MATH for each RMW reg */
+
+ xa_for_each(&sr->xa, idx, entry) {
+ if (entry->reg.masked || entry->clr_bits == ~0)
+ continue;
+
+ bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO;
+ bb->cs[bb->len++] = entry->reg.addr;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
+
+ bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
+ MI_LRI_LRM_CS_MMIO;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr;
+ bb->cs[bb->len++] = entry->clr_bits;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr;
+ bb->cs[bb->len++] = entry->set_bits;
+
+ bb->cs[bb->len++] = MI_MATH(8);
+ bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0);
+ bb->cs[bb->len++] = CS_ALU_INSTR_LOADINV(SRCB, REG1);
+ bb->cs[bb->len++] = CS_ALU_INSTR_AND;
+ bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU);
+ bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0);
+ bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCB, REG2);
+ bb->cs[bb->len++] = CS_ALU_INSTR_OR;
+ bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU);
+
+ bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
+ bb->cs[bb->len++] = entry->reg.addr;
+
+ xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x\n",
+ entry->reg.addr, entry->clr_bits, entry->set_bits);
+ }
+
+ /* reset used GPR */
+ bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) | MI_LRI_LRM_CS_MMIO;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
+ bb->cs[bb->len++] = 0;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr;
+ bb->cs[bb->len++] = 0;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr;
+ bb->cs[bb->len++] = 0;
+ }
+
xe_lrc_emit_hwe_state_instructions(q, bb);
job = xe_bb_create_job(q, bb);
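[Editor's note: for reference, the MI_MATH sequence emitted per RMW register above amounts to a read-modify-write executed entirely on the command streamer. A pseudo-C sketch of what the emitted commands compute, restating the hunk rather than adding driver code:]

	gpr0 = read(reg);              /* MI_LOAD_REGISTER_REG into GPR0 */
	gpr1 = clr_bits;               /* MI_LOAD_REGISTER_IMM */
	gpr2 = set_bits;
	gpr0 = (gpr0 & ~gpr1) | gpr2;  /* MI_MATH: AND via LOADINV, then OR */
	write(reg, gpr0);              /* MI_LOAD_REGISTER_REG back to reg */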
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index f7005a3643e6..119a55bb7580 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -300,20 +300,20 @@ static int hwconfig(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
-static const struct drm_info_list debugfs_list[] = {
- {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines},
+/*
+ * only for GT debugfs files which can be safely used on the VF as well:
+ * - without access to the GT privileged registers
+ * - without access to the PF specific data
+ */
+static const struct drm_info_list vf_safe_debugfs_list[] = {
{"force_reset", .show = xe_gt_debugfs_simple_show, .data = force_reset},
{"force_reset_sync", .show = xe_gt_debugfs_simple_show, .data = force_reset_sync},
{"sa_info", .show = xe_gt_debugfs_simple_show, .data = sa_info},
{"topology", .show = xe_gt_debugfs_simple_show, .data = topology},
- {"steering", .show = xe_gt_debugfs_simple_show, .data = steering},
{"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt},
- {"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info},
{"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore},
{"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds},
{"tunings", .show = xe_gt_debugfs_simple_show, .data = tunings},
- {"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
- {"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs},
{"default_lrc_rcs", .show = xe_gt_debugfs_simple_show, .data = rcs_default_lrc},
{"default_lrc_ccs", .show = xe_gt_debugfs_simple_show, .data = ccs_default_lrc},
{"default_lrc_bcs", .show = xe_gt_debugfs_simple_show, .data = bcs_default_lrc},
@@ -323,6 +323,15 @@ static const struct drm_info_list debugfs_list[] = {
{"hwconfig", .show = xe_gt_debugfs_simple_show, .data = hwconfig},
};
+/* everything else should be added here */
+static const struct drm_info_list pf_only_debugfs_list[] = {
+ {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines},
+ {"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs},
+ {"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
+ {"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info},
+ {"steering", .show = xe_gt_debugfs_simple_show, .data = steering},
+};
+
void xe_gt_debugfs_register(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -346,10 +355,15 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
*/
root->d_inode->i_private = gt;
- drm_debugfs_create_files(debugfs_list,
- ARRAY_SIZE(debugfs_list),
+ drm_debugfs_create_files(vf_safe_debugfs_list,
+ ARRAY_SIZE(vf_safe_debugfs_list),
root, minor);
+ if (!IS_SRIOV_VF(xe))
+ drm_debugfs_create_files(pf_only_debugfs_list,
+ ARRAY_SIZE(pf_only_debugfs_list),
+ root, minor);
+
xe_uc_debugfs_register(&gt->uc, root);
if (IS_SRIOV_PF(xe))
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index 604bdc7c8173..868a5d2c1a52 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -56,9 +56,10 @@ dev_to_xe(struct device *dev)
return gt_to_xe(kobj_to_gt(dev->kobj.parent));
}
-static ssize_t act_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t act_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -68,11 +69,12 @@ static ssize_t act_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(act_freq);
+static struct kobj_attribute attr_act_freq = __ATTR_RO(act_freq);
-static ssize_t cur_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t cur_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -85,11 +87,12 @@ static ssize_t cur_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(cur_freq);
+static struct kobj_attribute attr_cur_freq = __ATTR_RO(cur_freq);
-static ssize_t rp0_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rp0_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -99,11 +102,12 @@ static ssize_t rp0_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(rp0_freq);
+static struct kobj_attribute attr_rp0_freq = __ATTR_RO(rp0_freq);
-static ssize_t rpe_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rpe_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -113,11 +117,12 @@ static ssize_t rpe_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(rpe_freq);
+static struct kobj_attribute attr_rpe_freq = __ATTR_RO(rpe_freq);
-static ssize_t rpa_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rpa_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -127,20 +132,22 @@ static ssize_t rpa_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(rpa_freq);
+static struct kobj_attribute attr_rpa_freq = __ATTR_RO(rpa_freq);
-static ssize_t rpn_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rpn_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rpn_freq(pc));
}
-static DEVICE_ATTR_RO(rpn_freq);
+static struct kobj_attribute attr_rpn_freq = __ATTR_RO(rpn_freq);
-static ssize_t min_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t min_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -154,9 +161,10 @@ static ssize_t min_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
- const char *buff, size_t count)
+static ssize_t min_freq_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff, size_t count)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -173,11 +181,12 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR_RW(min_freq);
+static struct kobj_attribute attr_min_freq = __ATTR_RW(min_freq);
-static ssize_t max_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t max_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -191,9 +200,10 @@ static ssize_t max_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
- const char *buff, size_t count)
+static ssize_t max_freq_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff, size_t count)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -210,17 +220,17 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR_RW(max_freq);
+static struct kobj_attribute attr_max_freq = __ATTR_RW(max_freq);
static const struct attribute *freq_attrs[] = {
- &dev_attr_act_freq.attr,
- &dev_attr_cur_freq.attr,
- &dev_attr_rp0_freq.attr,
- &dev_attr_rpa_freq.attr,
- &dev_attr_rpe_freq.attr,
- &dev_attr_rpn_freq.attr,
- &dev_attr_min_freq.attr,
- &dev_attr_max_freq.attr,
+ &attr_act_freq.attr,
+ &attr_cur_freq.attr,
+ &attr_rp0_freq.attr,
+ &attr_rpa_freq.attr,
+ &attr_rpe_freq.attr,
+ &attr_rpn_freq.attr,
+ &attr_min_freq.attr,
+ &attr_max_freq.attr,
NULL
};
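[Editor's note: this file's DEVICE_ATTR_* to kobj_attribute conversions (repeated in xe_gt_idle.c and xe_gt_throttle.c below) all share one shape; a minimal self-contained sketch with a hypothetical attribute name:]

	static ssize_t example_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
	{
		/* recover the owning device from the kobject when needed */
		struct device *dev = kobj_to_dev(kobj);

		return sysfs_emit(buf, "%s\n", dev_name(dev));
	}
	static struct kobj_attribute attr_example = __ATTR_RO(example);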
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index fbbace7b0b12..c11206410a4d 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -249,9 +249,10 @@ int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
-static ssize_t name_show(struct device *dev,
- struct device_attribute *attr, char *buff)
+static ssize_t name_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
ssize_t ret;
@@ -262,11 +263,12 @@ static ssize_t name_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR_RO(name);
+static struct kobj_attribute name_attr = __ATTR_RO(name);
-static ssize_t idle_status_show(struct device *dev,
- struct device_attribute *attr, char *buff)
+static ssize_t idle_status_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
enum xe_gt_idle_state state;
@@ -277,6 +279,7 @@ static ssize_t idle_status_show(struct device *dev,
return sysfs_emit(buff, "%s\n", gt_idle_state_to_string(state));
}
+static struct kobj_attribute idle_status_attr = __ATTR_RO(idle_status);
u64 xe_gt_idle_residency_msec(struct xe_gt_idle *gtidle)
{
@@ -291,10 +294,11 @@ u64 xe_gt_idle_residency_msec(struct xe_gt_idle *gtidle)
return residency;
}
-static DEVICE_ATTR_RO(idle_status);
-static ssize_t idle_residency_ms_show(struct device *dev,
- struct device_attribute *attr, char *buff)
+
+static ssize_t idle_residency_ms_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
u64 residency;
@@ -305,12 +309,12 @@ static ssize_t idle_residency_ms_show(struct device *dev,
return sysfs_emit(buff, "%llu\n", residency);
}
-static DEVICE_ATTR_RO(idle_residency_ms);
+static struct kobj_attribute idle_residency_attr = __ATTR_RO(idle_residency_ms);
static const struct attribute *gt_idle_attrs[] = {
- &dev_attr_name.attr,
- &dev_attr_idle_status.attr,
- &dev_attr_idle_residency_ms.attr,
+ &name_attr.attr,
+ &idle_status_attr.attr,
+ &idle_residency_attr.attr,
NULL,
};
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 605aad3554e7..d4d9730f0d2c 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -345,7 +345,8 @@ fallback:
* Some older platforms don't have tables or don't have complete tables.
* Newer platforms should always have the required info.
*/
- if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 2000)
+ if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 2000 &&
+ !gt_to_xe(gt)->info.force_execlist)
xe_gt_err(gt, "Slice/Subslice counts missing from hwconfig table; using typical fallback values\n");
if (gt_to_xe(gt)->info.platform == XE_PVC)
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 0c22b3a36655..10622ca471a2 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -240,7 +240,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
atomic = access_is_atomic(pf->access_type);
if (xe_vma_is_cpu_addr_mirror(vma))
- err = xe_svm_handle_pagefault(vm, vma, gt_to_tile(gt),
+ err = xe_svm_handle_pagefault(vm, vma, gt,
pf->page_addr, atomic);
else
err = handle_vma_pagefault(gt, vma, atomic);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 10be109bf357..2420a548cacc 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -1444,15 +1444,23 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
return 0;
xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
- bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(size, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_NEEDS_2M |
- XE_BO_FLAG_PINNED);
+ bo = xe_bo_create_locked(xe, tile, NULL,
+ ALIGN(size, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_NEEDS_2M |
+ XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_PINNED_LATE_RESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
+ err = xe_bo_pin(bo);
+ xe_bo_unlock(bo);
+ if (unlikely(err)) {
+ xe_bo_put(bo);
+ return err;
+ }
+
config->lmem_obj = bo;
if (xe_device_has_lmtt(xe)) {
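[Editor's note: the switch from xe_bo_create_pin_map() to an explicit create/pin/unlock sequence above lets the new XE_BO_FLAG_PINNED_LATE_RESTORE flag be applied while keeping the pin under the BO lock. The error-handling shape is the part worth copying; a sketch reusing the calls from the hunk, with surrounding declarations elided:]

	bo = xe_bo_create_locked(xe, tile, NULL, size, ttm_bo_type_kernel, flags);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	err = xe_bo_pin(bo);	/* pin while the BO lock is still held */
	xe_bo_unlock(bo);
	if (err) {
		xe_bo_put(bo);	/* drop the reference taken at creation */
		return err;
	}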
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
index b2521dd6ec42..0fe47f41b63c 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
@@ -51,27 +51,18 @@ static unsigned int extract_vfid(struct dentry *d)
* /sys/kernel/debug/dri/0/
* ├── gt0
* │   ├── pf
- * │   │   ├── ggtt_available
- * │   │   ├── ggtt_provisioned
* │   │   ├── contexts_provisioned
* │   │   ├── doorbells_provisioned
* │   │   ├── runtime_registers
* │   │   ├── negotiated_versions
* │   │   ├── adverse_events
+ * ├── gt1
+ * │   ├── pf
+ * │   │   ├── ...
*/
static const struct drm_info_list pf_info[] = {
{
- "ggtt_available",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_available_ggtt,
- },
- {
- "ggtt_provisioned",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_ggtt,
- },
- {
"contexts_provisioned",
.show = xe_gt_debugfs_simple_show,
.data = xe_gt_sriov_pf_config_print_ctxs,
@@ -82,11 +73,6 @@ static const struct drm_info_list pf_info[] = {
.data = xe_gt_sriov_pf_config_print_dbs,
},
{
- "lmem_provisioned",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_lmem,
- },
- {
"runtime_registers",
.show = xe_gt_debugfs_simple_show,
.data = xe_gt_sriov_pf_service_print_runtime,
@@ -107,6 +93,42 @@ static const struct drm_info_list pf_info[] = {
* /sys/kernel/debug/dri/0/
* ├── gt0
* │   ├── pf
+ * │   │   ├── ggtt_available
+ * │   │   ├── ggtt_provisioned
+ */
+
+static const struct drm_info_list pf_ggtt_info[] = {
+ {
+ "ggtt_available",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_config_print_available_ggtt,
+ },
+ {
+ "ggtt_provisioned",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_config_print_ggtt,
+ },
+};
+
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── pf
+ * │   │   ├── lmem_provisioned
+ */
+
+static const struct drm_info_list pf_lmem_info[] = {
+ {
+ "lmem_provisioned",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_config_print_lmem,
+ },
+};
+
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── pf
* │   │   ├── reset_engine
* │   │   ├── sample_period
* │   │   ├── sched_if_idle
@@ -532,6 +554,16 @@ void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root)
pfdentry->d_inode->i_private = gt;
drm_debugfs_create_files(pf_info, ARRAY_SIZE(pf_info), pfdentry, minor);
+ if (!xe_gt_is_media_type(gt)) {
+ drm_debugfs_create_files(pf_ggtt_info,
+ ARRAY_SIZE(pf_ggtt_info),
+ pfdentry, minor);
+ if (IS_DGFX(gt_to_xe(gt)))
+ drm_debugfs_create_files(pf_lmem_info,
+ ARRAY_SIZE(pf_lmem_info),
+ pfdentry, minor);
+ }
+
pf_add_policy_attrs(gt, pfdentry);
pf_add_config_attrs(gt, pfdentry, PFID);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
index 4efde5f46b43..821cfcc34e6b 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
@@ -112,7 +112,6 @@ static const struct xe_reg tgl_runtime_regs[] = {
XELP_GT_SLICE_ENABLE, /* _MMIO(0x9138) */
XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -124,7 +123,6 @@ static const struct xe_reg ats_m_runtime_regs[] = {
XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -136,7 +134,6 @@ static const struct xe_reg pvc_runtime_regs[] = {
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
- CTC_MODE, /* _MMIO(0xA26C) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -150,7 +147,6 @@ static const struct xe_reg ver_1270_runtime_regs[] = {
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -167,7 +163,6 @@ static const struct xe_reg ver_2000_runtime_regs[] = {
XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */
XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */
XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -185,7 +180,6 @@ static const struct xe_reg ver_3000_runtime_regs[] = {
XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */
XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */
XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
index 6155ea354432..30f942671c2b 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.c
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -27,6 +27,7 @@ void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr)
}
static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
+ "svm_pagefault_count",
"tlb_inval_count",
"vma_pagefault_count",
"vma_pagefault_kb",
diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h
index d556771f99d6..be3244d7133c 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h
@@ -7,6 +7,7 @@
#define _XE_GT_STATS_TYPES_H_
enum xe_gt_stats_id {
+ XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT,
XE_GT_STATS_ID_TLB_INVAL,
XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT,
XE_GT_STATS_ID_VMA_PAGEFAULT_KB,
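[Editor's note: the enum and stat_description[] hunks above must stay in lockstep, since the description table is indexed directly by enum xe_gt_stats_id; a new ID and its string have to land at matching positions. A sketch of the implied lookup:]

	/* holds only if both lists keep the same order */
	const char *name = stat_description[XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT];
	/* name == "svm_pagefault_count" */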
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle.c b/drivers/gpu/drm/xe/xe_gt_throttle.c
index 8db78d616b6f..aa962c783cdf 100644
--- a/drivers/gpu/drm/xe/xe_gt_throttle.c
+++ b/drivers/gpu/drm/xe/xe_gt_throttle.c
@@ -114,115 +114,115 @@ static u32 read_reason_vr_tdc(struct xe_gt *gt)
return tdc;
}
-static ssize_t status_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t status_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool status = !!read_status(gt);
return sysfs_emit(buff, "%u\n", status);
}
-static DEVICE_ATTR_RO(status);
+static struct kobj_attribute attr_status = __ATTR_RO(status);
-static ssize_t reason_pl1_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_pl1_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool pl1 = !!read_reason_pl1(gt);
return sysfs_emit(buff, "%u\n", pl1);
}
-static DEVICE_ATTR_RO(reason_pl1);
+static struct kobj_attribute attr_reason_pl1 = __ATTR_RO(reason_pl1);
-static ssize_t reason_pl2_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_pl2_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool pl2 = !!read_reason_pl2(gt);
return sysfs_emit(buff, "%u\n", pl2);
}
-static DEVICE_ATTR_RO(reason_pl2);
+static struct kobj_attribute attr_reason_pl2 = __ATTR_RO(reason_pl2);
-static ssize_t reason_pl4_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_pl4_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool pl4 = !!read_reason_pl4(gt);
return sysfs_emit(buff, "%u\n", pl4);
}
-static DEVICE_ATTR_RO(reason_pl4);
+static struct kobj_attribute attr_reason_pl4 = __ATTR_RO(reason_pl4);
-static ssize_t reason_thermal_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_thermal_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool thermal = !!read_reason_thermal(gt);
return sysfs_emit(buff, "%u\n", thermal);
}
-static DEVICE_ATTR_RO(reason_thermal);
+static struct kobj_attribute attr_reason_thermal = __ATTR_RO(reason_thermal);
-static ssize_t reason_prochot_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_prochot_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool prochot = !!read_reason_prochot(gt);
return sysfs_emit(buff, "%u\n", prochot);
}
-static DEVICE_ATTR_RO(reason_prochot);
+static struct kobj_attribute attr_reason_prochot = __ATTR_RO(reason_prochot);
-static ssize_t reason_ratl_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_ratl_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool ratl = !!read_reason_ratl(gt);
return sysfs_emit(buff, "%u\n", ratl);
}
-static DEVICE_ATTR_RO(reason_ratl);
+static struct kobj_attribute attr_reason_ratl = __ATTR_RO(reason_ratl);
-static ssize_t reason_vr_thermalert_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_vr_thermalert_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool thermalert = !!read_reason_vr_thermalert(gt);
return sysfs_emit(buff, "%u\n", thermalert);
}
-static DEVICE_ATTR_RO(reason_vr_thermalert);
+static struct kobj_attribute attr_reason_vr_thermalert = __ATTR_RO(reason_vr_thermalert);
-static ssize_t reason_vr_tdc_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_vr_tdc_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool tdc = !!read_reason_vr_tdc(gt);
return sysfs_emit(buff, "%u\n", tdc);
}
-static DEVICE_ATTR_RO(reason_vr_tdc);
+static struct kobj_attribute attr_reason_vr_tdc = __ATTR_RO(reason_vr_tdc);
static struct attribute *throttle_attrs[] = {
- &dev_attr_status.attr,
- &dev_attr_reason_pl1.attr,
- &dev_attr_reason_pl2.attr,
- &dev_attr_reason_pl4.attr,
- &dev_attr_reason_thermal.attr,
- &dev_attr_reason_prochot.attr,
- &dev_attr_reason_ratl.attr,
- &dev_attr_reason_vr_thermalert.attr,
- &dev_attr_reason_vr_tdc.attr,
+ &attr_status.attr,
+ &attr_reason_pl1.attr,
+ &attr_reason_pl2.attr,
+ &attr_reason_pl4.attr,
+ &attr_reason_thermal.attr,
+ &attr_reason_prochot.attr,
+ &attr_reason_ratl.attr,
+ &attr_reason_vr_thermalert.attr,
+ &attr_reason_vr_tdc.attr,
NULL
};
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index bc5714a5b36b..bac5471a1a78 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -483,7 +483,8 @@ static int guc_g2g_alloc(struct xe_guc *guc)
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_GGTT |
XE_BO_FLAG_GGTT_ALL |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -1393,6 +1394,7 @@ proto:
/* Use data from the GuC response as our return value */
return FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
}
+ALLOW_ERROR_INJECTION(xe_guc_mmio_send_recv, ERRNO);
int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len)
{
@@ -1508,30 +1510,32 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
xe_uc_fw_print(&guc->fw, p);
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (!fw_ref)
- return;
+ if (!IS_SRIOV_VF(gt_to_xe(gt))) {
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return;
+
+ status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
+
+ drm_printf(p, "\nGuC status 0x%08x:\n", status);
+ drm_printf(p, "\tBootrom status = 0x%x\n",
+ REG_FIELD_GET(GS_BOOTROM_MASK, status));
+ drm_printf(p, "\tuKernel status = 0x%x\n",
+ REG_FIELD_GET(GS_UKERNEL_MASK, status));
+ drm_printf(p, "\tMIA Core status = 0x%x\n",
+ REG_FIELD_GET(GS_MIA_MASK, status));
+ drm_printf(p, "\tLog level = %d\n",
+ xe_guc_log_get_level(&guc->log));
+
+ drm_puts(p, "\nScratch registers:\n");
+ for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
+ drm_printf(p, "\t%2d: \t0x%x\n",
+ i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i)));
+ }
- status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
-
- drm_printf(p, "\nGuC status 0x%08x:\n", status);
- drm_printf(p, "\tBootrom status = 0x%x\n",
- REG_FIELD_GET(GS_BOOTROM_MASK, status));
- drm_printf(p, "\tuKernel status = 0x%x\n",
- REG_FIELD_GET(GS_UKERNEL_MASK, status));
- drm_printf(p, "\tMIA Core status = 0x%x\n",
- REG_FIELD_GET(GS_MIA_MASK, status));
- drm_printf(p, "\tLog level = %d\n",
- xe_guc_log_get_level(&guc->log));
-
- drm_puts(p, "\nScratch registers:\n");
- for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
- drm_printf(p, "\t%2d: \t0x%x\n",
- i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i)));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
-
drm_puts(p, "\n");
xe_guc_ct_print(&guc->ct, p, false);
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 7031542a70ce..44c1fa2fe7c8 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -376,6 +376,11 @@ static void guc_waklv_init(struct xe_guc_ads *ads)
GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET,
&offset, &remain);
+ if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 44, 0) && XE_WA(gt, 16026508708))
+ guc_waklv_enable_simple(ads,
+ GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH,
+ &offset, &remain);
+
size = guc_ads_waklv_size(ads) - remain;
if (!size)
return;
@@ -414,7 +419,8 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -710,8 +716,8 @@ static int guc_capture_prep_lists(struct xe_guc_ads *ads)
}
if (ads->capture_size != PAGE_ALIGN(total_size))
- xe_gt_dbg(gt, "ADS capture alloc size changed from %d to %d\n",
- ads->capture_size, PAGE_ALIGN(total_size));
+ xe_gt_dbg(gt, "Updated ADS capture size %d (was %d)\n",
+ PAGE_ALIGN(total_size), ads->capture_size);
return PAGE_ALIGN(total_size);
}
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c
index 9095618648bc..859a3ba91be5 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture.c
+++ b/drivers/gpu/drm/xe/xe_guc_capture.c
@@ -105,49 +105,49 @@ struct __guc_capture_parsed_output {
* 3. Incorrect order will trigger XE_WARN.
*/
#define COMMON_XELP_BASE_GLOBAL \
- { FORCEWAKE_GT, REG_32BIT, 0, 0, "FORCEWAKE_GT"}
+ { FORCEWAKE_GT, REG_32BIT, 0, 0, 0, "FORCEWAKE_GT"}
#define COMMON_BASE_ENGINE_INSTANCE \
- { RING_HWSTAM(0), REG_32BIT, 0, 0, "HWSTAM"}, \
- { RING_HWS_PGA(0), REG_32BIT, 0, 0, "RING_HWS_PGA"}, \
- { RING_HEAD(0), REG_32BIT, 0, 0, "RING_HEAD"}, \
- { RING_TAIL(0), REG_32BIT, 0, 0, "RING_TAIL"}, \
- { RING_CTL(0), REG_32BIT, 0, 0, "RING_CTL"}, \
- { RING_MI_MODE(0), REG_32BIT, 0, 0, "RING_MI_MODE"}, \
- { RING_MODE(0), REG_32BIT, 0, 0, "RING_MODE"}, \
- { RING_ESR(0), REG_32BIT, 0, 0, "RING_ESR"}, \
- { RING_EMR(0), REG_32BIT, 0, 0, "RING_EMR"}, \
- { RING_EIR(0), REG_32BIT, 0, 0, "RING_EIR"}, \
- { RING_IMR(0), REG_32BIT, 0, 0, "RING_IMR"}, \
- { RING_IPEHR(0), REG_32BIT, 0, 0, "IPEHR"}, \
- { RING_INSTDONE(0), REG_32BIT, 0, 0, "RING_INSTDONE"}, \
- { INDIRECT_RING_STATE(0), REG_32BIT, 0, 0, "INDIRECT_RING_STATE"}, \
- { RING_ACTHD(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_ACTHD_UDW(0), REG_64BIT_HI_DW, 0, 0, "ACTHD"}, \
- { RING_BBADDR(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_BBADDR_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_BBADDR"}, \
- { RING_START(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_START_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_START"}, \
- { RING_DMA_FADD(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_DMA_FADD_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_DMA_FADD"}, \
- { RING_EXECLIST_STATUS_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_EXECLIST_STATUS_HI(0), REG_64BIT_HI_DW, 0, 0, "RING_EXECLIST_STATUS"}, \
- { RING_EXECLIST_SQ_CONTENTS_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_EXECLIST_SQ_CONTENTS_HI(0), REG_64BIT_HI_DW, 0, 0, "RING_EXECLIST_SQ_CONTENTS"}
+ { RING_HWSTAM(0), REG_32BIT, 0, 0, 0, "HWSTAM"}, \
+ { RING_HWS_PGA(0), REG_32BIT, 0, 0, 0, "RING_HWS_PGA"}, \
+ { RING_HEAD(0), REG_32BIT, 0, 0, 0, "RING_HEAD"}, \
+ { RING_TAIL(0), REG_32BIT, 0, 0, 0, "RING_TAIL"}, \
+ { RING_CTL(0), REG_32BIT, 0, 0, 0, "RING_CTL"}, \
+ { RING_MI_MODE(0), REG_32BIT, 0, 0, 0, "RING_MI_MODE"}, \
+ { RING_MODE(0), REG_32BIT, 0, 0, 0, "RING_MODE"}, \
+ { RING_ESR(0), REG_32BIT, 0, 0, 0, "RING_ESR"}, \
+ { RING_EMR(0), REG_32BIT, 0, 0, 0, "RING_EMR"}, \
+ { RING_EIR(0), REG_32BIT, 0, 0, 0, "RING_EIR"}, \
+ { RING_IMR(0), REG_32BIT, 0, 0, 0, "RING_IMR"}, \
+ { RING_IPEHR(0), REG_32BIT, 0, 0, 0, "IPEHR"}, \
+ { RING_INSTDONE(0), REG_32BIT, 0, 0, 0, "RING_INSTDONE"}, \
+ { INDIRECT_RING_STATE(0), REG_32BIT, 0, 0, 0, "INDIRECT_RING_STATE"}, \
+ { RING_ACTHD(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_ACTHD_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "ACTHD"}, \
+ { RING_BBADDR(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_BBADDR_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_BBADDR"}, \
+ { RING_START(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_START_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_START"}, \
+ { RING_DMA_FADD(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_DMA_FADD_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_DMA_FADD"}, \
+ { RING_EXECLIST_STATUS_LO(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_EXECLIST_STATUS_HI(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_EXECLIST_STATUS"}, \
+ { RING_EXECLIST_SQ_CONTENTS_LO(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_EXECLIST_SQ_CONTENTS_HI(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_EXECLIST_SQ_CONTENTS"}
#define COMMON_XELP_RC_CLASS \
- { RCU_MODE, REG_32BIT, 0, 0, "RCU_MODE"}
+ { RCU_MODE, REG_32BIT, 0, 0, 0, "RCU_MODE"}
#define COMMON_XELP_RC_CLASS_INSTDONE \
- { SC_INSTDONE, REG_32BIT, 0, 0, "SC_INSTDONE"}, \
- { SC_INSTDONE_EXTRA, REG_32BIT, 0, 0, "SC_INSTDONE_EXTRA"}, \
- { SC_INSTDONE_EXTRA2, REG_32BIT, 0, 0, "SC_INSTDONE_EXTRA2"}
+ { SC_INSTDONE, REG_32BIT, 0, 0, 0, "SC_INSTDONE"}, \
+ { SC_INSTDONE_EXTRA, REG_32BIT, 0, 0, 0, "SC_INSTDONE_EXTRA"}, \
+ { SC_INSTDONE_EXTRA2, REG_32BIT, 0, 0, 0, "SC_INSTDONE_EXTRA2"}
#define XELP_VEC_CLASS_REGS \
- { SFC_DONE(0), 0, 0, 0, "SFC_DONE[0]"}, \
- { SFC_DONE(1), 0, 0, 0, "SFC_DONE[1]"}, \
- { SFC_DONE(2), 0, 0, 0, "SFC_DONE[2]"}, \
- { SFC_DONE(3), 0, 0, 0, "SFC_DONE[3]"}
+ { SFC_DONE(0), 0, 0, 0, 0, "SFC_DONE[0]"}, \
+ { SFC_DONE(1), 0, 0, 0, 0, "SFC_DONE[1]"}, \
+ { SFC_DONE(2), 0, 0, 0, 0, "SFC_DONE[2]"}, \
+ { SFC_DONE(3), 0, 0, 0, 0, "SFC_DONE[3]"}
/* XE_LP Global */
static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = {
@@ -352,7 +352,7 @@ static const struct __ext_steer_reg xehpg_extregs[] = {
static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
const struct __ext_steer_reg *extlist,
- int slice_id, int subslice_id)
+ u32 dss_id, u16 slice_id, u16 subslice_id)
{
if (!ext || !extlist)
return;
@@ -361,6 +361,7 @@ static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
ext->flags = FIELD_PREP(GUC_REGSET_STEERING_NEEDED, 1);
ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
+ ext->dss_id = dss_id;
ext->regname = extlist->name;
}
@@ -397,7 +398,7 @@ static void guc_capture_alloc_steered_lists(struct xe_guc *guc)
{
struct xe_gt *gt = guc_to_gt(guc);
u16 slice, subslice;
- int iter, i, total = 0;
+ int dss, i, total = 0;
const struct __guc_mmio_reg_descr_group *lists = guc->capture->reglists;
const struct __guc_mmio_reg_descr_group *list;
struct __guc_mmio_reg_descr_group *extlists;
@@ -454,15 +455,15 @@ static void guc_capture_alloc_steered_lists(struct xe_guc *guc)
/* For steering registers, the list is generated at run-time */
extarray = (struct __guc_mmio_reg_descr *)extlists[0].list;
- for_each_dss_steering(iter, gt, slice, subslice) {
+ for_each_dss_steering(dss, gt, slice, subslice) {
for (i = 0; i < ARRAY_SIZE(xe_extregs); ++i) {
- __fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
+ __fill_ext_reg(extarray, &xe_extregs[i], dss, slice, subslice);
++extarray;
}
if (has_xehpg_extregs)
for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) {
- __fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice);
+ __fill_ext_reg(extarray, &xehpg_extregs[i], dss, slice, subslice);
++extarray;
}
}
@@ -1672,18 +1673,16 @@ snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_
{
struct xe_gt *gt = snapshot->hwe->gt;
struct xe_device *xe = gt_to_xe(gt);
- struct xe_guc *guc = &gt->uc.guc;
struct xe_devcoredump *devcoredump = &xe->devcoredump;
struct xe_devcoredump_snapshot *devcore_snapshot = &devcoredump->snapshot;
struct gcap_reg_list_info *reginfo = NULL;
u32 i, last_value = 0;
- bool is_ext, low32_ready = false;
+ bool low32_ready = false;
if (!list || !list->list || list->num_regs == 0)
return;
XE_WARN_ON(!devcore_snapshot->matched_node);
- is_ext = list == guc->capture->extlists;
reginfo = &devcore_snapshot->matched_node->reginfo[type];
/*
@@ -1749,17 +1748,12 @@ snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_
*/
XE_WARN_ON(low32_ready);
- if (is_ext) {
- int dss, group, instance;
-
- group = FIELD_GET(GUC_REGSET_STEERING_GROUP, reg_desc->flags);
- instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, reg_desc->flags);
- dss = xe_gt_mcr_steering_info_to_dss_id(gt, group, instance);
-
- drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname, dss, value);
- } else {
+ if (FIELD_GET(GUC_REGSET_STEERING_NEEDED, reg_desc->flags))
+ drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname,
+ reg_desc->dss_id, value);
+ else
drm_printf(p, "\t%s: 0x%08x\n", reg_desc->regname, value);
- }
+
break;
}
}
diff --git a/drivers/gpu/drm/xe/xe_guc_capture_types.h b/drivers/gpu/drm/xe/xe_guc_capture_types.h
index ca2d390ccbee..6cb439115597 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_capture_types.h
@@ -39,6 +39,8 @@ struct __guc_mmio_reg_descr {
u32 flags;
/** @mask: The mask to apply */
u32 mask;
+ /** @dss_id: Cached index for steered registers */
+ u32 dss_id;
/** @regname: Name of the register */
const char *regname;
};
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 72ad576fc18e..2447de0ebedf 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -238,7 +238,8 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -1088,6 +1089,7 @@ int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
return guc_ct_send_recv(ct, action, len, response_buffer, false);
}
+ALLOW_ERROR_INJECTION(xe_guc_ct_send_recv, ERRNO);
int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
u32 len, u32 *response_buffer)
@@ -1828,10 +1830,10 @@ static void ct_dead_print(struct xe_dead_ct *dead)
return;
}
- drm_printf(&lp, "CTB is dead - reason=0x%X\n", dead->reason);
/* Can't generate a genuine core dump at this point, so just do the good bits */
drm_puts(&lp, "**** Xe Device Coredump ****\n");
+ drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason);
xe_device_snapshot_print(xe, &lp);
drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
diff --git a/drivers/gpu/drm/xe/xe_guc_debugfs.c b/drivers/gpu/drm/xe/xe_guc_debugfs.c
index c569ff456e74..0b102ab46c4d 100644
--- a/drivers/gpu/drm/xe/xe_guc_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_guc_debugfs.c
@@ -17,101 +17,130 @@
#include "xe_macros.h"
#include "xe_pm.h"
-static struct xe_guc *node_to_guc(struct drm_info_node *node)
-{
- return node->info_ent->data;
-}
-
-static int guc_info(struct seq_file *m, void *data)
+/*
+ * guc_debugfs_show - A show callback for struct drm_info_list
+ * @m: the &seq_file
+ * @data: data used by the drm debugfs helpers
+ *
+ * This callback can be used in struct drm_info_list to describe debugfs
+ * files that are &xe_guc specific, in a similar way to how &xe_gt specific
+ * files are handled using &xe_gt_debugfs_simple_show.
+ *
+ * It is assumed that those debugfs files will be created on a directory
+ * entry whose grandparent struct dentry's d_inode->i_private points to &xe_gt.
+ *
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0 # dent->d_parent->d_parent (d_inode->i_private == gt)
+ * │   ├── uc # dent->d_parent
+ * │   │   ├── guc_info # dent
+ * │   │   ├── guc_...
+ *
+ * This function assumes that &m->private will be set to the &struct
+ * drm_info_node corresponding to the instance of the info on a given &struct
+ * drm_minor (see struct drm_info_list.show for details).
+ *
+ * This function also assumes that struct drm_info_list.data will point to the
+ * function code that will actually print a file content::
+ *
+ * int (*print)(struct xe_guc *, struct drm_printer *)
+ *
+ * Example::
+ *
+ * int foo(struct xe_guc *guc, struct drm_printer *p)
+ * {
+ * drm_printf(p, "enabled %d\n", guc->submission_state.enabled);
+ * return 0;
+ * }
+ *
+ * static const struct drm_info_list bar[] = {
+ * { .name = "foo", .show = guc_debugfs_show, .data = foo },
+ * };
+ *
+ * parent = debugfs_create_dir("uc", gtdir);
+ * drm_debugfs_create_files(bar, ARRAY_SIZE(bar), parent, minor);
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int guc_debugfs_show(struct seq_file *m, void *data)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_info_node *node = m->private;
+ struct dentry *parent = node->dent->d_parent;
+ struct dentry *grandparent = parent->d_parent;
+ struct xe_gt *gt = grandparent->d_inode->i_private;
+ struct xe_device *xe = gt_to_xe(gt);
+ int (*print)(struct xe_guc *, struct drm_printer *) = node->info_ent->data;
+ int ret;
xe_pm_runtime_get(xe);
- xe_guc_print_info(guc, &p);
+ ret = print(&gt->uc.guc, &p);
xe_pm_runtime_put(xe);
- return 0;
+ return ret;
}
-static int guc_log(struct seq_file *m, void *data)
+static int guc_log(struct xe_guc *guc, struct drm_printer *p)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_pm_runtime_get(xe);
- xe_guc_log_print(&guc->log, &p);
- xe_pm_runtime_put(xe);
-
+ xe_guc_log_print(&guc->log, p);
return 0;
}
-static int guc_log_dmesg(struct seq_file *m, void *data)
+static int guc_log_dmesg(struct xe_guc *guc, struct drm_printer *p)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
-
- xe_pm_runtime_get(xe);
xe_guc_log_print_dmesg(&guc->log);
- xe_pm_runtime_put(xe);
-
return 0;
}
-static int guc_ctb(struct seq_file *m, void *data)
+static int guc_ctb(struct xe_guc *guc, struct drm_printer *p)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_pm_runtime_get(xe);
- xe_guc_ct_print(&guc->ct, &p, true);
- xe_pm_runtime_put(xe);
-
+ xe_guc_ct_print(&guc->ct, p, true);
return 0;
}
-static int guc_pc(struct seq_file *m, void *data)
+static int guc_pc(struct xe_guc *guc, struct drm_printer *p)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_pm_runtime_get(xe);
- xe_guc_pc_print(&guc->pc, &p);
- xe_pm_runtime_put(xe);
-
+ xe_guc_pc_print(&guc->pc, p);
return 0;
}
-static const struct drm_info_list debugfs_list[] = {
- {"guc_info", guc_info, 0},
- {"guc_log", guc_log, 0},
- {"guc_log_dmesg", guc_log_dmesg, 0},
- {"guc_ctb", guc_ctb, 0},
- {"guc_pc", guc_pc, 0},
+/*
+ * only for GuC debugfs files which can be safely used on the VF as well:
+ * - without access to the GuC privileged registers
+ * - without access to the PF specific GuC objects
+ */
+static const struct drm_info_list vf_safe_debugfs_list[] = {
+ { "guc_info", .show = guc_debugfs_show, .data = xe_guc_print_info },
+ { "guc_ctb", .show = guc_debugfs_show, .data = guc_ctb },
+};
+
+/* For GuC debugfs files that require the SLPC support */
+static const struct drm_info_list slpc_debugfs_list[] = {
+ { "guc_pc", .show = guc_debugfs_show, .data = guc_pc },
+};
+
+/* everything else should be added here */
+static const struct drm_info_list pf_only_debugfs_list[] = {
+ { "guc_log", .show = guc_debugfs_show, .data = guc_log },
+ { "guc_log_dmesg", .show = guc_debugfs_show, .data = guc_log_dmesg },
};
void xe_guc_debugfs_register(struct xe_guc *guc, struct dentry *parent)
{
- struct drm_minor *minor = guc_to_xe(guc)->drm.primary;
- struct drm_info_list *local;
- int i;
-
-#define DEBUGFS_SIZE (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list))
- local = drmm_kmalloc(&guc_to_xe(guc)->drm, DEBUGFS_SIZE, GFP_KERNEL);
- if (!local)
- return;
+ struct xe_device *xe = guc_to_xe(guc);
+ struct drm_minor *minor = xe->drm.primary;
- memcpy(local, debugfs_list, DEBUGFS_SIZE);
-#undef DEBUGFS_SIZE
+ drm_debugfs_create_files(vf_safe_debugfs_list,
+ ARRAY_SIZE(vf_safe_debugfs_list),
+ parent, minor);
- for (i = 0; i < ARRAY_SIZE(debugfs_list); ++i)
- local[i].data = guc;
+ if (!IS_SRIOV_VF(xe)) {
+ drm_debugfs_create_files(pf_only_debugfs_list,
+ ARRAY_SIZE(pf_only_debugfs_list),
+ parent, minor);
- drm_debugfs_create_files(local,
- ARRAY_SIZE(debugfs_list),
- parent, minor);
+ if (!xe->info.skip_guc_pc)
+ drm_debugfs_create_files(slpc_debugfs_list,
+ ARRAY_SIZE(slpc_debugfs_list),
+ parent, minor);
+ }
}
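
For illustration, under this table-driven scheme a new GuC debugfs file needs only a printer callback plus one array entry. A minimal sketch — guc_foo and its output are hypothetical, not part of this series:

	static int guc_foo(struct xe_guc *guc, struct drm_printer *p)
	{
		/* print whatever GuC state the file should expose */
		drm_printf(p, "foo\n");
		return 0;
	}

	/* and, if it touches no privileged registers or PF-only objects,
	 * one more entry in vf_safe_debugfs_list: */
	{ "guc_foo", .show = guc_debugfs_show, .data = guc_foo },
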
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.c b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
index 2a457dcf31d5..0fb48f8f05d8 100644
--- a/drivers/gpu/drm/xe/xe_guc_engine_activity.c
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
@@ -17,36 +17,61 @@
#include "xe_hw_engine.h"
#include "xe_map.h"
#include "xe_mmio.h"
+#include "xe_sriov_pf_helpers.h"
#include "xe_trace_guc.h"
#define TOTAL_QUANTA 0x8000
-static struct iosys_map engine_activity_map(struct xe_guc *guc, struct xe_hw_engine *hwe)
+static struct iosys_map engine_activity_map(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int index)
{
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
- struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
+ struct engine_activity_buffer *buffer;
u16 guc_class = xe_engine_class_to_guc_class(hwe->class);
size_t offset;
- offset = offsetof(struct guc_engine_activity_data,
+ if (engine_activity->num_functions) {
+ buffer = &engine_activity->function_buffer;
+ offset = sizeof(struct guc_engine_activity_data) * index;
+ } else {
+ buffer = &engine_activity->device_buffer;
+ offset = 0;
+ }
+
+ offset += offsetof(struct guc_engine_activity_data,
engine_activity[guc_class][hwe->logical_instance]);
return IOSYS_MAP_INIT_OFFSET(&buffer->activity_bo->vmap, offset);
}
-static struct iosys_map engine_metadata_map(struct xe_guc *guc)
+static struct iosys_map engine_metadata_map(struct xe_guc *guc,
+ unsigned int index)
{
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
- struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
+ struct engine_activity_buffer *buffer;
+ size_t offset;
- return buffer->metadata_bo->vmap;
+ if (engine_activity->num_functions) {
+ buffer = &engine_activity->function_buffer;
+ offset = sizeof(struct guc_engine_activity_metadata) * index;
+ } else {
+ buffer = &engine_activity->device_buffer;
+ offset = 0;
+ }
+
+ return IOSYS_MAP_INIT_OFFSET(&buffer->metadata_bo->vmap, offset);
}
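
A hedged sketch of the addressing the two helpers above implement: each function owns a complete guc_engine_activity_data record in the function buffer, and the engine's slot sits at the usual class/instance offset inside that record (index, guc_class and hwe as in the code above):

	/* base of function `index`'s record in the function buffer */
	size_t fn_base = index * sizeof(struct guc_engine_activity_data);
	/* engine slot within the record */
	size_t slot = offsetof(struct guc_engine_activity_data,
			       engine_activity[guc_class][hwe->logical_instance]);
	/* final byte offset read through the iosys_map */
	size_t off = fn_base + slot;
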
static int allocate_engine_activity_group(struct xe_guc *guc)
{
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
struct xe_device *xe = guc_to_xe(guc);
- u32 num_activity_group = 1; /* Will be modified for VF */
+ u32 num_activity_group;
+
+ /*
+ * An additional activity group is allocated for the PF.
+ */
+ num_activity_group = IS_SRIOV_PF(xe) ? xe_sriov_pf_get_totalvfs(xe) + 1 : 1;
engine_activity->eag = drmm_kcalloc(&xe->drm, num_activity_group,
sizeof(struct engine_activity_group), GFP_KERNEL);
@@ -60,10 +85,11 @@ static int allocate_engine_activity_group(struct xe_guc *guc)
}
static int allocate_engine_activity_buffers(struct xe_guc *guc,
- struct engine_activity_buffer *buffer)
+ struct engine_activity_buffer *buffer,
+ int count)
{
- u32 metadata_size = sizeof(struct guc_engine_activity_metadata);
- u32 size = sizeof(struct guc_engine_activity_data);
+ u32 metadata_size = sizeof(struct guc_engine_activity_metadata) * count;
+ u32 size = sizeof(struct guc_engine_activity_data) * count;
struct xe_gt *gt = guc_to_gt(guc);
struct xe_tile *tile = gt_to_tile(gt);
struct xe_bo *bo, *metadata_bo;
@@ -118,10 +144,11 @@ static bool is_engine_activity_supported(struct xe_guc *guc)
return true;
}
-static struct engine_activity *hw_engine_to_engine_activity(struct xe_hw_engine *hwe)
+static struct engine_activity *hw_engine_to_engine_activity(struct xe_hw_engine *hwe,
+ unsigned int index)
{
struct xe_guc *guc = &hwe->gt->uc.guc;
- struct engine_activity_group *eag = &guc->engine_activity.eag[0];
+ struct engine_activity_group *eag = &guc->engine_activity.eag[index];
u16 guc_class = xe_engine_class_to_guc_class(hwe->class);
return &eag->engine[guc_class][hwe->logical_instance];
@@ -138,9 +165,10 @@ static u64 cpu_ns_to_guc_tsc_tick(ktime_t ns, u32 freq)
#define read_metadata_record(xe_, map_, field_) \
xe_map_rd_field(xe_, map_, 0, struct guc_engine_activity_metadata, field_)
-static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int index)
{
- struct engine_activity *ea = hw_engine_to_engine_activity(hwe);
+ struct engine_activity *ea = hw_engine_to_engine_activity(hwe, index);
struct guc_engine_activity *cached_activity = &ea->activity;
struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
@@ -151,8 +179,8 @@ static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
u64 active_ticks, gpm_ts;
u16 change_num;
- activity_map = engine_activity_map(guc, hwe);
- metadata_map = engine_metadata_map(guc);
+ activity_map = engine_activity_map(guc, hwe, index);
+ metadata_map = engine_metadata_map(guc, index);
global_change_num = read_metadata_record(xe, &metadata_map, global_change_num);
/* GuC has not initialized activity data yet, return 0 */
@@ -194,9 +222,9 @@ update:
return ea->total + ea->active;
}
-static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe, unsigned int index)
{
- struct engine_activity *ea = hw_engine_to_engine_activity(hwe);
+ struct engine_activity *ea = hw_engine_to_engine_activity(hwe, index);
struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
struct guc_engine_activity *cached_activity = &ea->activity;
struct iosys_map activity_map, metadata_map;
@@ -205,8 +233,8 @@ static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
u64 numerator;
u16 quanta_ratio;
- activity_map = engine_activity_map(guc, hwe);
- metadata_map = engine_metadata_map(guc);
+ activity_map = engine_activity_map(guc, hwe, index);
+ metadata_map = engine_metadata_map(guc, index);
if (!cached_metadata->guc_tsc_frequency_hz)
cached_metadata->guc_tsc_frequency_hz = read_metadata_record(xe, &metadata_map,
@@ -245,12 +273,39 @@ static int enable_engine_activity_stats(struct xe_guc *guc)
return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}
-static void engine_activity_set_cpu_ts(struct xe_guc *guc)
+static int enable_function_engine_activity_stats(struct xe_guc *guc, bool enable)
{
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
- struct engine_activity_group *eag = &engine_activity->eag[0];
+ u32 metadata_ggtt_addr = 0, ggtt_addr = 0, num_functions = 0;
+ struct engine_activity_buffer *buffer = &engine_activity->function_buffer;
+ u32 action[6];
+ int len = 0;
+
+ if (enable) {
+ metadata_ggtt_addr = xe_bo_ggtt_addr(buffer->metadata_bo);
+ ggtt_addr = xe_bo_ggtt_addr(buffer->activity_bo);
+ num_functions = engine_activity->num_functions;
+ }
+
+ action[len++] = XE_GUC_ACTION_SET_FUNCTION_ENGINE_ACTIVITY_BUFFER;
+ action[len++] = num_functions;
+ action[len++] = metadata_ggtt_addr;
+ action[len++] = 0;
+ action[len++] = ggtt_addr;
+ action[len++] = 0;
+
+ /* Blocking here to ensure the buffers are ready before reading them */
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
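
For reference, a hedged reading of the six-dword H2G payload built above; the field meanings are inferred from this code, not from a GuC interface spec:

	/* dw0: XE_GUC_ACTION_SET_FUNCTION_ENGINE_ACTIVITY_BUFFER
	 * dw1: number of functions; 0 disables the feature
	 * dw2: metadata buffer GGTT address (low 32 bits)
	 * dw3: metadata buffer address, high bits (always 0 here)
	 * dw4: activity buffer GGTT address (low 32 bits)
	 * dw5: activity buffer address, high bits (always 0 here)
	 */
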
+static void engine_activity_set_cpu_ts(struct xe_guc *guc, unsigned int index)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_group *eag = &engine_activity->eag[index];
int i, j;
+ xe_gt_assert(guc_to_gt(guc), index < engine_activity->num_activity_group);
+
for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++)
for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; j++)
eag->engine[i][j].last_cpu_ts = ktime_get();
@@ -265,34 +320,107 @@ static u32 gpm_timestamp_shift(struct xe_gt *gt)
return 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
}
+static bool is_function_valid(struct xe_guc *guc, unsigned int fn_id)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+
+ if (!IS_SRIOV_PF(xe) && fn_id)
+ return false;
+
+ if (engine_activity->num_functions && fn_id >= engine_activity->num_functions)
+ return false;
+
+ return true;
+}
+
+static int engine_activity_disable_function_stats(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer = &engine_activity->function_buffer;
+ int ret;
+
+ if (!engine_activity->num_functions)
+ return 0;
+
+ ret = enable_function_engine_activity_stats(guc, false);
+ if (ret)
+ return ret;
+
+ free_engine_activity_buffers(buffer);
+ engine_activity->num_functions = 0;
+
+ return 0;
+}
+
+static int engine_activity_enable_function_stats(struct xe_guc *guc, int num_vfs)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer = &engine_activity->function_buffer;
+ int ret, i;
+
+ if (!num_vfs)
+ return 0;
+
+ /* This includes the PF plus num_vfs VFs */
+ engine_activity->num_functions = num_vfs + 1;
+
+ ret = allocate_engine_activity_buffers(guc, buffer, engine_activity->num_functions);
+ if (ret)
+ return ret;
+
+ ret = enable_function_engine_activity_stats(guc, true);
+ if (ret) {
+ free_engine_activity_buffers(buffer);
+ engine_activity->num_functions = 0;
+ return ret;
+ }
+
+ /* Skip the PF as it was already set up */
+ for (i = 1; i < engine_activity->num_functions; i++)
+ engine_activity_set_cpu_ts(guc, i);
+
+ return 0;
+}
+
/**
* xe_guc_engine_activity_active_ticks - Get engine active ticks
* @guc: The GuC object
* @hwe: The hw_engine object
+ * @fn_id: function id to report on
*
* Return: accumulated ticks @hwe was active since engine activity stats were enabled.
*/
-u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id)
{
if (!xe_guc_engine_activity_supported(guc))
return 0;
- return get_engine_active_ticks(guc, hwe);
+ if (!is_function_valid(guc, fn_id))
+ return 0;
+
+ return get_engine_active_ticks(guc, hwe, fn_id);
}
/**
* xe_guc_engine_activity_total_ticks - Get engine total ticks
* @guc: The GuC object
* @hwe: The hw_engine object
+ * @fn_id: function id to report on
*
* Return: accumulated quanta of ticks allocated for the engine
*/
-u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id)
{
if (!xe_guc_engine_activity_supported(guc))
return 0;
- return get_engine_total_ticks(guc, hwe);
+ if (!is_function_valid(guc, fn_id))
+ return 0;
+
+ return get_engine_total_ticks(guc, hwe, fn_id);
}
/**
@@ -311,6 +439,25 @@ bool xe_guc_engine_activity_supported(struct xe_guc *guc)
}
/**
+ * xe_guc_engine_activity_function_stats - Enable/Disable per-function engine activity stats
+ * @guc: The GuC object
+ * @num_vfs: number of vfs
+ * @enable: true to enable, false otherwise
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+int xe_guc_engine_activity_function_stats(struct xe_guc *guc, int num_vfs, bool enable)
+{
+ if (!xe_guc_engine_activity_supported(guc))
+ return 0;
+
+ if (enable)
+ return engine_activity_enable_function_stats(guc, num_vfs);
+
+ return engine_activity_disable_function_stats(guc);
+}
+
+/**
* xe_guc_engine_activity_enable_stats - Enable engine activity stats
* @guc: The GuC object
*
@@ -327,7 +474,7 @@ void xe_guc_engine_activity_enable_stats(struct xe_guc *guc)
if (ret)
 xe_gt_err(guc_to_gt(guc), "failed to enable activity stats (%d)\n", ret);
else
- engine_activity_set_cpu_ts(guc);
+ engine_activity_set_cpu_ts(guc, 0);
}
static void engine_activity_fini(void *arg)
@@ -360,7 +507,7 @@ int xe_guc_engine_activity_init(struct xe_guc *guc)
return ret;
}
- ret = allocate_engine_activity_buffers(guc, &engine_activity->device_buffer);
+ ret = allocate_engine_activity_buffers(guc, &engine_activity->device_buffer, 1);
if (ret) {
xe_gt_err(gt, "failed to allocate engine activity buffers (%pe)\n", ERR_PTR(ret));
return ret;
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.h b/drivers/gpu/drm/xe/xe_guc_engine_activity.h
index a042d4cb404c..b32926c2d208 100644
--- a/drivers/gpu/drm/xe/xe_guc_engine_activity.h
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.h
@@ -14,6 +14,9 @@ struct xe_guc;
int xe_guc_engine_activity_init(struct xe_guc *guc);
bool xe_guc_engine_activity_supported(struct xe_guc *guc);
void xe_guc_engine_activity_enable_stats(struct xe_guc *guc);
-u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe);
-u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe);
+int xe_guc_engine_activity_function_stats(struct xe_guc *guc, int num_vfs, bool enable);
+u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id);
+u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id);
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h b/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h
index 5cdd034b6b70..48f69ddefa36 100644
--- a/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h
@@ -79,14 +79,24 @@ struct xe_guc_engine_activity {
/** @num_activity_group: number of activity groups */
u32 num_activity_group;
+ /** @num_functions: number of functions */
+ u32 num_functions;
+
/** @supported: indicates support for engine activity stats */
bool supported;
- /** @eag: holds the device level engine activity data */
+ /**
+ * @eag: holds the device-level engine activity data in native mode.
+ * In SRIOV mode, points to an array whose entries hold the engine
+ * activity data for the PF and VFs.
+ */
struct engine_activity_group *eag;
/** @device_buffer: buffer object for global engine activity */
struct engine_activity_buffer device_buffer;
+
+ /** @function_buffer: buffer object for per-function engine activity */
+ struct engine_activity_buffer function_buffer;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index 80514a446ba2..38039c411387 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -260,7 +260,8 @@ int xe_guc_log_init(struct xe_guc_log *log)
bo = xe_managed_bo_create_pin_map(xe, tile, guc_log_size(),
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 43b1192ba61c..18c623992035 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -462,6 +462,21 @@ static u32 get_cur_freq(struct xe_gt *gt)
}
/**
+ * xe_guc_pc_get_cur_freq_fw - With fw held, get requested frequency
+ * @pc: The GuC PC
+ *
+ * Returns: the requested frequency for that GT instance
+ */
+u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
+
+ return get_cur_freq(gt);
+}
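
A hedged usage sketch of the _fw variant: the caller holds GT forcewake across the read instead of the helper acquiring it (forcewake helpers as used elsewhere in this series; error handling trimmed):

	unsigned int fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);

	if (xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT))
		freq = xe_guc_pc_get_cur_freq_fw(&gt->uc.guc.pc);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
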
+
+/**
* xe_guc_pc_get_cur_freq - Get Current requested frequency
* @pc: The GuC PC
* @freq: A pointer to a u32 where the freq value will be returned
@@ -1170,7 +1185,8 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
bo = xe_managed_bo_create_pin_map(xe, tile, size,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h
index 39102b79602f..0a2664d5c811 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc.h
@@ -22,6 +22,7 @@ void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p);
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc);
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq);
+u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 769781d577df..2ad38f6b103e 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -300,6 +300,8 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
primelockdep(guc);
+ guc->submission_state.initialized = true;
+
return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
}
@@ -834,6 +836,13 @@ void xe_guc_submit_wedge(struct xe_guc *guc)
xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);
+ /*
+ * If the device is being wedged even before submission_state is
+ * initialized, there's nothing to do here.
+ */
+ if (!guc->submission_state.initialized)
+ return;
+
err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev,
guc_submit_wedged_fini, guc);
if (err) {
@@ -1170,9 +1179,12 @@ trigger_reset:
process_name = q->vm->xef->process_name;
pid = q->vm->xef->pid;
}
- xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]",
- xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
- q->guc->id, q->flags, process_name, pid);
+
+ if (!exec_queue_killed(q))
+ xe_gt_notice(guc_to_gt(guc),
+ "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]",
+ xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
+ q->guc->id, q->flags, process_name, pid);
trace_xe_sched_job_timedout(job);
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index 63bac64429a5..1fde7614fcc5 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -89,6 +89,11 @@ struct xe_guc {
struct mutex lock;
/** @submission_state.enabled: submission is enabled */
bool enabled;
+ /**
+ * @submission_state.initialized: marks whether the submission state has
+ * been initialized - before that, not even the lock is valid
+ */
+ bool initialized;
/** @submission_state.fini_wq: submit fini wait queue */
wait_queue_head_t fini_wq;
} submission_state;
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
index a440442b4d72..640950172088 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
@@ -605,6 +605,7 @@ err_object:
kobject_put(kobj);
return err;
}
+ALLOW_ERROR_INJECTION(xe_add_hw_engine_class_defaults, ERRNO); /* See xe_pci_probe() */
static void hw_engine_class_sysfs_fini(void *arg)
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 48d80ffdf7bb..eb293aec36a0 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -5,6 +5,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
+#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/units.h>
@@ -27,6 +28,7 @@ enum xe_hwmon_reg {
REG_PKG_POWER_SKU_UNIT,
REG_GT_PERF_STATUS,
REG_PKG_ENERGY_STATUS,
+ REG_FAN_SPEED,
};
enum xe_hwmon_reg_operation {
@@ -42,6 +44,13 @@ enum xe_hwmon_channel {
CHANNEL_MAX,
};
+enum xe_fan_channel {
+ FAN_1,
+ FAN_2,
+ FAN_3,
+ FAN_MAX,
+};
+
/*
* SF_* - scale factors for particular quantities according to hwmon spec.
*/
@@ -62,6 +71,16 @@ struct xe_hwmon_energy_info {
};
/**
+ * struct xe_hwmon_fan_info - to cache previous fan reading
+ */
+struct xe_hwmon_fan_info {
+ /** @reg_val_prev: previous fan reg val */
+ u32 reg_val_prev;
+ /** @time_prev: previous timestamp */
+ u64 time_prev;
+};
+
+/**
* struct xe_hwmon - xe hwmon data structure
*/
struct xe_hwmon {
@@ -79,6 +98,8 @@ struct xe_hwmon {
int scl_shift_time;
/** @ei: Energy info for energyN_input */
struct xe_hwmon_energy_info ei[CHANNEL_MAX];
+ /** @fi: Fan info for fanN_input */
+ struct xe_hwmon_fan_info fi[FAN_MAX];
};
static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
@@ -144,6 +165,14 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
return PCU_CR_PACKAGE_ENERGY_STATUS;
}
break;
+ case REG_FAN_SPEED:
+ if (channel == FAN_1)
+ return BMG_FAN_1_SPEED;
+ else if (channel == FAN_2)
+ return BMG_FAN_2_SPEED;
+ else if (channel == FAN_3)
+ return BMG_FAN_3_SPEED;
+ break;
default:
drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
break;
@@ -454,6 +483,7 @@ static const struct hwmon_channel_info * const hwmon_info[] = {
HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL),
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT, HWMON_F_INPUT),
NULL
};
@@ -480,6 +510,19 @@ static int xe_hwmon_pcode_write_i1(const struct xe_hwmon *hwmon, u32 uval)
(uval & POWER_SETUP_I1_DATA_MASK));
}
+static int xe_hwmon_pcode_read_fan_control(const struct xe_hwmon *hwmon, u32 subcmd, u32 *uval)
+{
+ struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
+
+ /* Some platforms don't report the correct fan count */
+ if (hwmon->xe->info.platform == XE_DG2 && subcmd == FSC_READ_NUM_FANS) {
+ *uval = 2;
+ return 0;
+ }
+
+ return xe_pcode_read(root_tile, PCODE_MBOX(FAN_SPEED_CONTROL, subcmd, 0), uval, NULL);
+}
+
static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
long *value, u32 scale_factor)
{
@@ -706,6 +749,75 @@ xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
}
static umode_t
+xe_hwmon_fan_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
+{
+ u32 uval;
+
+ if (!hwmon->xe->info.has_fan_control)
+ return 0;
+
+ switch (attr) {
+ case hwmon_fan_input:
+ if (xe_hwmon_pcode_read_fan_control(hwmon, FSC_READ_NUM_FANS, &uval))
+ return 0;
+
+ return channel < uval ? 0444 : 0;
+ default:
+ return 0;
+ }
+}
+
+static int
+xe_hwmon_fan_input_read(struct xe_hwmon *hwmon, int channel, long *val)
+{
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
+ struct xe_hwmon_fan_info *fi = &hwmon->fi[channel];
+ u64 rotations, time_now, time;
+ u32 reg_val;
+ int ret = 0;
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_FAN_SPEED, channel));
+ time_now = get_jiffies_64();
+
+ /*
+ * The HW register value is an accumulated count of pulses from the PWM
+ * fan, at a scale of 2 pulses per rotation.
+ */
+ rotations = (reg_val - fi->reg_val_prev) / 2;
+
+ time = jiffies_delta_to_msecs(time_now - fi->time_prev);
+ if (unlikely(!time)) {
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ /*
+ * Calculate fan speed in RPM by time-averaging two subsequent readings.
+ * RPM = number of rotations * msecs per minute / time in msecs
+ */
+ *val = DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), time);
+
+ fi->reg_val_prev = reg_val;
+ fi->time_prev = time_now;
+unlock:
+ mutex_unlock(&hwmon->hwmon_lock);
+ return ret;
+}
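
Worked example of the arithmetic above, with made-up numbers: a pulse-counter delta of 120 over 2000 ms is 120 / 2 = 60 rotations, so *val = DIV_ROUND_UP_ULL(60 * 60000, 2000) = 1800 RPM.
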
+
+static int
+xe_hwmon_fan_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_fan_input:
+ return xe_hwmon_fan_input_read(hwmon, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t
xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
u32 attr, int channel)
{
@@ -730,6 +842,9 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
case hwmon_energy:
ret = xe_hwmon_energy_is_visible(hwmon, attr, channel);
break;
+ case hwmon_fan:
+ ret = xe_hwmon_fan_is_visible(hwmon, attr, channel);
+ break;
default:
ret = 0;
break;
@@ -765,6 +880,9 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
case hwmon_energy:
ret = xe_hwmon_energy_read(hwmon, attr, channel, val);
break;
+ case hwmon_fan:
+ ret = xe_hwmon_fan_read(hwmon, attr, channel, val);
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -842,7 +960,7 @@ static void
xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
{
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
- long energy;
+ long energy, fan_speed;
u64 val_sku_unit = 0;
int channel;
struct xe_reg pkg_power_sku_unit;
@@ -866,6 +984,11 @@ xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
for (channel = 0; channel < CHANNEL_MAX; channel++)
if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, channel))
xe_hwmon_energy_get(hwmon, channel, &energy);
+
+ /* Initialize 'struct xe_hwmon_fan_info' with initial fan register reading. */
+ for (channel = 0; channel < FAN_MAX; channel++)
+ if (xe_hwmon_is_visible(hwmon, hwmon_fan, hwmon_fan_input, channel))
+ xe_hwmon_fan_input_read(hwmon, channel, &fan_speed);
}
static void xe_hwmon_mutex_destroy(void *arg)
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
index 89393dcb53d9..63db66df064b 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.c
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -71,7 +71,7 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
lmtt->ops->lmtt_pte_num(level)),
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
- XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_NEEDS_64K);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out_free_pt;
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 03bfba696b37..61a2e87990a9 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -38,6 +38,7 @@
#define LRC_ENGINE_CLASS GENMASK_ULL(63, 61)
#define LRC_ENGINE_INSTANCE GENMASK_ULL(53, 48)
+#define LRC_PPHWSP_SIZE SZ_4K
#define LRC_INDIRECT_RING_STATE_SIZE SZ_4K
static struct xe_device *
@@ -51,19 +52,22 @@ size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class)
struct xe_device *xe = gt_to_xe(gt);
size_t size;
+ /* Per-process HW status page (PPHWSP) */
+ size = LRC_PPHWSP_SIZE;
+
+ /* Engine context image */
switch (class) {
case XE_ENGINE_CLASS_RENDER:
if (GRAPHICS_VER(xe) >= 20)
- size = 4 * SZ_4K;
+ size += 3 * SZ_4K;
else
- size = 14 * SZ_4K;
+ size += 13 * SZ_4K;
break;
case XE_ENGINE_CLASS_COMPUTE:
- /* 14 pages since graphics_ver == 11 */
if (GRAPHICS_VER(xe) >= 20)
- size = 3 * SZ_4K;
+ size += 2 * SZ_4K;
else
- size = 14 * SZ_4K;
+ size += 13 * SZ_4K;
break;
default:
WARN(1, "Unknown engine class: %d", class);
@@ -72,7 +76,7 @@ size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class)
case XE_ENGINE_CLASS_VIDEO_DECODE:
case XE_ENGINE_CLASS_VIDEO_ENHANCE:
case XE_ENGINE_CLASS_OTHER:
- size = 2 * SZ_4K;
+ size += 1 * SZ_4K;
}
/* Add indirect ring state page */
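
Worked size check for the new accounting: on GRAPHICS_VER >= 20 a render context is LRC_PPHWSP_SIZE + 3 * SZ_4K = 16K, identical to the old flat 4 * SZ_4K; compute becomes PPHWSP + 2 * SZ_4K = 12K (was 3 * SZ_4K) and the video/other classes PPHWSP + 1 * SZ_4K = 8K (was 2 * SZ_4K), so only the bookkeeping moved, not the totals.
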
@@ -652,7 +656,6 @@ u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc)
#define LRC_CTX_JOB_TIMESTAMP_OFFSET (LRC_START_SEQNO_PPHWSP_OFFSET + 8)
#define LRC_PARALLEL_PPHWSP_OFFSET 2048
#define LRC_ENGINE_ID_PPHWSP_OFFSET 2096
-#define LRC_PPHWSP_SIZE SZ_4K
u32 xe_lrc_regs_offset(struct xe_lrc *lrc)
{
@@ -997,6 +1000,8 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT |
XE_BO_FLAG_GGTT_INVALIDATE;
+ if (vm && vm->xef) /* userspace */
+ bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
/*
* FIXME: Perma-pinning LRC as we don't yet support moving GGTT address
@@ -1566,6 +1571,7 @@ static int dump_gfxpipe_command(struct drm_printer *p,
MATCH3D(3DSTATE_CLIP_MESH);
MATCH3D(3DSTATE_SBE_MESH);
MATCH3D(3DSTATE_CPSIZE_CONTROL_BUFFER);
+ MATCH3D(3DSTATE_COARSE_PIXEL);
MATCH3D(3DSTATE_DRAWING_RECTANGLE);
MATCH3D(3DSTATE_CHROMA_KEY);
diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c
index 404fa2a456d5..49c45ec3e83c 100644
--- a/drivers/gpu/drm/xe/xe_memirq.c
+++ b/drivers/gpu/drm/xe/xe_memirq.c
@@ -86,7 +86,7 @@ static const char *guc_name(struct xe_guc *guc)
* This object needs to be 4KiB aligned.
*
* - _`Interrupt Source Report Page`: this is the equivalent of the
- * GEN11_GT_INTR_DWx registers, with each bit in those registers being
+ * GT_INTR_DWx registers, with each bit in those registers being
* mapped to a byte here. The offsets are the same, just bytes instead
* of bits. This object needs to be cacheline aligned.
*
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 5a3e89022c38..8f8e9fdfb2a8 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -97,7 +97,7 @@ struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile)
return tile->migrate->q;
}
-static void xe_migrate_fini(struct drm_device *dev, void *arg)
+static void xe_migrate_fini(void *arg)
{
struct xe_migrate *m = arg;
@@ -209,7 +209,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
num_entries * XE_PAGE_SIZE,
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED |
XE_BO_FLAG_PAGETABLE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -401,7 +400,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
struct xe_vm *vm;
int err;
- m = drmm_kzalloc(&xe->drm, sizeof(*m), GFP_KERNEL);
+ m = devm_kzalloc(xe->drm.dev, sizeof(*m), GFP_KERNEL);
if (!m)
return ERR_PTR(-ENOMEM);
@@ -455,7 +454,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
might_lock(&m->job_mutex);
fs_reclaim_release(GFP_KERNEL);
- err = drmm_add_action_or_reset(&xe->drm, xe_migrate_fini, m);
+ err = devm_add_action_or_reset(xe->drm.dev, xe_migrate_fini, m);
if (err)
return ERR_PTR(err);
@@ -670,6 +669,7 @@ static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
u32 mocs = 0;
u32 tile_y = 0;
+ xe_gt_assert(gt, !(pitch & 3));
xe_gt_assert(gt, size / pitch <= S16_MAX);
xe_gt_assert(gt, pitch / 4 <= S16_MAX);
xe_gt_assert(gt, pitch <= U16_MAX);
@@ -779,10 +779,12 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
bool dst_is_pltt = dst->mem_type == XE_PL_TT;
bool src_is_vram = mem_type_is_vram(src->mem_type);
bool dst_is_vram = mem_type_is_vram(dst->mem_type);
+ bool type_device = src_bo->ttm.type == ttm_bo_type_device;
+ bool needs_ccs_emit = type_device && xe_migrate_needs_ccs_emit(xe);
bool copy_ccs = xe_device_has_flat_ccs(xe) &&
xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
- bool use_comp_pat = xe_device_has_flat_ccs(xe) &&
+ bool use_comp_pat = type_device && xe_device_has_flat_ccs(xe) &&
GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram;
/* Copying CCS between two different BOs is not supported yet. */
@@ -839,6 +841,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
avail_pts, avail_pts);
if (copy_system_ccs) {
+ xe_assert(xe, type_device);
ccs_size = xe_device_ccs_bytes(xe, src_L0);
batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
&ccs_ofs, &ccs_pt, 0,
@@ -849,7 +852,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
/* Add copy commands size here */
batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
- ((xe_migrate_needs_ccs_emit(xe) ? EMIT_COPY_CCS_DW : 0));
+ ((needs_ccs_emit ? EMIT_COPY_CCS_DW : 0));
bb = xe_bb_new(gt, batch_size, usm);
if (IS_ERR(bb)) {
@@ -878,7 +881,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
if (!copy_only_ccs)
emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
- if (xe_migrate_needs_ccs_emit(xe))
+ if (needs_ccs_emit)
flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
IS_DGFX(xe) ? src_is_vram : src_is_pltt,
dst_L0_ofs,
@@ -1601,55 +1604,63 @@ enum xe_migrate_copy_dir {
XE_MIGRATE_COPY_TO_SRAM,
};
+#define XE_CACHELINE_BYTES 64ull
+#define XE_CACHELINE_MASK (XE_CACHELINE_BYTES - 1)
+
static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
- unsigned long npages,
+ unsigned long len,
+ unsigned long sram_offset,
dma_addr_t *sram_addr, u64 vram_addr,
const enum xe_migrate_copy_dir dir)
{
struct xe_gt *gt = m->tile->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
+ bool use_usm_batch = xe->info.has_usm;
struct dma_fence *fence = NULL;
u32 batch_size = 2;
u64 src_L0_ofs, dst_L0_ofs;
- u64 round_update_size;
struct xe_sched_job *job;
struct xe_bb *bb;
u32 update_idx, pt_slot = 0;
+ unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE);
+ unsigned int pitch = len >= PAGE_SIZE && !(len & ~PAGE_MASK) ?
+ PAGE_SIZE : 4;
int err;
- if (npages * PAGE_SIZE > MAX_PREEMPTDISABLE_TRANSFER)
- return ERR_PTR(-EINVAL);
+ if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) ||
+ (sram_offset | vram_addr) & XE_CACHELINE_MASK))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER);
- round_update_size = npages * PAGE_SIZE;
- batch_size += pte_update_cmd_size(round_update_size);
+ batch_size += pte_update_cmd_size(len);
batch_size += EMIT_COPY_DW;
- bb = xe_bb_new(gt, batch_size, true);
+ bb = xe_bb_new(gt, batch_size, use_usm_batch);
if (IS_ERR(bb)) {
err = PTR_ERR(bb);
return ERR_PTR(err);
}
build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
- sram_addr, round_update_size);
+ sram_addr, len + sram_offset);
if (dir == XE_MIGRATE_COPY_TO_VRAM) {
- src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0);
+ src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
} else {
src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
- dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0);
+ dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
}
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
- emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, round_update_size,
- XE_PAGE_SIZE);
+ emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, len, pitch);
job = xe_bb_create_migration_job(m->q, bb,
- xe_migrate_batch_base(m, true),
+ xe_migrate_batch_base(m, use_usm_batch),
update_idx);
if (IS_ERR(job)) {
err = PTR_ERR(job);
@@ -1694,7 +1705,7 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
dma_addr_t *src_addr,
u64 dst_addr)
{
- return xe_migrate_vram(m, npages, src_addr, dst_addr,
+ return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
XE_MIGRATE_COPY_TO_VRAM);
}
@@ -1715,10 +1726,193 @@ struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
u64 src_addr,
dma_addr_t *dst_addr)
{
- return xe_migrate_vram(m, npages, dst_addr, src_addr,
+ return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
XE_MIGRATE_COPY_TO_SRAM);
}
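
A hedged numeric sketch of the pitch selection introduced in xe_migrate_vram(): a page-multiple length such as len = 2 * PAGE_SIZE keeps pitch = PAGE_SIZE (two page-wide rows), while a cacheline-multiple length such as len = 192 drops to pitch = 4, so the blit is emitted as 192 / 4 = 48 four-byte rows; lengths or addresses that are not cacheline-aligned are rejected up front with -EOPNOTSUPP.
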
+static void xe_migrate_dma_unmap(struct xe_device *xe, dma_addr_t *dma_addr,
+ int len, int write)
+{
+ unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
+
+ for (i = 0; i < npages; ++i) {
+ if (!dma_addr[i])
+ break;
+
+ dma_unmap_page(xe->drm.dev, dma_addr[i], PAGE_SIZE,
+ write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ }
+ kfree(dma_addr);
+}
+
+static dma_addr_t *xe_migrate_dma_map(struct xe_device *xe,
+ void *buf, int len, int write)
+{
+ dma_addr_t *dma_addr;
+ unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
+
+ dma_addr = kcalloc(npages, sizeof(*dma_addr), GFP_KERNEL);
+ if (!dma_addr)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < npages; ++i) {
+ dma_addr_t addr;
+ struct page *page;
+
+ if (is_vmalloc_addr(buf))
+ page = vmalloc_to_page(buf);
+ else
+ page = virt_to_page(buf);
+
+ addr = dma_map_page(xe->drm.dev,
+ page, 0, PAGE_SIZE,
+ write ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(xe->drm.dev, addr))
+ goto err_fault;
+
+ dma_addr[i] = addr;
+ buf += PAGE_SIZE;
+ }
+
+ return dma_addr;
+
+err_fault:
+ xe_migrate_dma_unmap(xe, dma_addr, len, write);
+ return ERR_PTR(-EFAULT);
+}
+
+/**
+ * xe_migrate_access_memory - Access memory of a BO via GPU
+ *
+ * @m: The migration context.
+ * @bo: buffer object
+ * @offset: access offset into buffer object
+ * @buf: pointer to caller memory to read into or write from
+ * @len: length of access
+ * @write: write access
+ *
+ * Access memory of a BO via GPU either reading in or writing from a passed in
+ * pointer. Pointer is dma mapped for GPU access and GPU commands are issued to
+ * read to or write from pointer.
+ *
+ * Returns:
+ * 0 if successful, negative error code on failure.
+ */
+int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
+ unsigned long offset, void *buf, int len,
+ int write)
+{
+ struct xe_tile *tile = m->tile;
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_res_cursor cursor;
+ struct dma_fence *fence = NULL;
+ dma_addr_t *dma_addr;
+ unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK;
+ int bytes_left = len, current_page = 0;
+ void *orig_buf = buf;
+
+ xe_bo_assert_held(bo);
+
+ /* Use a bounce buffer for small and unaligned accesses */
+ if (len & XE_CACHELINE_MASK ||
+ ((uintptr_t)buf | offset) & XE_CACHELINE_MASK) {
+ int buf_offset = 0;
+
+ /*
+ * Less than ideal for large unaligned access but this should be
+ * fairly rare; can fix up if this becomes common.
+ */
+ do {
+ u8 bounce[XE_CACHELINE_BYTES];
+ void *ptr = (void *)bounce;
+ int err;
+ int copy_bytes = min_t(int, bytes_left,
+ XE_CACHELINE_BYTES -
+ (offset & XE_CACHELINE_MASK));
+ int ptr_offset = offset & XE_CACHELINE_MASK;
+
+ err = xe_migrate_access_memory(m, bo,
+ offset &
+ ~XE_CACHELINE_MASK,
+ (void *)ptr,
+ sizeof(bounce), 0);
+ if (err)
+ return err;
+
+ if (write) {
+ memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes);
+
+ err = xe_migrate_access_memory(m, bo,
+ offset & ~XE_CACHELINE_MASK,
+ (void *)ptr,
+ sizeof(bounce), write);
+ if (err)
+ return err;
+ } else {
+ memcpy(buf + buf_offset, ptr + ptr_offset,
+ copy_bytes);
+ }
+
+ bytes_left -= copy_bytes;
+ buf_offset += copy_bytes;
+ offset += copy_bytes;
+ } while (bytes_left);
+
+ return 0;
+ }
+
+ dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
+ if (IS_ERR(dma_addr))
+ return PTR_ERR(dma_addr);
+
+ xe_res_first(bo->ttm.resource, offset, bo->size - offset, &cursor);
+
+ do {
+ struct dma_fence *__fence;
+ u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) +
+ cursor.start;
+ int current_bytes;
+
+ if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER)
+ current_bytes = min_t(int, bytes_left,
+ MAX_PREEMPTDISABLE_TRANSFER);
+ else
+ current_bytes = min_t(int, bytes_left, cursor.size);
+
+ if (fence)
+ dma_fence_put(fence);
+
+ __fence = xe_migrate_vram(m, current_bytes,
+ (unsigned long)buf & ~PAGE_MASK,
+ dma_addr + current_page,
+ vram_addr, write ?
+ XE_MIGRATE_COPY_TO_VRAM :
+ XE_MIGRATE_COPY_TO_SRAM);
+ if (IS_ERR(__fence)) {
+ if (fence)
+ dma_fence_wait(fence, false);
+ fence = __fence;
+ goto out_err;
+ }
+ fence = __fence;
+
+ buf += current_bytes;
+ offset += current_bytes;
+ current_page = (int)(buf - orig_buf) / PAGE_SIZE;
+ bytes_left -= current_bytes;
+ if (bytes_left)
+ xe_res_next(&cursor, current_bytes);
+ } while (bytes_left);
+
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+
+out_err:
+ xe_migrate_dma_unmap(xe, dma_addr, len + page_offset, write);
+ return IS_ERR(fence) ? PTR_ERR(fence) : 0;
+}
+
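
A hedged usage sketch of the new helper: reading a small chunk out of a VRAM-backed BO with the BO's reservation held (xe_bo_lock()/xe_bo_unlock() assumed to be the usual helpers; error handling trimmed). If the pointer or length is not cacheline-aligned, the helper transparently takes the bounce-buffer path above:

	u8 data[256];
	int err;

	xe_bo_lock(bo, false);
	err = xe_migrate_access_memory(tile->migrate, bo, 0 /* offset */,
				       data, sizeof(data), 0 /* read */);
	xe_bo_unlock(bo);
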
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_migrate.c"
#endif
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index 6ff9a963425c..fb9839c1bae0 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -112,6 +112,10 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct ttm_resource *dst,
bool copy_only_ccs);
+int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
+ unsigned long offset, void *buf, int len,
+ int write);
+
#define XE_MIGRATE_CLEAR_FLAG_BO_DATA BIT(0)
#define XE_MIGRATE_CLEAR_FLAG_CCS_DATA BIT(1)
#define XE_MIGRATE_CLEAR_FLAG_FULL (XE_MIGRATE_CLEAR_FLAG_BO_DATA | \
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 46301f341773..7357458bc0d2 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -138,6 +138,7 @@ int xe_mmio_probe_early(struct xe_device *xe)
return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
}
+ALLOW_ERROR_INJECTION(xe_mmio_probe_early, ERRNO); /* See xe_pci_probe() */
/**
* xe_mmio_init() - Initialize an MMIO instance
@@ -204,8 +205,9 @@ void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val)
trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));
- if (!reg.vf && mmio->sriov_vf_gt)
- xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val);
+ if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe))
+ xe_gt_sriov_vf_write32(mmio->sriov_vf_gt ?:
+ mmio->tile->primary_gt, reg, val);
else
writel(val, mmio->regs + addr);
}
@@ -218,8 +220,9 @@ u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg)
/* Wa_15015404425 */
mmio_flush_pending_writes(mmio);
- if (!reg.vf && mmio->sriov_vf_gt)
- val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt, reg);
+ if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe))
+ val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt ?:
+ mmio->tile->primary_gt, reg);
else
val = readl(mmio->regs + addr);
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index e861c694f336..e4742e27e2cd 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -11,6 +11,7 @@
#include <drm/drm_module.h>
#include "xe_drv.h"
+#include "xe_configfs.h"
#include "xe_hw_fence.h"
#include "xe_pci.h"
#include "xe_pm.h"
@@ -35,8 +36,8 @@ MODULE_PARM_DESC(force_execlist, "Force Execlist submission");
module_param_named(probe_display, xe_modparam.probe_display, bool, 0444);
MODULE_PARM_DESC(probe_display, "Probe display HW, otherwise it's left untouched (default: true)");
-module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, uint, 0600);
-MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size(in MiB)");
+module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, int, 0600);
+MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size (in MiB) - <0=disable-resize, 0=max-needed-size[default], >0=force-size");
module_param_named(guc_log_level, xe_modparam.guc_log_level, int, 0600);
MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (0=disable, 1..5=enable with verbosity min..max)");
@@ -86,6 +87,10 @@ static const struct init_funcs init_funcs[] = {
.init = xe_check_nomodeset,
},
{
+ .init = xe_configfs_init,
+ .exit = xe_configfs_exit,
+ },
+ {
.init = xe_hw_fence_module_init,
.exit = xe_hw_fence_module_exit,
},
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 7ffc98f67e69..fb842fa0552e 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -1301,7 +1301,7 @@ static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_fr
int err;
u32 idx;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(oa->xe, err))
return -EFAULT;
@@ -1338,7 +1338,7 @@ static int xe_oa_user_extensions(struct xe_oa *oa, enum xe_oa_user_extn_from fro
if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS))
return -E2BIG;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(oa->xe, err))
return -EFAULT;
@@ -2221,6 +2221,7 @@ addr_err:
kfree(oa_regs);
return ERR_PTR(err);
}
+ALLOW_ERROR_INJECTION(xe_oa_alloc_regs, ERRNO);
static ssize_t show_dynamic_id(struct kobject *kobj,
struct kobj_attribute *attr,
@@ -2280,7 +2281,7 @@ int xe_oa_add_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *fi
return -EACCES;
}
- err = __copy_from_user(&param, u64_to_user_ptr(data), sizeof(param));
+ err = copy_from_user(&param, u64_to_user_ptr(data), sizeof(param));
if (XE_IOCTL_DBG(oa->xe, err))
return -EFAULT;
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index f4d108dc49b1..024175cfe61e 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -62,11 +62,13 @@ struct xe_device_desc {
u8 is_dgfx:1;
u8 has_display:1;
+ u8 has_fan_control:1;
u8 has_heci_gscfi:1;
u8 has_heci_cscfi:1;
u8 has_llc:1;
u8 has_pxp:1;
u8 has_sriov:1;
+ u8 needs_scratch:1;
u8 skip_guc_pc:1;
u8 skip_mtcfg:1;
u8 skip_pcode:1;
@@ -303,6 +305,7 @@ static const struct xe_device_desc dg2_desc = {
DG2_FEATURES,
.has_display = true,
+ .has_fan_control = true,
};
static const __maybe_unused struct xe_device_desc pvc_desc = {
@@ -330,6 +333,7 @@ static const struct xe_device_desc lnl_desc = {
.dma_mask_size = 46,
.has_display = true,
.has_pxp = true,
+ .needs_scratch = true,
};
static const struct xe_device_desc bmg_desc = {
@@ -337,7 +341,9 @@ static const struct xe_device_desc bmg_desc = {
PLATFORM(BATTLEMAGE),
.dma_mask_size = 46,
.has_display = true,
+ .has_fan_control = true,
.has_heci_cscfi = 1,
+ .needs_scratch = true,
};
static const struct xe_device_desc ptl_desc = {
@@ -346,6 +352,7 @@ static const struct xe_device_desc ptl_desc = {
.has_display = true,
.has_sriov = true,
.require_force_probe = true,
+ .needs_scratch = true,
};
#undef PLATFORM
@@ -576,6 +583,7 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.dma_mask_size = desc->dma_mask_size;
xe->info.is_dgfx = desc->is_dgfx;
+ xe->info.has_fan_control = desc->has_fan_control;
xe->info.has_heci_gscfi = desc->has_heci_gscfi;
xe->info.has_heci_cscfi = desc->has_heci_cscfi;
xe->info.has_llc = desc->has_llc;
@@ -584,6 +592,7 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.skip_guc_pc = desc->skip_guc_pc;
xe->info.skip_mtcfg = desc->skip_mtcfg;
xe->info.skip_pcode = desc->skip_pcode;
+ xe->info.needs_scratch = desc->needs_scratch;
xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
xe_modparam.probe_display &&
@@ -735,7 +744,7 @@ static void xe_pci_remove(struct pci_dev *pdev)
return;
xe_device_remove(xe);
- xe_pm_runtime_fini(xe);
+ xe_pm_fini(xe);
}
/*
@@ -805,18 +814,17 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
err = xe_device_probe_early(xe);
- if (err) {
- /*
- * In Boot Survivability mode, no drm card is exposed and driver
- * is loaded with bare minimum to allow for firmware to be
- * flashed through mei. If early probe failed, but it managed to
- * enable survivability mode, return success.
- */
- if (xe_survivability_mode_is_enabled(xe))
- return 0;
+ /*
+ * In Boot Survivability mode, no drm card is exposed and the driver
+ * is loaded with the bare minimum to allow firmware to be flashed
+ * through mei. Return success if survivability mode is enabled,
+ * whether due to pcode failure or to configfs being set.
+ */
+ if (xe_survivability_mode_is_enabled(xe))
+ return 0;
+ if (err)
return err;
- }
err = xe_info_init(xe, desc);
if (err)
@@ -922,6 +930,7 @@ static int xe_pci_suspend(struct device *dev)
pci_save_state(pdev);
pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c
index 09ee8a06fe2e..8813efdcafbb 100644
--- a/drivers/gpu/drm/xe/xe_pci_sriov.c
+++ b/drivers/gpu/drm/xe/xe_pci_sriov.c
@@ -7,6 +7,8 @@
#include "xe_device.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_guc_engine_activity.h"
#include "xe_pci_sriov.h"
#include "xe_pm.h"
#include "xe_sriov.h"
@@ -111,6 +113,20 @@ static void pf_link_vfs(struct xe_device *xe, int num_vfs)
}
}
+static void pf_engine_activity_stats(struct xe_device *xe, unsigned int num_vfs, bool enable)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int ret = 0;
+
+ for_each_gt(gt, xe, id) {
+ ret = xe_guc_engine_activity_function_stats(&gt->uc.guc, num_vfs, enable);
+ if (ret)
+ xe_gt_sriov_info(gt, "Failed to %s engine activity function stats (%pe)\n",
+ str_enable_disable(enable), ERR_PTR(ret));
+ }
+}
+
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -145,6 +161,9 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
xe_sriov_info(xe, "Enabled %u of %u VF%s\n",
num_vfs, total_vfs, str_plural(total_vfs));
+
+ pf_engine_activity_stats(xe, num_vfs, true);
+
return num_vfs;
failed:
@@ -168,6 +187,8 @@ static int pf_disable_vfs(struct xe_device *xe)
if (!num_vfs)
return 0;
+ pf_engine_activity_stats(xe, num_vfs, false);
+
pci_disable_sriov(pdev);
pf_reset_vfs(xe, num_vfs);
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index 9333ce776a6e..cf955b3ed52c 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/error-injection.h>
#include <drm/drm_managed.h>
@@ -323,3 +324,4 @@ int xe_pcode_probe_early(struct xe_device *xe)
{
return xe_pcode_ready(xe, false);
}
+ALLOW_ERROR_INJECTION(xe_pcode_probe_early, ERRNO); /* See xe_pci_probe */
diff --git a/drivers/gpu/drm/xe/xe_pcode_api.h b/drivers/gpu/drm/xe/xe_pcode_api.h
index 2bae9afdbd35..127d4d26c4cf 100644
--- a/drivers/gpu/drm/xe/xe_pcode_api.h
+++ b/drivers/gpu/drm/xe/xe_pcode_api.h
@@ -34,6 +34,7 @@
#define DGFX_PCODE_STATUS 0x7E
#define DGFX_GET_INIT_STATUS 0x0
#define DGFX_INIT_STATUS_COMPLETE 0x1
+#define DGFX_LINK_DOWNGRADE_STATUS REG_BIT(31)
#define PCODE_POWER_SETUP 0x7C
#define POWER_SETUP_SUBCOMMAND_READ_I1 0x4
@@ -49,6 +50,9 @@
/* Domain IDs (param2) */
#define PCODE_MBOX_DOMAIN_HBM 0x2
+#define FAN_SPEED_CONTROL 0x7D
+#define FSC_READ_NUM_FANS 0x4
+
#define PCODE_SCRATCH(x) XE_REG(0x138320 + ((x) * 4))
/* PCODE_SCRATCH0 */
#define AUXINFO_REG_OFFSET REG_GENMASK(17, 15)
@@ -63,6 +67,10 @@
/* Auxiliary info bits */
#define AUXINFO_HISTORY_OFFSET REG_GENMASK(31, 29)
+#define BMG_PCIE_CAP XE_REG(0x138340)
+#define LINK_DOWNGRADE REG_GENMASK(1, 0)
+#define DOWNGRADE_CAPABLE 2
+
struct pcode_err_decode {
int errno;
const char *str;
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 7b6b754ad6eb..ff749edc005b 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -16,7 +16,6 @@
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
-#include "xe_device_sysfs.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
@@ -188,7 +187,7 @@ int xe_pm_resume(struct xe_device *xe)
* This only restores pinned memory which is the memory required for the
* GT(s) to resume.
*/
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
if (err)
goto err;
@@ -199,7 +198,7 @@ int xe_pm_resume(struct xe_device *xe)
xe_display_pm_resume(xe);
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err)
goto err;
@@ -273,6 +272,7 @@ int xe_pm_init_early(struct xe_device *xe)
if (err)
return err;
+ xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
return 0;
}
ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
@@ -286,6 +286,42 @@ static u32 vram_threshold_value(struct xe_device *xe)
return DEFAULT_VRAM_THRESHOLD;
}
+static int xe_pm_notifier_callback(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier);
+ int err = 0;
+
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ xe_pm_runtime_get(xe);
+ err = xe_bo_evict_all_user(xe);
+ if (err) {
+ drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);
+ xe_pm_runtime_put(xe);
+ break;
+ }
+
+ err = xe_bo_notifier_prepare_all_pinned(xe);
+ if (err) {
+ drm_dbg(&xe->drm, "Notifier prepare pin failed (%d)\n", err);
+ xe_pm_runtime_put(xe);
+ }
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ xe_bo_notifier_unprepare_all_pinned(xe);
+ xe_pm_runtime_put(xe);
+ break;
+ }
+
+ if (err)
+ return NOTIFY_BAD;
+
+ return NOTIFY_DONE;
+}
+
/**
* xe_pm_init - Initialize Xe Power Management
* @xe: xe device instance
@@ -299,33 +335,31 @@ int xe_pm_init(struct xe_device *xe)
u32 vram_threshold;
int err;
+ xe->pm_notifier.notifier_call = xe_pm_notifier_callback;
+ err = register_pm_notifier(&xe->pm_notifier);
+ if (err)
+ return err;
+
/* For now suspend/resume is only allowed with GuC */
if (!xe_device_uc_enabled(xe))
return 0;
- xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
-
if (xe->d3cold.capable) {
- err = xe_device_sysfs_init(xe);
- if (err)
- return err;
-
vram_threshold = vram_threshold_value(xe);
err = xe_pm_set_vram_threshold(xe, vram_threshold);
if (err)
- return err;
+ goto err_unregister;
}
xe_pm_runtime_init(xe);
-
return 0;
+
+err_unregister:
+ unregister_pm_notifier(&xe->pm_notifier);
+ return err;
}
-/**
- * xe_pm_runtime_fini - Finalize Runtime PM
- * @xe: xe device instance
- */
-void xe_pm_runtime_fini(struct xe_device *xe)
+static void xe_pm_runtime_fini(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
@@ -333,6 +367,18 @@ void xe_pm_runtime_fini(struct xe_device *xe)
pm_runtime_forbid(dev);
}
+/**
+ * xe_pm_fini - Finalize PM
+ * @xe: xe device instance
+ */
+void xe_pm_fini(struct xe_device *xe)
+{
+ if (xe_device_uc_enabled(xe))
+ xe_pm_runtime_fini(xe);
+
+ unregister_pm_notifier(&xe->pm_notifier);
+}
+
static void xe_pm_write_callback_task(struct xe_device *xe,
struct task_struct *task)
{
@@ -484,7 +530,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
* This only restores pinned memory which is the memory
* required for the GT(s) to resume.
*/
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
if (err)
goto out;
}
@@ -497,7 +543,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
xe_display_pm_runtime_resume(xe);
if (xe->d3cold.allowed) {
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err)
goto out;
}
@@ -641,7 +687,7 @@ static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
return dev->power.runtime_status == RPM_SUSPENDING ||
dev->power.runtime_status == RPM_RESUMING ||
- pm_suspend_target_state != PM_SUSPEND_ON;
+ pm_suspend_in_progress();
#else
return false;
#endif
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index 998d1ed64556..59678b310e55 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -17,7 +17,7 @@ int xe_pm_resume(struct xe_device *xe);
int xe_pm_init_early(struct xe_device *xe);
int xe_pm_init(struct xe_device *xe);
-void xe_pm_runtime_fini(struct xe_device *xe);
+void xe_pm_fini(struct xe_device *xe);
bool xe_pm_runtime_suspended(struct xe_device *xe);
int xe_pm_runtime_suspend(struct xe_device *xe);
int xe_pm_runtime_resume(struct xe_device *xe);
diff --git a/drivers/gpu/drm/xe/xe_pmu.c b/drivers/gpu/drm/xe/xe_pmu.c
index 4f62a6e515d6..69df0e3520a5 100644
--- a/drivers/gpu/drm/xe/xe_pmu.c
+++ b/drivers/gpu/drm/xe/xe_pmu.c
@@ -10,9 +10,11 @@
#include "xe_force_wake.h"
#include "xe_gt_idle.h"
#include "xe_guc_engine_activity.h"
+#include "xe_guc_pc.h"
#include "xe_hw_engine.h"
#include "xe_pm.h"
#include "xe_pmu.h"
+#include "xe_sriov_pf_helpers.h"
/**
* DOC: Xe PMU (Performance Monitoring Unit)
@@ -32,9 +34,10 @@
* gt[60:63] Selects gt for the event
* engine_class[20:27] Selects engine-class for event
* engine_instance[12:19] Selects the engine-instance for the event
+ * function[44:59] Selects the function of the event (when SRIOV is enabled)
*
* For engine specific events (engine-*), gt, engine_class and engine_instance parameters must be
- * set as populated by DRM_XE_DEVICE_QUERY_ENGINES.
+ * set as populated by DRM_XE_DEVICE_QUERY_ENGINES; function must also be set if SRIOV is enabled.
*
* For gt specific events (gt-*) gt parameter must be passed. All other parameters will be 0.
*
@@ -49,6 +52,7 @@
*/
#define XE_PMU_EVENT_GT_MASK GENMASK_ULL(63, 60)
+#define XE_PMU_EVENT_FUNCTION_MASK GENMASK_ULL(59, 44)
#define XE_PMU_EVENT_ENGINE_CLASS_MASK GENMASK_ULL(27, 20)
#define XE_PMU_EVENT_ENGINE_INSTANCE_MASK GENMASK_ULL(19, 12)
#define XE_PMU_EVENT_ID_MASK GENMASK_ULL(11, 0)
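
For illustration, a hedged sketch of encoding a config under this layout — engine-active-ticks (event id 0x02, defined just below) for VF1 on GT0, engine class 0, instance 0, using FIELD_PREP from <linux/bitfield.h>:

	u64 config = FIELD_PREP(XE_PMU_EVENT_GT_MASK, 0) |
		     FIELD_PREP(XE_PMU_EVENT_FUNCTION_MASK, 1) |
		     FIELD_PREP(XE_PMU_EVENT_ENGINE_CLASS_MASK, 0) |
		     FIELD_PREP(XE_PMU_EVENT_ENGINE_INSTANCE_MASK, 0) |
		     FIELD_PREP(XE_PMU_EVENT_ID_MASK, 0x02);
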
@@ -58,6 +62,11 @@ static unsigned int config_to_event_id(u64 config)
return FIELD_GET(XE_PMU_EVENT_ID_MASK, config);
}
+static unsigned int config_to_function_id(u64 config)
+{
+ return FIELD_GET(XE_PMU_EVENT_FUNCTION_MASK, config);
+}
+
static unsigned int config_to_engine_class(u64 config)
{
return FIELD_GET(XE_PMU_EVENT_ENGINE_CLASS_MASK, config);
@@ -76,6 +85,8 @@ static unsigned int config_to_gt_id(u64 config)
#define XE_PMU_EVENT_GT_C6_RESIDENCY 0x01
#define XE_PMU_EVENT_ENGINE_ACTIVE_TICKS 0x02
#define XE_PMU_EVENT_ENGINE_TOTAL_TICKS 0x03
+#define XE_PMU_EVENT_GT_ACTUAL_FREQUENCY 0x04
+#define XE_PMU_EVENT_GT_REQUESTED_FREQUENCY 0x05
static struct xe_gt *event_to_gt(struct perf_event *event)
{
@@ -111,6 +122,14 @@ static bool is_engine_event(u64 config)
event_id == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
}
+static bool is_gt_frequency_event(struct perf_event *event)
+{
+ u32 id = config_to_event_id(event->attr.config);
+
+ return id == XE_PMU_EVENT_GT_ACTUAL_FREQUENCY ||
+ id == XE_PMU_EVENT_GT_REQUESTED_FREQUENCY;
+}
+
static bool event_gt_forcewake(struct perf_event *event)
{
struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
@@ -118,7 +137,7 @@ static bool event_gt_forcewake(struct perf_event *event)
struct xe_gt *gt;
unsigned int *fw_ref;
- if (!is_engine_event(config))
+ if (!is_engine_event(config) && !is_gt_frequency_event(event))
return true;
gt = xe_device_get_gt(xe, config_to_gt_id(config));
@@ -151,7 +170,7 @@ static bool event_supported(struct xe_pmu *pmu, unsigned int gt,
static bool event_param_valid(struct perf_event *event)
{
struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
- unsigned int engine_class, engine_instance;
+ unsigned int engine_class, engine_instance, function_id;
u64 config = event->attr.config;
struct xe_gt *gt;
@@ -161,16 +180,28 @@ static bool event_param_valid(struct perf_event *event)
engine_class = config_to_engine_class(config);
engine_instance = config_to_engine_instance(config);
+ function_id = config_to_function_id(config);
switch (config_to_event_id(config)) {
case XE_PMU_EVENT_GT_C6_RESIDENCY:
- if (engine_class || engine_instance)
+ case XE_PMU_EVENT_GT_ACTUAL_FREQUENCY:
+ case XE_PMU_EVENT_GT_REQUESTED_FREQUENCY:
+ if (engine_class || engine_instance || function_id)
return false;
break;
case XE_PMU_EVENT_ENGINE_ACTIVE_TICKS:
case XE_PMU_EVENT_ENGINE_TOTAL_TICKS:
if (!event_to_hwe(event))
return false;
+
+ /* Valid function IDs are PF(0) through totalvfs when SR-IOV is enabled */
+ if (IS_SRIOV_PF(xe)) {
+ if (function_id > xe_sriov_pf_get_totalvfs(xe))
+ return false;
+ } else if (function_id) {
+ return false;
+ }
+
break;
}
@@ -242,13 +273,17 @@ static int xe_pmu_event_init(struct perf_event *event)
static u64 read_engine_events(struct xe_gt *gt, struct perf_event *event)
{
struct xe_hw_engine *hwe;
- u64 val = 0;
+ unsigned int function_id;
+ u64 config, val = 0;
+
+ config = event->attr.config;
+ function_id = config_to_function_id(config);
hwe = event_to_hwe(event);
- if (config_to_event_id(event->attr.config) == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS)
- val = xe_guc_engine_activity_active_ticks(&gt->uc.guc, hwe);
+ if (config_to_event_id(config) == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS)
+ val = xe_guc_engine_activity_active_ticks(&gt->uc.guc, hwe, function_id);
else
- val = xe_guc_engine_activity_total_ticks(&gt->uc.guc, hwe);
+ val = xe_guc_engine_activity_total_ticks(&gt->uc.guc, hwe, function_id);
return val;
}
@@ -266,6 +301,10 @@ static u64 __xe_pmu_event_read(struct perf_event *event)
case XE_PMU_EVENT_ENGINE_ACTIVE_TICKS:
case XE_PMU_EVENT_ENGINE_TOTAL_TICKS:
return read_engine_events(gt, event);
+ case XE_PMU_EVENT_GT_ACTUAL_FREQUENCY:
+ return xe_guc_pc_get_act_freq(&gt->uc.guc.pc);
+ case XE_PMU_EVENT_GT_REQUESTED_FREQUENCY:
+ return xe_guc_pc_get_cur_freq_fw(&gt->uc.guc.pc);
}
return 0;
@@ -281,7 +320,14 @@ static void xe_pmu_event_update(struct perf_event *event)
new = __xe_pmu_event_read(event);
} while (!local64_try_cmpxchg(&hwc->prev_count, &prev, new));
- local64_add(new - prev, &event->count);
+ /*
+ * GT frequency is not a monotonically increasing counter, so add the
+ * instantaneous value instead.
+ */
+ if (is_gt_frequency_event(event))
+ local64_add(new, &event->count);
+ else
+ local64_add(new - prev, &event->count);
}
static void xe_pmu_event_read(struct perf_event *event)
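
A minimal sketch (illustrative types, not driver code) of the two accumulation modes in xe_pmu_event_update(): tick counters accumulate deltas between reads, while the frequency gauges accumulate the sampled value itself, which tooling can then average over the sampling interval:

#include <stdint.h>
#include <stdio.h>

struct evt { uint64_t prev, count; int is_gauge; };

static void update(struct evt *e, uint64_t sample)
{
        uint64_t prev = e->prev;

        e->prev = sample;
        /* counter: add the delta; gauge: add the instantaneous sample */
        e->count += e->is_gauge ? sample : sample - prev;
}

int main(void)
{
        struct evt ticks = { 0 }, freq = { .is_gauge = 1 };

        update(&ticks, 100); update(&ticks, 250);
        update(&freq, 1200); update(&freq, 1500);
        printf("%llu %llu\n", (unsigned long long)ticks.count,  /* 250 */
               (unsigned long long)freq.count);                 /* 2700 */
        return 0;
}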
@@ -351,6 +397,7 @@ static void xe_pmu_event_del(struct perf_event *event, int flags)
}
PMU_FORMAT_ATTR(gt, "config:60-63");
+PMU_FORMAT_ATTR(function, "config:44-59");
PMU_FORMAT_ATTR(engine_class, "config:20-27");
PMU_FORMAT_ATTR(engine_instance, "config:12-19");
PMU_FORMAT_ATTR(event, "config:0-11");
@@ -359,6 +406,7 @@ static struct attribute *pmu_format_attrs[] = {
&format_attr_event.attr,
&format_attr_engine_class.attr,
&format_attr_engine_instance.attr,
+ &format_attr_function.attr,
&format_attr_gt.attr,
NULL,
};
@@ -419,6 +467,10 @@ static ssize_t event_attr_show(struct device *dev,
XE_EVENT_ATTR_SIMPLE(gt-c6-residency, gt_c6_residency, XE_PMU_EVENT_GT_C6_RESIDENCY, "ms");
XE_EVENT_ATTR_NOUNIT(engine-active-ticks, engine_active_ticks, XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
XE_EVENT_ATTR_NOUNIT(engine-total-ticks, engine_total_ticks, XE_PMU_EVENT_ENGINE_TOTAL_TICKS);
+XE_EVENT_ATTR_SIMPLE(gt-actual-frequency, gt_actual_frequency,
+ XE_PMU_EVENT_GT_ACTUAL_FREQUENCY, "MHz");
+XE_EVENT_ATTR_SIMPLE(gt-requested-frequency, gt_requested_frequency,
+ XE_PMU_EVENT_GT_REQUESTED_FREQUENCY, "MHz");
static struct attribute *pmu_empty_event_attrs[] = {
/* Empty - all events are added as groups with .attr_update() */
@@ -434,6 +486,8 @@ static const struct attribute_group *pmu_events_attr_update[] = {
&pmu_group_gt_c6_residency,
&pmu_group_engine_active_ticks,
&pmu_group_engine_total_ticks,
+ &pmu_group_gt_actual_frequency,
+ &pmu_group_gt_requested_frequency,
NULL,
};
@@ -442,8 +496,11 @@ static void set_supported_events(struct xe_pmu *pmu)
struct xe_device *xe = container_of(pmu, typeof(*xe), pmu);
struct xe_gt *gt = xe_device_get_gt(xe, 0);
- if (!xe->info.skip_guc_pc)
+ if (!xe->info.skip_guc_pc) {
pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_C6_RESIDENCY);
+ pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_ACTUAL_FREQUENCY);
+ pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_REQUESTED_FREQUENCY);
+ }
if (xe_guc_engine_activity_supported(&gt->uc.guc)) {
pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 856038553b81..b04756a97cdc 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -103,6 +103,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
{
struct xe_pt *pt;
struct xe_bo *bo;
+ u32 bo_flags;
int err;
if (level) {
@@ -115,14 +116,16 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
if (!pt)
return ERR_PTR(-ENOMEM);
+ bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
+ XE_BO_FLAG_NO_RESV_EVICT | XE_BO_FLAG_PAGETABLE;
+ if (vm->xef) /* userspace */
+ bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
+
pt->level = level;
bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
- XE_BO_FLAG_PINNED |
- XE_BO_FLAG_NO_RESV_EVICT |
- XE_BO_FLAG_PAGETABLE);
+ bo_flags);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto err_kfree;
@@ -269,8 +272,11 @@ struct xe_pt_update {
bool preexisting;
};
+/**
+ * struct xe_pt_stage_bind_walk - Walk state for the stage_bind walk.
+ */
struct xe_pt_stage_bind_walk {
- /** base: The base class. */
+ /** @base: The base class. */
struct xe_pt_walk base;
/* Input parameters for the walk */
@@ -278,15 +284,19 @@ struct xe_pt_stage_bind_walk {
struct xe_vm *vm;
/** @tile: The tile we're building for. */
struct xe_tile *tile;
- /** @default_pte: PTE flag only template. No address is associated */
- u64 default_pte;
+ /** @default_vram_pte: PTE-flags-only template for VRAM; no address is associated */
+ u64 default_vram_pte;
+ /** @default_system_pte: PTE-flags-only template for system memory; no address is associated */
+ u64 default_system_pte;
/** @dma_offset: DMA offset to add to the PTE. */
u64 dma_offset;
/**
- * @needs_64k: This address range enforces 64K alignment and
- * granularity.
+ * @needs_64K: This address range enforces 64K alignment and
+ * granularity on VRAM.
*/
bool needs_64K;
+ /** @clear_pt: clear page table entries during the bind walk */
+ bool clear_pt;
/**
* @vma: VMA being mapped
*/
@@ -299,6 +309,7 @@ struct xe_pt_stage_bind_walk {
u64 va_curs_start;
/* Output */
+ /** @wupd: Walk output data for page-table updates. */
struct xe_walk_update {
/** @wupd.entries: Caller provided storage. */
struct xe_vm_pgtable_update *entries;
@@ -316,7 +327,7 @@ struct xe_pt_stage_bind_walk {
u64 l0_end_addr;
/** @addr_64K: The start address of the current 64K chunk. */
u64 addr_64K;
- /** @found_64: Whether @add_64K actually points to a 64K chunk. */
+ /** @found_64K: Whether @addr_64K actually points to a 64K chunk. */
bool found_64K;
};
@@ -436,6 +447,10 @@ static bool xe_pt_hugepte_possible(u64 addr, u64 next, unsigned int level,
if (xe_vma_is_null(xe_walk->vma))
return true;
+ /* If we are clearing the page table, there are no DMA addresses */
+ if (xe_walk->clear_pt)
+ return true;
+
/* Is the DMA address huge PTE size aligned? */
size = next - addr;
dma = addr - xe_walk->va_curs_start + xe_res_dma(xe_walk->curs);
@@ -515,24 +530,35 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
if (level == 0 || xe_pt_hugepte_possible(addr, next, level, xe_walk)) {
struct xe_res_cursor *curs = xe_walk->curs;
bool is_null = xe_vma_is_null(xe_walk->vma);
+ bool is_vram = is_null ? false : xe_res_is_vram(curs);
XE_WARN_ON(xe_walk->va_curs_start != addr);
- pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
- xe_res_dma(curs) + xe_walk->dma_offset,
- xe_walk->vma, pat_index, level);
- pte |= xe_walk->default_pte;
+ if (xe_walk->clear_pt) {
+ pte = 0;
+ } else {
+ pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
+ xe_res_dma(curs) +
+ xe_walk->dma_offset,
+ xe_walk->vma,
+ pat_index, level);
+ if (!is_null)
+ pte |= is_vram ? xe_walk->default_vram_pte :
+ xe_walk->default_system_pte;
- /*
- * Set the XE_PTE_PS64 hint if possible, otherwise if
- * this device *requires* 64K PTE size for VRAM, fail.
- */
- if (level == 0 && !xe_parent->is_compact) {
- if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
- xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K;
- pte |= XE_PTE_PS64;
- } else if (XE_WARN_ON(xe_walk->needs_64K)) {
- return -EINVAL;
+ /*
+ * Set the XE_PTE_PS64 hint if possible, otherwise if
+ * this device *requires* 64K PTE size for VRAM, fail.
+ */
+ if (level == 0 && !xe_parent->is_compact) {
+ if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
+ xe_walk->vma->gpuva.flags |=
+ XE_VMA_PTE_64K;
+ pte |= XE_PTE_PS64;
+ } else if (XE_WARN_ON(xe_walk->needs_64K &&
+ is_vram)) {
+ return -EINVAL;
+ }
}
}
@@ -540,7 +566,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
if (unlikely(ret))
return ret;
- if (!is_null)
+ if (!is_null && !xe_walk->clear_pt)
xe_res_next(curs, next - addr);
xe_walk->va_curs_start = next;
xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level);
@@ -603,6 +629,44 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
.pt_entry = xe_pt_stage_bind_entry,
};
+/*
+ * Default atomic expectations for different allocation scenarios are as follows:
+ *
+ * 1. Traditional API: When the VM is not in LR mode:
+ * - Device atomics are expected to function with all allocations.
+ *
+ * 2. Compute/SVM API: When the VM is in LR mode:
+ * - Device atomics are the default behavior when the bo is placed in a single region.
+ * - In all other cases device atomics will be disabled with AE=0 until an application
+ * requests otherwise using an ioctl such as madvise.
+ */
+static bool xe_atomic_for_vram(struct xe_vm *vm)
+{
+ return true;
+}
+
+static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_bo *bo)
+{
+ struct xe_device *xe = vm->xe;
+
+ if (!xe->info.has_device_atomics_on_smem)
+ return false;
+
+ /*
+ * If a SMEM+LMEM allocation is backed by SMEM, a device
+ * atomic will cause a GPU page fault and the allocation
+ * then gets migrated to LMEM, so bind such allocations
+ * with device atomics enabled.
+ *
+ * TODO: Revisit this. Perhaps add something like a
+ * fault_on_atomics_in_system UAPI flag.
+ * Note that this also prohibits GPU atomics in LR mode for
+ * userptr and system memory on DGFX.
+ */
+ return (!IS_DGFX(xe) || (!xe_vm_in_lr_mode(vm) ||
+ (bo && xe_bo_has_single_placement(bo))));
+}
+
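
Distilled into plain booleans, the policy implemented by the two helpers above looks like this (an illustrative sketch, not driver code):

#include <stdbool.h>

static bool atomics_in_vram(void)
{
        return true;    /* VRAM-backed PTEs always get the AE bit */
}

static bool atomics_in_system(bool has_smem_atomics, bool is_dgfx,
                              bool lr_mode, bool single_placement)
{
        if (!has_smem_atomics)
                return false;   /* e.g. PVC lacks atomics on system memory */
        /* integrated, non-LR, or single-placement BOs keep AE set */
        return !is_dgfx || !lr_mode || single_placement;
}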
/**
* xe_pt_stage_bind() - Build a disconnected page-table tree for a given address
* range.
@@ -612,6 +676,7 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
* @entries: Storage for the update entries used for connecting the tree to
* the main tree at commit time.
* @num_entries: On output contains the number of @entries used.
+ * @clear_pt: Clear the page table entries.
*
* This function builds a disconnected page-table tree for a given address
* range. The tree is connected to the main vm tree for the gpu using
@@ -625,13 +690,13 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
static int
xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
struct xe_svm_range *range,
- struct xe_vm_pgtable_update *entries, u32 *num_entries)
+ struct xe_vm_pgtable_update *entries,
+ u32 *num_entries, bool clear_pt)
{
struct xe_device *xe = tile_to_xe(tile);
struct xe_bo *bo = xe_vma_bo(vma);
- bool is_devmem = !xe_vma_is_userptr(vma) && bo &&
- (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo));
struct xe_res_cursor curs;
+ struct xe_vm *vm = xe_vma_vm(vma);
struct xe_pt_stage_bind_walk xe_walk = {
.base = {
.ops = &xe_pt_stage_bind_ops,
@@ -639,34 +704,31 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
.max_level = XE_PT_HIGHEST_LEVEL,
.staging = true,
},
- .vm = xe_vma_vm(vma),
+ .vm = vm,
.tile = tile,
.curs = &curs,
.va_curs_start = range ? range->base.itree.start :
xe_vma_start(vma),
.vma = vma,
.wupd.entries = entries,
+ .clear_pt = clear_pt,
};
- struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
+ struct xe_pt *pt = vm->pt_root[tile->id];
int ret;
if (range) {
/* Move this entire thing to xe_svm.c? */
- xe_svm_notifier_lock(xe_vma_vm(vma));
+ xe_svm_notifier_lock(vm);
if (!xe_svm_range_pages_valid(range)) {
xe_svm_range_debug(range, "BIND PREPARE - RETRY");
- xe_svm_notifier_unlock(xe_vma_vm(vma));
+ xe_svm_notifier_unlock(vm);
return -EAGAIN;
}
if (xe_svm_range_has_dma_mapping(range)) {
xe_res_first_dma(range->base.dma_addr, 0,
range->base.itree.last + 1 - range->base.itree.start,
&curs);
- is_devmem = xe_res_is_vram(&curs);
- if (is_devmem)
- xe_svm_range_debug(range, "BIND PREPARE - DMA VRAM");
- else
- xe_svm_range_debug(range, "BIND PREPARE - DMA");
+ xe_svm_range_debug(range, "BIND PREPARE - MIXED");
} else {
xe_assert(xe, false);
}
@@ -674,54 +736,21 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
* Note, when unlocking the resource cursor dma addresses may become
* stale, but the bind will be aborted anyway at commit time.
*/
- xe_svm_notifier_unlock(xe_vma_vm(vma));
+ xe_svm_notifier_unlock(vm);
}
- xe_walk.needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_devmem;
+ xe_walk.needs_64K = (vm->flags & XE_VM_FLAG_64K);
+ if (clear_pt)
+ goto walk_pt;
- /**
- * Default atomic expectations for different allocation scenarios are as follows:
- *
- * 1. Traditional API: When the VM is not in LR mode:
- * - Device atomics are expected to function with all allocations.
- *
- * 2. Compute/SVM API: When the VM is in LR mode:
- * - Device atomics are the default behavior when the bo is placed in a single region.
- * - In all other cases device atomics will be disabled with AE=0 until an application
- * request differently using a ioctl like madvise.
- */
if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
- if (xe_vm_in_lr_mode(xe_vma_vm(vma))) {
- if (bo && xe_bo_has_single_placement(bo))
- xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
- /**
- * If a SMEM+LMEM allocation is backed by SMEM, a device
- * atomics will cause a gpu page fault and which then
- * gets migrated to LMEM, bind such allocations with
- * device atomics enabled.
- */
- else if (is_devmem)
- xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
- } else {
- xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
- }
-
- /**
- * Unset AE if the platform(PVC) doesn't support it on an
- * allocation
- */
- if (!xe->info.has_device_atomics_on_smem && !is_devmem)
- xe_walk.default_pte &= ~XE_USM_PPGTT_PTE_AE;
- }
-
- if (is_devmem) {
- xe_walk.default_pte |= XE_PPGTT_PTE_DM;
- xe_walk.dma_offset = bo ? vram_region_gpu_offset(bo->ttm.resource) : 0;
+ xe_walk.default_vram_pte = xe_atomic_for_vram(vm) ? XE_USM_PPGTT_PTE_AE : 0;
+ xe_walk.default_system_pte = xe_atomic_for_system(vm, bo) ?
+ XE_USM_PPGTT_PTE_AE : 0;
}
- if (!xe_vma_has_no_bo(vma) && xe_bo_is_stolen(bo))
- xe_walk.dma_offset = xe_ttm_stolen_gpu_offset(xe_bo_device(bo));
-
+ xe_walk.default_vram_pte |= XE_PPGTT_PTE_DM;
+ xe_walk.dma_offset = bo ? vram_region_gpu_offset(bo->ttm.resource) : 0;
if (!range)
xe_bo_assert_held(bo);
@@ -739,6 +768,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
curs.size = xe_vma_size(vma);
}
+walk_pt:
ret = xe_pt_walk_range(&pt->base, pt->level,
range ? range->base.itree.start : xe_vma_start(vma),
range ? range->base.itree.last + 1 : xe_vma_end(vma),
@@ -1103,12 +1133,14 @@ static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries,
static int
xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
struct xe_svm_range *range,
- struct xe_vm_pgtable_update *entries, u32 *num_entries)
+ struct xe_vm_pgtable_update *entries,
+ u32 *num_entries, bool invalidate_on_bind)
{
int err;
*num_entries = 0;
- err = xe_pt_stage_bind(tile, vma, range, entries, num_entries);
+ err = xe_pt_stage_bind(tile, vma, range, entries, num_entries,
+ invalidate_on_bind);
if (!err)
xe_tile_assert(tile, *num_entries);
@@ -1420,6 +1452,7 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
return err;
}
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
{
struct xe_vm *vm = pt_update->vops->vm;
@@ -1453,6 +1486,7 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
return 0;
}
+#endif
struct invalidation_fence {
struct xe_gt_tlb_invalidation_fence base;
@@ -1791,7 +1825,7 @@ static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma)
static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
- struct xe_vma *vma)
+ struct xe_vma *vma, bool invalidate_on_bind)
{
u32 current_op = pt_update_ops->current_op;
struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
@@ -1813,7 +1847,7 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
return err;
err = xe_pt_prepare_bind(tile, vma, NULL, pt_op->entries,
- &pt_op->num_entries);
+ &pt_op->num_entries, invalidate_on_bind);
if (!err) {
xe_tile_assert(tile, pt_op->num_entries <=
ARRAY_SIZE(pt_op->entries));
@@ -1835,11 +1869,11 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
* If !rebind, and scratch enabled VMs, there is a chance the scratch
* PTE is already cached in the TLB so it needs to be invalidated.
* On !LR VMs this is done in the ring ops preceding a batch, but on
- * non-faulting LR, in particular on user-space batch buffer chaining,
- * it needs to be done here.
+ * LR, in particular on user-space batch buffer chaining, it needs to
+ * be done here.
*/
if ((!pt_op->rebind && xe_vm_has_scratch(vm) &&
- xe_vm_in_preempt_fence_mode(vm)))
+ xe_vm_in_lr_mode(vm)))
pt_update_ops->needs_invalidation = true;
else if (pt_op->rebind && !xe_vm_in_lr_mode(vm))
/* We bump also if batch_invalidate_tlb is true */
@@ -1875,7 +1909,7 @@ static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile,
pt_op->rebind = BIT(tile->id) & range->tile_present;
err = xe_pt_prepare_bind(tile, vma, range, pt_op->entries,
- &pt_op->num_entries);
+ &pt_op->num_entries, false);
if (!err) {
xe_tile_assert(tile, pt_op->num_entries <=
ARRAY_SIZE(pt_op->entries));
@@ -1987,11 +2021,13 @@ static int op_prepare(struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
- if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
+ if ((!op->map.immediate && xe_vm_in_fault_mode(vm) &&
+ !op->map.invalidate_on_bind) ||
op->map.is_cpu_addr_mirror)
break;
- err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma);
+ err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma,
+ op->map.invalidate_on_bind);
pt_update_ops->wait_vm_kernel = true;
break;
case DRM_GPUVA_OP_REMAP:
@@ -2005,12 +2041,12 @@ static int op_prepare(struct xe_vm *vm,
if (!err && op->remap.prev) {
err = bind_op_prepare(vm, tile, pt_update_ops,
- op->remap.prev);
+ op->remap.prev, false);
pt_update_ops->wait_vm_bookkeep = true;
}
if (!err && op->remap.next) {
err = bind_op_prepare(vm, tile, pt_update_ops,
- op->remap.next);
+ op->remap.next, false);
pt_update_ops->wait_vm_bookkeep = true;
}
break;
@@ -2032,7 +2068,7 @@ static int op_prepare(struct xe_vm *vm,
if (xe_vma_is_cpu_addr_mirror(vma))
break;
- err = bind_op_prepare(vm, tile, pt_update_ops, vma);
+ err = bind_op_prepare(vm, tile, pt_update_ops, vma, false);
pt_update_ops->wait_vm_kernel = true;
break;
}
@@ -2115,7 +2151,7 @@ ALLOW_ERROR_INJECTION(xe_pt_update_ops_prepare, ERRNO);
static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_vma *vma, struct dma_fence *fence,
- struct dma_fence *fence2)
+ struct dma_fence *fence2, bool invalidate_on_bind)
{
xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
@@ -2132,6 +2168,8 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
}
vma->tile_present |= BIT(tile->id);
vma->tile_staged &= ~BIT(tile->id);
+ if (invalidate_on_bind)
+ vma->tile_invalidated |= BIT(tile->id);
if (xe_vma_is_userptr(vma)) {
lockdep_assert_held_read(&vm->userptr.notifier_lock);
to_userptr_vma(vma)->userptr.initial_bind = true;
@@ -2193,7 +2231,7 @@ static void op_commit(struct xe_vm *vm,
break;
bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
- fence2);
+ fence2, op->map.invalidate_on_bind);
break;
case DRM_GPUVA_OP_REMAP:
{
@@ -2206,10 +2244,10 @@ static void op_commit(struct xe_vm *vm,
if (op->remap.prev)
bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
- fence, fence2);
+ fence, fence2, false);
if (op->remap.next)
bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
- fence, fence2);
+ fence, fence2, false);
break;
}
case DRM_GPUVA_OP_UNMAP:
@@ -2227,7 +2265,7 @@ static void op_commit(struct xe_vm *vm,
if (!xe_vma_is_cpu_addr_mirror(vma))
bind_op_commit(vm, tile, pt_update_ops, vma, fence,
- fence2);
+ fence2, false);
break;
}
case DRM_GPUVA_OP_DRIVER:
@@ -2265,11 +2303,15 @@ static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
.pre_commit = xe_pt_userptr_pre_commit,
};
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
static const struct xe_migrate_pt_update_ops svm_migrate_ops = {
.populate = xe_vm_populate_pgtable,
.clear = xe_migrate_clear_pgtable_callback,
.pre_commit = xe_pt_svm_pre_commit,
};
+#else
+static const struct xe_migrate_pt_update_ops svm_migrate_ops;
+#endif
/**
* xe_pt_update_ops_run() - Run PT update operations
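
The #else branch above leans on a common kernel idiom: a file-scope const struct with no initializer is zero-filled, so the symbol still links while the code paths that would call through it are unreachable in that configuration. Generic shape of the idiom (illustrative names):

#if IS_ENABLED(CONFIG_FEATURE_X)
static const struct some_ops feature_ops = {
        .do_thing = feature_do_thing,
};
#else
/* Zero-initialized: all callbacks NULL, never invoked in this config */
static const struct some_ops feature_ops;
#endif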
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 5e65830dad25..2dbf4066d86f 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -340,7 +340,7 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
- if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_GPUSVM))
+ if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR;
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
index 9475e3f74958..fc8447a838c4 100644
--- a/drivers/gpu/drm/xe/xe_reg_sr.c
+++ b/drivers/gpu/drm/xe/xe_reg_sr.c
@@ -173,6 +173,9 @@ void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt)
if (xa_empty(&sr->xa))
return;
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ return;
+
xe_gt_dbg(gt, "Applying %s save-restore MMIOs\n", sr->name);
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/xe/xe_ring_ops_types.h b/drivers/gpu/drm/xe/xe_ring_ops_types.h
index 1ae56e2ee7b4..d7e3e150a9a5 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops_types.h
+++ b/drivers/gpu/drm/xe/xe_ring_ops_types.h
@@ -8,7 +8,7 @@
struct xe_sched_job;
-#define MAX_JOB_SIZE_DW 48
+#define MAX_JOB_SIZE_DW 58
#define MAX_JOB_SIZE_BYTES (MAX_JOB_SIZE_DW * 4)
/**
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 13bb62d3e615..29e694bb1219 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -258,9 +258,6 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
rtp_get_context(ctx, &hwe, &gt, &xe);
- if (IS_SRIOV_VF(xe))
- return;
-
xe_assert(xe, entries);
for (entry = entries; entry - entries < n_entries; entry++) {
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index f8fe61e25518..1d43e183ca21 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -60,7 +60,8 @@ struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u3
bo = xe_managed_bo_create_pin_map(xe, tile, size,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo)) {
drm_err(&xe->drm, "Failed to prepare %uKiB BO for SA manager (%pe)\n",
size / SZ_1K, bo);
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c
index cb813b337fd3..1f710b3fc599 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.c
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.c
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <linux/sysfs.h>
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_heci_gsc.h"
@@ -28,20 +29,32 @@
* This is implemented by loading the driver with bare minimum (no drm card) to allow the firmware
* to be flashed through mei and collect telemetry. The driver's probe flow is modified
* such that it enters survivability mode when pcode initialization is incomplete and boot status
- * denotes a failure. The driver then populates the survivability_mode PCI sysfs indicating
- * survivability mode and provides additional information required for debug
+ * denotes a failure.
*
- * KMD exposes below admin-only readable sysfs in survivability mode
+ * Survivability mode can also be entered manually using the survivability mode attribute available
+ * through configfs, which is beneficial in several use cases. It can be used to address scenarios
+ * where pcode does not detect failure or for validation purposes. It can also be used in
+ * In-Field-Repair (IFR) to repair a single card without impacting the other cards in a node.
*
- * device/survivability_mode: The presence of this file indicates that the card is in survivability
- * mode. Also, provides additional information on why the driver entered
- * survivability mode.
+ * Use the below command to enable survivability mode manually::
*
- * Capability Information - Provides boot status
- * Postcode Information - Provides information about the failure
- * Overflow Information - Provides history of previous failures
- * Auxiliary Information - Certain failures may have information in
- * addition to postcode information
+ * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
+ *
+ * Refer to :ref:`xe_configfs` for more details on how to use configfs.
+ *
+ * Survivability mode is indicated by the below admin-only readable sysfs file, which also
+ * provides additional debug information::
+ *
+ * /sys/bus/pci/devices/<device>/survivability_mode
+ *
+ * Capability Information:
+ * Provides boot status
+ * Postcode Information:
+ * Provides information about the failure
+ * Overflow Information:
+ * Provides history of previous failures
+ * Auxiliary Information:
+ * Certain failures may have information in addition to postcode information
*/
static u32 aux_history_offset(u32 reg_value)
@@ -133,6 +146,7 @@ static void xe_survivability_mode_fini(void *arg)
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct device *dev = &pdev->dev;
+ xe_configfs_clear_survivability_mode(pdev);
sysfs_remove_file(&dev->kobj, &dev_attr_survivability_mode.attr);
}
@@ -186,24 +200,41 @@ bool xe_survivability_mode_is_enabled(struct xe_device *xe)
return xe->survivability.mode;
}
-/*
- * survivability_mode_requested - check if it's possible to enable
- * survivability mode and that was requested by firmware
+/**
+ * xe_survivability_mode_is_requested - check if it's possible to enable survivability
+ * mode that was requested by firmware or userspace
+ * @xe: xe device instance
*
- * This function reads the boot status from Pcode.
+ * This function reads configfs and boot status from Pcode.
*
* Return: true if platform support is available and boot status indicates
- * failure, false otherwise.
+ * failure or if survivability mode is requested, false otherwise.
*/
-static bool survivability_mode_requested(struct xe_device *xe)
+bool xe_survivability_mode_is_requested(struct xe_device *xe)
{
struct xe_survivability *survivability = &xe->survivability;
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
u32 data;
+ bool survivability_mode;
- if (!IS_DGFX(xe) || xe->info.platform < XE_BATTLEMAGE || IS_SRIOV_VF(xe))
+ if (!IS_DGFX(xe) || IS_SRIOV_VF(xe))
return false;
+ survivability_mode = xe_configfs_get_survivability_mode(pdev);
+
+ if (xe->info.platform < XE_BATTLEMAGE) {
+ if (survivability_mode) {
+ dev_err(&pdev->dev, "Survivability Mode is not supported on this card\n");
+ xe_configfs_clear_survivability_mode(pdev);
+ }
+ return false;
+ }
+
+ /* Enable survivability mode if set via configfs */
+ if (survivability_mode)
+ return true;
+
data = xe_mmio_read32(mmio, PCODE_SCRATCH(0));
survivability->boot_status = REG_FIELD_GET(BOOT_STATUS, data);
@@ -226,7 +257,7 @@ int xe_survivability_mode_enable(struct xe_device *xe)
struct xe_survivability_info *info;
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- if (!survivability_mode_requested(xe))
+ if (!xe_survivability_mode_is_requested(xe))
return 0;
survivability->size = MAX_SCRATCH_MMIO;
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.h b/drivers/gpu/drm/xe/xe_survivability_mode.h
index d7e64885570d..02231c2bf008 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.h
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.h
@@ -12,5 +12,6 @@ struct xe_device;
int xe_survivability_mode_enable(struct xe_device *xe);
bool xe_survivability_mode_is_enabled(struct xe_device *xe);
+bool xe_survivability_mode_is_requested(struct xe_device *xe);
#endif /* _XE_SURVIVABILITY_MODE_H_ */
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 975094c1a582..6345896585de 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -4,6 +4,7 @@
*/
#include "xe_bo.h"
+#include "xe_gt_stats.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_module.h"
@@ -348,6 +349,8 @@ static void xe_svm_garbage_collector_work_func(struct work_struct *w)
up_write(&vm->lock);
}
+#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+
static struct xe_vram_region *page_to_vr(struct page *page)
{
return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
@@ -586,6 +589,8 @@ static const struct drm_gpusvm_devmem_ops gpusvm_devmem_ops = {
.copy_to_ram = xe_svm_copy_to_ram,
};
+#endif
+
static const struct drm_gpusvm_ops gpusvm_ops = {
.range_alloc = xe_svm_range_alloc,
.range_free = xe_svm_range_free,
@@ -666,6 +671,7 @@ static bool xe_svm_range_is_valid(struct xe_svm_range *range,
(!devmem_only || xe_svm_range_in_vram(range));
}
+#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
{
return &tile->mem.vram;
@@ -727,6 +733,14 @@ unlock:
return err;
}
+#else
+static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ return -EOPNOTSUPP;
+}
+#endif
static bool supports_4K_migration(struct xe_device *xe)
{
@@ -762,7 +776,7 @@ static bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range,
* xe_svm_handle_pagefault() - SVM handle page fault
* @vm: The VM.
* @vma: The CPU address mirror VMA.
- * @tile: The tile upon the fault occurred.
+ * @gt: The gt upon which the fault occurred.
* @fault_addr: The GPU fault address.
* @atomic: The fault atomic access bit.
*
@@ -772,7 +786,7 @@ static bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range,
* Return: 0 on success, negative error code on error.
*/
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_tile *tile, u64 fault_addr,
+ struct xe_gt *gt, u64 fault_addr,
bool atomic)
{
struct drm_gpusvm_ctx ctx = {
@@ -791,12 +805,15 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
struct drm_exec exec;
struct dma_fence *fence;
int migrate_try_count = ctx.devmem_only ? 3 : 1;
+ struct xe_tile *tile = gt_to_tile(gt);
ktime_t end = 0;
int err;
lockdep_assert_held_write(&vm->lock);
xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
+
retry:
/* Always process UNMAPs first so view SVM ranges is current */
err = xe_svm_garbage_collector(vm);
@@ -930,6 +947,7 @@ int xe_svm_bo_evict(struct xe_bo *bo)
}
#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+
static struct drm_pagemap_device_addr
xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
struct device *dev,
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index fe58ac2f4baa..30fc78b85b30 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -6,16 +6,19 @@
#ifndef _XE_SVM_H_
#define _XE_SVM_H_
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
+
#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>
#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER
struct xe_bo;
-struct xe_vram_region;
+struct xe_gt;
struct xe_tile;
struct xe_vm;
struct xe_vma;
+struct xe_vram_region;
/** struct xe_svm_range - SVM range */
struct xe_svm_range {
@@ -38,7 +41,6 @@ struct xe_svm_range {
u8 tile_invalidated;
};
-#if IS_ENABLED(CONFIG_DRM_GPUSVM)
/**
* xe_svm_range_pages_valid() - SVM range pages valid
* @range: SVM range
@@ -59,7 +61,7 @@ void xe_svm_fini(struct xe_vm *vm);
void xe_svm_close(struct xe_vm *vm);
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_tile *tile, u64 fault_addr,
+ struct xe_gt *gt, u64 fault_addr,
bool atomic);
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
@@ -68,9 +70,51 @@ int xe_svm_bo_evict(struct xe_bo *bo);
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
+/**
+ * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
+ * @range: SVM range
+ *
+ * Return: True if SVM range has a DMA mapping, False otherwise
+ */
+static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
+{
+ lockdep_assert_held(&range->base.gpusvm->notifier_lock);
+ return range->base.flags.has_dma_mapping;
+}
+
+#define xe_svm_assert_in_notifier(vm__) \
+ lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
+
+#define xe_svm_notifier_lock(vm__) \
+ drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
+
+#define xe_svm_notifier_unlock(vm__) \
+ drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
+
void xe_svm_flush(struct xe_vm *vm);
#else
+#include <linux/interval_tree.h>
+
+struct drm_pagemap_device_addr;
+struct xe_bo;
+struct xe_gt;
+struct xe_vm;
+struct xe_vma;
+struct xe_tile;
+struct xe_vram_region;
+
+#define XE_INTERCONNECT_VRAM 1
+
+struct xe_svm_range {
+ struct {
+ struct interval_tree_node itree;
+ const struct drm_pagemap_device_addr *dma_addr;
+ } base;
+ u32 tile_present;
+ u32 tile_invalidated;
+};
+
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
return false;
@@ -100,7 +144,7 @@ void xe_svm_close(struct xe_vm *vm)
static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_tile *tile, u64 fault_addr,
+ struct xe_gt *gt, u64 fault_addr,
bool atomic)
{
return 0;
@@ -123,31 +167,19 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}
-static inline void xe_svm_flush(struct xe_vm *vm)
+#define xe_svm_assert_in_notifier(...) do {} while (0)
+#define xe_svm_range_has_dma_mapping(...) false
+
+static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}
-#endif
-
-/**
- * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
- * @range: SVM range
- *
- * Return: True if SVM range has a DMA mapping, False otherwise
- */
-static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
+static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
- lockdep_assert_held(&range->base.gpusvm->notifier_lock);
- return range->base.flags.has_dma_mapping;
}
-#define xe_svm_assert_in_notifier(vm__) \
- lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
-
-#define xe_svm_notifier_lock(vm__) \
- drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
-
-#define xe_svm_notifier_unlock(vm__) \
- drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
-
+static inline void xe_svm_flush(struct xe_vm *vm)
+{
+}
+#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index fb0eda3d5682..2741849bbf4d 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -92,6 +92,8 @@
struct uc_fw_entry {
enum xe_platform platform;
+ enum xe_gt_type gt_type;
+
struct {
const char *path;
u16 major;
@@ -106,32 +108,37 @@ struct fw_blobs_by_type {
u32 count;
};
-#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
- fw_def(BATTLEMAGE, major_ver(xe, guc, bmg, 70, 29, 2)) \
- fw_def(LUNARLAKE, major_ver(xe, guc, lnl, 70, 29, 2)) \
- fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 29, 2)) \
- fw_def(DG2, major_ver(i915, guc, dg2, 70, 29, 2)) \
- fw_def(DG1, major_ver(i915, guc, dg1, 70, 29, 2)) \
- fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 29, 2)) \
- fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 29, 2)) \
- fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 29, 2)) \
- fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 29, 2)) \
- fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 29, 2))
+/*
+ * Add an "ANY" define just to convey the meaning it's given here.
+ */
+#define XE_GT_TYPE_ANY XE_GT_TYPE_UNINITIALIZED
+
+#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
+ fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 44, 1)) \
+ fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 44, 1)) \
+ fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, guc, mtl, 70, 44, 1)) \
+ fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 44, 1)) \
+ fw_def(DG1, GT_TYPE_ANY, major_ver(i915, guc, dg1, 70, 44, 1)) \
+ fw_def(ALDERLAKE_N, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \
+ fw_def(ALDERLAKE_P, GT_TYPE_ANY, major_ver(i915, guc, adlp, 70, 44, 1)) \
+ fw_def(ALDERLAKE_S, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \
+ fw_def(ROCKETLAKE, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \
+ fw_def(TIGERLAKE, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1))
#define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \
- fw_def(BATTLEMAGE, no_ver(xe, huc, bmg)) \
- fw_def(LUNARLAKE, no_ver(xe, huc, lnl)) \
- fw_def(METEORLAKE, no_ver(i915, huc_gsc, mtl)) \
- fw_def(DG1, no_ver(i915, huc, dg1)) \
- fw_def(ALDERLAKE_P, no_ver(i915, huc, tgl)) \
- fw_def(ALDERLAKE_S, no_ver(i915, huc, tgl)) \
- fw_def(ROCKETLAKE, no_ver(i915, huc, tgl)) \
- fw_def(TIGERLAKE, no_ver(i915, huc, tgl))
+ fw_def(BATTLEMAGE, GT_TYPE_ANY, no_ver(xe, huc, bmg)) \
+ fw_def(LUNARLAKE, GT_TYPE_ANY, no_ver(xe, huc, lnl)) \
+ fw_def(METEORLAKE, GT_TYPE_ANY, no_ver(i915, huc_gsc, mtl)) \
+ fw_def(DG1, GT_TYPE_ANY, no_ver(i915, huc, dg1)) \
+ fw_def(ALDERLAKE_P, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \
+ fw_def(ALDERLAKE_S, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \
+ fw_def(ROCKETLAKE, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \
+ fw_def(TIGERLAKE, GT_TYPE_ANY, no_ver(i915, huc, tgl))
/* for the GSC FW we match the compatibility version and not the release one */
#define XE_GSC_FIRMWARE_DEFS(fw_def, major_ver) \
- fw_def(LUNARLAKE, major_ver(xe, gsc, lnl, 104, 1, 0)) \
- fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 102, 1, 0))
+ fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, gsc, lnl, 104, 1, 0)) \
+ fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, gsc, mtl, 102, 1, 0))
#define MAKE_FW_PATH(dir__, uc__, shortname__, version__) \
__stringify(dir__) "/" __stringify(shortname__) "_" __stringify(uc__) version__ ".bin"
@@ -159,12 +166,13 @@ struct fw_blobs_by_type {
a, b, c }
/* All blobs need to be declared via MODULE_FIRMWARE() */
-#define XE_UC_MODULE_FIRMWARE(platform__, fw_filename) \
+#define XE_UC_MODULE_FIRMWARE(platform__, gt_type__, fw_filename) \
MODULE_FIRMWARE(fw_filename);
-#define XE_UC_FW_ENTRY(platform__, entry__) \
+#define XE_UC_FW_ENTRY(platform__, gt_type__, entry__) \
{ \
.platform = XE_ ## platform__, \
+ .gt_type = XE_ ## gt_type__, \
entry__, \
},
@@ -222,30 +230,38 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw)
[XE_UC_FW_TYPE_HUC] = { entries_huc, ARRAY_SIZE(entries_huc) },
[XE_UC_FW_TYPE_GSC] = { entries_gsc, ARRAY_SIZE(entries_gsc) },
};
- static const struct uc_fw_entry *entries;
+ struct xe_gt *gt = uc_fw_to_gt(uc_fw);
enum xe_platform p = xe->info.platform;
+ const struct uc_fw_entry *entries;
u32 count;
int i;
- xe_assert(xe, uc_fw->type < ARRAY_SIZE(blobs_all));
+ xe_gt_assert(gt, uc_fw->type < ARRAY_SIZE(blobs_all));
+ xe_gt_assert(gt, gt->info.type != XE_GT_TYPE_UNINITIALIZED);
+
entries = blobs_all[uc_fw->type].entries;
count = blobs_all[uc_fw->type].count;
for (i = 0; i < count && p <= entries[i].platform; i++) {
- if (p == entries[i].platform) {
- uc_fw->path = entries[i].path;
- uc_fw->versions.wanted.major = entries[i].major;
- uc_fw->versions.wanted.minor = entries[i].minor;
- uc_fw->versions.wanted.patch = entries[i].patch;
- uc_fw->full_ver_required = entries[i].full_ver_required;
-
- if (uc_fw->type == XE_UC_FW_TYPE_GSC)
- uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY;
- else
- uc_fw->versions.wanted_type = XE_UC_FW_VER_RELEASE;
-
- break;
- }
+ if (p != entries[i].platform)
+ continue;
+
+ if (entries[i].gt_type != XE_GT_TYPE_ANY &&
+ entries[i].gt_type != gt->info.type)
+ continue;
+
+ uc_fw->path = entries[i].path;
+ uc_fw->versions.wanted.major = entries[i].major;
+ uc_fw->versions.wanted.minor = entries[i].minor;
+ uc_fw->versions.wanted.patch = entries[i].patch;
+ uc_fw->full_ver_required = entries[i].full_ver_required;
+
+ if (uc_fw->type == XE_UC_FW_TYPE_GSC)
+ uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY;
+ else
+ uc_fw->versions.wanted_type = XE_UC_FW_VER_RELEASE;
+
+ break;
}
}
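
The selection loop depends on the entry table being sorted by descending platform id, with GT-specific rows placed before the GT_TYPE_ANY wildcard for the same platform. A standalone sketch of the same scan (illustrative names and values):

#include <stdio.h>

enum { GT_ANY = 0, GT_MAIN, GT_MEDIA };

struct entry { int platform; int gt_type; const char *path; };

/* Sorted by descending platform; specific gt_type rows come first */
static const struct entry entries[] = {
        { 3, GT_MEDIA, "fw_p3_media.bin" },
        { 3, GT_ANY,   "fw_p3.bin"       },
        { 2, GT_ANY,   "fw_p2.bin"       },
};

static const char *select_fw(int p, int gt)
{
        unsigned int i;

        for (i = 0; i < sizeof(entries) / sizeof(entries[0]) &&
             p <= entries[i].platform; i++) {
                if (p != entries[i].platform)
                        continue;
                if (entries[i].gt_type != GT_ANY && entries[i].gt_type != gt)
                        continue;
                return entries[i].path;
        }
        return NULL;
}

int main(void)
{
        printf("%s\n", select_fw(3, GT_MEDIA)); /* fw_p3_media.bin */
        printf("%s\n", select_fw(3, GT_MAIN));  /* fw_p3.bin (wildcard) */
        return 0;
}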
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 367c84b90e9e..79323c78130f 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2049,7 +2049,8 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
- args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
+ args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
+ !xe->info.needs_scratch))
return -EINVAL;
if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
@@ -2201,6 +2202,20 @@ static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
}
#endif
+static bool __xe_vm_needs_clear_scratch_pages(struct xe_vm *vm, u32 bind_flags)
+{
+ if (!xe_vm_in_fault_mode(vm))
+ return false;
+
+ if (!xe_vm_has_scratch(vm))
+ return false;
+
+ if (bind_flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE)
+ return false;
+
+ return true;
+}
+
/*
* Create operations list from IOCTL arguments, setup operations fields so parse
* and commit steps are decoupled from IOCTL arguments. This step can fail.
@@ -2273,6 +2288,8 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
op->map.pat_index = pat_index;
+ op->map.invalidate_on_bind =
+ __xe_vm_needs_clear_scratch_pages(vm, flags);
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
op->prefetch.region = prefetch_region;
}
@@ -2472,8 +2489,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
return PTR_ERR(vma);
op->map.vma = vma;
- if ((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
- !op->map.is_cpu_addr_mirror)
+ if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
+ !op->map.is_cpu_addr_mirror) ||
+ op->map.invalidate_on_bind)
xe_vma_ops_incr_pt_update_ops(vops,
op->tile_mask);
break;
@@ -2726,9 +2744,10 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
- err = vma_lock_and_validate(exec, op->map.vma,
- !xe_vm_in_fault_mode(vm) ||
- op->map.immediate);
+ if (!op->map.invalidate_on_bind)
+ err = vma_lock_and_validate(exec, op->map.vma,
+ !xe_vm_in_fault_mode(vm) ||
+ op->map.immediate);
break;
case DRM_GPUVA_OP_REMAP:
err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
@@ -3082,9 +3101,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
if (!*bind_ops)
return args->num_binds > 1 ? -ENOBUFS : -ENOMEM;
- err = __copy_from_user(*bind_ops, bind_user,
- sizeof(struct drm_xe_vm_bind_op) *
- args->num_binds);
+ err = copy_from_user(*bind_ops, bind_user,
+ sizeof(struct drm_xe_vm_bind_op) *
+ args->num_binds);
if (XE_IOCTL_DBG(xe, err)) {
err = -EFAULT;
goto free_bind_ops;
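
Switching from __copy_from_user() to copy_from_user() adds the access_ok() range validation that the double-underscore variant skips. A hedged sketch of the checked pattern (illustrative helper name):

static int fetch_bind_ops(struct drm_xe_vm_bind_op *dst,
                          const void __user *src, u32 num_binds)
{
        /* copy_from_user() validates the user range and returns the
         * number of bytes left uncopied; nonzero means fault */
        if (copy_from_user(dst, src, sizeof(*dst) * num_binds))
                return -EFAULT;

        return 0;
}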
@@ -3109,7 +3128,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
if (XE_IOCTL_DBG(xe, is_cpu_addr_mirror &&
(!xe_vm_in_fault_mode(vm) ||
- !IS_ENABLED(CONFIG_DRM_GPUSVM)))) {
+ !IS_ENABLED(CONFIG_DRM_XE_GPUSVM)))) {
err = -EINVAL;
goto free_bind_ops;
}
@@ -3243,7 +3262,7 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
XE_64K_PAGE_MASK) ||
XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
- return -EINVAL;
+ return -EINVAL;
}
}
@@ -3251,7 +3270,7 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
if (bo->cpu_caching) {
if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
- return -EINVAL;
+ return -EINVAL;
}
} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
/*
@@ -3260,7 +3279,7 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
* how it was mapped on the CPU. Just assume is it
* potentially cached on CPU side.
*/
- return -EINVAL;
+ return -EINVAL;
}
/* If a BO is protected it can only be mapped if the key is still valid */
@@ -3846,6 +3865,9 @@ void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
}
drm_puts(p, "\n");
+
+ if (drm_coredump_printer_is_full(p))
+ return;
}
}
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 84fa41b9fa20..1662604c4486 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -330,6 +330,8 @@ struct xe_vma_op_map {
bool is_cpu_addr_mirror;
/** @dumpable: whether BO is dumped on GPU hang */
bool dumpable;
+ /** @invalidate_on_bind: invalidate the VMA before bind */
+ bool invalidate_on_bind;
/** @pat_index: The pat index to use for this operation. */
u16 pat_index;
};
diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
index b1f81dca610d..e421a74fb87c 100644
--- a/drivers/gpu/drm/xe/xe_vram.c
+++ b/drivers/gpu/drm/xe/xe_vram.c
@@ -49,7 +49,7 @@ _resize_bar(struct xe_device *xe, int resno, resource_size_t size)
*/
static void resize_vram_bar(struct xe_device *xe)
{
- u64 force_vram_bar_size = xe_modparam.force_vram_bar_size;
+ int force_vram_bar_size = xe_modparam.force_vram_bar_size;
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct pci_bus *root = pdev->bus;
resource_size_t current_size;
@@ -66,6 +66,9 @@ static void resize_vram_bar(struct xe_device *xe)
if (!bar_size_mask)
return;
+ if (force_vram_bar_size < 0)
+ return;
+
/* set to a specific size? */
if (force_vram_bar_size) {
u32 bar_size_bit;
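
The u64 to int change matters because a negative-value check against an unsigned type can never fire, so the new force_vram_bar_size < 0 early-out would be dead code without it. A tiny userspace demonstration:

#include <stdio.h>

static int is_negative_u(unsigned long long v) { return v < 0; } /* always 0 */
static int is_negative_s(long long v) { return v < 0; }

int main(void)
{
        /* -1 converts to ULLONG_MAX for the unsigned parameter */
        printf("%d %d\n", is_negative_u(-1), is_negative_s(-1)); /* 0 1 */
        return 0;
}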
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 2f833f0d575f..67196baa4249 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -230,6 +230,18 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
+ /* Xe2_HPG */
+
+ { XE_RTP_NAME("16025250150"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001)),
+ XE_RTP_ACTIONS(SET(LSN_VC_REG2,
+ LSN_LNI_WGT(1) |
+ LSN_LNE_WGT(1) |
+ LSN_DIM_X_WGT(1) |
+ LSN_DIM_Y_WGT(1) |
+ LSN_DIM_Z_WGT(1)))
+ },
+
/* Xe2_HPM */
{ XE_RTP_NAME("16021867713"),
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 9b9e176992a8..9efc5accd43d 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -57,3 +57,5 @@ no_media_l3 MEDIA_VERSION(3000)
GRAPHICS_VERSION(1260), GRAPHICS_STEP(A0, B0)
16023105232 GRAPHICS_VERSION_RANGE(2001, 3001)
MEDIA_VERSION_RANGE(1301, 3000)
+16026508708 GRAPHICS_VERSION_RANGE(1200, 3001)
+ MEDIA_VERSION_RANGE(1300, 3000)
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index dbecca9bdd54..cfabf5e2a0bb 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -22,6 +22,7 @@ config DRM_ZYNQMP_DPSUB_AUDIO
bool "ZynqMP DisplayPort Audio Support"
depends on DRM_ZYNQMP_DPSUB
depends on SND && SND_SOC
+ depends on SND_SOC=y || DRM_ZYNQMP_DPSUB=m
select SND_SOC_GENERIC_DMAENGINE_PCM
help
Choose this option to enable DisplayPort audio support in the ZynqMP
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index a6a4a871f197..238cbb49963e 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -1481,6 +1481,7 @@ static void zynqmp_dp_disp_disable(struct zynqmp_dp *dp,
*/
static int zynqmp_dp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
@@ -1494,7 +1495,7 @@ static int zynqmp_dp_bridge_attach(struct drm_bridge *bridge,
}
if (dp->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, dp->next_bridge,
+ ret = drm_bridge_attach(encoder, dp->next_bridge,
bridge, flags);
if (ret < 0)
goto error;
@@ -2466,10 +2467,8 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
dp->reset = devm_reset_control_get(dp->dev, NULL);
if (IS_ERR(dp->reset)) {
- if (PTR_ERR(dp->reset) != -EPROBE_DEFER)
- dev_err(dp->dev, "failed to get reset: %ld\n",
- PTR_ERR(dp->reset));
- ret = PTR_ERR(dp->reset);
+ ret = dev_err_probe(dp->dev, PTR_ERR(dp->reset),
+ "failed to get reset\n");
goto err_free;
}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
index f07ff4eb3a6d..1a46a046103f 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
@@ -45,7 +45,6 @@ struct zynqmp_dpsub_audio {
struct {
struct snd_soc_dai_link_component cpu;
- struct snd_soc_dai_link_component codec;
struct snd_soc_dai_link_component platform;
} components[ZYNQMP_NUM_PCMS];
@@ -403,10 +402,8 @@ int zynqmp_audio_init(struct zynqmp_dpsub *dpsub)
link->num_cpus = 1;
link->cpus[0].dai_name = audio->dai_name;
- link->codecs = &audio->components[i].codec;
+ link->codecs = &snd_soc_dummy_dlc;
link->num_codecs = 1;
- link->codecs[0].name = "snd-soc-dummy";
- link->codecs[0].dai_name = "snd-soc-dummy-dai";
link->platforms = &audio->components[i].platform;
link->num_platforms = 1;
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 8e09d6d328d2..344cc9e741c1 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -41,7 +41,6 @@ static int host1x_subdev_add(struct host1x_device *device,
struct device_node *np)
{
struct host1x_subdev *subdev;
- struct device_node *child;
int err;
subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
@@ -56,13 +55,12 @@ static int host1x_subdev_add(struct host1x_device *device,
mutex_unlock(&device->subdevs_lock);
/* recursively add children */
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
if (of_match_node(driver->subdevs, child) &&
of_device_is_available(child)) {
err = host1x_subdev_add(device, driver, child);
if (err < 0) {
/* XXX cleanup? */
- of_node_put(child);
return err;
}
}
@@ -90,17 +88,14 @@ static void host1x_subdev_del(struct host1x_subdev *subdev)
static int host1x_device_parse_dt(struct host1x_device *device,
struct host1x_driver *driver)
{
- struct device_node *np;
int err;
- for_each_child_of_node(device->dev.parent->of_node, np) {
+ for_each_child_of_node_scoped(device->dev.parent->of_node, np) {
if (of_match_node(driver->subdevs, np) &&
of_device_is_available(np)) {
err = host1x_subdev_add(device, driver, np);
- if (err < 0) {
- of_node_put(np);
+ if (err < 0)
return err;
- }
}
}
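
for_each_child_of_node_scoped() ties the child node reference to the loop scope, so it is dropped automatically on every exit path, including early returns, which is what makes the explicit of_node_put() calls removable above. A hedged sketch (illustrative helper):

#include <linux/errno.h>
#include <linux/of.h>

static int count_enabled_children(struct device_node *parent)
{
        int n = 0;

        for_each_child_of_node_scoped(parent, child) {
                if (!of_device_is_available(child))
                        continue;
                if (++n > 64)
                        return -E2BIG;  /* early return: no of_node_put() needed */
        }

        return n;
}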
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index 407ed9b9cf64..ba2e572567c0 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -247,8 +247,6 @@ static int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
trace_host1x_wait_cdma(dev_name(cdma_to_channel(cdma)->dev),
CDMA_EVENT_PUSH_BUFFER_SPACE);
- host1x_hw_cdma_flush(host1x, cdma);
-
/* If somebody has managed to already start waiting, yield */
if (cdma->event != CDMA_EVENT_NONE) {
mutex_unlock(&cdma->lock);
@@ -591,7 +589,6 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
*/
void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
{
- struct host1x *host1x = cdma_to_host1x(cdma);
struct push_buffer *pb = &cdma->push_buffer;
u32 slots_free = cdma->slots_free;
@@ -599,11 +596,9 @@ void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
trace_host1x_cdma_push(dev_name(cdma_to_channel(cdma)->dev),
op1, op2);
- if (slots_free == 0) {
- host1x_hw_cdma_flush(host1x, cdma);
+ if (slots_free == 0)
slots_free = host1x_cdma_wait_locked(cdma,
CDMA_EVENT_PUSH_BUFFER_SPACE);
- }
cdma->slots_free = slots_free - 1;
cdma->slots_used++;
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index fa77e4e64f12..333f36e0a715 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1008,7 +1008,7 @@ int ipu_map_irq(struct ipu_soc *ipu, int irq)
{
int virq;
- virq = irq_linear_revmap(ipu->domain, irq);
+ virq = irq_find_mapping(ipu->domain, irq);
if (!virq)
virq = irq_create_mapping(ipu->domain, irq);
@@ -1169,8 +1169,8 @@ static int ipu_irq_init(struct ipu_soc *ipu)
};
int ret, i;
- ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
- &irq_generic_chip_ops, ipu);
+ ipu->domain = irq_domain_create_linear(of_fwnode_handle(ipu->dev->of_node), IPU_NUM_IRQS,
+ &irq_generic_chip_ops, ipu);
if (!ipu->domain) {
dev_err(ipu->dev, "failed to add irq domain\n");
return -ENODEV;
@@ -1219,7 +1219,7 @@ static void ipu_irq_exit(struct ipu_soc *ipu)
/* TODO: remove irq_domain_generic_chips */
for (i = 0; i < IPU_NUM_IRQS; i++) {
- irq = irq_linear_revmap(ipu->domain, i);
+ irq = irq_find_mapping(ipu->domain, i);
if (irq)
irq_dispose_mapping(irq);
}
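
The migration pattern here: irq_domain_create_linear() takes a fwnode handle rather than a device_node, and irq_find_mapping() replaces the deprecated irq_linear_revmap(). A hedged sketch combining both (illustrative wrapper):

#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static int map_one_hwirq(struct device_node *np, unsigned int nr_irqs,
                         irq_hw_number_t hwirq)
{
        struct irq_domain *dom;
        unsigned int virq;

        dom = irq_domain_create_linear(of_fwnode_handle(np), nr_irqs,
                                       &irq_generic_chip_ops, NULL);
        if (!dom)
                return -ENODEV;

        virq = irq_find_mapping(dom, hwirq);    /* replaces irq_linear_revmap() */
        if (!virq)
                virq = irq_create_mapping(dom, hwirq);

        return virq ? (int)virq : -ENXIO;
}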
diff --git a/drivers/gpu/nova-core/Kconfig b/drivers/gpu/nova-core/Kconfig
index ad0c06756516..8726d80d6ba4 100644
--- a/drivers/gpu/nova-core/Kconfig
+++ b/drivers/gpu/nova-core/Kconfig
@@ -3,6 +3,7 @@ config NOVA_CORE
depends on PCI
depends on RUST
depends on RUST_FW_LOADER_ABSTRACTIONS
+ select AUXILIARY_BUS
default n
help
Choose this if you want to build the Nova Core driver for Nvidia
diff --git a/drivers/gpu/nova-core/driver.rs b/drivers/gpu/nova-core/driver.rs
index a08fb6599267..8c86101c26cb 100644
--- a/drivers/gpu/nova-core/driver.rs
+++ b/drivers/gpu/nova-core/driver.rs
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-use kernel::{bindings, c_str, device::Core, pci, prelude::*};
+use kernel::{auxiliary, bindings, c_str, device::Core, pci, prelude::*};
use crate::gpu::Gpu;
@@ -8,6 +8,7 @@ use crate::gpu::Gpu;
pub(crate) struct NovaCore {
#[pin]
pub(crate) gpu: Gpu,
+ _reg: auxiliary::Registration,
}
const BAR0_SIZE: usize = 8;
@@ -38,6 +39,12 @@ impl pci::Driver for NovaCore {
let this = KBox::pin_init(
try_pin_init!(Self {
gpu <- Gpu::new(pdev, bar)?,
+ _reg: auxiliary::Registration::new(
+ pdev.as_ref(),
+ c_str!("nova-drm"),
+ 0, // TODO: Once it lands, use XArray; for now we don't use the ID.
+ crate::MODULE_NAME
+ )?,
}),
GFP_KERNEL,
)?;
diff --git a/drivers/gpu/nova-core/firmware.rs b/drivers/gpu/nova-core/firmware.rs
index 6e6361c59ca1..4b8a38358a4f 100644
--- a/drivers/gpu/nova-core/firmware.rs
+++ b/drivers/gpu/nova-core/firmware.rs
@@ -1,13 +1,49 @@
// SPDX-License-Identifier: GPL-2.0
-use crate::gpu;
+//! Contains structures and functions dedicated to the parsing, building, and patching of
+//! firmware images to be loaded into a given execution unit.
+
+use kernel::device;
use kernel::firmware;
+use kernel::prelude::*;
+use kernel::str::CString;
+
+use crate::gpu;
+use crate::gpu::Chipset;
+
+pub(crate) const FIRMWARE_VERSION: &str = "535.113.01";
+
+/// Structure encapsulating the firmware blobs required for the GPU to operate.
+#[expect(dead_code)]
+pub(crate) struct Firmware {
+ booter_load: firmware::Firmware,
+ booter_unload: firmware::Firmware,
+ bootloader: firmware::Firmware,
+ gsp: firmware::Firmware,
+}
+
+impl Firmware {
+ pub(crate) fn new(dev: &device::Device, chipset: Chipset, ver: &str) -> Result<Firmware> {
+ let mut chip_name = CString::try_from_fmt(fmt!("{}", chipset))?;
+ chip_name.make_ascii_lowercase();
+
+ let request = |name_| {
+ CString::try_from_fmt(fmt!("nvidia/{}/gsp/{}-{}.bin", &*chip_name, name_, ver))
+ .and_then(|path| firmware::Firmware::request(&path, dev))
+ };
+
+ Ok(Firmware {
+ booter_load: request("booter_load")?,
+ booter_unload: request("booter_unload")?,
+ bootloader: request("bootloader")?,
+ gsp: request("gsp")?,
+ })
+ }
+}
pub(crate) struct ModInfoBuilder<const N: usize>(firmware::ModInfoBuilder<N>);
impl<const N: usize> ModInfoBuilder<N> {
- const VERSION: &'static str = "535.113.01";
-
const fn make_entry_file(self, chipset: &str, fw: &str) -> Self {
ModInfoBuilder(
self.0
@@ -17,7 +53,7 @@ impl<const N: usize> ModInfoBuilder<N> {
.push("/gsp/")
.push(fw)
.push("-")
- .push(Self::VERSION)
+ .push(FIRMWARE_VERSION)
.push(".bin"),
)
}
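
For reference, the four blobs requested by Firmware::new() above share a single
path pattern. A minimal standalone Rust sketch of that pattern (plain std, not
the kernel crate; the helper name is ours), assuming a GA102 chipset:

    // Mirrors the fmt! in Firmware::new(): nvidia/<chip>/gsp/<name>-<ver>.bin
    fn fw_path(chipset: &str, name: &str, ver: &str) -> String {
        // Chipset names print upper-case and are lowered before use.
        let chip = chipset.to_ascii_lowercase();
        format!("nvidia/{}/gsp/{}-{}.bin", chip, name, ver)
    }

    fn main() {
        for name in ["booter_load", "booter_unload", "bootloader", "gsp"] {
            // e.g. "nvidia/ga102/gsp/booter_load-535.113.01.bin"
            println!("{}", fw_path("GA102", name, "535.113.01"));
        }
    }
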
diff --git a/drivers/gpu/nova-core/gpu.rs b/drivers/gpu/nova-core/gpu.rs
index ab0e5a72a059..60b86f370284 100644
--- a/drivers/gpu/nova-core/gpu.rs
+++ b/drivers/gpu/nova-core/gpu.rs
@@ -1,10 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
-use kernel::{
- device, devres::Devres, error::code::*, firmware, fmt, pci, prelude::*, str::CString,
-};
+use kernel::{device, devres::Devres, error::code::*, pci, prelude::*};
use crate::driver::Bar0;
+use crate::firmware::{Firmware, FIRMWARE_VERSION};
use crate::regs;
use crate::util;
use core::fmt;
@@ -13,7 +12,7 @@ macro_rules! define_chipset {
({ $($variant:ident = $value:expr),* $(,)* }) =>
{
/// Enum representation of the GPU chipset.
- #[derive(fmt::Debug)]
+ #[derive(fmt::Debug, Copy, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub(crate) enum Chipset {
$($variant = $value),*,
}
@@ -54,6 +53,7 @@ define_chipset!({
TU117 = 0x167,
TU116 = 0x168,
// Ampere
+ GA100 = 0x170,
GA102 = 0x172,
GA103 = 0x173,
GA104 = 0x174,
@@ -73,7 +73,7 @@ impl Chipset {
Self::TU102 | Self::TU104 | Self::TU106 | Self::TU117 | Self::TU116 => {
Architecture::Turing
}
- Self::GA102 | Self::GA103 | Self::GA104 | Self::GA106 | Self::GA107 => {
+ Self::GA100 | Self::GA102 | Self::GA103 | Self::GA104 | Self::GA106 | Self::GA107 => {
Architecture::Ampere
}
Self::AD102 | Self::AD103 | Self::AD104 | Self::AD106 | Self::AD107 => {
@@ -100,9 +100,22 @@ impl fmt::Display for Chipset {
/// Enum representation of the GPU generation.
#[derive(fmt::Debug)]
pub(crate) enum Architecture {
- Turing,
- Ampere,
- Ada,
+ Turing = 0x16,
+ Ampere = 0x17,
+ Ada = 0x19,
+}
+
+impl TryFrom<u8> for Architecture {
+ type Error = Error;
+
+ fn try_from(value: u8) -> Result<Self> {
+ match value {
+ 0x16 => Ok(Self::Turing),
+ 0x17 => Ok(Self::Ampere),
+ 0x19 => Ok(Self::Ada),
+ _ => Err(ENODEV),
+ }
+ }
}
pub(crate) struct Revision {
@@ -111,10 +124,10 @@ pub(crate) struct Revision {
}
impl Revision {
- fn from_boot0(boot0: regs::Boot0) -> Self {
+ fn from_boot0(boot0: regs::NV_PMC_BOOT_0) -> Self {
Self {
- major: boot0.major_rev(),
- minor: boot0.minor_rev(),
+ major: boot0.major_revision(),
+ minor: boot0.minor_revision(),
}
}
}
@@ -133,45 +146,16 @@ pub(crate) struct Spec {
}
impl Spec {
- fn new(bar: &Devres<Bar0>) -> Result<Spec> {
- let bar = bar.try_access().ok_or(ENXIO)?;
- let boot0 = regs::Boot0::read(&bar);
+ fn new(bar: &Bar0) -> Result<Spec> {
+ let boot0 = regs::NV_PMC_BOOT_0::read(bar);
Ok(Self {
- chipset: boot0.chipset().try_into()?,
+ chipset: boot0.chipset()?,
revision: Revision::from_boot0(boot0),
})
}
}
-/// Structure encapsulating the firmware blobs required for the GPU to operate.
-#[expect(dead_code)]
-pub(crate) struct Firmware {
- booter_load: firmware::Firmware,
- booter_unload: firmware::Firmware,
- bootloader: firmware::Firmware,
- gsp: firmware::Firmware,
-}
-
-impl Firmware {
- fn new(dev: &device::Device, spec: &Spec, ver: &str) -> Result<Firmware> {
- let mut chip_name = CString::try_from_fmt(fmt!("{}", spec.chipset))?;
- chip_name.make_ascii_lowercase();
-
- let request = |name_| {
- CString::try_from_fmt(fmt!("nvidia/{}/gsp/{}-{}.bin", &*chip_name, name_, ver))
- .and_then(|path| firmware::Firmware::request(&path, dev))
- };
-
- Ok(Firmware {
- booter_load: request("booter_load")?,
- booter_unload: request("booter_unload")?,
- bootloader: request("bootloader")?,
- gsp: request("gsp")?,
- })
- }
-}
-
/// Structure holding the resources required to operate the GPU.
#[pin_data]
pub(crate) struct Gpu {
@@ -182,9 +166,13 @@ pub(crate) struct Gpu {
}
impl Gpu {
- pub(crate) fn new(pdev: &pci::Device, bar: Devres<Bar0>) -> Result<impl PinInit<Self>> {
- let spec = Spec::new(&bar)?;
- let fw = Firmware::new(pdev.as_ref(), &spec, "535.113.01")?;
+ pub(crate) fn new(
+ pdev: &pci::Device<device::Bound>,
+ devres_bar: Devres<Bar0>,
+ ) -> Result<impl PinInit<Self>> {
+ let bar = devres_bar.access(pdev.as_ref())?;
+ let spec = Spec::new(bar)?;
+ let fw = Firmware::new(pdev.as_ref(), spec.chipset, FIRMWARE_VERSION)?;
dev_info!(
pdev.as_ref(),
@@ -194,6 +182,10 @@ impl Gpu {
spec.revision
);
- Ok(pin_init!(Self { spec, bar, fw }))
+ Ok(pin_init!(Self {
+ spec,
+ bar: devres_bar,
+ fw
+ }))
}
}
diff --git a/drivers/gpu/nova-core/nova_core.rs b/drivers/gpu/nova-core/nova_core.rs
index a91cd924054b..618632f0abcc 100644
--- a/drivers/gpu/nova-core/nova_core.rs
+++ b/drivers/gpu/nova-core/nova_core.rs
@@ -8,6 +8,8 @@ mod gpu;
mod regs;
mod util;
+pub(crate) const MODULE_NAME: &kernel::str::CStr = <LocalModule as kernel::ModuleMetadata>::NAME;
+
kernel::module_pci_driver! {
type: driver::NovaCore,
name: "NovaCore",
diff --git a/drivers/gpu/nova-core/regs.rs b/drivers/gpu/nova-core/regs.rs
index b1a25b86ef17..5a1273230306 100644
--- a/drivers/gpu/nova-core/regs.rs
+++ b/drivers/gpu/nova-core/regs.rs
@@ -1,55 +1,39 @@
// SPDX-License-Identifier: GPL-2.0
-use crate::driver::Bar0;
-
-// TODO
-//
-// Create register definitions via generic macros. See task "Generic register
-// abstraction" in Documentation/gpu/nova/core/todo.rst.
-
-const BOOT0_OFFSET: usize = 0x00000000;
-
-// 3:0 - chipset minor revision
-const BOOT0_MINOR_REV_SHIFT: u8 = 0;
-const BOOT0_MINOR_REV_MASK: u32 = 0x0000000f;
-
-// 7:4 - chipset major revision
-const BOOT0_MAJOR_REV_SHIFT: u8 = 4;
-const BOOT0_MAJOR_REV_MASK: u32 = 0x000000f0;
-
-// 23:20 - chipset implementation Identifier (depends on architecture)
-const BOOT0_IMPL_SHIFT: u8 = 20;
-const BOOT0_IMPL_MASK: u32 = 0x00f00000;
-
-// 28:24 - chipset architecture identifier
-const BOOT0_ARCH_MASK: u32 = 0x1f000000;
-
-// 28:20 - chipset identifier (virtual register field combining BOOT0_IMPL and
-// BOOT0_ARCH)
-const BOOT0_CHIPSET_SHIFT: u8 = BOOT0_IMPL_SHIFT;
-const BOOT0_CHIPSET_MASK: u32 = BOOT0_IMPL_MASK | BOOT0_ARCH_MASK;
-
-#[derive(Copy, Clone)]
-pub(crate) struct Boot0(u32);
-
-impl Boot0 {
- #[inline]
- pub(crate) fn read(bar: &Bar0) -> Self {
- Self(bar.read32(BOOT0_OFFSET))
- }
-
- #[inline]
- pub(crate) fn chipset(&self) -> u32 {
- (self.0 & BOOT0_CHIPSET_MASK) >> BOOT0_CHIPSET_SHIFT
- }
-
- #[inline]
- pub(crate) fn minor_rev(&self) -> u8 {
- ((self.0 & BOOT0_MINOR_REV_MASK) >> BOOT0_MINOR_REV_SHIFT) as u8
+// Required to retain the original register names used by OpenRM, which are all capital snake case
+// but are mapped to types.
+#![allow(non_camel_case_types)]
+
+#[macro_use]
+mod macros;
+
+use crate::gpu::{Architecture, Chipset};
+use kernel::prelude::*;
+
+/* PMC */
+
+register!(NV_PMC_BOOT_0 @ 0x00000000, "Basic revision information about the GPU" {
+ 3:0 minor_revision as u8, "Minor revision of the chip";
+ 7:4 major_revision as u8, "Major revision of the chip";
+ 8:8 architecture_1 as u8, "MSB of the architecture";
+ 23:20 implementation as u8, "Implementation version of the architecture";
+ 28:24 architecture_0 as u8, "Lower bits of the architecture";
+});
+
+impl NV_PMC_BOOT_0 {
+ /// Combines `architecture_0` and `architecture_1` to obtain the architecture of the chip.
+ pub(crate) fn architecture(self) -> Result<Architecture> {
+ Architecture::try_from(
+ self.architecture_0() | (self.architecture_1() << Self::ARCHITECTURE_0.len()),
+ )
}
- #[inline]
- pub(crate) fn major_rev(&self) -> u8 {
- ((self.0 & BOOT0_MAJOR_REV_MASK) >> BOOT0_MAJOR_REV_SHIFT) as u8
+ /// Combines `architecture` and `implementation` to obtain a code unique to the chipset.
+ pub(crate) fn chipset(self) -> Result<Chipset> {
+ self.architecture()
+ .map(|arch| {
+ ((arch as u32) << Self::IMPLEMENTATION.len()) | self.implementation() as u32
+ })
+ .and_then(Chipset::try_from)
}
}
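
To make the bit arithmetic in chipset() above concrete: the architecture code
forms the high bits and the 4-bit implementation field (bits 23:20) the low
bits. A standalone Rust sketch (the helper name is ours), using values taken
from this patch:

    fn chipset_id(arch: u32, implementation: u32) -> u32 {
        const IMPLEMENTATION_LEN: u32 = 4; // width of NV_PMC_BOOT_0 bits 23:20
        (arch << IMPLEMENTATION_LEN) | implementation
    }

    fn main() {
        // Ampere (0x17) with implementation 0x2 yields 0x172, i.e. GA102.
        assert_eq!(chipset_id(0x17, 0x2), 0x172);
    }
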
diff --git a/drivers/gpu/nova-core/regs/macros.rs b/drivers/gpu/nova-core/regs/macros.rs
new file mode 100644
index 000000000000..7ecc70efb3cd
--- /dev/null
+++ b/drivers/gpu/nova-core/regs/macros.rs
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Macro to define register layout and accessors.
+//!
+//! A single register typically includes several fields, which are accessed through a combination
+//! of bit-shift and mask operations that introduce a class of potential mistakes, notably because
+//! not all possible field values are necessarily valid.
+//!
+//! The macro in this module allows defining, using an intuitive and readable syntax, a dedicated
+//! type for each register with its own field accessors that can return an error if a field's value
+//! is invalid.
+
+/// Defines a dedicated type for a register with an absolute offset, along with getter and
+/// setter methods for its fields and methods to read and write it from an `Io` region.
+///
+/// Example:
+///
+/// ```no_run
+/// register!(BOOT_0 @ 0x00000100, "Basic revision information about the GPU" {
+/// 3:0 minor_revision as u8, "Minor revision of the chip";
+/// 7:4 major_revision as u8, "Major revision of the chip";
+/// 28:20 chipset as u32 ?=> Chipset, "Chipset model";
+/// });
+/// ```
+///
+/// This defines a `BOOT_0` type which can be read or written from offset `0x100` of an `Io`
+/// region. It is composed of 3 fields; for instance, `minor_revision` is made of the 4 least
+/// significant bits of the register. Each field can be accessed and modified using accessor
+/// methods:
+///
+/// ```no_run
+/// // Read from the register's defined offset (0x100).
+/// let boot0 = BOOT_0::read(&bar);
+/// pr_info!("chip revision: {}.{}", boot0.major_revision(), boot0.minor_revision());
+///
+/// // `Chipset::try_from` will be called with the value of the field and returns an error if the
+/// // value is invalid.
+/// let chipset = boot0.chipset()?;
+///
+/// // Update some fields and write the value back.
+/// boot0.set_major_revision(3).set_minor_revision(10).write(&bar);
+///
+/// // Or just read and update the register in a single step:
+/// BOOT_0::alter(&bar, |r| r.set_major_revision(3).set_minor_revision(10));
+/// ```
+///
+/// Fields can be defined as follows:
+///
+/// - `as <type>` simply returns the field value cast to the requested integer type, typically
+/// `u32`, `u16`, `u8` or `bool`. Note that `bool` fields must have a range of 1 bit.
+/// - `as <type> => <into_type>` calls `<into_type>`'s `From::<<type>>` implementation and returns
+/// the result.
+/// - `as <type> ?=> <try_into_type>` calls `<try_into_type>`'s `TryFrom::<<type>>` implementation
+/// and returns the result. This is useful on fields for which not all values are valid.
+///
+/// The documentation strings are optional. If present, they will be added to the type's
+/// definition, or to the field getter and setter methods they are attached to.
+///
+/// Putting a `+` before the address of the register makes it relative to a base: the `read` and
+/// `write` methods take a `base` argument that is added to the specified address before access,
+/// and `try_read` and `try_write` methods are also created, allowing access with offsets unknown
+/// at compile-time:
+///
+/// ```no_run
+/// register!(CPU_CTL @ +0x0000010, "CPU core control" {
+/// 0:0 start as bool, "Start the CPU core";
+/// });
+///
+/// // Flip the `start` switch for the CPU core whose base address is `CPU_BASE`.
+/// let cpuctl = CPU_CTL::read(&bar, CPU_BASE);
+/// pr_info!("CPU CTL: {:#x}", cpuctl);
+/// cpuctl.set_start(true).write(&bar, CPU_BASE);
+/// ```
+macro_rules! register {
+ // Creates a register at a fixed offset of the MMIO space.
+ (
+ $name:ident @ $offset:literal $(, $comment:literal)? {
+ $($fields:tt)*
+ }
+ ) => {
+ register!(@common $name $(, $comment)?);
+ register!(@field_accessors $name { $($fields)* });
+ register!(@io $name @ $offset);
+ };
+
+ // Creates a register at a relative offset from a base address.
+ (
+ $name:ident @ + $offset:literal $(, $comment:literal)? {
+ $($fields:tt)*
+ }
+ ) => {
+ register!(@common $name $(, $comment)?);
+ register!(@field_accessors $name { $($fields)* });
+ register!(@io $name @ + $offset);
+ };
+
+ // Defines the wrapper `$name` type, as well as its relevant implementations (`Debug`, `BitOr`,
+ // and conversion to regular `u32`).
+ (@common $name:ident $(, $comment:literal)?) => {
+ $(
+ #[doc=$comment]
+ )?
+ #[repr(transparent)]
+ #[derive(Clone, Copy, Default)]
+ pub(crate) struct $name(u32);
+
+ // TODO: display the raw hex value, then the value of all the fields. This requires
+ // matching the fields, which would complicate the syntax considerably...
+ impl ::core::fmt::Debug for $name {
+ fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
+ f.debug_tuple(stringify!($name))
+ .field(&format_args!("0x{0:x}", &self.0))
+ .finish()
+ }
+ }
+
+ impl core::ops::BitOr for $name {
+ type Output = Self;
+
+ fn bitor(self, rhs: Self) -> Self::Output {
+ Self(self.0 | rhs.0)
+ }
+ }
+
+ impl ::core::convert::From<$name> for u32 {
+ fn from(reg: $name) -> u32 {
+ reg.0
+ }
+ }
+ };
+
+ // Defines all the field getter/setter methods for `$name`.
+ (
+ @field_accessors $name:ident {
+ $($hi:tt:$lo:tt $field:ident as $type:tt
+ $(?=> $try_into_type:ty)?
+ $(=> $into_type:ty)?
+ $(, $comment:literal)?
+ ;
+ )*
+ }
+ ) => {
+ $(
+ register!(@check_field_bounds $hi:$lo $field as $type);
+ )*
+
+ #[allow(dead_code)]
+ impl $name {
+ $(
+ register!(@field_accessor $name $hi:$lo $field as $type
+ $(?=> $try_into_type)?
+ $(=> $into_type)?
+ $(, $comment)?
+ ;
+ );
+ )*
+ }
+ };
+
+ // Boolean fields must have `$hi == $lo`.
+ (@check_field_bounds $hi:tt:$lo:tt $field:ident as bool) => {
+ #[allow(clippy::eq_op)]
+ const _: () = {
+ kernel::build_assert!(
+ $hi == $lo,
+ concat!("boolean field `", stringify!($field), "` covers more than one bit")
+ );
+ };
+ };
+
+ // Non-boolean fields must have `$hi >= $lo`.
+ (@check_field_bounds $hi:tt:$lo:tt $field:ident as $type:tt) => {
+ #[allow(clippy::eq_op)]
+ const _: () = {
+ kernel::build_assert!(
+ $hi >= $lo,
+ concat!("field `", stringify!($field), "`'s MSB is smaller than its LSB")
+ );
+ };
+ };
+
+ // Catches fields defined as `bool` and converts them into a boolean value.
+ (
+ @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as bool => $into_type:ty
+ $(, $comment:literal)?;
+ ) => {
+ register!(
+ @leaf_accessor $name $hi:$lo $field as bool
+ { |f| <$into_type>::from(f != 0) }
+ $into_type => $into_type $(, $comment)?;
+ );
+ };
+
+ // Shortcut for fields defined as `bool` without the `=>` syntax.
+ (
+ @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as bool $(, $comment:literal)?;
+ ) => {
+ register!(@field_accessor $name $hi:$lo $field as bool => bool $(, $comment)?;);
+ };
+
+ // Catches the `?=>` syntax for non-boolean fields.
+ (
+ @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt ?=> $try_into_type:ty
+ $(, $comment:literal)?;
+ ) => {
+ register!(@leaf_accessor $name $hi:$lo $field as $type
+ { |f| <$try_into_type>::try_from(f as $type) } $try_into_type =>
+ ::core::result::Result<
+ $try_into_type,
+ <$try_into_type as ::core::convert::TryFrom<$type>>::Error
+ >
+ $(, $comment)?;);
+ };
+
+ // Catches the `=>` syntax for non-boolean fields.
+ (
+ @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt => $into_type:ty
+ $(, $comment:literal)?;
+ ) => {
+ register!(@leaf_accessor $name $hi:$lo $field as $type
+ { |f| <$into_type>::from(f as $type) } $into_type => $into_type $(, $comment)?;);
+ };
+
+ // Shortcut for fields defined as non-`bool` without the `=>` or `?=>` syntax.
+ (
+ @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt
+ $(, $comment:literal)?;
+ ) => {
+ register!(@field_accessor $name $hi:$lo $field as $type => $type $(, $comment)?;);
+ };
+
+ // Generates the accessor methods for a single field.
+ (
+ @leaf_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:ty
+ { $process:expr } $to_type:ty => $res_type:ty $(, $comment:literal)?;
+ ) => {
+ kernel::macros::paste!(
+ const [<$field:upper>]: ::core::ops::RangeInclusive<u8> = $lo..=$hi;
+ const [<$field:upper _MASK>]: u32 = ((((1 << $hi) - 1) << 1) + 1) - ((1 << $lo) - 1);
+ const [<$field:upper _SHIFT>]: u32 = Self::[<$field:upper _MASK>].trailing_zeros();
+ );
+
+ $(
+ #[doc="Returns the value of this field:"]
+ #[doc=$comment]
+ )?
+ #[inline]
+ pub(crate) fn $field(self) -> $res_type {
+ kernel::macros::paste!(
+ const MASK: u32 = $name::[<$field:upper _MASK>];
+ const SHIFT: u32 = $name::[<$field:upper _SHIFT>];
+ );
+ let field = ((self.0 & MASK) >> SHIFT);
+
+ $process(field)
+ }
+
+ kernel::macros::paste!(
+ $(
+ #[doc="Sets the value of this field:"]
+ #[doc=$comment]
+ )?
+ #[inline]
+ pub(crate) fn [<set_ $field>](mut self, value: $to_type) -> Self {
+ const MASK: u32 = $name::[<$field:upper _MASK>];
+ const SHIFT: u32 = $name::[<$field:upper _SHIFT>];
+ let value = ((value as u32) << SHIFT) & MASK;
+ self.0 = (self.0 & !MASK) | value;
+
+ self
+ }
+ );
+ };
+
+ // Creates the IO accessors for a fixed offset register.
+ (@io $name:ident @ $offset:literal) => {
+ #[allow(dead_code)]
+ impl $name {
+ #[inline]
+ pub(crate) fn read<const SIZE: usize, T>(io: &T) -> Self where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ Self(io.read32($offset))
+ }
+
+ #[inline]
+ pub(crate) fn write<const SIZE: usize, T>(self, io: &T) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ io.write32(self.0, $offset)
+ }
+
+ #[inline]
+ pub(crate) fn alter<const SIZE: usize, T, F>(
+ io: &T,
+ f: F,
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ F: ::core::ops::FnOnce(Self) -> Self,
+ {
+ let reg = f(Self::read(io));
+ reg.write(io);
+ }
+ }
+ };
+
+ // Creates the IO accessors for a relative offset register.
+ (@io $name:ident @ + $offset:literal) => {
+ #[allow(dead_code)]
+ impl $name {
+ #[inline]
+ pub(crate) fn read<const SIZE: usize, T>(
+ io: &T,
+ base: usize,
+ ) -> Self where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ Self(io.read32(base + $offset))
+ }
+
+ #[inline]
+ pub(crate) fn write<const SIZE: usize, T>(
+ self,
+ io: &T,
+ base: usize,
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ io.write32(self.0, base + $offset)
+ }
+
+ #[inline]
+ pub(crate) fn alter<const SIZE: usize, T, F>(
+ io: &T,
+ base: usize,
+ f: F,
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ F: ::core::ops::FnOnce(Self) -> Self,
+ {
+ let reg = f(Self::read(io, base));
+ reg.write(io, base);
+ }
+
+ #[inline]
+ pub(crate) fn try_read<const SIZE: usize, T>(
+ io: &T,
+ base: usize,
+ ) -> ::kernel::error::Result<Self> where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ io.try_read32(base + $offset).map(Self)
+ }
+
+ #[inline]
+ pub(crate) fn try_write<const SIZE: usize, T>(
+ self,
+ io: &T,
+ base: usize,
+ ) -> ::kernel::error::Result<()> where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ io.try_write32(self.0, base + $offset)
+ }
+
+ #[inline]
+ pub(crate) fn try_alter<const SIZE: usize, T, F>(
+ io: &T,
+ base: usize,
+ f: F,
+ ) -> ::kernel::error::Result<()> where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ F: ::core::ops::FnOnce(Self) -> Self,
+ {
+ let reg = f(Self::try_read(io, base)?);
+ reg.try_write(io, base)
+ }
+ }
+ };
+}
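
As a worked example of the per-field constants generated by the @leaf_accessor
rule, here is a standalone Rust sketch (the helper name is ours) of the mask
and shift derivation for a 7:4 field such as major_revision:

    // Same expression as [<$field:upper _MASK>] in the macro above.
    const fn field_mask(hi: u32, lo: u32) -> u32 {
        ((((1 << hi) - 1) << 1) + 1) - ((1 << lo) - 1)
    }

    fn main() {
        let mask = field_mask(7, 4);
        assert_eq!(mask, 0xf0);               // bits 7:4 set
        assert_eq!(mask.trailing_zeros(), 4); // the SHIFT constant
        // Extracting the field from a raw register value:
        let raw = 0xa5_u32;
        assert_eq!((raw & mask) >> mask.trailing_zeros(), 0xa);
    }
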
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 46e3e42f9eb5..4b45e31f0bab 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -52,6 +52,10 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define FEATURE_KBD_LED_REPORT_ID1 0x5d
#define FEATURE_KBD_LED_REPORT_ID2 0x5e
+#define ROG_ALLY_REPORT_SIZE 64
+#define ROG_ALLY_X_MIN_MCU 313
+#define ROG_ALLY_MIN_MCU 319
+
#define SUPPORT_KBD_BACKLIGHT BIT(0)
#define MAX_TOUCH_MAJOR 8
@@ -84,6 +88,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define QUIRK_MEDION_E1239T BIT(10)
#define QUIRK_ROG_NKEY_KEYBOARD BIT(11)
#define QUIRK_ROG_CLAYMORE_II_KEYBOARD BIT(12)
+#define QUIRK_ROG_ALLY_XPAD BIT(13)
#define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \
QUIRK_NO_INIT_REPORTS | \
@@ -534,9 +539,102 @@ static bool asus_kbd_wmi_led_control_present(struct hid_device *hdev)
return !!(value & ASUS_WMI_DSTS_PRESENCE_BIT);
}
+/*
+ * We don't care about any other part of the string except the version section.
+ * Example strings: FGA80100.RC72LA.312_T01, FGA80100.RC71LS.318_T01
+ * The bytes "5a 05 03 31 00 1a 13" and possibly more come before the version
+ * string, and there may be additional bytes after the version string such as
+ * "75 00 74 00 65 00" or a postfix such as "_T01"
+ */
+static int mcu_parse_version_string(const u8 *response, size_t response_size)
+{
+ const u8 *end = response + response_size;
+ const u8 *p = response;
+ int dots, err, version;
+ char buf[4];
+
+ dots = 0;
+ while (p < end && dots < 2) {
+ if (*p++ == '.')
+ dots++;
+ }
+
+ if (dots != 2 || p >= end || (p + 3) >= end)
+ return -EINVAL;
+
+ memcpy(buf, p, 3);
+ buf[3] = '\0';
+
+ err = kstrtoint(buf, 10, &version);
+ if (err || version < 0)
+ return -EINVAL;
+
+ return version;
+}
+
+static int mcu_request_version(struct hid_device *hdev)
+{
+ u8 *response __free(kfree) = kzalloc(ROG_ALLY_REPORT_SIZE, GFP_KERNEL);
+ const u8 request[] = { 0x5a, 0x05, 0x03, 0x31, 0x00, 0x20 };
+ int ret;
+
+ if (!response)
+ return -ENOMEM;
+
+ ret = asus_kbd_set_report(hdev, request, sizeof(request));
+ if (ret < 0)
+ return ret;
+
+ ret = hid_hw_raw_request(hdev, FEATURE_REPORT_ID, response,
+ ROG_ALLY_REPORT_SIZE, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret < 0)
+ return ret;
+
+ ret = mcu_parse_version_string(response, ROG_ALLY_REPORT_SIZE);
+ if (ret < 0) {
+ pr_err("Failed to parse MCU version: %d\n", ret);
+ print_hex_dump(KERN_ERR, "MCU: ", DUMP_PREFIX_NONE,
+ 16, 1, response, ROG_ALLY_REPORT_SIZE, false);
+ }
+
+ return ret;
+}
+
+static void validate_mcu_fw_version(struct hid_device *hdev, int idProduct)
+{
+ int min_version, version;
+
+ version = mcu_request_version(hdev);
+ if (version < 0)
+ return;
+
+ switch (idProduct) {
+ case USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY:
+ min_version = ROG_ALLY_MIN_MCU;
+ break;
+ case USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X:
+ min_version = ROG_ALLY_X_MIN_MCU;
+ break;
+ default:
+ min_version = 0;
+ }
+
+ if (version < min_version) {
+ hid_warn(hdev,
+ "The MCU firmware version must be %d or greater to avoid issues with suspend.\n",
+ min_version);
+ } else {
+ set_ally_mcu_hack(ASUS_WMI_ALLY_MCU_HACK_DISABLED);
+ set_ally_mcu_powersave(true);
+ }
+}
+
static int asus_kbd_register_leds(struct hid_device *hdev)
{
struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+ struct usb_interface *intf;
+ struct usb_device *udev;
unsigned char kbd_func;
int ret;
@@ -560,6 +658,14 @@ static int asus_kbd_register_leds(struct hid_device *hdev)
if (ret < 0)
return ret;
}
+
+ if (drvdata->quirks & QUIRK_ROG_ALLY_XPAD) {
+ intf = to_usb_interface(hdev->dev.parent);
+ udev = interface_to_usbdev(intf);
+ validate_mcu_fw_version(hdev,
+ le16_to_cpu(udev->descriptor.idProduct));
+ }
+
} else {
/* Initialize keyboard */
ret = asus_kbd_init(hdev, FEATURE_KBD_REPORT_ID);
@@ -1280,10 +1386,10 @@ static const struct hid_device_id asus_devices[] = {
QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY),
- QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD | QUIRK_ROG_ALLY_XPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X),
- QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD | QUIRK_ROG_ALLY_XPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD),
QUIRK_ROG_CLAYMORE_II_KEYBOARD },
@@ -1327,4 +1433,5 @@ static struct hid_driver asus_driver = {
};
module_hid_driver(asus_driver);
+MODULE_IMPORT_NS("ASUS_WMI");
MODULE_LICENSE("GPL");
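
The version-string format described above mcu_parse_version_string() boils down
to: skip past the second '.', then read three decimal digits. A standalone Rust
sketch of the same approach (illustration only; the names are ours, not the
driver's):

    fn parse_mcu_version(s: &[u8]) -> Option<u32> {
        let mut it = s.iter();
        // Advance past the second '.' separator.
        for _ in 0..2 {
            it.by_ref().position(|&b| b == b'.')?;
        }
        // The next three bytes are the decimal version number.
        let digits = it.as_slice().get(..3)?;
        std::str::from_utf8(digits).ok()?.parse().ok()
    }

    fn main() {
        assert_eq!(parse_mcu_version(b"FGA80100.RC72LA.312_T01"), Some(312));
        assert_eq!(parse_mcu_version(b"FGA80100.RC71LS.318_T01"), Some(318));
    }
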
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index e3d51a316316..d74adb5bba44 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1841,7 +1841,7 @@ static struct attribute *vmbus_chan_attrs[] = {
NULL
};
-static struct bin_attribute *vmbus_chan_bin_attrs[] = {
+static const struct bin_attribute *vmbus_chan_bin_attrs[] = {
&chan_attr_ring_buffer,
NULL
};
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index f91f713b0105..5fd93aad2d6d 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1795,17 +1795,6 @@ config SENSORS_NZXT_SMART2
source "drivers/hwmon/occ/Kconfig"
-config SENSORS_OXP
- tristate "OneXPlayer EC fan control"
- depends on ACPI_EC
- depends on X86
- help
- If you say yes here you get support for fan readings and control over
- OneXPlayer handheld devices. Only OneXPlayer mini AMD handheld variant
- boards are supported.
-
- Can also be built as a module. In that case it will be called oxp-sensors.
-
config SENSORS_PCF8591
tristate "Philips PCF8591 ADC/DAC"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 766c652ef22b..e3468d024ff3 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -183,7 +183,6 @@ obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
obj-$(CONFIG_SENSORS_NZXT_KRAKEN2) += nzxt-kraken2.o
obj-$(CONFIG_SENSORS_NZXT_KRAKEN3) += nzxt-kraken3.o
obj-$(CONFIG_SENSORS_NZXT_SMART2) += nzxt-smart2.o
-obj-$(CONFIG_SENSORS_OXP) += oxp-sensors.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 9ed2c4b6734e..8ecebea53651 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -143,8 +143,8 @@ static void do_read_registers_on_cu(void *_data)
*/
cu = topology_core_id(smp_processor_id());
- rdmsrl_safe(MSR_F15H_CU_PWR_ACCUMULATOR, &data->cu_acc_power[cu]);
- rdmsrl_safe(MSR_F15H_PTSC, &data->cpu_sw_pwr_ptsc[cu]);
+ rdmsrq_safe(MSR_F15H_CU_PWR_ACCUMULATOR, &data->cu_acc_power[cu]);
+ rdmsrq_safe(MSR_F15H_PTSC, &data->cpu_sw_pwr_ptsc[cu]);
data->cu_on[cu] = 1;
}
@@ -424,7 +424,7 @@ static int fam15h_power_init_data(struct pci_dev *f4,
*/
data->cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);
- if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) {
+ if (rdmsrq_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) {
pr_err("Failed to read max compute unit power accumulator MSR\n");
return -ENODEV;
}
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 6d1175a51832..2df4956296ed 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -15,6 +15,10 @@
#include <linux/kernel.h>
#include <linux/hwmon-vid.h>
+#ifdef CONFIG_X86
+#include <asm/msr.h>
+#endif
+
/*
* Common code for decoding VID pins.
*
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 3685906cc57c..472bcf6092f6 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -20,7 +20,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
-#include <asm/amd_node.h>
+#include <asm/amd/node.h>
#include <asm/processor.h>
MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
diff --git a/drivers/hwmon/oxp-sensors.c b/drivers/hwmon/oxp-sensors.c
deleted file mode 100644
index 83730d931824..000000000000
--- a/drivers/hwmon/oxp-sensors.c
+++ /dev/null
@@ -1,716 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Platform driver for OneXPlayer, AOKZOE, AYANEO, and OrangePi Handhelds
- * that expose fan reading and control via hwmon sysfs.
- *
- * Old OXP boards have the same DMI strings and they are told apart by
- * the boot cpu vendor (Intel/AMD). Of these older models only AMD is
- * supported.
- *
- * Fan control is provided via pwm interface in the range [0-255].
- * Old AMD boards use [0-100] as range in the EC, the written value is
- * scaled to accommodate for that. Newer boards like the mini PRO and
- * AOKZOE are not scaled but have the same EC layout. Newer models
- * like the 2 and X1 are [0-184] and are scaled to 0-255. OrangePi
- * are [1-244] and scaled to 0-255.
- *
- * Copyright (C) 2022 Joaquín I. Aramendía <samsagax@gmail.com>
- * Copyright (C) 2024 Derek J. Clark <derekjohn.clark@gmail.com>
- */
-
-#include <linux/acpi.h>
-#include <linux/dmi.h>
-#include <linux/hwmon.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/processor.h>
-
-/* Handle ACPI lock mechanism */
-static u32 oxp_mutex;
-
-#define ACPI_LOCK_DELAY_MS 500
-
-static bool lock_global_acpi_lock(void)
-{
- return ACPI_SUCCESS(acpi_acquire_global_lock(ACPI_LOCK_DELAY_MS, &oxp_mutex));
-}
-
-static bool unlock_global_acpi_lock(void)
-{
- return ACPI_SUCCESS(acpi_release_global_lock(oxp_mutex));
-}
-
-enum oxp_board {
- aok_zoe_a1 = 1,
- aya_neo_2,
- aya_neo_air,
- aya_neo_air_1s,
- aya_neo_air_plus_mendo,
- aya_neo_air_pro,
- aya_neo_flip,
- aya_neo_geek,
- aya_neo_kun,
- orange_pi_neo,
- oxp_2,
- oxp_fly,
- oxp_mini_amd,
- oxp_mini_amd_a07,
- oxp_mini_amd_pro,
- oxp_x1,
-};
-
-static enum oxp_board board;
-
-/* Fan reading and PWM */
-#define OXP_SENSOR_FAN_REG 0x76 /* Fan reading is 2 registers long */
-#define OXP_2_SENSOR_FAN_REG 0x58 /* Fan reading is 2 registers long */
-#define OXP_SENSOR_PWM_ENABLE_REG 0x4A /* PWM enable is 1 register long */
-#define OXP_SENSOR_PWM_REG 0x4B /* PWM reading is 1 register long */
-#define PWM_MODE_AUTO 0x00
-#define PWM_MODE_MANUAL 0x01
-
-/* OrangePi fan reading and PWM */
-#define ORANGEPI_SENSOR_FAN_REG 0x78 /* Fan reading is 2 registers long */
-#define ORANGEPI_SENSOR_PWM_ENABLE_REG 0x40 /* PWM enable is 1 register long */
-#define ORANGEPI_SENSOR_PWM_REG 0x38 /* PWM reading is 1 register long */
-
-/* Turbo button takeover function
- * Different boards have different values and EC registers
- * for the same function
- */
-#define OXP_TURBO_SWITCH_REG 0xF1 /* Mini Pro, OneXFly, AOKZOE */
-#define OXP_2_TURBO_SWITCH_REG 0xEB /* OXP2 and X1 */
-#define OXP_MINI_TURBO_SWITCH_REG 0x1E /* Mini AO7 */
-
-#define OXP_MINI_TURBO_TAKE_VAL 0x01 /* Mini AO7 */
-#define OXP_TURBO_TAKE_VAL 0x40 /* All other models */
-
-#define OXP_TURBO_RETURN_VAL 0x00 /* Common return val */
-
-static const struct dmi_system_id dmi_table[] = {
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AOKZOE"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AOKZOE A1 AR07"),
- },
- .driver_data = (void *)aok_zoe_a1,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AOKZOE"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AOKZOE A1 Pro"),
- },
- .driver_data = (void *)aok_zoe_a1,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_MATCH(DMI_BOARD_NAME, "AYANEO 2"),
- },
- .driver_data = (void *)aya_neo_2,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR"),
- },
- .driver_data = (void *)aya_neo_air,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR 1S"),
- },
- .driver_data = (void *)aya_neo_air_1s,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AB05-Mendocino"),
- },
- .driver_data = (void *)aya_neo_air_plus_mendo,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR Pro"),
- },
- .driver_data = (void *)aya_neo_air_pro,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_MATCH(DMI_BOARD_NAME, "FLIP"),
- },
- .driver_data = (void *)aya_neo_flip,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_MATCH(DMI_BOARD_NAME, "GEEK"),
- },
- .driver_data = (void *)aya_neo_geek,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "KUN"),
- },
- .driver_data = (void *)aya_neo_kun,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "OrangePi"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "NEO-01"),
- },
- .driver_data = (void *)orange_pi_neo,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONE XPLAYER"),
- },
- .driver_data = (void *)oxp_mini_amd,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
- DMI_MATCH(DMI_BOARD_NAME, "ONEXPLAYER 2"),
- },
- .driver_data = (void *)oxp_2,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER F1"),
- },
- .driver_data = (void *)oxp_fly,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER mini A07"),
- },
- .driver_data = (void *)oxp_mini_amd_a07,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER Mini Pro"),
- },
- .driver_data = (void *)oxp_mini_amd_pro,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
- DMI_MATCH(DMI_BOARD_NAME, "ONEXPLAYER X1"),
- },
- .driver_data = (void *)oxp_x1,
- },
- {},
-};
-
-/* Helper functions to handle EC read/write */
-static int read_from_ec(u8 reg, int size, long *val)
-{
- int i;
- int ret;
- u8 buffer;
-
- if (!lock_global_acpi_lock())
- return -EBUSY;
-
- *val = 0;
- for (i = 0; i < size; i++) {
- ret = ec_read(reg + i, &buffer);
- if (ret)
- return ret;
- *val <<= i * 8;
- *val += buffer;
- }
-
- if (!unlock_global_acpi_lock())
- return -EBUSY;
-
- return 0;
-}
-
-static int write_to_ec(u8 reg, u8 value)
-{
- int ret;
-
- if (!lock_global_acpi_lock())
- return -EBUSY;
-
- ret = ec_write(reg, value);
-
- if (!unlock_global_acpi_lock())
- return -EBUSY;
-
- return ret;
-}
-
-/* Turbo button toggle functions */
-static int tt_toggle_enable(void)
-{
- u8 reg;
- u8 val;
-
- switch (board) {
- case oxp_mini_amd_a07:
- reg = OXP_MINI_TURBO_SWITCH_REG;
- val = OXP_MINI_TURBO_TAKE_VAL;
- break;
- case aok_zoe_a1:
- case oxp_fly:
- case oxp_mini_amd_pro:
- reg = OXP_TURBO_SWITCH_REG;
- val = OXP_TURBO_TAKE_VAL;
- break;
- case oxp_2:
- case oxp_x1:
- reg = OXP_2_TURBO_SWITCH_REG;
- val = OXP_TURBO_TAKE_VAL;
- break;
- default:
- return -EINVAL;
- }
- return write_to_ec(reg, val);
-}
-
-static int tt_toggle_disable(void)
-{
- u8 reg;
- u8 val;
-
- switch (board) {
- case oxp_mini_amd_a07:
- reg = OXP_MINI_TURBO_SWITCH_REG;
- val = OXP_TURBO_RETURN_VAL;
- break;
- case aok_zoe_a1:
- case oxp_fly:
- case oxp_mini_amd_pro:
- reg = OXP_TURBO_SWITCH_REG;
- val = OXP_TURBO_RETURN_VAL;
- break;
- case oxp_2:
- case oxp_x1:
- reg = OXP_2_TURBO_SWITCH_REG;
- val = OXP_TURBO_RETURN_VAL;
- break;
- default:
- return -EINVAL;
- }
- return write_to_ec(reg, val);
-}
-
-/* Callbacks for turbo toggle attribute */
-static umode_t tt_toggle_is_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- switch (board) {
- case aok_zoe_a1:
- case oxp_2:
- case oxp_fly:
- case oxp_mini_amd_a07:
- case oxp_mini_amd_pro:
- case oxp_x1:
- return attr->mode;
- default:
- break;
- }
- return 0;
-}
-
-static ssize_t tt_toggle_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- int rval;
- bool value;
-
- rval = kstrtobool(buf, &value);
- if (rval)
- return rval;
-
- if (value) {
- rval = tt_toggle_enable();
- } else {
- rval = tt_toggle_disable();
- }
- if (rval)
- return rval;
-
- return count;
-}
-
-static ssize_t tt_toggle_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int retval;
- u8 reg;
- long val;
-
- switch (board) {
- case oxp_mini_amd_a07:
- reg = OXP_MINI_TURBO_SWITCH_REG;
- break;
- case aok_zoe_a1:
- case oxp_fly:
- case oxp_mini_amd_pro:
- reg = OXP_TURBO_SWITCH_REG;
- break;
- case oxp_2:
- case oxp_x1:
- reg = OXP_2_TURBO_SWITCH_REG;
- break;
- default:
- return -EINVAL;
- }
-
- retval = read_from_ec(reg, 1, &val);
- if (retval)
- return retval;
-
- return sysfs_emit(buf, "%d\n", !!val);
-}
-
-static DEVICE_ATTR_RW(tt_toggle);
-
-/* PWM enable/disable functions */
-static int oxp_pwm_enable(void)
-{
- switch (board) {
- case orange_pi_neo:
- return write_to_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, PWM_MODE_MANUAL);
- case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
- case oxp_2:
- case oxp_fly:
- case oxp_mini_amd:
- case oxp_mini_amd_a07:
- case oxp_mini_amd_pro:
- case oxp_x1:
- return write_to_ec(OXP_SENSOR_PWM_ENABLE_REG, PWM_MODE_MANUAL);
- default:
- return -EINVAL;
- }
-}
-
-static int oxp_pwm_disable(void)
-{
- switch (board) {
- case orange_pi_neo:
- return write_to_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, PWM_MODE_AUTO);
- case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
- case oxp_2:
- case oxp_fly:
- case oxp_mini_amd:
- case oxp_mini_amd_a07:
- case oxp_mini_amd_pro:
- case oxp_x1:
- return write_to_ec(OXP_SENSOR_PWM_ENABLE_REG, PWM_MODE_AUTO);
- default:
- return -EINVAL;
- }
-}
-
-/* Callbacks for hwmon interface */
-static umode_t oxp_ec_hwmon_is_visible(const void *drvdata,
- enum hwmon_sensor_types type, u32 attr, int channel)
-{
- switch (type) {
- case hwmon_fan:
- return 0444;
- case hwmon_pwm:
- return 0644;
- default:
- return 0;
- }
-}
-
-static int oxp_platform_read(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long *val)
-{
- int ret;
-
- switch (type) {
- case hwmon_fan:
- switch (attr) {
- case hwmon_fan_input:
- switch (board) {
- case orange_pi_neo:
- return read_from_ec(ORANGEPI_SENSOR_FAN_REG, 2, val);
- case oxp_2:
- case oxp_x1:
- return read_from_ec(OXP_2_SENSOR_FAN_REG, 2, val);
- case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
- case oxp_fly:
- case oxp_mini_amd:
- case oxp_mini_amd_a07:
- case oxp_mini_amd_pro:
- return read_from_ec(OXP_SENSOR_FAN_REG, 2, val);
- default:
- break;
- }
- break;
- default:
- break;
- }
- break;
- case hwmon_pwm:
- switch (attr) {
- case hwmon_pwm_input:
- switch (board) {
- case orange_pi_neo:
- ret = read_from_ec(ORANGEPI_SENSOR_PWM_REG, 1, val);
- if (ret)
- return ret;
- /* scale from range [1-244] */
- *val = ((*val - 1) * 254 / 243) + 1;
- break;
- case oxp_2:
- case oxp_x1:
- ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
- if (ret)
- return ret;
- /* scale from range [0-184] */
- *val = (*val * 255) / 184;
- break;
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
- case oxp_mini_amd:
- case oxp_mini_amd_a07:
- ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
- if (ret)
- return ret;
- /* scale from range [0-100] */
- *val = (*val * 255) / 100;
- break;
- case aok_zoe_a1:
- case oxp_fly:
- case oxp_mini_amd_pro:
- default:
- ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
- if (ret)
- return ret;
- break;
- }
- return 0;
- case hwmon_pwm_enable:
- switch (board) {
- case orange_pi_neo:
- return read_from_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, 1, val);
- case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
- case oxp_2:
- case oxp_fly:
- case oxp_mini_amd:
- case oxp_mini_amd_a07:
- case oxp_mini_amd_pro:
- case oxp_x1:
- return read_from_ec(OXP_SENSOR_PWM_ENABLE_REG, 1, val);
- default:
- break;
- }
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
- return -EOPNOTSUPP;
-}
-
-static int oxp_platform_write(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long val)
-{
- switch (type) {
- case hwmon_pwm:
- switch (attr) {
- case hwmon_pwm_enable:
- if (val == 1)
- return oxp_pwm_enable();
- else if (val == 0)
- return oxp_pwm_disable();
- return -EINVAL;
- case hwmon_pwm_input:
- if (val < 0 || val > 255)
- return -EINVAL;
- switch (board) {
- case orange_pi_neo:
- /* scale to range [1-244] */
- val = ((val - 1) * 243 / 254) + 1;
- return write_to_ec(ORANGEPI_SENSOR_PWM_REG, val);
- case oxp_2:
- case oxp_x1:
- /* scale to range [0-184] */
- val = (val * 184) / 255;
- return write_to_ec(OXP_SENSOR_PWM_REG, val);
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
- case oxp_mini_amd:
- case oxp_mini_amd_a07:
- /* scale to range [0-100] */
- val = (val * 100) / 255;
- return write_to_ec(OXP_SENSOR_PWM_REG, val);
- case aok_zoe_a1:
- case oxp_fly:
- case oxp_mini_amd_pro:
- return write_to_ec(OXP_SENSOR_PWM_REG, val);
- default:
- break;
- }
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
- return -EOPNOTSUPP;
-}
-
-/* Known sensors in the OXP EC controllers */
-static const struct hwmon_channel_info * const oxp_platform_sensors[] = {
- HWMON_CHANNEL_INFO(fan,
- HWMON_F_INPUT),
- HWMON_CHANNEL_INFO(pwm,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
- NULL,
-};
-
-static struct attribute *oxp_ec_attrs[] = {
- &dev_attr_tt_toggle.attr,
- NULL
-};
-
-static struct attribute_group oxp_ec_attribute_group = {
- .is_visible = tt_toggle_is_visible,
- .attrs = oxp_ec_attrs,
-};
-
-static const struct attribute_group *oxp_ec_groups[] = {
- &oxp_ec_attribute_group,
- NULL
-};
-
-static const struct hwmon_ops oxp_ec_hwmon_ops = {
- .is_visible = oxp_ec_hwmon_is_visible,
- .read = oxp_platform_read,
- .write = oxp_platform_write,
-};
-
-static const struct hwmon_chip_info oxp_ec_chip_info = {
- .ops = &oxp_ec_hwmon_ops,
- .info = oxp_platform_sensors,
-};
-
-/* Initialization logic */
-static int oxp_platform_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device *hwdev;
-
- hwdev = devm_hwmon_device_register_with_info(dev, "oxpec", NULL,
- &oxp_ec_chip_info, NULL);
-
- return PTR_ERR_OR_ZERO(hwdev);
-}
-
-static struct platform_driver oxp_platform_driver = {
- .driver = {
- .name = "oxp-platform",
- .dev_groups = oxp_ec_groups,
- },
- .probe = oxp_platform_probe,
-};
-
-static struct platform_device *oxp_platform_device;
-
-static int __init oxp_platform_init(void)
-{
- const struct dmi_system_id *dmi_entry;
-
- dmi_entry = dmi_first_match(dmi_table);
- if (!dmi_entry)
- return -ENODEV;
-
- board = (enum oxp_board)(unsigned long)dmi_entry->driver_data;
-
- /*
- * Have to check for AMD processor here because DMI strings are the same
- * between Intel and AMD boards on older OneXPlayer devices, the only way
- * to tell them apart is the CPU. Old Intel boards have an unsupported EC.
- */
- if (board == oxp_mini_amd && boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
- return -ENODEV;
-
- oxp_platform_device =
- platform_create_bundle(&oxp_platform_driver,
- oxp_platform_probe, NULL, 0, NULL, 0);
-
- return PTR_ERR_OR_ZERO(oxp_platform_device);
-}
-
-static void __exit oxp_platform_exit(void)
-{
- platform_device_unregister(oxp_platform_device);
- platform_driver_unregister(&oxp_platform_driver);
-}
-
-MODULE_DEVICE_TABLE(dmi, dmi_table);
-
-module_init(oxp_platform_init);
-module_exit(oxp_platform_exit);
-
-MODULE_AUTHOR("Joaquín Ignacio Aramendía <samsagax@gmail.com>");
-MODULE_DESCRIPTION("Platform driver that handles EC sensors of OneXPlayer devices");
-MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
index 7a01f2687b4c..740066ceaea3 100644
--- a/drivers/i2c/algos/i2c-algo-pcf.c
+++ b/drivers/i2c/algos/i2c-algo-pcf.c
@@ -19,6 +19,7 @@
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-pcf.h>
+#include <linux/string_choices.h>
#include "i2c-algo-pcf.h"
@@ -316,7 +317,7 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
pmsg = &msgs[i];
DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: Doing %s %d bytes to 0x%02x - %d of %d messages\n",
- pmsg->flags & I2C_M_RD ? "read" : "write",
+ str_read_write(pmsg->flags & I2C_M_RD),
pmsg->len, pmsg->addr, i + 1, num);)
ret = pcf_doAddress(adap, pmsg);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 83c88c79afe2..48c5ab832009 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -200,7 +200,7 @@ config I2C_ISMT
config I2C_PIIX4
tristate "Intel PIIX4 and compatible (ATI/AMD/Serverworks/Broadcom/SMSC)"
- depends on PCI && HAS_IOPORT
+ depends on PCI && HAS_IOPORT && X86
select I2C_SMBUS
help
If you say yes to this option, support will be included for the Intel
@@ -592,6 +592,17 @@ config I2C_DESIGNWARE_PLATFORM
This driver can also be built as a module. If so, the module
will be called i2c-designware-platform.
+config I2C_DESIGNWARE_AMDISP
+ tristate "Synopsys DesignWare Platform for AMDISP"
+ depends on DRM_AMD_ISP || COMPILE_TEST
+ depends on I2C_DESIGNWARE_CORE
+ help
+ If you say yes to this option, support will be included for the
+ AMDISP Synopsys DesignWare I2C adapter.
+
+ This driver can also be built as a module. If so, the module
+ will be called amd_isp_i2c_designware.
+
config I2C_DESIGNWARE_AMDPSP
bool "AMD PSP I2C semaphore support"
depends on ACPI
@@ -845,7 +856,7 @@ config I2C_LS2X
config I2C_MLXBF
tristate "Mellanox BlueField I2C controller"
- depends on MELLANOX_PLATFORM && ARM64
+ depends on (MELLANOX_PLATFORM && ARM64) || COMPILE_TEST
depends on ACPI
select I2C_SLAVE
help
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index c1252e2b779e..04db855fdfd6 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o
i2c-designware-platform-y := i2c-designware-platdrv.o
i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_AMDPSP) += i2c-designware-amdpsp.o
i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-baytrail.o
+obj-$(CONFIG_I2C_DESIGNWARE_AMDISP) += i2c-designware-amdisp.o
obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o
i2c-designware-pci-y := i2c-designware-pcidrv.o
obj-$(CONFIG_I2C_DIGICOLOR) += i2c-digicolor.o
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index ee3b469ddfb9..374fc50bb205 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -26,6 +26,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/string_choices.h>
#include "i2c-at91.h"
@@ -523,7 +524,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
*/
dev_dbg(dev->dev, "transfer: %s %zu bytes.\n",
- (dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
+ str_read_write(dev->msg->flags & I2C_M_RD), dev->buf_len);
reinit_completion(&dev->cmd_complete);
dev->transfer_status = 0;
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 332a0fcca28d..63bc3c8f49d3 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -224,11 +224,6 @@ static void slave_rx_tasklet_fn(unsigned long);
| BIT(IS_S_TX_UNDERRUN_SHIFT) | BIT(IS_S_RX_FIFO_FULL_SHIFT)\
| BIT(IS_S_RX_THLD_SHIFT))
-static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave);
-static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave);
-static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
- bool enable);
-
static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
u32 offset)
{
@@ -264,8 +259,8 @@ static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
}
}
-static void bcm_iproc_i2c_slave_init(
- struct bcm_iproc_i2c_dev *iproc_i2c, bool need_reset)
+static void bcm_iproc_i2c_slave_init(struct bcm_iproc_i2c_dev *iproc_i2c,
+ bool need_reset)
{
u32 val;
@@ -276,8 +271,8 @@ static void bcm_iproc_i2c_slave_init(
val |= BIT(CFG_RESET_SHIFT);
iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
- /* wait 100 usec per spec */
- udelay(100);
+ /* wait approximately 100 usec as per spec */
+ usleep_range(100, 200);
/* bring controller out of reset */
val &= ~(BIT(CFG_RESET_SHIFT));
@@ -316,6 +311,19 @@ static void bcm_iproc_i2c_slave_init(
iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
}
+static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
+ bool enable)
+{
+ u32 val;
+
+ val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
+ if (enable)
+ val |= BIT(CFG_EN_SHIFT);
+ else
+ val &= ~BIT(CFG_EN_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
+}
+
static bool bcm_iproc_i2c_check_slave_status
(struct bcm_iproc_i2c_dev *iproc_i2c, u32 status)
{
@@ -438,7 +446,6 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
u32 val;
u8 value;
-
if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
iproc_i2c->tx_underrun++;
if (iproc_i2c->tx_underrun == 1)
@@ -542,7 +549,7 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c)
{
struct i2c_msg *msg = iproc_i2c->msg;
- uint32_t val;
+ u32 val;
/* Read valid data from RX FIFO */
while (iproc_i2c->rx_bytes < msg->len) {
@@ -688,8 +695,8 @@ static void bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
val &= ~(BIT(CFG_EN_SHIFT));
iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
- /* wait 100 usec per spec */
- udelay(100);
+ /* wait approximately 100 usec as per spec */
+ usleep_range(100, 200);
/* bring controller out of reset */
val &= ~(BIT(CFG_RESET_SHIFT));
@@ -708,19 +715,6 @@ static void bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, 0xffffffff);
}
-static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
- bool enable)
-{
- u32 val;
-
- val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
- if (enable)
- val |= BIT(CFG_EN_SHIFT);
- else
- val &= ~BIT(CFG_EN_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
-}
-
static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
struct i2c_msg *msg)
{
@@ -734,31 +728,31 @@ static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
return 0;
case M_CMD_STATUS_LOST_ARB:
- dev_dbg(iproc_i2c->device, "lost bus arbitration\n");
+ dev_err(iproc_i2c->device, "lost bus arbitration\n");
return -EAGAIN;
case M_CMD_STATUS_NACK_ADDR:
- dev_dbg(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr);
+ dev_err(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr);
return -ENXIO;
case M_CMD_STATUS_NACK_DATA:
- dev_dbg(iproc_i2c->device, "NAK data\n");
+ dev_err(iproc_i2c->device, "NAK data\n");
return -ENXIO;
case M_CMD_STATUS_TIMEOUT:
- dev_dbg(iproc_i2c->device, "bus timeout\n");
+ dev_err(iproc_i2c->device, "bus timeout\n");
return -ETIMEDOUT;
case M_CMD_STATUS_FIFO_UNDERRUN:
- dev_dbg(iproc_i2c->device, "FIFO under-run\n");
+ dev_err(iproc_i2c->device, "FIFO under-run\n");
return -ENXIO;
case M_CMD_STATUS_RX_FIFO_FULL:
- dev_dbg(iproc_i2c->device, "RX FIFO full\n");
+ dev_err(iproc_i2c->device, "RX FIFO full\n");
return -ETIMEDOUT;
default:
- dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val);
+ dev_err(iproc_i2c->device, "unknown error code=%d\n", val);
/* re-initialize i2c for recovery */
bcm_iproc_i2c_enable_disable(iproc_i2c, false);
@@ -833,7 +827,7 @@ static int bcm_iproc_i2c_xfer_wait(struct bcm_iproc_i2c_dev *iproc_i2c,
* The i2c quirks are set to enforce this rule.
*/
static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c,
- struct i2c_msg *msgs, bool process_call)
+ struct i2c_msg *msgs, bool process_call)
{
int i;
u8 addr;
@@ -842,8 +836,8 @@ static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c,
struct i2c_msg *msg = &msgs[0];
/* check if bus is busy */
- if (!!(iproc_i2c_rd_reg(iproc_i2c,
- M_CMD_OFFSET) & BIT(M_CMD_START_BUSY_SHIFT))) {
+ if (iproc_i2c_rd_reg(iproc_i2c,
+ M_CMD_OFFSET) & BIT(M_CMD_START_BUSY_SHIFT)) {
dev_warn(iproc_i2c->device, "bus is busy\n");
return -EBUSY;
}
@@ -970,14 +964,14 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
ret = bcm_iproc_i2c_xfer_internal(iproc_i2c, msgs, process_call);
if (ret) {
- dev_dbg(iproc_i2c->device, "xfer failed\n");
+ dev_err(iproc_i2c->device, "xfer failed\n");
return ret;
}
return num;
}
-static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
+static u32 bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
{
u32 val;
@@ -989,6 +983,63 @@ static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
return val;
}
+static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave)
+{
+ struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
+
+ if (iproc_i2c->slave)
+ return -EBUSY;
+
+ if (slave->flags & I2C_CLIENT_TEN)
+ return -EAFNOSUPPORT;
+
+ iproc_i2c->slave = slave;
+
+ tasklet_init(&iproc_i2c->slave_rx_tasklet, slave_rx_tasklet_fn,
+ (unsigned long)iproc_i2c);
+
+ bcm_iproc_i2c_slave_init(iproc_i2c, false);
+
+ return 0;
+}
+
+static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
+{
+ struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
+ u32 tmp;
+
+ if (!iproc_i2c->slave)
+ return -EINVAL;
+
+ disable_irq(iproc_i2c->irq);
+
+ tasklet_kill(&iproc_i2c->slave_rx_tasklet);
+
+ /* disable all slave interrupts */
+ tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ tmp &= ~(IE_S_ALL_INTERRUPT_MASK <<
+ IE_S_ALL_INTERRUPT_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
+
+ /* Erase the slave address programmed */
+ tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
+ tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp);
+
+ /* flush TX/RX FIFOs */
+ tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT));
+ iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp);
+
+ /* clear all pending slave interrupts */
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE);
+
+ iproc_i2c->slave = NULL;
+
+ enable_irq(iproc_i2c->irq);
+
+ return 0;
+}
+
static struct i2c_algorithm bcm_iproc_algo = {
.master_xfer = bcm_iproc_i2c_xfer,
.functionality = bcm_iproc_i2c_functionality,
@@ -1010,21 +1061,18 @@ static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
"clock-frequency", &bus_speed);
if (ret < 0) {
dev_info(iproc_i2c->device,
- "unable to interpret clock-frequency DT property\n");
+ "unable to interpret clock-frequency DT property\n");
bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
}
- if (bus_speed < I2C_MAX_STANDARD_MODE_FREQ) {
- dev_err(iproc_i2c->device, "%d Hz bus speed not supported\n",
- bus_speed);
- dev_err(iproc_i2c->device,
- "valid speeds are 100khz and 400khz\n");
- return -EINVAL;
- } else if (bus_speed < I2C_MAX_FAST_MODE_FREQ) {
+ if (bus_speed < I2C_MAX_STANDARD_MODE_FREQ)
+ return dev_err_probe(iproc_i2c->device, -EINVAL,
+ "%d Hz not supported (out of 100-400 kHz range)\n",
+ bus_speed);
+ else if (bus_speed < I2C_MAX_FAST_MODE_FREQ)
bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
- } else {
+ else
bus_speed = I2C_MAX_FAST_MODE_FREQ;
- }
iproc_i2c->bus_speed = bus_speed;
val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET);
@@ -1039,9 +1087,9 @@ static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
static int bcm_iproc_i2c_probe(struct platform_device *pdev)
{
- int irq, ret = 0;
struct bcm_iproc_i2c_dev *iproc_i2c;
struct i2c_adapter *adap;
+ int irq, ret;
iproc_i2c = devm_kzalloc(&pdev->dev, sizeof(*iproc_i2c),
GFP_KERNEL);
@@ -1066,11 +1114,9 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
ret = of_property_read_u32(iproc_i2c->device->of_node,
"brcm,ape-hsls-addr-mask",
&iproc_i2c->ape_addr_mask);
- if (ret < 0) {
- dev_err(iproc_i2c->device,
- "'brcm,ape-hsls-addr-mask' missing\n");
- return -EINVAL;
- }
+ if (ret < 0)
+ return dev_err_probe(iproc_i2c->device, ret,
+ "'brcm,ape-hsls-addr-mask' missing\n");
spin_lock_init(&iproc_i2c->idm_lock);
@@ -1090,11 +1136,9 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
ret = devm_request_irq(iproc_i2c->device, irq,
bcm_iproc_i2c_isr, 0, pdev->name,
iproc_i2c);
- if (ret < 0) {
- dev_err(iproc_i2c->device,
- "unable to request irq %i\n", irq);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(iproc_i2c->device, ret,
+ "unable to request irq %i\n", irq);
iproc_i2c->irq = irq;
} else {
@@ -1106,9 +1150,8 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
adap = &iproc_i2c->adapter;
i2c_set_adapdata(adap, iproc_i2c);
- snprintf(adap->name, sizeof(adap->name),
- "Broadcom iProc (%s)",
- of_node_full_name(iproc_i2c->device->of_node));
+ snprintf(adap->name, sizeof(adap->name), "Broadcom iProc (%s)",
+ of_node_full_name(iproc_i2c->device->of_node));
adap->algo = &bcm_iproc_algo;
adap->quirks = &bcm_iproc_i2c_quirks;
adap->dev.parent = &pdev->dev;
@@ -1182,62 +1225,6 @@ static const struct dev_pm_ops bcm_iproc_i2c_pm_ops = {
.resume_early = &bcm_iproc_i2c_resume
};
-static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave)
-{
- struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
-
- if (iproc_i2c->slave)
- return -EBUSY;
-
- if (slave->flags & I2C_CLIENT_TEN)
- return -EAFNOSUPPORT;
-
- iproc_i2c->slave = slave;
-
- tasklet_init(&iproc_i2c->slave_rx_tasklet, slave_rx_tasklet_fn,
- (unsigned long)iproc_i2c);
-
- bcm_iproc_i2c_slave_init(iproc_i2c, false);
- return 0;
-}
-
-static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
-{
- u32 tmp;
- struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
-
- if (!iproc_i2c->slave)
- return -EINVAL;
-
- disable_irq(iproc_i2c->irq);
-
- tasklet_kill(&iproc_i2c->slave_rx_tasklet);
-
- /* disable all slave interrupts */
- tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
- tmp &= ~(IE_S_ALL_INTERRUPT_MASK <<
- IE_S_ALL_INTERRUPT_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
-
- /* Erase the slave address programmed */
- tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
- tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp);
-
- /* flush TX/RX FIFOs */
- tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT));
- iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp);
-
- /* clear all pending slave interrupts */
- iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE);
-
- iproc_i2c->slave = NULL;
-
- enable_irq(iproc_i2c->irq);
-
- return 0;
-}
-
static const struct of_device_id bcm_iproc_i2c_of_match[] = {
{
.compatible = "brcm,iproc-i2c",
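
Note on the error-path conversions in this file: they lean on dev_err_probe(), which logs the message and returns the error in one statement, and stays quiet for -EPROBE_DEFER (the deferral reason is recorded for the devices_deferred debugfs file instead). A minimal sketch of the pattern, reusing names from this driver:

	ret = devm_request_irq(iproc_i2c->device, irq, bcm_iproc_i2c_isr, 0,
			       pdev->name, iproc_i2c);
	if (ret < 0)
		/* logs at err level, or silently records -EPROBE_DEFER */
		return dev_err_probe(iproc_i2c->device, ret,
				     "unable to request irq %i\n", irq);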
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 26a36a65521e..606ac071cb80 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -467,7 +467,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
return ret;
/* Alloc and register client IRQ */
- adap->irq_domain = irq_domain_add_linear(NULL, 1, &irq_domain_simple_ops, NULL);
+ adap->irq_domain = irq_domain_create_linear(NULL, 1, &irq_domain_simple_ops, NULL);
if (!adap->irq_domain)
return -ENOMEM;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 6a909d339681..6a3d4e9e07f4 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -551,7 +551,8 @@ out:
static u32 i2c_davinci_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_PROTOCOL_MANGLING;
}
static void terminate_read(struct davinci_i2c_dev *dev)
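
Advertising I2C_FUNC_PROTOCOL_MANGLING tells clients that the adapter honours the message-mangling flags such as I2C_M_IGNORE_NAK and I2C_M_NO_RD_ACK. A hedged sketch of a client transfer gated on that capability; the 0x50 address is made up:

	struct i2c_msg msg = {
		.addr  = 0x50,			/* hypothetical target */
		.flags = I2C_M_IGNORE_NAK,	/* a mangling flag */
		.len   = 1,
		.buf   = buf,
	};

	if (!i2c_check_functionality(adap, I2C_FUNC_PROTOCOL_MANGLING))
		return -EOPNOTSUPP;
	ret = i2c_transfer(adap, &msg, 1);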
diff --git a/drivers/i2c/busses/i2c-designware-amdisp.c b/drivers/i2c/busses/i2c-designware-amdisp.c
new file mode 100644
index 000000000000..ad6f08338124
--- /dev/null
+++ b/drivers/i2c/busses/i2c-designware-amdisp.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Based on Synopsys DesignWare I2C adapter driver.
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "i2c-designware-core.h"
+
+#define DRV_NAME "amd_isp_i2c_designware"
+#define AMD_ISP_I2C_INPUT_CLK 100 /* MHz */
+
+static void amd_isp_dw_i2c_plat_pm_cleanup(struct dw_i2c_dev *i2c_dev)
+{
+ pm_runtime_disable(i2c_dev->dev);
+
+ if (i2c_dev->shared_with_punit)
+ pm_runtime_put_noidle(i2c_dev->dev);
+}
+
+static inline u32 amd_isp_dw_i2c_get_clk_rate(struct dw_i2c_dev *i2c_dev)
+{
+ return AMD_ISP_I2C_INPUT_CLK * 1000;
+}
+
+static int amd_isp_dw_i2c_plat_probe(struct platform_device *pdev)
+{
+ struct dw_i2c_dev *isp_i2c_dev;
+ struct i2c_adapter *adap;
+ int ret;
+
+ isp_i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*isp_i2c_dev), GFP_KERNEL);
+ if (!isp_i2c_dev)
+ return -ENOMEM;
+ isp_i2c_dev->dev = &pdev->dev;
+
+ pdev->dev.init_name = DRV_NAME;
+
+ /*
+ * Use polling mode to send/receive data, because the ISP I2C
+ * controller has no IRQ connection
+ */
+ isp_i2c_dev->flags |= ACCESS_POLLING;
+ platform_set_drvdata(pdev, isp_i2c_dev);
+
+ isp_i2c_dev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(isp_i2c_dev->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(isp_i2c_dev->base),
+ "failed to get IOMEM resource\n");
+
+ isp_i2c_dev->get_clk_rate_khz = amd_isp_dw_i2c_get_clk_rate;
+ ret = i2c_dw_fw_parse_and_configure(isp_i2c_dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to parse i2c dw fwnode and configure\n");
+
+ i2c_dw_configure(isp_i2c_dev);
+
+ adap = &isp_i2c_dev->adapter;
+ adap->owner = THIS_MODULE;
+ ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
+ adap->dev.of_node = pdev->dev.of_node;
+ /* use dynamically allocated adapter id */
+ adap->nr = -1;
+
+ if (isp_i2c_dev->flags & ACCESS_NO_IRQ_SUSPEND)
+ dev_pm_set_driver_flags(&pdev->dev,
+ DPM_FLAG_SMART_PREPARE);
+ else
+ dev_pm_set_driver_flags(&pdev->dev,
+ DPM_FLAG_SMART_PREPARE |
+ DPM_FLAG_SMART_SUSPEND);
+
+ device_enable_async_suspend(&pdev->dev);
+
+ if (isp_i2c_dev->shared_with_punit)
+ pm_runtime_get_noresume(&pdev->dev);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ ret = i2c_dw_probe(isp_i2c_dev);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "i2c_dw_probe failed\n");
+ goto error_release_rpm;
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ return 0;
+
+error_release_rpm:
+ amd_isp_dw_i2c_plat_pm_cleanup(isp_i2c_dev);
+ pm_runtime_put_sync(&pdev->dev);
+ return ret;
+}
+
+static void amd_isp_dw_i2c_plat_remove(struct platform_device *pdev)
+{
+ struct dw_i2c_dev *isp_i2c_dev = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ i2c_del_adapter(&isp_i2c_dev->adapter);
+
+ i2c_dw_disable(isp_i2c_dev);
+
+ pm_runtime_put_sync(&pdev->dev);
+ amd_isp_dw_i2c_plat_pm_cleanup(isp_i2c_dev);
+}
+
+static int amd_isp_dw_i2c_plat_prepare(struct device *dev)
+{
+ /*
+ * If the ACPI companion device object is present for this device, it
+ * may be accessed during suspend and resume of other devices via I2C
+ * operation regions, so tell the PM core and middle layers to avoid
+ * skipping system suspend/resume callbacks for it in that case.
+ */
+ return !has_acpi_companion(dev);
+}
+
+static int amd_isp_dw_i2c_plat_runtime_suspend(struct device *dev)
+{
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+
+ if (i_dev->shared_with_punit)
+ return 0;
+
+ i2c_dw_disable(i_dev);
+ i2c_dw_prepare_clk(i_dev, false);
+
+ return 0;
+}
+
+static int amd_isp_dw_i2c_plat_suspend(struct device *dev)
+{
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+ int ret;
+
+ if (!i_dev)
+ return -ENODEV;
+
+ ret = amd_isp_dw_i2c_plat_runtime_suspend(dev);
+ if (!ret)
+ i2c_mark_adapter_suspended(&i_dev->adapter);
+
+ return ret;
+}
+
+static int amd_isp_dw_i2c_plat_runtime_resume(struct device *dev)
+{
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+
+ if (!i_dev)
+ return -ENODEV;
+
+ if (!i_dev->shared_with_punit)
+ i2c_dw_prepare_clk(i_dev, true);
+ if (i_dev->init)
+ i_dev->init(i_dev);
+
+ return 0;
+}
+
+static int amd_isp_dw_i2c_plat_resume(struct device *dev)
+{
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+
+ amd_isp_dw_i2c_plat_runtime_resume(dev);
+ i2c_mark_adapter_resumed(&i_dev->adapter);
+
+ return 0;
+}
+
+static const struct dev_pm_ops amd_isp_dw_i2c_dev_pm_ops = {
+ .prepare = pm_sleep_ptr(amd_isp_dw_i2c_plat_prepare),
+ LATE_SYSTEM_SLEEP_PM_OPS(amd_isp_dw_i2c_plat_suspend, amd_isp_dw_i2c_plat_resume)
+ RUNTIME_PM_OPS(amd_isp_dw_i2c_plat_runtime_suspend, amd_isp_dw_i2c_plat_runtime_resume, NULL)
+};
+
+/* Work with hotplug and coldplug */
+MODULE_ALIAS("platform:amd_isp_i2c_designware");
+
+static struct platform_driver amd_isp_dw_i2c_driver = {
+ .probe = amd_isp_dw_i2c_plat_probe,
+ .remove = amd_isp_dw_i2c_plat_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = pm_ptr(&amd_isp_dw_i2c_dev_pm_ops),
+ },
+};
+module_platform_driver(amd_isp_dw_i2c_driver);
+
+MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter in AMD ISP");
+MODULE_IMPORT_NS("I2C_DW");
+MODULE_IMPORT_NS("I2C_DW_COMMON");
+MODULE_AUTHOR("Venkata Narendra Kumar Gutta <vengutta@amd.com>");
+MODULE_AUTHOR("Pratap Nirujogi <pratap.nirujogi@amd.com>");
+MODULE_AUTHOR("Bin Du <bin.du@amd.com>");
+MODULE_LICENSE("GPL");
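
The ACCESS_POLLING flag set in probe makes the shared DesignWare core complete transfers by polling status registers rather than via an interrupt handler. A simplified sketch of how the core gates IRQ setup on that flag (not the verbatim core code):

	if (!(dev->flags & ACCESS_POLLING)) {
		ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr,
				       IRQF_SHARED, dev_name(dev->dev), dev);
		if (ret)
			return ret;
	}
	/* with ACCESS_POLLING set, completion is detected by register reads */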
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 8eb7bd640f8d..5b1e8f74c4ac 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -572,8 +572,10 @@ u32 i2c_dw_clk_rate(struct dw_i2c_dev *dev)
* Clock is not necessary if we got LCNT/HCNT values directly from
* the platform code.
*/
- if (WARN_ON_ONCE(!dev->get_clk_rate_khz))
+ if (!dev->get_clk_rate_khz) {
+ dev_dbg_once(dev->dev, "Callback get_clk_rate_khz() is not defined\n");
return 0;
+ }
return dev->get_clk_rate_khz(dev);
}
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index d6e1ee935399..879719e91df2 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -34,7 +34,7 @@
static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
{
- return clk_get_rate(dev->clk) / KILO;
+ return clk_get_rate(dev->clk) / HZ_PER_KHZ;
}
#ifdef CONFIG_OF
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 5cd4a5f7a472..b936a240db0a 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -96,7 +96,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
i2c_dw_disable(dev);
synchronize_irq(dev->irq);
dev->slave = NULL;
- pm_runtime_put(dev->dev);
+ pm_runtime_put_sync_suspend(dev->dev);
return 0;
}
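
pm_runtime_put_sync_suspend() drops the usage count and, when it reaches zero, suspends the device synchronously before returning; a plain pm_runtime_put() only queues an asynchronous idle request. Sketch of the pairing, assuming the slave path took a reference with pm_runtime_get_sync():

	pm_runtime_get_sync(dev->dev);		/* keep powered while registered */
	/* ... slave teardown ... */
	pm_runtime_put_sync_suspend(dev->dev);	/* suspended before this returns */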
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 48e1af544b75..a7f89946dad4 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1180,7 +1180,7 @@ static void i801_probe_optional_targets(struct i801_priv *priv)
#ifdef CONFIG_I2C_I801_MUX
if (!priv->mux_pdev)
#endif
- i2c_register_spd(&priv->adapter);
+ i2c_register_spd_write_enable(&priv->adapter);
}
#else
static void __init input_apanel_init(void) {}
@@ -1283,7 +1283,7 @@ static int i801_notifier_call(struct notifier_block *nb, unsigned long action,
return NOTIFY_DONE;
/* Call i2c_register_spd for muxed child segments */
- i2c_register_spd(to_i2c_adapter(dev));
+ i2c_register_spd_write_enable(to_i2c_adapter(dev));
return NOTIFY_OK;
}
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 9e5d454d8318..de01dfecb16e 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1711,11 +1711,11 @@ static int i2c_imx_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return irq;
+ return dev_err_probe(&pdev->dev, irq, "can't get IRQ\n");
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
- return PTR_ERR(base);
+ return dev_err_probe(&pdev->dev, PTR_ERR(base), "can't get IO memory\n");
phy_addr = (dma_addr_t)res->start;
i2c_imx = devm_kzalloc(&pdev->dev, sizeof(*i2c_imx), GFP_KERNEL);
@@ -1810,13 +1810,15 @@ static int i2c_imx_probe(struct platform_device *pdev)
*/
ret = i2c_imx_dma_request(i2c_imx, phy_addr);
if (ret) {
- if (ret == -EPROBE_DEFER)
+ if (ret == -EPROBE_DEFER) {
+ dev_err_probe(&pdev->dev, ret, "can't get DMA channels\n");
goto clk_notifier_unregister;
- else if (ret == -ENODEV)
+ } else if (ret == -ENODEV) {
dev_dbg(&pdev->dev, "Only use PIO mode\n");
- else
+ } else {
dev_warn(&pdev->dev, "Failed to setup DMA (%pe), only use PIO mode\n",
ERR_PTR(ret));
+ }
}
/* Add I2C adapter */
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index c93c02aa6ac8..7aaefb21416a 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -933,7 +933,7 @@ ismt_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pci_request_region(pdev, SMBBAR, ismt_driver.name);
+ err = pcim_request_region(pdev, SMBBAR, ismt_driver.name);
if (err) {
dev_err(&pdev->dev,
"Failed to request SMBus region 0x%lx-0x%lx\n",
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
index 6943a0de860a..ccd13c4fb83e 100644
--- a/drivers/i2c/busses/i2c-lpc2k.c
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -442,8 +442,13 @@ static int i2c_lpc2k_suspend(struct device *dev)
static int i2c_lpc2k_resume(struct device *dev)
{
struct lpc2k_i2c *i2c = dev_get_drvdata(dev);
+ int ret;
- clk_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clock.\n");
+ return ret;
+ }
i2c_lpc2k_reset(i2c);
return 0;
diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c
index 5db73429125c..492bf4c34722 100644
--- a/drivers/i2c/busses/i2c-microchip-corei2c.c
+++ b/drivers/i2c/busses/i2c-microchip-corei2c.c
@@ -76,6 +76,8 @@
#define CORE_I2C_FREQ (0x14)
#define CORE_I2C_GLITCHREG (0x18)
#define CORE_I2C_SLAVE1_ADDR (0x1c)
+#define CORE_I2C_SMBUS_MSG_WR (0x0)
+#define CORE_I2C_SMBUS_MSG_RD (0x1)
#define PCLK_DIV_960 (CTRL_CR2)
#define PCLK_DIV_256 (0)
@@ -424,9 +426,109 @@ static u32 mchp_corei2c_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
+static int mchp_corei2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags,
+ char read_write, u8 command,
+ int size, union i2c_smbus_data *data)
+{
+ struct i2c_msg msgs[2];
+ struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap);
+ u8 tx_buf[I2C_SMBUS_BLOCK_MAX + 2];
+ u8 rx_buf[I2C_SMBUS_BLOCK_MAX + 1];
+ int num_msgs = 1;
+ int ret;
+
+ msgs[CORE_I2C_SMBUS_MSG_WR].addr = addr;
+ msgs[CORE_I2C_SMBUS_MSG_WR].flags = 0;
+
+ if (read_write == I2C_SMBUS_READ && size <= I2C_SMBUS_BYTE)
+ msgs[CORE_I2C_SMBUS_MSG_WR].flags = I2C_M_RD;
+
+ if (read_write == I2C_SMBUS_WRITE && size <= I2C_SMBUS_WORD_DATA)
+ msgs[CORE_I2C_SMBUS_MSG_WR].len = size;
+
+ if (read_write == I2C_SMBUS_WRITE && size > I2C_SMBUS_BYTE) {
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = tx_buf;
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[0] = command;
+ }
+
+ if (read_write == I2C_SMBUS_READ && size >= I2C_SMBUS_BYTE_DATA) {
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = tx_buf;
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[0] = command;
+ msgs[CORE_I2C_SMBUS_MSG_RD].addr = addr;
+ msgs[CORE_I2C_SMBUS_MSG_RD].flags = I2C_M_RD;
+ num_msgs = 2;
+ }
+
+ if (read_write == I2C_SMBUS_READ && size > I2C_SMBUS_QUICK)
+ msgs[CORE_I2C_SMBUS_MSG_WR].len = 1;
+
+ switch (size) {
+ case I2C_SMBUS_QUICK:
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = NULL;
+ return 0;
+ case I2C_SMBUS_BYTE:
+ if (read_write == I2C_SMBUS_WRITE)
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = &command;
+ else
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = &data->byte;
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[1] = data->byte;
+ } else {
+ msgs[CORE_I2C_SMBUS_MSG_RD].len = size - 1;
+ msgs[CORE_I2C_SMBUS_MSG_RD].buf = &data->byte;
+ }
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[1] = data->word & 0xFF;
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[2] = (data->word >> 8) & 0xFF;
+ } else {
+ msgs[CORE_I2C_SMBUS_MSG_RD].len = size - 1;
+ msgs[CORE_I2C_SMBUS_MSG_RD].buf = rx_buf;
+ }
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ int data_len;
+
+ data_len = data->block[0];
+ msgs[CORE_I2C_SMBUS_MSG_WR].len = data_len + 2;
+ for (int i = 0; i <= data_len; i++)
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[i + 1] = data->block[i];
+ } else {
+ msgs[CORE_I2C_SMBUS_MSG_RD].len = I2C_SMBUS_BLOCK_MAX + 1;
+ msgs[CORE_I2C_SMBUS_MSG_RD].buf = rx_buf;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = mchp_corei2c_xfer(&idev->adapter, msgs, num_msgs);
+ if (ret < 0)
+ return ret;
+
+ if (read_write == I2C_SMBUS_WRITE || size <= I2C_SMBUS_BYTE_DATA)
+ return 0;
+
+ switch (size) {
+ case I2C_SMBUS_WORD_DATA:
+ data->word = (rx_buf[0] | (rx_buf[1] << 8));
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ if (rx_buf[0] > I2C_SMBUS_BLOCK_MAX)
+ rx_buf[0] = I2C_SMBUS_BLOCK_MAX;
+ /* Per the SMBus protocol, the first byte of the block is its length. */
+ for (int i = 0; i <= rx_buf[0]; i++)
+ data->block[i] = rx_buf[i];
+ break;
+ }
+
+ return 0;
+}
+
static const struct i2c_algorithm mchp_corei2c_algo = {
.master_xfer = mchp_corei2c_xfer,
.functionality = mchp_corei2c_func,
+ .smbus_xfer = mchp_corei2c_smbus_xfer,
};
static int mchp_corei2c_probe(struct platform_device *pdev)
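
With .smbus_xfer wired into the algorithm, the I2C core routes SMBus calls to this handler directly instead of emulating them through master_xfer. A usage sketch from a client driver's point of view; the register offset is hypothetical:

	s32 val = i2c_smbus_read_word_data(client, 0x10);	/* hypothetical reg */
	if (val < 0)
		return val;		/* negative errno from the transfer */
	dev_dbg(&client->dev, "reg 0x10 = 0x%04x\n", val);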
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
index 280dde53d7f3..8345f7e6385d 100644
--- a/drivers/i2c/busses/i2c-mlxbf.c
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
/* Defines what functionality is present. */
#define MLXBF_I2C_FUNC_SMBUS_BLOCK \
@@ -197,6 +198,7 @@
#define MLXBF_I2C_MASK_8 GENMASK(7, 0)
#define MLXBF_I2C_MASK_16 GENMASK(15, 0)
+#define MLXBF_I2C_MASK_32 GENMASK(31, 0)
#define MLXBF_I2C_MST_ADDR_OFFSET 0x200
@@ -223,7 +225,7 @@
#define MLXBF_I2C_MASTER_ENABLE \
(MLXBF_I2C_MASTER_LOCK_BIT | MLXBF_I2C_MASTER_BUSY_BIT | \
- MLXBF_I2C_MASTER_START_BIT | MLXBF_I2C_MASTER_STOP_BIT)
+ MLXBF_I2C_MASTER_START_BIT)
#define MLXBF_I2C_MASTER_ENABLE_WRITE \
(MLXBF_I2C_MASTER_ENABLE | MLXBF_I2C_MASTER_CTL_WRITE_BIT)
@@ -337,6 +339,7 @@ enum {
MLXBF_I2C_F_SMBUS_BLOCK = BIT(5),
MLXBF_I2C_F_SMBUS_PEC = BIT(6),
MLXBF_I2C_F_SMBUS_PROCESS_CALL = BIT(7),
+ MLXBF_I2C_F_WRITE_WITHOUT_STOP = BIT(8),
};
/* Mellanox BlueField chip type. */
@@ -637,16 +640,19 @@ static void mlxbf_i2c_smbus_read_data(struct mlxbf_i2c_priv *priv,
}
static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave,
- u8 len, u8 block_en, u8 pec_en, bool read)
+ u8 len, u8 block_en, u8 pec_en, bool read,
+ bool stop)
{
- u32 command;
+ u32 command = 0;
/* Set Master GW control word. */
+ if (stop)
+ command |= MLXBF_I2C_MASTER_STOP_BIT;
if (read) {
- command = MLXBF_I2C_MASTER_ENABLE_READ;
+ command |= MLXBF_I2C_MASTER_ENABLE_READ;
command |= rol32(len, MLXBF_I2C_MASTER_READ_SHIFT);
} else {
- command = MLXBF_I2C_MASTER_ENABLE_WRITE;
+ command |= MLXBF_I2C_MASTER_ENABLE_WRITE;
command |= rol32(len, MLXBF_I2C_MASTER_WRITE_SHIFT);
}
command |= rol32(slave, MLXBF_I2C_MASTER_SLV_ADDR_SHIFT);
@@ -681,8 +687,10 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
u8 op_idx, data_idx, data_len, write_len, read_len;
struct mlxbf_i2c_smbus_operation *operation;
u8 read_en, write_en, block_en, pec_en;
- u8 slave, flags, addr;
+ bool stop_after_write = true;
+ u8 slave, addr;
u8 *read_buf;
+ u32 flags;
u32 bits;
int ret;
@@ -754,7 +762,16 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
memcpy(data_desc + data_idx,
operation->buffer, operation->length);
data_idx += operation->length;
+
+ /*
+ * The stop condition can be skipped when writing on the bus
+ * to implement a repeated start condition on the next read
+ * as required for several SMBus and I2C operations.
+ */
+ if (flags & MLXBF_I2C_F_WRITE_WITHOUT_STOP)
+ stop_after_write = false;
}
+
/*
* We assume that read operations are performed only once per
* SMBus transaction. *TBD* protect this statement so it won't
@@ -780,7 +797,7 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
if (write_en) {
ret = mlxbf_i2c_smbus_enable(priv, slave, write_len, block_en,
- pec_en, 0);
+ pec_en, 0, stop_after_write);
if (ret)
goto out_unlock;
}
@@ -790,7 +807,7 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
mlxbf_i2c_smbus_write_data(priv, (const u8 *)&addr, 1,
MLXBF_I2C_MASTER_DATA_DESC_ADDR, true);
ret = mlxbf_i2c_smbus_enable(priv, slave, read_len, block_en,
- pec_en, 1);
+ pec_en, 1, true);
if (!ret) {
/* Get Master GW data descriptor. */
mlxbf_i2c_smbus_read_data(priv, data_desc, read_len + 1,
@@ -896,6 +913,9 @@ mlxbf_i2c_smbus_i2c_block_func(struct mlxbf_i2c_smbus_request *request,
request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0;
request->operation[0].buffer = command;
+ if (read)
+ request->operation[0].flags |= MLXBF_I2C_F_WRITE_WITHOUT_STOP;
+
/*
* As specified in the standard, the max number of bytes to read/write
* per block operation is 32 bytes. In Golan code, the controller can
@@ -1063,7 +1083,7 @@ static u32 mlxbf_i2c_get_ticks(struct mlxbf_i2c_priv *priv, u64 nanoseconds,
* Frequency
*/
frequency = priv->frequency;
- ticks = (nanoseconds * frequency) / MLXBF_I2C_FREQUENCY_1GHZ;
+ ticks = div_u64(nanoseconds * frequency, MLXBF_I2C_FREQUENCY_1GHZ);
/*
* The number of ticks is rounded down and if minimum is equal to 1
* then add one tick.
@@ -1130,7 +1150,8 @@ static void mlxbf_i2c_set_timings(struct mlxbf_i2c_priv *priv,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_THIGH_MAX_TBUF);
- timer = timings->timeout;
+ timer = mlxbf_i2c_set_timer(priv, timings->timeout, false,
+ MLXBF_I2C_MASK_32, MLXBF_I2C_SHIFT_0);
writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT);
}
@@ -1140,11 +1161,7 @@ enum mlxbf_i2c_timings_config {
MLXBF_I2C_TIMING_CONFIG_1000KHZ,
};
-/*
- * Note that the mlxbf_i2c_timings->timeout value is not related to the
- * bus frequency, it is impacted by the time it takes the driver to
- * complete data transmission before transaction abort.
- */
+/* Timing values are in nanoseconds */
static const struct mlxbf_i2c_timings mlxbf_i2c_timings[] = {
[MLXBF_I2C_TIMING_CONFIG_100KHZ] = {
.scl_high = 4810,
@@ -1159,8 +1176,8 @@ static const struct mlxbf_i2c_timings mlxbf_i2c_timings[] = {
.scl_fall = 50,
.hold_data = 300,
.buf = 20000,
- .thigh_max = 5000,
- .timeout = 106500
+ .thigh_max = 50000,
+ .timeout = 35000000
},
[MLXBF_I2C_TIMING_CONFIG_400KHZ] = {
.scl_high = 1011,
@@ -1175,24 +1192,24 @@ static const struct mlxbf_i2c_timings mlxbf_i2c_timings[] = {
.scl_fall = 50,
.hold_data = 300,
.buf = 20000,
- .thigh_max = 5000,
- .timeout = 106500
+ .thigh_max = 50000,
+ .timeout = 35000000
},
[MLXBF_I2C_TIMING_CONFIG_1000KHZ] = {
- .scl_high = 600,
- .scl_low = 1300,
+ .scl_high = 383,
+ .scl_low = 460,
.hold_start = 600,
- .setup_start = 600,
- .setup_stop = 600,
- .setup_data = 100,
+ .setup_start = 260,
+ .setup_stop = 260,
+ .setup_data = 50,
.sda_rise = 50,
.sda_fall = 50,
.scl_rise = 50,
.scl_fall = 50,
.hold_data = 300,
- .buf = 20000,
- .thigh_max = 5000,
- .timeout = 106500
+ .buf = 500,
+ .thigh_max = 50000,
+ .timeout = 35000000
}
};
@@ -1443,9 +1460,8 @@ static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_
* and PadFrequency, respectively.
*/
core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f);
- core_frequency /= (++core_r) * (++core_od);
- return core_frequency;
+ return div_u64(core_frequency, (++core_r) * (++core_od));
}
static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
@@ -1474,9 +1490,8 @@ static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_r
* and PadFrequency, respectively.
*/
corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST;
- corepll_frequency /= (++core_r) * (++core_od);
- return corepll_frequency;
+ return div_u64(corepll_frequency, (++core_r) * (++core_od));
}
static int mlxbf_i2c_calculate_corepll_freq(struct platform_device *pdev,
@@ -2038,21 +2053,21 @@ static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
read ? &data->byte : &command, read,
pec);
dev_dbg(&adap->dev, "smbus %s byte, slave 0x%02x.\n",
- read ? "read" : "write", addr);
+ str_read_write(read), addr);
break;
case I2C_SMBUS_BYTE_DATA:
mlxbf_i2c_smbus_data_byte_func(&request, &command, &data->byte,
read, pec);
dev_dbg(&adap->dev, "smbus %s byte data at 0x%02x, slave 0x%02x.\n",
- read ? "read" : "write", command, addr);
+ str_read_write(read), command, addr);
break;
case I2C_SMBUS_WORD_DATA:
mlxbf_i2c_smbus_data_word_func(&request, &command,
(u8 *)&data->word, read, pec);
dev_dbg(&adap->dev, "smbus %s word data at 0x%02x, slave 0x%02x.\n",
- read ? "read" : "write", command, addr);
+ str_read_write(read), command, addr);
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
@@ -2060,7 +2075,7 @@ static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
mlxbf_i2c_smbus_i2c_block_func(&request, &command, data->block,
&byte_cnt, read, pec);
dev_dbg(&adap->dev, "i2c %s block data, %d bytes at 0x%02x, slave 0x%02x.\n",
- read ? "read" : "write", byte_cnt, command, addr);
+ str_read_write(read), byte_cnt, command, addr);
break;
case I2C_SMBUS_BLOCK_DATA:
@@ -2068,7 +2083,7 @@ static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
mlxbf_i2c_smbus_block_func(&request, &command, data->block,
&byte_cnt, read, pec);
dev_dbg(&adap->dev, "smbus %s block data, %d bytes at 0x%02x, slave 0x%02x.\n",
- read ? "read" : "write", byte_cnt, command, addr);
+ str_read_write(read), byte_cnt, command, addr);
break;
case I2C_FUNC_SMBUS_PROC_CALL:
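
The switch from plain '/' to div_u64() matters on 32-bit builds, where a 64-bit division would otherwise emit a call to a libgcc helper the kernel does not provide; div_u64() from <linux/math64.h> divides a u64 by a u32 portably. A small sketch with the same shape as the tick computation above:

	#include <linux/math64.h>

	static u64 example_ticks(u64 nanoseconds, u64 frequency)
	{
		/* the product may exceed 32 bits, so divide via div_u64() */
		return div_u64(nanoseconds * frequency, 1000000000);
	}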
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index de713b5747fe..892e2d2988a7 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -1115,14 +1115,10 @@ static void npcm_i2c_master_abort(struct npcm_i2c *bus)
#if IS_ENABLED(CONFIG_I2C_SLAVE)
static u8 npcm_i2c_get_slave_addr(struct npcm_i2c *bus, enum i2c_addr addr_type)
{
- u8 slave_add;
-
if (addr_type > I2C_SLAVE_ADDR2 && addr_type <= I2C_SLAVE_ADDR10)
dev_err(bus->dev, "get slave: try to use more than 2 SA not supported\n");
- slave_add = ioread8(bus->reg + npcm_i2caddr[(int)addr_type]);
-
- return slave_add;
+ return ioread8(bus->reg + npcm_i2caddr[addr_type]);
}
static int npcm_i2c_remove_slave_addr(struct npcm_i2c *bus, u8 slave_add)
@@ -2178,10 +2174,14 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode,
/* Check HW is OK: SDA and SCL should be high at this point. */
if ((npcm_i2c_get_SDA(&bus->adap) == 0) || (npcm_i2c_get_SCL(&bus->adap) == 0)) {
- dev_err(bus->dev, "I2C%d init fail: lines are low\n", bus->num);
- dev_err(bus->dev, "SDA=%d SCL=%d\n", npcm_i2c_get_SDA(&bus->adap),
- npcm_i2c_get_SCL(&bus->adap));
- return -ENXIO;
+ dev_warn(bus->dev, "I2C%d SDA=%d SCL=%d, attempting to recover\n", bus->num,
+ npcm_i2c_get_SDA(&bus->adap), npcm_i2c_get_SCL(&bus->adap));
+ if (npcm_i2c_recovery_tgclk(&bus->adap)) {
+ dev_err(bus->dev, "I2C%d init fail: SDA=%d SCL=%d\n",
+ bus->num, npcm_i2c_get_SDA(&bus->adap),
+ npcm_i2c_get_SCL(&bus->adap));
+ return -ENXIO;
+ }
}
npcm_i2c_int_enable(bus, true);
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index baf6b27f3752..93a49e4637ec 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -135,6 +135,32 @@ static void octeon_i2c_hlc_disable(struct octeon_i2c *i2c)
octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
}
+static void octeon_i2c_block_enable(struct octeon_i2c *i2c)
+{
+ u64 mode;
+
+ if (i2c->block_enabled || !OCTEON_REG_BLOCK_CTL(i2c))
+ return;
+
+ i2c->block_enabled = true;
+ mode = __raw_readq(i2c->twsi_base + OCTEON_REG_MODE(i2c));
+ mode |= TWSX_MODE_BLOCK_MODE;
+ octeon_i2c_writeq_flush(mode, i2c->twsi_base + OCTEON_REG_MODE(i2c));
+}
+
+static void octeon_i2c_block_disable(struct octeon_i2c *i2c)
+{
+ u64 mode;
+
+ if (!i2c->block_enabled || !OCTEON_REG_BLOCK_CTL(i2c))
+ return;
+
+ i2c->block_enabled = false;
+ mode = __raw_readq(i2c->twsi_base + OCTEON_REG_MODE(i2c));
+ mode &= ~TWSX_MODE_BLOCK_MODE;
+ octeon_i2c_writeq_flush(mode, i2c->twsi_base + OCTEON_REG_MODE(i2c));
+}
+
/**
* octeon_i2c_hlc_wait - wait for an HLC operation to complete
* @i2c: The struct octeon_i2c
@@ -281,6 +307,7 @@ static int octeon_i2c_start(struct octeon_i2c *i2c)
u8 stat;
octeon_i2c_hlc_disable(i2c);
+ octeon_i2c_block_disable(i2c);
octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STA);
ret = octeon_i2c_wait(i2c);
@@ -605,6 +632,125 @@ err:
}
/**
+ * octeon_i2c_hlc_block_comp_read - high-level-controller composite block read
+ * @i2c: The struct octeon_i2c
+ * @msgs: msg[0] contains address, place read data into msg[1]
+ *
+ * The i2c core command is constructed and written into the SW_TWSI register.
+ * Executing the command places the requested data into a FIFO buffer,
+ * ready to be read.
+ * Used when the i2c transfer reads more than 8 bytes of data.
+ *
+ * Returns: 0 on success, otherwise a negative errno.
+ */
+static int octeon_i2c_hlc_block_comp_read(struct octeon_i2c *i2c, struct i2c_msg *msgs)
+{
+ int ret;
+ u16 len, i;
+ u64 cmd;
+
+ octeon_i2c_hlc_enable(i2c);
+ octeon_i2c_block_enable(i2c);
+
+ /* Write (size - 1) into block control register */
+ len = msgs[1].len - 1;
+ octeon_i2c_writeq_flush((u64)len, i2c->twsi_base + OCTEON_REG_BLOCK_CTL(i2c));
+
+ /* Prepare core command */
+ cmd = SW_TWSI_V | SW_TWSI_R | SW_TWSI_SOVR | SW_TWSI_OP_7_IA;
+ cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
+
+ /* Send core command */
+ ret = octeon_i2c_hlc_read_cmd(i2c, msgs[0], cmd);
+ if (ret)
+ goto err;
+
+ cmd = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
+ if ((cmd & SW_TWSI_R) == 0) {
+ octeon_i2c_block_disable(i2c);
+ return octeon_i2c_check_status(i2c, false);
+ }
+
+ /* read data in FIFO */
+ octeon_i2c_writeq_flush(TWSX_BLOCK_STS_RESET_PTR,
+ i2c->twsi_base + OCTEON_REG_BLOCK_STS(i2c));
+ for (i = 0; i <= len; i += 8) {
+ /* Byte-swap FIFO data and copy into msg buffer */
+ __be64 rd = cpu_to_be64(__raw_readq(i2c->twsi_base + OCTEON_REG_BLOCK_FIFO(i2c)));
+
+ memcpy(&msgs[1].buf[i], &rd, min(8, msgs[1].len - i));
+ }
+
+err:
+ octeon_i2c_block_disable(i2c);
+ return ret;
+}
+
+/**
+ * octeon_i2c_hlc_block_comp_write - high-level-controller composite block write
+ * @i2c: The struct octeon_i2c
+ * @msgs: msg[0] contains address, msg[1] contains data to be written
+ *
+ * The i2c core command is constructed and the write data is loaded into the
+ * FIFO buffer. Executing the command performs the HW write using the FIFO data.
+ * Used when the i2c transfer writes more than 8 bytes of data.
+ *
+ * Returns: 0 on success, otherwise a negative errno.
+ */
+static int octeon_i2c_hlc_block_comp_write(struct octeon_i2c *i2c, struct i2c_msg *msgs)
+{
+ bool set_ext;
+ int ret;
+ u16 len, i;
+ u64 cmd, ext = 0;
+
+ octeon_i2c_hlc_enable(i2c);
+ octeon_i2c_block_enable(i2c);
+
+ /* Write (size - 1) into block control register */
+ len = msgs[1].len - 1;
+ octeon_i2c_writeq_flush((u64)len, i2c->twsi_base + OCTEON_REG_BLOCK_CTL(i2c));
+
+ /* Prepare core command */
+ cmd = SW_TWSI_V | SW_TWSI_SOVR | SW_TWSI_OP_7_IA;
+ cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
+
+ /* Set parameters for extended message (if required) */
+ set_ext = octeon_i2c_hlc_ext(i2c, msgs[0], &cmd, &ext);
+
+ /* Write msg into FIFO buffer */
+ octeon_i2c_writeq_flush(TWSX_BLOCK_STS_RESET_PTR,
+ i2c->twsi_base + OCTEON_REG_BLOCK_STS(i2c));
+ for (i = 0; i <= len; i += 8) {
+ __be64 buf = 0;
+
+ /* Copy 8 bytes or remaining bytes from message buffer */
+ memcpy(&buf, &msgs[1].buf[i], min(8, msgs[1].len - i));
+
+ /* Byte-swap message data and write into FIFO */
+ buf = cpu_to_be64(buf);
+ octeon_i2c_writeq_flush((u64)buf, i2c->twsi_base + OCTEON_REG_BLOCK_FIFO(i2c));
+ }
+ if (set_ext)
+ octeon_i2c_writeq_flush(ext, i2c->twsi_base + OCTEON_REG_SW_TWSI_EXT(i2c));
+
+ /* Send command to core (send data in FIFO) */
+ ret = octeon_i2c_hlc_cmd_send(i2c, cmd);
+ if (ret)
+ goto err;
+
+ cmd = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
+ if ((cmd & SW_TWSI_R) == 0) {
+ octeon_i2c_block_disable(i2c);
+ return octeon_i2c_check_status(i2c, false);
+ }
+
+err:
+ octeon_i2c_block_disable(i2c);
+ return ret;
+}
+
+/**
* octeon_i2c_xfer - The driver's xfer function
* @adap: Pointer to the i2c_adapter structure
* @msgs: Pointer to the messages to be processed
@@ -630,13 +776,21 @@ int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if ((msgs[0].flags & I2C_M_RD) == 0 &&
(msgs[1].flags & I2C_M_RECV_LEN) == 0 &&
msgs[0].len > 0 && msgs[0].len <= 2 &&
- msgs[1].len > 0 && msgs[1].len <= 8 &&
+ msgs[1].len > 0 &&
msgs[0].addr == msgs[1].addr) {
- if (msgs[1].flags & I2C_M_RD)
- ret = octeon_i2c_hlc_comp_read(i2c, msgs);
- else
- ret = octeon_i2c_hlc_comp_write(i2c, msgs);
- goto out;
+ if (msgs[1].len <= 8) {
+ if (msgs[1].flags & I2C_M_RD)
+ ret = octeon_i2c_hlc_comp_read(i2c, msgs);
+ else
+ ret = octeon_i2c_hlc_comp_write(i2c, msgs);
+ goto out;
+ } else if (msgs[1].len <= 1024 && OCTEON_REG_BLOCK_CTL(i2c)) {
+ if (msgs[1].flags & I2C_M_RD)
+ ret = octeon_i2c_hlc_block_comp_read(i2c, msgs);
+ else
+ ret = octeon_i2c_hlc_block_comp_write(i2c, msgs);
+ goto out;
+ }
}
}
}
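
The block-mode paths above move data through a 64-bit-wide FIFO register eight bytes at a time, byte-swapping each word so the buffer matches the wire order. A standalone sketch of the read-side unpacking; fifo_read() is a hypothetical accessor standing in for the __raw_readq() of OCTEON_REG_BLOCK_FIFO:

	static void fifo_unpack(u8 *dst, u16 len, u64 (*fifo_read)(void))
	{
		u16 i;

		for (i = 0; i < len; i += 8) {
			__be64 word = cpu_to_be64(fifo_read());

			/* copy 8 bytes, or whatever remains of the message */
			memcpy(dst + i, &word, min_t(u16, 8, len - i));
		}
	}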
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h
index b265e21189a1..32a44f2d6274 100644
--- a/drivers/i2c/busses/i2c-octeon-core.h
+++ b/drivers/i2c/busses/i2c-octeon-core.h
@@ -96,18 +96,28 @@ struct octeon_i2c_reg_offset {
unsigned int twsi_int;
unsigned int sw_twsi_ext;
unsigned int mode;
+ unsigned int block_ctl;
+ unsigned int block_sts;
+ unsigned int block_fifo;
};
#define OCTEON_REG_SW_TWSI(x) ((x)->roff.sw_twsi)
#define OCTEON_REG_TWSI_INT(x) ((x)->roff.twsi_int)
#define OCTEON_REG_SW_TWSI_EXT(x) ((x)->roff.sw_twsi_ext)
#define OCTEON_REG_MODE(x) ((x)->roff.mode)
+#define OCTEON_REG_BLOCK_CTL(x) ((x)->roff.block_ctl)
+#define OCTEON_REG_BLOCK_STS(x) ((x)->roff.block_sts)
+#define OCTEON_REG_BLOCK_FIFO(x) ((x)->roff.block_fifo)
-/* Set REFCLK_SRC and HS_MODE in TWSX_MODE register */
+/* TWSX_MODE register */
#define TWSX_MODE_REFCLK_SRC BIT(4)
+#define TWSX_MODE_BLOCK_MODE BIT(2)
#define TWSX_MODE_HS_MODE BIT(0)
#define TWSX_MODE_HS_MASK (TWSX_MODE_REFCLK_SRC | TWSX_MODE_HS_MODE)
+/* TWSX_BLOCK_STS register */
+#define TWSX_BLOCK_STS_RESET_PTR BIT(0)
+
/* Set BUS_MON_RST to reset bus monitor */
#define BUS_MON_RST_MASK BIT(3)
@@ -123,6 +133,7 @@ struct octeon_i2c {
void __iomem *twsi_base;
struct device *dev;
bool hlc_enabled;
+ bool block_enabled;
bool broken_irq_mode;
bool broken_irq_check;
void (*int_enable)(struct octeon_i2c *);
diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c
index bd128ab2e2eb..f4eca44ed183 100644
--- a/drivers/i2c/busses/i2c-pasemi-core.c
+++ b/drivers/i2c/busses/i2c-pasemi-core.c
@@ -5,22 +5,24 @@
* SMBus host driver for PA Semi PWRficient
*/
-#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/stddef.h>
#include <linux/sched.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/io.h>
+#include <linux/stddef.h>
#include "i2c-pasemi-core.h"
/* Register offsets */
#define REG_MTXFIFO 0x00
#define REG_MRXFIFO 0x04
+#define REG_XFSTA 0x0c
#define REG_SMSTA 0x14
#define REG_IMASK 0x18
#define REG_CTL 0x1c
@@ -52,6 +54,12 @@
#define CTL_UJM BIT(8)
#define CTL_CLK_M GENMASK(7, 0)
+/*
+ * The hardware (supposedly) has a 25ms timeout for clock stretching;
+ * 100ms here should be plenty.
+ */
+#define PASEMI_TRANSFER_TIMEOUT_MS 100
+
static inline void reg_write(struct pasemi_smbus *smbus, int reg, int val)
{
dev_dbg(smbus->dev, "smbus write reg %x val %08x\n", reg, val);
@@ -71,7 +79,7 @@ static inline int reg_read(struct pasemi_smbus *smbus, int reg)
static void pasemi_reset(struct pasemi_smbus *smbus)
{
- u32 val = (CTL_MTR | CTL_MRR | (smbus->clk_div & CTL_CLK_M));
+ u32 val = (CTL_MTR | CTL_MRR | CTL_UJM | (smbus->clk_div & CTL_CLK_M));
if (smbus->hw_rev >= 6)
val |= CTL_EN;
@@ -80,43 +88,102 @@ static void pasemi_reset(struct pasemi_smbus *smbus)
reinit_completion(&smbus->irq_completion);
}
-static void pasemi_smb_clear(struct pasemi_smbus *smbus)
+static int pasemi_smb_clear(struct pasemi_smbus *smbus)
{
unsigned int status;
+ int ret;
+
+ /* First wait for the bus to go idle */
+ ret = readx_poll_timeout(ioread32, smbus->ioaddr + REG_SMSTA,
+ status, !(status & (SMSTA_XIP | SMSTA_JAM)),
+ USEC_PER_MSEC,
+ USEC_PER_MSEC * PASEMI_TRANSFER_TIMEOUT_MS);
+
+ if (ret < 0) {
+ dev_err(smbus->dev, "Bus is still stuck (status 0x%08x xfstatus 0x%08x)\n",
+ status, reg_read(smbus, REG_XFSTA));
+ return -EIO;
+ }
+
+ /* If any badness happened or there is data in the FIFOs, reset the FIFOs */
+ if ((status & (SMSTA_MRNE | SMSTA_JMD | SMSTA_MTO | SMSTA_TOM | SMSTA_MTN | SMSTA_MTA)) ||
+ !(status & SMSTA_MTE)) {
+ dev_warn(smbus->dev, "Issuing reset due to status 0x%08x (xfstatus 0x%08x)\n",
+ status, reg_read(smbus, REG_XFSTA));
+ pasemi_reset(smbus);
+ }
- status = reg_read(smbus, REG_SMSTA);
+ /* Clear the flags */
reg_write(smbus, REG_SMSTA, status);
+
+ return 0;
}
static int pasemi_smb_waitready(struct pasemi_smbus *smbus)
{
- int timeout = 100;
unsigned int status;
if (smbus->use_irq) {
reinit_completion(&smbus->irq_completion);
reg_write(smbus, REG_IMASK, SMSTA_XEN | SMSTA_MTN);
- wait_for_completion_timeout(&smbus->irq_completion, msecs_to_jiffies(100));
+ int ret = wait_for_completion_timeout(
+ &smbus->irq_completion,
+ msecs_to_jiffies(PASEMI_TRANSFER_TIMEOUT_MS));
reg_write(smbus, REG_IMASK, 0);
status = reg_read(smbus, REG_SMSTA);
+
+ if (ret < 0) {
+ dev_err(smbus->dev,
+ "Completion wait failed with %d, status 0x%08x\n",
+ ret, status);
+ return ret;
+ } else if (ret == 0) {
+ dev_err(smbus->dev, "Timeout, status 0x%08x\n", status);
+ return -ETIME;
+ }
} else {
- status = reg_read(smbus, REG_SMSTA);
- while (!(status & SMSTA_XEN) && timeout--) {
- msleep(1);
- status = reg_read(smbus, REG_SMSTA);
+ int ret = readx_poll_timeout(
+ ioread32, smbus->ioaddr + REG_SMSTA,
+ status, status & SMSTA_XEN,
+ USEC_PER_MSEC,
+ USEC_PER_MSEC * PASEMI_TRANSFER_TIMEOUT_MS);
+
+ if (ret < 0) {
+ dev_err(smbus->dev, "Timeout, status 0x%08x\n", status);
+ return -ETIME;
}
}
- /* Got NACK? */
- if (status & SMSTA_MTN)
- return -ENXIO;
+ /* Controller timeout? */
+ if (status & SMSTA_TOM) {
+ dev_err(smbus->dev, "Controller timeout, status 0x%08x\n", status);
+ return -EIO;
+ }
- if (timeout < 0) {
- dev_warn(smbus->dev, "Timeout, status 0x%08x\n", status);
- reg_write(smbus, REG_SMSTA, status);
+ /* Peripheral timeout? */
+ if (status & SMSTA_MTO) {
+ dev_err(smbus->dev, "Peripheral timeout, status 0x%08x\n", status);
return -ETIME;
}
+ /* Still stuck in a transaction? */
+ if (status & SMSTA_XIP) {
+ dev_err(smbus->dev, "Bus stuck, status 0x%08x\n", status);
+ return -EIO;
+ }
+
+ /* Arbitration loss? */
+ if (status & SMSTA_MTA) {
+ dev_err(smbus->dev, "Arbitration loss, status 0x%08x\n", status);
+ return -EBUSY;
+ }
+
+ /* Got NACK? */
+ if (status & SMSTA_MTN) {
+ dev_err(smbus->dev, "NACK, status 0x%08x\n", status);
+ return -ENXIO;
+ }
+
/* Clear XEN */
reg_write(smbus, REG_SMSTA, SMSTA_XEN);
@@ -177,9 +244,9 @@ static int pasemi_i2c_xfer(struct i2c_adapter *adapter,
struct pasemi_smbus *smbus = adapter->algo_data;
int ret, i;
- pasemi_smb_clear(smbus);
-
- ret = 0;
+ ret = pasemi_smb_clear(smbus);
+ if (ret)
+ return ret;
for (i = 0; i < num && !ret; i++)
ret = pasemi_i2c_xfer_msg(adapter, &msgs[i], (i == (num - 1)));
@@ -200,7 +267,9 @@ static int pasemi_smb_xfer(struct i2c_adapter *adapter,
addr <<= 1;
read_flag = read_write == I2C_SMBUS_READ;
- pasemi_smb_clear(smbus);
+ err = pasemi_smb_clear(smbus);
+ if (err)
+ return err;
switch (size) {
case I2C_SMBUS_QUICK:
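
pasemi_smb_clear() now waits for bus idle with readx_poll_timeout() from <linux/iopoll.h>, which re-evaluates an accessor until a condition holds or the timeout expires, returning -ETIMEDOUT on failure. A minimal sketch of the same pattern; REG_STATUS and STATUS_BUSY are hypothetical names:

	u32 status;
	int ret;

	/* poll every 1 ms, give up after 100 ms; status keeps the last read */
	ret = readx_poll_timeout(ioread32, ioaddr + REG_STATUS, status,
				 !(status & STATUS_BUSY),
				 USEC_PER_MSEC, 100 * USEC_PER_MSEC);
	if (ret)
		return ret;	/* -ETIMEDOUT: the condition never became true */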
diff --git a/drivers/i2c/busses/i2c-pasemi-pci.c b/drivers/i2c/busses/i2c-pasemi-pci.c
index 77f90c7436ed..b9ccb54ec77e 100644
--- a/drivers/i2c/busses/i2c-pasemi-pci.c
+++ b/drivers/i2c/busses/i2c-pasemi-pci.c
@@ -5,15 +5,15 @@
* SMBus host driver for PA Semi PWRficient
*/
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/stddef.h>
#include <linux/sched.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/io.h>
+#include <linux/stddef.h>
#include "i2c-pasemi-core.h"
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index dd75916157f0..9d3a4dc2bd60 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -34,6 +34,7 @@
#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/io.h>
+#include <asm/amd/fch.h>
#include "i2c-piix4.h"
@@ -80,12 +81,11 @@
#define SB800_PIIX4_PORT_IDX_MASK 0x06
#define SB800_PIIX4_PORT_IDX_SHIFT 1
-/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
-#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02
-#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
+/* SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
+#define SB800_PIIX4_PORT_IDX_KERNCZ (FCH_PM_DECODEEN + 0x02)
+#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ (FCH_PM_DECODEEN_SMBUS0SEL >> 16)
#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
-#define SB800_PIIX4_FCH_PM_ADDR 0xFED80300
#define SB800_PIIX4_FCH_PM_SIZE 8
#define SB800_ASF_ACPI_PATH "\\_SB.ASFC"
@@ -162,19 +162,19 @@ int piix4_sb800_region_request(struct device *dev, struct sb800_mmio_cfg *mmio_c
if (mmio_cfg->use_mmio) {
void __iomem *addr;
- if (!request_mem_region_muxed(SB800_PIIX4_FCH_PM_ADDR,
+ if (!request_mem_region_muxed(FCH_PM_BASE,
SB800_PIIX4_FCH_PM_SIZE,
"sb800_piix4_smb")) {
dev_err(dev,
"SMBus base address memory region 0x%x already in use.\n",
- SB800_PIIX4_FCH_PM_ADDR);
+ FCH_PM_BASE);
return -EBUSY;
}
- addr = ioremap(SB800_PIIX4_FCH_PM_ADDR,
+ addr = ioremap(FCH_PM_BASE,
SB800_PIIX4_FCH_PM_SIZE);
if (!addr) {
- release_mem_region(SB800_PIIX4_FCH_PM_ADDR,
+ release_mem_region(FCH_PM_BASE,
SB800_PIIX4_FCH_PM_SIZE);
dev_err(dev, "SMBus base address mapping failed.\n");
return -ENOMEM;
@@ -201,7 +201,7 @@ void piix4_sb800_region_release(struct device *dev, struct sb800_mmio_cfg *mmio_
{
if (mmio_cfg->use_mmio) {
iounmap(mmio_cfg->addr);
- release_mem_region(SB800_PIIX4_FCH_PM_ADDR,
+ release_mem_region(FCH_PM_BASE,
SB800_PIIX4_FCH_PM_SIZE);
return;
}
@@ -971,7 +971,7 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
* This would allow the ee1004 to be probed incorrectly.
*/
if (port == 0)
- i2c_register_spd(adap);
+ i2c_register_spd_write_enable(adap);
*padap = adap;
return 0;
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 9a867c817db0..f99a2cc721a8 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -349,7 +349,7 @@ static void i2c_powermac_register_devices(struct i2c_adapter *adap,
/* Fill out the rest of the info structure */
info.addr = addr;
info.irq = irq_of_parse_and_map(node, 0);
- info.of_node = of_node_get(node);
+ info.fwnode = of_fwnode_handle(of_node_get(node));
newdev = i2c_new_client_device(adap, &info);
if (IS_ERR(newdev)) {
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 515a784c951c..ccea575fb783 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -71,7 +71,6 @@ enum geni_i2c_err_code {
<< 5)
#define I2C_AUTO_SUSPEND_DELAY 250
-#define KHZ(freq) (1000 * freq)
#define PACKING_BYTES_PW 4
#define ABORT_TIMEOUT HZ
@@ -148,18 +147,18 @@ struct geni_i2c_clk_fld {
* source_clock = 19.2 MHz
*/
static const struct geni_i2c_clk_fld geni_i2c_clk_map_19p2mhz[] = {
- {KHZ(100), 7, 10, 12, 26},
- {KHZ(400), 2, 5, 11, 22},
- {KHZ(1000), 1, 2, 8, 18},
- {},
+ { I2C_MAX_STANDARD_MODE_FREQ, 7, 10, 12, 26 },
+ { I2C_MAX_FAST_MODE_FREQ, 2, 5, 11, 22 },
+ { I2C_MAX_FAST_MODE_PLUS_FREQ, 1, 2, 8, 18 },
+ {}
};
/* source_clock = 32 MHz */
static const struct geni_i2c_clk_fld geni_i2c_clk_map_32mhz[] = {
- {KHZ(100), 8, 14, 18, 40},
- {KHZ(400), 4, 3, 11, 20},
- {KHZ(1000), 2, 3, 6, 15},
- {},
+ { I2C_MAX_STANDARD_MODE_FREQ, 8, 14, 18, 40 },
+ { I2C_MAX_FAST_MODE_FREQ, 4, 3, 11, 20 },
+ { I2C_MAX_FAST_MODE_PLUS_FREQ, 2, 3, 6, 15 },
+ {}
};
static int geni_i2c_clk_map_idx(struct geni_i2c_dev *gi2c)
@@ -812,7 +811,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
&gi2c->clk_freq_out);
if (ret) {
dev_info(dev, "Bus frequency not specified, default to 100kHz.\n");
- gi2c->clk_freq_out = KHZ(100);
+ gi2c->clk_freq_out = I2C_MAX_STANDARD_MODE_FREQ;
}
if (has_acpi_companion(dev))
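
Replacing the local KHZ() macro with the shared constants keeps the clock table keyed on the same values the rest of the I2C core compares against. For reference, the definitions in mainline <linux/i2c.h>:

	#define I2C_MAX_STANDARD_MODE_FREQ	100000
	#define I2C_MAX_FAST_MODE_FREQ		400000
	#define I2C_MAX_FAST_MODE_PLUS_FREQ	1000000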
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index d7dddd6c296a..23375f7fe3ad 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -52,6 +52,8 @@
#define ICCR1_ICE BIT(7)
#define ICCR1_IICRST BIT(6)
#define ICCR1_SOWP BIT(4)
+#define ICCR1_SCLO BIT(3)
+#define ICCR1_SDAO BIT(2)
#define ICCR1_SCLI BIT(1)
#define ICCR1_SDAI BIT(0)
@@ -151,11 +153,11 @@ static int riic_bus_barrier(struct riic_dev *riic)
ret = readb_poll_timeout(riic->base + riic->info->regs[RIIC_ICCR2], val,
!(val & ICCR2_BBSY), 10, riic->adapter.timeout);
if (ret)
- return ret;
+ return i2c_recover_bus(&riic->adapter);
if ((riic_readb(riic, RIIC_ICCR1) & (ICCR1_SDAI | ICCR1_SCLI)) !=
(ICCR1_SDAI | ICCR1_SCLI))
- return -EBUSY;
+ return i2c_recover_bus(&riic->adapter);
return 0;
}
@@ -439,6 +441,52 @@ static int riic_init_hw(struct riic_dev *riic)
return 0;
}
+static int riic_get_scl(struct i2c_adapter *adap)
+{
+ struct riic_dev *riic = i2c_get_adapdata(adap);
+
+ return !!(riic_readb(riic, RIIC_ICCR1) & ICCR1_SCLI);
+}
+
+static int riic_get_sda(struct i2c_adapter *adap)
+{
+ struct riic_dev *riic = i2c_get_adapdata(adap);
+
+ return !!(riic_readb(riic, RIIC_ICCR1) & ICCR1_SDAI);
+}
+
+static void riic_set_scl(struct i2c_adapter *adap, int val)
+{
+ struct riic_dev *riic = i2c_get_adapdata(adap);
+
+ if (val)
+ riic_clear_set_bit(riic, ICCR1_SOWP, ICCR1_SCLO, RIIC_ICCR1);
+ else
+ riic_clear_set_bit(riic, ICCR1_SOWP | ICCR1_SCLO, 0, RIIC_ICCR1);
+
+ riic_clear_set_bit(riic, 0, ICCR1_SOWP, RIIC_ICCR1);
+}
+
+static void riic_set_sda(struct i2c_adapter *adap, int val)
+{
+ struct riic_dev *riic = i2c_get_adapdata(adap);
+
+ if (val)
+ riic_clear_set_bit(riic, ICCR1_SOWP, ICCR1_SDAO, RIIC_ICCR1);
+ else
+ riic_clear_set_bit(riic, ICCR1_SOWP | ICCR1_SDAO, 0, RIIC_ICCR1);
+
+ riic_clear_set_bit(riic, 0, ICCR1_SOWP, RIIC_ICCR1);
+}
+
+static struct i2c_bus_recovery_info riic_bri = {
+ .recover_bus = i2c_generic_scl_recovery,
+ .get_scl = riic_get_scl,
+ .set_scl = riic_set_scl,
+ .get_sda = riic_get_sda,
+ .set_sda = riic_set_sda,
+};
+
static const struct riic_irq_desc riic_irqs[] = {
{ .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" },
{ .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" },
@@ -495,6 +543,7 @@ static int riic_i2c_probe(struct platform_device *pdev)
adap->algo = &riic_algo;
adap->dev.parent = dev;
adap->dev.of_node = dev->of_node;
+ adap->bus_recovery_info = &riic_bri;
init_completion(&riic->msg_done);
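
With bus_recovery_info in place, i2c_recover_bus() can unstick a bus whose target holds SDA low: the generic helper pulses SCL up to nine times through the set_scl callback, checking get_sda after each pulse. A condensed sketch of the core loop (simplified from i2c-core-base.c, not verbatim):

	/* pulse SCL until the target releases SDA, at most 9 clocks */
	for (i = 0; i < RECOVERY_CLK_CNT * 2; i++, scl = !scl) {
		bri->set_scl(adap, scl);
		ndelay(RECOVERY_NDELAY);
		if (scl && bri->get_sda && bri->get_sda(adap))
			break;	/* SDA released, bus recovered */
	}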
diff --git a/drivers/i2c/busses/i2c-rzv2m.c b/drivers/i2c/busses/i2c-rzv2m.c
index 53762cc56d28..b0e9c0b62429 100644
--- a/drivers/i2c/busses/i2c-rzv2m.c
+++ b/drivers/i2c/busses/i2c-rzv2m.c
@@ -402,7 +402,7 @@ static const struct i2c_adapter_quirks rzv2m_i2c_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN,
};
-static struct i2c_algorithm rzv2m_i2c_algo = {
+static const struct i2c_algorithm rzv2m_i2c_algo = {
.xfer = rzv2m_i2c_xfer,
.functionality = rzv2m_i2c_func,
};
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index efe29621b8d7..adfcee6c9fdc 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
/* Transmit operation: */
/* */
@@ -409,7 +410,7 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
pd->sr |= sr; /* remember state */
dev_dbg(pd->dev, "i2c_isr 0x%02x 0x%02x %s %d %d!\n", sr, pd->sr,
- (pd->msg->flags & I2C_M_RD) ? "read" : "write",
+ str_read_write(pd->msg->flags & I2C_M_RD),
pd->pos, pd->msg->len);
/* Kick off TxDMA after preface was done */
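
str_read_write() comes from <linux/string_choices.h> and simply maps a boolean to the literal "read" or "write", replacing the open-coded ternary; its definition is essentially:

	static inline const char *str_read_write(bool v)
	{
		return v ? "read" : "write";
	}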
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 87976e99e6d0..049b4d154c23 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -1395,6 +1395,11 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], MSG_END_CONTINUE);
if (ret)
break;
+
+ /* Validate message length before proceeding */
+ if (msgs[i].buf[0] == 0 || msgs[i].buf[0] > I2C_SMBUS_BLOCK_MAX)
+ break;
+
/* Set the msg length from first byte */
msgs[i].len += msgs[i].buf[0];
dev_dbg(i2c_dev->dev, "reading %d bytes\n", msgs[i].len);
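
The added check covers SMBus block reads, where the first byte the device returns is a count of the bytes that follow and must stay within 1..I2C_SMBUS_BLOCK_MAX (32); anything outside that range would corrupt the message length. A sketch of the message a caller submits for such a read; the address is made up:

	u8 buf[I2C_SMBUS_BLOCK_MAX + 1];	/* count byte + up to 32 bytes */
	struct i2c_msg msg = {
		.addr  = 0x1a,			/* hypothetical target */
		.flags = I2C_M_RD | I2C_M_RECV_LEN,
		.len   = 1,			/* grows by buf[0] at runtime */
		.buf   = buf,
	};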
diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
index 143d012fa43e..3959f23fc440 100644
--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
+++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
@@ -168,6 +168,9 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
i2c->roff.twsi_int = 0x1010;
i2c->roff.sw_twsi_ext = 0x1018;
i2c->roff.mode = 0x1038;
+ i2c->roff.block_ctl = 0x1048;
+ i2c->roff.block_sts = 0x1050;
+ i2c->roff.block_fifo = 0x1058;
i2c->dev = dev;
pci_set_drvdata(pdev, i2c);
@@ -175,7 +178,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
if (ret)
return ret;
- ret = pci_request_regions(pdev, DRV_NAME);
+ ret = pcim_request_all_regions(pdev, DRV_NAME);
if (ret)
return ret;
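
pcim_request_all_regions() is the device-managed counterpart of pci_request_regions(): devres releases the regions automatically on detach, so no pci_release_regions() is needed in the remove or error paths. Sketch of the managed pairing:

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_request_all_regions(pdev, DRV_NAME);
	if (ret)
		return ret;
	/* no explicit release: devres undoes both on driver detach */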
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index 0f2ed181b266..a18eab0992a1 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
/* include interfaces to usb layer */
@@ -71,7 +72,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
dev_dbg(&adapter->dev,
" %d: %s (flags %d) %d bytes to 0x%02x\n",
- i, pmsg->flags & I2C_M_RD ? "read" : "write",
+ i, str_read_write(pmsg->flags & I2C_M_RD),
pmsg->flags, pmsg->len, pmsg->addr);
/* and directly send the message */
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index d877f5a1f579..ca0358e8f928 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -532,22 +532,16 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed))
bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
- if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ) {
- dev_err(dev, "invalid clock-frequency %d\n", bus_speed);
- return -EINVAL;
- }
+ if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ)
+ return dev_err_probe(dev, -EINVAL, "invalid clock-frequency %d\n", bus_speed);
priv->clk = devm_clk_get_enabled(dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to enable clock\n");
- return PTR_ERR(priv->clk);
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "failed to enable clock\n");
clk_rate = clk_get_rate(priv->clk);
- if (!clk_rate) {
- dev_err(dev, "input clock rate should not be zero\n");
- return -EINVAL;
- }
+ if (!clk_rate)
+ return dev_err_probe(dev, -EINVAL, "input clock rate should not be zero\n");
priv->clk_cycle = clk_rate / bus_speed;
init_completion(&priv->comp);
@@ -565,10 +559,8 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, uniphier_fi2c_interrupt, 0,
pdev->name, priv);
- if (ret) {
- dev_err(dev, "failed to request irq %d\n", irq);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq %d\n", irq);
return i2c_add_adapter(&priv->adap);
}
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index b95d50d4d7db..9d49a3d5d612 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -327,22 +327,16 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed))
bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
- if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ) {
- dev_err(dev, "invalid clock-frequency %d\n", bus_speed);
- return -EINVAL;
- }
+ if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ)
+ return dev_err_probe(dev, -EINVAL, "invalid clock-frequency %d\n", bus_speed);
priv->clk = devm_clk_get_enabled(dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to enable clock\n");
- return PTR_ERR(priv->clk);
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "failed to enable clock\n");
clk_rate = clk_get_rate(priv->clk);
- if (!clk_rate) {
- dev_err(dev, "input clock rate should not be zero\n");
- return -EINVAL;
- }
+ if (!clk_rate)
+ return dev_err_probe(dev, -EINVAL, "input clock rate should not be zero\n");
priv->clk_cycle = clk_rate / bus_speed;
init_completion(&priv->comp);
@@ -359,10 +353,8 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, uniphier_i2c_interrupt, 0, pdev->name,
priv);
- if (ret) {
- dev_err(dev, "failed to request irq %d\n", irq);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq %d\n", irq);
return i2c_add_adapter(&priv->adap);
}
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 7ed29992a97f..2c26a57883f2 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -89,10 +89,9 @@ static int vt586b_probe(struct pci_dev *dev, const struct pci_device_id *id)
u8 rev;
int res;
- if (pm_io_base) {
- dev_err(&dev->dev, "i2c-via: Will only support one host\n");
- return -ENODEV;
- }
+ if (pm_io_base)
+ return dev_err_probe(&dev->dev, -ENODEV,
+ "Will only support one host\n");
pci_read_config_byte(dev, PM_CFG_REVID, &rev);
@@ -113,10 +112,10 @@ static int vt586b_probe(struct pci_dev *dev, const struct pci_device_id *id)
pci_read_config_word(dev, base, &pm_io_base);
pm_io_base &= (0xff << 8);
- if (!request_region(I2C_DIR, IOSPACE, vt586b_driver.name)) {
- dev_err(&dev->dev, "IO 0x%x-0x%x already in use\n", I2C_DIR, I2C_DIR + IOSPACE);
- return -ENODEV;
- }
+ if (!request_region(I2C_DIR, IOSPACE, vt586b_driver.name))
+ return dev_err_probe(&dev->dev, -ENODEV,
+ "IO 0x%x-0x%x already in use\n",
+ I2C_DIR, I2C_DIR + IOSPACE);
outb(inb(I2C_DIR) & ~(I2C_SDA | I2C_SCL), I2C_DIR);
outb(inb(I2C_OUT) & ~(I2C_SDA | I2C_SCL), I2C_OUT);
diff --git a/drivers/i2c/busses/i2c-viai2c-wmt.c b/drivers/i2c/busses/i2c-viai2c-wmt.c
index 4eb740faf268..2cf3cc0165fb 100644
--- a/drivers/i2c/busses/i2c-viai2c-wmt.c
+++ b/drivers/i2c/busses/i2c-viai2c-wmt.c
@@ -44,16 +44,13 @@ static int wmt_i2c_reset_hardware(struct viai2c *i2c)
int err;
err = clk_prepare_enable(i2c->clk);
- if (err) {
- dev_err(i2c->dev, "failed to enable clock\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(i2c->dev, err, "failed to enable clock\n");
err = clk_set_rate(i2c->clk, 20000000);
if (err) {
- dev_err(i2c->dev, "failed to set clock = 20Mhz\n");
clk_disable_unprepare(i2c->clk);
- return err;
+ return dev_err_probe(i2c->dev, err, "failed to set clock to 20 MHz\n");
}
writew(0, i2c->base + VIAI2C_REG_CR);
@@ -121,10 +118,9 @@ static int wmt_i2c_probe(struct platform_device *pdev)
"failed to request irq %i\n", i2c->irq);
i2c->clk = of_clk_get(np, 0);
- if (IS_ERR(i2c->clk)) {
- dev_err(&pdev->dev, "unable to request clock\n");
- return PTR_ERR(i2c->clk);
- }
+ if (IS_ERR(i2c->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c->clk),
+ "unable to request clock\n");
err = of_property_read_u32(np, "clock-frequency", &clk_rate);
if (!err && clk_rate == I2C_MAX_FAST_MODE_FREQ)
@@ -139,10 +135,8 @@ static int wmt_i2c_probe(struct platform_device *pdev)
adap->dev.of_node = pdev->dev.of_node;
err = wmt_i2c_reset_hardware(i2c);
- if (err) {
- dev_err(&pdev->dev, "error initializing hardware\n");
+ if (err)
return err;
- }
err = i2c_add_adapter(adap);
if (err)
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 2cc7bba3b8bf..c58843609107 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -330,30 +330,27 @@ static int vt596_probe(struct pci_dev *pdev,
SMBHSTCFG = 0x84;
} else {
/* no matches at all */
- dev_err(&pdev->dev, "Cannot configure "
- "SMBus I/O Base address\n");
- return -ENODEV;
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "Cannot configure SMBus I/O Base address\n");
}
}
vt596_smba &= 0xfff0;
- if (vt596_smba == 0) {
- dev_err(&pdev->dev, "SMBus base address "
- "uninitialized - upgrade BIOS or use "
- "force_addr=0xaddr\n");
- return -ENODEV;
- }
+ if (vt596_smba == 0)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "SMBus base address uninitialized - upgrade BIOS or use force_addr=0xaddr\n");
found:
error = acpi_check_region(vt596_smba, 8, vt596_driver.name);
if (error)
return -ENODEV;
- if (!request_region(vt596_smba, 8, vt596_driver.name)) {
- dev_err(&pdev->dev, "SMBus region 0x%x already in use!\n",
- vt596_smba);
- return -ENODEV;
- }
+ if (!request_region(vt596_smba, 8, vt596_driver.name))
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "SMBus region 0x%x already in use!\n",
+ vt596_smba);
pci_read_config_byte(pdev, SMBHSTCFG, &temp);
/* If force_addr is set, we program the new address here. Just to make
@@ -375,10 +372,10 @@ found:
pci_write_config_byte(pdev, SMBHSTCFG, temp | 0x01);
dev_info(&pdev->dev, "Enabling SMBus device\n");
} else {
- dev_err(&pdev->dev, "SMBUS: Error: Host SMBus "
- "controller not enabled! - upgrade BIOS or "
- "use force=1\n");
- error = -ENODEV;
+ error = dev_err_probe(&pdev->dev, -ENODEV,
+ "SMBUS: Error: Host SMBus controller not enabled! - upgrade BIOS or use force=1\n");
goto release_region;
}
}
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index 503e2f4d6f84..1bd602852e35 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
@@ -278,7 +279,7 @@ static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs,
dev_dbg(&i2c->dev,
" %d: %s (flags %d) %d bytes to 0x%02x\n",
- i, pmsg->flags & I2C_M_RD ? "read" : "write",
+ i, str_read_write(pmsg->flags & I2C_M_RD),
pmsg->flags, pmsg->len, pmsg->addr);
mutex_lock(&vb->lock);
@@ -384,15 +385,13 @@ static int vprbrd_i2c_probe(struct platform_device *pdev)
VPRBRD_USB_REQUEST_I2C_FREQ, VPRBRD_USB_TYPE_OUT,
0x0000, 0x0000, &vb_i2c->bus_freq_param, 1,
VPRBRD_USB_TIMEOUT_MS);
- if (ret != 1) {
- dev_err(&pdev->dev, "failure setting i2c_bus_freq to %d\n",
- i2c_bus_freq);
- return -EIO;
- }
+ if (ret != 1)
+ return dev_err_probe(&pdev->dev, -EIO,
+ "failure setting i2c_bus_freq to %d\n",
+ i2c_bus_freq);
} else {
- dev_err(&pdev->dev,
- "invalid i2c_bus_freq setting:%d\n", i2c_bus_freq);
- return -EIO;
+ return dev_err_probe(&pdev->dev, -EIO,
+ "invalid i2c_bus_freq setting:%d\n", i2c_bus_freq);
}
vb_i2c->i2c.dev.parent = &pdev->dev;
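
str_read_write() from <linux/string_choices.h> replaces the open-coded `cond ? "read" : "write"` ternary in the viperboard debug line above; the same helper reappears in the i2c-core-base.c and i2c-core-smbus.c hunks further down. A small sketch; foo_log_msg() is hypothetical:

#include <linux/i2c.h>
#include <linux/printk.h>
#include <linux/string_choices.h>

static void foo_log_msg(const struct i2c_msg *msg)
{
	/* str_read_write(true) yields "read", str_read_write(false) "write" */
	pr_debug("%s of %u bytes at 0x%02x\n",
		 str_read_write(msg->flags & I2C_M_RD), msg->len, msg->addr);
}
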
diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
index 2a351f961b89..9b05ff53d3d7 100644
--- a/drivers/i2c/busses/i2c-virtio.c
+++ b/drivers/i2c/busses/i2c-virtio.c
@@ -192,10 +192,9 @@ static int virtio_i2c_probe(struct virtio_device *vdev)
struct virtio_i2c *vi;
int ret;
- if (!virtio_has_feature(vdev, VIRTIO_I2C_F_ZERO_LENGTH_REQUEST)) {
- dev_err(&vdev->dev, "Zero-length request feature is mandatory\n");
- return -EINVAL;
- }
+ if (!virtio_has_feature(vdev, VIRTIO_I2C_F_ZERO_LENGTH_REQUEST))
+ return dev_err_probe(&vdev->dev, -EINVAL,
+ "Zero-length request feature is mandatory\n");
vi = devm_kzalloc(&vdev->dev, sizeof(*vi), GFP_KERNEL);
if (!vi)
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 663fe5604dd6..b29dec66b2c3 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -101,8 +101,6 @@ struct slimpro_i2c_dev {
struct completion rd_complete;
u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */
u32 *resp_msg;
- phys_addr_t comm_base_addr;
- void *pcc_comm_addr;
};
#define to_slimpro_i2c_dev(cl) \
@@ -148,7 +146,8 @@ static void slimpro_i2c_rx_cb(struct mbox_client *cl, void *mssg)
static void slimpro_i2c_pcc_rx_cb(struct mbox_client *cl, void *msg)
{
struct slimpro_i2c_dev *ctx = to_slimpro_i2c_dev(cl);
- struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ ctx->pcc_chan->shmem;
/* Check if platform sends interrupt */
if (!xgene_word_tst_and_clr(&generic_comm_base->status,
@@ -169,7 +168,8 @@ static void slimpro_i2c_pcc_rx_cb(struct mbox_client *cl, void *msg)
static void slimpro_i2c_pcc_tx_prepare(struct slimpro_i2c_dev *ctx, u32 *msg)
{
- struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ ctx->pcc_chan->shmem;
u32 *ptr = (void *)(generic_comm_base + 1);
u16 status;
int i;
@@ -457,22 +457,18 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
cl->tx_block = true;
cl->rx_callback = slimpro_i2c_rx_cb;
ctx->mbox_chan = mbox_request_channel(cl, MAILBOX_I2C_INDEX);
- if (IS_ERR(ctx->mbox_chan)) {
- dev_err(&pdev->dev, "i2c mailbox channel request failed\n");
- return PTR_ERR(ctx->mbox_chan);
- }
+ if (IS_ERR(ctx->mbox_chan))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ctx->mbox_chan),
+ "i2c mailbox channel request failed\n");
} else {
struct pcc_mbox_chan *pcc_chan;
const struct acpi_device_id *acpi_id;
- int version = XGENE_SLIMPRO_I2C_V1;
acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
&pdev->dev);
if (!acpi_id)
return -EINVAL;
- version = (int)acpi_id->driver_data;
-
if (device_property_read_u32(&pdev->dev, "pcc-channel",
&ctx->mbox_idx))
ctx->mbox_idx = MAILBOX_I2C_INDEX;
@@ -480,48 +476,19 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
cl->tx_block = false;
cl->rx_callback = slimpro_i2c_pcc_rx_cb;
pcc_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
- if (IS_ERR(pcc_chan)) {
- dev_err(&pdev->dev, "PCC mailbox channel request failed\n");
- return PTR_ERR(pcc_chan);
- }
+ if (IS_ERR(pcc_chan))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pcc_chan),
+ "PCC mailbox channel request failed\n");
ctx->pcc_chan = pcc_chan;
ctx->mbox_chan = pcc_chan->mchan;
if (!ctx->mbox_chan->mbox->txdone_irq) {
- dev_err(&pdev->dev, "PCC IRQ not supported\n");
- rc = -ENOENT;
+ rc = dev_err_probe(&pdev->dev, -ENOENT,
+ "PCC IRQ not supported\n");
goto mbox_err;
}
- /*
- * This is the shared communication region
- * for the OS and Platform to communicate over.
- */
- ctx->comm_base_addr = pcc_chan->shmem_base_addr;
- if (ctx->comm_base_addr) {
- if (version == XGENE_SLIMPRO_I2C_V2)
- ctx->pcc_comm_addr = memremap(
- ctx->comm_base_addr,
- pcc_chan->shmem_size,
- MEMREMAP_WT);
- else
- ctx->pcc_comm_addr = memremap(
- ctx->comm_base_addr,
- pcc_chan->shmem_size,
- MEMREMAP_WB);
- } else {
- dev_err(&pdev->dev, "Failed to get PCC comm region\n");
- rc = -ENOENT;
- goto mbox_err;
- }
-
- if (!ctx->pcc_comm_addr) {
- dev_err(&pdev->dev,
- "Failed to ioremap PCC comm region\n");
- rc = -ENOMEM;
- goto mbox_err;
- }
}
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
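
The xgene-slimpro hunks above drop the driver's private memremap() of the PCC shared memory: the PCC mailbox core now hands clients a ready-to-use __iomem mapping in pcc_chan->shmem, so comm_base_addr/pcc_comm_addr and the version-dependent mapping logic can go away. A hedged sketch of the consumer side; struct foo_ctx and the register offset are made up:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <acpi/pcc.h>

struct foo_ctx {
	struct mbox_client cl;
	struct pcc_mbox_chan *pcc_chan;
};

static int foo_pcc_init(struct foo_ctx *ctx, int chan_id)
{
	ctx->pcc_chan = pcc_mbox_request_channel(&ctx->cl, chan_id);
	if (IS_ERR(ctx->pcc_chan))
		return PTR_ERR(ctx->pcc_chan);

	/* The PCC core already mapped the shared region for us. */
	writel(0, ctx->pcc_chan->shmem + 8);	/* hypothetical offset */
	return 0;
}
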
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index dc1e46d834dc..6bc1575cea6c 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -1489,7 +1489,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
pdev->name, i2c);
if (ret < 0) {
- dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ dev_err_probe(&pdev->dev, ret, "Cannot claim IRQ\n");
goto err_pm_disable;
}
@@ -1510,7 +1510,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
ret = xiic_reinit(i2c);
if (ret < 0) {
- dev_err(&pdev->dev, "Cannot xiic_reinit\n");
+ dev_err_probe(&pdev->dev, ret, "Cannot xiic_reinit\n");
goto err_pm_disable;
}
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 4d6abd7e92ce..06cf221557f2 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -500,10 +500,8 @@ static int scx200_probe(struct platform_device *pdev)
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!res) {
- dev_err(&pdev->dev, "can't fetch device resource info\n");
- return -ENODEV;
- }
+ if (!res)
+ return dev_err_probe(&pdev->dev, -ENODEV, "can't fetch device resource info\n");
iface = scx200_create_dev("CS5535", res->start, 0, &pdev->dev);
if (!iface)
diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c
index 783fb8df2ebe..be7d6d41e0b2 100644
--- a/drivers/i2c/i2c-atr.c
+++ b/drivers/i2c/i2c-atr.c
@@ -16,32 +16,65 @@
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/lockdep.h>
#define ATR_MAX_ADAPTERS 100 /* Just a sanity limit */
#define ATR_MAX_SYMLINK_LEN 11 /* Longest name is 10 chars: "channel-99" */
/**
- * struct i2c_atr_alias_pair - Holds the alias assigned to a client.
+ * struct i2c_atr_alias_pair - Holds the alias assigned to a client address.
* @node: List node
- * @client: Pointer to the client on the child bus
+ * @addr: Address of the client on the child bus.
* @alias: I2C alias address assigned by the driver.
* This is the address that will be used to issue I2C transactions
* on the parent (physical) bus.
+ * @fixed: Alias pair cannot be replaced during dynamic address attachment.
+ * This flag is necessary for situations where a single I2C transaction
+ * contains more distinct target addresses than the ATR channel can handle.
+ * It marks addresses that have already been attached to an alias so
+ * that their alias pair is not evicted by a subsequent address in the same
+ * transaction.
*/
struct i2c_atr_alias_pair {
struct list_head node;
- const struct i2c_client *client;
+ bool fixed;
+ u16 addr;
u16 alias;
};
/**
+ * struct i2c_atr_alias_pool - Pool of client aliases available for an ATR.
+ * @size: Total number of aliases
+ * @shared: Indicates if this alias pool is shared by multiple channels
+ *
+ * @lock: Lock protecting @aliases and @use_mask
+ * @aliases: Array of aliases, must hold exactly @size elements
+ * @use_mask: Mask of used aliases
+ */
+struct i2c_atr_alias_pool {
+ size_t size;
+ bool shared;
+
+ /* Protects aliases and use_mask */
+ spinlock_t lock;
+ u16 *aliases;
+ unsigned long *use_mask;
+};
+
+/**
* struct i2c_atr_chan - Data for a channel.
* @adap: The &struct i2c_adapter for the channel
* @atr: The parent I2C ATR
* @chan_id: The ID of this channel
- * @alias_list: List of @struct i2c_atr_alias_pair containing the
+ * @alias_pairs_lock: Mutex protecting @alias_pairs
+ * @alias_pairs_lock_key: Lock key for @alias_pairs_lock
+ * @alias_pairs: List of @struct i2c_atr_alias_pair containing the
* assigned aliases
+ * @alias_pool: Pool of available client aliases
+ *
* @orig_addrs_lock: Mutex protecting @orig_addrs
+ * @orig_addrs_lock_key: Lock key for @orig_addrs_lock
* @orig_addrs: Buffer used to store the original addresses during transmit
* @orig_addrs_size: Size of @orig_addrs
*/
@@ -50,10 +83,15 @@ struct i2c_atr_chan {
struct i2c_atr *atr;
u32 chan_id;
- struct list_head alias_list;
+ /* Lock alias_pairs during attach/detach */
+ struct mutex alias_pairs_lock;
+ struct lock_class_key alias_pairs_lock_key;
+ struct list_head alias_pairs;
+ struct i2c_atr_alias_pool *alias_pool;
/* Lock orig_addrs during xfer */
struct mutex orig_addrs_lock;
+ struct lock_class_key orig_addrs_lock_key;
u16 *orig_addrs;
unsigned int orig_addrs_size;
};
@@ -66,11 +104,10 @@ struct i2c_atr_chan {
* @priv: Private driver data, set with i2c_atr_set_driver_data()
* @algo: The &struct i2c_algorithm for adapters
* @lock: Lock for the I2C bus segment (see &struct i2c_lock_operations)
+ * @lock_key: Lock key for @lock
* @max_adapters: Maximum number of adapters this I2C ATR can have
- * @num_aliases: Number of aliases in the aliases array
- * @aliases: The aliases array
- * @alias_mask_lock: Lock protecting alias_use_mask
- * @alias_use_mask: Bitmask for used aliases in aliases array
+ * @flags: Flags for ATR
+ * @alias_pool: Optional common pool of available client aliases
* @i2c_nb: Notifier for remote client add & del events
* @adapter: Array of adapters
*/
@@ -84,27 +121,135 @@ struct i2c_atr {
struct i2c_algorithm algo;
/* lock for the I2C bus segment (see struct i2c_lock_operations) */
struct mutex lock;
+ struct lock_class_key lock_key;
int max_adapters;
+ u32 flags;
- size_t num_aliases;
- const u16 *aliases;
- /* Protects alias_use_mask */
- spinlock_t alias_mask_lock;
- unsigned long *alias_use_mask;
+ struct i2c_atr_alias_pool *alias_pool;
struct notifier_block i2c_nb;
struct i2c_adapter *adapter[] __counted_by(max_adapters);
};
+static struct i2c_atr_alias_pool *i2c_atr_alloc_alias_pool(size_t num_aliases, bool shared)
+{
+ struct i2c_atr_alias_pool *alias_pool;
+ int ret;
+
+ alias_pool = kzalloc(sizeof(*alias_pool), GFP_KERNEL);
+ if (!alias_pool)
+ return ERR_PTR(-ENOMEM);
+
+ alias_pool->size = num_aliases;
+
+ alias_pool->aliases = kcalloc(num_aliases, sizeof(*alias_pool->aliases), GFP_KERNEL);
+ if (!alias_pool->aliases) {
+ ret = -ENOMEM;
+ goto err_free_alias_pool;
+ }
+
+ alias_pool->use_mask = bitmap_zalloc(num_aliases, GFP_KERNEL);
+ if (!alias_pool->use_mask) {
+ ret = -ENOMEM;
+ goto err_free_aliases;
+ }
+
+ alias_pool->shared = shared;
+
+ spin_lock_init(&alias_pool->lock);
+
+ return alias_pool;
+
+err_free_aliases:
+ kfree(alias_pool->aliases);
+err_free_alias_pool:
+ kfree(alias_pool);
+ return ERR_PTR(ret);
+}
+
+static void i2c_atr_free_alias_pool(struct i2c_atr_alias_pool *alias_pool)
+{
+ bitmap_free(alias_pool->use_mask);
+ kfree(alias_pool->aliases);
+ kfree(alias_pool);
+}
+
+/* Must be called with alias_pairs_lock held */
+static struct i2c_atr_alias_pair *i2c_atr_create_c2a(struct i2c_atr_chan *chan,
+ u16 alias, u16 addr)
+{
+ struct i2c_atr_alias_pair *c2a;
+
+ lockdep_assert_held(&chan->alias_pairs_lock);
+
+ c2a = kzalloc(sizeof(*c2a), GFP_KERNEL);
+ if (!c2a)
+ return NULL;
+
+ c2a->addr = addr;
+ c2a->alias = alias;
+
+ list_add(&c2a->node, &chan->alias_pairs);
+
+ return c2a;
+}
+
+/* Must be called with alias_pairs_lock held */
+static void i2c_atr_destroy_c2a(struct i2c_atr_alias_pair **pc2a)
+{
+ list_del(&(*pc2a)->node);
+ kfree(*pc2a);
+ *pc2a = NULL;
+}
+
+static int i2c_atr_reserve_alias(struct i2c_atr_alias_pool *alias_pool)
+{
+ unsigned long idx;
+ u16 alias;
+
+ spin_lock(&alias_pool->lock);
+
+ idx = find_first_zero_bit(alias_pool->use_mask, alias_pool->size);
+ if (idx >= alias_pool->size) {
+ spin_unlock(&alias_pool->lock);
+ return -EBUSY;
+ }
+
+ set_bit(idx, alias_pool->use_mask);
+
+ alias = alias_pool->aliases[idx];
+
+ spin_unlock(&alias_pool->lock);
+ return alias;
+}
+
+static void i2c_atr_release_alias(struct i2c_atr_alias_pool *alias_pool, u16 alias)
+{
+ unsigned int idx;
+
+ spin_lock(&alias_pool->lock);
+
+ for (idx = 0; idx < alias_pool->size; ++idx) {
+ if (alias_pool->aliases[idx] == alias) {
+ clear_bit(idx, alias_pool->use_mask);
+ spin_unlock(&alias_pool->lock);
+ return;
+ }
+ }
+
+ spin_unlock(&alias_pool->lock);
+}
+
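
i2c_atr_reserve_alias()/i2c_atr_release_alias() above are the stock bitmap-allocator idiom: find_first_zero_bit() under the pool spinlock picks a slot, set_bit()/clear_bit() track ownership, and the value stored at that index is what callers actually receive. The same idiom reduced to a generic sketch (struct foo_pool is hypothetical):

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo_pool {
	spinlock_t lock;
	unsigned long *used;	/* from bitmap_zalloc(size, GFP_KERNEL) */
	u16 *ids;		/* the values handed out */
	size_t size;
};

static int foo_pool_get(struct foo_pool *p)
{
	unsigned long idx;
	int ret = -EBUSY;

	spin_lock(&p->lock);
	idx = find_first_zero_bit(p->used, p->size);
	if (idx < p->size) {
		set_bit(idx, p->used);
		ret = p->ids[idx];
	}
	spin_unlock(&p->lock);

	return ret;
}
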
static struct i2c_atr_alias_pair *
-i2c_atr_find_mapping_by_client(const struct list_head *list,
- const struct i2c_client *client)
+i2c_atr_find_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr)
{
struct i2c_atr_alias_pair *c2a;
- list_for_each_entry(c2a, list, node) {
- if (c2a->client == client)
+ lockdep_assert_held(&chan->alias_pairs_lock);
+
+ list_for_each_entry(c2a, &chan->alias_pairs, node) {
+ if (c2a->addr == addr)
return c2a;
}
@@ -112,18 +257,107 @@ i2c_atr_find_mapping_by_client(const struct list_head *list,
}
static struct i2c_atr_alias_pair *
-i2c_atr_find_mapping_by_addr(const struct list_head *list, u16 phys_addr)
+i2c_atr_create_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr)
{
+ struct i2c_atr *atr = chan->atr;
struct i2c_atr_alias_pair *c2a;
+ u16 alias;
+ int ret;
- list_for_each_entry(c2a, list, node) {
- if (c2a->client->addr == phys_addr)
- return c2a;
+ lockdep_assert_held(&chan->alias_pairs_lock);
+
+ ret = i2c_atr_reserve_alias(chan->alias_pool);
+ if (ret < 0)
+ return NULL;
+
+ alias = ret;
+
+ c2a = i2c_atr_create_c2a(chan, alias, addr);
+ if (!c2a)
+ goto err_release_alias;
+
+ ret = atr->ops->attach_addr(atr, chan->chan_id, c2a->addr, c2a->alias);
+ if (ret) {
+ dev_err(atr->dev, "failed to attach 0x%02x on channel %d: err %d\n",
+ addr, chan->chan_id, ret);
+ goto err_del_c2a;
}
+ return c2a;
+
+err_del_c2a:
+ i2c_atr_destroy_c2a(&c2a);
+err_release_alias:
+ i2c_atr_release_alias(chan->alias_pool, alias);
return NULL;
}
+static struct i2c_atr_alias_pair *
+i2c_atr_replace_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr)
+{
+ struct i2c_atr *atr = chan->atr;
+ struct i2c_atr_alias_pair *c2a;
+ struct list_head *alias_pairs;
+ bool found = false;
+ u16 alias;
+ int ret;
+
+ lockdep_assert_held(&chan->alias_pairs_lock);
+
+ alias_pairs = &chan->alias_pairs;
+
+ if (unlikely(list_empty(alias_pairs)))
+ return NULL;
+
+ list_for_each_entry_reverse(c2a, alias_pairs, node) {
+ if (!c2a->fixed) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return NULL;
+
+ atr->ops->detach_addr(atr, chan->chan_id, c2a->addr);
+ c2a->addr = addr;
+
+ list_move(&c2a->node, alias_pairs);
+
+ alias = c2a->alias;
+
+ ret = atr->ops->attach_addr(atr, chan->chan_id, c2a->addr, c2a->alias);
+ if (ret) {
+ dev_err(atr->dev, "failed to attach 0x%02x on channel %d: err %d\n",
+ addr, chan->chan_id, ret);
+ i2c_atr_destroy_c2a(&c2a);
+ i2c_atr_release_alias(chan->alias_pool, alias);
+ return NULL;
+ }
+
+ return c2a;
+}
+
+static struct i2c_atr_alias_pair *
+i2c_atr_get_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr)
+{
+ struct i2c_atr *atr = chan->atr;
+ struct i2c_atr_alias_pair *c2a;
+
+ c2a = i2c_atr_find_mapping_by_addr(chan, addr);
+ if (c2a)
+ return c2a;
+
+ if (atr->flags & I2C_ATR_F_STATIC)
+ return NULL;
+
+ c2a = i2c_atr_create_mapping_by_addr(chan, addr);
+ if (c2a)
+ return c2a;
+
+ return i2c_atr_replace_mapping_by_addr(chan, addr);
+}
+
/*
* Replace all message addresses with their aliases, saving the original
* addresses.
@@ -136,7 +370,7 @@ static int i2c_atr_map_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs,
{
struct i2c_atr *atr = chan->atr;
static struct i2c_atr_alias_pair *c2a;
- int i;
+ int i, ret = 0;
/* Ensure we have enough room to save the original addresses */
if (unlikely(chan->orig_addrs_size < num)) {
@@ -152,25 +386,36 @@ static int i2c_atr_map_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs,
chan->orig_addrs_size = num;
}
+ mutex_lock(&chan->alias_pairs_lock);
+
for (i = 0; i < num; i++) {
chan->orig_addrs[i] = msgs[i].addr;
- c2a = i2c_atr_find_mapping_by_addr(&chan->alias_list,
- msgs[i].addr);
+ c2a = i2c_atr_get_mapping_by_addr(chan, msgs[i].addr);
+
if (!c2a) {
+ if (atr->flags & I2C_ATR_F_PASSTHROUGH)
+ continue;
+
dev_err(atr->dev, "client 0x%02x not mapped!\n",
msgs[i].addr);
while (i--)
msgs[i].addr = chan->orig_addrs[i];
- return -ENXIO;
+ ret = -ENXIO;
+ goto out_unlock;
}
+ /* Prevent c2a from being overwritten by another client in this transaction */
+ c2a->fixed = true;
+
msgs[i].addr = c2a->alias;
}
- return 0;
+out_unlock:
+ mutex_unlock(&chan->alias_pairs_lock);
+ return ret;
}
/*
@@ -183,10 +428,24 @@ static int i2c_atr_map_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs,
static void i2c_atr_unmap_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs,
int num)
{
+ struct i2c_atr_alias_pair *c2a;
int i;
for (i = 0; i < num; i++)
msgs[i].addr = chan->orig_addrs[i];
+
+ mutex_lock(&chan->alias_pairs_lock);
+
+ if (unlikely(list_empty(&chan->alias_pairs)))
+ goto out_unlock;
+
+ /* Unfix c2a entries so that subsequent transfers can reuse their aliases */
+ list_for_each_entry(c2a, &chan->alias_pairs, node)
+ c2a->fixed = false;
+
+out_unlock:
+ mutex_unlock(&chan->alias_pairs_lock);
}
static int i2c_atr_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
@@ -224,14 +483,23 @@ static int i2c_atr_smbus_xfer(struct i2c_adapter *adap, u16 addr,
struct i2c_atr *atr = chan->atr;
struct i2c_adapter *parent = atr->parent;
struct i2c_atr_alias_pair *c2a;
+ u16 alias;
- c2a = i2c_atr_find_mapping_by_addr(&chan->alias_list, addr);
- if (!c2a) {
+ mutex_lock(&chan->alias_pairs_lock);
+
+ c2a = i2c_atr_get_mapping_by_addr(chan, addr);
+
+ if (!c2a && !(atr->flags & I2C_ATR_F_PASSTHROUGH)) {
dev_err(atr->dev, "client 0x%02x not mapped!\n", addr);
+ mutex_unlock(&chan->alias_pairs_lock);
return -ENXIO;
}
- return i2c_smbus_xfer(parent, c2a->alias, flags, read_write, command,
+ alias = c2a ? c2a->alias : addr;
+
+ mutex_unlock(&chan->alias_pairs_lock);
+
+ return i2c_smbus_xfer(parent, alias, flags, read_write, command,
size, data);
}
@@ -273,112 +541,60 @@ static const struct i2c_lock_operations i2c_atr_lock_ops = {
.unlock_bus = i2c_atr_unlock_bus,
};
-static int i2c_atr_reserve_alias(struct i2c_atr *atr)
-{
- unsigned long idx;
-
- spin_lock(&atr->alias_mask_lock);
-
- idx = find_first_zero_bit(atr->alias_use_mask, atr->num_aliases);
- if (idx >= atr->num_aliases) {
- spin_unlock(&atr->alias_mask_lock);
- dev_err(atr->dev, "failed to find a free alias\n");
- return -EBUSY;
- }
-
- set_bit(idx, atr->alias_use_mask);
-
- spin_unlock(&atr->alias_mask_lock);
-
- return atr->aliases[idx];
-}
-
-static void i2c_atr_release_alias(struct i2c_atr *atr, u16 alias)
-{
- unsigned int idx;
-
- spin_lock(&atr->alias_mask_lock);
-
- for (idx = 0; idx < atr->num_aliases; ++idx) {
- if (atr->aliases[idx] == alias) {
- clear_bit(idx, atr->alias_use_mask);
- spin_unlock(&atr->alias_mask_lock);
- return;
- }
- }
-
- spin_unlock(&atr->alias_mask_lock);
-
- /* This should never happen */
- dev_warn(atr->dev, "Unable to find mapped alias\n");
-}
-
-static int i2c_atr_attach_client(struct i2c_adapter *adapter,
- const struct i2c_client *client)
+static int i2c_atr_attach_addr(struct i2c_adapter *adapter,
+ u16 addr)
{
struct i2c_atr_chan *chan = adapter->algo_data;
struct i2c_atr *atr = chan->atr;
struct i2c_atr_alias_pair *c2a;
- u16 alias;
- int ret;
+ int ret = 0;
- ret = i2c_atr_reserve_alias(atr);
- if (ret < 0)
- return ret;
+ mutex_lock(&chan->alias_pairs_lock);
- alias = ret;
+ c2a = i2c_atr_create_mapping_by_addr(chan, addr);
+ if (!c2a && !(atr->flags & I2C_ATR_F_STATIC))
+ c2a = i2c_atr_replace_mapping_by_addr(chan, addr);
- c2a = kzalloc(sizeof(*c2a), GFP_KERNEL);
if (!c2a) {
- ret = -ENOMEM;
- goto err_release_alias;
+ dev_err(atr->dev, "failed to find a free alias\n");
+ ret = -EBUSY;
+ goto out_unlock;
}
- ret = atr->ops->attach_client(atr, chan->chan_id, client, alias);
- if (ret)
- goto err_free;
-
- dev_dbg(atr->dev, "chan%u: client 0x%02x mapped at alias 0x%02x (%s)\n",
- chan->chan_id, client->addr, alias, client->name);
-
- c2a->client = client;
- c2a->alias = alias;
- list_add(&c2a->node, &chan->alias_list);
-
- return 0;
-
-err_free:
- kfree(c2a);
-err_release_alias:
- i2c_atr_release_alias(atr, alias);
+ dev_dbg(atr->dev, "chan%u: using alias 0x%02x for addr 0x%02x\n",
+ chan->chan_id, c2a->alias, addr);
+out_unlock:
+ mutex_unlock(&chan->alias_pairs_lock);
return ret;
}
-static void i2c_atr_detach_client(struct i2c_adapter *adapter,
- const struct i2c_client *client)
+static void i2c_atr_detach_addr(struct i2c_adapter *adapter,
+ u16 addr)
{
struct i2c_atr_chan *chan = adapter->algo_data;
struct i2c_atr *atr = chan->atr;
struct i2c_atr_alias_pair *c2a;
- atr->ops->detach_client(atr, chan->chan_id, client);
+ atr->ops->detach_addr(atr, chan->chan_id, addr);
+
+ mutex_lock(&chan->alias_pairs_lock);
- c2a = i2c_atr_find_mapping_by_client(&chan->alias_list, client);
+ c2a = i2c_atr_find_mapping_by_addr(chan, addr);
if (!c2a) {
- /* This should never happen */
- dev_warn(atr->dev, "Unable to find address mapping\n");
+ mutex_unlock(&chan->alias_pairs_lock);
return;
}
- i2c_atr_release_alias(atr, c2a->alias);
+ i2c_atr_release_alias(chan->alias_pool, c2a->alias);
dev_dbg(atr->dev,
- "chan%u: client 0x%02x unmapped from alias 0x%02x (%s)\n",
- chan->chan_id, client->addr, c2a->alias, client->name);
+ "chan%u: detached alias 0x%02x from addr 0x%02x\n",
+ chan->chan_id, c2a->alias, addr);
- list_del(&c2a->node);
- kfree(c2a);
+ i2c_atr_destroy_c2a(&c2a);
+
+ mutex_unlock(&chan->alias_pairs_lock);
}
static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
@@ -405,7 +621,7 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
switch (event) {
case BUS_NOTIFY_ADD_DEVICE:
- ret = i2c_atr_attach_client(client->adapter, client);
+ ret = i2c_atr_attach_addr(client->adapter, client->addr);
if (ret)
dev_err(atr->dev,
"Failed to attach remote client '%s': %d\n",
@@ -413,7 +629,7 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
break;
case BUS_NOTIFY_REMOVED_DEVICE:
- i2c_atr_detach_client(client->adapter, client);
+ i2c_atr_detach_addr(client->adapter, client->addr);
break;
default:
@@ -425,29 +641,43 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
static int i2c_atr_parse_alias_pool(struct i2c_atr *atr)
{
+ struct i2c_atr_alias_pool *alias_pool;
struct device *dev = atr->dev;
- unsigned long *alias_use_mask;
size_t num_aliases;
unsigned int i;
u32 *aliases32;
- u16 *aliases16;
int ret;
- ret = fwnode_property_count_u32(dev_fwnode(dev), "i2c-alias-pool");
- if (ret < 0) {
- dev_err(dev, "Failed to count 'i2c-alias-pool' property: %d\n",
- ret);
+ if (!fwnode_property_present(dev_fwnode(dev), "i2c-alias-pool")) {
+ num_aliases = 0;
+ } else {
+ ret = fwnode_property_count_u32(dev_fwnode(dev), "i2c-alias-pool");
+ if (ret < 0) {
+ dev_err(dev, "Failed to count 'i2c-alias-pool' property: %d\n",
+ ret);
+ return ret;
+ }
+
+ num_aliases = ret;
+ }
+
+ alias_pool = i2c_atr_alloc_alias_pool(num_aliases, true);
+ if (IS_ERR(alias_pool)) {
+ ret = PTR_ERR(alias_pool);
+ dev_err(dev, "Failed to allocate alias pool, err %d\n", ret);
return ret;
}
- num_aliases = ret;
+ atr->alias_pool = alias_pool;
- if (!num_aliases)
+ if (!alias_pool->size)
return 0;
aliases32 = kcalloc(num_aliases, sizeof(*aliases32), GFP_KERNEL);
- if (!aliases32)
- return -ENOMEM;
+ if (!aliases32) {
+ ret = -ENOMEM;
+ goto err_free_alias_pool;
+ }
ret = fwnode_property_read_u32_array(dev_fwnode(dev), "i2c-alias-pool",
aliases32, num_aliases);
@@ -457,48 +687,33 @@ static int i2c_atr_parse_alias_pool(struct i2c_atr *atr)
goto err_free_aliases32;
}
- aliases16 = kcalloc(num_aliases, sizeof(*aliases16), GFP_KERNEL);
- if (!aliases16) {
- ret = -ENOMEM;
- goto err_free_aliases32;
- }
-
for (i = 0; i < num_aliases; i++) {
if (!(aliases32[i] & 0xffff0000)) {
- aliases16[i] = aliases32[i];
+ alias_pool->aliases[i] = aliases32[i];
continue;
}
dev_err(dev, "Failed to parse 'i2c-alias-pool' property: I2C flags are not supported\n");
ret = -EINVAL;
- goto err_free_aliases16;
- }
-
- alias_use_mask = bitmap_zalloc(num_aliases, GFP_KERNEL);
- if (!alias_use_mask) {
- ret = -ENOMEM;
- goto err_free_aliases16;
+ goto err_free_aliases32;
}
kfree(aliases32);
- atr->num_aliases = num_aliases;
- atr->aliases = aliases16;
- atr->alias_use_mask = alias_use_mask;
-
- dev_dbg(dev, "i2c-alias-pool has %zu aliases", atr->num_aliases);
+ dev_dbg(dev, "i2c-alias-pool has %zu aliases\n", alias_pool->size);
return 0;
-err_free_aliases16:
- kfree(aliases16);
err_free_aliases32:
kfree(aliases32);
+err_free_alias_pool:
+ i2c_atr_free_alias_pool(alias_pool);
return ret;
}
struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
- const struct i2c_atr_ops *ops, int max_adapters)
+ const struct i2c_atr_ops *ops, int max_adapters,
+ u32 flags)
{
struct i2c_atr *atr;
int ret;
@@ -506,20 +721,21 @@ struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
if (max_adapters > ATR_MAX_ADAPTERS)
return ERR_PTR(-EINVAL);
- if (!ops || !ops->attach_client || !ops->detach_client)
+ if (!ops || !ops->attach_addr || !ops->detach_addr)
return ERR_PTR(-EINVAL);
atr = kzalloc(struct_size(atr, adapter, max_adapters), GFP_KERNEL);
if (!atr)
return ERR_PTR(-ENOMEM);
- mutex_init(&atr->lock);
- spin_lock_init(&atr->alias_mask_lock);
+ lockdep_register_key(&atr->lock_key);
+ mutex_init_with_key(&atr->lock, &atr->lock_key);
atr->parent = parent;
atr->dev = dev;
atr->ops = ops;
atr->max_adapters = max_adapters;
+ atr->flags = flags;
if (parent->algo->master_xfer)
atr->algo.master_xfer = i2c_atr_master_xfer;
@@ -534,15 +750,15 @@ struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
atr->i2c_nb.notifier_call = i2c_atr_bus_notifier_call;
ret = bus_register_notifier(&i2c_bus_type, &atr->i2c_nb);
if (ret)
- goto err_free_aliases;
+ goto err_free_alias_pool;
return atr;
-err_free_aliases:
- bitmap_free(atr->alias_use_mask);
- kfree(atr->aliases);
+err_free_alias_pool:
+ i2c_atr_free_alias_pool(atr->alias_pool);
err_destroy_mutex:
mutex_destroy(&atr->lock);
+ lockdep_unregister_key(&atr->lock_key);
kfree(atr);
return ERR_PTR(ret);
@@ -557,22 +773,22 @@ void i2c_atr_delete(struct i2c_atr *atr)
WARN_ON(atr->adapter[i]);
bus_unregister_notifier(&i2c_bus_type, &atr->i2c_nb);
- bitmap_free(atr->alias_use_mask);
- kfree(atr->aliases);
+ i2c_atr_free_alias_pool(atr->alias_pool);
mutex_destroy(&atr->lock);
+ lockdep_unregister_key(&atr->lock_key);
kfree(atr);
}
EXPORT_SYMBOL_NS_GPL(i2c_atr_delete, "I2C_ATR");
-int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
- struct device *adapter_parent,
- struct fwnode_handle *bus_handle)
+int i2c_atr_add_adapter(struct i2c_atr *atr, struct i2c_atr_adap_desc *desc)
{
+ struct fwnode_handle *bus_handle = desc->bus_handle;
struct i2c_adapter *parent = atr->parent;
+ char symlink_name[ATR_MAX_SYMLINK_LEN];
struct device *dev = atr->dev;
+ u32 chan_id = desc->chan_id;
struct i2c_atr_chan *chan;
- char symlink_name[ATR_MAX_SYMLINK_LEN];
- int ret;
+ int ret, idx;
if (chan_id >= atr->max_adapters) {
dev_err(dev, "No room for more i2c-atr adapters\n");
@@ -588,20 +804,23 @@ int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
if (!chan)
return -ENOMEM;
- if (!adapter_parent)
- adapter_parent = dev;
+ if (!desc->parent)
+ desc->parent = dev;
chan->atr = atr;
chan->chan_id = chan_id;
- INIT_LIST_HEAD(&chan->alias_list);
- mutex_init(&chan->orig_addrs_lock);
+ INIT_LIST_HEAD(&chan->alias_pairs);
+ lockdep_register_key(&chan->alias_pairs_lock_key);
+ lockdep_register_key(&chan->orig_addrs_lock_key);
+ mutex_init_with_key(&chan->alias_pairs_lock, &chan->alias_pairs_lock_key);
+ mutex_init_with_key(&chan->orig_addrs_lock, &chan->orig_addrs_lock_key);
snprintf(chan->adap.name, sizeof(chan->adap.name), "i2c-%d-atr-%d",
i2c_adapter_id(parent), chan_id);
chan->adap.owner = THIS_MODULE;
chan->adap.algo = &atr->algo;
chan->adap.algo_data = chan;
- chan->adap.dev.parent = adapter_parent;
+ chan->adap.dev.parent = desc->parent;
chan->adap.retries = parent->retries;
chan->adap.timeout = parent->timeout;
chan->adap.quirks = parent->quirks;
@@ -628,13 +847,26 @@ int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
fwnode_handle_put(atr_node);
}
+ if (desc->num_aliases > 0) {
+ chan->alias_pool = i2c_atr_alloc_alias_pool(desc->num_aliases, false);
+ if (IS_ERR(chan->alias_pool)) {
+ ret = PTR_ERR(chan->alias_pool);
+ goto err_fwnode_put;
+ }
+
+ for (idx = 0; idx < desc->num_aliases; idx++)
+ chan->alias_pool->aliases[idx] = desc->aliases[idx];
+ } else {
+ chan->alias_pool = atr->alias_pool;
+ }
+
atr->adapter[chan_id] = &chan->adap;
ret = i2c_add_adapter(&chan->adap);
if (ret) {
dev_err(dev, "failed to add atr-adapter %u (error=%d)\n",
chan_id, ret);
- goto err_fwnode_put;
+ goto err_free_alias_pool;
}
snprintf(symlink_name, sizeof(symlink_name), "channel-%u",
@@ -651,9 +883,15 @@ int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
return 0;
+err_free_alias_pool:
+ if (!chan->alias_pool->shared)
+ i2c_atr_free_alias_pool(chan->alias_pool);
err_fwnode_put:
fwnode_handle_put(dev_fwnode(&chan->adap.dev));
mutex_destroy(&chan->orig_addrs_lock);
+ mutex_destroy(&chan->alias_pairs_lock);
+ lockdep_unregister_key(&chan->orig_addrs_lock_key);
+ lockdep_unregister_key(&chan->alias_pairs_lock_key);
kfree(chan);
return ret;
}
@@ -683,10 +921,16 @@ void i2c_atr_del_adapter(struct i2c_atr *atr, u32 chan_id)
i2c_del_adapter(adap);
+ if (!chan->alias_pool->shared)
+ i2c_atr_free_alias_pool(chan->alias_pool);
+
atr->adapter[chan_id] = NULL;
fwnode_handle_put(fwnode);
mutex_destroy(&chan->orig_addrs_lock);
+ mutex_destroy(&chan->alias_pairs_lock);
+ lockdep_unregister_key(&chan->orig_addrs_lock_key);
+ lockdep_unregister_key(&chan->alias_pairs_lock_key);
kfree(chan->orig_addrs);
kfree(chan);
}
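
Seen as a whole, the ATR rework changes the consumer-facing API in three ways: the ops operate on raw addresses (attach_addr/detach_addr) rather than i2c_client pointers, i2c_atr_new() gains a flags argument (I2C_ATR_F_STATIC and I2C_ATR_F_PASSTHROUGH appear in the code above), and i2c_atr_add_adapter() takes a struct i2c_atr_adap_desc. A hedged sketch of a caller, using only the fields visible in this diff; the foo_* driver is hypothetical:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/i2c-atr.h>

static int foo_attach_addr(struct i2c_atr *atr, u32 chan_id, u16 addr, u16 alias)
{
	/* Program the hardware translation addr -> alias here. */
	return 0;
}

static void foo_detach_addr(struct i2c_atr *atr, u32 chan_id, u16 addr)
{
	/* Undo the translation here. */
}

static const struct i2c_atr_ops foo_atr_ops = {
	.attach_addr = foo_attach_addr,
	.detach_addr = foo_detach_addr,
};

static int foo_setup_atr(struct device *dev, struct i2c_adapter *parent,
			 struct fwnode_handle *bus_handle)
{
	struct i2c_atr_adap_desc desc = {
		.chan_id = 0,
		.parent = dev,
		.bus_handle = bus_handle,
		.num_aliases = 0,	/* 0: fall back to the shared i2c-alias-pool */
	};
	struct i2c_atr *atr;

	atr = i2c_atr_new(parent, dev, &foo_atr_ops, 1, 0);
	if (IS_ERR(atr))
		return PTR_ERR(atr);

	return i2c_atr_add_adapter(atr, &desc);
}
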
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 7ad1ad5c8c3f..2ad2b1838f0f 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -26,14 +26,13 @@
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/irqflags.h>
+#include <linux/irq.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of.h>
-#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/pm_domain.h>
@@ -42,6 +41,7 @@
#include <linux/property.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include "i2c-core.h"
@@ -490,6 +490,7 @@ static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client)
static int i2c_device_probe(struct device *dev)
{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
struct i2c_client *client = i2c_verify_client(dev);
struct i2c_driver *driver;
bool do_power_on;
@@ -508,11 +509,11 @@ static int i2c_device_probe(struct device *dev)
/* Keep adapter active when Host Notify is required */
pm_runtime_get_sync(&client->adapter->dev);
irq = i2c_smbus_host_notify_to_irq(client);
- } else if (dev->of_node) {
- irq = of_irq_get_byname(dev->of_node, "irq");
+ } else if (is_of_node(fwnode)) {
+ irq = fwnode_irq_get_byname(fwnode, "irq");
if (irq == -EINVAL || irq == -ENODATA)
- irq = of_irq_get(dev->of_node, 0);
- } else if (ACPI_COMPANION(dev)) {
+ irq = fwnode_irq_get(fwnode, 0);
+ } else if (is_acpi_device_node(fwnode)) {
bool wake_capable;
irq = i2c_acpi_get_irq(client, &wake_capable);
@@ -520,7 +521,7 @@ static int i2c_device_probe(struct device *dev)
client->flags |= I2C_CLIENT_WAKE;
}
if (irq == -EPROBE_DEFER) {
- status = irq;
+ status = dev_err_probe(dev, irq, "can't get irq\n");
goto put_sync_adapter;
}
@@ -546,9 +547,9 @@ static int i2c_device_probe(struct device *dev)
if (client->flags & I2C_CLIENT_WAKE) {
int wakeirq;
- wakeirq = of_irq_get_byname(dev->of_node, "wakeup");
+ wakeirq = fwnode_irq_get_byname(fwnode, "wakeup");
if (wakeirq == -EPROBE_DEFER) {
- status = wakeirq;
+ status = dev_err_probe(dev, wakeirq, "can't get wakeirq\n");
goto put_sync_adapter;
}
@@ -567,7 +568,7 @@ static int i2c_device_probe(struct device *dev)
dev_dbg(dev, "probe\n");
- status = of_clk_set_defaults(dev->of_node, false);
+ status = of_clk_set_defaults(to_of_node(fwnode), false);
if (status < 0)
goto err_clear_wakeup_irq;
@@ -961,6 +962,7 @@ static void i2c_unlock_addr(struct i2c_adapter *adap, unsigned short addr,
struct i2c_client *
i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
{
+ struct fwnode_handle *fwnode = info->fwnode;
struct i2c_client *client;
bool need_put = false;
int status;
@@ -1001,18 +1003,18 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
client->dev.parent = &client->adapter->dev;
client->dev.bus = &i2c_bus_type;
client->dev.type = &i2c_client_type;
- client->dev.of_node = of_node_get(info->of_node);
- client->dev.fwnode = info->fwnode;
device_enable_async_suspend(&client->dev);
+ device_set_node(&client->dev, fwnode_handle_get(fwnode));
+
if (info->swnode) {
status = device_add_software_node(&client->dev, info->swnode);
if (status) {
dev_err(&adap->dev,
"Failed to add software node to client %s: %d\n",
client->name, status);
- goto out_err_put_of_node;
+ goto out_err_put_fwnode;
}
}
@@ -1031,8 +1033,8 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
out_remove_swnode:
device_remove_software_node(&client->dev);
need_put = true;
-out_err_put_of_node:
- of_node_put(info->of_node);
+out_err_put_fwnode:
+ fwnode_handle_put(fwnode);
out_err:
dev_err(&adap->dev,
"Failed to register i2c client %s at 0x%02x (%d)\n",
@@ -1054,16 +1056,17 @@ EXPORT_SYMBOL_GPL(i2c_new_client_device);
*/
void i2c_unregister_device(struct i2c_client *client)
{
+ struct fwnode_handle *fwnode;
+
if (IS_ERR_OR_NULL(client))
return;
- if (client->dev.of_node) {
- of_node_clear_flag(client->dev.of_node, OF_POPULATED);
- of_node_put(client->dev.of_node);
- }
-
- if (ACPI_COMPANION(&client->dev))
- acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
+ fwnode = dev_fwnode(&client->dev);
+ if (is_of_node(fwnode))
+ of_node_clear_flag(to_of_node(fwnode), OF_POPULATED);
+ else if (is_acpi_device_node(fwnode))
+ acpi_device_clear_enumerated(to_acpi_device_node(fwnode));
+ fwnode_handle_put(fwnode);
device_remove_software_node(&client->dev);
device_unregister(&client->dev);
@@ -1209,11 +1212,9 @@ struct i2c_client *i2c_new_ancillary_device(struct i2c_client *client,
u32 addr = default_addr;
int i;
- if (np) {
- i = of_property_match_string(np, "reg-names", name);
- if (i >= 0)
- of_property_read_u32_index(np, "reg", i, &addr);
- }
+ i = of_property_match_string(np, "reg-names", name);
+ if (i >= 0)
+ of_property_read_u32_index(np, "reg", i, &addr);
dev_dbg(&client->adapter->dev, "Address for %s : 0x%x\n", name, addr);
return i2c_new_dummy_device(client->adapter, addr);
@@ -1651,12 +1652,10 @@ int i2c_add_adapter(struct i2c_adapter *adapter)
struct device *dev = &adapter->dev;
int id;
- if (dev->of_node) {
- id = of_alias_get_id(dev->of_node, "i2c");
- if (id >= 0) {
- adapter->nr = id;
- return __i2c_add_numbered_adapter(adapter);
- }
+ id = of_alias_get_id(dev->of_node, "i2c");
+ if (id >= 0) {
+ adapter->nr = id;
+ return __i2c_add_numbered_adapter(adapter);
}
mutex_lock(&core_lock);
@@ -2146,7 +2145,7 @@ static int i2c_quirk_error(struct i2c_adapter *adap, struct i2c_msg *msg, char *
{
dev_err_ratelimited(&adap->dev, "adapter quirk: %s (addr 0x%04x, size %u, %s)\n",
err_msg, msg->addr, msg->len,
- msg->flags & I2C_M_RD ? "read" : "write");
+ str_read_write(msg->flags & I2C_M_RD));
return -EOPNOTSUPP;
}
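
The i2c_device_probe() changes above swap the OF-only of_irq_get*() calls for fwnode equivalents, so one lookup path serves both devicetree and ACPI clients, and -EPROBE_DEFER is now reported via dev_err_probe(). The devicetree leg of that lookup, reduced to a sketch (foo_get_irq() is hypothetical):

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/property.h>

static int foo_get_irq(struct device *dev)
{
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	int irq = -ENXIO;

	if (is_of_node(fwnode)) {
		/* Prefer the interrupt named "irq", fall back to index 0. */
		irq = fwnode_irq_get_byname(fwnode, "irq");
		if (irq == -EINVAL || irq == -ENODATA)
			irq = fwnode_irq_get(fwnode, 0);
	}

	return irq;	/* may be -EPROBE_DEFER; must be passed up as-is */
}
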
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index 02feee6c9ba9..eb7fb202355f 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -49,7 +49,6 @@ int of_i2c_get_board_info(struct device *dev, struct device_node *node,
}
info->addr = addr;
- info->of_node = node;
info->fwnode = of_fwnode_handle(node);
if (of_property_read_bool(node, "host-notify"))
diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c
index faefe1dfa8e5..7ee6b992b835 100644
--- a/drivers/i2c/i2c-core-slave.c
+++ b/drivers/i2c/i2c-core-slave.c
@@ -11,6 +11,7 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/of.h>
+#include <linux/property.h>
#include "i2c-core.h"
@@ -108,15 +109,18 @@ EXPORT_SYMBOL_GPL(i2c_slave_event);
*/
bool i2c_detect_slave_mode(struct device *dev)
{
- if (IS_BUILTIN(CONFIG_OF) && dev->of_node) {
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+ if (is_of_node(fwnode)) {
+ struct fwnode_handle *child __free(fwnode_handle) = NULL;
u32 reg;
- for_each_child_of_node_scoped(dev->of_node, child) {
- of_property_read_u32(child, "reg", &reg);
+ fwnode_for_each_child_node(fwnode, child) {
+ fwnode_property_read_u32(child, "reg", &reg);
if (reg & I2C_OWN_SLAVE_ADDRESS)
return true;
}
- } else if (IS_BUILTIN(CONFIG_ACPI) && ACPI_HANDLE(dev)) {
+ } else if (is_acpi_device_node(fwnode)) {
dev_dbg(dev, "ACPI slave is not supported yet\n");
}
return false;
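
i2c_detect_slave_mode() above now iterates firmware-node children, and the scoped __free(fwnode_handle) cleanup means the early `return true` from inside the loop drops the held child reference automatically, where the old OF iterator needed explicit put semantics. The idiom in isolation, with a hypothetical flag bit:

#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/property.h>

static bool foo_has_flagged_child(struct device *dev)
{
	struct fwnode_handle *child __free(fwnode_handle) = NULL;
	u32 reg;

	fwnode_for_each_child_node(dev_fwnode(dev), child) {
		if (!fwnode_property_read_u32(child, "reg", &reg) &&
		    (reg & BIT(30)))	/* hypothetical flag bit */
			return true;	/* __free() puts the held child ref */
	}

	return false;
}
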
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index e73afbefe222..71eb1ef56f0c 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -16,6 +16,7 @@
#include <linux/i2c-smbus.h>
#include <linux/property.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include "i2c-core.h"
@@ -433,7 +434,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
case I2C_SMBUS_I2C_BLOCK_DATA:
if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
dev_err(&adapter->dev, "Invalid block %s size %d\n",
- read_write == I2C_SMBUS_READ ? "read" : "write",
+ str_read_write(read_write == I2C_SMBUS_READ),
data->block[0]);
return -EINVAL;
}
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 7d40e7aa3799..0316b347f9e7 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -372,12 +372,13 @@ EXPORT_SYMBOL_GPL(i2c_free_slave_host_notify_device);
* - Only works on systems with 1 to 8 memory slots
*/
#if IS_ENABLED(CONFIG_DMI)
-void i2c_register_spd(struct i2c_adapter *adap)
+static void i2c_register_spd(struct i2c_adapter *adap, bool write_disabled)
{
int n, slot_count = 0, dimm_count = 0;
u16 handle;
u8 common_mem_type = 0x0, mem_type;
u64 mem_size;
+ bool instantiate = true;
const char *name;
while ((handle = dmi_memdev_handle(slot_count)) != 0xffff) {
@@ -438,6 +439,7 @@ void i2c_register_spd(struct i2c_adapter *adap)
case 0x22: /* DDR5 */
case 0x23: /* LPDDR5 */
name = "spd5118";
+ instantiate = !write_disabled;
break;
default:
dev_info(&adap->dev,
@@ -461,6 +463,9 @@ void i2c_register_spd(struct i2c_adapter *adap)
addr_list[0] = 0x50 + n;
addr_list[1] = I2C_CLIENT_END;
+ if (!instantiate)
+ continue;
+
if (!IS_ERR(i2c_new_scanned_device(adap, &info, addr_list, NULL))) {
dev_info(&adap->dev,
"Successfully instantiated SPD at 0x%hx\n",
@@ -469,7 +474,19 @@ void i2c_register_spd(struct i2c_adapter *adap)
}
}
}
-EXPORT_SYMBOL_GPL(i2c_register_spd);
+
+void i2c_register_spd_write_disable(struct i2c_adapter *adap)
+{
+ i2c_register_spd(adap, true);
+}
+EXPORT_SYMBOL_GPL(i2c_register_spd_write_disable);
+
+void i2c_register_spd_write_enable(struct i2c_adapter *adap)
+{
+ i2c_register_spd(adap, false);
+}
+EXPORT_SYMBOL_GPL(i2c_register_spd_write_enable);
+
#endif
MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
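
The SPD rework above splits the exported entry point in two so bus drivers must state whether SPD writes are possible: spd5118 (DDR5/LPDDR5) devices are writable, so the write-disable variant skips instantiating them while the older, read-only SPD EEPROM types are still registered. A sketch of a call site, assuming the two helpers are declared in <linux/i2c-smbus.h> as introduced here; writes_safe is a made-up condition:

#include <linux/i2c-smbus.h>

static void foo_register_spd(struct i2c_adapter *adap, bool writes_safe)
{
	/*
	 * Only instantiate writable spd5118 devices when the bus driver
	 * knows userspace cannot corrupt the SPD contents.
	 */
	if (writes_safe)
		i2c_register_spd_write_enable(adap);
	else
		i2c_register_spd_write_disable(adap);
}
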
diff --git a/drivers/i2c/muxes/i2c-mux-ltc4306.c b/drivers/i2c/muxes/i2c-mux-ltc4306.c
index 8a87f19bf5d5..c688af270a11 100644
--- a/drivers/i2c/muxes/i2c-mux-ltc4306.c
+++ b/drivers/i2c/muxes/i2c-mux-ltc4306.c
@@ -85,13 +85,13 @@ static int ltc4306_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(1 - offset));
}
-static void ltc4306_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int ltc4306_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct ltc4306 *data = gpiochip_get_data(chip);
- regmap_update_bits(data->regmap, LTC_REG_CONFIG, BIT(5 - offset),
- value ? BIT(5 - offset) : 0);
+ return regmap_update_bits(data->regmap, LTC_REG_CONFIG,
+ BIT(5 - offset), value ? BIT(5 - offset) : 0);
}
static int ltc4306_gpio_get_direction(struct gpio_chip *chip,
@@ -164,7 +164,7 @@ static int ltc4306_gpio_init(struct ltc4306 *data)
data->gpiochip.direction_input = ltc4306_gpio_direction_input;
data->gpiochip.direction_output = ltc4306_gpio_direction_output;
data->gpiochip.get = ltc4306_gpio_get;
- data->gpiochip.set = ltc4306_gpio_set;
+ data->gpiochip.set_rv = ltc4306_gpio_set;
data->gpiochip.set_config = ltc4306_gpio_set_config;
data->gpiochip.owner = THIS_MODULE;
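
ltc4306 above is converted to the int-returning GPIO setter: assigning .set_rv instead of .set lets the regmap error propagate back into gpiolib instead of being silently dropped. The shape of such a callback, assuming a regmap-backed chip (the register offset is hypothetical):

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>

static int foo_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	struct regmap *map = gpiochip_get_data(chip);

	/* A non-zero return now reaches the gpiolib caller. */
	return regmap_update_bits(map, 0x04 /* hypothetical register */,
				  BIT(offset), value ? BIT(offset) : 0);
}

/* wired up with:  chip->set_rv = foo_gpio_set;  */
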
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index db95113a5b49..5bb26af0f532 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -442,9 +442,9 @@ static int pca954x_irq_setup(struct i2c_mux_core *muxc)
raw_spin_lock_init(&data->lock);
- data->irq = irq_domain_add_linear(client->dev.of_node,
- data->chip->nchans,
- &irq_domain_simple_ops, data);
+ data->irq = irq_domain_create_linear(of_fwnode_handle(client->dev.of_node),
+ data->chip->nchans,
+ &irq_domain_simple_ops, data);
if (!data->irq)
return -ENODEV;
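
irq_domain_add_linear() and the other of_node-based constructors are being retired in favour of the fwnode-based irq_domain_create_*() family; of_fwnode_handle() bridges an OF-only driver across, as in the pca954x hunk above. A one-function sketch (foo_create_domain() is hypothetical):

#include <linux/irqdomain.h>
#include <linux/property.h>

static struct irq_domain *foo_create_domain(struct device *dev,
					    unsigned int nr_irqs, void *data)
{
	/* Same semantics as irq_domain_add_linear(), fwnode-based API. */
	return irq_domain_create_linear(dev_fwnode(dev), nr_irqs,
					&irq_domain_simple_ops, data);
}
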
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
index 77da199c7413..7b30db3253af 100644
--- a/drivers/i3c/master/Kconfig
+++ b/drivers/i3c/master/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config CDNS_I3C_MASTER
tristate "Cadence I3C master driver"
- depends on I3C
depends on HAS_IOMEM
depends on !(ALPHA || PARISC)
help
@@ -9,7 +8,6 @@ config CDNS_I3C_MASTER
config DW_I3C_MASTER
tristate "Synospsys DesignWare I3C master driver"
- depends on I3C
depends on HAS_IOMEM
depends on !(ALPHA || PARISC)
# ALPHA and PARISC needs {read,write}sl()
@@ -38,7 +36,6 @@ config AST2600_I3C_MASTER
config SVC_I3C_MASTER
tristate "Silvaco I3C Dual-Role Master driver"
- depends on I3C
depends on HAS_IOMEM
depends on !(ALPHA || PARISC)
help
@@ -46,7 +43,6 @@ config SVC_I3C_MASTER
config MIPI_I3C_HCI
tristate "MIPI I3C Host Controller Interface driver (EXPERIMENTAL)"
- depends on I3C
depends on HAS_IOMEM
help
Support for hardware following the MIPI Alliance's I3C Host Controller
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index a71226d7ca59..bc4538694540 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -78,7 +78,7 @@
#define INTR_SIGNAL_ENABLE 0x28
#define INTR_FORCE 0x2c
#define INTR_HC_CMD_SEQ_UFLOW_STAT BIT(12) /* Cmd Sequence Underflow */
-#define INTR_HC_RESET_CANCEL BIT(11) /* HC Cancelled Reset */
+#define INTR_HC_SEQ_CANCEL BIT(11) /* HC Cancelled Transaction Sequence */
#define INTR_HC_INTERNAL_ERR BIT(10) /* HC Internal Error */
#define DAT_SECTION 0x30 /* Device Address Table */
@@ -590,26 +590,27 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
u32 val;
val = reg_read(INTR_STATUS);
+ reg_write(INTR_STATUS, val);
DBG("INTR_STATUS = %#x", val);
- if (val) {
- reg_write(INTR_STATUS, val);
- }
+ if (val)
+ result = IRQ_HANDLED;
- if (val & INTR_HC_RESET_CANCEL) {
- DBG("cancelled reset");
- val &= ~INTR_HC_RESET_CANCEL;
+ if (val & INTR_HC_SEQ_CANCEL) {
+ dev_dbg(&hci->master.dev,
+ "Host Controller Cancelled Transaction Sequence\n");
+ val &= ~INTR_HC_SEQ_CANCEL;
}
if (val & INTR_HC_INTERNAL_ERR) {
dev_err(&hci->master.dev, "Host Controller Internal Error\n");
val &= ~INTR_HC_INTERNAL_ERR;
}
- hci->io->irq_handler(hci);
-
if (val)
- dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val);
- else
+ dev_warn_once(&hci->master.dev,
+ "unexpected INTR_STATUS %#x\n", val);
+
+ if (hci->io->irq_handler(hci))
result = IRQ_HANDLED;
return result;
@@ -699,9 +700,14 @@ static int i3c_hci_init(struct i3c_hci *hci)
if (ret)
return -ENXIO;
- /* Disable all interrupts and allow all signal updates */
+ /* Disable all interrupts */
reg_write(INTR_SIGNAL_ENABLE, 0x0);
- reg_write(INTR_STATUS_ENABLE, 0xffffffff);
+ /*
+ * Only allow signal updates on bits 31:10, because
+ * bits 0:9 are reserved in IP version >= 0.8, and
+ * bits 0:5 are defined in IP version < 0.8 but not handled by the PIO code.
+ */
+ reg_write(INTR_STATUS_ENABLE, GENMASK(31, 10));
/* Make sure our data ordering fits the host's */
regval = reg_read(HC_CONTROL);
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 85e16de208d3..7e1a7cb94b43 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -201,11 +201,10 @@ struct svc_i3c_drvdata {
* @addrs: Array containing the dynamic addresses of each attached device
* @descs: Array of descriptors, one per attached device
* @hj_work: Hot-join work
- * @ibi_work: IBI work
* @irq: Main interrupt
- * @pclk: System clock
* @num_clks: Number of I3C clocks
* @fclk: Fast clock (bus)
- * @sclk: Slow clock (other events)
+ * @clks: I3C clock array
* @xferqueue: Transfer queue structure
* @xferqueue.list: List member
* @xferqueue.cur: Current ongoing transfer
@@ -229,11 +228,10 @@ struct svc_i3c_master {
u8 addrs[SVC_I3C_MAX_DEVS];
struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
struct work_struct hj_work;
- struct work_struct ibi_work;
int irq;
- struct clk *pclk;
+ int num_clks;
struct clk *fclk;
- struct clk *sclk;
+ struct clk_bulk_data *clks;
struct {
struct list_head list;
struct svc_i3c_xfer *cur;
@@ -487,9 +485,8 @@ static int svc_i3c_master_handle_ibi_won(struct svc_i3c_master *master, u32 msta
return ret;
}
-static void svc_i3c_master_ibi_work(struct work_struct *work)
+static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master)
{
- struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
struct svc_i3c_i2c_dev_data *data;
unsigned int ibitype, ibiaddr;
struct i3c_dev_desc *dev;
@@ -504,7 +501,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
* schedule during the whole I3C transaction, otherwise, the I3C bus timeout may happen if
* any irq or schedule happen during transaction.
*/
- guard(spinlock_irqsave)(&master->xferqueue.lock);
+ guard(spinlock)(&master->xferqueue.lock);
/*
* IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
@@ -530,7 +527,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
if (ret) {
dev_err(master->dev, "Timeout when polling for IBIWON\n");
svc_i3c_master_emit_stop(master);
- goto reenable_ibis;
+ return;
}
status = readl(master->regs + SVC_I3C_MSTATUS);
@@ -574,17 +571,17 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
svc_i3c_master_emit_stop(master);
- goto reenable_ibis;
+ return;
}
/* Handle the non critical tasks */
switch (ibitype) {
case SVC_I3C_MSTATUS_IBITYPE_IBI:
+ svc_i3c_master_emit_stop(master);
if (dev) {
i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
master->ibi.tbq_slot = NULL;
}
- svc_i3c_master_emit_stop(master);
break;
case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
svc_i3c_master_emit_stop(master);
@@ -597,9 +594,6 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
default:
break;
}
-
-reenable_ibis:
- svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
}
static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
@@ -618,10 +612,12 @@ static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
!SVC_I3C_MSTATUS_STATE_SLVREQ(active))
return IRQ_HANDLED;
- svc_i3c_master_disable_interrupts(master);
-
- /* Handle the interrupt in a non atomic context */
- queue_work(master->base.wq, &master->ibi_work);
+ /*
+ * The SDA line remains low until the request is processed.
+ * Receive the request in the interrupt context to respond promptly
+ * and restore the bus to idle state.
+ */
+ svc_i3c_master_ibi_isr(master);
return IRQ_HANDLED;
}
@@ -1281,9 +1277,9 @@ static int svc_i3c_master_write(struct svc_i3c_master *master,
static int svc_i3c_master_xfer(struct svc_i3c_master *master,
bool rnw, unsigned int xfer_type, u8 addr,
u8 *in, const u8 *out, unsigned int xfer_len,
- unsigned int *actual_len, bool continued)
+ unsigned int *actual_len, bool continued, bool repeat_start)
{
- int retry = 2;
+ int retry = repeat_start ? 1 : 2;
u32 reg;
int ret;
@@ -1468,7 +1464,7 @@ static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
cmd->addr, cmd->in, cmd->out,
cmd->len, &cmd->actual_len,
- cmd->continued);
+ cmd->continued, i > 0);
/* cmd->xfer is NULL if I2C or CCC transfer */
if (cmd->xfer)
cmd->xfer->actual_len = cmd->actual_len;
@@ -1875,42 +1871,11 @@ static const struct i3c_master_controller_ops svc_i3c_master_ops = {
.set_speed = svc_i3c_master_set_speed,
};
-static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
-{
- int ret = 0;
-
- ret = clk_prepare_enable(master->pclk);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(master->fclk);
- if (ret) {
- clk_disable_unprepare(master->pclk);
- return ret;
- }
-
- ret = clk_prepare_enable(master->sclk);
- if (ret) {
- clk_disable_unprepare(master->pclk);
- clk_disable_unprepare(master->fclk);
- return ret;
- }
-
- return 0;
-}
-
-static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
-{
- clk_disable_unprepare(master->pclk);
- clk_disable_unprepare(master->fclk);
- clk_disable_unprepare(master->sclk);
-}
-
static int svc_i3c_master_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct svc_i3c_master *master;
- int ret;
+ int ret, i;
master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
if (!master)
@@ -1924,30 +1889,33 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
- master->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(master->pclk))
- return PTR_ERR(master->pclk);
+ master->num_clks = devm_clk_bulk_get_all(dev, &master->clks);
+ if (master->num_clks < 0)
+ return dev_err_probe(dev, -EINVAL, "can't get I3C clocks\n");
- master->fclk = devm_clk_get(dev, "fast_clk");
+ for (i = 0; i < master->num_clks; i++) {
+ if (!strcmp(master->clks[i].id, "fast_clk"))
+ break;
+ }
+
+ if (i == master->num_clks)
+ return dev_err_probe(dev, -EINVAL,
+ "can't get I3C peripheral clock\n");
+
+ master->fclk = master->clks[i].clk;
if (IS_ERR(master->fclk))
return PTR_ERR(master->fclk);
- master->sclk = devm_clk_get(dev, "slow_clk");
- if (IS_ERR(master->sclk))
- return PTR_ERR(master->sclk);
-
master->irq = platform_get_irq(pdev, 0);
if (master->irq < 0)
return master->irq;
master->dev = dev;
-
- ret = svc_i3c_master_prepare_clks(master);
+ ret = clk_bulk_prepare_enable(master->num_clks, master->clks);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "can't enable I3C clocks\n");
INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
- INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
mutex_init(&master->lock);
ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
@@ -1998,7 +1966,7 @@ rpm_disable:
pm_runtime_set_suspended(&pdev->dev);
err_disable_clks:
- svc_i3c_master_unprepare_clks(master);
+ clk_bulk_disable_unprepare(master->num_clks, master->clks);
return ret;
}
@@ -2036,7 +2004,7 @@ static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
struct svc_i3c_master *master = dev_get_drvdata(dev);
svc_i3c_save_regs(master);
- svc_i3c_master_unprepare_clks(master);
+ clk_bulk_disable_unprepare(master->num_clks, master->clks);
pinctrl_pm_select_sleep_state(dev);
return 0;
@@ -2045,9 +2013,12 @@ static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
{
struct svc_i3c_master *master = dev_get_drvdata(dev);
+ int ret;
pinctrl_pm_select_default_state(dev);
- svc_i3c_master_prepare_clks(master);
+ ret = clk_bulk_prepare_enable(master->num_clks, master->clks);
+ if (ret)
+ return ret;
svc_i3c_restore_regs(master);
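
The Silvaco clock handling above trades three named clocks plus hand-rolled enable/disable ordering for the clk bulk API: devm_clk_bulk_get_all() grabs every clock in the node, clk_bulk_prepare_enable()/clk_bulk_disable_unprepare() act on the whole set, and the one clock the driver needs by rate ("fast_clk") is fished out of the array by id. A condensed sketch of that probe-time flow; struct foo_priv is hypothetical:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/string.h>

struct foo_priv {
	int num_clks;
	struct clk_bulk_data *clks;
	struct clk *fclk;
};

static int foo_get_clks(struct device *dev, struct foo_priv *priv)
{
	int i, ret;

	priv->num_clks = devm_clk_bulk_get_all(dev, &priv->clks);
	if (priv->num_clks < 0)
		return priv->num_clks;

	for (i = 0; i < priv->num_clks; i++)
		if (priv->clks[i].id && !strcmp(priv->clks[i].id, "fast_clk"))
			priv->fclk = priv->clks[i].clk;
	if (!priv->fclk)
		return -EINVAL;

	ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks);
	if (ret)
		return ret;

	/* Mirror with clk_bulk_disable_unprepare(num_clks, clks) on teardown. */
	return 0;
}
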
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 976f5be54e36..8ccb483204fa 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -48,14 +48,17 @@
#include <trace/events/power.h>
#include <linux/sched.h>
#include <linux/sched/smt.h>
+#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
-#include <asm/cpuid.h>
+#include <linux/sysfs.h>
+#include <asm/cpuid/api.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
+#include <asm/msr.h>
#include <asm/tsc.h>
#include <asm/fpu/api.h>
#include <asm/smp.h>
@@ -92,9 +95,15 @@ struct idle_cpu {
*/
unsigned long auto_demotion_disable_flags;
bool disable_promotion_to_c1e;
+ bool c1_demotion_supported;
bool use_acpi;
};
+static bool c1_demotion_supported;
+static DEFINE_MUTEX(c1_demotion_mutex);
+
+static struct device *sysfs_root __initdata;
+
static const struct idle_cpu *icpu __initdata;
static struct cpuidle_state *cpuidle_state_table __initdata;
@@ -1549,18 +1558,21 @@ static const struct idle_cpu idle_cpu_gmt __initconst = {
static const struct idle_cpu idle_cpu_spr __initconst = {
.state_table = spr_cstates,
.disable_promotion_to_c1e = true,
+ .c1_demotion_supported = true,
.use_acpi = true,
};
static const struct idle_cpu idle_cpu_gnr __initconst = {
.state_table = gnr_cstates,
.disable_promotion_to_c1e = true,
+ .c1_demotion_supported = true,
.use_acpi = true,
};
static const struct idle_cpu idle_cpu_gnrd __initconst = {
.state_table = gnrd_cstates,
.disable_promotion_to_c1e = true,
+ .c1_demotion_supported = true,
.use_acpi = true,
};
@@ -1599,12 +1611,14 @@ static const struct idle_cpu idle_cpu_snr __initconst = {
static const struct idle_cpu idle_cpu_grr __initconst = {
.state_table = grr_cstates,
.disable_promotion_to_c1e = true,
+ .c1_demotion_supported = true,
.use_acpi = true,
};
static const struct idle_cpu idle_cpu_srf __initconst = {
.state_table = srf_cstates,
.disable_promotion_to_c1e = true,
+ .c1_demotion_supported = true,
.use_acpi = true,
};
@@ -1928,35 +1942,35 @@ static void __init bxt_idle_state_table_update(void)
unsigned long long msr;
unsigned int usec;
- rdmsrl(MSR_PKGC6_IRTL, msr);
+ rdmsrq(MSR_PKGC6_IRTL, msr);
usec = irtl_2_usec(msr);
if (usec) {
bxt_cstates[2].exit_latency = usec;
bxt_cstates[2].target_residency = usec;
}
- rdmsrl(MSR_PKGC7_IRTL, msr);
+ rdmsrq(MSR_PKGC7_IRTL, msr);
usec = irtl_2_usec(msr);
if (usec) {
bxt_cstates[3].exit_latency = usec;
bxt_cstates[3].target_residency = usec;
}
- rdmsrl(MSR_PKGC8_IRTL, msr);
+ rdmsrq(MSR_PKGC8_IRTL, msr);
usec = irtl_2_usec(msr);
if (usec) {
bxt_cstates[4].exit_latency = usec;
bxt_cstates[4].target_residency = usec;
}
- rdmsrl(MSR_PKGC9_IRTL, msr);
+ rdmsrq(MSR_PKGC9_IRTL, msr);
usec = irtl_2_usec(msr);
if (usec) {
bxt_cstates[5].exit_latency = usec;
bxt_cstates[5].target_residency = usec;
}
- rdmsrl(MSR_PKGC10_IRTL, msr);
+ rdmsrq(MSR_PKGC10_IRTL, msr);
usec = irtl_2_usec(msr);
if (usec) {
bxt_cstates[6].exit_latency = usec;
@@ -1984,7 +1998,7 @@ static void __init sklh_idle_state_table_update(void)
if ((mwait_substates & (0xF << 28)) == 0)
return;
- rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
+ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr);
/* PC10 is not enabled in PKG C-state limit */
if ((msr & 0xF) != 8)
@@ -1996,7 +2010,7 @@ static void __init sklh_idle_state_table_update(void)
/* if SGX is present */
if (ebx & (1 << 2)) {
- rdmsrl(MSR_IA32_FEAT_CTL, msr);
+ rdmsrq(MSR_IA32_FEAT_CTL, msr);
/* if SGX is enabled */
if (msr & (1 << 18))
@@ -2015,7 +2029,7 @@ static void __init skx_idle_state_table_update(void)
{
unsigned long long msr;
- rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
+ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr);
/*
* 000b: C0/C1 (no package C-state support)
@@ -2068,7 +2082,7 @@ static void __init spr_idle_state_table_update(void)
* C6. However, if PC6 is disabled, we update the numbers to match
* core C6.
*/
- rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
+ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr);
/* Limit value 2 and above allow for PC6. */
if ((msr & 0x7) < 2) {
@@ -2082,8 +2096,8 @@ static void __init spr_idle_state_table_update(void)
*/
static void __init byt_cht_auto_demotion_disable(void)
{
- wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
- wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
+ wrmsrq(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
+ wrmsrq(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
}
static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
@@ -2241,27 +2255,27 @@ static void auto_demotion_disable(void)
{
unsigned long long msr_bits;
- rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
msr_bits &= ~auto_demotion_disable_flags;
- wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+ wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
}
static void c1e_promotion_enable(void)
{
unsigned long long msr_bits;
- rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ rdmsrq(MSR_IA32_POWER_CTL, msr_bits);
msr_bits |= 0x2;
- wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ wrmsrq(MSR_IA32_POWER_CTL, msr_bits);
}
static void c1e_promotion_disable(void)
{
unsigned long long msr_bits;
- rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ rdmsrq(MSR_IA32_POWER_CTL, msr_bits);
msr_bits &= ~0x2;
- wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ wrmsrq(MSR_IA32_POWER_CTL, msr_bits);
}
/**
@@ -2324,6 +2338,88 @@ static void __init intel_idle_cpuidle_devices_uninit(void)
cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i));
}
+static void intel_c1_demotion_toggle(void *enable)
+{
+ unsigned long long msr_val;
+
+ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_val);
+ /*
+ * Enable/disable C1 undemotion along with C1 demotion, as this is the
+ * most sensible configuration in general.
+ */
+ if (enable)
+ msr_val |= NHM_C1_AUTO_DEMOTE | SNB_C1_AUTO_UNDEMOTE;
+ else
+ msr_val &= ~(NHM_C1_AUTO_DEMOTE | SNB_C1_AUTO_UNDEMOTE);
+ wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_val);
+}
+
+static ssize_t intel_c1_demotion_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ bool enable;
+ int err;
+
+ err = kstrtobool(buf, &enable);
+ if (err)
+ return err;
+
+ mutex_lock(&c1_demotion_mutex);
+ /* Enable/disable C1 demotion on all CPUs */
+ on_each_cpu(intel_c1_demotion_toggle, (void *)enable, 1);
+ mutex_unlock(&c1_demotion_mutex);
+
+ return count;
+}
+
+static ssize_t intel_c1_demotion_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long long msr_val;
+
+ /*
+ * Read the MSR value for a CPU and assume it is the same for all CPUs. Any other
+ * configuration would be a BIOS bug.
+ */
+ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_val);
+ return sysfs_emit(buf, "%d\n", !!(msr_val & NHM_C1_AUTO_DEMOTE));
+}
+static DEVICE_ATTR_RW(intel_c1_demotion);
+
+static int __init intel_idle_sysfs_init(void)
+{
+ int err;
+
+ if (!c1_demotion_supported)
+ return 0;
+
+ sysfs_root = bus_get_dev_root(&cpu_subsys);
+ if (!sysfs_root)
+ return 0;
+
+ err = sysfs_add_file_to_group(&sysfs_root->kobj,
+ &dev_attr_intel_c1_demotion.attr,
+ "cpuidle");
+ if (err) {
+ put_device(sysfs_root);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __init intel_idle_sysfs_uninit(void)
+{
+ if (!sysfs_root)
+ return;
+
+ sysfs_remove_file_from_group(&sysfs_root->kobj,
+ &dev_attr_intel_c1_demotion.attr,
+ "cpuidle");
+ put_device(sysfs_root);
+}
+
static int __init intel_idle_init(void)
{
const struct x86_cpu_id *id;
@@ -2374,6 +2470,8 @@ static int __init intel_idle_init(void)
auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
if (icpu->disable_promotion_to_c1e)
c1e_promotion = C1E_PROMOTION_DISABLE;
+ if (icpu->c1_demotion_supported)
+ c1_demotion_supported = true;
if (icpu->use_acpi || force_use_acpi)
intel_idle_acpi_cst_extract();
} else if (!intel_idle_acpi_cst_extract()) {
@@ -2387,6 +2485,10 @@ static int __init intel_idle_init(void)
if (!intel_idle_cpuidle_devices)
return -ENOMEM;
+ retval = intel_idle_sysfs_init();
+ if (retval)
+ pr_warn("failed to initialized sysfs");
+
intel_idle_cpuidle_driver_init(&intel_idle_driver);
retval = cpuidle_register_driver(&intel_idle_driver);
@@ -2411,6 +2513,7 @@ hp_setup_fail:
intel_idle_cpuidle_devices_uninit();
cpuidle_unregister_driver(&intel_idle_driver);
init_driver_fail:
+ intel_idle_sysfs_uninit();
free_percpu(intel_idle_cpuidle_devices);
return retval;
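The new knob should surface as /sys/devices/system/cpu/cpuidle/intel_c1_demotion. The attach pattern used above, reduced to a sketch (names hypothetical, error handling abridged):

	#include <linux/cpu.h>
	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t demo_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%d\n", 1);
	}
	static DEVICE_ATTR_RO(demo);

	static int demo_attach(void)
	{
		struct device *root = bus_get_dev_root(&cpu_subsys);
		int err;

		if (!root)
			return 0;
		/* The file joins the device's existing "cpuidle" group. */
		err = sysfs_add_file_to_group(&root->kobj,
					      &dev_attr_demo.attr, "cpuidle");
		if (err)
			put_device(root);
		return err;
	}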
diff --git a/drivers/iio/adc/qcom-spmi-rradc.c b/drivers/iio/adc/qcom-spmi-rradc.c
index 63ebaf13ef19..f61ad0510f04 100644
--- a/drivers/iio/adc/qcom-spmi-rradc.c
+++ b/drivers/iio/adc/qcom-spmi-rradc.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2016-2017, 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Linaro Limited.
- * Author: Caleb Connolly <caleb.connolly@linaro.org>
+ * Author: Casey Connolly <casey.connolly@linaro.org>
*
* This driver is for the Round Robin ADC found in the pmi8998 and pm660 PMICs.
*/
@@ -1016,5 +1016,5 @@ static struct platform_driver rradc_driver = {
module_platform_driver(rradc_driver);
MODULE_DESCRIPTION("QCOM SPMI PMIC RR ADC driver");
-MODULE_AUTHOR("Caleb Connolly <caleb.connolly@linaro.org>");
+MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 0914148d1a22..bd3458965bff 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -421,9 +421,10 @@ static int stm32_adc_irq_probe(struct platform_device *pdev,
return priv->irq[i];
}
- priv->domain = irq_domain_add_simple(np, STM32_ADC_MAX_ADCS, 0,
- &stm32_adc_domain_ops,
- priv);
+ priv->domain = irq_domain_create_simple(of_fwnode_handle(np),
+ STM32_ADC_MAX_ADCS, 0,
+ &stm32_adc_domain_ops,
+ priv);
if (!priv->domain) {
dev_err(&pdev->dev, "Failed to add irq domain\n");
return -ENOMEM;
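This hunk is part of the tree-wide move from the of_node-based irq_domain_add_*() constructors to the fwnode-based irq_domain_create_*() ones; of_fwnode_handle() bridges callers that still hold a struct device_node. In isolation (nr_irqs, ops and host_data stand in for the caller's values):

	#include <linux/irqdomain.h>
	#include <linux/of.h>

	static struct irq_domain *make_domain(struct device_node *np,
					      unsigned int nr_irqs,
					      const struct irq_domain_ops *ops,
					      void *host_data)
	{
		/* First argument is now a fwnode_handle, not a device_node. */
		return irq_domain_create_simple(of_fwnode_handle(np), nr_irqs,
						0, ops, host_data);
	}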
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 142170473e75..8670e58675c6 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -36,6 +36,7 @@ MODULE_LICENSE("Dual BSD/GPL");
#define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */
#define CM_DIRECT_RETRY_CTX ((void *) 1UL)
+#define CM_MRA_SETTING 24 /* 4.096us * 2^24 = ~68.7 seconds */
static const char * const ibcm_rej_reason_strs[] = {
[IB_CM_REJ_NO_QP] = "no QP",
@@ -167,7 +168,7 @@ struct cm_port {
struct cm_device {
struct kref kref;
struct list_head list;
- spinlock_t mad_agent_lock;
+ rwlock_t mad_agent_lock;
struct ib_device *ib_device;
u8 ack_delay;
int going_down;
@@ -241,7 +242,6 @@ struct cm_id_private {
u8 initiator_depth;
u8 retry_count;
u8 rnr_retry_count;
- u8 service_timeout;
u8 target_ack_delay;
struct list_head work_list;
@@ -285,7 +285,7 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
if (!cm_id_priv->av.port)
return ERR_PTR(-EINVAL);
- spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
mad_agent = cm_id_priv->av.port->mad_agent;
if (!mad_agent) {
m = ERR_PTR(-EINVAL);
@@ -311,7 +311,7 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
m->ah = ah;
out:
- spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
return m;
}
@@ -1297,10 +1297,10 @@ static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
if (!cm_id_priv->av.port)
return cpu_to_be64(low_tid);
- spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
if (cm_id_priv->av.port->mad_agent)
hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32;
- spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
return cpu_to_be64(hi_tid | low_tid);
}
@@ -1872,7 +1872,7 @@ static void cm_process_work(struct cm_id_private *cm_id_priv,
static void cm_format_mra(struct cm_mra_msg *mra_msg,
struct cm_id_private *cm_id_priv,
- enum cm_msg_response msg_mraed, u8 service_timeout,
+ enum cm_msg_response msg_mraed,
const void *private_data, u8 private_data_len)
{
cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
@@ -1881,7 +1881,7 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg,
be32_to_cpu(cm_id_priv->id.local_id));
IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
be32_to_cpu(cm_id_priv->id.remote_id));
- IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);
+ IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, CM_MRA_SETTING);
if (private_data && private_data_len)
IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
@@ -1960,7 +1960,7 @@ static void cm_dup_req_handler(struct cm_work *work,
switch (cm_id_priv->id.state) {
case IB_CM_MRA_REQ_SENT:
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
- CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
+ CM_MSG_RESPONSE_REQ,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
break;
@@ -2454,7 +2454,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
cm_id_priv->private_data_len);
else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
- CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
+ CM_MSG_RESPONSE_REP,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
else
@@ -3094,26 +3094,13 @@ out:
return -EINVAL;
}
-int ib_send_cm_mra(struct ib_cm_id *cm_id,
- u8 service_timeout,
- const void *private_data,
- u8 private_data_len)
+int ib_prepare_cm_mra(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
- struct ib_mad_send_buf *msg;
enum ib_cm_state cm_state;
enum ib_cm_lap_state lap_state;
- enum cm_msg_response msg_response;
- void *data;
unsigned long flags;
- int ret;
-
- if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
- return -EINVAL;
-
- data = cm_copy_private_data(private_data, private_data_len);
- if (IS_ERR(data))
- return PTR_ERR(data);
+ int ret = 0;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@@ -3122,58 +3109,33 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
case IB_CM_REQ_RCVD:
cm_state = IB_CM_MRA_REQ_SENT;
lap_state = cm_id->lap_state;
- msg_response = CM_MSG_RESPONSE_REQ;
break;
case IB_CM_REP_RCVD:
cm_state = IB_CM_MRA_REP_SENT;
lap_state = cm_id->lap_state;
- msg_response = CM_MSG_RESPONSE_REP;
break;
case IB_CM_ESTABLISHED:
if (cm_id->lap_state == IB_CM_LAP_RCVD) {
cm_state = cm_id->state;
lap_state = IB_CM_MRA_LAP_SENT;
- msg_response = CM_MSG_RESPONSE_OTHER;
break;
}
fallthrough;
default:
- trace_icm_send_mra_unknown_err(&cm_id_priv->id);
+ trace_icm_prepare_mra_unknown_err(&cm_id_priv->id);
ret = -EINVAL;
goto error_unlock;
}
- if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
- msg = cm_alloc_msg(cm_id_priv);
- if (IS_ERR(msg)) {
- ret = PTR_ERR(msg);
- goto error_unlock;
- }
-
- cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
- msg_response, service_timeout,
- private_data, private_data_len);
- trace_icm_send_mra(cm_id);
- ret = ib_post_send_mad(msg, NULL);
- if (ret)
- goto error_free_msg;
- }
-
cm_id->state = cm_state;
cm_id->lap_state = lap_state;
- cm_id_priv->service_timeout = service_timeout;
- cm_set_private_data(cm_id_priv, data, private_data_len);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- return 0;
+ cm_set_private_data(cm_id_priv, NULL, 0);
-error_free_msg:
- cm_free_msg(msg);
error_unlock:
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- kfree(data);
return ret;
}
-EXPORT_SYMBOL(ib_send_cm_mra);
+EXPORT_SYMBOL(ib_prepare_cm_mra);
static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
@@ -3377,7 +3339,6 @@ static int cm_lap_handler(struct cm_work *work)
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
CM_MSG_RESPONSE_OTHER,
- cm_id_priv->service_timeout,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
spin_unlock_irq(&cm_id_priv->lock);
@@ -3786,7 +3747,8 @@ static void cm_process_send_error(struct cm_id_private *cm_id_priv,
spin_lock_irq(&cm_id_priv->lock);
if (msg != cm_id_priv->msg) {
spin_unlock_irq(&cm_id_priv->lock);
- cm_free_priv_msg(msg);
+ cm_free_msg(msg);
+ cm_deref_id(cm_id_priv);
return;
}
cm_free_priv_msg(msg);
@@ -4378,7 +4340,7 @@ static int cm_add_one(struct ib_device *ib_device)
return -ENOMEM;
kref_init(&cm_dev->kref);
- spin_lock_init(&cm_dev->mad_agent_lock);
+ rwlock_init(&cm_dev->mad_agent_lock);
cm_dev->ib_device = ib_device;
cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
cm_dev->going_down = 0;
@@ -4494,9 +4456,9 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
* The above ensures no call paths from the work are running,
* the remaining paths all take the mad_agent_lock.
*/
- spin_lock(&cm_dev->mad_agent_lock);
+ write_lock(&cm_dev->mad_agent_lock);
port->mad_agent = NULL;
- spin_unlock(&cm_dev->mad_agent_lock);
+ write_unlock(&cm_dev->mad_agent_lock);
ib_unregister_mad_agent(mad_agent);
ib_port_unregister_client_groups(ib_device, i,
cm_counter_groups);
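Converting mad_agent_lock from a spinlock to an rwlock lets the frequent lookup paths (cm_alloc_msg(), cm_form_tid()) proceed concurrently while cm_remove_one() stays the exclusive writer. The locking shape in miniature (illustrative only):

	#include <linux/spinlock.h>

	static DEFINE_RWLOCK(agent_lock);
	static void *mad_agent;			/* stand-in for the real pointer */

	static void *agent_peek(void)
	{
		void *a;

		read_lock(&agent_lock);		/* many readers may hold this */
		a = mad_agent;
		read_unlock(&agent_lock);
		return a;
	}

	static void agent_teardown(void)
	{
		write_lock(&agent_lock);	/* excludes all readers */
		mad_agent = NULL;
		write_unlock(&agent_lock);
	}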
diff --git a/drivers/infiniband/core/cm_trace.h b/drivers/infiniband/core/cm_trace.h
index 944d9071245d..4a4987da69d4 100644
--- a/drivers/infiniband/core/cm_trace.h
+++ b/drivers/infiniband/core/cm_trace.h
@@ -229,7 +229,7 @@ DEFINE_CM_ERR_EVENT(send_drep);
DEFINE_CM_ERR_EVENT(dreq_unknown);
DEFINE_CM_ERR_EVENT(send_unknown_rej);
DEFINE_CM_ERR_EVENT(rej_unknown);
-DEFINE_CM_ERR_EVENT(send_mra_unknown);
+DEFINE_CM_ERR_EVENT(prepare_mra_unknown);
DEFINE_CM_ERR_EVENT(mra_unknown);
DEFINE_CM_ERR_EVENT(qp_init);
DEFINE_CM_ERR_EVENT(qp_rtr);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index ab31eefa916b..9b471548e7ae 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -46,7 +46,6 @@ MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
-#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 16
#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP
@@ -146,19 +145,6 @@ struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
}
EXPORT_SYMBOL(rdma_iw_cm_id);
-/**
- * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
- * @res: rdma resource tracking entry pointer
- */
-struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
-{
- struct rdma_id_private *id_priv =
- container_of(res, struct rdma_id_private, res);
-
- return &id_priv->id;
-}
-EXPORT_SYMBOL(rdma_res_to_id);
-
static int cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);
@@ -2214,8 +2200,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id,
case IB_CM_REP_RECEIVED:
if (state == RDMA_CM_CONNECT &&
(id_priv->id.qp_type != IB_QPT_UD)) {
- trace_cm_send_mra(id_priv);
- ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+ trace_cm_prepare_mra(id_priv);
+ ib_prepare_cm_mra(cm_id);
}
if (id_priv->id.qp) {
event.status = cma_rep_recv(id_priv);
@@ -2476,8 +2462,8 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT &&
conn_id->id.qp_type != IB_QPT_UD) {
- trace_cm_send_mra(cm_id->context);
- ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+ trace_cm_prepare_mra(cm_id->context);
+ ib_prepare_cm_mra(cm_id);
}
mutex_unlock(&conn_id->handler_mutex);
@@ -5245,7 +5231,8 @@ static int cma_netevent_callback(struct notifier_block *self,
neigh->ha, ETH_ALEN))
continue;
cma_id_get(current_id);
- queue_work(cma_wq, &current_id->id.net_work);
+ if (!queue_work(cma_wq, &current_id->id.net_work))
+ cma_id_put(current_id);
}
out:
spin_unlock_irqrestore(&id_table_lock, flags);
diff --git a/drivers/infiniband/core/cma_trace.h b/drivers/infiniband/core/cma_trace.h
index dc622f3778be..3456d5f3aa47 100644
--- a/drivers/infiniband/core/cma_trace.h
+++ b/drivers/infiniband/core/cma_trace.h
@@ -55,7 +55,7 @@ DECLARE_EVENT_CLASS(cma_fsm_class,
DEFINE_CMA_FSM_EVENT(send_rtu);
DEFINE_CMA_FSM_EVENT(send_rej);
-DEFINE_CMA_FSM_EVENT(send_mra);
+DEFINE_CMA_FSM_EVENT(prepare_mra);
DEFINE_CMA_FSM_EVENT(send_sidr_req);
DEFINE_CMA_FSM_EVENT(send_sidr_rep);
DEFINE_CMA_FSM_EVENT(disconnect);
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index f4486cbd8f45..62410578dec3 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -368,12 +368,9 @@ EXPORT_SYMBOL(iw_cm_disconnect);
/*
* CM_ID <-- DESTROYING
*
- * Clean up all resources associated with the connection and release
- * the initial reference taken by iw_create_cm_id.
- *
- * Returns true if and only if the last cm_id_priv reference has been dropped.
+ * Clean up all resources associated with the connection.
*/
-static bool destroy_cm_id(struct iw_cm_id *cm_id)
+static void destroy_cm_id(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
struct ib_qp *qp;
@@ -442,20 +439,22 @@ static bool destroy_cm_id(struct iw_cm_id *cm_id)
iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
}
-
- return iwcm_deref_id(cm_id_priv);
}
/*
- * This function is only called by the application thread and cannot
- * be called by the event thread. The function will wait for all
- * references to be released on the cm_id and then kfree the cm_id
- * object.
+ * Destroy cm_id. If the cm_id still has other references, wait for all
+ * references to be released on the cm_id and then release the initial
+ * reference taken by iw_create_cm_id.
*/
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
- if (!destroy_cm_id(cm_id))
+ struct iwcm_id_private *cm_id_priv;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ destroy_cm_id(cm_id);
+ if (refcount_read(&cm_id_priv->refcount) > 1)
flush_workqueue(iwcm_wq);
+ iwcm_deref_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
@@ -1035,8 +1034,10 @@ static void cm_work_handler(struct work_struct *_work)
if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
ret = process_event(cm_id_priv, &levent);
- if (ret)
- WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id));
+ if (ret) {
+ destroy_cm_id(&cm_id_priv->id);
+ WARN_ON_ONCE(iwcm_deref_id(cm_id_priv));
+ }
} else
pr_debug("dropping event %d\n", levent.event);
if (iwcm_deref_id(cm_id_priv))
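The reshaped iw_destroy_cm_id() decouples teardown from the final put: flush the event workqueue only when other references remain, then drop the initial one. A sketch of the same shape on a hypothetical object:

	#include <linux/refcount.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct obj {
		refcount_t ref;
		struct work_struct work;
	};

	static void obj_put(struct obj *o)
	{
		if (refcount_dec_and_test(&o->ref))
			kfree(o);
	}

	static void obj_destroy(struct obj *o, struct workqueue_struct *wq)
	{
		/* ... tear down the object's resources here ... */
		if (refcount_read(&o->ref) > 1)	/* events still in flight? */
			flush_workqueue(wq);	/* let them drop their refs */
		obj_put(o);			/* the initial reference */
	}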
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 8af0619a39cd..b4b10e8a6495 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -158,7 +158,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
recv_wc->recv_buf.grh, agent->port_num);
if (IS_ERR(ah))
- return (void *) ah;
+ return ERR_CAST(ah);
hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
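ERR_CAST() here (and the identical fixes in uverbs_cmd.c and verbs.c below) replaces an opaque (void *) cast when an error pointer is propagated across pointer types; the encoded errno is preserved and the intent is explicit. In miniature (types and stub are hypothetical):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/errno.h>

	static struct device *get_parent(void)
	{
		return ERR_PTR(-ENODEV);	/* stub for illustration */
	}

	static struct kobject *get_parent_kobj(void)
	{
		struct device *parent = get_parent();

		if (IS_ERR(parent))
			return ERR_CAST(parent); /* same errno, right type */
		return &parent->kobj;
	}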
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index c48ef6083020..c752ae9fad6c 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -41,67 +41,72 @@
#include <linux/hugetlb.h>
#include <linux/interval_tree.h>
#include <linux/hmm.h>
+#include <linux/hmm-dma.h>
#include <linux/pagemap.h>
#include <rdma/ib_umem_odp.h>
#include "uverbs.h"
-static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
- const struct mmu_interval_notifier_ops *ops)
+static void ib_init_umem_implicit_odp(struct ib_umem_odp *umem_odp)
{
- int ret;
+ umem_odp->is_implicit_odp = 1;
+ umem_odp->umem.is_odp = 1;
+ mutex_init(&umem_odp->umem_mutex);
+}
+
+static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
+ const struct mmu_interval_notifier_ops *ops)
+{
+ struct ib_device *dev = umem_odp->umem.ibdev;
+ size_t page_size = 1UL << umem_odp->page_shift;
+ struct hmm_dma_map *map;
+ unsigned long start;
+ unsigned long end;
+ size_t nr_entries;
+ int ret = 0;
umem_odp->umem.is_odp = 1;
mutex_init(&umem_odp->umem_mutex);
- if (!umem_odp->is_implicit_odp) {
- size_t page_size = 1UL << umem_odp->page_shift;
- unsigned long start;
- unsigned long end;
- size_t ndmas, npfns;
-
- start = ALIGN_DOWN(umem_odp->umem.address, page_size);
- if (check_add_overflow(umem_odp->umem.address,
- (unsigned long)umem_odp->umem.length,
- &end))
- return -EOVERFLOW;
- end = ALIGN(end, page_size);
- if (unlikely(end < page_size))
- return -EOVERFLOW;
-
- ndmas = (end - start) >> umem_odp->page_shift;
- if (!ndmas)
- return -EINVAL;
-
- npfns = (end - start) >> PAGE_SHIFT;
- umem_odp->pfn_list = kvcalloc(
- npfns, sizeof(*umem_odp->pfn_list),
- GFP_KERNEL | __GFP_NOWARN);
- if (!umem_odp->pfn_list)
- return -ENOMEM;
-
- umem_odp->dma_list = kvcalloc(
- ndmas, sizeof(*umem_odp->dma_list),
- GFP_KERNEL | __GFP_NOWARN);
- if (!umem_odp->dma_list) {
+ start = ALIGN_DOWN(umem_odp->umem.address, page_size);
+ if (check_add_overflow(umem_odp->umem.address,
+ (unsigned long)umem_odp->umem.length, &end))
+ return -EOVERFLOW;
+ end = ALIGN(end, page_size);
+ if (unlikely(end < page_size))
+ return -EOVERFLOW;
+
+ nr_entries = (end - start) >> PAGE_SHIFT;
+ if (!(nr_entries * PAGE_SIZE / page_size))
+ return -EINVAL;
+
+ map = &umem_odp->map;
+ if (ib_uses_virt_dma(dev)) {
+ map->pfn_list = kvcalloc(nr_entries, sizeof(*map->pfn_list),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!map->pfn_list)
ret = -ENOMEM;
- goto out_pfn_list;
- }
+ } else
+ ret = hmm_dma_map_alloc(dev->dma_device, map,
+ (end - start) >> PAGE_SHIFT,
+ 1 << umem_odp->page_shift);
+ if (ret)
+ return ret;
- ret = mmu_interval_notifier_insert(&umem_odp->notifier,
- umem_odp->umem.owning_mm,
- start, end - start, ops);
- if (ret)
- goto out_dma_list;
- }
+ ret = mmu_interval_notifier_insert(&umem_odp->notifier,
+ umem_odp->umem.owning_mm, start,
+ end - start, ops);
+ if (ret)
+ goto out_free_map;
return 0;
-out_dma_list:
- kvfree(umem_odp->dma_list);
-out_pfn_list:
- kvfree(umem_odp->pfn_list);
+out_free_map:
+ if (ib_uses_virt_dma(dev))
+ kfree(map->pfn_list);
+ else
+ hmm_dma_map_free(dev->dma_device, map);
return ret;
}
@@ -120,7 +125,6 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
{
struct ib_umem *umem;
struct ib_umem_odp *umem_odp;
- int ret;
if (access & IB_ACCESS_HUGETLB)
return ERR_PTR(-EINVAL);
@@ -132,16 +136,10 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
umem->ibdev = device;
umem->writable = ib_access_writable(access);
umem->owning_mm = current->mm;
- umem_odp->is_implicit_odp = 1;
umem_odp->page_shift = PAGE_SHIFT;
umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
- ret = ib_init_umem_odp(umem_odp, NULL);
- if (ret) {
- put_pid(umem_odp->tgid);
- kfree(umem_odp);
- return ERR_PTR(ret);
- }
+ ib_init_umem_implicit_odp(umem_odp);
return umem_odp;
}
EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
@@ -262,74 +260,41 @@ err_put_pid:
}
EXPORT_SYMBOL(ib_umem_odp_get);
-void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
+static void ib_umem_odp_free(struct ib_umem_odp *umem_odp)
{
+ struct ib_device *dev = umem_odp->umem.ibdev;
+
/*
* Ensure that no more pages are mapped in the umem.
*
* It is the driver's responsibility to ensure, before calling us,
* that the hardware will not attempt to access the MR any more.
*/
- if (!umem_odp->is_implicit_odp) {
- mutex_lock(&umem_odp->umem_mutex);
- ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
- ib_umem_end(umem_odp));
- mutex_unlock(&umem_odp->umem_mutex);
- mmu_interval_notifier_remove(&umem_odp->notifier);
- kvfree(umem_odp->dma_list);
- kvfree(umem_odp->pfn_list);
- }
- put_pid(umem_odp->tgid);
- kfree(umem_odp);
+ mutex_lock(&umem_odp->umem_mutex);
+ ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
+ ib_umem_end(umem_odp));
+ mutex_unlock(&umem_odp->umem_mutex);
+ mmu_interval_notifier_remove(&umem_odp->notifier);
+ if (ib_uses_virt_dma(dev))
+ kfree(umem_odp->map.pfn_list);
+ else
+ hmm_dma_map_free(dev->dma_device, &umem_odp->map);
}
-EXPORT_SYMBOL(ib_umem_odp_release);
-/*
- * Map for DMA and insert a single page into the on-demand paging page tables.
- *
- * @umem: the umem to insert the page to.
- * @dma_index: index in the umem to add the dma to.
- * @page: the page struct to map and add.
- * @access_mask: access permissions needed for this page.
- *
- * The function returns -EFAULT if the DMA mapping operation fails.
- *
- */
-static int ib_umem_odp_map_dma_single_page(
- struct ib_umem_odp *umem_odp,
- unsigned int dma_index,
- struct page *page,
- u64 access_mask)
+void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
- struct ib_device *dev = umem_odp->umem.ibdev;
- dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];
-
- if (*dma_addr) {
- /*
- * If the page is already dma mapped it means it went through
- * a non-invalidating trasition, like read-only to writable.
- * Resync the flags.
- */
- *dma_addr = (*dma_addr & ODP_DMA_ADDR_MASK) | access_mask;
- return 0;
- }
+ if (!umem_odp->is_implicit_odp)
+ ib_umem_odp_free(umem_odp);
- *dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,
- DMA_BIDIRECTIONAL);
- if (ib_dma_mapping_error(dev, *dma_addr)) {
- *dma_addr = 0;
- return -EFAULT;
- }
- umem_odp->npages++;
- *dma_addr |= access_mask;
- return 0;
+ put_pid(umem_odp->tgid);
+ kfree(umem_odp);
}
+EXPORT_SYMBOL(ib_umem_odp_release);
/**
* ib_umem_odp_map_dma_and_lock - DMA map userspace memory in an ODP MR and lock it.
*
* Maps the range passed in the argument to DMA addresses.
- * The DMA addresses of the mapped pages is updated in umem_odp->dma_list.
* Upon success the ODP MR will be locked to let caller complete its device
* page table update.
*
@@ -357,9 +322,6 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
struct hmm_range range = {};
unsigned long timeout;
- if (access_mask == 0)
- return -EINVAL;
-
if (user_virt < ib_umem_start(umem_odp) ||
user_virt + bcnt > ib_umem_end(umem_odp))
return -EFAULT;
@@ -385,11 +347,11 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
if (fault) {
range.default_flags = HMM_PFN_REQ_FAULT;
- if (access_mask & ODP_WRITE_ALLOWED_BIT)
+ if (access_mask & HMM_PFN_WRITE)
range.default_flags |= HMM_PFN_REQ_WRITE;
}
- range.hmm_pfns = &(umem_odp->pfn_list[pfn_start_idx]);
+ range.hmm_pfns = &(umem_odp->map.pfn_list[pfn_start_idx]);
timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
retry:
@@ -417,22 +379,17 @@ retry:
for (pfn_index = 0; pfn_index < num_pfns;
pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) {
- if (fault) {
- /*
- * Since we asked for hmm_range_fault() to populate
- * pages it shouldn't return an error entry on success.
- */
- WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
- WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
- } else {
- if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) {
- WARN_ON(umem_odp->dma_list[dma_index]);
- continue;
- }
- access_mask = ODP_READ_ALLOWED_BIT;
- if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE)
- access_mask |= ODP_WRITE_ALLOWED_BIT;
- }
+ /*
+ * Since we asked for hmm_range_fault() to populate
+ * pages it shouldn't return an error entry on success.
+ */
+ WARN_ON(fault && range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
+ WARN_ON(fault && !(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
+ if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID))
+ continue;
+
+ if (range.hmm_pfns[pfn_index] & HMM_PFN_DMA_MAPPED)
+ continue;
hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
/* If a hugepage was detected and ODP wasn't set for, the umem
@@ -445,15 +402,6 @@ retry:
__func__, hmm_order, page_shift);
break;
}
-
- ret = ib_umem_odp_map_dma_single_page(
- umem_odp, dma_index, hmm_pfn_to_page(range.hmm_pfns[pfn_index]),
- access_mask);
- if (ret < 0) {
- ibdev_dbg(umem_odp->umem.ibdev,
- "ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
- break;
- }
}
/* upon success lock should stay on hold for the callee */
if (!ret)
@@ -473,45 +421,38 @@ EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock);
void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
u64 bound)
{
- dma_addr_t dma_addr;
- dma_addr_t dma;
- int idx;
- u64 addr;
struct ib_device *dev = umem_odp->umem.ibdev;
+ u64 addr;
lockdep_assert_held(&umem_odp->umem_mutex);
virt = max_t(u64, virt, ib_umem_start(umem_odp));
bound = min_t(u64, bound, ib_umem_end(umem_odp));
for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
- idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- dma = umem_odp->dma_list[idx];
-
- /* The access flags guaranteed a valid DMA address in case was NULL */
- if (dma) {
- unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
- struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);
-
- dma_addr = dma & ODP_DMA_ADDR_MASK;
- ib_dma_unmap_page(dev, dma_addr,
- BIT(umem_odp->page_shift),
- DMA_BIDIRECTIONAL);
- if (dma & ODP_WRITE_ALLOWED_BIT) {
- struct page *head_page = compound_head(page);
- /*
- * set_page_dirty prefers being called with
- * the page lock. However, MMU notifiers are
- * called sometimes with and sometimes without
- * the lock. We rely on the umem_mutex instead
- * to prevent other mmu notifiers from
- * continuing and allowing the page mapping to
- * be removed.
- */
- set_page_dirty(head_page);
- }
- umem_odp->dma_list[idx] = 0;
- umem_odp->npages--;
+ u64 offset = addr - ib_umem_start(umem_odp);
+ size_t idx = offset >> umem_odp->page_shift;
+ unsigned long pfn = umem_odp->map.pfn_list[idx];
+
+ if (!hmm_dma_unmap_pfn(dev->dma_device, &umem_odp->map, idx))
+ goto clear;
+
+ if (pfn & HMM_PFN_WRITE) {
+ struct page *page = hmm_pfn_to_page(pfn);
+ struct page *head_page = compound_head(page);
+ /*
+ * set_page_dirty prefers being called with
+ * the page lock. However, MMU notifiers are
+ * called sometimes with and sometimes without
+ * the lock. We rely on the umem_mutex instead
+ * to prevent other mmu notifiers from
+ * continuing and allowing the page mapping to
+ * be removed.
+ */
+ set_page_dirty(head_page);
}
+ umem_odp->npages--;
+clear:
+ umem_odp->map.pfn_list[idx] &= ~HMM_PFN_FLAGS;
}
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
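The rewritten ib_init_umem_odp() keeps the old code's overflow-safe range computation. Isolated, the idiom looks like this (a sketch, assuming page_size is a power of two):

	#include <linux/align.h>
	#include <linux/errno.h>
	#include <linux/overflow.h>

	static int range_bounds(unsigned long addr, unsigned long len,
				unsigned long page_size,
				unsigned long *start, unsigned long *end)
	{
		*start = ALIGN_DOWN(addr, page_size);
		if (check_add_overflow(addr, len, end))
			return -EOVERFLOW;
		*end = ALIGN(*end, page_size);	/* may wrap to 0 ... */
		if (*end < page_size)		/* ... which this catches */
			return -EOVERFLOW;
		return 0;
	}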
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 3c3bb670c805..bc9fe3ceca4d 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -193,7 +193,7 @@ _ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
fd, attrs);
if (IS_ERR(uobj))
- return (void *)uobj;
+ return ERR_CAST(uobj);
uverbs_uobject_get(uobj);
uobj_put_read(uobj);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index c5e78bbefbd0..75fde0fe9989 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -572,7 +572,7 @@ struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
GFP_KERNEL : GFP_ATOMIC);
if (IS_ERR(slave)) {
rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
- return (void *)slave;
+ return ERR_CAST(slave);
}
ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
rdma_lag_put_ah_roce_slave(slave);
diff --git a/drivers/infiniband/hw/bnxt_re/debugfs.c b/drivers/infiniband/hw/bnxt_re/debugfs.c
index af91d16c3c77..e632f1661b92 100644
--- a/drivers/infiniband/hw/bnxt_re/debugfs.c
+++ b/drivers/infiniband/hw/bnxt_re/debugfs.c
@@ -170,6 +170,9 @@ static int map_cc_config_offset_gen0_ext0(u32 offset, struct bnxt_qplib_cc_param
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TCP_CP:
*val = ccparam->tcp_cp;
break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP:
+ *val = ccparam->inact_th;
+ break;
default:
return -EINVAL;
}
@@ -203,7 +206,7 @@ static ssize_t bnxt_re_cc_config_get(struct file *filp, char __user *buffer,
return simple_read_from_buffer(buffer, usr_buf_len, ppos, (u8 *)(buf), rc);
}
-static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offset, u32 val)
+static int bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offset, u32 val)
{
u32 modify_mask;
@@ -247,7 +250,9 @@ static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offs
ccparam->tcp_cp = val;
break;
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TX_QUEUE:
+ return -EOPNOTSUPP;
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP:
+ ccparam->inact_th = val;
break;
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TIME_PER_PHASE:
ccparam->time_pph = val;
@@ -258,17 +263,20 @@ static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offs
}
ccparam->mask = modify_mask;
+ return 0;
}
static int bnxt_re_configure_cc(struct bnxt_re_dev *rdev, u32 gen_ext, u32 offset, u32 val)
{
struct bnxt_qplib_cc_param ccparam = { };
+ int rc;
- /* Supporting only Gen 0 now */
- if (gen_ext == CC_CONFIG_GEN0_EXT0)
- bnxt_re_fill_gen0_ext0(&ccparam, offset, val);
- else
- return -EINVAL;
+ if (gen_ext != CC_CONFIG_GEN0_EXT0)
+ return -EOPNOTSUPP;
+
+ rc = bnxt_re_fill_gen0_ext0(&ccparam, offset, val);
+ if (rc)
+ return rc;
bnxt_qplib_modify_cc(&rdev->qplib_res, &ccparam);
return 0;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 457eecb99f96..be34c605d516 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1113,7 +1113,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
- if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
+ if (bnxt_ext_stats_supported(res->cctx, res->dattr->dev_cap_flags, res->is_vf))
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;
req.qp_flags = cpu_to_le32(qp_flags);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index f231e886ad9d..9efd32a3dc55 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -846,7 +846,12 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
req.resp_addr = cpu_to_le64(sbuf.dma_addr);
- req.function_id = cpu_to_le32(fid);
+ if (bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx) && rcfw->res->is_vf)
+ req.function_id =
+ cpu_to_le32(CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID |
+ (fid << CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT));
+ else
+ req.function_id = cpu_to_le32(fid);
req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index b6e3141253c4..d6dde762921a 100644
--- a/drivers/infiniband/hw/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -124,7 +124,6 @@ struct opa_mad_notice_attr {
} __packed ntc_2048;
};
- u8 class_data[];
};
#define IB_VLARB_LOWPRI_0_31 1
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 5a91cbda4aee..764286da2ce8 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1361,16 +1361,6 @@ void sc_flush(struct send_context *sc)
sc_wait_for_packet_egress(sc, 1);
}
-/* drop all packets on the context, no waiting until they are sent */
-void sc_drop(struct send_context *sc)
-{
- if (!sc)
- return;
-
- dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
- __func__, sc->sw_index, sc->hw_context);
-}
-
/*
* Start the software reaction to a context halt or SPC freeze:
* - mark the context as halted or frozen
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index d07cc6ea7c63..ab0f9a3a8d12 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -246,7 +246,6 @@ void sc_disable(struct send_context *sc);
int sc_restart(struct send_context *sc);
void sc_return_credits(struct send_context *sc);
void sc_flush(struct send_context *sc);
-void sc_drop(struct send_context *sc);
void sc_stop(struct send_context *sc, int bit);
struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
pio_release_cb cb, void *arg);
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 0d2b39b7c8b5..16a749d16ee9 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1521,24 +1521,6 @@ void sdma_all_running(struct hfi1_devdata *dd)
}
/**
- * sdma_all_idle() - called when the link goes down
- * @dd: hfi1_devdata
- *
- * This routine moves all engines to the idle state.
- */
-void sdma_all_idle(struct hfi1_devdata *dd)
-{
- struct sdma_engine *sde;
- unsigned int i;
-
- /* idle all engines */
- for (i = 0; i < dd->num_sdma; ++i) {
- sde = &dd->per_sdma[i];
- sdma_process_event(sde, sdma_event_e70_go_idle);
- }
-}
-
-/**
* sdma_start() - called to kick off state processing for all engines
* @dd: hfi1_devdata
*
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index d77246b48434..91dfd5d0c419 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -373,7 +373,6 @@ void sdma_start(struct hfi1_devdata *dd);
void sdma_exit(struct hfi1_devdata *dd);
void sdma_clean(struct hfi1_devdata *dd, size_t num_engines);
void sdma_all_running(struct hfi1_devdata *dd);
-void sdma_all_idle(struct hfi1_devdata *dd);
void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
void sdma_freeze(struct hfi1_devdata *dd);
void sdma_unfreeze(struct hfi1_devdata *dd);
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index cf2d29098406..62b4f16dab27 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -53,7 +53,7 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
int ret = 0;
fd->entry_to_rb = kcalloc(uctxt->expected_count,
- sizeof(struct rb_node *),
+ sizeof(*fd->entry_to_rb),
GFP_KERNEL);
if (!fd->entry_to_rb)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index 7917af8e6380..baf592e6f21b 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -4,6 +4,7 @@
#
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ccflags-y += -I $(src)
hns-roce-hw-v2-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 4fc5b9d5fea8..307c35888b30 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -33,7 +33,6 @@
#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
-#include "hnae3.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 560a1d9de408..1dcc9cbb4678 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -1027,6 +1027,26 @@ struct hns_roce_dev {
atomic64_t *dfx_cnt;
};
+enum hns_roce_trace_type {
+ TRACE_SQ,
+ TRACE_RQ,
+ TRACE_SRQ,
+};
+
+static inline const char *trace_type_to_str(enum hns_roce_trace_type type)
+{
+ switch (type) {
+ case TRACE_SQ:
+ return "SQ";
+ case TRACE_RQ:
+ return "RQ";
+ case TRACE_SRQ:
+ return "SRQ";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
return container_of(ib_dev, struct hns_roce_dev, ib_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 160e8927d364..fa8747656f25 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -43,13 +43,15 @@
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
-#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
+#define CREATE_TRACE_POINTS
+#include "hns_roce_trace.h"
+
enum {
CMD_RST_PRC_OTHERS,
CMD_RST_PRC_SUCCESS,
@@ -738,6 +740,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
else
ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
+ trace_hns_sq_wqe(qp->qpn, wqe_idx, wqe, 1 << qp->sq.wqe_shift,
+ wr->wr_id, TRACE_SQ);
if (unlikely(ret)) {
*bad_wr = wr;
goto out;
@@ -807,6 +811,9 @@ static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
+
+ trace_hns_rq_wqe(hr_qp->qpn, wqe_idx, wqe, 1 << hr_qp->rq.wqe_shift,
+ wr->wr_id, TRACE_RQ);
}
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
@@ -943,7 +950,7 @@ static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
static void update_srq_db(struct hns_roce_srq *srq)
{
struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
- struct hns_roce_v2_db db;
+ struct hns_roce_v2_db db = {};
hr_reg_write(&db, DB_TAG, srq->srqn);
hr_reg_write(&db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
@@ -984,6 +991,9 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
fill_wqe_idx(srq, wqe_idx);
srq->wrid[wqe_idx] = wr->wr_id;
+
+ trace_hns_srq_wqe(srq->srqn, wqe_idx, wqe, 1 << srq->wqe_shift,
+ wr->wr_id, TRACE_SRQ);
}
if (likely(nreq)) {
@@ -1311,6 +1321,8 @@ static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
tail = csq->head;
for (i = 0; i < num; i++) {
+ trace_hns_cmdq_req(hr_dev, &desc[i]);
+
csq->desc[csq->head++] = desc[i];
if (csq->head == csq->desc_num)
csq->head = 0;
@@ -1325,6 +1337,8 @@ static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
if (hns_roce_cmq_csq_done(hr_dev)) {
ret = 0;
for (i = 0; i < num; i++) {
+ trace_hns_cmdq_resp(hr_dev, &csq->desc[tail]);
+
/* check the result of hardware write back */
desc_ret = le16_to_cpu(csq->desc[tail++].retval);
if (tail == csq->desc_num)
@@ -4302,8 +4316,7 @@ static inline int get_pdn(struct ib_pd *ib_pd)
}
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
- struct hns_roce_v2_qp_context *context,
- struct hns_roce_v2_qp_context *qpc_mask)
+ struct hns_roce_v2_qp_context *context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
@@ -5122,7 +5135,7 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
- modify_qp_reset_to_init(ibqp, context, qpc_mask);
+ modify_qp_reset_to_init(ibqp, context);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
modify_qp_init_to_init(ibqp, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
@@ -5313,6 +5326,7 @@ static void v2_set_flushed_fields(struct ib_qp *ibqp,
return;
spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
+ trace_hns_sq_flush_cqe(hr_qp->qpn, hr_qp->sq.head, TRACE_SQ);
hr_reg_write(context, QPC_SQ_PRODUCER_IDX, hr_qp->sq.head);
hr_reg_clear(qpc_mask, QPC_SQ_PRODUCER_IDX);
hr_qp->state = IB_QPS_ERR;
@@ -5322,6 +5336,7 @@ static void v2_set_flushed_fields(struct ib_qp *ibqp,
return;
spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
+ trace_hns_rq_flush_cqe(hr_qp->qpn, hr_qp->rq.head, TRACE_RQ);
hr_reg_write(context, QPC_RQ_PRODUCER_IDX, hr_qp->rq.head);
hr_reg_clear(qpc_mask, QPC_RQ_PRODUCER_IDX);
spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
@@ -6248,6 +6263,7 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
eq->sub_type = sub_type;
++eq->cons_index;
aeqe_found = IRQ_HANDLED;
+ trace_hns_ae_info(event_type, aeqe, eq->eqe_size);
atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_AEQE_CNT]);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 91a5665465ff..bc7466830eaf 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -34,6 +34,7 @@
#define _HNS_ROCE_HW_V2_H
#include <linux/bitops.h>
+#include "hnae3.h"
#define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 8d0b63d4b50a..e7a497cc125c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -37,7 +37,6 @@
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
-#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 09da3496843b..93a48b41955b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -38,6 +38,7 @@
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
+#include "hns_roce_trace.h"
static u32 hw_index_to_key(int ind)
{
@@ -159,6 +160,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
+ trace_hns_mr(mr);
if (mr->type != MR_TYPE_FRMR)
ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
else
@@ -1146,6 +1148,7 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
+ trace_hns_buf_attr(buf_attr);
/* The caller has its own buffer list and invokes the hns_roce_mtr_map()
* to finish the MTT configuration.
*/
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 356d98816949..f637b73b946e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -4,7 +4,6 @@
#include <rdma/rdma_cm.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
-#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_trace.h b/drivers/infiniband/hw/hns/hns_roce_trace.h
new file mode 100644
index 000000000000..59ceb591b3a1
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_trace.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2025 Hisilicon Limited.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns_roce
+
+#if !defined(__HNS_ROCE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HNS_ROCE_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <linux/string_choices.h>
+#include "hns_roce_device.h"
+#include "hns_roce_hw_v2.h"
+
+DECLARE_EVENT_CLASS(flush_head_template,
+ TP_PROTO(unsigned long qpn, u32 pi,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, pi, type),
+
+ TP_STRUCT__entry(__field(unsigned long, qpn)
+ __field(u32, pi)
+ __field(enum hns_roce_trace_type, type)
+ ),
+
+ TP_fast_assign(__entry->qpn = qpn;
+ __entry->pi = pi;
+ __entry->type = type;
+ ),
+
+ TP_printk("%s 0x%lx flush head 0x%x.",
+ trace_type_to_str(__entry->type),
+ __entry->qpn, __entry->pi)
+);
+
+DEFINE_EVENT(flush_head_template, hns_sq_flush_cqe,
+ TP_PROTO(unsigned long qpn, u32 pi,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, pi, type));
+DEFINE_EVENT(flush_head_template, hns_rq_flush_cqe,
+ TP_PROTO(unsigned long qpn, u32 pi,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, pi, type));
+
+#define MAX_SGE_PER_WQE 64
+#define MAX_WQE_SIZE (MAX_SGE_PER_WQE * HNS_ROCE_SGE_SIZE)
+DECLARE_EVENT_CLASS(wqe_template,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len,
+ u64 id, enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type),
+
+ TP_STRUCT__entry(__field(unsigned long, qpn)
+ __field(u32, idx)
+ __array(u32, wqe,
+ MAX_WQE_SIZE / sizeof(__le32))
+ __field(u32, len)
+ __field(u64, id)
+ __field(enum hns_roce_trace_type, type)
+ ),
+
+ TP_fast_assign(__entry->qpn = qpn;
+ __entry->idx = idx;
+ __entry->id = id;
+ __entry->len = len / sizeof(__le32);
+ __entry->type = type;
+ for (int i = 0; i < __entry->len; i++)
+ __entry->wqe[i] = le32_to_cpu(((__le32 *)wqe)[i]);
+ ),
+
+ TP_printk("%s 0x%lx wqe(0x%x/0x%llx): %s",
+ trace_type_to_str(__entry->type),
+ __entry->qpn, __entry->idx, __entry->id,
+ __print_array(__entry->wqe, __entry->len,
+ sizeof(__le32)))
+);
+
+DEFINE_EVENT(wqe_template, hns_sq_wqe,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type));
+DEFINE_EVENT(wqe_template, hns_rq_wqe,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type));
+DEFINE_EVENT(wqe_template, hns_srq_wqe,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type));
+
+TRACE_EVENT(hns_ae_info,
+ TP_PROTO(int event_type, void *aeqe, unsigned int len),
+ TP_ARGS(event_type, aeqe, len),
+
+ TP_STRUCT__entry(__field(int, event_type)
+ __array(u32, aeqe,
+ HNS_ROCE_V3_EQE_SIZE / sizeof(__le32))
+ __field(u32, len)
+ ),
+
+ TP_fast_assign(__entry->event_type = event_type;
+ __entry->len = len / sizeof(__le32);
+ for (int i = 0; i < __entry->len; i++)
+ __entry->aeqe[i] = le32_to_cpu(((__le32 *)aeqe)[i]);
+ ),
+
+ TP_printk("event %2d aeqe: %s", __entry->event_type,
+ __print_array(__entry->aeqe, __entry->len, sizeof(__le32)))
+);
+
+TRACE_EVENT(hns_mr,
+ TP_PROTO(struct hns_roce_mr *mr),
+ TP_ARGS(mr),
+
+ TP_STRUCT__entry(__field(u64, iova)
+ __field(u64, size)
+ __field(u32, key)
+ __field(u32, pd)
+ __field(u32, pbl_hop_num)
+ __field(u32, npages)
+ __field(int, type)
+ __field(int, enabled)
+ ),
+
+ TP_fast_assign(__entry->iova = mr->iova;
+ __entry->size = mr->size;
+ __entry->key = mr->key;
+ __entry->pd = mr->pd;
+ __entry->pbl_hop_num = mr->pbl_hop_num;
+ __entry->npages = mr->npages;
+ __entry->type = mr->type;
+ __entry->enabled = mr->enabled;
+ ),
+
+ TP_printk("iova:0x%llx, size:%llu, key:%u, pd:%u, pbl_hop:%u, npages:%u, type:%d, status:%d",
+ __entry->iova, __entry->size, __entry->key,
+ __entry->pd, __entry->pbl_hop_num, __entry->npages,
+ __entry->type, __entry->enabled)
+);
+
+TRACE_EVENT(hns_buf_attr,
+ TP_PROTO(struct hns_roce_buf_attr *attr),
+ TP_ARGS(attr),
+
+ TP_STRUCT__entry(__field(unsigned int, region_count)
+ __field(unsigned int, region0_size)
+ __field(int, region0_hopnum)
+ __field(unsigned int, region1_size)
+ __field(int, region1_hopnum)
+ __field(unsigned int, region2_size)
+ __field(int, region2_hopnum)
+ __field(unsigned int, page_shift)
+ __field(bool, mtt_only)
+ ),
+
+ TP_fast_assign(__entry->region_count = attr->region_count;
+ __entry->region0_size = attr->region[0].size;
+ __entry->region0_hopnum = attr->region[0].hopnum;
+ __entry->region1_size = attr->region[1].size;
+ __entry->region1_hopnum = attr->region[1].hopnum;
+ __entry->region2_size = attr->region[2].size;
+ __entry->region2_hopnum = attr->region[2].hopnum;
+ __entry->page_shift = attr->page_shift;
+ __entry->mtt_only = attr->mtt_only;
+ ),
+
+ TP_printk("rg cnt:%u, pg_sft:0x%x, mtt_only:%s, rg 0 (sz:%u, hop:%u), rg 1 (sz:%u, hop:%u), rg 2 (sz:%u, hop:%u)\n",
+ __entry->region_count, __entry->page_shift,
+ str_yes_no(__entry->mtt_only),
+ __entry->region0_size, __entry->region0_hopnum,
+ __entry->region1_size, __entry->region1_hopnum,
+ __entry->region2_size, __entry->region2_hopnum)
+);
+
+DECLARE_EVENT_CLASS(cmdq,
+ TP_PROTO(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc),
+ TP_ARGS(hr_dev, desc),
+
+ TP_STRUCT__entry(__string(dev_name, dev_name(hr_dev->dev))
+ __field(u16, opcode)
+ __field(u16, flag)
+ __field(u16, retval)
+ __array(u32, data, 6)
+ ),
+
+ TP_fast_assign(__assign_str(dev_name);
+ __entry->opcode = le16_to_cpu(desc->opcode);
+ __entry->flag = le16_to_cpu(desc->flag);
+ __entry->retval = le16_to_cpu(desc->retval);
+ for (int i = 0; i < 6; i++)
+ __entry->data[i] = le32_to_cpu(desc->data[i]);
+ ),
+
+ TP_printk("%s cmdq opcode:0x%x, flag:0x%x, retval:0x%x, data:%s\n",
+ __get_str(dev_name), __entry->opcode,
+ __entry->flag, __entry->retval,
+ __print_array(__entry->data, 6, sizeof(__le32)))
+);
+
+DEFINE_EVENT(cmdq, hns_cmdq_req,
+ TP_PROTO(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc),
+ TP_ARGS(hr_dev, desc));
+DEFINE_EVENT(cmdq, hns_cmdq_resp,
+ TP_PROTO(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc),
+ TP_ARGS(hr_dev, desc));
+
+#endif /* __HNS_ROCE_TRACE_H */
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hns_roce_trace
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 6aed6169c07d..99a7f1a6c0b5 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -3131,7 +3131,7 @@ int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
ibdev_dbg(to_ibdev(cqp->dev),
- "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%pK] cqp[%p] polarity[x%04x]\n",
+ "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%p] cqp[%p] polarity[x%04x]\n",
cqp->sq_size, cqp->hw_sq_size, cqp->sq_base,
(u64 *)(uintptr_t)cqp->sq_pa, cqp, cqp->polarity);
return 0;
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index 7599e31b5743..1e840bbd619d 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -1,10 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "main.h"
-#include "../../../net/ethernet/intel/ice/ice.h"
MODULE_ALIAS("i40iw");
-MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Protocol Driver for RDMA");
MODULE_LICENSE("Dual BSD/GPL");
@@ -61,7 +59,7 @@ static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
}
static void irdma_fill_qos_info(struct irdma_l2params *l2params,
- struct iidc_qos_params *qos_info)
+ struct iidc_rdma_qos_params *qos_info)
{
int i;
@@ -85,12 +83,13 @@ static void irdma_fill_qos_info(struct irdma_l2params *l2params,
}
}
-static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event)
+static void irdma_iidc_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
+ struct iidc_rdma_event *event)
{
- struct irdma_device *iwdev = dev_get_drvdata(&pf->adev->dev);
+ struct irdma_device *iwdev = dev_get_drvdata(&cdev_info->adev->dev);
struct irdma_l2params l2params = {};
- if (*event->type & BIT(IIDC_EVENT_AFTER_MTU_CHANGE)) {
+ if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE)) {
ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu);
if (iwdev->vsi.mtu != iwdev->netdev->mtu) {
l2params.mtu = iwdev->netdev->mtu;
@@ -98,25 +97,26 @@ static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event
irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
irdma_change_l2params(&iwdev->vsi, &l2params);
}
- } else if (*event->type & BIT(IIDC_EVENT_BEFORE_TC_CHANGE)) {
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE)) {
if (iwdev->vsi.tc_change_pending)
return;
irdma_prep_tc_change(iwdev);
- } else if (*event->type & BIT(IIDC_EVENT_AFTER_TC_CHANGE)) {
- struct iidc_qos_params qos_info = {};
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_TC_CHANGE)) {
+ struct iidc_rdma_priv_dev_info *iidc_priv = cdev_info->iidc_priv;
if (!iwdev->vsi.tc_change_pending)
return;
l2params.tc_changed = true;
ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n");
- ice_get_qos_params(pf, &qos_info);
- irdma_fill_qos_info(&l2params, &qos_info);
+
+ irdma_fill_qos_info(&l2params, &iidc_priv->qos_info);
if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
- iwdev->dcb_vlan_mode = qos_info.num_tc > 1 && !l2params.dscp_mode;
+ iwdev->dcb_vlan_mode =
+ l2params.num_tc > 1 && !l2params.dscp_mode;
irdma_change_l2params(&iwdev->vsi, &l2params);
- } else if (*event->type & BIT(IIDC_EVENT_CRIT_ERR)) {
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_CRIT_ERR)) {
ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n",
event->reg);
if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) {
@@ -151,10 +151,8 @@ static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event
*/
static void irdma_request_reset(struct irdma_pci_f *rf)
{
- struct ice_pf *pf = rf->cdev;
-
ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
- ice_rdma_request_reset(pf, IIDC_PFR);
+ ice_rdma_request_reset(rf->cdev, IIDC_FUNC_RESET);
}
/**
@@ -166,14 +164,15 @@ static int irdma_lan_register_qset(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node)
{
struct irdma_device *iwdev = vsi->back_vsi;
- struct ice_pf *pf = iwdev->rf->cdev;
+ struct iidc_rdma_core_dev_info *cdev_info;
struct iidc_rdma_qset_params qset = {};
int ret;
+ cdev_info = iwdev->rf->cdev;
qset.qs_handle = tc_node->qs_handle;
qset.tc = tc_node->traffic_class;
qset.vport_id = vsi->vsi_idx;
- ret = ice_add_rdma_qset(pf, &qset);
+ ret = ice_add_rdma_qset(cdev_info, &qset);
if (ret) {
ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
return ret;
@@ -194,19 +193,20 @@ static void irdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node)
{
struct irdma_device *iwdev = vsi->back_vsi;
- struct ice_pf *pf = iwdev->rf->cdev;
+ struct iidc_rdma_core_dev_info *cdev_info;
struct iidc_rdma_qset_params qset = {};
+ cdev_info = iwdev->rf->cdev;
qset.qs_handle = tc_node->qs_handle;
qset.tc = tc_node->traffic_class;
qset.vport_id = vsi->vsi_idx;
qset.teid = tc_node->l2_sched_node_id;
- if (ice_del_rdma_qset(pf, &qset))
+ if (ice_del_rdma_qset(cdev_info, &qset))
ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n");
}
-static int irdma_init_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf)
+static int irdma_init_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
{
int i;
@@ -217,12 +217,12 @@ static int irdma_init_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf)
return -ENOMEM;
for (i = 0; i < rf->msix_count; i++)
- if (ice_alloc_rdma_qvector(pf, &rf->msix_entries[i]))
+ if (ice_alloc_rdma_qvector(cdev, &rf->msix_entries[i]))
break;
if (i < IRDMA_MIN_MSIX) {
while (--i >= 0)
- ice_free_rdma_qvector(pf, &rf->msix_entries[i]);
+ ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
kfree(rf->msix_entries);
return -ENOMEM;
@@ -233,56 +233,65 @@ static int irdma_init_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf)
return 0;
}
-static void irdma_deinit_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf)
+static void irdma_deinit_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
{
int i;
for (i = 0; i < rf->msix_count; i++)
- ice_free_rdma_qvector(pf, &rf->msix_entries[i]);
+ ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
kfree(rf->msix_entries);
}
static void irdma_remove(struct auxiliary_device *aux_dev)
{
- struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev,
- struct iidc_auxiliary_dev,
- adev);
- struct ice_pf *pf = iidc_adev->pf;
struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
+ struct iidc_rdma_core_auxiliary_dev *iidc_adev;
+ struct iidc_rdma_core_dev_info *cdev_info;
+
+ iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ cdev_info = iidc_adev->cdev_info;
+ ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, false);
irdma_ib_unregister_device(iwdev);
- ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, false);
- irdma_deinit_interrupts(iwdev->rf, pf);
+ irdma_deinit_interrupts(iwdev->rf, cdev_info);
kfree(iwdev->rf);
- pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn));
+ pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(cdev_info->pdev->devfn));
}
-static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf,
- struct ice_vsi *vsi)
+static void irdma_fill_device_info(struct irdma_device *iwdev,
+ struct iidc_rdma_core_dev_info *cdev_info)
{
+ struct iidc_rdma_priv_dev_info *iidc_priv = cdev_info->iidc_priv;
struct irdma_pci_f *rf = iwdev->rf;
- rf->cdev = pf;
+ rf->sc_dev.hw = &rf->hw;
+ rf->iwdev = iwdev;
+ rf->cdev = cdev_info;
+ rf->hw.hw_addr = iidc_priv->hw_addr;
+ rf->pcidev = cdev_info->pdev;
+ rf->hw.device = &rf->pcidev->dev;
+ rf->pf_id = iidc_priv->pf_id;
rf->gen_ops.register_qset = irdma_lan_register_qset;
rf->gen_ops.unregister_qset = irdma_lan_unregister_qset;
- rf->hw.hw_addr = pf->hw.hw_addr;
- rf->pcidev = pf->pdev;
- rf->pf_id = pf->hw.pf_id;
- rf->default_vsi.vsi_idx = vsi->vsi_num;
- rf->protocol_used = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ?
- IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
+
+ rf->default_vsi.vsi_idx = iidc_priv->vport_id;
+ rf->protocol_used =
+ cdev_info->rdma_protocol == IIDC_RDMA_PROTOCOL_ROCEV2 ?
+ IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
rf->rdma_ver = IRDMA_GEN_2;
rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
rf->gen_ops.request_reset = irdma_request_reset;
rf->limits_sel = 7;
rf->iwdev = iwdev;
+
mutex_init(&iwdev->ah_tbl_lock);
- iwdev->netdev = vsi->netdev;
- iwdev->vsi_num = vsi->vsi_num;
+
+ iwdev->netdev = iidc_priv->netdev;
+ iwdev->vsi_num = iidc_priv->vport_id;
iwdev->init_state = INITIAL_STATE;
iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
@@ -294,19 +303,18 @@ static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf
static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
{
- struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev,
- struct iidc_auxiliary_dev,
- adev);
- struct ice_pf *pf = iidc_adev->pf;
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
- struct iidc_qos_params qos_info = {};
+ struct iidc_rdma_core_auxiliary_dev *iidc_adev;
+ struct iidc_rdma_core_dev_info *cdev_info;
+ struct iidc_rdma_priv_dev_info *iidc_priv;
+ struct irdma_l2params l2params = {};
struct irdma_device *iwdev;
struct irdma_pci_f *rf;
- struct irdma_l2params l2params = {};
int err;
- if (!vsi)
- return -EIO;
+ iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ cdev_info = iidc_adev->cdev_info;
+ iidc_priv = cdev_info->iidc_priv;
+
iwdev = ib_alloc_device(irdma_device, ibdev);
if (!iwdev)
return -ENOMEM;
@@ -316,10 +324,10 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
return -ENOMEM;
}
- irdma_fill_device_info(iwdev, pf, vsi);
+ irdma_fill_device_info(iwdev, cdev_info);
rf = iwdev->rf;
- err = irdma_init_interrupts(rf, pf);
+ err = irdma_init_interrupts(rf, cdev_info);
if (err)
goto err_init_interrupts;
@@ -328,8 +336,7 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
goto err_ctrl_init;
l2params.mtu = iwdev->netdev->mtu;
- ice_get_qos_params(pf, &qos_info);
- irdma_fill_qos_info(&l2params, &qos_info);
+ irdma_fill_qos_info(&l2params, &iidc_priv->qos_info);
if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
@@ -341,7 +348,7 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
if (err)
goto err_ibreg;
- ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, true);
+ ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, true);
ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
auxiliary_set_drvdata(aux_dev, iwdev);
@@ -353,7 +360,7 @@ err_ibreg:
err_rt_init:
irdma_ctrl_deinit_hw(rf);
err_ctrl_init:
- irdma_deinit_interrupts(rf, pf);
+ irdma_deinit_interrupts(rf, cdev_info);
err_init_interrupts:
kfree(iwdev->rf);
ib_dealloc_device(&iwdev->ibdev);
@@ -369,7 +376,7 @@ static const struct auxiliary_device_id irdma_auxiliary_id_table[] = {
MODULE_DEVICE_TABLE(auxiliary, irdma_auxiliary_id_table);
-static struct iidc_auxiliary_drv irdma_auxiliary_drv = {
+static struct iidc_rdma_core_auxiliary_drv irdma_auxiliary_drv = {
.adrv = {
.id_table = irdma_auxiliary_id_table,
.probe = irdma_probe,
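
With the switch to the generic IIDC RDMA types, probe and remove recover the
core-device info from the auxiliary device via container_of(), as shown in
irdma_probe()/irdma_remove() above. A schematic of the pattern, using only
types from this series (demo_probe itself is hypothetical):

    static int demo_probe(struct auxiliary_device *adev,
                          const struct auxiliary_device_id *id)
    {
            struct iidc_rdma_core_auxiliary_dev *iadev =
                    container_of(adev, struct iidc_rdma_core_auxiliary_dev,
                                 adev);
            struct iidc_rdma_core_dev_info *cdev_info = iadev->cdev_info;

            /* everything the RDMA driver needs hangs off cdev_info */
            dev_dbg(&adev->dev, "bound to PCI function %u\n",
                    PCI_FUNC(cdev_info->pdev->devfn));
            return 0;
    }
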
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index bb0b6494ccb2..674acc952168 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -29,7 +29,8 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#endif
#include <linux/auxiliary_bus.h>
-#include <linux/net/intel/iidc.h>
+#include <linux/net/intel/iidc_rdma.h>
+#include <linux/net/intel/iidc_rdma_ice.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
diff --git a/drivers/infiniband/hw/irdma/osdep.h b/drivers/infiniband/hw/irdma/osdep.h
index 4b4f78288d12..3f73ceacccb6 100644
--- a/drivers/infiniband/hw/irdma/osdep.h
+++ b/drivers/infiniband/hw/irdma/osdep.h
@@ -5,8 +5,8 @@
#include <linux/pci.h>
#include <linux/bitfield.h>
-#include <linux/net/intel/iidc.h>
#include <rdma/ib_verbs.h>
+#include <net/dscp.h>
#define STATS_TIMER_DELAY 60000
diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
index e7ce6840755f..37ce35cb10e7 100644
--- a/drivers/infiniband/hw/irdma/pble.c
+++ b/drivers/infiniband/hw/irdma/pble.c
@@ -108,7 +108,7 @@ static int add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
chunk->vaddr = sd_entry->u.bp.addr.va + offset;
chunk->fpm_addr = pble_rsrc->next_fpm_addr;
ibdev_dbg(to_ibdev(dev),
- "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%pK fpm_addr = %llx\n",
+ "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%p fpm_addr = %llx\n",
chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
return 0;
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 59b34afa867b..527c6da2c1ac 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -567,7 +567,7 @@ struct irdma_sc_vsi {
u8 qos_rel_bw;
u8 qos_prio_type;
u8 stats_idx;
- u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
+ u8 dscp_map[DSCP_MAX];
struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
bool dscp_mode:1;
@@ -695,7 +695,7 @@ struct irdma_l2params {
u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
u16 mtu;
u8 up2tc[IRDMA_MAX_USER_PRIORITY];
- u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
+ u8 dscp_map[DSCP_MAX];
u8 num_tc;
u8 vsi_rel_bw;
u8 vsi_prio_type;
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 0fc4e2679218..28e154bbb50f 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -15,14 +15,12 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_device *ibdev = ibcq->device;
struct mana_ib_create_cq ucmd = {};
struct mana_ib_dev *mdev;
- struct gdma_context *gc;
bool is_rnic_cq;
u32 doorbell;
u32 buf_size;
int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev_to_gc(mdev);
cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
cq->cq_handle = INVALID_MANA_HANDLE;
@@ -65,7 +63,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
ibdev_dbg(ibdev, "Failed to create kernel queue for create cq, %d\n", err);
return err;
}
- doorbell = gc->mana_ib.doorbell;
+ doorbell = mdev->gdma_dev->doorbell;
}
if (is_rnic_cq) {
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index b31089320aa5..165c0a1e67d1 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -101,103 +101,95 @@ static int mana_ib_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
struct mana_adev *madev = container_of(adev, struct mana_adev, adev);
+ struct gdma_context *gc = madev->mdev->gdma_context;
+ struct mana_context *mc = gc->mana.driver_data;
struct gdma_dev *mdev = madev->mdev;
struct net_device *ndev;
- struct mana_context *mc;
struct mana_ib_dev *dev;
u8 mac_addr[ETH_ALEN];
int ret;
- mc = mdev->driver_data;
-
dev = ib_alloc_device(mana_ib_dev, ib_dev);
if (!dev)
return -ENOMEM;
ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_ops);
-
- dev->ib_dev.phys_port_cnt = mc->num_ports;
-
- ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
- mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
-
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
-
- /*
- * num_comp_vectors needs to set to the max MSIX index
- * when interrupts and event queues are implemented
- */
- dev->ib_dev.num_comp_vectors = mdev->gdma_context->max_num_queues;
- dev->ib_dev.dev.parent = mdev->gdma_context->dev;
-
- ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
- if (!ndev) {
- ret = -ENODEV;
- ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
- goto free_ib_device;
- }
- ether_addr_copy(mac_addr, ndev->dev_addr);
- addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
- ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
- /* mana_get_primary_netdev() returns ndev with refcount held */
- netdev_put(ndev, &dev->dev_tracker);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
- goto free_ib_device;
- }
-
- ret = mana_gd_register_device(&mdev->gdma_context->mana_ib);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to register device, ret %d",
- ret);
- goto free_ib_device;
- }
- dev->gdma_dev = &mdev->gdma_context->mana_ib;
-
- dev->nb.notifier_call = mana_ib_netdev_event;
- ret = register_netdevice_notifier(&dev->nb);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d",
- ret);
- goto deregister_device;
- }
-
- ret = mana_ib_gd_query_adapter_caps(dev);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d",
- ret);
- goto deregister_net_notifier;
- }
-
- ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
-
- ret = mana_ib_create_eqs(dev);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
- goto deregister_net_notifier;
- }
-
- ret = mana_ib_gd_create_rnic_adapter(dev);
- if (ret)
- goto destroy_eqs;
-
+ dev->ib_dev.num_comp_vectors = gc->max_num_queues;
+ dev->ib_dev.dev.parent = gc->dev;
+ dev->gdma_dev = mdev;
xa_init_flags(&dev->qp_table_wq, XA_FLAGS_LOCK_IRQ);
- ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d",
- ret);
- goto destroy_rnic;
+
+ if (mana_ib_is_rnic(dev)) {
+ dev->ib_dev.phys_port_cnt = 1;
+ ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
+ if (!ndev) {
+ ret = -ENODEV;
+ ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
+ goto free_ib_device;
+ }
+ ether_addr_copy(mac_addr, ndev->dev_addr);
+ addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
+ ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
+ /* mana_get_primary_netdev() returns ndev with refcount held */
+ netdev_put(ndev, &dev->dev_tracker);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
+ goto free_ib_device;
+ }
+
+ dev->nb.notifier_call = mana_ib_netdev_event;
+ ret = register_netdevice_notifier(&dev->nb);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d",
+ ret);
+ goto free_ib_device;
+ }
+
+ ret = mana_ib_gd_query_adapter_caps(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d", ret);
+ goto deregister_net_notifier;
+ }
+
+ ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
+
+ ret = mana_ib_create_eqs(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
+ goto deregister_net_notifier;
+ }
+
+ ret = mana_ib_gd_create_rnic_adapter(dev);
+ if (ret)
+ goto destroy_eqs;
+
+ ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d", ret);
+ goto destroy_rnic;
+ }
+ } else {
+ dev->ib_dev.phys_port_cnt = mc->num_ports;
+ ret = mana_eth_query_adapter_caps(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to query ETH device caps, ret %d", ret);
+ goto free_ib_device;
+ }
}
- dev->av_pool = dma_pool_create("mana_ib_av", mdev->gdma_context->dev,
- MANA_AV_BUFFER_SIZE, MANA_AV_BUFFER_SIZE, 0);
+ dev->av_pool = dma_pool_create("mana_ib_av", gc->dev, MANA_AV_BUFFER_SIZE,
+ MANA_AV_BUFFER_SIZE, 0);
if (!dev->av_pool) {
ret = -ENOMEM;
goto destroy_rnic;
}
- ret = ib_register_device(&dev->ib_dev, "mana_%d",
- mdev->gdma_context->dev);
+ ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
+ mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
+
+ ret = ib_register_device(&dev->ib_dev, mana_ib_is_rnic(dev) ? "mana_%d" : "manae_%d",
+ gc->dev);
if (ret)
goto deallocate_pool;
@@ -208,15 +200,16 @@ static int mana_ib_probe(struct auxiliary_device *adev,
deallocate_pool:
dma_pool_destroy(dev->av_pool);
destroy_rnic:
- xa_destroy(&dev->qp_table_wq);
- mana_ib_gd_destroy_rnic_adapter(dev);
+ if (mana_ib_is_rnic(dev))
+ mana_ib_gd_destroy_rnic_adapter(dev);
destroy_eqs:
- mana_ib_destroy_eqs(dev);
+ if (mana_ib_is_rnic(dev))
+ mana_ib_destroy_eqs(dev);
deregister_net_notifier:
- unregister_netdevice_notifier(&dev->nb);
-deregister_device:
- mana_gd_deregister_device(dev->gdma_dev);
+ if (mana_ib_is_rnic(dev))
+ unregister_netdevice_notifier(&dev->nb);
free_ib_device:
+ xa_destroy(&dev->qp_table_wq);
ib_dealloc_device(&dev->ib_dev);
return ret;
}
@@ -227,25 +220,24 @@ static void mana_ib_remove(struct auxiliary_device *adev)
ib_unregister_device(&dev->ib_dev);
dma_pool_destroy(dev->av_pool);
+ if (mana_ib_is_rnic(dev)) {
+ mana_ib_gd_destroy_rnic_adapter(dev);
+ mana_ib_destroy_eqs(dev);
+ unregister_netdevice_notifier(&dev->nb);
+ }
xa_destroy(&dev->qp_table_wq);
- mana_ib_gd_destroy_rnic_adapter(dev);
- mana_ib_destroy_eqs(dev);
- unregister_netdevice_notifier(&dev->nb);
- mana_gd_deregister_device(dev->gdma_dev);
ib_dealloc_device(&dev->ib_dev);
}
static const struct auxiliary_device_id mana_id_table[] = {
- {
- .name = "mana.rdma",
- },
+ { .name = "mana.rdma", },
+ { .name = "mana.eth", },
{},
};
MODULE_DEVICE_TABLE(auxiliary, mana_id_table);
static struct auxiliary_driver mana_driver = {
- .name = "rdma",
.probe = mana_ib_probe,
.remove = mana_ib_remove,
.id_table = mana_id_table,
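
The driver now binds both the RNIC ("mana.rdma") and the plain ethernet
("mana.eth") auxiliary devices; matching is driven entirely by the id table,
and probe branches on mana_ib_is_rnic(). On the producer side an auxiliary
device's match name is composed as "<module>.<name>" -- a hypothetical
sketch of how the mana core would publish the second device:

    /* hypothetical producer-side fragment: with adev->name = "eth" the
     * device is matched against the table above as "mana.eth"
     */
    adev->name = "eth";
    adev->dev.parent = gc->dev;
    ret = auxiliary_device_init(adev);
    if (!ret)
            ret = auxiliary_device_add(adev);
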
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index eda9c5b971de..41a24a186f9d 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -4,6 +4,7 @@
*/
#include "mana_ib.h"
+#include "linux/pci.h"
void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
u32 port)
@@ -243,7 +244,6 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
struct mana_ib_queue *queue)
{
- struct gdma_context *gc = mdev_to_gc(mdev);
struct gdma_queue_spec spec = {};
int err;
@@ -252,7 +252,7 @@ int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_qu
spec.type = type;
spec.monitor_avl_buf = false;
spec.queue_size = size;
- err = mana_gd_create_mana_wq_cq(&gc->mana_ib, &spec, &queue->kmem);
+ err = mana_gd_create_mana_wq_cq(mdev->gdma_dev, &spec, &queue->kmem);
if (err)
return err;
/* take ownership into mana_ib from mana */
@@ -479,7 +479,7 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
{
unsigned long page_sz;
- page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
+ page_sz = ib_umem_find_best_pgsz(umem, dev->adapter_caps.page_size_cap, virt);
if (!page_sz) {
ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
return -EINVAL;
@@ -494,7 +494,7 @@ int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_ume
unsigned long page_sz;
/* Hardware requires dma region to align to chosen page size */
- page_sz = ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);
+ page_sz = ib_umem_find_best_pgoff(umem, dev->adapter_caps.page_size_cap, 0);
if (!page_sz) {
ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
return -EINVAL;
@@ -551,6 +551,7 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
struct ib_port_attr attr;
int err;
@@ -560,10 +561,12 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
- if (port_num == 1) {
- immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+
+ if (mana_ib_is_rnic(dev)) {
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ } else {
+ immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
}
return 0;
@@ -572,12 +575,14 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
struct ib_udata *uhw)
{
- struct mana_ib_dev *dev = container_of(ibdev,
- struct mana_ib_dev, ib_dev);
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ struct pci_dev *pdev = to_pci_dev(mdev_to_gc(dev)->dev);
memset(props, 0, sizeof(*props));
+ props->vendor_id = pdev->vendor;
+ props->vendor_part_id = dev->gdma_dev->dev_id.type;
props->max_mr_size = MANA_IB_MAX_MR_SIZE;
- props->page_size_cap = PAGE_SZ_BM;
+ props->page_size_cap = dev->adapter_caps.page_size_cap;
props->max_qp = dev->adapter_caps.max_qp_count;
props->max_qp_wr = dev->adapter_caps.max_qp_wr;
props->device_cap_flags = IB_DEVICE_RC_RNR_NAK_GEN;
@@ -596,6 +601,8 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
props->max_ah = INT_MAX;
props->max_pkeys = 1;
props->local_ca_ack_delay = MANA_CA_ACK_DELAY;
+ if (!mana_ib_is_rnic(dev))
+ props->raw_packet_caps = IB_RAW_PACKET_CAP_IP_CSUM;
return 0;
}
@@ -603,6 +610,7 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
int mana_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
struct net_device *ndev = mana_ib_get_netdev(ibdev, port);
if (!ndev)
@@ -623,7 +631,7 @@ int mana_ib_query_port(struct ib_device *ibdev, u32 port,
props->active_width = IB_WIDTH_4X;
props->active_speed = IB_SPEED_EDR;
props->pkey_tbl_len = 1;
- if (port == 1) {
+ if (mana_ib_is_rnic(dev)) {
props->gid_tbl_len = 16;
props->port_cap_flags = IB_PORT_CM_SUP;
props->ip_gids = true;
@@ -696,6 +704,41 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
caps->max_recv_sge_count = resp.max_recv_sge_count;
caps->feature_flags = resp.feature_flags;
+ caps->page_size_cap = PAGE_SZ_BM;
+ if (mdev_to_gc(dev)->pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB)
+ caps->page_size_cap |= (SZ_4M | SZ_1G | SZ_2G);
+
+ return 0;
+}
+
+int mana_eth_query_adapter_caps(struct mana_ib_dev *dev)
+{
+ struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
+ struct gdma_query_max_resources_resp resp = {};
+ struct gdma_general_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
+ sizeof(req), sizeof(resp));
+
+ err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&dev->ib_dev,
+ "Failed to query adapter caps err %d", err);
+ return err;
+ }
+
+ caps->max_qp_count = min_t(u32, resp.max_sq, resp.max_rq);
+ caps->max_cq_count = resp.max_cq;
+ caps->max_mr_count = resp.max_mst;
+ caps->max_pd_count = 0x6000;
+ caps->max_qp_wr = min_t(u32,
+ 0x100000 / GDMA_MAX_SQE_SIZE,
+ 0x100000 / GDMA_MAX_RQE_SIZE);
+ caps->max_send_sge_count = 30;
+ caps->max_recv_sge_count = 15;
+ caps->page_size_cap = PAGE_SZ_BM;
+
return 0;
}
@@ -740,7 +783,7 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev)
spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
spec.eq.msix_index = 0;
- err = mana_gd_create_mana_eq(&gc->mana_ib, &spec, &mdev->fatal_err_eq);
+ err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->fatal_err_eq);
if (err)
return err;
@@ -791,7 +834,7 @@ int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp));
req.hdr.req.msg_version = GDMA_MESSAGE_V2;
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.notify_eq_id = mdev->fatal_err_eq->id;
if (mdev->adapter_caps.feature_flags & MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT)
@@ -816,7 +859,7 @@ int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
gc = mdev_to_gc(mdev);
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
@@ -843,7 +886,7 @@ int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
}
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.op = ADDR_OP_ADD;
req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
@@ -873,7 +916,7 @@ int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
}
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.op = ADDR_OP_REMOVE;
req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
@@ -896,7 +939,7 @@ int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.op = op;
copy_in_reverse(req.mac_addr, mac, ETH_ALEN);
@@ -917,8 +960,11 @@ int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 do
struct mana_rnic_create_cq_req req = {};
int err;
+ if (!mdev->eqs)
+ return -EINVAL;
+
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_CQ, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.gdma_region = cq->queue.gdma_region;
req.eq_id = mdev->eqs[cq->comp_vector]->id;
@@ -950,7 +996,7 @@ int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
return 0;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_CQ, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.cq_handle = cq->cq_handle;
@@ -976,7 +1022,7 @@ int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
int err, i;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_RC_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.pd_handle = pd->pd_handle;
req.send_cq_handle = send_cq->cq_handle;
@@ -1012,7 +1058,7 @@ int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_RC_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.rc_qp_handle = qp->qp_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
@@ -1035,7 +1081,7 @@ int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
int err, i;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_UD_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.pd_handle = pd->pd_handle;
req.send_cq_handle = send_cq->cq_handle;
@@ -1070,7 +1116,7 @@ int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_UD_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.qp_handle = qp->qp_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 6903946677e5..42bebd6cd4f7 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -60,6 +60,7 @@ struct mana_ib_adapter_caps {
u32 max_recv_sge_count;
u32 max_inline_data_size;
u64 feature_flags;
+ u64 page_size_cap;
};
struct mana_ib_queue {
@@ -543,6 +544,11 @@ static inline void mana_put_qp_ref(struct mana_ib_qp *qp)
complete(&qp->free);
}
+static inline bool mana_ib_is_rnic(struct mana_ib_dev *mdev)
+{
+ return mdev->gdma_dev->dev_id.type == GDMA_DEVICE_MANA_IB;
+}
+
static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32 port)
{
struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
@@ -642,6 +648,7 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);
+int mana_eth_query_adapter_caps(struct mana_ib_dev *mdev);
int mana_ib_create_eqs(struct mana_ib_dev *mdev);
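
mana_ib_is_rnic() keys every RNIC-only step (EQs, RNIC adapter creation,
netdev notifier) off the GDMA device type, letting one driver serve both
personalities. Schematically, using the two cap-query helpers declared above
(demo_query_caps is hypothetical):

    static int demo_query_caps(struct mana_ib_dev *dev)
    {
            /* the RNIC has its own caps message; the ethernet device
             * falls back to the generic GDMA_QUERY_MAX_RESOURCES limits
             */
            if (mana_ib_is_rnic(dev))
                    return mana_ib_gd_query_adapter_caps(dev);
            return mana_eth_query_adapter_caps(dev);
    }
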
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index f99557ec7767..6d974d0a8400 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -5,8 +5,8 @@
#include "mana_ib.h"
-#define VALID_MR_FLAGS \
- (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ)
+#define VALID_MR_FLAGS (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |\
+ IB_ACCESS_REMOTE_ATOMIC | IB_ZERO_BASED)
#define VALID_DMA_MR_FLAGS (IB_ACCESS_LOCAL_WRITE)
@@ -24,6 +24,9 @@ mana_ib_verbs_to_gdma_access_flags(int access_flags)
if (access_flags & IB_ACCESS_REMOTE_READ)
flags |= GDMA_ACCESS_FLAG_REMOTE_READ;
+ if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
+ flags |= GDMA_ACCESS_FLAG_REMOTE_ATOMIC;
+
return flags;
}
@@ -48,7 +51,10 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
req.gva.virtual_address = mr_params->gva.virtual_address;
req.gva.access_flags = mr_params->gva.access_flags;
break;
-
+ case GDMA_MR_TYPE_ZBVA:
+ req.zbva.dma_region_handle = mr_params->zbva.dma_region_handle;
+ req.zbva.access_flags = mr_params->zbva.access_flags;
+ break;
default:
ibdev_dbg(&dev->ib_dev,
"invalid param (GDMA_MR_TYPE) passed, type %d\n",
@@ -144,11 +150,18 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
dma_region_handle);
mr_params.pd_handle = pd->pd_handle;
- mr_params.mr_type = GDMA_MR_TYPE_GVA;
- mr_params.gva.dma_region_handle = dma_region_handle;
- mr_params.gva.virtual_address = iova;
- mr_params.gva.access_flags =
- mana_ib_verbs_to_gdma_access_flags(access_flags);
+ if (access_flags & IB_ZERO_BASED) {
+ mr_params.mr_type = GDMA_MR_TYPE_ZBVA;
+ mr_params.zbva.dma_region_handle = dma_region_handle;
+ mr_params.zbva.access_flags =
+ mana_ib_verbs_to_gdma_access_flags(access_flags);
+ } else {
+ mr_params.mr_type = GDMA_MR_TYPE_GVA;
+ mr_params.gva.dma_region_handle = dma_region_handle;
+ mr_params.gva.virtual_address = iova;
+ mr_params.gva.access_flags =
+ mana_ib_verbs_to_gdma_access_flags(access_flags);
+ }
err = mana_ib_gd_create_mr(dev, mr, &mr_params);
if (err)
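
A zero-based MR is addressed by offset from zero rather than by process
virtual address, which is why the GDMA_MR_TYPE_ZBVA branch carries no
virtual_address field. Seen from userspace (rdma-core), the request looks
roughly like this illustrative fragment:

    /* with IBV_ACCESS_ZERO_BASED, remote peers address the region as
     * offsets 0..len-1 under mr->rkey instead of using 'buf' addresses
     */
    struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
                                   IBV_ACCESS_LOCAL_WRITE |
                                   IBV_ACCESS_REMOTE_WRITE |
                                   IBV_ACCESS_ZERO_BASED);
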
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index c928af58f38b..14fd7d6c54a2 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -635,7 +635,6 @@ static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
{
struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
- struct gdma_context *gc = mdev_to_gc(mdev);
u32 doorbell, queue_size;
int i, err;
@@ -654,7 +653,7 @@ static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
goto destroy_queues;
}
}
- doorbell = gc->mana_ib.doorbell;
+ doorbell = mdev->gdma_dev->doorbell;
err = create_shadow_queue(&qp->shadow_rq, attr->cap.max_recv_wr,
sizeof(struct ud_rq_shadow_wqe));
@@ -736,7 +735,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.qp_handle = qp->qp_handle;
req.qp_state = attr->qp_state;
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 33f525b744f2..e279e69b9a51 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -43,7 +43,7 @@
#define MAX_VFS 80
#define MAX_PEND_REQS_PER_FUNC 4
-#define MAD_TIMEOUT_MS 2000
+#define MAD_TIMEOUT_SEC 2
#define mcg_warn(fmt, arg...) pr_warn("MCG WARNING: " fmt, ##arg)
#define mcg_error(fmt, arg...) pr_err(fmt, ##arg)
@@ -270,7 +270,7 @@ static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad
if (!ret) {
/* calls mlx4_ib_mcg_timeout_handler */
queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
- msecs_to_jiffies(MAD_TIMEOUT_MS));
+ secs_to_jiffies(MAD_TIMEOUT_SEC));
}
return ret;
@@ -309,7 +309,7 @@ static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
if (!ret) {
/* calls mlx4_ib_mcg_timeout_handler */
queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
- msecs_to_jiffies(MAD_TIMEOUT_MS));
+ secs_to_jiffies(MAD_TIMEOUT_SEC));
}
return ret;
@@ -1091,7 +1091,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
for (i = 0; i < MAX_VFS; ++i)
clean_vf_mcast(ctx, i);
- end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);
+ end = jiffies + secs_to_jiffies(MAD_TIMEOUT_SEC + 3);
do {
count = 0;
mutex_lock(&ctx->mcg_table_lock);
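
The timeout constant moves from milliseconds to seconds so the call sites can
use secs_to_jiffies() directly; the effective values are unchanged (2000 ms
== 2 s, and 2000 + 3000 ms == 2 + 3 s). Equivalence sketch:

    unsigned long t_old = msecs_to_jiffies(2000); /* previous style */
    unsigned long t_new = secs_to_jiffies(2);     /* same jiffies value */
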
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 0ff9f18a71e8..680627f1de33 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -1645,11 +1645,6 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
}
-enum {
- LEFTOVERS_MC,
- LEFTOVERS_UC,
-};
-
static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_prio *ft_prio,
struct ib_flow_attr *flow_attr,
@@ -1659,43 +1654,32 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
struct mlx5_ib_flow_handler *handler = NULL;
static struct {
- struct ib_flow_attr flow_attr;
struct ib_flow_spec_eth eth_flow;
- } leftovers_specs[] = {
- [LEFTOVERS_MC] = {
- .flow_attr = {
- .num_of_specs = 1,
- .size = sizeof(leftovers_specs[0])
- },
- .eth_flow = {
- .type = IB_FLOW_SPEC_ETH,
- .size = sizeof(struct ib_flow_spec_eth),
- .mask = {.dst_mac = {0x1} },
- .val = {.dst_mac = {0x1} }
- }
- },
- [LEFTOVERS_UC] = {
- .flow_attr = {
- .num_of_specs = 1,
- .size = sizeof(leftovers_specs[0])
- },
- .eth_flow = {
- .type = IB_FLOW_SPEC_ETH,
- .size = sizeof(struct ib_flow_spec_eth),
- .mask = {.dst_mac = {0x1} },
- .val = {.dst_mac = {} }
- }
- }
- };
+ struct ib_flow_attr flow_attr;
+ } leftovers_wc = { .flow_attr = { .num_of_specs = 1,
+ .size = sizeof(leftovers_wc) },
+ .eth_flow = {
+ .type = IB_FLOW_SPEC_ETH,
+ .size = sizeof(struct ib_flow_spec_eth),
+ .mask = { .dst_mac = { 0x1 } },
+ .val = { .dst_mac = { 0x1 } } } };
- handler = create_flow_rule(dev, ft_prio,
- &leftovers_specs[LEFTOVERS_MC].flow_attr,
- dst);
+ static struct {
+ struct ib_flow_spec_eth eth_flow;
+ struct ib_flow_attr flow_attr;
+ } leftovers_uc = { .flow_attr = { .num_of_specs = 1,
+ .size = sizeof(leftovers_uc) },
+ .eth_flow = {
+ .type = IB_FLOW_SPEC_ETH,
+ .size = sizeof(struct ib_flow_spec_eth),
+ .mask = { .dst_mac = { 0x1 } },
+ .val = { .dst_mac = {} } } };
+
+ handler = create_flow_rule(dev, ft_prio, &leftovers_wc.flow_attr, dst);
if (!IS_ERR(handler) &&
flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
handler_ucast = create_flow_rule(dev, ft_prio,
- &leftovers_specs[LEFTOVERS_UC].flow_attr,
- dst);
+ &leftovers_uc.flow_attr, dst);
if (IS_ERR(handler_ucast)) {
mlx5_del_flow_rules(handler->rule);
ft_prio->refcount--;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d07cacaa0abd..ce7610740412 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -485,6 +485,10 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
*active_width = IB_WIDTH_2X;
*active_speed = IB_SPEED_NDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_200GAUI_1_200GBASE_CR1_KR1):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_XDR;
+ break;
case MLX5E_PROT_MASK(MLX5E_400GAUI_8_400GBASE_CR8):
*active_width = IB_WIDTH_8X;
*active_speed = IB_SPEED_HDR;
@@ -493,10 +497,18 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_NDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_400GAUI_2_400GBASE_CR2_KR2):
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_XDR;
+ break;
case MLX5E_PROT_MASK(MLX5E_800GAUI_8_800GBASE_CR8_KR8):
*active_width = IB_WIDTH_8X;
*active_speed = IB_SPEED_NDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_800GAUI_4_800GBASE_CR4_KR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_XDR;
+ break;
default:
return -EINVAL;
}
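
Each new protocol mask maps lane count to IB width and per-lane rate to IB
speed: 200G over one lane, 400G over two and 800G over four are all
200 Gb/s per lane, i.e. XDR. The underlying rule, as a hypothetical helper
covering just the rates touched here:

    static u16 lane_rate_to_ib_speed(unsigned int gbps_per_lane)
    {
            switch (gbps_per_lane) {
            case 50:  return IB_SPEED_HDR; /* 50G lanes  */
            case 100: return IB_SPEED_NDR; /* 100G lanes */
            case 200: return IB_SPEED_XDR; /* 200G lanes */
            default:  return IB_SPEED_SDR;
            }
    }
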
@@ -4422,17 +4434,6 @@ static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
mlx5_core_native_port_num(dev->mdev) - 1);
}
-static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
-{
- dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
- return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
-}
-
-static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
-{
- mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
-}
-
static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
{
int err;
@@ -4662,9 +4663,6 @@ static const struct mlx5_ib_profile pf_profile = {
STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
mlx5_ib_stage_cong_debugfs_init,
mlx5_ib_stage_cong_debugfs_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_UAR,
- mlx5_ib_stage_uar_init,
- mlx5_ib_stage_uar_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_BFREG,
mlx5_ib_stage_bfrag_init,
mlx5_ib_stage_bfrag_cleanup),
@@ -4722,9 +4720,6 @@ const struct mlx5_ib_profile raw_eth_profile = {
STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
mlx5_ib_stage_cong_debugfs_init,
mlx5_ib_stage_cong_debugfs_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_UAR,
- mlx5_ib_stage_uar_init,
- mlx5_ib_stage_uar_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_BFREG,
mlx5_ib_stage_bfrag_init,
mlx5_ib_stage_bfrag_cleanup),
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index ace2df3e1d9f..fde859d207ae 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -351,6 +351,7 @@ struct mlx5_ib_flow_db {
#define MLX5_IB_UPD_XLT_PD BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT BIT(6)
+#define MLX5_IB_UPD_XLT_DOWNGRADE BIT(7)
/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
*
@@ -1005,7 +1006,6 @@ enum mlx5_ib_stages {
MLX5_IB_STAGE_ODP,
MLX5_IB_STAGE_COUNTERS,
MLX5_IB_STAGE_CONG_DEBUGFS,
- MLX5_IB_STAGE_UAR,
MLX5_IB_STAGE_BFREG,
MLX5_IB_STAGE_PRE_IB_REG_UMR,
MLX5_IB_STAGE_WHITELIST_UID,
@@ -1473,8 +1473,8 @@ void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev);
-void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags);
+int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags);
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
@@ -1495,8 +1495,11 @@ static inline int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
{
return 0;
}
-static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags) {}
+static inline int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
+{
+ return -EOPNOTSUPP;
+}
static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 5fbebafc8774..6dd813bac5b2 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -525,7 +525,7 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
ent->fill_to_high_water = false;
if (ent->pending)
queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
- msecs_to_jiffies(1000));
+ secs_to_jiffies(1));
else
mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
}
@@ -576,7 +576,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
"add keys command failed, err %d\n",
err);
queue_delayed_work(cache->wq, &ent->dwork,
- msecs_to_jiffies(1000));
+ secs_to_jiffies(1));
}
}
} else if (ent->mkeys_queue.ci > 2 * ent->limit) {
@@ -2051,7 +2051,7 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
ent->in_use--;
if (ent->is_tmp && !ent->tmp_cleanup_scheduled) {
mod_delayed_work(ent->dev->cache.wq, &ent->dwork,
- msecs_to_jiffies(30 * 1000));
+ secs_to_jiffies(30));
ent->tmp_cleanup_scheduled = true;
}
spin_unlock_irq(&ent->mkeys_queue.lock);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 86d8fa63bf69..eaa2f9f5f3a9 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -34,6 +34,9 @@
#include <linux/kernel.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
+#include <linux/hmm.h>
+#include <linux/hmm-dma.h>
+#include <linux/pci-p2pdma.h>
#include "mlx5_ib.h"
#include "cmd.h"
@@ -158,41 +161,50 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
}
}
-static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
-{
- u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
-
- if (umem_dma & ODP_READ_ALLOWED_BIT)
- mtt_entry |= MLX5_IB_MTT_READ;
- if (umem_dma & ODP_WRITE_ALLOWED_BIT)
- mtt_entry |= MLX5_IB_MTT_WRITE;
-
- return mtt_entry;
-}
-
-static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags)
+static int populate_mtt(__be64 *pas, size_t start, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- dma_addr_t pa;
+ bool downgrade = flags & MLX5_IB_UPD_XLT_DOWNGRADE;
+ struct pci_p2pdma_map_state p2pdma_state = {};
+ struct ib_device *dev = odp->umem.ibdev;
size_t i;
if (flags & MLX5_IB_UPD_XLT_ZAP)
- return;
+ return 0;
for (i = 0; i < nentries; i++) {
- pa = odp->dma_list[idx + i];
- pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
+ unsigned long pfn = odp->map.pfn_list[start + i];
+ dma_addr_t dma_addr;
+
+ if (!(pfn & HMM_PFN_VALID))
+ /* ODP initialization */
+ continue;
+
+ dma_addr = hmm_dma_map_pfn(dev->dma_device, &odp->map,
+ start + i, &p2pdma_state);
+ if (ib_dma_mapping_error(dev, dma_addr))
+ return -EFAULT;
+
+ dma_addr |= MLX5_IB_MTT_READ;
+ if ((pfn & HMM_PFN_WRITE) && !downgrade)
+ dma_addr |= MLX5_IB_MTT_WRITE;
+
+ pas[i] = cpu_to_be64(dma_addr);
+ odp->npages++;
}
+ return 0;
}
-void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags)
+int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
{
if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
populate_klm(xlt, idx, nentries, mr, flags);
+ return 0;
} else {
- populate_mtt(xlt, idx, nentries, mr, flags);
+ return populate_mtt(xlt, idx, nentries, mr, flags);
}
}
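
populate_mtt() now works straight from the hmm pfn list: invalid pfns are
skipped (they only occur during ODP initialization), valid ones are
DMA-mapped on demand, and the MTT always grants read while write requires
both HMM_PFN_WRITE and no downgrade in flight. The per-entry core, lifted
out with comments:

    dma_addr = hmm_dma_map_pfn(dev->dma_device, &odp->map,
                               start + i, &p2pdma_state);
    if (ib_dma_mapping_error(dev, dma_addr))
            return -EFAULT;             /* propagated up the UMR path */

    dma_addr |= MLX5_IB_MTT_READ;       /* read is always granted */
    if ((pfn & HMM_PFN_WRITE) && !downgrade)
            dma_addr |= MLX5_IB_MTT_WRITE;
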
@@ -303,8 +315,7 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
* estimate the cost of another UMR vs. the cost of bigger
* UMR.
*/
- if (umem_odp->dma_list[idx] &
- (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
+ if (umem_odp->map.pfn_list[idx] & HMM_PFN_VALID) {
if (!in_block) {
blk_start_idx = idx;
in_block = 1;
@@ -687,7 +698,7 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
{
int page_shift, ret, np;
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
- u64 access_mask;
+ u64 access_mask = 0;
u64 start_idx;
bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT);
u32 xlt_flags = MLX5_IB_UPD_XLT_ATOMIC;
@@ -695,12 +706,14 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
if (flags & MLX5_PF_FLAGS_ENABLE)
xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
+ if (flags & MLX5_PF_FLAGS_DOWNGRADE)
+ xlt_flags |= MLX5_IB_UPD_XLT_DOWNGRADE;
+
page_shift = odp->page_shift;
start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
- access_mask = ODP_READ_ALLOWED_BIT;
if (odp->umem.writable && !downgrade)
- access_mask |= ODP_WRITE_ALLOWED_BIT;
+ access_mask |= HMM_PFN_WRITE;
np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault);
if (np < 0)
diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
index d3dcc272200a..146d03ae40bd 100644
--- a/drivers/infiniband/hw/mlx5/qpc.c
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -21,8 +21,10 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
spin_lock_irqsave(&table->lock, flags);
common = radix_tree_lookup(&table->tree, rsn);
- if (common)
+ if (common && !common->invalid)
refcount_inc(&common->refcount);
+ else
+ common = NULL;
spin_unlock_irqrestore(&table->lock, flags);
@@ -178,6 +180,18 @@ static int create_resource_common(struct mlx5_ib_dev *dev,
return 0;
}
+static void modify_resource_common_state(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *qp,
+ bool invalid)
+{
+ struct mlx5_qp_table *table = &dev->qp_table;
+ unsigned long flags;
+
+ spin_lock_irqsave(&table->lock, flags);
+ qp->common.invalid = invalid;
+ spin_unlock_irqrestore(&table->lock, flags);
+}
+
static void destroy_resource_common(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *qp)
{
@@ -609,8 +623,20 @@ err_destroy_rq:
int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *rq)
{
+ int ret;
+
+	/* The rq destruction may be retried if it fails, so we mark the
+	 * common resource as invalid and only destroy the resources once
+	 * FW destruction completes successfully.
+ */
+ modify_resource_common_state(dev, rq, true);
+ ret = destroy_rq_tracked(dev, rq->qpn, rq->uid);
+ if (ret) {
+ modify_resource_common_state(dev, rq, false);
+ return ret;
+ }
destroy_resource_common(dev, rq);
- return destroy_rq_tracked(dev, rq->qpn, rq->uid);
+ return 0;
}
static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
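
The two-phase teardown makes a failed -- and therefore retryable -- FW
destroy safe: the entry is first hidden from lookups via the invalid flag and
only removed from the table once FW destruction succeeds; on failure the flag
is cleared and the resource stays usable. The guarded lookup in
mlx5_get_rsc() is the other half of the pattern, annotated:

    spin_lock_irqsave(&table->lock, flags);
    common = radix_tree_lookup(&table->tree, rsn);
    if (common && !common->invalid)
            refcount_inc(&common->refcount);  /* normal fast path */
    else
            common = NULL;                    /* mid-teardown: treat as miss */
    spin_unlock_irqrestore(&table->lock, flags);
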
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 793f3c5c4d01..5be4426a2884 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -840,7 +840,17 @@ int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
size_to_map = npages * desc_size;
dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
DMA_TO_DEVICE);
- mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
+ /*
+ * npages is the maximum number of pages to map, but we
+ * can't guarantee that all pages are actually mapped.
+ *
+	 * For example, if a page is a p2p page of a type that is not
+	 * supported for mapping, fewer pages than requested will be
+	 * mapped.
+ */
+ err = mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
+ if (err)
+ return err;
dma_sync_single_for_device(ddev, sg.addr, sg.length,
DMA_TO_DEVICE);
sg.length = ALIGN(size_to_map, MLX5_UMR_FLEX_ALIGNMENT);
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 192f83fd7c8a..dacb8ceeebe0 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -144,7 +144,7 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
buddy->max_order = max_order;
spin_lock_init(&buddy->lock);
- buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
+ buddy->bits = kcalloc(buddy->max_order + 1, sizeof(*buddy->bits),
GFP_KERNEL);
buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
GFP_KERNEL);
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index b9f4a2937c3a..2098de762bf5 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -90,7 +90,7 @@ static int create_file(const char *name, umode_t mode,
int error;
inode_lock(d_inode(parent));
- *dentry = lookup_one_len(name, parent, strlen(name));
+ *dentry = lookup_noperm(&QSTR(name), parent);
if (!IS_ERR(*dentry))
error = qibfs_mknod(d_inode(parent), *dentry,
mode, fops, data);
@@ -433,7 +433,7 @@ static int remove_device_files(struct super_block *sb,
char unit[10];
snprintf(unit, sizeof(unit), "%u", dd->unit);
- dir = lookup_one_len_unlocked(unit, sb->s_root, strlen(unit));
+ dir = lookup_noperm_unlocked(&QSTR(unit), sb->s_root);
if (IS_ERR(dir)) {
pr_err("Lookup of %s failed\n", unit);
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index f948b76f984d..3fbf99757b11 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -56,7 +56,7 @@ static int usnic_uiom_dma_fault(struct iommu_domain *domain,
unsigned long iova, int flags,
void *token)
{
- usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
+ usnic_err("Device %s iommu fault domain 0x%p va 0x%lx flags 0x%x\n",
dev_name(dev),
domain, iova, flags);
return -ENOSYS;
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index c180e7ebcfc5..1ed5b63f8afc 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config RDMA_RXE
tristate "Software RDMA over Ethernet (RoCE) driver"
- depends on INET && PCI && INFINIBAND
+ depends on INET && PCI && INFINIBAND && 64BIT
depends on INFINIBAND_VIRT_DMA
select NET_UDP_TUNNEL
select CRC32
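
The new 64BIT dependency exists because the 8-byte atomic-write path must
issue a single untorn 64-bit store; it replaces the old CONFIG_64BIT fallback
removed from rxe_mr_do_atomic_write() further down. The store in question:

    /* an 8-byte atomic write must be one store; on 32-bit targets a u64
     * store splits into two instructions and can be observed torn
     */
    va = kmap_local_page(page);
    smp_store_release(&va[page_offset >> 3], value);
    kunmap_local(va);
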
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index b248c68bf9b1..3a77d6db1720 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -101,6 +101,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe, struct net_device *ndev)
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_FLUSH;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC_WRITE;
}
}
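
With FLUSH and ATOMIC_WRITE added, rxe's RC ODP caps now cover the full verb
set the driver implements. Userspace can probe the advertised bits through
the extended device query -- an illustrative rdma-core fragment (only a
long-standing bit is shown; newer bits are tested the same way):

    struct ibv_device_attr_ex attr;

    if (!ibv_query_device_ex(ctx, NULL, &attr) &&
        (attr.odp_caps.per_transport_caps.rc_odp_caps &
         IBV_ODP_SUPPORT_READ))
            printf("RC ODP READ supported\n");
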
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 0bc3fbb6554f..876702058c84 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -70,9 +70,9 @@ int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
void *addr, int length, enum rxe_mr_copy_dir dir);
int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
-int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val);
-int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
+enum resp_states rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val);
+enum resp_states rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
enum rxe_mr_lookup_type type);
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
@@ -193,13 +193,16 @@ static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
/* rxe_odp.c */
extern const struct mmu_interval_notifier_ops rxe_mn_ops;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+#if defined CONFIG_INFINIBAND_ON_DEMAND_PAGING
int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
u64 iova, int access_flags, struct rxe_mr *mr);
int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_dir dir);
-int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val);
+enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val);
+int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
+ unsigned int length);
+enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline int
rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
@@ -212,9 +215,19 @@ static inline int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
{
return -EOPNOTSUPP;
}
-static inline int
+static inline enum resp_states
rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+ u64 compare, u64 swap_add, u64 *orig_val)
+{
+ return RESPST_ERR_UNSUPPORTED_OPCODE;
+}
+static inline int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
+ unsigned int length)
+{
+ return -EOPNOTSUPP;
+}
+static inline enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr,
+ u64 iova, u64 value)
{
return RESPST_ERR_UNSUPPORTED_OPCODE;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 432d864c3ce9..bcb97b3ea58a 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -424,7 +424,7 @@ err1:
return err;
}
-int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
+static int rxe_mr_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
{
unsigned int page_offset;
unsigned long index;
@@ -433,16 +433,6 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
int err;
u8 *va;
- /* mr must be valid even if length is zero */
- if (WARN_ON(!mr))
- return -EINVAL;
-
- if (length == 0)
- return 0;
-
- if (mr->ibmr.type == IB_MR_TYPE_DMA)
- return -EFAULT;
-
err = mr_check_range(mr, iova, length);
if (err)
return err;
@@ -454,7 +444,7 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
if (!page)
return -EFAULT;
bytes = min_t(unsigned int, length,
- mr_page_size(mr) - page_offset);
+ mr_page_size(mr) - page_offset);
va = kmap_local_page(page);
arch_wb_cache_pmem(va + page_offset, bytes);
@@ -468,11 +458,33 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
return 0;
}
+int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 start, unsigned int length)
+{
+ int err;
+
+ /* mr must be valid even if length is zero */
+ if (WARN_ON(!mr))
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ if (mr->ibmr.type == IB_MR_TYPE_DMA)
+ return -EFAULT;
+
+ if (is_odp_mr(mr))
+ err = rxe_odp_flush_pmem_iova(mr, start, length);
+ else
+ err = rxe_mr_flush_pmem_iova(mr, start, length);
+
+ return err;
+}
+
/* Guarantee atomicity of atomic operations at the machine level. */
DEFINE_SPINLOCK(atomic_ops_lock);
-int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+enum resp_states rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
{
unsigned int page_offset;
struct page *page;
@@ -524,27 +536,15 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
kunmap_local(va);
- return 0;
+ return RESPST_NONE;
}
-#if defined CONFIG_64BIT
-/* only implemented or called for 64 bit architectures */
-int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+enum resp_states rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
{
unsigned int page_offset;
struct page *page;
u64 *va;
- /* ODP is not supported right now. WIP. */
- if (is_odp_mr(mr))
- return RESPST_ERR_UNSUPPORTED_OPCODE;
-
- /* See IBA oA19-28 */
- if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
- rxe_dbg_mr(mr, "mr not in valid state\n");
- return RESPST_ERR_RKEY_VIOLATION;
- }
-
if (mr->ibmr.type == IB_MR_TYPE_DMA) {
page_offset = iova & (PAGE_SIZE - 1);
page = ib_virt_dma_to_page(iova);
@@ -572,20 +572,12 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
}
va = kmap_local_page(page);
-
/* Do atomic write after all prior operations have completed */
smp_store_release(&va[page_offset >> 3], value);
-
kunmap_local(va);
- return 0;
-}
-#else
-int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
-{
- return RESPST_ERR_UNSUPPORTED_OPCODE;
+ return RESPST_NONE;
}
-#endif
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
{
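
The non-ODP atomic-write path above boils down to an aligned 64-bit store with release semantics through a temporary kernel mapping. A minimal sketch of that sequence, assuming the page and in-page offset have already been resolved (example_atomic_write() is hypothetical):

#include <linux/highmem.h>

static int example_atomic_write(struct page *page, unsigned int page_offset,
				u64 value)
{
	u64 *va;

	/* IBA A19.4.2: the target must be 8-byte aligned */
	if (page_offset & 0x7)
		return -EINVAL;

	va = kmap_local_page(page);
	/* publish the value after all prior operations have completed */
	smp_store_release(&va[page_offset >> 3], value);
	kunmap_local(va);

	return 0;
}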
diff --git a/drivers/infiniband/sw/rxe/rxe_odp.c b/drivers/infiniband/sw/rxe/rxe_odp.c
index 9f6e2bb2a269..dbc5a5600eb7 100644
--- a/drivers/infiniband/sw/rxe/rxe_odp.c
+++ b/drivers/infiniband/sw/rxe/rxe_odp.c
@@ -4,6 +4,7 @@
*/
#include <linux/hmm.h>
+#include <linux/libnvdimm.h>
#include <rdma/ib_umem_odp.h>
@@ -26,7 +27,7 @@ static bool rxe_ib_invalidate_range(struct mmu_interval_notifier *mni,
start = max_t(u64, ib_umem_start(umem_odp), range->start);
end = min_t(u64, ib_umem_end(umem_odp), range->end);
- /* update umem_odp->dma_list */
+ /* update umem_odp->map.pfn_list */
ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
mutex_unlock(&umem_odp->umem_mutex);
@@ -44,12 +45,11 @@ static int rxe_odp_do_pagefault_and_lock(struct rxe_mr *mr, u64 user_va, int bcn
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
bool fault = !(flags & RXE_PAGEFAULT_SNAPSHOT);
- u64 access_mask;
+ u64 access_mask = 0;
int np;
- access_mask = ODP_READ_ALLOWED_BIT;
if (umem_odp->umem.writable && !(flags & RXE_PAGEFAULT_RDONLY))
- access_mask |= ODP_WRITE_ALLOWED_BIT;
+ access_mask |= HMM_PFN_WRITE;
/*
* ib_umem_odp_map_dma_and_lock() locks umem_mutex on success.
@@ -124,8 +124,8 @@ int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
return err;
}
-static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
- u64 iova, int length, u32 perm)
+static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp, u64 iova,
+ int length)
{
bool need_fault = false;
u64 addr;
@@ -137,7 +137,7 @@ static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
while (addr < iova + length) {
idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- if (!(umem_odp->dma_list[idx] & perm)) {
+ if (!(umem_odp->map.pfn_list[idx] & HMM_PFN_VALID)) {
need_fault = true;
break;
}
@@ -147,23 +147,28 @@ static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
return need_fault;
}
+static unsigned long rxe_odp_iova_to_index(struct ib_umem_odp *umem_odp, u64 iova)
+{
+ return (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
+}
+
+static unsigned long rxe_odp_iova_to_page_offset(struct ib_umem_odp *umem_odp, u64 iova)
+{
+ return iova & (BIT(umem_odp->page_shift) - 1);
+}
+
static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u32 flags)
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
bool need_fault;
- u64 perm;
int err;
if (unlikely(length < 1))
return -EINVAL;
- perm = ODP_READ_ALLOWED_BIT;
- if (!(flags & RXE_PAGEFAULT_RDONLY))
- perm |= ODP_WRITE_ALLOWED_BIT;
-
mutex_lock(&umem_odp->umem_mutex);
- need_fault = rxe_check_pagefault(umem_odp, iova, length, perm);
+ need_fault = rxe_check_pagefault(umem_odp, iova, length);
if (need_fault) {
mutex_unlock(&umem_odp->umem_mutex);
@@ -173,7 +178,7 @@ static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u
if (err < 0)
return err;
- need_fault = rxe_check_pagefault(umem_odp, iova, length, perm);
+ need_fault = rxe_check_pagefault(umem_odp, iova, length);
if (need_fault)
return -EFAULT;
}
@@ -190,13 +195,13 @@ static int __rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
size_t offset;
u8 *user_va;
- idx = (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- offset = iova & (BIT(umem_odp->page_shift) - 1);
+ idx = rxe_odp_iova_to_index(umem_odp, iova);
+ offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
while (length > 0) {
u8 *src, *dest;
- page = hmm_pfn_to_page(umem_odp->pfn_list[idx]);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
user_va = kmap_local_page(page);
if (!user_va)
return -EFAULT;
@@ -255,8 +260,9 @@ int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
return err;
}
-static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+static enum resp_states rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova,
+ int opcode, u64 compare,
+ u64 swap_add, u64 *orig_val)
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
unsigned int page_offset;
@@ -277,9 +283,9 @@ static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
return RESPST_ERR_RKEY_VIOLATION;
}
- idx = (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- page_offset = iova & (BIT(umem_odp->page_shift) - 1);
- page = hmm_pfn_to_page(umem_odp->pfn_list[idx]);
+ idx = rxe_odp_iova_to_index(umem_odp, iova);
+ page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
if (!page)
return RESPST_ERR_RKEY_VIOLATION;
@@ -304,11 +310,11 @@ static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
kunmap_local(va);
- return 0;
+ return RESPST_NONE;
}
-int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
int err;
@@ -324,3 +330,91 @@ int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
return err;
}
+
+int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
+ unsigned int length)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ unsigned int page_offset;
+ unsigned long index;
+ struct page *page;
+ unsigned int bytes;
+ int err;
+ u8 *va;
+
+ err = rxe_odp_map_range_and_lock(mr, iova, length,
+ RXE_PAGEFAULT_DEFAULT);
+ if (err)
+ return err;
+
+ while (length > 0) {
+ index = rxe_odp_iova_to_index(umem_odp, iova);
+ page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
+ if (!page) {
+ mutex_unlock(&umem_odp->umem_mutex);
+ return -EFAULT;
+ }
+
+ bytes = min_t(unsigned int, length,
+ mr_page_size(mr) - page_offset);
+
+ va = kmap_local_page(page);
+ arch_wb_cache_pmem(va + page_offset, bytes);
+ kunmap_local(va);
+
+ length -= bytes;
+ iova += bytes;
+ page_offset = 0;
+ }
+
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ return 0;
+}
+
+enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ unsigned int page_offset;
+ unsigned long index;
+ struct page *page;
+ int err;
+ u64 *va;
+
+ /* See IBA oA19-28 */
+ err = mr_check_range(mr, iova, sizeof(value));
+ if (unlikely(err)) {
+ rxe_dbg_mr(mr, "iova out of range\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ err = rxe_odp_map_range_and_lock(mr, iova, sizeof(value),
+ RXE_PAGEFAULT_DEFAULT);
+ if (err)
+ return RESPST_ERR_RKEY_VIOLATION;
+
+ page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+ index = rxe_odp_iova_to_index(umem_odp, iova);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
+ if (!page) {
+ mutex_unlock(&umem_odp->umem_mutex);
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+ /* See IBA A19.4.2 */
+ if (unlikely(page_offset & 0x7)) {
+ mutex_unlock(&umem_odp->umem_mutex);
+ rxe_dbg_mr(mr, "misaligned address\n");
+ return RESPST_ERR_MISALIGNED_ATOMIC;
+ }
+
+ va = kmap_local_page(page);
+ /* Do atomic write after all prior operations have completed */
+ smp_store_release(&va[page_offset >> 3], value);
+ kunmap_local(va);
+
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ return RESPST_NONE;
+}
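
The rxe_odp_iova_to_index()/rxe_odp_iova_to_page_offset() helpers introduced above just factor out the shift-and-mask arithmetic. A small worked sketch of the translation they encode (values are illustrative only):

/* With page_shift = 12 (4 KiB pages) and umem start 0x10000:
 *   iova 0x12345 -> index  = (0x12345 - 0x10000) >> 12 = 2
 *                -> offset = 0x12345 & (BIT(12) - 1)   = 0x345
 * index selects the slot in map.pfn_list; offset is the byte
 * position inside that page.
 */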
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 003f681e5dc0..767870568372 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -53,12 +53,9 @@ enum rxe_device_param {
| IB_DEVICE_MEM_WINDOW
| IB_DEVICE_FLUSH_GLOBAL
| IB_DEVICE_FLUSH_PERSISTENT
-#ifdef CONFIG_64BIT
| IB_DEVICE_MEM_WINDOW_TYPE_2B
| IB_DEVICE_ATOMIC_WRITE,
-#else
- | IB_DEVICE_MEM_WINDOW_TYPE_2B,
-#endif /* CONFIG_64BIT */
+
RXE_MAX_SGE = 32,
RXE_MAX_WQE_SIZE = sizeof(struct rxe_send_wqe) +
sizeof(struct ib_sge) * RXE_MAX_SGE,
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 7975fb0e2782..f2af3e0aef35 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -811,7 +811,12 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
spin_unlock_irqrestore(&qp->state_lock, flags);
qp->qp_timeout_jiffies = 0;
- if (qp_type(qp) == IB_QPT_RC) {
+ /* timer_setup() initializes .function. If .function is NULL,
+ * timer_setup() was never called and the timer was never
+ * initialized; otherwise the timer is initialized.
+ */
+ if (qp_type(qp) == IB_QPT_RC && qp->retrans_timer.function &&
+ qp->rnr_nak_timer.function) {
timer_delete_sync(&qp->retrans_timer);
timer_delete_sync(&qp->rnr_nak_timer);
}
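
The guard added above works because timer_setup() populates timer_list.function; a NULL function pointer therefore identifies a timer that was never initialized and must not be passed to timer_delete_sync(). The same check in isolation (example_cleanup() is hypothetical):

#include <linux/timer.h>

static void example_cleanup(struct timer_list *t)
{
	/* only tear down a timer that timer_setup() initialized */
	if (t->function)
		timer_delete_sync(t);
}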
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 5d9174e408db..711f73e0bbb1 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -649,10 +649,6 @@ static enum resp_states process_flush(struct rxe_qp *qp,
struct rxe_mr *mr = qp->resp.mr;
struct resp_res *res = qp->resp.res;
- /* ODP is not supported right now. WIP. */
- if (is_odp_mr(mr))
- return RESPST_ERR_UNSUPPORTED_OPCODE;
-
/* oA19-14, oA19-15 */
if (res && res->replay)
return RESPST_ACKNOWLEDGE;
@@ -753,7 +749,16 @@ static enum resp_states atomic_write_reply(struct rxe_qp *qp,
value = *(u64 *)payload_addr(pkt);
iova = qp->resp.va + qp->resp.offset;
- err = rxe_mr_do_atomic_write(mr, iova, value);
+ /* See IBA oA19-28 */
+ if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
+ rxe_dbg_mr(mr, "mr not in valid state\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ if (is_odp_mr(mr))
+ err = rxe_odp_do_atomic_write(mr, iova, value);
+ else
+ err = rxe_mr_do_atomic_write(mr, iova, value);
if (err)
return err;
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 80332638d9e3..6f8f353e9583 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -85,17 +85,17 @@ static bool is_done(struct rxe_task *task)
/* do_task is a wrapper for the three tasks (requester,
* completer, responder) and calls them in a loop until
- * they return a non-zero value. It is called either
- * directly by rxe_run_task or indirectly if rxe_sched_task
- * schedules the task. They must call __reserve_if_idle to
- * move the task to busy before calling or scheduling.
- * The task can also be moved to drained or invalid
- * by calls to rxe_cleanup_task or rxe_disable_task.
- * In that case tasks which get here are not executed but
- * just flushed. The tasks are designed to look to see if
- * there is work to do and then do part of it before returning
- * here with a return value of zero until all the work
- * has been consumed then it returns a non-zero value.
+ * they return a non-zero value. It is called indirectly
+ * when rxe_sched_task schedules the task. They must
+ * call __reserve_if_idle to move the task to busy before
+ * calling or scheduling. The task can also be moved to
+ * drained or invalid by calls to rxe_cleanup_task or
+ * rxe_disable_task. In that case tasks which get here
+ * are not executed but just flushed. The tasks are
+ * designed to check whether there is work to do and
+ * then do part of it before returning here with a
+ * return value of zero until all the work has been
+ * consumed, at which point they return a non-zero value.
* The number of times the task can be run is limited by
* max iterations so one task cannot hold the cpu forever.
* If the limit is hit and work remains the task is rescheduled.
@@ -234,24 +234,6 @@ void rxe_cleanup_task(struct rxe_task *task)
spin_unlock_irqrestore(&task->lock, flags);
}
-/* run the task inline if it is currently idle
- * cannot call do_task holding the lock
- */
-void rxe_run_task(struct rxe_task *task)
-{
- unsigned long flags;
- bool run;
-
- WARN_ON(rxe_read(task->qp) <= 0);
-
- spin_lock_irqsave(&task->lock, flags);
- run = __reserve_if_idle(task);
- spin_unlock_irqrestore(&task->lock, flags);
-
- if (run)
- do_task(task);
-}
-
/* schedule the task to run later as a work queue entry.
* the queue_work call can be called holding
* the lock.
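
With rxe_run_task() removed above, every task is deferred through the workqueue rather than run inline, so callers may schedule while holding locks. A minimal sketch of that shape (all example_* names hypothetical):

#include <linux/workqueue.h>

struct example_task {
	struct work_struct work;
};

static void example_do_task(struct work_struct *work)
{
	/* consume a bounded slice of pending work, rescheduling
	 * if the iteration budget runs out, as do_task() does
	 */
}

static void example_init(struct example_task *task)
{
	INIT_WORK(&task->work, example_do_task);
}

static void example_sched(struct example_task *task)
{
	/* safe in atomic context; execution happens later */
	queue_work(system_wq, &task->work);
}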
diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
index a63e258b3d66..a8c9a77b6027 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.h
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -47,8 +47,6 @@ int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
/* cleanup task */
void rxe_cleanup_task(struct rxe_task *task);
-void rxe_run_task(struct rxe_task *task);
-
void rxe_sched_task(struct rxe_task *task);
/* keep a task from scheduling */
diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
index ae4a953e2a03..186f182b80e7 100644
--- a/drivers/infiniband/sw/siw/Kconfig
+++ b/drivers/infiniband/sw/siw/Kconfig
@@ -3,6 +3,7 @@ config RDMA_SIW
depends on INET && INFINIBAND
depends on INFINIBAND_VIRT_DMA
select CRC32
+ select NET_CRC32C
help
This driver implements the iWARP RDMA transport over
the Linux TCP/IP network stack. It enables a system with a
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 385067e07faf..f5fd71717b80 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -693,29 +693,9 @@ static inline void siw_crc_oneshot(const void *data, size_t len, u8 out[4])
return siw_crc_final(&crc, out);
}
-static inline __wsum siw_csum_update(const void *buff, int len, __wsum sum)
-{
- return (__force __wsum)crc32c((__force __u32)sum, buff, len);
-}
-
-static inline __wsum siw_csum_combine(__wsum csum, __wsum csum2, int offset,
- int len)
-{
- return (__force __wsum)crc32c_combine((__force __u32)csum,
- (__force __u32)csum2, len);
-}
-
static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
{
- const struct skb_checksum_ops siw_cs_ops = {
- .update = siw_csum_update,
- .combine = siw_csum_combine,
- };
- __wsum crc = (__force __wsum)srx->mpa_crc;
-
- crc = __skb_checksum(srx->skb, srx->skb_offset, len, crc,
- &siw_cs_ops);
- srx->mpa_crc = (__force u32)crc;
+ srx->mpa_crc = skb_crc32c(srx->skb, srx->skb_offset, len, srx->mpa_crc);
}
#define siw_dbg(ibdev, fmt, ...) \
@@ -738,7 +718,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
"MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
#define siw_dbg_cep(cep, fmt, ...) \
- ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
+ ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt, \
cep, __func__, ##__VA_ARGS__)
void siw_cq_flush(struct siw_cq *cq);
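
The rewrite above works because skb_crc32c() itself walks the skb's linear area, page frags and frag_list, which is what the removed update/combine callbacks existed to do. A minimal sketch of its use, assuming the skb_crc32c() helper selected via NET_CRC32C:

#include <linux/skbuff.h>

static u32 example_crc(struct sk_buff *skb, int offset, int len, u32 crc)
{
	/* accumulate CRC32C over len bytes starting at offset,
	 * chaining from a previous partial value
	 */
	return skb_crc32c(skb, offset, len, crc);
}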
diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
index f3c2226aff94..25b3c741b66b 100644
--- a/drivers/infiniband/sw/siw/siw_cq.c
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -72,7 +72,7 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
wc->opcode = map_wc_opcode[cqe->opcode];
wc->status = map_cqe_status[cqe->status].ib;
siw_dbg_cq(cq,
- "idx %u, type %d, flags %2x, id 0x%pK\n",
+ "idx %u, type %d, flags %2x, id 0x%p\n",
cq->cq_get % cq->num_cqe, cqe->opcode,
cqe->flags, (void *)(uintptr_t)cqe->id);
} else {
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index dcb963607c8b..d5ddeb17bd22 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -18,30 +18,6 @@
#define SIW_STAG_MAX_INDEX 0x00ffffff
/*
- * The code avoids special Stag of zero and tries to randomize
- * STag values between 1 and SIW_STAG_MAX_INDEX.
- */
-int siw_mem_add(struct siw_device *sdev, struct siw_mem *m)
-{
- struct xa_limit limit = XA_LIMIT(1, SIW_STAG_MAX_INDEX);
- u32 id, next;
-
- get_random_bytes(&next, 4);
- next &= SIW_STAG_MAX_INDEX;
-
- if (xa_alloc_cyclic(&sdev->mem_xa, &id, m, limit, &next,
- GFP_KERNEL) < 0)
- return -ENOMEM;
-
- /* Set the STag index part */
- m->stag = id << 8;
-
- siw_dbg_mem(m, "new MEM object\n");
-
- return 0;
-}
-
-/*
* siw_mem_id2obj()
*
* resolves memory from stag given by id. might be called from:
@@ -181,10 +157,10 @@ int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
*/
if (addr < mem->va || addr + len > mem->va + mem->len) {
siw_dbg_pd(pd, "MEM interval len %d\n", len);
- siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n",
+ siw_dbg_pd(pd, "[0x%p, 0x%p] out of bounds\n",
(void *)(uintptr_t)addr,
(void *)(uintptr_t)(addr + len));
- siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n",
+ siw_dbg_pd(pd, "[0x%p, 0x%p] STag=0x%08x\n",
(void *)(uintptr_t)mem->va,
(void *)(uintptr_t)(mem->va + mem->len),
mem->stag);
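
For reference, the deleted siw_mem_add() was an instance of the standard xa_alloc_cyclic() idiom: allocate a cyclic ID inside a bounded range, starting from a random position so STags are hard to guess. A minimal sketch of that idiom outside siw (example_alloc_id() is hypothetical):

#include <linux/xarray.h>
#include <linux/random.h>

static int example_alloc_id(struct xarray *xa, void *entry, u32 *out_id)
{
	struct xa_limit limit = XA_LIMIT(1, 0x00ffffff);
	u32 next;
	int err;

	get_random_bytes(&next, sizeof(next));
	next &= 0x00ffffff;

	err = xa_alloc_cyclic(xa, out_id, entry, limit, &next, GFP_KERNEL);
	/* xa_alloc_cyclic() returns 1 when the ID space wraps */
	return err < 0 ? err : 0;
}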
diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h
index e74cfcd6dbc1..8e769d30e2ac 100644
--- a/drivers/infiniband/sw/siw/siw_mem.h
+++ b/drivers/infiniband/sw/siw/siw_mem.h
@@ -12,7 +12,6 @@ void siw_umem_release(struct siw_umem *umem);
struct siw_pbl *siw_pbl_alloc(u32 num_buf);
dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
-int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
enum ib_access_flags perms, int len);
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 32554eba1eac..a10820e33887 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -38,7 +38,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
p = siw_get_upage(umem, dest_addr);
if (unlikely(!p)) {
- pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n",
+ pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n",
__func__, qp_id(rx_qp(srx)),
(void *)(uintptr_t)dest_addr,
(void *)(uintptr_t)umem->fp_addr);
@@ -51,7 +51,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
pg_off = dest_addr & ~PAGE_MASK;
bytes = min(len, (int)PAGE_SIZE - pg_off);
- siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes);
+ siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes);
dest = kmap_atomic(p);
rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
@@ -105,11 +105,11 @@ static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len)
{
int rv;
- siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len);
+ siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len);
rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
if (unlikely(rv)) {
- pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n",
+ pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n",
qp_id(rx_qp(srx)), __func__, len, kva, rv);
return rv;
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index fd7b266a221b..2b2a7b8e93b0 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -936,7 +936,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
rv = -EINVAL;
break;
}
- siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
+ siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n",
sqe->opcode, sqe->flags,
(void *)(uintptr_t)sqe->id);
@@ -1102,7 +1102,7 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
siw_dbg_qp(qp, "error %d\n", rv);
*bad_wr = wr;
}
- return rv > 0 ? 0 : rv;
+ return rv;
}
int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
@@ -1332,7 +1332,7 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
struct siw_device *sdev = to_siw_dev(pd->device);
int rv;
- siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
+ siw_dbg_pd(pd, "start: 0x%p, va: 0x%p, len: %llu\n",
(void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
(unsigned long long)len);
@@ -1525,7 +1525,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
mem->len = base_mr->length;
mem->va = base_mr->iova;
siw_dbg_mem(mem,
- "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
+ "%llu bytes, start 0x%p, %u SLE to %u entries\n",
mem->len, (void *)(uintptr_t)mem->va, num_sle,
pbl->num_buf);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index abe0522b7df4..91f866e3fb8b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -329,14 +329,6 @@ struct ipoib_dev_priv {
unsigned long flags;
- /*
- * This protects access to the child_intfs list.
- * To READ from child_intfs the RTNL or vlan_rwsem read side must be
- * held. To WRITE RTNL and the vlan_rwsem write side must be held (in
- * that order) This lock exists because we have a few contexts where
- * we need the child_intfs, but do not want to grab the RTNL.
- */
- struct rw_semaphore vlan_rwsem;
struct mutex mcast_mutex;
struct rb_root path_tree;
@@ -399,6 +391,9 @@ struct ipoib_dev_priv {
struct ib_event_handler event_handler;
struct net_device *parent;
+ /* 'child_intfs' and 'list' membership of all child devices is
+ * protected by the netdev instance lock of 'dev'.
+ */
struct list_head child_intfs;
struct list_head list;
int child_type;
@@ -512,6 +507,8 @@ int ipoib_intf_init(struct ib_device *hca, u32 port, const char *format,
void ipoib_ib_dev_flush_light(struct work_struct *work);
void ipoib_ib_dev_flush_normal(struct work_struct *work);
void ipoib_ib_dev_flush_heavy(struct work_struct *work);
+void ipoib_queue_work(struct ipoib_dev_priv *priv,
+ enum ipoib_flush_level level);
void ipoib_ib_tx_timeout_work(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
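
The header change above replaces vlan_rwsem with the parent netdev's instance lock as the protection for child_intfs. A minimal sketch of a traversal under the new rule, using the lock helpers the patch adopts:

#include <net/netdev_lock.h>

static void example_for_each_child(struct ipoib_dev_priv *priv)
{
	struct ipoib_dev_priv *cpriv;

	netdev_lock(priv->dev);
	list_for_each_entry(cpriv, &priv->child_intfs, list) {
		/* per-child work; runs with the parent instance lock held */
	}
	netdev_unlock(priv->dev);
}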
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5cde275daa94..10b0dbda6cd5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -40,6 +40,7 @@
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <net/netdev_lock.h>
#include <rdma/ib_cache.h>
#include "ipoib.h"
@@ -781,16 +782,20 @@ static void ipoib_napi_enable(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- napi_enable(&priv->recv_napi);
- napi_enable(&priv->send_napi);
+ netdev_lock_ops_to_full(dev);
+ napi_enable_locked(&priv->recv_napi);
+ napi_enable_locked(&priv->send_napi);
+ netdev_unlock_full_to_ops(dev);
}
static void ipoib_napi_disable(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- napi_disable(&priv->recv_napi);
- napi_disable(&priv->send_napi);
+ netdev_lock_ops_to_full(dev);
+ napi_disable_locked(&priv->recv_napi);
+ napi_disable_locked(&priv->send_napi);
+ netdev_unlock_full_to_ops(dev);
}
int ipoib_ib_dev_stop_default(struct net_device *dev)
@@ -1172,24 +1177,11 @@ out:
}
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
- enum ipoib_flush_level level,
- int nesting)
+ enum ipoib_flush_level level)
{
- struct ipoib_dev_priv *cpriv;
struct net_device *dev = priv->dev;
int result;
- down_read_nested(&priv->vlan_rwsem, nesting);
-
- /*
- * Flush any child interfaces too -- they might be up even if
- * the parent is down.
- */
- list_for_each_entry(cpriv, &priv->child_intfs, list)
- __ipoib_ib_dev_flush(cpriv, level, nesting + 1);
-
- up_read(&priv->vlan_rwsem);
-
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
level != IPOIB_FLUSH_HEAVY) {
/* Make sure the dev_addr is set even if not flushing */
@@ -1253,10 +1245,14 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
ipoib_ib_dev_down(dev);
if (level == IPOIB_FLUSH_HEAVY) {
+ netdev_lock_ops(dev);
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
ipoib_ib_dev_stop(dev);
- if (ipoib_ib_dev_open(dev))
+ result = ipoib_ib_dev_open(dev);
+ netdev_unlock_ops(dev);
+
+ if (result)
return;
if (netif_queue_stopped(dev))
@@ -1280,7 +1276,7 @@ void ipoib_ib_dev_flush_light(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_light);
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
}
void ipoib_ib_dev_flush_normal(struct work_struct *work)
@@ -1288,7 +1284,7 @@ void ipoib_ib_dev_flush_normal(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_normal);
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
}
void ipoib_ib_dev_flush_heavy(struct work_struct *work)
@@ -1297,10 +1293,35 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
container_of(work, struct ipoib_dev_priv, flush_heavy);
rtnl_lock();
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
rtnl_unlock();
}
+void ipoib_queue_work(struct ipoib_dev_priv *priv,
+ enum ipoib_flush_level level)
+{
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ struct ipoib_dev_priv *cpriv;
+
+ netdev_lock(priv->dev);
+ list_for_each_entry(cpriv, &priv->child_intfs, list)
+ ipoib_queue_work(cpriv, level);
+ netdev_unlock(priv->dev);
+ }
+
+ switch (level) {
+ case IPOIB_FLUSH_LIGHT:
+ queue_work(ipoib_workqueue, &priv->flush_light);
+ break;
+ case IPOIB_FLUSH_NORMAL:
+ queue_work(ipoib_workqueue, &priv->flush_normal);
+ break;
+ case IPOIB_FLUSH_HEAVY:
+ queue_work(ipoib_workqueue, &priv->flush_heavy);
+ break;
+ }
+}
+
void ipoib_ib_dev_cleanup(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
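
The NAPI changes above rely on the locked enable/disable variants, which require the full netdev instance lock; a caller holding only the ops scope upgrades around the call. The shape in isolation (example_napi_enable() is hypothetical):

#include <net/netdev_lock.h>

static void example_napi_enable(struct net_device *dev,
				struct napi_struct *napi)
{
	netdev_lock_ops_to_full(dev);	/* upgrade ops lock to full */
	napi_enable_locked(napi);
	netdev_unlock_full_to_ops(dev);	/* drop back to ops scope */
}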
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 3b463db8ce39..f2f5465f2a90 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -49,6 +49,7 @@
#include <linux/jhash.h>
#include <net/arp.h>
#include <net/addrconf.h>
+#include <net/netdev_lock.h>
#include <net/pkt_sched.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>
@@ -132,6 +133,52 @@ static int ipoib_netdev_event(struct notifier_block *this,
}
#endif
+struct ipoib_ifupdown_work {
+ struct work_struct work;
+ struct net_device *dev;
+ netdevice_tracker dev_tracker;
+ bool up;
+};
+
+static void ipoib_ifupdown_task(struct work_struct *work)
+{
+ struct ipoib_ifupdown_work *pwork =
+ container_of(work, struct ipoib_ifupdown_work, work);
+ struct net_device *dev = pwork->dev;
+ unsigned int flags;
+
+ rtnl_lock();
+ flags = dev->flags;
+ if (pwork->up)
+ flags |= IFF_UP;
+ else
+ flags &= ~IFF_UP;
+
+ if (dev->flags != flags)
+ dev_change_flags(dev, flags, NULL);
+ rtnl_unlock();
+ netdev_put(dev, &pwork->dev_tracker);
+ kfree(pwork);
+}
+
+static void ipoib_schedule_ifupdown_task(struct net_device *dev, bool up)
+{
+ struct ipoib_ifupdown_work *work;
+
+ if ((up && (dev->flags & IFF_UP)) ||
+ (!up && !(dev->flags & IFF_UP)))
+ return;
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return;
+ work->dev = dev;
+ netdev_hold(dev, &work->dev_tracker, GFP_KERNEL);
+ work->up = up;
+ INIT_WORK(&work->work, ipoib_ifupdown_task);
+ queue_work(ipoib_workqueue, &work->work);
+}
+
int ipoib_open(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -154,17 +201,10 @@ int ipoib_open(struct net_device *dev)
struct ipoib_dev_priv *cpriv;
/* Bring up any child interfaces too */
- down_read(&priv->vlan_rwsem);
- list_for_each_entry(cpriv, &priv->child_intfs, list) {
- int flags;
-
- flags = cpriv->dev->flags;
- if (flags & IFF_UP)
- continue;
-
- dev_change_flags(cpriv->dev, flags | IFF_UP, NULL);
- }
- up_read(&priv->vlan_rwsem);
+ netdev_lock_ops_to_full(dev);
+ list_for_each_entry(cpriv, &priv->child_intfs, list)
+ ipoib_schedule_ifupdown_task(cpriv->dev, true);
+ netdev_unlock_full_to_ops(dev);
} else if (priv->parent) {
struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
@@ -199,17 +239,10 @@ static int ipoib_stop(struct net_device *dev)
struct ipoib_dev_priv *cpriv;
/* Bring down any child interfaces too */
- down_read(&priv->vlan_rwsem);
- list_for_each_entry(cpriv, &priv->child_intfs, list) {
- int flags;
-
- flags = cpriv->dev->flags;
- if (!(flags & IFF_UP))
- continue;
-
- dev_change_flags(cpriv->dev, flags & ~IFF_UP, NULL);
- }
- up_read(&priv->vlan_rwsem);
+ netdev_lock_ops_to_full(dev);
+ list_for_each_entry(cpriv, &priv->child_intfs, list)
+ ipoib_schedule_ifupdown_task(cpriv->dev, false);
+ netdev_unlock_full_to_ops(dev);
}
return 0;
@@ -426,17 +459,20 @@ static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
}
}
+ if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
+ return matches;
+
/* Check child interfaces */
- down_read_nested(&priv->vlan_rwsem, nesting);
+ netdev_lock(priv->dev);
list_for_each_entry(child_priv, &priv->child_intfs, list) {
matches += ipoib_match_gid_pkey_addr(child_priv, gid,
- pkey_index, addr,
- nesting + 1,
- found_net_dev);
+ pkey_index, addr,
+ nesting + 1,
+ found_net_dev);
if (matches > 1)
break;
}
- up_read(&priv->vlan_rwsem);
+ netdev_unlock(priv->dev);
return matches;
}
@@ -531,9 +567,11 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
ipoib_warn(priv, "enabling connected mode "
"will cause multicast packet drops\n");
+ netdev_lock_ops(dev);
netdev_update_features(dev);
- dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
+ netif_set_mtu(dev, ipoib_cm_max_mtu(dev));
netif_set_real_num_tx_queues(dev, 1);
+ netdev_unlock_ops(dev);
rtnl_unlock();
priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
@@ -543,9 +581,11 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
if (!strcmp(buf, "datagram\n")) {
clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+ netdev_lock_ops(dev);
netdev_update_features(dev);
- dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+ netif_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
netif_set_real_num_tx_queues(dev, dev->num_tx_queues);
+ netdev_unlock_ops(dev);
rtnl_unlock();
ipoib_flush_paths(dev);
return (!rtnl_trylock()) ? -EBUSY : 0;
@@ -1212,6 +1252,7 @@ void ipoib_ib_tx_timeout_work(struct work_struct *work)
int err;
rtnl_lock();
+ netdev_lock_ops(priv->dev);
if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
goto unlock;
@@ -1226,6 +1267,7 @@ void ipoib_ib_tx_timeout_work(struct work_struct *work)
netif_tx_wake_all_queues(priv->dev);
unlock:
+ netdev_unlock_ops(priv->dev);
rtnl_unlock();
}
@@ -1992,9 +2034,9 @@ static int ipoib_ndo_init(struct net_device *ndev)
dev_hold(priv->parent);
- down_write(&ppriv->vlan_rwsem);
+ netdev_lock(priv->parent);
list_add_tail(&priv->list, &ppriv->child_intfs);
- up_write(&ppriv->vlan_rwsem);
+ netdev_unlock(priv->parent);
}
return 0;
@@ -2004,8 +2046,6 @@ static void ipoib_ndo_uninit(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- ASSERT_RTNL();
-
/*
* ipoib_remove_one guarantees the children are removed before the
* parent, and that is the only place where a parent can be removed.
@@ -2015,9 +2055,9 @@ static void ipoib_ndo_uninit(struct net_device *dev)
if (priv->parent) {
struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
- down_write(&ppriv->vlan_rwsem);
+ netdev_lock(ppriv->dev);
list_del(&priv->list);
- up_write(&ppriv->vlan_rwsem);
+ netdev_unlock(ppriv->dev);
}
ipoib_neigh_hash_uninit(dev);
@@ -2167,7 +2207,6 @@ static void ipoib_build_priv(struct net_device *dev)
priv->dev = dev;
spin_lock_init(&priv->lock);
- init_rwsem(&priv->vlan_rwsem);
mutex_init(&priv->mcast_mutex);
INIT_LIST_HEAD(&priv->path_list);
@@ -2372,10 +2411,10 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
netif_addr_unlock_bh(netdev);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
- down_read(&priv->vlan_rwsem);
+ netdev_lock_ops_to_full(priv->dev);
list_for_each_entry(child_priv, &priv->child_intfs, list)
set_base_guid(child_priv, gid);
- up_read(&priv->vlan_rwsem);
+ netdev_unlock_full_to_ops(priv->dev);
}
}
@@ -2415,6 +2454,14 @@ static int ipoib_set_mac(struct net_device *dev, void *addr)
set_base_guid(priv, (union ib_gid *)(ss->__data + 4));
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ struct ipoib_dev_priv *cpriv;
+
+ netdev_lock_ops_to_full(dev);
+ list_for_each_entry(cpriv, &priv->child_intfs, list)
+ queue_work(ipoib_workqueue, &cpriv->flush_light);
+ netdev_unlock_full_to_ops(dev);
+ }
queue_work(ipoib_workqueue, &priv->flush_light);
return 0;
@@ -2526,7 +2573,7 @@ static struct net_device *ipoib_add_port(const char *format,
ib_register_event_handler(&priv->event_handler);
/* call event handler to ensure pkey in sync */
- queue_work(ipoib_workqueue, &priv->flush_heavy);
+ ipoib_queue_work(priv, IPOIB_FLUSH_HEAVY);
ndev->rtnl_link_ops = ipoib_get_link_ops();
@@ -2624,9 +2671,11 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
rtnl_lock();
+ netdev_lock(priv->dev);
list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs,
list)
unregister_netdevice_queue(cpriv->dev, &head);
+ netdev_unlock(priv->dev);
unregister_netdevice_queue(priv->dev, &head);
unregister_netdevice_many(&head);
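
The ifupdown work introduced above is a general deferred-netdev-work pattern: take a tracked reference so the device cannot be freed before the work runs, then perform the flag change under RTNL from the workqueue. A condensed sketch of that pattern (example_* names hypothetical):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct example_work {
	struct work_struct work;
	struct net_device *dev;
	netdevice_tracker tracker;	/* pairs netdev_hold()/netdev_put() */
};

static void example_task(struct work_struct *work)
{
	struct example_work *w = container_of(work, struct example_work, work);

	rtnl_lock();
	/* act on w->dev under RTNL, e.g. toggle IFF_UP */
	rtnl_unlock();

	netdev_put(w->dev, &w->tracker);
	kfree(w);
}

static void example_schedule(struct net_device *dev)
{
	struct example_work *w = kmalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return;
	w->dev = dev;
	netdev_hold(dev, &w->tracker, GFP_KERNEL);
	INIT_WORK(&w->work, example_task);
	schedule_work(&w->work);
}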
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 368e5d77416d..86983080d28b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -280,15 +280,15 @@ void ipoib_event(struct ib_event_handler *handler,
dev_name(&record->device->dev), record->element.port_num);
if (record->event == IB_EVENT_CLIENT_REREGISTER) {
- queue_work(ipoib_workqueue, &priv->flush_light);
+ ipoib_queue_work(priv, IPOIB_FLUSH_LIGHT);
} else if (record->event == IB_EVENT_PORT_ERR ||
record->event == IB_EVENT_PORT_ACTIVE ||
record->event == IB_EVENT_LID_CHANGE) {
- queue_work(ipoib_workqueue, &priv->flush_normal);
+ ipoib_queue_work(priv, IPOIB_FLUSH_NORMAL);
} else if (record->event == IB_EVENT_PKEY_CHANGE) {
- queue_work(ipoib_workqueue, &priv->flush_heavy);
+ ipoib_queue_work(priv, IPOIB_FLUSH_HEAVY);
} else if (record->event == IB_EVENT_GID_CHANGE &&
!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
- queue_work(ipoib_workqueue, &priv->flush_light);
+ ipoib_queue_work(priv, IPOIB_FLUSH_LIGHT);
}
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 562df2b3ef18..243e8f555eca 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -53,8 +53,7 @@ static bool is_child_unique(struct ipoib_dev_priv *ppriv,
struct ipoib_dev_priv *priv)
{
struct ipoib_dev_priv *tpriv;
-
- ASSERT_RTNL();
+ bool result = true;
/*
* Since the legacy sysfs interface uses pkey for deletion it cannot
@@ -73,13 +72,17 @@ static bool is_child_unique(struct ipoib_dev_priv *ppriv,
if (ppriv->pkey == priv->pkey)
return false;
+ netdev_lock(ppriv->dev);
list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
if (tpriv->pkey == priv->pkey &&
- tpriv->child_type == IPOIB_LEGACY_CHILD)
- return false;
+ tpriv->child_type == IPOIB_LEGACY_CHILD) {
+ result = false;
+ break;
+ }
}
+ netdev_unlock(ppriv->dev);
- return true;
+ return result;
}
/*
@@ -98,8 +101,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
int result;
struct rdma_netdev *rn = netdev_priv(ndev);
- ASSERT_RTNL();
-
/*
* We do not need to touch priv if register_netdevice fails, so just
* always use this flow.
@@ -267,6 +268,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
ppriv = ipoib_priv(pdev);
rc = -ENODEV;
+ netdev_lock(ppriv->dev);
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
@@ -278,9 +280,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
goto out;
}
- down_write(&ppriv->vlan_rwsem);
list_del_init(&priv->list);
- up_write(&ppriv->vlan_rwsem);
work->dev = priv->dev;
INIT_WORK(&work->work, ipoib_vlan_delete_task);
queue_work(ipoib_workqueue, &work->work);
@@ -291,6 +291,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
}
out:
+ netdev_unlock(ppriv->dev);
rtnl_unlock();
return rc;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 57a5ff3d1992..1008858f78e2 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -290,6 +290,8 @@ static const struct xpad_device {
{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x10f5, 0x7005, "Turtle Beach Recon Controller", 0, XTYPE_XBOXONE },
+ { 0x10f5, 0x7008, "Turtle Beach Recon Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
+ { 0x10f5, 0x7073, "Turtle Beach Stealth Ultra Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
{ 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 },
{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
@@ -354,6 +356,7 @@ static const struct xpad_device {
{ 0x1ee9, 0x1590, "ZOTAC Gaming Zone", 0, XTYPE_XBOX360 },
{ 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
{ 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
+ { 0x20d6, 0x2064, "PowerA Wired Controller for Xbox", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
{ 0x20d6, 0x400b, "PowerA FUSION Pro 4 Wired Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x20d6, 0x890b, "PowerA MOGA XP-Ultra Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c
index d760af4cc12e..f1947f03b06a 100644
--- a/drivers/input/rmi4/rmi_f34.c
+++ b/drivers/input/rmi4/rmi_f34.c
@@ -4,6 +4,7 @@
* Copyright (C) 2016 Zodiac Inflight Innovations
*/
+#include "linux/device.h"
#include <linux/kernel.h>
#include <linux/rmi.h>
#include <linux/firmware.h>
@@ -289,39 +290,30 @@ static int rmi_f34_update_firmware(struct f34_data *f34,
return rmi_f34_flash_firmware(f34, syn_fw);
}
-static int rmi_f34_status(struct rmi_function *fn)
-{
- struct f34_data *f34 = dev_get_drvdata(&fn->dev);
-
- /*
- * The status is the percentage complete, or once complete,
- * zero for success or a negative return code.
- */
- return f34->update_status;
-}
-
static ssize_t rmi_driver_bootloader_id_show(struct device *dev,
struct device_attribute *dattr,
char *buf)
{
struct rmi_driver_data *data = dev_get_drvdata(dev);
- struct rmi_function *fn = data->f34_container;
+ struct rmi_function *fn;
struct f34_data *f34;
- if (fn) {
- f34 = dev_get_drvdata(&fn->dev);
-
- if (f34->bl_version == 5)
- return sysfs_emit(buf, "%c%c\n",
- f34->bootloader_id[0],
- f34->bootloader_id[1]);
- else
- return sysfs_emit(buf, "V%d.%d\n",
- f34->bootloader_id[1],
- f34->bootloader_id[0]);
- }
+ fn = data->f34_container;
+ if (!fn)
+ return -ENODEV;
- return 0;
+ f34 = dev_get_drvdata(&fn->dev);
+ if (!f34)
+ return -ENODEV;
+
+ if (f34->bl_version == 5)
+ return sysfs_emit(buf, "%c%c\n",
+ f34->bootloader_id[0],
+ f34->bootloader_id[1]);
+ else
+ return sysfs_emit(buf, "V%d.%d\n",
+ f34->bootloader_id[1],
+ f34->bootloader_id[0]);
}
static DEVICE_ATTR(bootloader_id, 0444, rmi_driver_bootloader_id_show, NULL);
@@ -334,13 +326,16 @@ static ssize_t rmi_driver_configuration_id_show(struct device *dev,
struct rmi_function *fn = data->f34_container;
struct f34_data *f34;
- if (fn) {
- f34 = dev_get_drvdata(&fn->dev);
+ fn = data->f34_container;
+ if (!fn)
+ return -ENODEV;
+
+ f34 = dev_get_drvdata(&fn->dev);
+ if (!f34)
+ return -ENODEV;
- return sysfs_emit(buf, "%s\n", f34->configuration_id);
- }
- return 0;
+ return sysfs_emit(buf, "%s\n", f34->configuration_id);
}
static DEVICE_ATTR(configuration_id, 0444,
@@ -356,10 +351,14 @@ static int rmi_firmware_update(struct rmi_driver_data *data,
if (!data->f34_container) {
dev_warn(dev, "%s: No F34 present!\n", __func__);
- return -EINVAL;
+ return -ENODEV;
}
f34 = dev_get_drvdata(&data->f34_container->dev);
+ if (!f34) {
+ dev_warn(dev, "%s: No valid F34 present!\n", __func__);
+ return -ENODEV;
+ }
if (f34->bl_version >= 7) {
if (data->pdt_props & HAS_BSR) {
@@ -485,10 +484,18 @@ static ssize_t rmi_driver_update_fw_status_show(struct device *dev,
char *buf)
{
struct rmi_driver_data *data = dev_get_drvdata(dev);
- int update_status = 0;
+ struct f34_data *f34;
+ int update_status = -ENODEV;
- if (data->f34_container)
- update_status = rmi_f34_status(data->f34_container);
+ /*
+ * The status is the percentage complete, or once complete,
+ * zero for success or a negative return code.
+ */
+ if (data->f34_container) {
+ f34 = dev_get_drvdata(&data->f34_container->dev);
+ if (f34)
+ update_status = f34->update_status;
+ }
return sysfs_emit(buf, "%d\n", update_status);
}
@@ -508,33 +515,21 @@ static const struct attribute_group rmi_firmware_attr_group = {
.attrs = rmi_firmware_attrs,
};
-static int rmi_f34_probe(struct rmi_function *fn)
+static int rmi_f34v5_probe(struct f34_data *f34)
{
- struct f34_data *f34;
- unsigned char f34_queries[9];
+ struct rmi_function *fn = f34->fn;
+ u8 f34_queries[9];
bool has_config_id;
- u8 version = fn->fd.function_version;
- int ret;
-
- f34 = devm_kzalloc(&fn->dev, sizeof(struct f34_data), GFP_KERNEL);
- if (!f34)
- return -ENOMEM;
-
- f34->fn = fn;
- dev_set_drvdata(&fn->dev, f34);
-
- /* v5 code only supported version 0, try V7 probe */
- if (version > 0)
- return rmi_f34v7_probe(f34);
+ int error;
f34->bl_version = 5;
- ret = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
- f34_queries, sizeof(f34_queries));
- if (ret) {
+ error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
+ f34_queries, sizeof(f34_queries));
+ if (error) {
dev_err(&fn->dev, "%s: Failed to query properties\n",
__func__);
- return ret;
+ return error;
}
snprintf(f34->bootloader_id, sizeof(f34->bootloader_id),
@@ -560,11 +555,11 @@ static int rmi_f34_probe(struct rmi_function *fn)
f34->v5.config_blocks);
if (has_config_id) {
- ret = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr,
- f34_queries, sizeof(f34_queries));
- if (ret) {
+ error = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr,
+ f34_queries, sizeof(f34_queries));
+ if (error) {
dev_err(&fn->dev, "Failed to read F34 config ID\n");
- return ret;
+ return error;
}
snprintf(f34->configuration_id, sizeof(f34->configuration_id),
@@ -573,12 +568,34 @@ static int rmi_f34_probe(struct rmi_function *fn)
f34_queries[2], f34_queries[3]);
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Configuration ID: %s\n",
- f34->configuration_id);
+ f34->configuration_id);
}
return 0;
}
+static int rmi_f34_probe(struct rmi_function *fn)
+{
+ struct f34_data *f34;
+ u8 version = fn->fd.function_version;
+ int error;
+
+ f34 = devm_kzalloc(&fn->dev, sizeof(struct f34_data), GFP_KERNEL);
+ if (!f34)
+ return -ENOMEM;
+
+ f34->fn = fn;
+
+ /* v5 code only supported version 0 */
+ error = version == 0 ? rmi_f34v5_probe(f34) : rmi_f34v7_probe(f34);
+ if (error)
+ return error;
+
+ dev_set_drvdata(&fn->dev, f34);
+
+ return 0;
+}
+
int rmi_f34_create_sysfs(struct rmi_device *rmi_dev)
{
return sysfs_create_group(&rmi_dev->dev.kobj, &rmi_firmware_attr_group);
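
The probe reordering above gives the sysfs handlers a simple invariant: drvdata is either NULL or points at fully initialized state, so a NULL check is sufficient. The pattern in isolation (example_* names and the init step are hypothetical):

static int example_probe(struct device *dev)
{
	struct example_data *data;
	int error;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	error = example_hw_init(data);	/* hypothetical init step */
	if (error)
		return error;		/* drvdata stays NULL */

	dev_set_drvdata(dev, data);	/* publish only once valid */
	return 0;
}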
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index cd750f512dee..0a33d995d15d 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -192,6 +192,7 @@ config MSM_IOMMU
If unsure, say N here.
source "drivers/iommu/amd/Kconfig"
+source "drivers/iommu/arm/Kconfig"
source "drivers/iommu/intel/Kconfig"
source "drivers/iommu/iommufd/Kconfig"
source "drivers/iommu/riscv/Kconfig"
@@ -199,7 +200,6 @@ source "drivers/iommu/riscv/Kconfig"
config IRQ_REMAP
bool "Support for Interrupt Remapping"
depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
- select DMAR_TABLE if INTEL_IOMMU
help
Supports Interrupt remapping for IO-APIC and MSI devices.
To use x2apic mode in the CPU's which support x2APIC enhancements or
@@ -314,150 +314,6 @@ config APPLE_DART
Say Y here if you are using an Apple SoC.
-# ARM IOMMU support
-config ARM_SMMU
- tristate "ARM Ltd. System MMU (SMMU) Support"
- depends on ARM64 || ARM || COMPILE_TEST
- depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
- select IOMMU_API
- select IOMMU_IO_PGTABLE_LPAE
- select ARM_DMA_USE_IOMMU if ARM
- help
- Support for implementations of the ARM System MMU architecture
- versions 1 and 2.
-
- Say Y here if your SoC includes an IOMMU device implementing
- the ARM SMMU architecture.
-
-config ARM_SMMU_LEGACY_DT_BINDINGS
- bool "Support the legacy \"mmu-masters\" devicetree bindings"
- depends on ARM_SMMU=y && OF
- help
- Support for the badly designed and deprecated "mmu-masters"
- devicetree bindings. This allows some DMA masters to attach
- to the SMMU but does not provide any support via the DMA API.
- If you're lucky, you might be able to get VFIO up and running.
-
- If you say Y here then you'll make me very sad. Instead, say N
- and move your firmware to the utopian future that was 2016.
-
-config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
- bool "Default to disabling bypass on ARM SMMU v1 and v2"
- depends on ARM_SMMU
- default y
- help
- Say Y here to (by default) disable bypass streams such that
- incoming transactions from devices that are not attached to
- an iommu domain will report an abort back to the device and
- will not be allowed to pass through the SMMU.
-
- Any old kernels that existed before this KConfig was
- introduced would default to _allowing_ bypass (AKA the
- equivalent of NO for this config). However the default for
- this option is YES because the old behavior is insecure.
-
- There are few reasons to allow unmatched stream bypass, and
- even fewer good ones. If saying YES here breaks your board
- you should work on fixing your board. This KConfig option
- is expected to be removed in the future and we'll simply
- hardcode the bypass disable in the code.
-
- NOTE: the kernel command line parameter
- 'arm-smmu.disable_bypass' will continue to override this
- config.
-
-config ARM_SMMU_MMU_500_CPRE_ERRATA
- bool "Enable errata workaround for CPRE in SMMU reset path"
- depends on ARM_SMMU
- default y
- help
- Say Y here (by default) to apply workaround to disable
- MMU-500's next-page prefetcher for sake of 4 known errata.
-
- Say N here only when it is sure that any errata related to
- prefetch enablement are not applicable on the platform.
- Refer silicon-errata.rst for info on errata IDs.
-
-config ARM_SMMU_QCOM
- def_tristate y
- depends on ARM_SMMU && ARCH_QCOM
- select QCOM_SCM
- help
- When running on a Qualcomm platform that has the custom variant
- of the ARM SMMU, this needs to be built into the SMMU driver.
-
-config ARM_SMMU_QCOM_DEBUG
- bool "ARM SMMU QCOM implementation defined debug support"
- depends on ARM_SMMU_QCOM=y
- help
- Support for implementation specific debug features in ARM SMMU
- hardware found in QTI platforms. This include support for
- the Translation Buffer Units (TBU) that can be used to obtain
- additional information when debugging memory management issues
- like context faults.
-
- Say Y here to enable debug for issues such as context faults
- or TLB sync timeouts which requires implementation defined
- register dumps.
-
-config ARM_SMMU_V3
- tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
- depends on ARM64
- select IOMMU_API
- select IOMMU_IO_PGTABLE_LPAE
- select GENERIC_MSI_IRQ
- select IOMMUFD_DRIVER if IOMMUFD
- help
- Support for implementations of the ARM System MMU architecture
- version 3 providing translation support to a PCIe root complex.
-
- Say Y here if your system includes an IOMMU device implementing
- the ARM SMMUv3 architecture.
-
-if ARM_SMMU_V3
-config ARM_SMMU_V3_SVA
- bool "Shared Virtual Addressing support for the ARM SMMUv3"
- select IOMMU_SVA
- select IOMMU_IOPF
- select MMU_NOTIFIER
- help
- Support for sharing process address spaces with devices using the
- SMMUv3.
-
- Say Y here if your system supports SVA extensions such as PCIe PASID
- and PRI.
-
-config ARM_SMMU_V3_IOMMUFD
- bool "Enable IOMMUFD features for ARM SMMUv3 (EXPERIMENTAL)"
- depends on IOMMUFD
- help
- Support for IOMMUFD features intended to support virtual machines
- with accelerated virtual IOMMUs.
-
- Say Y here if you are doing development and testing on this feature.
-
-config ARM_SMMU_V3_KUNIT_TEST
- tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS
- depends on KUNIT
- depends on ARM_SMMU_V3_SVA
- default KUNIT_ALL_TESTS
- help
- Enable this option to unit-test arm-smmu-v3 driver functions.
-
- If unsure, say N.
-
-config TEGRA241_CMDQV
- bool "NVIDIA Tegra241 CMDQ-V extension support for ARM SMMUv3"
- depends on ACPI
- help
- Support for NVIDIA CMDQ-Virtualization extension for ARM SMMUv3. The
- CMDQ-V extension is similar to v3.3 ECMDQ for multi command queues
- support, except with virtualization capabilities.
-
- Say Y here if your system is NVIDIA Tegra241 (Grace) or it has the same
- CMDQ-V extension.
-endif
-
config S390_IOMMU
def_bool y if S390 && PCI
depends on S390 && PCI
@@ -494,18 +350,6 @@ config MTK_IOMMU_V1
if unsure, say N here.
-config QCOM_IOMMU
- # Note: iommu drivers cannot (yet?) be built as modules
- bool "Qualcomm IOMMU Support"
- depends on ARCH_QCOM || COMPILE_TEST
- depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
- select QCOM_SCM
- select IOMMU_API
- select IOMMU_IO_PGTABLE_LPAE
- select ARM_DMA_USE_IOMMU
- help
- Support for IOMMU on certain Qualcomm SoCs.
-
config HYPERV_IOMMU
bool "Hyper-V IRQ Handling"
depends on HYPERV && X86
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 5e5a83c6c2aa..355294fa9033 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,6 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += amd/ intel/ arm/ iommufd/ riscv/
+obj-y += arm/ iommufd/
+obj-$(CONFIG_AMD_IOMMU) += amd/
+obj-$(CONFIG_INTEL_IOMMU) += intel/
+obj-$(CONFIG_RISCV_IOMMU) += riscv/
obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_IOMMU_SUPPORT) += iommu-pages.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
obj-$(CONFIG_IOMMU_DEBUGFS) += iommu-debugfs.o
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile
index 9de33b2d42f5..59c04a67f398 100644
--- a/drivers/iommu/amd/Makefile
+++ b/drivers/iommu/amd/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o ppr.o pasid.o
+obj-y += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o ppr.o pasid.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 220c598b7e14..29a8864381c3 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -147,6 +147,8 @@ static inline int get_pci_sbdf_id(struct pci_dev *pdev)
return PCI_SEG_DEVID_TO_SBDF(seg, devid);
}
+bool amd_iommu_ht_range_ignore(void);
+
/*
* This must be called after device probe completes. During probe
* use rlookup_amd_iommu() get the iommu.
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 5089b58e528a..ccbab3a4811a 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -29,8 +29,6 @@
* some size calculation constants
*/
#define DEV_TABLE_ENTRY_SIZE 32
-#define ALIAS_TABLE_ENTRY_SIZE 2
-#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *))
/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET 0x00
@@ -111,6 +109,7 @@
#define FEATURE_SNPAVICSUP GENMASK_ULL(7, 5)
#define FEATURE_SNPAVICSUP_GAM(x) \
(FIELD_GET(FEATURE_SNPAVICSUP, x) == 0x1)
+#define FEATURE_HT_RANGE_IGNORE BIT_ULL(11)
#define FEATURE_NUM_INT_REMAP_SUP GENMASK_ULL(9, 8)
#define FEATURE_NUM_INT_REMAP_SUP_2K(x) \
@@ -316,6 +315,7 @@
#define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
#define DTE_IRQ_REMAP_ENABLE 1ULL
+#define DTE_INTTAB_ALIGNMENT 128
#define DTE_INTTABLEN_MASK (0xfULL << 1)
#define DTE_INTTABLEN_VALUE_512 9ULL
#define DTE_INTTABLEN_512 (DTE_INTTABLEN_VALUE_512 << 1)
@@ -616,12 +616,6 @@ struct amd_iommu_pci_seg {
/* Size of the device table */
u32 dev_table_size;
- /* Size of the alias table */
- u32 alias_table_size;
-
- /* Size of the rlookup table */
- u32 rlookup_table_size;
-
/*
* device table virtual address
*
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 14aa0d77df26..c06b62f87b9b 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -243,17 +243,14 @@ static void init_translation_status(struct amd_iommu *iommu)
iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}
-static inline unsigned long tbl_size(int entry_size, int last_bdf)
+int amd_iommu_get_num_iommus(void)
{
- unsigned shift = PAGE_SHIFT +
- get_order((last_bdf + 1) * entry_size);
-
- return 1UL << shift;
+ return amd_iommus_present;
}
-int amd_iommu_get_num_iommus(void)
+bool amd_iommu_ht_range_ignore(void)
{
- return amd_iommus_present;
+ return check_feature2(FEATURE_HT_RANGE_IGNORE);
}
/*
@@ -634,8 +631,8 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_
/* Allocate per PCI segment device table */
static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
- pci_seg->dev_table = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
- get_order(pci_seg->dev_table_size));
+ pci_seg->dev_table = iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32,
+ pci_seg->dev_table_size);
if (!pci_seg->dev_table)
return -ENOMEM;
@@ -644,16 +641,16 @@ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
- iommu_free_pages(pci_seg->dev_table,
- get_order(pci_seg->dev_table_size));
+ iommu_free_pages(pci_seg->dev_table);
pci_seg->dev_table = NULL;
}
/* Allocate per PCI segment IOMMU rlookup table. */
static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- pci_seg->rlookup_table = iommu_alloc_pages(GFP_KERNEL,
- get_order(pci_seg->rlookup_table_size));
+ pci_seg->rlookup_table = kvcalloc(pci_seg->last_bdf + 1,
+ sizeof(*pci_seg->rlookup_table),
+ GFP_KERNEL);
if (pci_seg->rlookup_table == NULL)
return -ENOMEM;
@@ -662,17 +659,15 @@ static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- iommu_free_pages(pci_seg->rlookup_table,
- get_order(pci_seg->rlookup_table_size));
+ kvfree(pci_seg->rlookup_table);
pci_seg->rlookup_table = NULL;
}
static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- pci_seg->irq_lookup_table = iommu_alloc_pages(GFP_KERNEL,
- get_order(pci_seg->rlookup_table_size));
- kmemleak_alloc(pci_seg->irq_lookup_table,
- pci_seg->rlookup_table_size, 1, GFP_KERNEL);
+ pci_seg->irq_lookup_table = kvcalloc(pci_seg->last_bdf + 1,
+ sizeof(*pci_seg->irq_lookup_table),
+ GFP_KERNEL);
if (pci_seg->irq_lookup_table == NULL)
return -ENOMEM;
@@ -681,9 +676,7 @@ static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_se
static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- kmemleak_free(pci_seg->irq_lookup_table);
- iommu_free_pages(pci_seg->irq_lookup_table,
- get_order(pci_seg->rlookup_table_size));
+ kvfree(pci_seg->irq_lookup_table);
pci_seg->irq_lookup_table = NULL;
}
@@ -691,8 +684,9 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
{
int i;
- pci_seg->alias_table = iommu_alloc_pages(GFP_KERNEL,
- get_order(pci_seg->alias_table_size));
+ pci_seg->alias_table = kvmalloc_array(pci_seg->last_bdf + 1,
+ sizeof(*pci_seg->alias_table),
+ GFP_KERNEL);
if (!pci_seg->alias_table)
return -ENOMEM;
@@ -707,8 +701,7 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
{
- iommu_free_pages(pci_seg->alias_table,
- get_order(pci_seg->alias_table_size));
+ kvfree(pci_seg->alias_table);
pci_seg->alias_table = NULL;
}
@@ -719,8 +712,7 @@ static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
*/
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
- iommu->cmd_buf = iommu_alloc_pages(GFP_KERNEL,
- get_order(CMD_BUFFER_SIZE));
+ iommu->cmd_buf = iommu_alloc_pages_sz(GFP_KERNEL, CMD_BUFFER_SIZE);
return iommu->cmd_buf ? 0 : -ENOMEM;
}
@@ -817,20 +809,22 @@ static void iommu_disable_command_buffer(struct amd_iommu *iommu)
static void __init free_command_buffer(struct amd_iommu *iommu)
{
- iommu_free_pages(iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
+ iommu_free_pages(iommu->cmd_buf);
}
void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
size_t size)
{
- int order = get_order(size);
- void *buf = iommu_alloc_pages(gfp, order);
+ void *buf;
- if (buf &&
- check_feature(FEATURE_SNP) &&
- set_memory_4k((unsigned long)buf, (1 << order))) {
- iommu_free_pages(buf, order);
- buf = NULL;
+ size = PAGE_ALIGN(size);
+ buf = iommu_alloc_pages_sz(gfp, size);
+ if (!buf)
+ return NULL;
+ if (check_feature(FEATURE_SNP) &&
+ set_memory_4k((unsigned long)buf, size / PAGE_SIZE)) {
+ iommu_free_pages(buf);
+ return NULL;
}
return buf;
@@ -873,14 +867,14 @@ static void iommu_disable_event_buffer(struct amd_iommu *iommu)
static void __init free_event_buffer(struct amd_iommu *iommu)
{
- iommu_free_pages(iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
+ iommu_free_pages(iommu->evt_buf);
}
static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
- iommu_free_pages(iommu->ga_log, get_order(GA_LOG_SIZE));
- iommu_free_pages(iommu->ga_log_tail, get_order(8));
+ iommu_free_pages(iommu->ga_log);
+ iommu_free_pages(iommu->ga_log_tail);
#endif
}
@@ -925,11 +919,11 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
return 0;
- iommu->ga_log = iommu_alloc_pages(GFP_KERNEL, get_order(GA_LOG_SIZE));
+ iommu->ga_log = iommu_alloc_pages_sz(GFP_KERNEL, GA_LOG_SIZE);
if (!iommu->ga_log)
goto err_out;
- iommu->ga_log_tail = iommu_alloc_pages(GFP_KERNEL, get_order(8));
+ iommu->ga_log_tail = iommu_alloc_pages_sz(GFP_KERNEL, 8);
if (!iommu->ga_log_tail)
goto err_out;
@@ -950,7 +944,7 @@ static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
static void __init free_cwwb_sem(struct amd_iommu *iommu)
{
if (iommu->cmd_sem)
- iommu_free_page((void *)iommu->cmd_sem);
+ iommu_free_pages((void *)iommu->cmd_sem);
}
static void iommu_enable_xt(struct amd_iommu *iommu)
@@ -1024,8 +1018,8 @@ static bool __copy_device_table(struct amd_iommu *iommu)
if (!old_devtb)
return false;
- pci_seg->old_dev_tbl_cpy = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
- get_order(pci_seg->dev_table_size));
+ pci_seg->old_dev_tbl_cpy = iommu_alloc_pages_sz(
+ GFP_KERNEL | GFP_DMA32, pci_seg->dev_table_size);
if (pci_seg->old_dev_tbl_cpy == NULL) {
pr_err("Failed to allocate memory for copying old device table!\n");
memunmap(old_devtb);
@@ -1599,9 +1593,9 @@ static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
pci_seg->last_bdf = last_bdf;
DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
- pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf);
- pci_seg->alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf);
- pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf);
+ pci_seg->dev_table_size =
+ max(roundup_pow_of_two((last_bdf + 1) * DEV_TABLE_ENTRY_SIZE),
+ SZ_4K);
pci_seg->id = id;
init_llist_head(&pci_seg->dev_data_list);
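
To make the new sizing concrete, a worked example assuming the 32-byte device table entry the AMD IOMMU uses (DEV_TABLE_ENTRY_SIZE is defined elsewhere in the driver):

	/* Full segment: last_bdf = 0xFFFF
	 *   (0xFFFF + 1) * 32 = 2 MiB, already a power of two,
	 *   so dev_table_size = 2 MiB.
	 * Small segment: last_bdf = 0x1F
	 *   (0x1F + 1) * 32 = 1 KiB, rounded up is still 1 KiB,
	 *   but max(..., SZ_4K) enforces a 4 KiB floor.
	 */
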
@@ -2789,8 +2783,7 @@ static void early_enable_iommus(void)
for_each_pci_segment(pci_seg) {
if (pci_seg->old_dev_tbl_cpy != NULL) {
- iommu_free_pages(pci_seg->old_dev_tbl_cpy,
- get_order(pci_seg->dev_table_size));
+ iommu_free_pages(pci_seg->old_dev_tbl_cpy);
pci_seg->old_dev_tbl_cpy = NULL;
}
}
@@ -2803,8 +2796,7 @@ static void early_enable_iommus(void)
pr_info("Copied DEV table from previous kernel.\n");
for_each_pci_segment(pci_seg) {
- iommu_free_pages(pci_seg->dev_table,
- get_order(pci_seg->dev_table_size));
+ iommu_free_pages(pci_seg->dev_table);
pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
}
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 26cf562dde11..4d308c071134 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -47,14 +47,7 @@ static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
return fpte;
}
-static void free_pt_page(u64 *pt, struct list_head *freelist)
-{
- struct page *p = virt_to_page(pt);
-
- list_add_tail(&p->lru, freelist);
-}
-
-static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
+static void free_pt_lvl(u64 *pt, struct iommu_pages_list *freelist, int lvl)
{
u64 *p;
int i;
@@ -77,20 +70,20 @@ static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
if (lvl > 2)
free_pt_lvl(p, freelist, lvl - 1);
else
- free_pt_page(p, freelist);
+ iommu_pages_list_add(freelist, p);
}
- free_pt_page(pt, freelist);
+ iommu_pages_list_add(freelist, pt);
}
-static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
+static void free_sub_pt(u64 *root, int mode, struct iommu_pages_list *freelist)
{
switch (mode) {
case PAGE_MODE_NONE:
case PAGE_MODE_7_LEVEL:
break;
case PAGE_MODE_1_LEVEL:
- free_pt_page(root, freelist);
+ iommu_pages_list_add(freelist, root);
break;
case PAGE_MODE_2_LEVEL:
case PAGE_MODE_3_LEVEL:
@@ -121,7 +114,7 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
bool ret = true;
u64 *pte;
- pte = iommu_alloc_page_node(cfg->amd.nid, gfp);
+ pte = iommu_alloc_pages_node_sz(cfg->amd.nid, gfp, SZ_4K);
if (!pte)
return false;
@@ -146,7 +139,7 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
out:
spin_unlock_irqrestore(&domain->lock, flags);
- iommu_free_page(pte);
+ iommu_free_pages(pte);
return ret;
}
@@ -213,7 +206,8 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
if (!IOMMU_PTE_PRESENT(__pte) ||
pte_level == PAGE_MODE_NONE) {
- page = iommu_alloc_page_node(cfg->amd.nid, gfp);
+ page = iommu_alloc_pages_node_sz(cfg->amd.nid, gfp,
+ SZ_4K);
if (!page)
return NULL;
@@ -222,7 +216,7 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
/* pte could have been changed somewhere. */
if (!try_cmpxchg64(pte, &__pte, __npte))
- iommu_free_page(page);
+ iommu_free_pages(page);
else if (IOMMU_PTE_PRESENT(__pte))
*updated = true;
@@ -299,7 +293,8 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
return pte;
}
-static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
+static void free_clear_pte(u64 *pte, u64 pteval,
+ struct iommu_pages_list *freelist)
{
u64 *pt;
int mode;
@@ -328,7 +323,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
int prot, gfp_t gfp, size_t *mapped)
{
struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
bool updated = false;
u64 __pte, *pte;
int ret, i, count;
@@ -353,7 +348,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
for (i = 0; i < count; ++i)
free_clear_pte(&pte[i], pte[i], &freelist);
- if (!list_empty(&freelist))
+ if (!iommu_pages_list_empty(&freelist))
updated = true;
if (count > 1) {
@@ -524,7 +519,7 @@ static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops,
static void v1_free_pgtable(struct io_pgtable *iop)
{
struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
if (pgtable->mode == PAGE_MODE_NONE)
return;
@@ -541,7 +536,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
{
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
- pgtable->root = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
+ pgtable->root =
+ iommu_alloc_pages_node_sz(cfg->amd.nid, GFP_KERNEL, SZ_4K);
if (!pgtable->root)
return NULL;
pgtable->mode = PAGE_MODE_3_LEVEL;
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index a56a27396305..b47941353ccb 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -121,10 +121,10 @@ static void free_pgtable(u64 *pt, int level)
if (level > 2)
free_pgtable(p, level - 1);
else
- iommu_free_page(p);
+ iommu_free_pages(p);
}
- iommu_free_page(pt);
+ iommu_free_pages(pt);
}
/* Allocate page table */
@@ -152,14 +152,14 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
}
if (!IOMMU_PTE_PRESENT(__pte)) {
- page = iommu_alloc_page_node(nid, gfp);
+ page = iommu_alloc_pages_node_sz(nid, gfp, SZ_4K);
if (!page)
return NULL;
__npte = set_pgtable_attr(page);
/* pte could have been changed somewhere. */
if (!try_cmpxchg64(pte, &__pte, __npte))
- iommu_free_page(page);
+ iommu_free_pages(page);
else if (IOMMU_PTE_PRESENT(__pte))
*updated = true;
@@ -181,7 +181,7 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
if (pg_size == IOMMU_PAGE_SIZE_1G)
free_pgtable(__pte, end_level - 1);
else if (pg_size == IOMMU_PAGE_SIZE_2M)
- iommu_free_page(__pte);
+ iommu_free_pages(__pte);
}
return pte;
@@ -346,7 +346,7 @@ static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
int ias = IOMMU_IN_ADDR_BIT_SIZE;
- pgtable->pgd = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
+ pgtable->pgd = iommu_alloc_pages_node_sz(cfg->amd.nid, GFP_KERNEL, SZ_4K);
if (!pgtable->pgd)
return NULL;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index f34209b08b4c..3117d99cf83d 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -241,7 +241,9 @@ static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
- struct acpihid_map_entry *p;
+ struct acpihid_map_entry *p, *p1 = NULL;
+ int hid_count = 0;
+ bool fw_bug;
if (!adev)
return -ENODEV;
@@ -249,12 +251,33 @@ static inline int get_acpihid_device_id(struct device *dev,
list_for_each_entry(p, &acpihid_map, list) {
if (acpi_dev_hid_uid_match(adev, p->hid,
p->uid[0] ? p->uid : NULL)) {
- if (entry)
- *entry = p;
- return p->devid;
+ p1 = p;
+ fw_bug = false;
+ hid_count = 1;
+ break;
+ }
+
+ /*
+ * Count HID matches w/o UID, raise FW_BUG but allow exactly one match
+ */
+ if (acpi_dev_hid_match(adev, p->hid)) {
+ p1 = p;
+ hid_count++;
+ fw_bug = true;
}
}
- return -EINVAL;
+
+ if (!p1)
+ return -EINVAL;
+ if (fw_bug)
+ dev_err_once(dev, FW_BUG "No ACPI device matched UID, but %d device%s matched HID.\n",
+ hid_count, hid_count > 1 ? "s" : "");
+ if (hid_count > 1)
+ return -EINVAL;
+ if (entry)
+ *entry = p1;
+
+ return p1->devid;
}
static inline int get_device_sbdf_id(struct device *dev)
@@ -982,6 +1005,14 @@ int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
{
iommu_ga_log_notifier = notifier;
+ /*
+ * Ensure all in-flight IRQ handlers run to completion before returning
+ * to the caller, e.g. to ensure module code isn't unloaded while it's
+ * being executed in the IRQ handler.
+ */
+ if (!notifier)
+ synchronize_rcu();
+
return 0;
}
EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
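
The synchronize_rcu() on the NULL path exists so that a module can safely unload right after unregistering its notifier. A hedged usage sketch with a hypothetical callback (not taken from the patch):

	static int my_ga_log_cb(u32 ga_tag)
	{
		/* handle the guest vAPIC log event */
		return 0;
	}

	/* module init */
	amd_iommu_register_ga_log_notifier(my_ga_log_cb);

	/* module exit: once this returns, no IRQ handler can still be
	 * executing my_ga_log_cb(), so the module text may be freed. */
	amd_iommu_register_ga_log_notifier(NULL);
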
@@ -1812,7 +1843,7 @@ static void free_gcr3_tbl_level1(u64 *tbl)
ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
- iommu_free_page(ptr);
+ iommu_free_pages(ptr);
}
}
@@ -1845,7 +1876,7 @@ static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
/* Free per device domain ID */
pdom_id_free(gcr3_info->domid);
- iommu_free_page(gcr3_info->gcr3_tbl);
+ iommu_free_pages(gcr3_info->gcr3_tbl);
gcr3_info->gcr3_tbl = NULL;
}
@@ -1884,7 +1915,7 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
return -ENOSPC;
gcr3_info->domid = domid;
- gcr3_info->gcr3_tbl = iommu_alloc_page_node(nid, GFP_ATOMIC);
+ gcr3_info->gcr3_tbl = iommu_alloc_pages_node_sz(nid, GFP_ATOMIC, SZ_4K);
if (gcr3_info->gcr3_tbl == NULL) {
pdom_id_free(domid);
return -ENOMEM;
@@ -2908,6 +2939,9 @@ static void amd_iommu_get_resv_regions(struct device *dev,
return;
list_add_tail(&region->list, head);
+ if (amd_iommu_ht_range_ignore())
+ return;
+
region = iommu_alloc_resv_region(HT_RANGE_START,
HT_RANGE_END - HT_RANGE_START + 1,
0, IOMMU_RESV_RESERVED, GFP_KERNEL);
@@ -2984,38 +3018,6 @@ static const struct iommu_dirty_ops amd_dirty_ops = {
.read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
};
-static int amd_iommu_dev_enable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- int ret = 0;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- case IOMMU_DEV_FEAT_SVA:
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static int amd_iommu_dev_disable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- int ret = 0;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- case IOMMU_DEV_FEAT_SVA:
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.blocked_domain = &blocked_domain,
@@ -3029,8 +3031,6 @@ const struct iommu_ops amd_iommu_ops = {
.get_resv_regions = amd_iommu_get_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.def_domain_type = amd_iommu_def_domain_type,
- .dev_enable_feat = amd_iommu_dev_enable_feature,
- .dev_disable_feat = amd_iommu_dev_disable_feature,
.page_response = amd_iommu_page_response,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device,
@@ -3129,7 +3129,7 @@ static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
return table;
}
-static struct irq_remap_table *__alloc_irq_table(int nid, int order)
+static struct irq_remap_table *__alloc_irq_table(int nid, size_t size)
{
struct irq_remap_table *table;
@@ -3137,7 +3137,8 @@ static struct irq_remap_table *__alloc_irq_table(int nid, int order)
if (!table)
return NULL;
- table->table = iommu_alloc_pages_node(nid, GFP_KERNEL, order);
+ table->table = iommu_alloc_pages_node_sz(
+ nid, GFP_KERNEL, max(DTE_INTTAB_ALIGNMENT, size));
if (!table->table) {
kfree(table);
return NULL;
@@ -3191,7 +3192,6 @@ static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
struct irq_remap_table *new_table = NULL;
struct amd_iommu_pci_seg *pci_seg;
unsigned long flags;
- int order = get_order(get_irq_table_size(max_irqs));
int nid = iommu && iommu->dev ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
u16 alias;
@@ -3211,7 +3211,7 @@ static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
spin_unlock_irqrestore(&iommu_table_lock, flags);
/* Nothing there yet, allocate new irq remapping table */
- new_table = __alloc_irq_table(nid, order);
+ new_table = __alloc_irq_table(nid, get_irq_table_size(max_irqs));
if (!new_table)
return NULL;
@@ -3246,7 +3246,7 @@ out_unlock:
spin_unlock_irqrestore(&iommu_table_lock, flags);
if (new_table) {
- iommu_free_pages(new_table->table, order);
+ iommu_free_pages(new_table->table);
kfree(new_table);
}
return table;
diff --git a/drivers/iommu/amd/ppr.c b/drivers/iommu/amd/ppr.c
index 7c67d69f0b8c..e6767c057d01 100644
--- a/drivers/iommu/amd/ppr.c
+++ b/drivers/iommu/amd/ppr.c
@@ -48,7 +48,7 @@ void amd_iommu_enable_ppr_log(struct amd_iommu *iommu)
void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu)
{
- iommu_free_pages(iommu->ppr_log, get_order(PPR_LOG_SIZE));
+ iommu_free_pages(iommu->ppr_log);
}
/*
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index e13501541fdd..757d24f67ad4 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -776,8 +776,7 @@ static void apple_dart_domain_free(struct iommu_domain *domain)
{
struct apple_dart_domain *dart_domain = to_dart_domain(domain);
- if (dart_domain->pgtbl_ops)
- free_io_pgtable_ops(dart_domain->pgtbl_ops);
+ free_io_pgtable_ops(dart_domain->pgtbl_ops);
kfree(dart_domain);
}
diff --git a/drivers/iommu/arm/Kconfig b/drivers/iommu/arm/Kconfig
new file mode 100644
index 000000000000..ef42bbe07dbe
--- /dev/null
+++ b/drivers/iommu/arm/Kconfig
@@ -0,0 +1,144 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# ARM IOMMU support
+config ARM_SMMU
+ tristate "ARM Ltd. System MMU (SMMU) Support"
+ depends on ARM64 || ARM || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
+ select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
+ select ARM_DMA_USE_IOMMU if ARM
+ help
+ Support for implementations of the ARM System MMU architecture
+ versions 1 and 2.
+
+ Say Y here if your SoC includes an IOMMU device implementing
+ the ARM SMMU architecture.
+
+if ARM_SMMU
+config ARM_SMMU_LEGACY_DT_BINDINGS
+ bool "Support the legacy \"mmu-masters\" devicetree bindings"
+ depends on ARM_SMMU=y && OF
+ help
+ Support for the badly designed and deprecated "mmu-masters"
+ devicetree bindings. This allows some DMA masters to attach
+ to the SMMU but does not provide any support via the DMA API.
+ If you're lucky, you might be able to get VFIO up and running.
+
+ If you say Y here then you'll make me very sad. Instead, say N
+ and move your firmware to the utopian future that was 2016.
+
+config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
+ bool "Disable unmatched stream bypass by default" if EXPERT
+ default y
+ help
+ If your firmware is broken and fails to describe StreamIDs which
+ Linux should know about in order to manage the SMMU correctly and
+ securely, and you don't want to boot with the 'arm-smmu.disable_bypass=0'
+ command line parameter, then as a last resort you can turn it off
+ by default here. But don't. This option may be removed at any time.
+
+ Note that 'arm-smmu.disable_bypass=1' will still take precedence.
+
+config ARM_SMMU_MMU_500_CPRE_ERRATA
+ bool "Enable errata workaround for CPRE in SMMU reset path"
+ default y
+ help
+ Say Y here (the default) to apply a workaround that disables
+ the MMU-500's next-page prefetcher on account of four known
+ errata.
+
+ Say N here only when it is certain that no errata related to
+ prefetch enablement apply to the platform. Refer to
+ silicon-errata.rst for details on the errata IDs.
+
+config ARM_SMMU_QCOM
+ def_tristate y
+ depends on ARCH_QCOM
+ select QCOM_SCM
+ help
+ When running on a Qualcomm platform that has the custom variant
+ of the ARM SMMU, this needs to be built into the SMMU driver.
+
+config ARM_SMMU_QCOM_DEBUG
+ bool "ARM SMMU QCOM implementation defined debug support"
+ depends on ARM_SMMU_QCOM=y
+ help
+ Support for implementation-specific debug features in ARM SMMU
+ hardware found in QTI platforms. This includes support for
+ the Translation Buffer Units (TBU) that can be used to obtain
+ additional information when debugging memory management issues
+ like context faults.
+
+ Say Y here to enable debug for issues such as context faults
+ or TLB sync timeouts which require implementation-defined
+ register dumps.
+endif
+
+config ARM_SMMU_V3
+ tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
+ depends on ARM64
+ select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
+ select GENERIC_MSI_IRQ
+ select IOMMUFD_DRIVER if IOMMUFD
+ help
+ Support for implementations of the ARM System MMU architecture
+ version 3 providing translation support to a PCIe root complex.
+
+ Say Y here if your system includes an IOMMU device implementing
+ the ARM SMMUv3 architecture.
+
+if ARM_SMMU_V3
+config ARM_SMMU_V3_SVA
+ bool "Shared Virtual Addressing support for the ARM SMMUv3"
+ select IOMMU_SVA
+ select IOMMU_IOPF
+ select MMU_NOTIFIER
+ help
+ Support for sharing process address spaces with devices using the
+ SMMUv3.
+
+ Say Y here if your system supports SVA extensions such as PCIe PASID
+ and PRI.
+
+config ARM_SMMU_V3_IOMMUFD
+ bool "Enable IOMMUFD features for ARM SMMUv3 (EXPERIMENTAL)"
+ depends on IOMMUFD
+ help
+ Support for IOMMUFD features intended to support virtual machines
+ with accelerated virtual IOMMUs.
+
+ Say Y here if you are doing development and testing on this feature.
+
+config ARM_SMMU_V3_KUNIT_TEST
+ tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on ARM_SMMU_V3_SVA
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to unit-test arm-smmu-v3 driver functions.
+
+ If unsure, say N.
+
+config TEGRA241_CMDQV
+ bool "NVIDIA Tegra241 CMDQ-V extension support for ARM SMMUv3"
+ depends on ACPI
+ help
+ Support for the NVIDIA CMDQ-Virtualization extension for ARM SMMUv3.
+ The CMDQ-V extension is similar to the v3.3 ECMDQ multi-command-queue
+ support, but adds virtualization capabilities.
+
+ Say Y here if your system is an NVIDIA Tegra241 (Grace) or another
+ platform with the same CMDQ-V extension.
+endif
+
+config QCOM_IOMMU
+ # Note: iommu drivers cannot (yet?) be built as modules
+ bool "Qualcomm IOMMU Support"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
+ select QCOM_SCM
+ select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
+ select ARM_DMA_USE_IOMMU
+ help
+ Support for IOMMU on certain Qualcomm SoCs.
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 980cc6b33c43..0601dece0a0d 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -13,8 +13,6 @@
#include "arm-smmu-v3.h"
#include "../../io-pgtable-arm.h"
-static DEFINE_MUTEX(sva_lock);
-
static void __maybe_unused
arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
{
@@ -257,84 +255,6 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
return true;
}
-bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
-{
- /* We're not keeping track of SIDs in fault events */
- if (master->num_streams != 1)
- return false;
-
- return master->stall_enabled;
-}
-
-bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
-{
- if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
- return false;
-
- /* SSID support is mandatory for the moment */
- return master->ssid_bits;
-}
-
-bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
-{
- bool enabled;
-
- mutex_lock(&sva_lock);
- enabled = master->sva_enabled;
- mutex_unlock(&sva_lock);
- return enabled;
-}
-
-static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
-{
- struct device *dev = master->dev;
-
- /*
- * Drivers for devices supporting PRI or stall should enable IOPF first.
- * Others have device-specific fault handlers and don't need IOPF.
- */
- if (!arm_smmu_master_iopf_supported(master))
- return 0;
-
- if (!master->iopf_enabled)
- return -EINVAL;
-
- return iopf_queue_add_device(master->smmu->evtq.iopf, dev);
-}
-
-static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
-{
- struct device *dev = master->dev;
-
- if (!master->iopf_enabled)
- return;
-
- iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
-}
-
-int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
-{
- int ret;
-
- mutex_lock(&sva_lock);
- ret = arm_smmu_master_sva_enable_iopf(master);
- if (!ret)
- master->sva_enabled = true;
- mutex_unlock(&sva_lock);
-
- return ret;
-}
-
-int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
-{
- mutex_lock(&sva_lock);
- arm_smmu_master_sva_disable_iopf(master);
- master->sva_enabled = false;
- mutex_unlock(&sva_lock);
-
- return 0;
-}
-
void arm_smmu_sva_notifier_synchronize(void)
{
/*
@@ -353,6 +273,9 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
struct arm_smmu_cd target;
int ret;
+ if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
+ return -EOPNOTSUPP;
+
/* Prevent arm_smmu_mm_release from being called while we are attaching */
if (!mmget_not_zero(domain->mm))
return -EINVAL;
@@ -406,6 +329,9 @@ struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
u32 asid;
int ret;
+ if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
+ return ERR_PTR(-EOPNOTSUPP);
+
smmu_domain = arm_smmu_domain_alloc();
if (IS_ERR(smmu_domain))
return ERR_CAST(smmu_domain);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 48d910399a1b..10cc6dc26b7b 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2720,6 +2720,7 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
static struct arm_smmu_master_domain *
arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain,
+ struct iommu_domain *domain,
struct arm_smmu_master *master,
ioasid_t ssid, bool nested_ats_flush)
{
@@ -2730,6 +2731,7 @@ arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain,
list_for_each_entry(master_domain, &smmu_domain->devices,
devices_elm) {
if (master_domain->master == master &&
+ master_domain->domain == domain &&
master_domain->ssid == ssid &&
master_domain->nested_ats_flush == nested_ats_flush)
return master_domain;
@@ -2756,6 +2758,58 @@ to_smmu_domain_devices(struct iommu_domain *domain)
return NULL;
}
+static int arm_smmu_enable_iopf(struct arm_smmu_master *master,
+ struct arm_smmu_master_domain *master_domain)
+{
+ int ret;
+
+ iommu_group_mutex_assert(master->dev);
+
+ if (!IS_ENABLED(CONFIG_ARM_SMMU_V3_SVA))
+ return -EOPNOTSUPP;
+
+ /*
+ * Drivers for devices supporting PRI or stall require IOPF;
+ * others have device-specific fault handlers and don't need
+ * IOPF, so this is not a failure.
+ */
+ if (!master->stall_enabled)
+ return 0;
+
+ /* We're not keeping track of SIDs in fault events */
+ if (master->num_streams != 1)
+ return -EOPNOTSUPP;
+
+ if (master->iopf_refcount) {
+ master->iopf_refcount++;
+ master_domain->using_iopf = true;
+ return 0;
+ }
+
+ ret = iopf_queue_add_device(master->smmu->evtq.iopf, master->dev);
+ if (ret)
+ return ret;
+ master->iopf_refcount = 1;
+ master_domain->using_iopf = true;
+ return 0;
+}
+
+static void arm_smmu_disable_iopf(struct arm_smmu_master *master,
+ struct arm_smmu_master_domain *master_domain)
+{
+ iommu_group_mutex_assert(master->dev);
+
+ if (!IS_ENABLED(CONFIG_ARM_SMMU_V3_SVA))
+ return;
+
+ if (!master_domain || !master_domain->using_iopf)
+ return;
+
+ master->iopf_refcount--;
+ if (master->iopf_refcount == 0)
+ iopf_queue_remove_device(master->smmu->evtq.iopf, master->dev);
+}
+
static void arm_smmu_remove_master_domain(struct arm_smmu_master *master,
struct iommu_domain *domain,
ioasid_t ssid)
@@ -2772,15 +2826,17 @@ static void arm_smmu_remove_master_domain(struct arm_smmu_master *master,
nested_ats_flush = to_smmu_nested_domain(domain)->enable_ats;
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- master_domain = arm_smmu_find_master_domain(smmu_domain, master, ssid,
- nested_ats_flush);
+ master_domain = arm_smmu_find_master_domain(smmu_domain, domain, master,
+ ssid, nested_ats_flush);
if (master_domain) {
list_del(&master_domain->devices_elm);
- kfree(master_domain);
if (master->ats_enabled)
atomic_dec(&smmu_domain->nr_ats_masters);
}
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+ arm_smmu_disable_iopf(master, master_domain);
+ kfree(master_domain);
}
/*
@@ -2853,12 +2909,19 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
kfree(state->vmaster);
return -ENOMEM;
}
+ master_domain->domain = new_domain;
master_domain->master = master;
master_domain->ssid = state->ssid;
if (new_domain->type == IOMMU_DOMAIN_NESTED)
master_domain->nested_ats_flush =
to_smmu_nested_domain(new_domain)->enable_ats;
+ if (new_domain->iopf_handler) {
+ ret = arm_smmu_enable_iopf(master, master_domain);
+ if (ret)
+ goto err_free_master_domain;
+ }
+
/*
* During prepare we want the current smmu_domain and new
* smmu_domain to be in the devices list before we change any
@@ -2878,9 +2941,9 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
!arm_smmu_master_canwbs(master)) {
spin_unlock_irqrestore(&smmu_domain->devices_lock,
flags);
- kfree(master_domain);
kfree(state->vmaster);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_iopf;
}
if (state->ats_enabled)
@@ -2899,6 +2962,12 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
wmb();
}
return 0;
+
+err_iopf:
+ arm_smmu_disable_iopf(master, master_domain);
+err_free_master_domain:
+ kfree(master_domain);
+ return ret;
}
/*
@@ -2953,7 +3022,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
smmu = master->smmu;
if (smmu_domain->smmu != smmu)
- return ret;
+ return -EINVAL;
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
cdptr = arm_smmu_alloc_cd_ptr(master, IOMMU_NO_PASID);
@@ -3510,8 +3579,7 @@ static void arm_smmu_release_device(struct device *dev)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- if (WARN_ON(arm_smmu_master_sva_enabled(master)))
- iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
+ WARN_ON(master->iopf_refcount);
/* Put the STE back to what arm_smmu_init_strtab() sets */
if (dev->iommu->require_direct)
@@ -3586,58 +3654,6 @@ static void arm_smmu_get_resv_regions(struct device *dev,
iommu_dma_get_resv_regions(dev, head);
}
-static int arm_smmu_dev_enable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
- if (!master)
- return -ENODEV;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- if (!arm_smmu_master_iopf_supported(master))
- return -EINVAL;
- if (master->iopf_enabled)
- return -EBUSY;
- master->iopf_enabled = true;
- return 0;
- case IOMMU_DEV_FEAT_SVA:
- if (!arm_smmu_master_sva_supported(master))
- return -EINVAL;
- if (arm_smmu_master_sva_enabled(master))
- return -EBUSY;
- return arm_smmu_master_enable_sva(master);
- default:
- return -EINVAL;
- }
-}
-
-static int arm_smmu_dev_disable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
- if (!master)
- return -EINVAL;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- if (!master->iopf_enabled)
- return -EINVAL;
- if (master->sva_enabled)
- return -EBUSY;
- master->iopf_enabled = false;
- return 0;
- case IOMMU_DEV_FEAT_SVA:
- if (!arm_smmu_master_sva_enabled(master))
- return -EINVAL;
- return arm_smmu_master_disable_sva(master);
- default:
- return -EINVAL;
- }
-}
-
/*
* HiSilicon PCIe tune and trace device can be used to trace TLP headers on the
* PCIe link and save the data to memory by DMA. The hardware is restricted to
@@ -3670,8 +3686,6 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
- .dev_enable_feat = arm_smmu_dev_enable_feature,
- .dev_disable_feat = arm_smmu_dev_disable_feature,
.page_response = arm_smmu_page_response,
.def_domain_type = arm_smmu_def_domain_type,
.viommu_alloc = arm_vsmmu_alloc,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index dd1ad56ce863..ea41d790463e 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -837,9 +837,8 @@ struct arm_smmu_master {
bool ats_enabled : 1;
bool ste_ats_enabled : 1;
bool stall_enabled;
- bool sva_enabled;
- bool iopf_enabled;
unsigned int ssid_bits;
+ unsigned int iopf_refcount;
};
/* SMMU private data for an IOMMU domain */
@@ -915,8 +914,14 @@ void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
struct arm_smmu_master_domain {
struct list_head devices_elm;
struct arm_smmu_master *master;
+ /*
+ * For nested domains the master_domain is threaded onto the S2 parent;
+ * this points to the IOMMU_DOMAIN_NESTED to disambiguate the masters.
+ */
+ struct iommu_domain *domain;
ioasid_t ssid;
bool nested_ats_flush : 1;
+ bool using_iopf : 1;
};
static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
@@ -995,11 +1000,6 @@ int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
#ifdef CONFIG_ARM_SMMU_V3_SVA
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
-bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);
-bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master);
-int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
-int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
-bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master);
void arm_smmu_sva_notifier_synchronize(void);
struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
struct mm_struct *mm);
@@ -1009,31 +1009,6 @@ static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
return false;
}
-static inline bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
-{
- return false;
-}
-
-static inline bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
-{
- return false;
-}
-
-static inline int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
-{
- return -ENODEV;
-}
-
-static inline int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
-{
- return -ENODEV;
-}
-
-static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
-{
- return false;
-}
-
static inline void arm_smmu_sva_notifier_synchronize(void) {}
#define arm_smmu_sva_domain_alloc NULL
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
index d03b2239baad..65e0ef6539fe 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -406,6 +406,12 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
arm_smmu_print_context_fault_info(smmu, idx, &cfi);
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);
+
+ if (cfi.fsr & ARM_SMMU_CB_FSR_SS) {
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
+ ret == -EAGAIN ? 0 : ARM_SMMU_RESUME_TERMINATE);
+ }
+
return IRQ_HANDLED;
}
@@ -416,6 +422,9 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
if (!tmp || tmp == -EBUSY) {
ret = IRQ_HANDLED;
resume = ARM_SMMU_RESUME_TERMINATE;
+ } else if (tmp == -EAGAIN) {
+ ret = IRQ_HANDLED;
+ resume = 0;
} else {
phys_addr_t phys_atos = qcom_smmu_verify_fault(smmu_domain, cfi.iova, cfi.fsr);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 59d02687280e..62874b18f645 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -112,25 +112,39 @@ static void qcom_adreno_smmu_set_stall(const void *cookie, bool enabled)
{
struct arm_smmu_domain *smmu_domain = (void *)cookie;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- struct qcom_smmu *qsmmu = to_qcom_smmu(smmu_domain->smmu);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ u32 mask = BIT(cfg->cbndx);
+ bool stall_changed = !!(qsmmu->stall_enabled & mask) != enabled;
+ unsigned long flags;
if (enabled)
- qsmmu->stall_enabled |= BIT(cfg->cbndx);
+ qsmmu->stall_enabled |= mask;
else
- qsmmu->stall_enabled &= ~BIT(cfg->cbndx);
-}
+ qsmmu->stall_enabled &= ~mask;
-static void qcom_adreno_smmu_resume_translation(const void *cookie, bool terminate)
-{
- struct arm_smmu_domain *smmu_domain = (void *)cookie;
- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- u32 reg = 0;
+ /*
+ * If the device is on and we changed the setting, update the register.
+ * The spec pseudocode says that CFCFG is resampled after a fault, and
+ * we believe that no implementations cache it in the TLB, so it should
+ * be safe to change it without a TLB invalidation.
+ */
+ if (stall_changed && pm_runtime_get_if_active(smmu->dev) > 0) {
+ u32 reg;
+
+ spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+ reg = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_SCTLR);
+
+ if (enabled)
+ reg |= ARM_SMMU_SCTLR_CFCFG;
+ else
+ reg &= ~ARM_SMMU_SCTLR_CFCFG;
- if (terminate)
- reg |= ARM_SMMU_RESUME_TERMINATE;
+ arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_SCTLR, reg);
+ spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
- arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg);
+ pm_runtime_put_autosuspend(smmu->dev);
+ }
}
static void qcom_adreno_smmu_set_prr_bit(const void *cookie, bool set)
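
The pm_runtime_get_if_active() idiom above is worth noting: it takes a reference only when the device is already powered, so the register write is simply skipped while the SMMU is suspended (the context bank is reprogrammed from software state at resume anyway). A hedged distillation of the pattern:

	if (pm_runtime_get_if_active(dev) > 0) {
		/* Device is powered: safe to touch its registers. */
		writel_relaxed(val, base + reg);
		pm_runtime_put_autosuspend(dev);
	}
	/* else: device is off; hardware picks up the state at resume. */
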
@@ -337,7 +351,6 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;
priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
priv->set_stall = qcom_adreno_smmu_set_stall;
- priv->resume_translation = qcom_adreno_smmu_resume_translation;
priv->set_prr_bit = NULL;
priv->set_prr_addr = NULL;
@@ -356,6 +369,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,mdp4" },
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,qcm2290-mdss" },
+ { .compatible = "qcom,sar2130p-mdss" },
{ .compatible = "qcom,sc7180-mdss" },
{ .compatible = "qcom,sc7180-mss-pil" },
{ .compatible = "qcom,sc7280-mdss" },
@@ -585,6 +599,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
.write_sctlr = qcom_adreno_smmu_write_sctlr,
.tlb_sync = qcom_smmu_tlb_sync,
+ .context_fault_needs_threaded_irq = true,
};
static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = {
@@ -594,6 +609,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = {
.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
.write_sctlr = qcom_adreno_smmu_write_sctlr,
.tlb_sync = qcom_smmu_tlb_sync,
+ .context_fault_needs_threaded_irq = true,
};
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 8f439c265a23..8d95b14c7d5a 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -474,6 +474,12 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
arm_smmu_print_context_fault_info(smmu, idx, &cfi);
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);
+
+ if (cfi.fsr & ARM_SMMU_CB_FSR_SS) {
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
+ ret == -EAGAIN ? 0 : ARM_SMMU_RESUME_TERMINATE);
+ }
+
return IRQ_HANDLED;
}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index a775e4dbe06f..6c708fec48d1 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -27,6 +27,7 @@
#include <linux/msi.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
+#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
@@ -105,7 +106,7 @@ early_param("iommu.forcedac", iommu_dma_forcedac_setup);
struct iova_fq_entry {
unsigned long iova_pfn;
unsigned long pages;
- struct list_head freelist;
+ struct iommu_pages_list freelist;
u64 counter; /* Flush counter when this entry was added */
};
@@ -154,6 +155,8 @@ static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq
fq->entries[idx].iova_pfn,
fq->entries[idx].pages);
+ fq->entries[idx].freelist =
+ IOMMU_PAGES_LIST_INIT(fq->entries[idx].freelist);
fq->head = (fq->head + 1) & fq->mod_mask;
}
}
@@ -192,7 +195,7 @@ static void fq_flush_timeout(struct timer_list *t)
static void queue_iova(struct iommu_dma_cookie *cookie,
unsigned long pfn, unsigned long pages,
- struct list_head *freelist)
+ struct iommu_pages_list *freelist)
{
struct iova_fq *fq;
unsigned long flags;
@@ -231,7 +234,7 @@ static void queue_iova(struct iommu_dma_cookie *cookie,
fq->entries[idx].iova_pfn = pfn;
fq->entries[idx].pages = pages;
fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt);
- list_splice(freelist, &fq->entries[idx].freelist);
+ iommu_pages_list_splice(freelist, &fq->entries[idx].freelist);
spin_unlock_irqrestore(&fq->lock, flags);
@@ -289,7 +292,8 @@ static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size)
spin_lock_init(&fq->lock);
for (i = 0; i < fq_size; i++)
- INIT_LIST_HEAD(&fq->entries[i].freelist);
+ fq->entries[i].freelist =
+ IOMMU_PAGES_LIST_INIT(fq->entries[i].freelist);
}
static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
@@ -1137,6 +1141,54 @@ void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
+static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iova_domain *iovad = &domain->iova_cookie->iovad;
+
+ if (!is_swiotlb_active(dev)) {
+ dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
+ return (phys_addr_t)DMA_MAPPING_ERROR;
+ }
+
+ trace_swiotlb_bounced(dev, phys, size);
+
+ phys = swiotlb_tbl_map_single(dev, phys, size, iova_mask(iovad), dir,
+ attrs);
+
+ /*
+ * Untrusted devices should not see padding areas with random leftover
+ * kernel data, so zero the pre- and post-padding.
+ * swiotlb_tbl_map_single() has initialized the bounce buffer proper to
+ * the contents of the original memory buffer.
+ */
+ if (phys != (phys_addr_t)DMA_MAPPING_ERROR && dev_is_untrusted(dev)) {
+ size_t start, virt = (size_t)phys_to_virt(phys);
+
+ /* Pre-padding */
+ start = iova_align_down(iovad, virt);
+ memset((void *)start, 0, virt - start);
+
+ /* Post-padding */
+ start = virt + size;
+ memset((void *)start, 0, iova_align(iovad, start) - start);
+ }
+
+ return phys;
+}
+
+/*
+ * Checks if a physical buffer has unaligned boundaries with respect to
+ * the IOMMU granule. Returns non-zero if either the start or end
+ * address is not aligned to the granule boundary.
+ */
+static inline size_t iova_unaligned(struct iova_domain *iovad, phys_addr_t phys,
+ size_t size)
+{
+ return iova_offset(iovad, phys | size);
+}
+
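
The phys | size trick in iova_unaligned() works because iova_offset() masks with (granule - 1): any low bit set by a misaligned start or a misaligned length survives the OR. A self-contained restatement, assuming a power-of-two granule (hypothetical helper, not part of the patch):

	static inline bool buf_boundaries_unaligned(phys_addr_t phys, size_t size,
						    size_t granule)
	{
		/* Non-zero iff the start offset or the length is misaligned,
		 * i.e. iff either buffer boundary falls inside a granule. */
		return (phys | size) & (granule - 1);
	}
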
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs)
@@ -1150,42 +1202,14 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
dma_addr_t iova, dma_mask = dma_get_mask(dev);
/*
- * If both the physical buffer start address and size are
- * page aligned, we don't need to use a bounce page.
+ * If both the physical buffer start address and size are page aligned,
+ * we don't need to use a bounce page.
*/
if (dev_use_swiotlb(dev, size, dir) &&
- iova_offset(iovad, phys | size)) {
- if (!is_swiotlb_active(dev)) {
- dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
+ iova_unaligned(iovad, phys, size)) {
+ phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
+ if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
- }
-
- trace_swiotlb_bounced(dev, phys, size);
-
- phys = swiotlb_tbl_map_single(dev, phys, size,
- iova_mask(iovad), dir, attrs);
-
- if (phys == DMA_MAPPING_ERROR)
- return DMA_MAPPING_ERROR;
-
- /*
- * Untrusted devices should not see padding areas with random
- * leftover kernel data, so zero the pre- and post-padding.
- * swiotlb_tbl_map_single() has initialized the bounce buffer
- * proper to the contents of the original memory buffer.
- */
- if (dev_is_untrusted(dev)) {
- size_t start, virt = (size_t)phys_to_virt(phys);
-
- /* Pre-padding */
- start = iova_align_down(iovad, virt);
- memset((void *)start, 0, virt - start);
-
- /* Post-padding */
- start = virt + size;
- memset((void *)start, 0,
- iova_align(iovad, start) - start);
- }
}
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
@@ -1359,7 +1383,6 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
struct scatterlist *s, *prev = NULL;
int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
struct pci_p2pdma_map_state p2pdma_state = {};
- enum pci_p2pdma_map_type map;
dma_addr_t iova;
size_t iova_len = 0;
unsigned long mask = dma_get_seg_boundary(dev);
@@ -1389,28 +1412,30 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
size_t s_length = s->length;
size_t pad_len = (mask - iova_len + 1) & mask;
- if (is_pci_p2pdma_page(sg_page(s))) {
- map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
- switch (map) {
- case PCI_P2PDMA_MAP_BUS_ADDR:
- /*
- * iommu_map_sg() will skip this segment as
- * it is marked as a bus address,
- * __finalise_sg() will copy the dma address
- * into the output segment.
- */
- continue;
- case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
- /*
- * Mapping through host bridge should be
- * mapped with regular IOVAs, thus we
- * do nothing here and continue below.
- */
- break;
- default:
- ret = -EREMOTEIO;
- goto out_restore_sg;
- }
+ switch (pci_p2pdma_state(&p2pdma_state, dev, sg_page(s))) {
+ case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ /*
+ * Mapping through host bridge should be mapped with
+ * regular IOVAs, thus we do nothing here and continue
+ * below.
+ */
+ break;
+ case PCI_P2PDMA_MAP_NONE:
+ break;
+ case PCI_P2PDMA_MAP_BUS_ADDR:
+ /*
+ * iommu_map_sg() will skip this segment as it is marked
+ * as a bus address, __finalise_sg() will copy the dma
+ * address into the output segment.
+ */
+ s->dma_address = pci_p2pdma_bus_addr_map(&p2pdma_state,
+ sg_phys(s));
+ sg_dma_len(s) = s->length;
+ sg_dma_mark_bus_address(s);
+ continue;
+ default:
+ ret = -EREMOTEIO;
+ goto out_restore_sg;
}
sg_dma_address(s) = s_iova_off;
@@ -1721,6 +1746,354 @@ size_t iommu_dma_max_mapping_size(struct device *dev)
return SIZE_MAX;
}
+/**
+ * dma_iova_try_alloc - Try to allocate an IOVA space
+ * @dev: Device to allocate the IOVA space for
+ * @state: IOVA state
+ * @phys: physical address
+ * @size: IOVA size
+ *
+ * Check if @dev supports the IOVA-based DMA API and, if so, allocate IOVA space
+ * for the given base address and size.
+ *
+ * Note: @phys is only used to calculate the IOVA alignment. Callers that always
+ * do PAGE_SIZE aligned transfers can safely pass 0 here.
+ *
+ * Returns %true if the IOVA-based DMA API can be used and IOVA space has been
+ * allocated, or %false if the regular DMA API should be used.
+ */
+bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t size)
+{
+ struct iommu_dma_cookie *cookie;
+ struct iommu_domain *domain;
+ struct iova_domain *iovad;
+ size_t iova_off;
+ dma_addr_t addr;
+
+ memset(state, 0, sizeof(*state));
+ if (!use_dma_iommu(dev))
+ return false;
+
+ domain = iommu_get_dma_domain(dev);
+ cookie = domain->iova_cookie;
+ iovad = &cookie->iovad;
+ iova_off = iova_offset(iovad, phys);
+
+ if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+ iommu_deferred_attach(dev, iommu_get_domain_for_dev(dev)))
+ return false;
+
+ if (WARN_ON_ONCE(!size))
+ return false;
+
+ /*
+ * DMA_IOVA_USE_SWIOTLB is a flag set by the dma-iommu internals;
+ * make sure the caller didn't set it and/or didn't use this
+ * interface to map SIZE_MAX.
+ */
+ if (WARN_ON_ONCE((u64)size & DMA_IOVA_USE_SWIOTLB))
+ return false;
+
+ addr = iommu_dma_alloc_iova(domain,
+ iova_align(iovad, size + iova_off),
+ dma_get_mask(dev), dev);
+ if (!addr)
+ return false;
+
+ state->addr = addr + iova_off;
+ state->__size = size;
+ return true;
+}
+EXPORT_SYMBOL_GPL(dma_iova_try_alloc);
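
A hedged sketch of the intended calling pattern, with a fallback when the device has no IOMMU-backed DMA path (the buffer names are hypothetical):

	struct dma_iova_state state;
	phys_addr_t phys = page_to_phys(page);

	if (dma_iova_try_alloc(dev, &state, phys, len)) {
		/* IOVA space reserved; populate it with dma_iova_link()
		 * and flush once with dma_iova_sync(). */
	} else {
		/* Fall back to the regular DMA API. */
		dma_addr_t addr = dma_map_page(dev, page, 0, len,
					       DMA_TO_DEVICE);
	}
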
+
+/**
+ * dma_iova_free - Free an IOVA space
+ * @dev: Device to free the IOVA space for
+ * @state: IOVA state
+ *
+ * Undoes a successful dma_iova_try_alloc().
+ *
+ * Note that all dma_iova_link() calls need to be undone first. For callers
+ * that never call dma_iova_unlink(), dma_iova_destroy() can be used instead
+ * which unlinks all ranges and frees the IOVA space in a single efficient
+ * operation.
+ */
+void dma_iova_free(struct device *dev, struct dma_iova_state *state)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_start_pad = iova_offset(iovad, state->addr);
+ size_t size = dma_iova_size(state);
+
+ iommu_dma_free_iova(domain, state->addr - iova_start_pad,
+ iova_align(iovad, size + iova_start_pad), NULL);
+}
+EXPORT_SYMBOL_GPL(dma_iova_free);
+
+static int __dma_iova_link(struct device *dev, dma_addr_t addr,
+ phys_addr_t phys, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ bool coherent = dev_is_dma_coherent(dev);
+
+ if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_device(phys, size, dir);
+
+ return iommu_map_nosync(iommu_get_dma_domain(dev), addr, phys, size,
+ dma_info_to_prot(dir, coherent, attrs), GFP_ATOMIC);
+}
+
+static int iommu_dma_iova_bounce_and_link(struct device *dev, dma_addr_t addr,
+ phys_addr_t phys, size_t bounce_len,
+ enum dma_data_direction dir, unsigned long attrs,
+ size_t iova_start_pad)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iova_domain *iovad = &domain->iova_cookie->iovad;
+ phys_addr_t bounce_phys;
+ int error;
+
+ bounce_phys = iommu_dma_map_swiotlb(dev, phys, bounce_len, dir, attrs);
+ if (bounce_phys == DMA_MAPPING_ERROR)
+ return -ENOMEM;
+
+ error = __dma_iova_link(dev, addr - iova_start_pad,
+ bounce_phys - iova_start_pad,
+ iova_align(iovad, bounce_len), dir, attrs);
+ if (error)
+ swiotlb_tbl_unmap_single(dev, bounce_phys, bounce_len, dir,
+ attrs);
+ return error;
+}
+
+static int iommu_dma_iova_link_swiotlb(struct device *dev,
+ struct dma_iova_state *state, phys_addr_t phys, size_t offset,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_start_pad = iova_offset(iovad, phys);
+ size_t iova_end_pad = iova_offset(iovad, phys + size);
+ dma_addr_t addr = state->addr + offset;
+ size_t mapped = 0;
+ int error;
+
+ if (iova_start_pad) {
+ size_t bounce_len = min(size, iovad->granule - iova_start_pad);
+
+ error = iommu_dma_iova_bounce_and_link(dev, addr, phys,
+ bounce_len, dir, attrs, iova_start_pad);
+ if (error)
+ return error;
+ state->__size |= DMA_IOVA_USE_SWIOTLB;
+
+ mapped += bounce_len;
+ size -= bounce_len;
+ if (!size)
+ return 0;
+ }
+
+ size -= iova_end_pad;
+ error = __dma_iova_link(dev, addr + mapped, phys + mapped, size, dir,
+ attrs);
+ if (error)
+ goto out_unmap;
+ mapped += size;
+
+ if (iova_end_pad) {
+ error = iommu_dma_iova_bounce_and_link(dev, addr + mapped,
+ phys + mapped, iova_end_pad, dir, attrs, 0);
+ if (error)
+ goto out_unmap;
+ state->__size |= DMA_IOVA_USE_SWIOTLB;
+ }
+
+ return 0;
+
+out_unmap:
+ dma_iova_unlink(dev, state, 0, mapped, dir, attrs);
+ return error;
+}
+
+/**
+ * dma_iova_link - Link a range of IOVA space
+ * @dev: DMA device
+ * @state: IOVA state
+ * @phys: physical address to link
+ * @offset: offset into the IOVA state to map into
+ * @size: size of the buffer
+ * @dir: DMA direction
+ * @attrs: attributes of mapping properties
+ *
+ * Link a range of IOVA space for the given IOVA state without an IOTLB sync.
+ * This function is used to link multiple physical address ranges into a
+ * contiguous IOVA range without performing a costly IOTLB sync after each one.
+ *
+ * The caller is responsible for calling dma_iova_sync() to sync the IOTLB once
+ * all ranges have been linked.
+ */
+int dma_iova_link(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_start_pad = iova_offset(iovad, phys);
+
+ if (WARN_ON_ONCE(iova_start_pad && offset > 0))
+ return -EIO;
+
+ if (dev_use_swiotlb(dev, size, dir) &&
+ iova_unaligned(iovad, phys, size))
+ return iommu_dma_iova_link_swiotlb(dev, state, phys, offset,
+ size, dir, attrs);
+
+ return __dma_iova_link(dev, state->addr + offset - iova_start_pad,
+ phys - iova_start_pad,
+ iova_align(iovad, size + iova_start_pad), dir, attrs);
+}
+EXPORT_SYMBOL_GPL(dma_iova_link);
+
+/**
+ * dma_iova_sync - Sync IOTLB
+ * @dev: DMA device
+ * @state: IOVA state
+ * @offset: offset into the IOVA state to sync
+ * @size: size of the buffer
+ *
+ * Sync the IOTLB for the given IOVA state. This function should be called on
+ * the IOVA-contiguous range created by one or more dma_iova_link() calls.
+ */
+int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
+ size_t offset, size_t size)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ dma_addr_t addr = state->addr + offset;
+ size_t iova_start_pad = iova_offset(iovad, addr);
+
+ return iommu_sync_map(domain, addr - iova_start_pad,
+ iova_align(iovad, size + iova_start_pad));
+}
+EXPORT_SYMBOL_GPL(dma_iova_sync);
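
Putting dma_iova_link() and dma_iova_sync() together, a minimal sketch of batching several physically discontiguous chunks into one contiguous IOVA range (the chunk arrays are hypothetical, continuing the state allocated in the earlier sketch):

	size_t offset = 0;
	int i, ret;

	for (i = 0; i < nr_chunks; i++) {
		ret = dma_iova_link(dev, &state, chunk_phys[i], offset,
				    chunk_len[i], DMA_TO_DEVICE, 0);
		if (ret)
			goto err_destroy;
		offset += chunk_len[i];
	}
	/* One IOTLB sync for the whole range instead of one per chunk. */
	ret = dma_iova_sync(dev, &state, 0, offset);
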
+
+static void iommu_dma_iova_unlink_range_slow(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_start_pad = iova_offset(iovad, addr);
+ dma_addr_t end = addr + size;
+
+ do {
+ phys_addr_t phys;
+ size_t len;
+
+ phys = iommu_iova_to_phys(domain, addr);
+ if (WARN_ON(!phys))
+ /* Something very horrible happened here */
+ return;
+
+ len = min_t(size_t,
+ end - addr, iovad->granule - iova_start_pad);
+
+ if (!dev_is_dma_coherent(dev) &&
+ !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_cpu(phys, len, dir);
+
+ swiotlb_tbl_unmap_single(dev, phys, len, dir, attrs);
+
+ addr += len;
+ iova_start_pad = 0;
+ } while (addr < end);
+}
+
+static void __iommu_dma_iova_unlink(struct device *dev,
+ struct dma_iova_state *state, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs,
+ bool free_iova)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ dma_addr_t addr = state->addr + offset;
+ size_t iova_start_pad = iova_offset(iovad, addr);
+ struct iommu_iotlb_gather iotlb_gather;
+ size_t unmapped;
+
+ if ((state->__size & DMA_IOVA_USE_SWIOTLB) ||
+ (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)))
+ iommu_dma_iova_unlink_range_slow(dev, addr, size, dir, attrs);
+
+ iommu_iotlb_gather_init(&iotlb_gather);
+ iotlb_gather.queued = free_iova && READ_ONCE(cookie->fq_domain);
+
+ size = iova_align(iovad, size + iova_start_pad);
+ addr -= iova_start_pad;
+ unmapped = iommu_unmap_fast(domain, addr, size, &iotlb_gather);
+ WARN_ON(unmapped != size);
+
+ if (!iotlb_gather.queued)
+ iommu_iotlb_sync(domain, &iotlb_gather);
+ if (free_iova)
+ iommu_dma_free_iova(domain, addr, size, &iotlb_gather);
+}
+
+/**
+ * dma_iova_unlink - Unlink a range of IOVA space
+ * @dev: DMA device
+ * @state: IOVA state
+ * @offset: offset into the IOVA state to unlink
+ * @size: size of the buffer
+ * @dir: DMA direction
+ * @attrs: attributes of mapping properties
+ *
+ * Unlink a range of IOVA space for the given IOVA state.
+ */
+void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
+ size_t offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ __iommu_dma_iova_unlink(dev, state, offset, size, dir, attrs, false);
+}
+EXPORT_SYMBOL_GPL(dma_iova_unlink);
+
+/**
+ * dma_iova_destroy - Finish a DMA mapping transaction
+ * @dev: DMA device
+ * @state: IOVA state
+ * @mapped_len: number of bytes to unmap
+ * @dir: DMA direction
+ * @attrs: attributes of mapping properties
+ *
+ * Unlink the IOVA range up to @mapped_len and free the entire IOVA space. The
+ * IOVA range from the start address up to @mapped_len must be fully linked,
+ * and must be the only linked range in @state.
+ */
+void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
+ size_t mapped_len, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ if (mapped_len)
+ __iommu_dma_iova_unlink(dev, state, 0, mapped_len, dir, attrs,
+ true);
+ else
+ /*
+ * We can get here if the first call to dma_iova_link() failed and
+ * there is nothing to unlink, so just free the IOVA space directly.
+ */
+ dma_iova_free(dev, state);
+}
+EXPORT_SYMBOL_GPL(dma_iova_destroy);
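
And the matching teardown, continuing the hypothetical example sketched above:

	err_destroy:
		/* Unlinks everything mapped so far (offset bytes) and frees
		 * the IOVA space; with offset == 0 this degenerates to a
		 * plain dma_iova_free(). */
		dma_iova_destroy(dev, &state, offset, DMA_TO_DEVICE, 0);
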
+
void iommu_setup_dma_ops(struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 317266aca6e2..fcb6a0f7c082 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -902,11 +902,11 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
if (!domain)
return NULL;
- domain->pgtable = iommu_alloc_pages(GFP_KERNEL, 2);
+ domain->pgtable = iommu_alloc_pages_sz(GFP_KERNEL, SZ_16K);
if (!domain->pgtable)
goto err_pgtable;
- domain->lv2entcnt = iommu_alloc_pages(GFP_KERNEL, 1);
+ domain->lv2entcnt = iommu_alloc_pages_sz(GFP_KERNEL, SZ_8K);
if (!domain->lv2entcnt)
goto err_counter;
@@ -932,9 +932,9 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
return &domain->domain;
err_lv2ent:
- iommu_free_pages(domain->lv2entcnt, 1);
+ iommu_free_pages(domain->lv2entcnt);
err_counter:
- iommu_free_pages(domain->pgtable, 2);
+ iommu_free_pages(domain->pgtable);
err_pgtable:
kfree(domain);
return NULL;
@@ -975,8 +975,8 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
phys_to_virt(base));
}
- iommu_free_pages(domain->pgtable, 2);
- iommu_free_pages(domain->lv2entcnt, 1);
+ iommu_free_pages(domain->pgtable);
+ iommu_free_pages(domain->lv2entcnt);
kfree(domain);
}
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 30be786bff11..5f08523f97cb 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -64,7 +64,7 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
spin_lock_irqsave(&iommu_lock, flags);
ret = pamu_update_paace_stash(liodn, val);
if (ret) {
- pr_debug("Failed to update SPAACE for liodn %d\n ", liodn);
+ pr_debug("Failed to update SPAACE for liodn %d\n", liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
return ret;
}
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
index 6c7528130cf9..ada651c4a01b 100644
--- a/drivers/iommu/intel/Makefile
+++ b/drivers/iommu/intel/Makefile
@@ -1,11 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DMAR_TABLE) += dmar.o
-obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o nested.o cache.o prq.o
-obj-$(CONFIG_DMAR_TABLE) += trace.o
+obj-y += iommu.o pasid.o nested.o cache.o prq.o
+obj-$(CONFIG_DMAR_TABLE) += dmar.o trace.o
obj-$(CONFIG_DMAR_PERF) += perf.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
-ifdef CONFIG_INTEL_IOMMU
obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
-endif
obj-$(CONFIG_INTEL_IOMMU_PERF_EVENTS) += perfmon.o
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index e540092d664d..b61d9ea27aa9 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1099,6 +1099,9 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
spin_lock_init(&iommu->device_rbtree_lock);
mutex_init(&iommu->iopf_lock);
iommu->node = NUMA_NO_NODE;
+ spin_lock_init(&iommu->lock);
+ ida_init(&iommu->domain_ida);
+ mutex_init(&iommu->did_lock);
ver = readl(iommu->reg + DMAR_VER_REG);
pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
@@ -1187,7 +1190,7 @@ static void free_iommu(struct intel_iommu *iommu)
}
if (iommu->qi) {
- iommu_free_page(iommu->qi->desc);
+ iommu_free_pages(iommu->qi->desc);
kfree(iommu->qi->desc_status);
kfree(iommu->qi);
}
@@ -1195,6 +1198,7 @@ static void free_iommu(struct intel_iommu *iommu)
if (iommu->reg)
unmap_iommu(iommu);
+ ida_destroy(&iommu->domain_ida);
ida_free(&dmar_seq_ids, iommu->seq_id);
kfree(iommu);
}
@@ -1681,7 +1685,6 @@ int dmar_enable_qi(struct intel_iommu *iommu)
{
struct q_inval *qi;
void *desc;
- int order;
if (!ecap_qis(iommu->ecap))
return -ENOENT;
@@ -1702,8 +1705,9 @@ int dmar_enable_qi(struct intel_iommu *iommu)
* Need two pages to accommodate 256 descriptors of 256 bits each
* if the remapping hardware supports scalable mode translation.
*/
- order = ecap_smts(iommu->ecap) ? 1 : 0;
- desc = iommu_alloc_pages_node(iommu->node, GFP_ATOMIC, order);
+ desc = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC,
+ ecap_smts(iommu->ecap) ? SZ_8K :
+ SZ_4K);
if (!desc) {
kfree(qi);
iommu->qi = NULL;
@@ -1714,7 +1718,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
if (!qi->desc_status) {
- iommu_free_page(qi->desc);
+ iommu_free_pages(qi->desc);
kfree(qi);
iommu->qi = NULL;
return -ENOMEM;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index cb0b993bebb4..7aa3932251b2 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -397,7 +397,8 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
if (!alloc)
return NULL;
- context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
+ context = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC,
+ SZ_4K);
if (!context)
return NULL;
@@ -571,17 +572,17 @@ static void free_context_table(struct intel_iommu *iommu)
for (i = 0; i < ROOT_ENTRY_NR; i++) {
context = iommu_context_addr(iommu, i, 0, 0);
if (context)
- iommu_free_page(context);
+ iommu_free_pages(context);
if (!sm_supported(iommu))
continue;
context = iommu_context_addr(iommu, i, 0x80, 0);
if (context)
- iommu_free_page(context);
+ iommu_free_pages(context);
}
- iommu_free_page(iommu->root_entry);
+ iommu_free_pages(iommu->root_entry);
iommu->root_entry = NULL;
}
@@ -731,7 +732,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
if (!dma_pte_present(pte)) {
uint64_t pteval, tmp;
- tmp_page = iommu_alloc_page_node(domain->nid, gfp);
+ tmp_page = iommu_alloc_pages_node_sz(domain->nid, gfp,
+ SZ_4K);
if (!tmp_page)
return NULL;
@@ -745,7 +747,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
tmp = 0ULL;
if (!try_cmpxchg64(&pte->val, &tmp, pteval))
/* Someone else set it while we were thinking; use theirs. */
- iommu_free_page(tmp_page);
+ iommu_free_pages(tmp_page);
else
domain_flush_cache(domain, pte, sizeof(*pte));
}
@@ -858,7 +860,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
last_pfn < level_pfn + level_size(level) - 1)) {
dma_clear_pte(pte);
domain_flush_cache(domain, pte, sizeof(*pte));
- iommu_free_page(level_pte);
+ iommu_free_pages(level_pte);
}
next:
pfn += level_size(level);
@@ -882,7 +884,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
/* free pgd */
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
- iommu_free_page(domain->pgd);
+ iommu_free_pages(domain->pgd);
domain->pgd = NULL;
}
}
@@ -894,18 +896,16 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
The 'pte' argument is the *parent* PTE, pointing to the page that is to
be freed. */
static void dma_pte_list_pagetables(struct dmar_domain *domain,
- int level, struct dma_pte *pte,
- struct list_head *freelist)
+ int level, struct dma_pte *parent_pte,
+ struct iommu_pages_list *freelist)
{
- struct page *pg;
+ struct dma_pte *pte = phys_to_virt(dma_pte_addr(parent_pte));
- pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
- list_add_tail(&pg->lru, freelist);
+ iommu_pages_list_add(freelist, pte);
if (level == 1)
return;
- pte = page_address(pg);
do {
if (dma_pte_present(pte) && !dma_pte_superpage(pte))
dma_pte_list_pagetables(domain, level - 1, pte, freelist);
@@ -916,7 +916,7 @@ static void dma_pte_list_pagetables(struct dmar_domain *domain,
static void dma_pte_clear_level(struct dmar_domain *domain, int level,
struct dma_pte *pte, unsigned long pfn,
unsigned long start_pfn, unsigned long last_pfn,
- struct list_head *freelist)
+ struct iommu_pages_list *freelist)
{
struct dma_pte *first_pte = NULL, *last_pte = NULL;
@@ -961,7 +961,8 @@ next:
the page tables, and may have cached the intermediate levels. The
pages can only be freed after the IOTLB flush has been done. */
static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
- unsigned long last_pfn, struct list_head *freelist)
+ unsigned long last_pfn,
+ struct iommu_pages_list *freelist)
{
if (WARN_ON(!domain_pfn_supported(domain, last_pfn)) ||
WARN_ON(start_pfn > last_pfn))
@@ -973,8 +974,7 @@ static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
/* free pgd */
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
- struct page *pgd_page = virt_to_page(domain->pgd);
- list_add_tail(&pgd_page->lru, freelist);
+ iommu_pages_list_add(freelist, domain->pgd);
domain->pgd = NULL;
}
}
@@ -984,7 +984,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
struct root_entry *root;
- root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
+ root = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC, SZ_4K);
if (!root) {
pr_err("Allocating root entry for %s failed\n",
iommu->name);
@@ -1289,52 +1289,13 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-static int iommu_init_domains(struct intel_iommu *iommu)
-{
- u32 ndomains;
-
- ndomains = cap_ndoms(iommu->cap);
- pr_debug("%s: Number of Domains supported <%d>\n",
- iommu->name, ndomains);
-
- spin_lock_init(&iommu->lock);
-
- iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL);
- if (!iommu->domain_ids)
- return -ENOMEM;
-
- /*
- * If Caching mode is set, then invalid translations are tagged
- * with domain-id 0, hence we need to pre-allocate it. We also
- * use domain-id 0 as a marker for non-allocated domain-id, so
- * make sure it is not used for a real domain.
- */
- set_bit(0, iommu->domain_ids);
-
- /*
- * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid
- * entry for first-level or pass-through translation modes should
- * be programmed with a domain id different from those used for
- * second-level or nested translation. We reserve a domain id for
- * this purpose. This domain id is also used for identity domain
- * in legacy mode.
- */
- set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
-
- return 0;
-}
-
static void disable_dmar_iommu(struct intel_iommu *iommu)
{
- if (!iommu->domain_ids)
- return;
-
/*
* All iommu domains must have been detached from the devices,
* hence there should be no domain IDs in use.
*/
- if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap))
- > NUM_RESERVED_DID))
+ if (WARN_ON(!ida_is_empty(&iommu->domain_ida)))
return;
if (iommu->gcmd & DMA_GCMD_TE)
@@ -1343,11 +1304,6 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
static void free_dmar_iommu(struct intel_iommu *iommu)
{
- if (iommu->domain_ids) {
- bitmap_free(iommu->domain_ids);
- iommu->domain_ids = NULL;
- }
-
if (iommu->copied_tables) {
bitmap_free(iommu->copied_tables);
iommu->copied_tables = NULL;
@@ -1380,7 +1336,6 @@ static bool first_level_by_default(struct intel_iommu *iommu)
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{
struct iommu_domain_info *info, *curr;
- unsigned long ndomains;
int num, ret = -ENOSPC;
if (domain->domain.type == IOMMU_DOMAIN_SVA)
@@ -1390,40 +1345,36 @@ int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
if (!info)
return -ENOMEM;
- spin_lock(&iommu->lock);
+ guard(mutex)(&iommu->did_lock);
curr = xa_load(&domain->iommu_array, iommu->seq_id);
if (curr) {
curr->refcnt++;
- spin_unlock(&iommu->lock);
kfree(info);
return 0;
}
- ndomains = cap_ndoms(iommu->cap);
- num = find_first_zero_bit(iommu->domain_ids, ndomains);
- if (num >= ndomains) {
+ num = ida_alloc_range(&iommu->domain_ida, IDA_START_DID,
+ cap_ndoms(iommu->cap) - 1, GFP_KERNEL);
+ if (num < 0) {
pr_err("%s: No free domain ids\n", iommu->name);
goto err_unlock;
}
- set_bit(num, iommu->domain_ids);
info->refcnt = 1;
info->did = num;
info->iommu = iommu;
curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id,
- NULL, info, GFP_ATOMIC);
+ NULL, info, GFP_KERNEL);
if (curr) {
ret = xa_err(curr) ? : -EBUSY;
goto err_clear;
}
- spin_unlock(&iommu->lock);
return 0;
err_clear:
- clear_bit(info->did, iommu->domain_ids);
+ ida_free(&iommu->domain_ida, info->did);
err_unlock:
- spin_unlock(&iommu->lock);
kfree(info);
return ret;
}
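The bitmap-to-IDA conversion above leans on two generic facilities: scope-based lock guards from <linux/cleanup.h> and the IDA allocator. A standalone sketch of the pattern, with illustrative names:

#include <linux/cleanup.h>
#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(did_lock);
static DEFINE_IDA(domain_ida);

static int alloc_did(unsigned int min, unsigned int max)
{
	/* Dropped automatically when the scope exits, on every return path. */
	guard(mutex)(&did_lock);

	/* Returns an unused ID in [min, max], or a negative errno. */
	return ida_alloc_range(&domain_ida, min, max, GFP_KERNEL);
}

Replacing the spinlock with a mutex is also what allows the GFP_ATOMIC-to-GFP_KERNEL relaxation in domain_attach_iommu().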
@@ -1435,21 +1386,21 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
if (domain->domain.type == IOMMU_DOMAIN_SVA)
return;
- spin_lock(&iommu->lock);
+ guard(mutex)(&iommu->did_lock);
info = xa_load(&domain->iommu_array, iommu->seq_id);
if (--info->refcnt == 0) {
- clear_bit(info->did, iommu->domain_ids);
+ ida_free(&iommu->domain_ida, info->did);
xa_erase(&domain->iommu_array, iommu->seq_id);
domain->nid = NUMA_NO_NODE;
kfree(info);
}
- spin_unlock(&iommu->lock);
}
static void domain_exit(struct dmar_domain *domain)
{
if (domain->pgd) {
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist =
+ IOMMU_PAGES_LIST_INIT(freelist);
domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
iommu_put_pages_list(&freelist);
@@ -1681,9 +1632,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
}
attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
- attr |= DMA_FL_PTE_PRESENT;
if (domain->use_first_level) {
- attr |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
+ attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
if (prot & DMA_PTE_WRITE)
attr |= DMA_FL_PTE_DIRTY;
}
@@ -1859,6 +1809,7 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
return ret;
info->domain = domain;
+ info->domain_attached = true;
spin_lock_irqsave(&domain->lock, flags);
list_add(&info->link, &domain->devices);
spin_unlock_irqrestore(&domain->lock, flags);
@@ -2027,7 +1978,8 @@ static int copy_context_table(struct intel_iommu *iommu,
if (!old_ce)
goto out;
- new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL);
+ new_ce = iommu_alloc_pages_node_sz(iommu->node,
+ GFP_KERNEL, SZ_4K);
if (!new_ce)
goto out_unmap;
@@ -2042,7 +1994,7 @@ static int copy_context_table(struct intel_iommu *iommu,
did = context_domain_id(&ce);
if (did >= 0 && did < cap_ndoms(iommu->cap))
- set_bit(did, iommu->domain_ids);
+ ida_alloc_range(&iommu->domain_ida, did, did, GFP_KERNEL);
set_context_copied(iommu, bus, devfn);
new_ce[idx] = ce;
@@ -2169,11 +2121,6 @@ static int __init init_dmars(void)
}
intel_iommu_init_qi(iommu);
-
- ret = iommu_init_domains(iommu);
- if (ret)
- goto free_iommu;
-
init_translation_status(iommu);
if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
@@ -2651,9 +2598,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
- ret = iommu_init_domains(iommu);
- if (ret == 0)
- ret = iommu_alloc_root_entry(iommu);
+ ret = iommu_alloc_root_entry(iommu);
if (ret)
goto out;
@@ -2744,7 +2689,6 @@ static struct dmar_satc_unit *dmar_find_matched_satc_unit(struct pci_dev *dev)
struct device *tmp;
int i;
- dev = pci_physfn(dev);
rcu_read_lock();
list_for_each_entry_rcu(satcu, &dmar_satc_units, list) {
@@ -2761,15 +2705,16 @@ out:
return satcu;
}
-static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
+static bool dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
{
- int i, ret = 1;
- struct pci_bus *bus;
struct pci_dev *bridge = NULL;
- struct device *tmp;
- struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
struct dmar_satc_unit *satcu;
+ struct acpi_dmar_atsr *atsr;
+ bool supported = true;
+ struct pci_bus *bus;
+ struct device *tmp;
+ int i;
dev = pci_physfn(dev);
satcu = dmar_find_matched_satc_unit(dev);
@@ -2787,11 +2732,11 @@ static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
bridge = bus->self;
/* If it's an integrated device, allow ATS */
if (!bridge)
- return 1;
+ return true;
/* Connected via non-PCIe: no ATS */
if (!pci_is_pcie(bridge) ||
pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
- return 0;
+ return false;
/* If we found the root port, look it up in the ATSR */
if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
break;
@@ -2810,11 +2755,11 @@ static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
if (atsru->include_all)
goto out;
}
- ret = 0;
+ supported = false;
out:
rcu_read_unlock();
- return ret;
+ return supported;
}
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
@@ -2972,9 +2917,14 @@ static ssize_t domains_used_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
- return sysfs_emit(buf, "%d\n",
- bitmap_weight(iommu->domain_ids,
- cap_ndoms(iommu->cap)));
+ unsigned int count = 0;
+ int id;
+
+ for (id = 0; id < cap_ndoms(iommu->cap); id++)
+ if (ida_exists(&iommu->domain_ida, id))
+ count++;
+
+ return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(domains_used);
@@ -3257,6 +3207,10 @@ void device_block_translation(struct device *dev)
struct intel_iommu *iommu = info->iommu;
unsigned long flags;
+	/* Device already in DMA blocking state. Nothing to do. */
+ if (!info->domain_attached)
+ return;
+
if (info->domain)
cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
@@ -3268,6 +3222,9 @@ void device_block_translation(struct device *dev)
domain_context_clear(info);
}
+ /* Device now in DMA blocking state. */
+ info->domain_attached = false;
+
if (!info->domain)
return;
@@ -3282,6 +3239,9 @@ void device_block_translation(struct device *dev)
static int blocking_domain_attach_dev(struct iommu_domain *domain,
struct device *dev)
{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+ iopf_for_domain_remove(info->domain ? &info->domain->domain : NULL, dev);
device_block_translation(dev);
return 0;
}
@@ -3360,7 +3320,7 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
/* always allocate the top pgd */
- domain->pgd = iommu_alloc_page_node(domain->nid, GFP_KERNEL);
+ domain->pgd = iommu_alloc_pages_node_sz(domain->nid, GFP_KERNEL, SZ_4K);
if (!domain->pgd) {
kfree(domain);
return ERR_PTR(-ENOMEM);
@@ -3492,7 +3452,15 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
if (ret)
return ret;
- return dmar_domain_attach_device(to_dmar_domain(domain), dev);
+ ret = iopf_for_domain_set(domain, dev);
+ if (ret)
+ return ret;
+
+ ret = dmar_domain_attach_device(to_dmar_domain(domain), dev);
+ if (ret)
+ iopf_for_domain_remove(domain, dev);
+
+ return ret;
}
static int intel_iommu_map(struct iommu_domain *domain,
@@ -3603,7 +3571,8 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
cache_tag_flush_range(to_dmar_domain(domain), gather->start,
- gather->end, list_empty(&gather->freelist));
+ gather->end,
+ iommu_pages_list_empty(&gather->freelist));
iommu_put_pages_list(&gather->freelist);
}
@@ -3918,6 +3887,8 @@ int intel_iommu_enable_iopf(struct device *dev)
if (!info->pri_enabled)
return -ENODEV;
+ /* pri_enabled is protected by the group mutex. */
+ iommu_group_mutex_assert(dev);
if (info->iopf_refcount) {
info->iopf_refcount++;
return 0;
@@ -3940,43 +3911,13 @@ void intel_iommu_disable_iopf(struct device *dev)
if (WARN_ON(!info->pri_enabled || !info->iopf_refcount))
return;
+ iommu_group_mutex_assert(dev);
if (--info->iopf_refcount)
return;
iopf_queue_remove_device(iommu->iopf_queue, dev);
}
-static int
-intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
-{
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- return intel_iommu_enable_iopf(dev);
-
- case IOMMU_DEV_FEAT_SVA:
- return 0;
-
- default:
- return -ENODEV;
- }
-}
-
-static int
-intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
-{
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- intel_iommu_disable_iopf(dev);
- return 0;
-
- case IOMMU_DEV_FEAT_SVA:
- return 0;
-
- default:
- return -ENODEV;
- }
-}
-
static bool intel_iommu_is_attach_deferred(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
@@ -4050,6 +3991,7 @@ static int blocking_domain_set_dev_pasid(struct iommu_domain *domain,
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
+ iopf_for_domain_remove(old, dev);
intel_pasid_tear_down_entry(info->iommu, dev, pasid, false);
domain_remove_dev_pasid(old, dev, pasid);
@@ -4123,6 +4065,10 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
if (IS_ERR(dev_pasid))
return PTR_ERR(dev_pasid);
+ ret = iopf_for_domain_replace(domain, old, dev);
+ if (ret)
+ goto out_remove_dev_pasid;
+
if (dmar_domain->use_first_level)
ret = domain_setup_first_level(iommu, dmar_domain,
dev, pasid, old);
@@ -4130,7 +4076,7 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
ret = domain_setup_second_level(iommu, dmar_domain,
dev, pasid, old);
if (ret)
- goto out_remove_dev_pasid;
+ goto out_unwind_iopf;
domain_remove_dev_pasid(old, dev, pasid);
@@ -4138,6 +4084,8 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
return 0;
+out_unwind_iopf:
+ iopf_for_domain_replace(old, domain, dev);
out_remove_dev_pasid:
domain_remove_dev_pasid(domain, dev, pasid);
return ret;
@@ -4352,11 +4300,19 @@ static int identity_domain_attach_dev(struct iommu_domain *domain, struct device
if (dev_is_real_dma_subdevice(dev))
return 0;
+ /*
+ * No PRI support with the global identity domain. No need to enable or
+ * disable PRI in this path as the iommu has been put in the blocking
+ * state.
+ */
if (sm_supported(iommu))
ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID);
else
ret = device_setup_pass_through(dev);
+ if (!ret)
+ info->domain_attached = true;
+
return ret;
}
@@ -4371,10 +4327,16 @@ static int identity_domain_set_dev_pasid(struct iommu_domain *domain,
if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev))
return -EOPNOTSUPP;
- ret = domain_setup_passthrough(iommu, dev, pasid, old);
+ ret = iopf_for_domain_replace(domain, old, dev);
if (ret)
return ret;
+ ret = domain_setup_passthrough(iommu, dev, pasid, old);
+ if (ret) {
+ iopf_for_domain_replace(old, domain, dev);
+ return ret;
+ }
+
domain_remove_dev_pasid(old, dev, pasid);
return 0;
}
@@ -4401,8 +4363,6 @@ const struct iommu_ops intel_iommu_ops = {
.release_device = intel_iommu_release_device,
.get_resv_regions = intel_iommu_get_resv_regions,
.device_group = intel_iommu_device_group,
- .dev_enable_feat = intel_iommu_dev_enable_feat,
- .dev_disable_feat = intel_iommu_dev_disable_feat,
.is_attach_deferred = intel_iommu_is_attach_deferred,
.def_domain_type = device_def_domain_type,
.pgsize_bitmap = SZ_4K,
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index c4916886da5a..3ddbcc603de2 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -493,14 +493,13 @@ struct q_inval {
/* Page Request Queue depth */
#define PRQ_ORDER 4
-#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
-#define PRQ_DEPTH ((0x1000 << PRQ_ORDER) >> 5)
+#define PRQ_SIZE (SZ_4K << PRQ_ORDER)
+#define PRQ_RING_MASK (PRQ_SIZE - 0x20)
+#define PRQ_DEPTH (PRQ_SIZE >> 5)
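With PRQ_ORDER still 4, the rewritten macros expand to the same values as before; a worked expansion:

PRQ_SIZE      = SZ_4K << 4   = 65536 bytes (64 KiB of queue memory)
PRQ_DEPTH     = 65536 >> 5   = 2048 entries (one 32-byte descriptor each)
PRQ_RING_MASK = 65536 - 0x20 = 0xffe0 (offset of the last valid entry)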
struct dmar_pci_notify_info;
#ifdef CONFIG_IRQ_REMAP
-/* 1MB - maximum possible interrupt remapping table size */
-#define INTR_REMAP_PAGE_ORDER 8
#define INTR_REMAP_TABLE_REG_SIZE 0xf
#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
@@ -722,7 +721,9 @@ struct intel_iommu {
unsigned char name[16]; /* Device Name */
#ifdef CONFIG_INTEL_IOMMU
- unsigned long *domain_ids; /* bitmap of domains */
+ /* mutex to protect domain_ida */
+ struct mutex did_lock;
+ struct ida domain_ida; /* domain id allocator */
unsigned long *copied_tables; /* bitmap of copied tables */
spinlock_t lock; /* protect context, domain ids */
struct root_entry *root_entry; /* virtual address */
@@ -773,6 +774,7 @@ struct device_domain_info {
u8 ats_supported:1;
u8 ats_enabled:1;
u8 dtlb_extra_inval:1; /* Quirk for devices need extra flush */
+ u8 domain_attached:1; /* Device has domain attached */
u8 ats_qdep;
unsigned int iopf_refcount;
struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
@@ -809,11 +811,22 @@ static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
}
/*
- * Domain ID reserved for pasid entries programmed for first-level
- * only and pass-through transfer modes.
+ * Domain IDs 0 and 1 are reserved:
+ *
+ * If Caching mode is set, then invalid translations are tagged
+ * with domain-id 0, hence we need to pre-allocate it. We also
+ * use domain-id 0 as a marker for non-allocated domain-id, so
+ * make sure it is not used for a real domain.
+ *
+ * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid
+ * entry for first-level or pass-through translation modes should
+ * be programmed with a domain id different from those used for
+ * second-level or nested translation. We reserve a domain id for
+ * this purpose. This domain id is also used for identity domain
+ * in legacy mode.
*/
#define FLPT_DEFAULT_DID 1
-#define NUM_RESERVED_DID 2
+#define IDA_START_DID 2
/* Retrieve the domain ID which has allocated to the domain */
static inline u16
@@ -1298,6 +1311,39 @@ void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid);
int intel_iommu_enable_iopf(struct device *dev);
void intel_iommu_disable_iopf(struct device *dev);
+static inline int iopf_for_domain_set(struct iommu_domain *domain,
+ struct device *dev)
+{
+ if (!domain || !domain->iopf_handler)
+ return 0;
+
+ return intel_iommu_enable_iopf(dev);
+}
+
+static inline void iopf_for_domain_remove(struct iommu_domain *domain,
+ struct device *dev)
+{
+ if (!domain || !domain->iopf_handler)
+ return;
+
+ intel_iommu_disable_iopf(dev);
+}
+
+static inline int iopf_for_domain_replace(struct iommu_domain *new,
+ struct iommu_domain *old,
+ struct device *dev)
+{
+ int ret;
+
+ ret = iopf_for_domain_set(new, dev);
+ if (ret)
+ return ret;
+
+ iopf_for_domain_remove(old, dev);
+
+ return 0;
+}
+
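All of the set_dev_pasid paths below follow the same enable-then-unwind shape; a condensed sketch, where setup_hw() stands in for the driver-specific PASID programming (hypothetical name):

ret = iopf_for_domain_replace(new, old, dev);	/* enable IOPF for new first */
if (ret)
	return ret;
ret = setup_hw(dev);				/* hypothetical */
if (ret) {
	/* Arguments swapped back: restores the old domain's IOPF state. */
	iopf_for_domain_replace(old, new, dev);
	return ret;
}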
#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);
struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 3bc2a03cceca..cf7b6882ec75 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -530,11 +530,11 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
if (!ir_table)
return -ENOMEM;
- ir_table_base = iommu_alloc_pages_node(iommu->node, GFP_KERNEL,
- INTR_REMAP_PAGE_ORDER);
+ /* 1MB - maximum possible interrupt remapping table size */
+ ir_table_base =
+ iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, SZ_1M);
if (!ir_table_base) {
- pr_err("IR%d: failed to allocate pages of order %d\n",
- iommu->seq_id, INTR_REMAP_PAGE_ORDER);
+ pr_err("IR%d: failed to allocate 1M of pages\n", iommu->seq_id);
goto out_free_table;
}
@@ -612,7 +612,7 @@ out_free_fwnode:
out_free_bitmap:
bitmap_free(bitmap);
out_free_pages:
- iommu_free_pages(ir_table_base, INTR_REMAP_PAGE_ORDER);
+ iommu_free_pages(ir_table_base);
out_free_table:
kfree(ir_table);
@@ -633,7 +633,7 @@ static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
irq_domain_free_fwnode(fn);
iommu->ir_domain = NULL;
}
- iommu_free_pages(iommu->ir_table->base, INTR_REMAP_PAGE_ORDER);
+ iommu_free_pages(iommu->ir_table->base);
bitmap_free(iommu->ir_table->bitmap);
kfree(iommu->ir_table);
iommu->ir_table = NULL;
diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
index 6ac5c534bef4..fc312f649f9e 100644
--- a/drivers/iommu/intel/nested.c
+++ b/drivers/iommu/intel/nested.c
@@ -27,8 +27,7 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
unsigned long flags;
int ret = 0;
- if (info->domain)
- device_block_translation(dev);
+ device_block_translation(dev);
if (iommu->agaw < dmar_domain->s2_domain->agaw) {
dev_err_ratelimited(dev, "Adjusted guest address width not compatible\n");
@@ -56,17 +55,24 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
if (ret)
goto detach_iommu;
+ ret = iopf_for_domain_set(domain, dev);
+ if (ret)
+ goto unassign_tag;
+
ret = intel_pasid_setup_nested(iommu, dev,
IOMMU_NO_PASID, dmar_domain);
if (ret)
- goto unassign_tag;
+ goto disable_iopf;
info->domain = dmar_domain;
+ info->domain_attached = true;
spin_lock_irqsave(&dmar_domain->lock, flags);
list_add(&info->link, &dmar_domain->devices);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
return 0;
+disable_iopf:
+ iopf_for_domain_remove(domain, dev);
unassign_tag:
cache_tag_unassign_domain(dmar_domain, dev, IOMMU_NO_PASID);
detach_iommu:
@@ -166,14 +172,20 @@ static int intel_nested_set_dev_pasid(struct iommu_domain *domain,
if (IS_ERR(dev_pasid))
return PTR_ERR(dev_pasid);
- ret = domain_setup_nested(iommu, dmar_domain, dev, pasid, old);
+ ret = iopf_for_domain_replace(domain, old, dev);
if (ret)
goto out_remove_dev_pasid;
+ ret = domain_setup_nested(iommu, dmar_domain, dev, pasid, old);
+ if (ret)
+ goto out_unwind_iopf;
+
domain_remove_dev_pasid(old, dev, pasid);
return 0;
+out_unwind_iopf:
+ iopf_for_domain_replace(old, domain, dev);
out_remove_dev_pasid:
domain_remove_dev_pasid(domain, dev, pasid);
return ret;
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 7ee18bb48bd4..ac67a056b6c8 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -60,14 +60,14 @@ int intel_pasid_alloc_table(struct device *dev)
size = max_pasid >> (PASID_PDE_SHIFT - 3);
order = size ? get_order(size) : 0;
- dir = iommu_alloc_pages_node(info->iommu->node, GFP_KERNEL, order);
+ dir = iommu_alloc_pages_node_sz(info->iommu->node, GFP_KERNEL,
+ 1 << (order + PAGE_SHIFT));
if (!dir) {
kfree(pasid_table);
return -ENOMEM;
}
pasid_table->table = dir;
- pasid_table->order = order;
pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
info->pasid_table = pasid_table;
@@ -97,10 +97,10 @@ void intel_pasid_free_table(struct device *dev)
max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
for (i = 0; i < max_pde; i++) {
table = get_pasid_table_from_pde(&dir[i]);
- iommu_free_page(table);
+ iommu_free_pages(table);
}
- iommu_free_pages(pasid_table->table, pasid_table->order);
+ iommu_free_pages(pasid_table->table);
kfree(pasid_table);
}
@@ -148,7 +148,8 @@ retry:
if (!entries) {
u64 tmp;
- entries = iommu_alloc_page_node(info->iommu->node, GFP_ATOMIC);
+ entries = iommu_alloc_pages_node_sz(info->iommu->node,
+ GFP_ATOMIC, SZ_4K);
if (!entries)
return NULL;
@@ -161,7 +162,7 @@ retry:
tmp = 0ULL;
if (!try_cmpxchg64(&dir[dir_index].val, &tmp,
(u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
- iommu_free_page(entries);
+ iommu_free_pages(entries);
goto retry;
}
if (!ecap_coherent(info->iommu->ecap)) {
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 668d8ece6b14..fd0fd1a0df84 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -47,7 +47,6 @@ struct pasid_entry {
/* The representative of a PASID table */
struct pasid_table {
void *table; /* pasid table pointer */
- int order; /* page order of pasid table */
u32 max_pasid; /* max pasid */
};
diff --git a/drivers/iommu/intel/prq.c b/drivers/iommu/intel/prq.c
index 5b6a64d96850..52570e42a14c 100644
--- a/drivers/iommu/intel/prq.c
+++ b/drivers/iommu/intel/prq.c
@@ -290,7 +290,8 @@ int intel_iommu_enable_prq(struct intel_iommu *iommu)
struct iopf_queue *iopfq;
int irq, ret;
- iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER);
+ iommu->prq =
+ iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, PRQ_SIZE);
if (!iommu->prq) {
pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
iommu->name);
@@ -340,7 +341,7 @@ free_hwirq:
dmar_free_hwirq(irq);
iommu->pr_irq = 0;
free_prq:
- iommu_free_pages(iommu->prq, PRQ_ORDER);
+ iommu_free_pages(iommu->prq);
iommu->prq = NULL;
return ret;
@@ -363,7 +364,7 @@ int intel_iommu_finish_prq(struct intel_iommu *iommu)
iommu->iopf_queue = NULL;
}
- iommu_free_pages(iommu->prq, PRQ_ORDER);
+ iommu_free_pages(iommu->prq);
iommu->prq = NULL;
return 0;
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index ba93123cb4eb..f3da596410b5 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -164,18 +164,23 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
if (IS_ERR(dev_pasid))
return PTR_ERR(dev_pasid);
+ ret = iopf_for_domain_replace(domain, old, dev);
+ if (ret)
+ goto out_remove_dev_pasid;
+
/* Setup the pasid table: */
sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
ret = __domain_setup_first_level(iommu, dev, pasid,
FLPT_DEFAULT_DID, mm->pgd,
sflags, old);
if (ret)
- goto out_remove_dev_pasid;
+ goto out_unwind_iopf;
domain_remove_dev_pasid(old, dev, pasid);
return 0;
-
+out_unwind_iopf:
+ iopf_for_domain_replace(old, domain, dev);
out_remove_dev_pasid:
domain_remove_dev_pasid(domain, dev, pasid);
return ret;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 7632c80edea6..96425e92f313 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -13,6 +13,7 @@
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
+#include <linux/device/faux.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -251,8 +252,6 @@ static inline bool arm_lpae_concat_mandatory(struct io_pgtable_cfg *cfg,
(data->start_level == 1) && (oas == 40);
}
-static bool selftest_running = false;
-
static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
return (dma_addr_t)virt_to_phys(pages);
@@ -263,16 +262,20 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
void *cookie)
{
struct device *dev = cfg->iommu_dev;
- int order = get_order(size);
+ size_t alloc_size;
dma_addr_t dma;
void *pages;
- VM_BUG_ON((gfp & __GFP_HIGHMEM));
-
+ /*
+ * For very small starting-level translation tables the HW requires a
+	 * minimum alignment of at least 64 bytes to cover all cases.
+ */
+ alloc_size = max(size, 64);
if (cfg->alloc)
- pages = cfg->alloc(cookie, size, gfp);
+ pages = cfg->alloc(cookie, alloc_size, gfp);
else
- pages = iommu_alloc_pages_node(dev_to_node(dev), gfp, order);
+ pages = iommu_alloc_pages_node_sz(dev_to_node(dev), gfp,
+ alloc_size);
if (!pages)
return NULL;
@@ -300,7 +303,7 @@ out_free:
if (cfg->free)
cfg->free(cookie, pages, size);
else
- iommu_free_pages(pages, order);
+ iommu_free_pages(pages);
return NULL;
}
@@ -316,7 +319,7 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
if (cfg->free)
cfg->free(cookie, pages, size);
else
- iommu_free_pages(pages, get_order(size));
+ iommu_free_pages(pages);
}
static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
@@ -371,7 +374,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
for (i = 0; i < num_entries; i++)
if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
/* We require an unmap first */
- WARN_ON(!selftest_running);
+ WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
return -EEXIST;
} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
/*
@@ -473,7 +476,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
cptep = iopte_deref(pte, data);
} else if (pte) {
/* We require an unmap first */
- WARN_ON(!selftest_running);
+ WARN_ON(!(cfg->quirks & IO_PGTABLE_QUIRK_NO_WARN));
return -EEXIST;
}
@@ -641,8 +644,10 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
ptep += unmap_idx_start;
pte = READ_ONCE(*ptep);
- if (WARN_ON(!pte))
- return 0;
+ if (!pte) {
+ WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
+ return -ENOENT;
+ }
/* If the size matches this level, we're in the right place */
if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
@@ -652,8 +657,10 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
/* Find and handle non-leaf entries */
for (i = 0; i < num_entries; i++) {
pte = READ_ONCE(ptep[i]);
- if (WARN_ON(!pte))
+ if (!pte) {
+ WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
break;
+ }
if (!iopte_leaf(pte, lvl, iop->fmt)) {
__arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1);
@@ -968,7 +975,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_ARM_TTBR1 |
IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
- IO_PGTABLE_QUIRK_ARM_HD))
+ IO_PGTABLE_QUIRK_ARM_HD |
+ IO_PGTABLE_QUIRK_NO_WARN))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -1069,7 +1077,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
struct arm_lpae_io_pgtable *data;
typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
- if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB))
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB |
+ IO_PGTABLE_QUIRK_NO_WARN))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -1310,7 +1319,6 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
#define __FAIL(ops, i) ({ \
WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
arm_lpae_dump_ops(ops); \
- selftest_running = false; \
-EFAULT; \
})
@@ -1326,8 +1334,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
size_t size, mapped;
struct io_pgtable_ops *ops;
- selftest_running = true;
-
for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
cfg_cookie = cfg;
ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
@@ -1416,7 +1422,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
free_io_pgtable_ops(ops);
}
- selftest_running = false;
return 0;
}
@@ -1433,15 +1438,18 @@ static int __init arm_lpae_do_selftests(void)
};
int i, j, k, pass = 0, fail = 0;
- struct device dev;
+ struct faux_device *dev;
struct io_pgtable_cfg cfg = {
.tlb = &dummy_tlb_ops,
.coherent_walk = true,
- .iommu_dev = &dev,
+ .quirks = IO_PGTABLE_QUIRK_NO_WARN,
};
- /* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
- set_dev_node(&dev, NUMA_NO_NODE);
+ dev = faux_device_create("io-pgtable-test", NULL, 0);
+ if (!dev)
+ return -ENOMEM;
+
+ cfg.iommu_dev = &dev->dev;
for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
for (j = 0; j < ARRAY_SIZE(address_size); ++j) {
@@ -1461,6 +1469,8 @@ static int __init arm_lpae_do_selftests(void)
}
pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
+ faux_device_destroy(dev);
+
return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
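A caller that legitimately expects map/unmap failures — such as the selftest above — now opts out of the WARNs by setting the new quirk. A minimal sketch of the relevant io_pgtable_cfg fields (the ias/oas/pgsize values here are illustrative, not taken from the selftest tables):

struct io_pgtable_cfg cfg = {
	.quirks		= IO_PGTABLE_QUIRK_NO_WARN,
	.pgsize_bitmap	= SZ_4K | SZ_2M,	/* illustrative */
	.ias		= 48,			/* illustrative */
	.oas		= 48,			/* illustrative */
	.coherent_walk	= true,
	.tlb		= &dummy_tlb_ops,	/* as in the selftest */
	.iommu_dev	= &dev->dev,		/* the faux device */
};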
diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
index 06aca9ab52f9..679bda104797 100644
--- a/drivers/iommu/io-pgtable-dart.c
+++ b/drivers/iommu/io-pgtable-dart.c
@@ -107,14 +107,6 @@ static phys_addr_t iopte_to_paddr(dart_iopte pte,
return paddr;
}
-static void *__dart_alloc_pages(size_t size, gfp_t gfp)
-{
- int order = get_order(size);
-
- VM_BUG_ON((gfp & __GFP_HIGHMEM));
- return iommu_alloc_pages(gfp, order);
-}
-
static int dart_init_pte(struct dart_io_pgtable *data,
unsigned long iova, phys_addr_t paddr,
dart_iopte prot, int num_entries,
@@ -256,13 +248,13 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
/* no L2 table present */
if (!pte) {
- cptep = __dart_alloc_pages(tblsz, gfp);
+ cptep = iommu_alloc_pages_sz(gfp, tblsz);
if (!cptep)
return -ENOMEM;
pte = dart_install_table(cptep, ptep, 0, data);
if (pte)
- iommu_free_pages(cptep, get_order(tblsz));
+ iommu_free_pages(cptep);
/* L2 table is present (now) */
pte = READ_ONCE(*ptep);
@@ -413,7 +405,8 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;
for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
- data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL);
+ data->pgd[i] =
+ iommu_alloc_pages_sz(GFP_KERNEL, DART_GRANULE(data));
if (!data->pgd[i])
goto out_free_data;
cfg->apple_dart_cfg.ttbr[i] = virt_to_phys(data->pgd[i]);
@@ -423,8 +416,7 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
out_free_data:
while (--i >= 0) {
- iommu_free_pages(data->pgd[i],
- get_order(DART_GRANULE(data)));
+ iommu_free_pages(data->pgd[i]);
}
kfree(data);
return NULL;
@@ -433,7 +425,6 @@ out_free_data:
static void apple_dart_free_pgtable(struct io_pgtable *iop)
{
struct dart_io_pgtable *data = io_pgtable_to_data(iop);
- int order = get_order(DART_GRANULE(data));
dart_iopte *ptep, *end;
int i;
@@ -445,9 +436,9 @@ static void apple_dart_free_pgtable(struct io_pgtable *iop)
dart_iopte pte = *ptep++;
if (pte)
- iommu_free_pages(iopte_deref(pte, data), order);
+ iommu_free_pages(iopte_deref(pte, data));
}
- iommu_free_pages(data->pgd[i], order);
+ iommu_free_pages(data->pgd[i]);
}
kfree(data);
diff --git a/drivers/iommu/iommu-pages.c b/drivers/iommu/iommu-pages.c
new file mode 100644
index 000000000000..238c09e5166b
--- /dev/null
+++ b/drivers/iommu/iommu-pages.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+#include "iommu-pages.h"
+#include <linux/gfp.h>
+#include <linux/mm.h>
+
+#define IOPTDESC_MATCH(pg_elm, elm) \
+ static_assert(offsetof(struct page, pg_elm) == \
+ offsetof(struct ioptdesc, elm))
+IOPTDESC_MATCH(flags, __page_flags);
+IOPTDESC_MATCH(lru, iopt_freelist_elm); /* Ensure bit 0 is clear */
+IOPTDESC_MATCH(mapping, __page_mapping);
+IOPTDESC_MATCH(private, _private);
+IOPTDESC_MATCH(page_type, __page_type);
+IOPTDESC_MATCH(_refcount, __page_refcount);
+#ifdef CONFIG_MEMCG
+IOPTDESC_MATCH(memcg_data, memcg_data);
+#endif
+#undef IOPTDESC_MATCH
+static_assert(sizeof(struct ioptdesc) <= sizeof(struct page));
+
+/**
+ * iommu_alloc_pages_node_sz - Allocate a zeroed page of a given size from a
+ *			       specific NUMA node
+ * @nid: memory NUMA node id
+ * @gfp: buddy allocator flags
+ * @size: Memory size to allocate, rounded up to a power of 2
+ *
+ * Returns the virtual address of the allocated page. The page must be freed
+ * either by calling iommu_free_pages() or via iommu_put_pages_list(). The
+ * returned allocation is round_up_pow_two(size) big, and is physically aligned
+ * to its size.
+ */
+void *iommu_alloc_pages_node_sz(int nid, gfp_t gfp, size_t size)
+{
+ unsigned long pgcnt;
+ struct folio *folio;
+ unsigned int order;
+
+ /* This uses page_address() on the memory. */
+ if (WARN_ON(gfp & __GFP_HIGHMEM))
+ return NULL;
+
+ /*
+ * Currently sub page allocations result in a full page being returned.
+ */
+ order = get_order(size);
+
+ /*
+ * __folio_alloc_node() does not handle NUMA_NO_NODE like
+ * alloc_pages_node() did.
+ */
+ if (nid == NUMA_NO_NODE)
+ nid = numa_mem_id();
+
+ folio = __folio_alloc_node(gfp | __GFP_ZERO, order, nid);
+ if (unlikely(!folio))
+ return NULL;
+
+ /*
+	 * All page allocations that should be reported as "iommu-pagetables"
+ * to userspace must use one of the functions below. This includes
+ * allocations of page-tables and other per-iommu_domain configuration
+ * structures.
+ *
+ * This is necessary for the proper accounting as IOMMU state can be
+ * rather large, i.e. multiple gigabytes in size.
+ */
+ pgcnt = 1UL << order;
+ mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, pgcnt);
+ lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, pgcnt);
+
+ return folio_address(folio);
+}
+EXPORT_SYMBOL_GPL(iommu_alloc_pages_node_sz);
+
+static void __iommu_free_desc(struct ioptdesc *iopt)
+{
+ struct folio *folio = ioptdesc_folio(iopt);
+ const unsigned long pgcnt = 1UL << folio_order(folio);
+
+ mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, -pgcnt);
+ lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, -pgcnt);
+ folio_put(folio);
+}
+
+/**
+ * iommu_free_pages - free pages
+ * @virt: virtual address of the page to be freed.
+ *
+ * The page must have been allocated by iommu_alloc_pages_node_sz()
+ */
+void iommu_free_pages(void *virt)
+{
+ if (!virt)
+ return;
+ __iommu_free_desc(virt_to_ioptdesc(virt));
+}
+EXPORT_SYMBOL_GPL(iommu_free_pages);
+
+/**
+ * iommu_put_pages_list - free a list of pages.
+ * @list: The list of pages to be freed
+ *
+ * Frees a list of pages allocated by iommu_alloc_pages_node_sz(). On return the
+ * passed list is invalid; the caller must use IOMMU_PAGES_LIST_INIT to reinit
+ * the list if it expects to use it again.
+ */
+void iommu_put_pages_list(struct iommu_pages_list *list)
+{
+ struct ioptdesc *iopt, *tmp;
+
+ list_for_each_entry_safe(iopt, tmp, &list->pages, iopt_freelist_elm)
+ __iommu_free_desc(iopt);
+}
+EXPORT_SYMBOL_GPL(iommu_put_pages_list);
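Typical use of the list interface from a driver's teardown path — a sketch, not taken from any specific driver:

struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
void *table;

table = iommu_alloc_pages_node_sz(NUMA_NO_NODE, GFP_KERNEL, SZ_4K);
if (table)
	iommu_pages_list_add(&freelist, table);

/* ... once the IOTLB flush has retired all references to the table ... */
iommu_put_pages_list(&freelist);	/* list is invalid; reinit before reuse */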
diff --git a/drivers/iommu/iommu-pages.h b/drivers/iommu/iommu-pages.h
index 82ebf0033081..b3af2813ed0c 100644
--- a/drivers/iommu/iommu-pages.h
+++ b/drivers/iommu/iommu-pages.h
@@ -7,180 +7,95 @@
#ifndef __IOMMU_PAGES_H
#define __IOMMU_PAGES_H
-#include <linux/vmstat.h>
-#include <linux/gfp.h>
-#include <linux/mm.h>
-
-/*
- * All page allocations that should be reported to as "iommu-pagetables" to
- * userspace must use one of the functions below. This includes allocations of
- * page-tables and other per-iommu_domain configuration structures.
- *
- * This is necessary for the proper accounting as IOMMU state can be rather
- * large, i.e. multiple gigabytes in size.
- */
-
-/**
- * __iommu_alloc_account - account for newly allocated page.
- * @page: head struct page of the page.
- * @order: order of the page
- */
-static inline void __iommu_alloc_account(struct page *page, int order)
-{
- const long pgcnt = 1l << order;
-
- mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, pgcnt);
- mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, pgcnt);
-}
-
-/**
- * __iommu_free_account - account a page that is about to be freed.
- * @page: head struct page of the page.
- * @order: order of the page
- */
-static inline void __iommu_free_account(struct page *page, int order)
-{
- const long pgcnt = 1l << order;
-
- mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, -pgcnt);
- mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, -pgcnt);
-}
+#include <linux/iommu.h>
/**
- * __iommu_alloc_pages - allocate a zeroed page of a given order.
- * @gfp: buddy allocator flags
- * @order: page order
+ * struct ioptdesc - Memory descriptor for IOMMU page tables
+ * @iopt_freelist_elm: List element for a struct iommu_pages_list
*
- * returns the head struct page of the allocated page.
+ * This struct overlays struct page for now. Do not modify without a good
+ * understanding of the issues.
*/
-static inline struct page *__iommu_alloc_pages(gfp_t gfp, int order)
+struct ioptdesc {
+ unsigned long __page_flags;
+
+ struct list_head iopt_freelist_elm;
+ unsigned long __page_mapping;
+ pgoff_t __index;
+ void *_private;
+
+ unsigned int __page_type;
+ atomic_t __page_refcount;
+#ifdef CONFIG_MEMCG
+ unsigned long memcg_data;
+#endif
+};
+
+static inline struct ioptdesc *folio_ioptdesc(struct folio *folio)
{
- struct page *page;
-
- page = alloc_pages(gfp | __GFP_ZERO, order);
- if (unlikely(!page))
- return NULL;
-
- __iommu_alloc_account(page, order);
-
- return page;
+ return (struct ioptdesc *)folio;
}
-/**
- * __iommu_free_pages - free page of a given order
- * @page: head struct page of the page
- * @order: page order
- */
-static inline void __iommu_free_pages(struct page *page, int order)
+static inline struct folio *ioptdesc_folio(struct ioptdesc *iopt)
{
- if (!page)
- return;
-
- __iommu_free_account(page, order);
- __free_pages(page, order);
+ return (struct folio *)iopt;
}
-/**
- * iommu_alloc_pages_node - allocate a zeroed page of a given order from
- * specific NUMA node.
- * @nid: memory NUMA node id
- * @gfp: buddy allocator flags
- * @order: page order
- *
- * returns the virtual address of the allocated page
- */
-static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp, int order)
+static inline struct ioptdesc *virt_to_ioptdesc(void *virt)
{
- struct page *page = alloc_pages_node(nid, gfp | __GFP_ZERO, order);
-
- if (unlikely(!page))
- return NULL;
-
- __iommu_alloc_account(page, order);
-
- return page_address(page);
+ return folio_ioptdesc(virt_to_folio(virt));
}
-/**
- * iommu_alloc_pages - allocate a zeroed page of a given order
- * @gfp: buddy allocator flags
- * @order: page order
- *
- * returns the virtual address of the allocated page
- */
-static inline void *iommu_alloc_pages(gfp_t gfp, int order)
-{
- struct page *page = __iommu_alloc_pages(gfp, order);
-
- if (unlikely(!page))
- return NULL;
-
- return page_address(page);
-}
+void *iommu_alloc_pages_node_sz(int nid, gfp_t gfp, size_t size);
+void iommu_free_pages(void *virt);
+void iommu_put_pages_list(struct iommu_pages_list *list);
/**
- * iommu_alloc_page_node - allocate a zeroed page at specific NUMA node.
- * @nid: memory NUMA node id
- * @gfp: buddy allocator flags
- *
- * returns the virtual address of the allocated page
+ * iommu_pages_list_add - add the page to an iommu_pages_list
+ * @list: List to add the page to
+ * @virt: Address returned from iommu_alloc_pages_node_sz()
*/
-static inline void *iommu_alloc_page_node(int nid, gfp_t gfp)
+static inline void iommu_pages_list_add(struct iommu_pages_list *list,
+ void *virt)
{
- return iommu_alloc_pages_node(nid, gfp, 0);
+ list_add_tail(&virt_to_ioptdesc(virt)->iopt_freelist_elm, &list->pages);
}
/**
- * iommu_alloc_page - allocate a zeroed page
- * @gfp: buddy allocator flags
+ * iommu_pages_list_splice - Put all the pages in list @from into list @to
+ * @from: Source list of pages
+ * @to: Destination list of pages
*
- * returns the virtual address of the allocated page
+ * @from must be re-initialized after calling this function if it is to be
+ * used again.
*/
-static inline void *iommu_alloc_page(gfp_t gfp)
+static inline void iommu_pages_list_splice(struct iommu_pages_list *from,
+ struct iommu_pages_list *to)
{
- return iommu_alloc_pages(gfp, 0);
+ list_splice(&from->pages, &to->pages);
}
/**
- * iommu_free_pages - free page of a given order
- * @virt: virtual address of the page to be freed.
- * @order: page order
+ * iommu_pages_list_empty - True if the list is empty
+ * @list: List to check
*/
-static inline void iommu_free_pages(void *virt, int order)
+static inline bool iommu_pages_list_empty(struct iommu_pages_list *list)
{
- if (!virt)
- return;
-
- __iommu_free_pages(virt_to_page(virt), order);
+ return list_empty(&list->pages);
}
/**
- * iommu_free_page - free page
- * @virt: virtual address of the page to be freed.
- */
-static inline void iommu_free_page(void *virt)
-{
- iommu_free_pages(virt, 0);
-}
-
-/**
- * iommu_put_pages_list - free a list of pages.
- * @page: the head of the lru list to be freed.
+ * iommu_alloc_pages_sz - Allocate a zeroed page of a given size
+ * @gfp: buddy allocator flags
+ * @size: Memory size to allocate, this is rounded up to a power of 2
*
- * There are no locking requirement for these pages, as they are going to be
- * put on a free list as soon as refcount reaches 0. Pages are put on this LRU
- * list once they are removed from the IOMMU page tables. However, they can
- * still be access through debugfs.
+ * Returns the virtual address of the allocated page.
*/
-static inline void iommu_put_pages_list(struct list_head *page)
+static inline void *iommu_alloc_pages_sz(gfp_t gfp, size_t size)
{
- while (!list_empty(page)) {
- struct page *p = list_entry(page->prev, struct page, lru);
-
- list_del(&p->lru);
- __iommu_free_account(p, 0);
- put_page(p);
- }
+ return iommu_alloc_pages_node_sz(NUMA_NO_NODE, gfp, size);
}
#endif /* __IOMMU_PAGES_H */
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index ab18bc494eef..1a51cfd82808 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -63,9 +63,6 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
* reference is taken. Caller must call iommu_sva_unbind_device()
* to release each reference.
*
- * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
- * initialize the required SVA features.
- *
* On error, returns an ERR_PTR value.
*/
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
@@ -299,15 +296,12 @@ static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
const struct iommu_ops *ops = dev_iommu_ops(dev);
struct iommu_domain *domain;
- if (ops->domain_alloc_sva) {
- domain = ops->domain_alloc_sva(dev, mm);
- if (IS_ERR(domain))
- return domain;
- } else {
- domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
- if (!domain)
- return ERR_PTR(-ENOMEM);
- }
+ if (!ops->domain_alloc_sva)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ domain = ops->domain_alloc_sva(dev, mm);
+ if (IS_ERR(domain))
+ return domain;
domain->type = IOMMU_DOMAIN_SVA;
domain->cookie_type = IOMMU_COOKIE_SVA;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 4f91a740c15f..a4b606c591da 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -277,6 +277,8 @@ int iommu_device_register(struct iommu_device *iommu,
err = bus_iommu_probe(iommu_buses[i]);
if (err)
iommu_device_unregister(iommu);
+ else
+ WRITE_ONCE(iommu->ready, true);
return err;
}
EXPORT_SYMBOL_GPL(iommu_device_register);
@@ -422,13 +424,15 @@ static int iommu_init_device(struct device *dev)
* is buried in the bus dma_configure path. Properly unpicking that is
* still a big job, so for now just invoke the whole thing. The device
* already having a driver bound means dma_configure has already run and
- * either found no IOMMU to wait for, or we're in its replay call right
- * now, so either way there's no point calling it again.
+ * found no IOMMU to wait for, so there's no point calling it again.
*/
- if (!dev->driver && dev->bus->dma_configure) {
+ if (!dev->iommu->fwspec && !dev->driver && dev->bus->dma_configure) {
mutex_unlock(&iommu_probe_device_lock);
dev->bus->dma_configure(dev);
mutex_lock(&iommu_probe_device_lock);
+ /* If another instance finished the job for us, skip it */
+ if (!dev->iommu || dev->iommu_group)
+ return -ENODEV;
}
/*
* At this point, relevant devices either now have a fwspec which will
@@ -1629,15 +1633,13 @@ static struct iommu_domain *__iommu_alloc_identity_domain(struct device *dev)
if (ops->identity_domain)
return ops->identity_domain;
- /* Older drivers create the identity domain via ops->domain_alloc() */
- if (!ops->domain_alloc)
+ if (ops->domain_alloc_identity) {
+ domain = ops->domain_alloc_identity(dev);
+ if (IS_ERR(domain))
+ return domain;
+ } else {
return ERR_PTR(-EOPNOTSUPP);
-
- domain = ops->domain_alloc(IOMMU_DOMAIN_IDENTITY);
- if (IS_ERR(domain))
- return domain;
- if (!domain)
- return ERR_PTR(-ENOMEM);
+ }
iommu_domain_init(domain, IOMMU_DOMAIN_IDENTITY, ops);
return domain;
@@ -2025,8 +2027,10 @@ __iommu_paging_domain_alloc_flags(struct device *dev, unsigned int type,
domain = ops->domain_alloc_paging(dev);
else if (ops->domain_alloc_paging_flags)
domain = ops->domain_alloc_paging_flags(dev, flags, NULL);
+#if IS_ENABLED(CONFIG_FSL_PAMU)
else if (ops->domain_alloc && !flags)
domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
+#endif
else
return ERR_PTR(-EOPNOTSUPP);
@@ -2204,6 +2208,19 @@ static void *iommu_make_pasid_array_entry(struct iommu_domain *domain,
return xa_tag_pointer(domain, IOMMU_PASID_ARRAY_DOMAIN);
}
+static bool domain_iommu_ops_compatible(const struct iommu_ops *ops,
+ struct iommu_domain *domain)
+{
+ if (domain->owner == ops)
+ return true;
+
+ /* For static domains, owner isn't set. */
+ if (domain == ops->blocked_domain || domain == ops->identity_domain)
+ return true;
+
+ return false;
+}
+
static int __iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group)
{
@@ -2214,7 +2231,8 @@ static int __iommu_attach_group(struct iommu_domain *domain,
return -EBUSY;
dev = iommu_group_first_dev(group);
- if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner)
+ if (!dev_has_iommu(dev) ||
+ !domain_iommu_ops_compatible(dev_iommu_ops(dev), domain))
return -EINVAL;
return __iommu_group_set_domain(group, domain);
@@ -2395,6 +2413,7 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
unsigned int pgsize_idx, pgsize_idx_next;
unsigned long pgsizes;
size_t offset, pgsize, pgsize_next;
+ size_t offset_end;
unsigned long addr_merge = paddr | iova;
/* Page sizes supported by the hardware and small enough for @size */
@@ -2435,7 +2454,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
* If size is big enough to accommodate the larger page, reduce
* the number of smaller pages.
*/
- if (offset + pgsize_next <= size)
+ if (!check_add_overflow(offset, pgsize_next, &offset_end) &&
+ offset_end <= size)
size = offset;
out_set_count:
@@ -2443,8 +2463,8 @@ out_set_count:
return pgsize;
}
-static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
const struct iommu_domain_ops *ops = domain->ops;
unsigned long orig_iova = iova;
@@ -2453,12 +2473,19 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t orig_paddr = paddr;
int ret = 0;
+ might_sleep_if(gfpflags_allow_blocking(gfp));
+
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
return -EINVAL;
if (WARN_ON(!ops->map_pages || domain->pgsize_bitmap == 0UL))
return -ENODEV;
+ /* Discourage passing strange GFP flags */
+ if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
+ __GFP_HIGHMEM)))
+ return -EINVAL;
+
/* find out the minimum page size supported */
min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
@@ -2506,31 +2533,27 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
return ret;
}
-int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+int iommu_sync_map(struct iommu_domain *domain, unsigned long iova, size_t size)
{
const struct iommu_domain_ops *ops = domain->ops;
- int ret;
- might_sleep_if(gfpflags_allow_blocking(gfp));
-
- /* Discourage passing strange GFP flags */
- if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
- __GFP_HIGHMEM)))
- return -EINVAL;
+ if (!ops->iotlb_sync_map)
+ return 0;
+ return ops->iotlb_sync_map(domain, iova, size);
+}
- ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
- if (ret == 0 && ops->iotlb_sync_map) {
- ret = ops->iotlb_sync_map(domain, iova, size);
- if (ret)
- goto out_err;
- }
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+{
+ int ret;
- return ret;
+ ret = iommu_map_nosync(domain, iova, paddr, size, prot, gfp);
+ if (ret)
+ return ret;
-out_err:
- /* undo mappings already done */
- iommu_unmap(domain, iova, size);
+ ret = iommu_sync_map(domain, iova, size);
+ if (ret)
+ iommu_unmap(domain, iova, size);
return ret;
}
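Splitting the sync out of the map path lets callers batch many mappings under a single IOTLB sync, with iommu_map() preserved as the map-then-sync convenience wrapper. A hedged sketch of the batching pattern this enables (the loop and function name are illustrative; the three APIs are the ones introduced here):

static int map_batch(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t *pages, unsigned int npages, size_t pgsz)
{
	unsigned int i;
	int ret;

	for (i = 0; i < npages; i++) {
		ret = iommu_map_nosync(domain, iova + i * pgsz, pages[i],
				       pgsz, IOMMU_READ | IOMMU_WRITE,
				       GFP_KERNEL);
		if (ret)
			goto err_unmap;
	}

	/* One IOTLB sync for the whole range instead of one per page */
	ret = iommu_sync_map(domain, iova, npages * pgsz);
	if (ret)
		goto err_unmap;
	return 0;

err_unmap:
	iommu_unmap(domain, iova, i * pgsz);
	return ret;
}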
@@ -2618,6 +2641,25 @@ size_t iommu_unmap(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_unmap);
+/**
+ * iommu_unmap_fast() - Remove mappings from a range of IOVA without IOTLB sync
+ * @domain: Domain to manipulate
+ * @iova: IO virtual address to start
+ * @size: Length of the range starting from @iova
+ * @iotlb_gather: range information for a pending IOTLB flush
+ *
+ * iommu_unmap_fast() will remove a translation created by iommu_map().
+ * It can't subdivide a mapping created by iommu_map(), so it should be
+ * called with IOVA ranges that match what was passed to iommu_map(). The
+ * range can aggregate contiguous iommu_map() calls so long as no individual
+ * range is split.
+ *
+ * Basically iommu_unmap_fast() is the same as iommu_unmap() but for callers
+ * which manage the IOTLB flushing externally to perform a batched sync.
+ *
+ * Returns: Number of bytes of IOVA unmapped. iova + res will be the point
+ * at which unmapping stopped.
+ */
size_t iommu_unmap_fast(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather)
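A minimal sketch of the batched flush the kernel-doc describes, assuming the existing iommu_iotlb_gather helpers (the wrapper name is hypothetical):

static size_t unmap_batched(struct iommu_domain *domain,
			    unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather gather;
	size_t unmapped;

	iommu_iotlb_gather_init(&gather);
	unmapped = iommu_unmap_fast(domain, iova, size, &gather);
	/* Flush everything the driver gathered above in one go */
	iommu_iotlb_sync(domain, &gather);

	return unmapped;
}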
@@ -2630,26 +2672,17 @@ ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot,
gfp_t gfp)
{
- const struct iommu_domain_ops *ops = domain->ops;
size_t len = 0, mapped = 0;
phys_addr_t start;
unsigned int i = 0;
int ret;
- might_sleep_if(gfpflags_allow_blocking(gfp));
-
- /* Discourage passing strange GFP flags */
- if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
- __GFP_HIGHMEM)))
- return -EINVAL;
-
while (i <= nents) {
phys_addr_t s_phys = sg_phys(sg);
if (len && s_phys != start + len) {
- ret = __iommu_map(domain, iova + mapped, start,
+ ret = iommu_map_nosync(domain, iova + mapped, start,
len, prot, gfp);
-
if (ret)
goto out_err;
@@ -2672,11 +2705,10 @@ next:
sg = sg_next(sg);
}
- if (ops->iotlb_sync_map) {
- ret = ops->iotlb_sync_map(domain, iova, mapped);
- if (ret)
- goto out_err;
- }
+ ret = iommu_sync_map(domain, iova, mapped);
+ if (ret)
+ goto out_err;
+
return mapped;
out_err:
@@ -2830,31 +2862,39 @@ bool iommu_default_passthrough(void)
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);
-const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
+static const struct iommu_device *iommu_from_fwnode(const struct fwnode_handle *fwnode)
{
- const struct iommu_ops *ops = NULL;
- struct iommu_device *iommu;
+ const struct iommu_device *iommu, *ret = NULL;
spin_lock(&iommu_device_lock);
list_for_each_entry(iommu, &iommu_device_list, list)
if (iommu->fwnode == fwnode) {
- ops = iommu->ops;
+ ret = iommu;
break;
}
spin_unlock(&iommu_device_lock);
- return ops;
+ return ret;
+}
+
+const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
+{
+ const struct iommu_device *iommu = iommu_from_fwnode(fwnode);
+
+ return iommu ? iommu->ops : NULL;
}
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode)
{
- const struct iommu_ops *ops = iommu_ops_from_fwnode(iommu_fwnode);
+ const struct iommu_device *iommu = iommu_from_fwnode(iommu_fwnode);
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- if (!ops)
+ if (!iommu)
return driver_deferred_probe_check_state(dev);
+ if (!dev->iommu && !READ_ONCE(iommu->ready))
+ return -EPROBE_DEFER;
if (fwspec)
- return ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL;
+ return iommu->ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL;
if (!dev_iommu_get(dev))
return -ENOMEM;
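The READ_ONCE() pairs with a write on the registration side that is not visible in this hunk; a hedged sketch of the assumed publishing step, once the driver can accept probe_device() calls:

	/* Assumed counterpart at the end of successful registration */
	WRITE_ONCE(iommu->ready, true);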
@@ -2908,38 +2948,6 @@ int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids)
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
-/*
- * Per device IOMMU features.
- */
-int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
-{
- if (dev_has_iommu(dev)) {
- const struct iommu_ops *ops = dev_iommu_ops(dev);
-
- if (ops->dev_enable_feat)
- return ops->dev_enable_feat(dev, feat);
- }
-
- return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
-
-/*
- * The device drivers should do the necessary cleanups before calling this.
- */
-int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
-{
- if (dev_has_iommu(dev)) {
- const struct iommu_ops *ops = dev_iommu_ops(dev);
-
- if (ops->dev_disable_feat)
- return ops->dev_disable_feat(dev, feat);
- }
-
- return -EBUSY;
-}
-EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
-
/**
* iommu_setup_default_domain - Set the default_domain for the group
* @group: Group to change
@@ -3366,10 +3374,12 @@ static int __iommu_set_group_pasid(struct iommu_domain *domain,
int ret;
for_each_group_device(group, device) {
- ret = domain->ops->set_dev_pasid(domain, device->dev,
- pasid, old);
- if (ret)
- goto err_revert;
+ if (device->dev->iommu->max_pasids > 0) {
+ ret = domain->ops->set_dev_pasid(domain, device->dev,
+ pasid, old);
+ if (ret)
+ goto err_revert;
+ }
}
return 0;
@@ -3379,15 +3389,18 @@ err_revert:
for_each_group_device(group, device) {
if (device == last_gdev)
break;
- /*
- * If no old domain, undo the succeeded devices/pasid.
- * Otherwise, rollback the succeeded devices/pasid to the old
- * domain. And it is a driver bug to fail attaching with a
- * previously good domain.
- */
- if (!old || WARN_ON(old->ops->set_dev_pasid(old, device->dev,
+ if (device->dev->iommu->max_pasids > 0) {
+ /*
+ * If there is no old domain, undo the devices/pasid that
+ * succeeded. Otherwise, roll the succeeded devices/pasid
+ * back to the old domain; it is a driver bug to fail
+ * attaching with a previously good domain.
+ */
+ if (!old ||
+ WARN_ON(old->ops->set_dev_pasid(old, device->dev,
pasid, domain)))
- iommu_remove_dev_pasid(device->dev, pasid, domain);
+ iommu_remove_dev_pasid(device->dev, pasid, domain);
+ }
}
return ret;
}
@@ -3398,8 +3411,10 @@ static void __iommu_remove_group_pasid(struct iommu_group *group,
{
struct group_device *device;
- for_each_group_device(group, device)
- iommu_remove_dev_pasid(device->dev, pasid, domain);
+ for_each_group_device(group, device) {
+ if (device->dev->iommu->max_pasids > 0)
+ iommu_remove_dev_pasid(device->dev, pasid, domain);
+ }
}
/*
@@ -3435,12 +3450,19 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
!ops->blocked_domain->ops->set_dev_pasid)
return -EOPNOTSUPP;
- if (ops != domain->owner || pasid == IOMMU_NO_PASID)
+ if (!domain_iommu_ops_compatible(ops, domain) ||
+ pasid == IOMMU_NO_PASID)
return -EINVAL;
mutex_lock(&group->mutex);
for_each_group_device(group, device) {
- if (pasid >= device->dev->iommu->max_pasids) {
+ /*
+ * Skip PASID validation for devices without PASID support
+ * (max_pasids = 0). These devices cannot issue transactions
+ * with PASID, so they don't affect the group's PASID usage.
+ */
+ if ((device->dev->iommu->max_pasids > 0) &&
+ (pasid >= device->dev->iommu->max_pasids)) {
ret = -EINVAL;
goto out_unlock;
}
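The same max_pasids > 0 test now gates __iommu_set_group_pasid(), its error rollback, and __iommu_remove_group_pasid(). A hedged helper distilling the rule (the name is hypothetical; the field is the one tested above):

static bool dev_participates_in_pasid(struct device *dev)
{
	/* Only devices that advertised PASID support at probe time */
	return dev->iommu && dev->iommu->max_pasids > 0;
}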
@@ -3511,7 +3533,7 @@ int iommu_replace_device_pasid(struct iommu_domain *domain,
if (!domain->ops->set_dev_pasid)
return -EOPNOTSUPP;
- if (dev_iommu_ops(dev) != domain->owner ||
+ if (!domain_iommu_ops_compatible(dev_iommu_ops(dev), domain) ||
pasid == IOMMU_NO_PASID || !handle)
return -EINVAL;
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 2111bad72c72..86244403b532 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -221,7 +221,6 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
refcount_inc(&idev->obj.users);
/* igroup refcount moves into iommufd_device */
idev->igroup = igroup;
- mutex_init(&idev->iopf_lock);
/*
* If the caller fails after this success it must call
@@ -425,6 +424,25 @@ static int iommufd_hwpt_pasid_compat(struct iommufd_hw_pagetable *hwpt,
return 0;
}
+static bool iommufd_hwpt_compatible_device(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev)
+{
+ struct pci_dev *pdev;
+
+ if (!hwpt->fault || !dev_is_pci(idev->dev))
+ return true;
+
+ /*
+ * Once we turn on PCI/PRI support for VF, the response failure code
+ * should not be forwarded to the hardware due to PRI being a shared
+ * resource between PF and VFs. There is no coordination for this
+ * shared capability. This waits for a vPRI reset to recover.
+ */
+ pdev = to_pci_dev(idev->dev);
+
+ return (!pdev->is_virtfn || !pci_pri_supported(pdev));
+}
+
static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev,
ioasid_t pasid)
@@ -432,6 +450,9 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
struct iommufd_attach_handle *handle;
int rc;
+ if (!iommufd_hwpt_compatible_device(hwpt, idev))
+ return -EINVAL;
+
rc = iommufd_hwpt_pasid_compat(hwpt, idev, pasid);
if (rc)
return rc;
@@ -440,12 +461,6 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
if (!handle)
return -ENOMEM;
- if (hwpt->fault) {
- rc = iommufd_fault_iopf_enable(idev);
- if (rc)
- goto out_free_handle;
- }
-
handle->idev = idev;
if (pasid == IOMMU_NO_PASID)
rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
@@ -454,13 +469,10 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
rc = iommu_attach_device_pasid(hwpt->domain, idev->dev, pasid,
&handle->handle);
if (rc)
- goto out_disable_iopf;
+ goto out_free_handle;
return 0;
-out_disable_iopf:
- if (hwpt->fault)
- iommufd_fault_iopf_disable(idev);
out_free_handle:
kfree(handle);
return rc;
@@ -492,10 +504,7 @@ static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
else
iommu_detach_device_pasid(hwpt->domain, idev->dev, pasid);
- if (hwpt->fault) {
- iommufd_auto_response_faults(hwpt, handle);
- iommufd_fault_iopf_disable(idev);
- }
+ iommufd_auto_response_faults(hwpt, handle);
kfree(handle);
}
@@ -507,6 +516,9 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
struct iommufd_attach_handle *handle, *old_handle;
int rc;
+ if (!iommufd_hwpt_compatible_device(hwpt, idev))
+ return -EINVAL;
+
rc = iommufd_hwpt_pasid_compat(hwpt, idev, pasid);
if (rc)
return rc;
@@ -517,12 +529,6 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
if (!handle)
return -ENOMEM;
- if (hwpt->fault && !old->fault) {
- rc = iommufd_fault_iopf_enable(idev);
- if (rc)
- goto out_free_handle;
- }
-
handle->idev = idev;
if (pasid == IOMMU_NO_PASID)
rc = iommu_replace_group_handle(idev->igroup->group,
@@ -531,20 +537,13 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
rc = iommu_replace_device_pasid(hwpt->domain, idev->dev,
pasid, &handle->handle);
if (rc)
- goto out_disable_iopf;
+ goto out_free_handle;
- if (old->fault) {
- iommufd_auto_response_faults(hwpt, old_handle);
- if (!hwpt->fault)
- iommufd_fault_iopf_disable(idev);
- }
+ iommufd_auto_response_faults(hwpt, old_handle);
kfree(old_handle);
return 0;
-out_disable_iopf:
- if (hwpt->fault && !old->fault)
- iommufd_fault_iopf_disable(idev);
out_free_handle:
kfree(handle);
return rc;
diff --git a/drivers/iommu/iommufd/eventq.c b/drivers/iommu/iommufd/eventq.c
index f39cf0797347..e373b9eec7f5 100644
--- a/drivers/iommu/iommufd/eventq.c
+++ b/drivers/iommu/iommufd/eventq.c
@@ -9,8 +9,6 @@
#include <linux/iommufd.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/pci.h>
-#include <linux/pci-ats.h>
#include <linux/poll.h>
#include <uapi/linux/iommufd.h>
@@ -18,50 +16,6 @@
#include "iommufd_private.h"
/* IOMMUFD_OBJ_FAULT Functions */
-
-int iommufd_fault_iopf_enable(struct iommufd_device *idev)
-{
- struct device *dev = idev->dev;
- int ret;
-
- /*
- * Once we turn on PCI/PRI support for VF, the response failure code
- * should not be forwarded to the hardware due to PRI being a shared
- * resource between PF and VFs. There is no coordination for this
- * shared capability. This waits for a vPRI reset to recover.
- */
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
-
- if (pdev->is_virtfn && pci_pri_supported(pdev))
- return -EINVAL;
- }
-
- mutex_lock(&idev->iopf_lock);
- /* Device iopf has already been on. */
- if (++idev->iopf_enabled > 1) {
- mutex_unlock(&idev->iopf_lock);
- return 0;
- }
-
- ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
- if (ret)
- --idev->iopf_enabled;
- mutex_unlock(&idev->iopf_lock);
-
- return ret;
-}
-
-void iommufd_fault_iopf_disable(struct iommufd_device *idev)
-{
- mutex_lock(&idev->iopf_lock);
- if (!WARN_ON(idev->iopf_enabled == 0)) {
- if (--idev->iopf_enabled == 0)
- iommu_dev_disable_feature(idev->dev, IOMMU_DEV_FEAT_IOPF);
- }
- mutex_unlock(&idev->iopf_lock);
-}
-
void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
struct iommufd_attach_handle *handle)
{
@@ -70,7 +24,7 @@ void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
struct list_head free_list;
unsigned long index;
- if (!fault)
+ if (!fault || !handle)
return;
INIT_LIST_HEAD(&free_list);
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 80e8c76d25f2..9ccc83341f32 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -425,9 +425,6 @@ struct iommufd_device {
/* always the physical device */
struct device *dev;
bool enforce_cache_coherency;
- /* protect iopf_enabled counter */
- struct mutex iopf_lock;
- unsigned int iopf_enabled;
};
static inline struct iommufd_device *
@@ -506,9 +503,6 @@ iommufd_get_fault(struct iommufd_ucmd *ucmd, u32 id)
int iommufd_fault_alloc(struct iommufd_ucmd *ucmd);
void iommufd_fault_destroy(struct iommufd_object *obj);
int iommufd_fault_iopf_handler(struct iopf_group *group);
-
-int iommufd_fault_iopf_enable(struct iommufd_device *idev);
-void iommufd_fault_iopf_disable(struct iommufd_device *idev);
void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
struct iommufd_attach_handle *handle);
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index 18d9a216eb30..6bd0abf9a641 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -58,6 +58,9 @@ enum {
MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
};
+static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain);
+static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain);
+
/*
* Syzkaller has trouble randomizing the correct iova to use since it is linked
* to the map ioctl's output, and it has no idea about that. So, simplify things.
@@ -168,6 +171,8 @@ struct mock_dev {
int id;
u32 cache[MOCK_DEV_CACHE_NUM];
atomic_t pasid_1024_fake_error;
+ unsigned int iopf_refcount;
+ struct iommu_domain *domain;
};
static inline struct mock_dev *to_mock_dev(struct device *dev)
@@ -221,6 +226,13 @@ static int mock_domain_nop_attach(struct iommu_domain *domain,
up_write(&mdev->viommu_rwsem);
}
+ rc = mock_dev_enable_iopf(dev, domain);
+ if (rc)
+ return rc;
+
+ mock_dev_disable_iopf(dev, mdev->domain);
+ mdev->domain = domain;
+
return 0;
}
@@ -229,6 +241,7 @@ static int mock_domain_set_dev_pasid_nop(struct iommu_domain *domain,
struct iommu_domain *old)
{
struct mock_dev *mdev = to_mock_dev(dev);
+ int rc;
/*
* Per the first attach with pasid 1024, set the
@@ -256,6 +269,12 @@ static int mock_domain_set_dev_pasid_nop(struct iommu_domain *domain,
}
}
+ rc = mock_dev_enable_iopf(dev, domain);
+ if (rc)
+ return rc;
+
+ mock_dev_disable_iopf(dev, old);
+
return 0;
}
@@ -610,22 +629,42 @@ static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt
{
}
-static int mock_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
+static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain)
{
- if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
+ struct mock_dev *mdev = to_mock_dev(dev);
+ int ret;
+
+ if (!domain || !domain->iopf_handler)
+ return 0;
+
+ if (!mock_iommu_iopf_queue)
return -ENODEV;
- return iopf_queue_add_device(mock_iommu_iopf_queue, dev);
+ if (mdev->iopf_refcount) {
+ mdev->iopf_refcount++;
+ return 0;
+ }
+
+ ret = iopf_queue_add_device(mock_iommu_iopf_queue, dev);
+ if (ret)
+ return ret;
+
+ mdev->iopf_refcount = 1;
+
+ return 0;
}
-static int mock_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
+static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain)
{
- if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
- return -ENODEV;
+ struct mock_dev *mdev = to_mock_dev(dev);
- iopf_queue_remove_device(mock_iommu_iopf_queue, dev);
+ if (!domain || !domain->iopf_handler)
+ return;
- return 0;
+ if (--mdev->iopf_refcount)
+ return;
+
+ iopf_queue_remove_device(mock_iommu_iopf_queue, dev);
}
static void mock_viommu_destroy(struct iommufd_viommu *viommu)
@@ -770,8 +809,6 @@ static const struct iommu_ops mock_ops = {
.device_group = generic_device_group,
.probe_device = mock_probe_device,
.page_response = mock_domain_page_response,
- .dev_enable_feat = mock_dev_enable_feat,
- .dev_disable_feat = mock_dev_disable_feat,
.user_pasid_table = true,
.viommu_alloc = mock_viommu_alloc,
.default_domain_ops =
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index e424b279a8cd..90341b24a811 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1090,7 +1090,8 @@ static int ipmmu_probe(struct platform_device *pdev)
if (mmu->features->has_cache_leaf_nodes && ipmmu_is_root(mmu))
return 0;
- ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, dev_name(&pdev->dev));
+ ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, "%s",
+ dev_name(&pdev->dev));
if (ret)
return ret;
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index df98d0c65f54..cb95fecf6016 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -1550,6 +1550,31 @@ static const struct mtk_iommu_plat_data mt6795_data = {
.larbid_remap = {{0}, {1}, {2}, {3}, {4}}, /* Linear mapping. */
};
+static const unsigned int mt8192_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
+ [0] = {~0, ~0}, /* Region0: larb0/1 */
+ [1] = {0, 0, 0, 0, ~0, ~0, 0, ~0}, /* Region1: larb4/5/7 */
+ [2] = {0, 0, ~0, 0, 0, 0, 0, 0, /* Region2: larb2/9/11/13/14/16/17/18/19/20 */
+ 0, ~0, 0, ~0, 0, ~(u32)(BIT(9) | BIT(10)), ~(u32)(BIT(4) | BIT(5)), 0,
+ ~0, ~0, ~0, ~0, ~0},
+ [3] = {0},
+ [4] = {[13] = BIT(9) | BIT(10)}, /* larb13 port9/10 */
+ [5] = {[14] = BIT(4) | BIT(5)}, /* larb14 port4/5 */
+};
+
+static const struct mtk_iommu_plat_data mt6893_data = {
+ .m4u_plat = M4U_MT8192,
+ .flags = HAS_BCLK | OUT_ORDER_WR_EN | HAS_SUB_COMM_2BITS |
+ WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .iova_region = mt8192_multi_dom,
+ .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+ .iova_region_larb_msk = mt8192_larb_region_msk,
+ .larbid_remap = {{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20},
+ {0, 14, 16}, {0, 13, 18, 17}},
+};
+
static const struct mtk_iommu_plat_data mt8167_data = {
.m4u_plat = M4U_MT8167,
.flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM,
@@ -1673,17 +1698,6 @@ static const struct mtk_iommu_plat_data mt8188_data_vpp = {
27, 28 /* ccu0 */, MTK_INVALID_LARBID}, {4, 6}},
};
-static const unsigned int mt8192_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
- [0] = {~0, ~0}, /* Region0: larb0/1 */
- [1] = {0, 0, 0, 0, ~0, ~0, 0, ~0}, /* Region1: larb4/5/7 */
- [2] = {0, 0, ~0, 0, 0, 0, 0, 0, /* Region2: larb2/9/11/13/14/16/17/18/19/20 */
- 0, ~0, 0, ~0, 0, ~(u32)(BIT(9) | BIT(10)), ~(u32)(BIT(4) | BIT(5)), 0,
- ~0, ~0, ~0, ~0, ~0},
- [3] = {0},
- [4] = {[13] = BIT(9) | BIT(10)}, /* larb13 port9/10 */
- [5] = {[14] = BIT(4) | BIT(5)}, /* larb14 port4/5 */
-};
-
static const struct mtk_iommu_plat_data mt8192_data = {
.m4u_plat = M4U_MT8192,
.flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN |
@@ -1777,6 +1791,7 @@ static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
{ .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
{ .compatible = "mediatek,mt6795-m4u", .data = &mt6795_data},
+ { .compatible = "mediatek,mt6893-iommu-mm", .data = &mt6893_data},
{ .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
diff --git a/drivers/iommu/riscv/Makefile b/drivers/iommu/riscv/Makefile
index f54c9ed17d41..b5929f9f23e6 100644
--- a/drivers/iommu/riscv/Makefile
+++ b/drivers/iommu/riscv/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_RISCV_IOMMU) += iommu.o iommu-platform.o
+obj-y += iommu.o iommu-platform.o
obj-$(CONFIG_RISCV_IOMMU_PCI) += iommu-pci.o
diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c
index 8f049d4a0e2c..bb57092ca901 100644
--- a/drivers/iommu/riscv/iommu.c
+++ b/drivers/iommu/riscv/iommu.c
@@ -48,14 +48,13 @@ static DEFINE_IDA(riscv_iommu_pscids);
/* Device resource-managed allocations */
struct riscv_iommu_devres {
void *addr;
- int order;
};
static void riscv_iommu_devres_pages_release(struct device *dev, void *res)
{
struct riscv_iommu_devres *devres = res;
- iommu_free_pages(devres->addr, devres->order);
+ iommu_free_pages(devres->addr);
}
static int riscv_iommu_devres_pages_match(struct device *dev, void *res, void *p)
@@ -66,13 +65,14 @@ static int riscv_iommu_devres_pages_match(struct device *dev, void *res, void *p
return devres->addr == target->addr;
}
-static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu, int order)
+static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu,
+ unsigned int size)
{
struct riscv_iommu_devres *devres;
void *addr;
- addr = iommu_alloc_pages_node(dev_to_node(iommu->dev),
- GFP_KERNEL_ACCOUNT, order);
+ addr = iommu_alloc_pages_node_sz(dev_to_node(iommu->dev),
+ GFP_KERNEL_ACCOUNT, size);
if (unlikely(!addr))
return NULL;
@@ -80,12 +80,11 @@ static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu, int order)
sizeof(struct riscv_iommu_devres), GFP_KERNEL);
if (unlikely(!devres)) {
- iommu_free_pages(addr, order);
+ iommu_free_pages(addr);
return NULL;
}
devres->addr = addr;
- devres->order = order;
devres_add(iommu->dev, devres);
@@ -163,9 +162,9 @@ static int riscv_iommu_queue_alloc(struct riscv_iommu_device *iommu,
} else {
do {
const size_t queue_size = entry_size << (logsz + 1);
- const int order = get_order(queue_size);
- queue->base = riscv_iommu_get_pages(iommu, order);
+ queue->base = riscv_iommu_get_pages(
+ iommu, max(queue_size, SZ_4K));
queue->phys = __pa(queue->base);
} while (!queue->base && logsz-- > 0);
}
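The allocator now takes a byte count instead of a page order, so an odd-sized queue no longer gets rounded up to the next power-of-two order. A hedged before/after, with nid and queue_size standing in for the surrounding context:

	/* old: size rounded up to a power-of-two page order */
	base = iommu_alloc_pages_node(nid, GFP_KERNEL_ACCOUNT,
				      get_order(queue_size));

	/* new: byte count, never less than one 4K page */
	base = iommu_alloc_pages_node_sz(nid, GFP_KERNEL_ACCOUNT,
					 max(queue_size, SZ_4K));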
@@ -620,7 +619,7 @@ static struct riscv_iommu_dc *riscv_iommu_get_dc(struct riscv_iommu_device *iomm
break;
}
- ptr = riscv_iommu_get_pages(iommu, 0);
+ ptr = riscv_iommu_get_pages(iommu, SZ_4K);
if (!ptr)
return NULL;
@@ -700,7 +699,7 @@ static int riscv_iommu_iodir_alloc(struct riscv_iommu_device *iommu)
}
if (!iommu->ddt_root) {
- iommu->ddt_root = riscv_iommu_get_pages(iommu, 0);
+ iommu->ddt_root = riscv_iommu_get_pages(iommu, SZ_4K);
iommu->ddt_phys = __pa(iommu->ddt_root);
}
@@ -1087,7 +1086,8 @@ static void riscv_iommu_iotlb_sync(struct iommu_domain *iommu_domain,
#define _io_pte_entry(pn, prot) ((_PAGE_PFN_MASK & ((pn) << _PAGE_PFN_SHIFT)) | (prot))
static void riscv_iommu_pte_free(struct riscv_iommu_domain *domain,
- unsigned long pte, struct list_head *freelist)
+ unsigned long pte,
+ struct iommu_pages_list *freelist)
{
unsigned long *ptr;
int i;
@@ -1105,9 +1105,9 @@ static void riscv_iommu_pte_free(struct riscv_iommu_domain *domain,
}
if (freelist)
- list_add_tail(&virt_to_page(ptr)->lru, freelist);
+ iommu_pages_list_add(freelist, ptr);
else
- iommu_free_page(ptr);
+ iommu_free_pages(ptr);
}
static unsigned long *riscv_iommu_pte_alloc(struct riscv_iommu_domain *domain,
@@ -1144,13 +1144,14 @@ pte_retry:
* page table. This might race with other mappings, retry.
*/
if (_io_pte_none(pte)) {
- addr = iommu_alloc_page_node(domain->numa_node, gfp);
+ addr = iommu_alloc_pages_node_sz(domain->numa_node, gfp,
+ SZ_4K);
if (!addr)
return NULL;
old = pte;
pte = _io_pte_entry(virt_to_pfn(addr), _PAGE_TABLE);
if (cmpxchg_relaxed(ptr, old, pte) != old) {
- iommu_free_page(addr);
+ iommu_free_pages(addr);
goto pte_retry;
}
}
@@ -1194,7 +1195,7 @@ static int riscv_iommu_map_pages(struct iommu_domain *iommu_domain,
unsigned long *ptr;
unsigned long pte, old, pte_prot;
int rc = 0;
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
if (!(prot & IOMMU_WRITE))
pte_prot = _PAGE_BASE | _PAGE_READ;
@@ -1225,7 +1226,7 @@ static int riscv_iommu_map_pages(struct iommu_domain *iommu_domain,
*mapped = size;
- if (!list_empty(&freelist)) {
+ if (!iommu_pages_list_empty(&freelist)) {
/*
* In 1.0 spec version, the smallest scope we can use to
* invalidate all levels of page table (i.e. leaf and non-leaf)
@@ -1385,8 +1386,8 @@ static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev)
domain->numa_node = dev_to_node(iommu->dev);
domain->amo_enabled = !!(iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD);
domain->pgd_mode = pgd_mode;
- domain->pgd_root = iommu_alloc_page_node(domain->numa_node,
- GFP_KERNEL_ACCOUNT);
+ domain->pgd_root = iommu_alloc_pages_node_sz(domain->numa_node,
+ GFP_KERNEL_ACCOUNT, SZ_4K);
if (!domain->pgd_root) {
kfree(domain);
return ERR_PTR(-ENOMEM);
@@ -1395,7 +1396,7 @@ static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev)
domain->pscid = ida_alloc_range(&riscv_iommu_pscids, 1,
RISCV_IOMMU_MAX_PSCID, GFP_KERNEL);
if (domain->pscid < 0) {
- iommu_free_page(domain->pgd_root);
+ iommu_free_pages(domain->pgd_root);
kfree(domain);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index af4cc91b2bbf..22f74ba33a0e 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -730,14 +730,15 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
if (rk_dte_is_pt_valid(dte))
goto done;
- page_table = iommu_alloc_page(GFP_ATOMIC | rk_ops->gfp_flags);
+ page_table = iommu_alloc_pages_sz(GFP_ATOMIC | rk_ops->gfp_flags,
+ SPAGE_SIZE);
if (!page_table)
return ERR_PTR(-ENOMEM);
pt_dma = dma_map_single(rk_domain->dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(rk_domain->dma_dev, pt_dma)) {
dev_err(rk_domain->dma_dev, "DMA mapping error while allocating page table\n");
- iommu_free_page(page_table);
+ iommu_free_pages(page_table);
return ERR_PTR(-ENOMEM);
}
@@ -1062,7 +1063,8 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
* Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
* Allocate one 4 KiB page for each table.
*/
- rk_domain->dt = iommu_alloc_page(GFP_KERNEL | rk_ops->gfp_flags);
+ rk_domain->dt = iommu_alloc_pages_sz(GFP_KERNEL | rk_ops->gfp_flags,
+ SPAGE_SIZE);
if (!rk_domain->dt)
goto err_free_domain;
@@ -1086,7 +1088,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
return &rk_domain->domain;
err_free_dt:
- iommu_free_page(rk_domain->dt);
+ iommu_free_pages(rk_domain->dt);
err_free_domain:
kfree(rk_domain);
@@ -1107,13 +1109,13 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
u32 *page_table = phys_to_virt(pt_phys);
dma_unmap_single(rk_domain->dma_dev, pt_phys,
SPAGE_SIZE, DMA_TO_DEVICE);
- iommu_free_page(page_table);
+ iommu_free_pages(page_table);
}
}
dma_unmap_single(rk_domain->dma_dev, rk_domain->dt_dma,
SPAGE_SIZE, DMA_TO_DEVICE);
- iommu_free_page(rk_domain->dt);
+ iommu_free_pages(rk_domain->dt);
kfree(rk_domain);
}
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index e1c76e0f9c2b..433b59f43530 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -31,10 +31,21 @@ struct s390_domain {
unsigned long *dma_table;
spinlock_t list_lock;
struct rcu_head rcu;
+ u8 origin_type;
};
static struct iommu_domain blocking_domain;
+static inline unsigned int calc_rfx(dma_addr_t ptr)
+{
+ return ((unsigned long)ptr >> ZPCI_RF_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_rsx(dma_addr_t ptr)
+{
+ return ((unsigned long)ptr >> ZPCI_RS_SHIFT) & ZPCI_INDEX_MASK;
+}
+
static inline unsigned int calc_rtx(dma_addr_t ptr)
{
return ((unsigned long)ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
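The three region-level indices are successive bit fields of the IOVA above the segment and page-table bits. A hedged worked example, assuming the usual zPCI layout (ZPCI_RT_SHIFT = 31, ZPCI_RS_SHIFT = 42, ZPCI_RF_SHIFT = 53, ZPCI_INDEX_MASK = 0x7ff):

	dma_addr_t iova = 0x40080000000ULL;	/* bits 42 and 31 set */

	rtx = (iova >> 31) & 0x7ff;	/* = 1, from bit 31 */
	rsx = (iova >> 42) & 0x7ff;	/* = 1, from bit 42 */
	rfx = (iova >> 53) & 0x7ff;	/* = 0, no higher bits set */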
@@ -56,6 +67,20 @@ static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
*entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
}
+static inline void set_rf_rso(unsigned long *entry, phys_addr_t rso)
+{
+ *entry &= ZPCI_RTE_FLAG_MASK;
+ *entry |= (rso & ZPCI_RTE_ADDR_MASK);
+ *entry |= ZPCI_TABLE_TYPE_RFX;
+}
+
+static inline void set_rs_rto(unsigned long *entry, phys_addr_t rto)
+{
+ *entry &= ZPCI_RTE_FLAG_MASK;
+ *entry |= (rto & ZPCI_RTE_ADDR_MASK);
+ *entry |= ZPCI_TABLE_TYPE_RSX;
+}
+
static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
{
*entry &= ZPCI_RTE_FLAG_MASK;
@@ -70,6 +95,22 @@ static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
*entry |= ZPCI_TABLE_TYPE_SX;
}
+static inline void validate_rf_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry &= ~ZPCI_TABLE_OFFSET_MASK;
+ *entry |= ZPCI_TABLE_VALID;
+ *entry |= ZPCI_TABLE_LEN_RFX;
+}
+
+static inline void validate_rs_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry &= ~ZPCI_TABLE_OFFSET_MASK;
+ *entry |= ZPCI_TABLE_VALID;
+ *entry |= ZPCI_TABLE_LEN_RSX;
+}
+
static inline void validate_rt_entry(unsigned long *entry)
{
*entry &= ~ZPCI_TABLE_VALID_MASK;
@@ -120,6 +161,22 @@ static inline int pt_entry_isvalid(unsigned long entry)
return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}
+static inline unsigned long *get_rf_rso(unsigned long entry)
+{
+ if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RFX)
+ return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
+ else
+ return NULL;
+}
+
+static inline unsigned long *get_rs_rto(unsigned long entry)
+{
+ if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RSX)
+ return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
+ else
+ return NULL;
+}
+
static inline unsigned long *get_rt_sto(unsigned long entry)
{
if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
@@ -191,18 +248,59 @@ static void dma_free_seg_table(unsigned long entry)
dma_free_cpu_table(sto);
}
-static void dma_cleanup_tables(unsigned long *table)
+static void dma_free_rt_table(unsigned long entry)
{
+ unsigned long *rto = get_rs_rto(entry);
int rtx;
- if (!table)
+ for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
+ if (reg_entry_isvalid(rto[rtx]))
+ dma_free_seg_table(rto[rtx]);
+
+ dma_free_cpu_table(rto);
+}
+
+static void dma_free_rs_table(unsigned long entry)
+{
+ unsigned long *rso = get_rf_rso(entry);
+ int rsx;
+
+ for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++)
+ if (reg_entry_isvalid(rso[rsx]))
+ dma_free_rt_table(rso[rsx]);
+
+ dma_free_cpu_table(rso);
+}
+
+static void dma_cleanup_tables(struct s390_domain *domain)
+{
+ int rtx, rsx, rfx;
+
+ if (!domain->dma_table)
return;
- for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
- if (reg_entry_isvalid(table[rtx]))
- dma_free_seg_table(table[rtx]);
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RFX:
+ for (rfx = 0; rfx < ZPCI_TABLE_ENTRIES; rfx++)
+ if (reg_entry_isvalid(domain->dma_table[rfx]))
+ dma_free_rs_table(domain->dma_table[rfx]);
+ break;
+ case ZPCI_TABLE_TYPE_RSX:
+ for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++)
+ if (reg_entry_isvalid(domain->dma_table[rsx]))
+ dma_free_rt_table(domain->dma_table[rsx]);
+ break;
+ case ZPCI_TABLE_TYPE_RTX:
+ for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
+ if (reg_entry_isvalid(domain->dma_table[rtx]))
+ dma_free_seg_table(domain->dma_table[rtx]);
+ break;
+ default:
+ WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type);
+ return;
+ }
- dma_free_cpu_table(table);
+ dma_free_cpu_table(domain->dma_table);
}
static unsigned long *dma_alloc_page_table(gfp_t gfp)
@@ -218,6 +316,70 @@ static unsigned long *dma_alloc_page_table(gfp_t gfp)
return table;
}
+static unsigned long *dma_walk_rs_table(unsigned long *rso,
+ dma_addr_t dma_addr, gfp_t gfp)
+{
+ unsigned int rsx = calc_rsx(dma_addr);
+ unsigned long old_rse, rse;
+ unsigned long *rsep, *rto;
+
+ rsep = &rso[rsx];
+ rse = READ_ONCE(*rsep);
+ if (reg_entry_isvalid(rse)) {
+ rto = get_rs_rto(rse);
+ } else {
+ rto = dma_alloc_cpu_table(gfp);
+ if (!rto)
+ return NULL;
+
+ set_rs_rto(&rse, virt_to_phys(rto));
+ validate_rs_entry(&rse);
+ entry_clr_protected(&rse);
+
+ old_rse = cmpxchg(rsep, ZPCI_TABLE_INVALID, rse);
+ if (old_rse != ZPCI_TABLE_INVALID) {
+ /* Someone else was faster, use theirs */
+ dma_free_cpu_table(rto);
+ rto = get_rs_rto(old_rse);
+ }
+ }
+ return rto;
+}
+
+static unsigned long *dma_walk_rf_table(unsigned long *rfo,
+ dma_addr_t dma_addr, gfp_t gfp)
+{
+ unsigned int rfx = calc_rfx(dma_addr);
+ unsigned long old_rfe, rfe;
+ unsigned long *rfep, *rso;
+
+ rfep = &rfo[rfx];
+ rfe = READ_ONCE(*rfep);
+ if (reg_entry_isvalid(rfe)) {
+ rso = get_rf_rso(rfe);
+ } else {
+ rso = dma_alloc_cpu_table(gfp);
+ if (!rso)
+ return NULL;
+
+ set_rf_rso(&rfe, virt_to_phys(rso));
+ validate_rf_entry(&rfe);
+ entry_clr_protected(&rfe);
+
+ old_rfe = cmpxchg(rfep, ZPCI_TABLE_INVALID, rfe);
+ if (old_rfe != ZPCI_TABLE_INVALID) {
+ /* Someone else was faster, use theirs */
+ dma_free_cpu_table(rso);
+ rso = get_rf_rso(old_rfe);
+ }
+ }
+
+ if (!rso)
+ return NULL;
+
+ return dma_walk_rs_table(rso, dma_addr, gfp);
+}
+
static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
{
unsigned long old_rte, rte;
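Both new walkers install tables with the same lock-free pattern: allocate speculatively, publish with cmpxchg(), and free the copy that lost the race. A hedged distillation (make_valid_entry() and table_from_entry() are hypothetical stand-ins for the set/validate/get helpers above):

static unsigned long *install_table(unsigned long *slot, gfp_t gfp)
{
	unsigned long *tbl = dma_alloc_cpu_table(gfp);
	unsigned long entry, old;

	if (!tbl)
		return NULL;

	entry = make_valid_entry(virt_to_phys(tbl));
	old = cmpxchg(slot, ZPCI_TABLE_INVALID, entry);
	if (old != ZPCI_TABLE_INVALID) {
		/* Someone else was faster, use theirs */
		dma_free_cpu_table(tbl);
		return table_from_entry(old);
	}
	return tbl;
}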
@@ -271,11 +433,31 @@ static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
return pto;
}
-static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr, gfp_t gfp)
+static unsigned long *dma_walk_region_tables(struct s390_domain *domain,
+ dma_addr_t dma_addr, gfp_t gfp)
+{
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RFX:
+ return dma_walk_rf_table(domain->dma_table, dma_addr, gfp);
+ case ZPCI_TABLE_TYPE_RSX:
+ return dma_walk_rs_table(domain->dma_table, dma_addr, gfp);
+ case ZPCI_TABLE_TYPE_RTX:
+ return domain->dma_table;
+ default:
+ return NULL;
+ }
+}
+
+static unsigned long *dma_walk_cpu_trans(struct s390_domain *domain,
+ dma_addr_t dma_addr, gfp_t gfp)
{
- unsigned long *sto, *pto;
+ unsigned long *rto, *sto, *pto;
unsigned int rtx, sx, px;
+ rto = dma_walk_region_tables(domain, dma_addr, gfp);
+ if (!rto)
+ return NULL;
+
rtx = calc_rtx(dma_addr);
sto = dma_get_seg_table_origin(&rto[rtx], gfp);
if (!sto)
@@ -329,9 +511,25 @@ static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
}
}
+static inline u64 max_tbl_size(struct s390_domain *domain)
+{
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RTX:
+ return ZPCI_TABLE_SIZE_RT - 1;
+ case ZPCI_TABLE_TYPE_RSX:
+ return ZPCI_TABLE_SIZE_RS - 1;
+ case ZPCI_TABLE_TYPE_RFX:
+ return U64_MAX;
+ default:
+ return 0;
+ }
+}
+
static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
{
+ struct zpci_dev *zdev = to_zpci_dev(dev);
struct s390_domain *s390_domain;
+ u64 aperture_size;
s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
if (!s390_domain)
@@ -342,9 +540,26 @@ static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
kfree(s390_domain);
return NULL;
}
+
+ aperture_size = min(s390_iommu_aperture,
+ zdev->end_dma - zdev->start_dma + 1);
+ if (aperture_size <= (ZPCI_TABLE_SIZE_RT - zdev->start_dma)) {
+ s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
+ } else if (aperture_size <= (ZPCI_TABLE_SIZE_RS - zdev->start_dma) &&
+ (zdev->dtsm & ZPCI_IOTA_DT_RS)) {
+ s390_domain->origin_type = ZPCI_TABLE_TYPE_RSX;
+ } else if (zdev->dtsm & ZPCI_IOTA_DT_RF) {
+ s390_domain->origin_type = ZPCI_TABLE_TYPE_RFX;
+ } else {
+ /* Assume RTX available */
+ s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
+ aperture_size = ZPCI_TABLE_SIZE_RT - zdev->start_dma;
+ }
+ zdev->end_dma = zdev->start_dma + aperture_size - 1;
+
s390_domain->domain.geometry.force_aperture = true;
s390_domain->domain.geometry.aperture_start = 0;
- s390_domain->domain.geometry.aperture_end = ZPCI_TABLE_SIZE_RT - 1;
+ s390_domain->domain.geometry.aperture_end = max_tbl_size(s390_domain);
spin_lock_init(&s390_domain->list_lock);
INIT_LIST_HEAD_RCU(&s390_domain->devices);
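The selection ladder picks the shallowest table format whose reach covers the aperture. A hedged capacity check, assuming 11-bit indices per region level and a 2 GiB reach per region-third entry:

	/*
	 * RTX: 2048 entries * 2 GiB  = 2^42 bytes (4 TiB)
	 * RSX: 2048 entries * 4 TiB  = 2^53 bytes (8 PiB)
	 * RFX: 2048 entries * 8 PiB  = 2^64 bytes (full IOVA space)
	 */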
@@ -356,7 +571,7 @@ static void s390_iommu_rcu_free_domain(struct rcu_head *head)
{
struct s390_domain *s390_domain = container_of(head, struct s390_domain, rcu);
- dma_cleanup_tables(s390_domain->dma_table);
+ dma_cleanup_tables(s390_domain);
kfree(s390_domain);
}
@@ -381,6 +596,21 @@ static void zdev_s390_domain_update(struct zpci_dev *zdev,
spin_unlock_irqrestore(&zdev->dom_lock, flags);
}
+static u64 get_iota_region_flag(struct s390_domain *domain)
+{
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RTX:
+ return ZPCI_IOTA_RTTO_FLAG;
+ case ZPCI_TABLE_TYPE_RSX:
+ return ZPCI_IOTA_RSTO_FLAG;
+ case ZPCI_TABLE_TYPE_RFX:
+ return ZPCI_IOTA_RFTO_FLAG;
+ default:
+ WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type);
+ return 0;
+ }
+}
+
static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
struct iommu_domain *domain, u8 *status)
{
@@ -399,7 +629,7 @@ static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
default:
s390_domain = to_s390_domain(domain);
iota = virt_to_phys(s390_domain->dma_table) |
- ZPCI_IOTA_RTTO_FLAG;
+ get_iota_region_flag(s390_domain);
rc = zpci_register_ioat(zdev, 0, zdev->start_dma,
zdev->end_dma, iota, status);
}
@@ -482,6 +712,8 @@ static void s390_iommu_get_resv_regions(struct device *dev,
{
struct zpci_dev *zdev = to_zpci_dev(dev);
struct iommu_resv_region *region;
+ u64 max_size, end_resv;
+ unsigned long flags;
if (zdev->start_dma) {
region = iommu_alloc_resv_region(0, zdev->start_dma, 0,
@@ -491,10 +723,21 @@ static void s390_iommu_get_resv_regions(struct device *dev,
list_add_tail(&region->list, list);
}
- if (zdev->end_dma < ZPCI_TABLE_SIZE_RT - 1) {
- region = iommu_alloc_resv_region(zdev->end_dma + 1,
- ZPCI_TABLE_SIZE_RT - zdev->end_dma - 1,
- 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
+ spin_lock_irqsave(&zdev->dom_lock, flags);
+ if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
+ zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY) {
+ spin_unlock_irqrestore(&zdev->dom_lock, flags);
+ return;
+ }
+
+ max_size = max_tbl_size(to_s390_domain(zdev->s390_domain));
+ spin_unlock_irqrestore(&zdev->dom_lock, flags);
+
+ if (zdev->end_dma < max_size) {
+ end_resv = max_size - zdev->end_dma;
+ region = iommu_alloc_resv_region(zdev->end_dma + 1, end_resv,
+ 0, IOMMU_RESV_RESERVED,
+ GFP_KERNEL);
if (!region)
return;
list_add_tail(&region->list, list);
@@ -510,13 +753,9 @@ static struct iommu_device *s390_iommu_probe_device(struct device *dev)
zdev = to_zpci_dev(dev);
- if (zdev->start_dma > zdev->end_dma ||
- zdev->start_dma > ZPCI_TABLE_SIZE_RT - 1)
+ if (zdev->start_dma > zdev->end_dma)
return ERR_PTR(-EINVAL);
- if (zdev->end_dma > ZPCI_TABLE_SIZE_RT - 1)
- zdev->end_dma = ZPCI_TABLE_SIZE_RT - 1;
-
if (zdev->tlb_refresh)
dev->iommu->shadow_on_flush = 1;
@@ -606,8 +845,7 @@ static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
int rc;
for (i = 0; i < nr_pages; i++) {
- entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
- gfp);
+ entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
if (unlikely(!entry)) {
rc = -ENOMEM;
goto undo_cpu_trans;
@@ -622,8 +860,7 @@ static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
undo_cpu_trans:
while (i-- > 0) {
dma_addr -= PAGE_SIZE;
- entry = dma_walk_cpu_trans(s390_domain->dma_table,
- dma_addr, gfp);
+ entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
if (!entry)
break;
dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
@@ -640,8 +877,7 @@ static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
int rc = 0;
for (i = 0; i < nr_pages; i++) {
- entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
- GFP_ATOMIC);
+ entry = dma_walk_cpu_trans(s390_domain, dma_addr, GFP_ATOMIC);
if (unlikely(!entry)) {
rc = -EINVAL;
break;
@@ -685,6 +921,51 @@ static int s390_iommu_map_pages(struct iommu_domain *domain,
return rc;
}
+static unsigned long *get_rso_from_iova(struct s390_domain *domain,
+ dma_addr_t iova)
+{
+ unsigned long *rfo;
+ unsigned long rfe;
+ unsigned int rfx;
+
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RFX:
+ rfo = domain->dma_table;
+ rfx = calc_rfx(iova);
+ rfe = READ_ONCE(rfo[rfx]);
+ if (!reg_entry_isvalid(rfe))
+ return NULL;
+ return get_rf_rso(rfe);
+ case ZPCI_TABLE_TYPE_RSX:
+ return domain->dma_table;
+ default:
+ return NULL;
+ }
+}
+
+static unsigned long *get_rto_from_iova(struct s390_domain *domain,
+ dma_addr_t iova)
+{
+ unsigned long *rso;
+ unsigned long rse;
+ unsigned int rsx;
+
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RFX:
+ case ZPCI_TABLE_TYPE_RSX:
+ rso = get_rso_from_iova(domain, iova);
+ rsx = calc_rsx(iova);
+ rse = READ_ONCE(rso[rsx]);
+ if (!reg_entry_isvalid(rse))
+ return NULL;
+ return get_rs_rto(rse);
+ case ZPCI_TABLE_TYPE_RTX:
+ return domain->dma_table;
+ default:
+ return NULL;
+ }
+}
+
static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
@@ -698,10 +979,13 @@ static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
iova > domain->geometry.aperture_end)
return 0;
+ rto = get_rto_from_iova(s390_domain, iova);
+ if (!rto)
+ return 0;
+
rtx = calc_rtx(iova);
sx = calc_sx(iova);
px = calc_px(iova);
- rto = s390_domain->dma_table;
rte = READ_ONCE(rto[rtx]);
if (reg_entry_isvalid(rte)) {
@@ -756,7 +1040,6 @@ struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
int zpci_init_iommu(struct zpci_dev *zdev)
{
- u64 aperture_size;
int rc = 0;
rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
@@ -774,12 +1057,6 @@ int zpci_init_iommu(struct zpci_dev *zdev)
if (rc)
goto out_sysfs;
- zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
- aperture_size = min3(s390_iommu_aperture,
- ZPCI_TABLE_SIZE_RT - zdev->start_dma,
- zdev->end_dma - zdev->start_dma + 1);
- zdev->end_dma = zdev->start_dma + aperture_size - 1;
-
return 0;
out_sysfs:
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index 8d8f11854676..76c9620af4bb 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -690,8 +690,8 @@ sun50i_iommu_domain_alloc_paging(struct device *dev)
if (!sun50i_domain)
return NULL;
- sun50i_domain->dt = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
- get_order(DT_SIZE));
+ sun50i_domain->dt =
+ iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32, DT_SIZE);
if (!sun50i_domain->dt)
goto err_free_domain;
@@ -713,7 +713,7 @@ static void sun50i_iommu_domain_free(struct iommu_domain *domain)
{
struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
- iommu_free_pages(sun50i_domain->dt, get_order(DT_SIZE));
+ iommu_free_pages(sun50i_domain->dt);
sun50i_domain->dt = NULL;
kfree(sun50i_domain);
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 69d353e1df84..61897d50162d 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -51,14 +51,17 @@ struct tegra_smmu {
struct iommu_device iommu; /* IOMMU Core code handle */
};
+struct tegra_pd;
+struct tegra_pt;
+
struct tegra_smmu_as {
struct iommu_domain domain;
struct tegra_smmu *smmu;
unsigned int use_count;
spinlock_t lock;
u32 *count;
- struct page **pts;
- struct page *pd;
+ struct tegra_pt **pts;
+ struct tegra_pd *pd;
dma_addr_t pd_dma;
unsigned id;
u32 attr;
@@ -155,6 +158,14 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
SMMU_PDE_NONSECURE)
+struct tegra_pd {
+ u32 val[SMMU_NUM_PDE];
+};
+
+struct tegra_pt {
+ u32 val[SMMU_NUM_PTE];
+};
+
static unsigned int iova_pd_index(unsigned long iova)
{
return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
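Typing the tables as tegra_pd/tegra_pt arrays lets entries be addressed directly and the buffers be mapped with dma_map_single(), with no struct page or page_address() detour. A hedged fragment of the resulting access pattern (values illustrative):

	struct tegra_pd *pd = iommu_alloc_pages_sz(GFP_KERNEL | __GFP_DMA,
						   SMMU_SIZE_PD);

	/* was: page_address(as->pd)[iova_pd_index(iova)] = value; */
	pd->val[iova_pd_index(iova)] = value;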
@@ -284,7 +295,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
- as->pd = __iommu_alloc_pages(GFP_KERNEL | __GFP_DMA, 0);
+ as->pd = iommu_alloc_pages_sz(GFP_KERNEL | __GFP_DMA, SMMU_SIZE_PD);
if (!as->pd) {
kfree(as);
return NULL;
@@ -292,7 +303,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
if (!as->count) {
- __iommu_free_pages(as->pd, 0);
+ iommu_free_pages(as->pd);
kfree(as);
return NULL;
}
@@ -300,7 +311,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
if (!as->pts) {
kfree(as->count);
- __iommu_free_pages(as->pd, 0);
+ iommu_free_pages(as->pd);
kfree(as);
return NULL;
}
@@ -417,8 +428,8 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
goto unlock;
}
- as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
- DMA_TO_DEVICE);
+ as->pd_dma =
+ dma_map_single(smmu->dev, as->pd, SMMU_SIZE_PD, DMA_TO_DEVICE);
if (dma_mapping_error(smmu->dev, as->pd_dma)) {
err = -ENOMEM;
goto unlock;
@@ -450,7 +461,7 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
return 0;
err_unmap:
- dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+ dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
unlock:
mutex_unlock(&smmu->lock);
@@ -469,7 +480,7 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
tegra_smmu_free_asid(smmu, as->id);
- dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+ dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
as->smmu = NULL;
@@ -548,11 +559,11 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
{
unsigned int pd_index = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
- u32 *pd = page_address(as->pd);
+ struct tegra_pd *pd = as->pd;
unsigned long offset = pd_index * sizeof(*pd);
/* Set the page directory entry first */
- pd[pd_index] = value;
+ pd->val[pd_index] = value;
/* Then flush the page directory entry from caches */
dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
@@ -564,11 +575,9 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
smmu_flush(smmu);
}
-static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
+static u32 *tegra_smmu_pte_offset(struct tegra_pt *pt, unsigned long iova)
{
- u32 *pt = page_address(pt_page);
-
- return pt + iova_pt_index(iova);
+ return &pt->val[iova_pt_index(iova)];
}
static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
@@ -576,21 +585,19 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
{
unsigned int pd_index = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
- struct page *pt_page;
- u32 *pd;
+ struct tegra_pt *pt;
- pt_page = as->pts[pd_index];
- if (!pt_page)
+ pt = as->pts[pd_index];
+ if (!pt)
return NULL;
- pd = page_address(as->pd);
- *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
+ *dmap = smmu_pde_to_dma(smmu, as->pd->val[pd_index]);
- return tegra_smmu_pte_offset(pt_page, iova);
+ return tegra_smmu_pte_offset(pt, iova);
}
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
- dma_addr_t *dmap, struct page *page)
+ dma_addr_t *dmap, struct tegra_pt *pt)
{
unsigned int pde = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
@@ -598,30 +605,28 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
if (!as->pts[pde]) {
dma_addr_t dma;
- dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
- DMA_TO_DEVICE);
+ dma = dma_map_single(smmu->dev, pt, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
if (dma_mapping_error(smmu->dev, dma)) {
- __iommu_free_pages(page, 0);
+ iommu_free_pages(pt);
return NULL;
}
if (!smmu_dma_addr_valid(smmu, dma)) {
- dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
- DMA_TO_DEVICE);
- __iommu_free_pages(page, 0);
+ dma_unmap_single(smmu->dev, dma, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
+ iommu_free_pages(pt);
return NULL;
}
- as->pts[pde] = page;
+ as->pts[pde] = pt;
tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
SMMU_PDE_NEXT));
*dmap = dma;
} else {
- u32 *pd = page_address(as->pd);
-
- *dmap = smmu_pde_to_dma(smmu, pd[pde]);
+ *dmap = smmu_pde_to_dma(smmu, as->pd->val[pde]);
}
return tegra_smmu_pte_offset(as->pts[pde], iova);
@@ -637,7 +642,7 @@ static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
unsigned int pde = iova_pd_index(iova);
- struct page *page = as->pts[pde];
+ struct tegra_pt *pt = as->pts[pde];
/*
* When no entries in this page table are used anymore, return the
@@ -645,13 +650,13 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
*/
if (--as->count[pde] == 0) {
struct tegra_smmu *smmu = as->smmu;
- u32 *pd = page_address(as->pd);
- dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
+ dma_addr_t pte_dma = smmu_pde_to_dma(smmu, as->pd->val[pde]);
tegra_smmu_set_pde(as, iova, 0);
- dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
- __iommu_free_pages(page, 0);
+ dma_unmap_single(smmu->dev, pte_dma, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
+ iommu_free_pages(pt);
as->pts[pde] = NULL;
}
}
@@ -671,16 +676,16 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
smmu_flush(smmu);
}
-static struct page *as_get_pde_page(struct tegra_smmu_as *as,
- unsigned long iova, gfp_t gfp,
- unsigned long *flags)
+static struct tegra_pt *as_get_pde_page(struct tegra_smmu_as *as,
+ unsigned long iova, gfp_t gfp,
+ unsigned long *flags)
{
unsigned int pde = iova_pd_index(iova);
- struct page *page = as->pts[pde];
+ struct tegra_pt *pt = as->pts[pde];
/* at first check whether allocation needs to be done at all */
- if (page)
- return page;
+ if (pt)
+ return pt;
/*
* In order to prevent exhaustion of the atomic memory pool, we
@@ -690,7 +695,7 @@ static struct page *as_get_pde_page(struct tegra_smmu_as *as,
if (gfpflags_allow_blocking(gfp))
spin_unlock_irqrestore(&as->lock, *flags);
- page = __iommu_alloc_pages(gfp | __GFP_DMA, 0);
+ pt = iommu_alloc_pages_sz(gfp | __GFP_DMA, SMMU_SIZE_PT);
if (gfpflags_allow_blocking(gfp))
spin_lock_irqsave(&as->lock, *flags);
@@ -701,13 +706,13 @@ static struct page *as_get_pde_page(struct tegra_smmu_as *as,
* if allocation succeeded and the allocation failure isn't fatal.
*/
if (as->pts[pde]) {
- if (page)
- __iommu_free_pages(page, 0);
+ if (pt)
+ iommu_free_pages(pt);
- page = as->pts[pde];
+ pt = as->pts[pde];
}
- return page;
+ return pt;
}
static int
@@ -717,15 +722,15 @@ __tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
- struct page *page;
+ struct tegra_pt *pt;
u32 pte_attrs;
u32 *pte;
- page = as_get_pde_page(as, iova, gfp, flags);
- if (!page)
+ pt = as_get_pde_page(as, iova, gfp, flags);
+ if (!pt)
return -ENOMEM;
- pte = as_get_pte(as, iova, &pte_dma, page);
+ pte = as_get_pte(as, iova, &pte_dma, pt);
if (!pte)
return -ENOMEM;
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index b85ce6310ddb..ecd41fb03e5a 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -48,6 +48,7 @@ struct viommu_dev {
u64 pgsize_bitmap;
u32 first_domain;
u32 last_domain;
+ u32 identity_domain_id;
/* Supported MAP flags */
u32 map_flags;
u32 probe_size;
@@ -62,7 +63,6 @@ struct viommu_mapping {
struct viommu_domain {
struct iommu_domain domain;
struct viommu_dev *viommu;
- struct mutex mutex; /* protects viommu pointer */
unsigned int id;
u32 map_flags;
@@ -70,7 +70,6 @@ struct viommu_domain {
struct rb_root_cached mappings;
unsigned long nr_endpoints;
- bool bypass;
};
struct viommu_endpoint {
@@ -97,6 +96,8 @@ struct viommu_event {
};
};
+static struct viommu_domain viommu_identity_domain;
+
#define to_viommu_domain(domain) \
container_of(domain, struct viommu_domain, domain)
@@ -305,6 +306,22 @@ out_unlock:
return ret;
}
+static int viommu_send_attach_req(struct viommu_dev *viommu, struct device *dev,
+ struct virtio_iommu_req_attach *req)
+{
+ int ret;
+ unsigned int i;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ for (i = 0; i < fwspec->num_ids; i++) {
+ req->endpoint = cpu_to_le32(fwspec->ids[i]);
+ ret = viommu_send_req_sync(viommu, req, sizeof(*req));
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
/*
* viommu_add_mapping - add a mapping to the internal tree
*
@@ -637,71 +654,45 @@ static void viommu_event_handler(struct virtqueue *vq)
/* IOMMU API */
-static struct iommu_domain *viommu_domain_alloc(unsigned type)
+static struct iommu_domain *viommu_domain_alloc_paging(struct device *dev)
{
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+ struct viommu_dev *viommu = vdev->viommu;
+ unsigned long viommu_page_size;
struct viommu_domain *vdomain;
-
- if (type != IOMMU_DOMAIN_UNMANAGED &&
- type != IOMMU_DOMAIN_DMA &&
- type != IOMMU_DOMAIN_IDENTITY)
- return NULL;
-
- vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
- if (!vdomain)
- return NULL;
-
- mutex_init(&vdomain->mutex);
- spin_lock_init(&vdomain->mappings_lock);
- vdomain->mappings = RB_ROOT_CACHED;
-
- return &vdomain->domain;
-}
-
-static int viommu_domain_finalise(struct viommu_endpoint *vdev,
- struct iommu_domain *domain)
-{
int ret;
- unsigned long viommu_page_size;
- struct viommu_dev *viommu = vdev->viommu;
- struct viommu_domain *vdomain = to_viommu_domain(domain);
viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
if (viommu_page_size > PAGE_SIZE) {
dev_err(vdev->dev,
"granule 0x%lx larger than system page size 0x%lx\n",
viommu_page_size, PAGE_SIZE);
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
}
- ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
- viommu->last_domain, GFP_KERNEL);
- if (ret < 0)
- return ret;
+ vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
+ if (!vdomain)
+ return ERR_PTR(-ENOMEM);
- vdomain->id = (unsigned int)ret;
+ spin_lock_init(&vdomain->mappings_lock);
+ vdomain->mappings = RB_ROOT_CACHED;
- domain->pgsize_bitmap = viommu->pgsize_bitmap;
- domain->geometry = viommu->geometry;
+ ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
+ viommu->last_domain, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(vdomain);
+ return ERR_PTR(ret);
+ }
- vdomain->map_flags = viommu->map_flags;
- vdomain->viommu = viommu;
+ vdomain->id = (unsigned int)ret;
- if (domain->type == IOMMU_DOMAIN_IDENTITY) {
- if (virtio_has_feature(viommu->vdev,
- VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
- vdomain->bypass = true;
- return 0;
- }
+ vdomain->domain.pgsize_bitmap = viommu->pgsize_bitmap;
+ vdomain->domain.geometry = viommu->geometry;
- ret = viommu_domain_map_identity(vdev, vdomain);
- if (ret) {
- ida_free(&viommu->domain_ids, vdomain->id);
- vdomain->viommu = NULL;
- return ret;
- }
- }
+ vdomain->map_flags = viommu->map_flags;
+ vdomain->viommu = viommu;
- return 0;
+ return &vdomain->domain;
}
static void viommu_domain_free(struct iommu_domain *domain)
@@ -717,29 +708,37 @@ static void viommu_domain_free(struct iommu_domain *domain)
kfree(vdomain);
}
+static struct iommu_domain *viommu_domain_alloc_identity(struct device *dev)
+{
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+ struct iommu_domain *domain;
+ int ret;
+
+ if (virtio_has_feature(vdev->viommu->vdev,
+ VIRTIO_IOMMU_F_BYPASS_CONFIG))
+ return &viommu_identity_domain.domain;
+
+ domain = viommu_domain_alloc_paging(dev);
+ if (IS_ERR(domain))
+ return domain;
+
+ ret = viommu_domain_map_identity(vdev, to_viommu_domain(domain));
+ if (ret) {
+ viommu_domain_free(domain);
+ return ERR_PTR(ret);
+ }
+ return domain;
+}
+
static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
- int i;
int ret = 0;
struct virtio_iommu_req_attach req;
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
struct viommu_domain *vdomain = to_viommu_domain(domain);
- mutex_lock(&vdomain->mutex);
- if (!vdomain->viommu) {
- /*
- * Properly initialize the domain now that we know which viommu
- * owns it.
- */
- ret = viommu_domain_finalise(vdev, domain);
- } else if (vdomain->viommu != vdev->viommu) {
- ret = -EINVAL;
- }
- mutex_unlock(&vdomain->mutex);
-
- if (ret)
- return ret;
+ if (vdomain->viommu != vdev->viommu)
+ return -EINVAL;
/*
* In the virtio-iommu device, when attaching the endpoint to a new
@@ -761,16 +760,9 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
.domain = cpu_to_le32(vdomain->id),
};
- if (vdomain->bypass)
- req.flags |= cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS);
-
- for (i = 0; i < fwspec->num_ids; i++) {
- req.endpoint = cpu_to_le32(fwspec->ids[i]);
-
- ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
- if (ret)
- return ret;
- }
+ ret = viommu_send_attach_req(vdomain->viommu, dev, &req);
+ if (ret)
+ return ret;
if (!vdomain->nr_endpoints) {
/*
@@ -788,6 +780,40 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
return 0;
}
+static int viommu_attach_identity_domain(struct iommu_domain *domain,
+ struct device *dev)
+{
+ int ret = 0;
+ struct virtio_iommu_req_attach req;
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+ struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+ req = (struct virtio_iommu_req_attach) {
+ .head.type = VIRTIO_IOMMU_T_ATTACH,
+ .domain = cpu_to_le32(vdev->viommu->identity_domain_id),
+ .flags = cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS),
+ };
+
+ ret = viommu_send_attach_req(vdev->viommu, dev, &req);
+ if (ret)
+ return ret;
+
+ if (vdev->vdomain)
+ vdev->vdomain->nr_endpoints--;
+ vdomain->nr_endpoints++;
+ vdev->vdomain = vdomain;
+ return 0;
+}
+
+static struct viommu_domain viommu_identity_domain = {
+ .domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &(const struct iommu_domain_ops) {
+ .attach_dev = viommu_attach_identity_domain,
+ },
+ },
+};
+
static void viommu_detach_dev(struct viommu_endpoint *vdev)
{
int i;
@@ -1062,7 +1088,8 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
static struct iommu_ops viommu_ops = {
.capable = viommu_capable,
- .domain_alloc = viommu_domain_alloc,
+ .domain_alloc_identity = viommu_domain_alloc_identity,
+ .domain_alloc_paging = viommu_domain_alloc_paging,
.probe_device = viommu_probe_device,
.release_device = viommu_release_device,
.device_group = viommu_device_group,
@@ -1184,6 +1211,12 @@ static int viommu_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
+ /* Reserve an ID to use as the bypass domain */
+ if (virtio_has_feature(viommu->vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
+ viommu->identity_domain_id = viommu->first_domain;
+ viommu->first_domain++;
+ }
+
viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
virtio_device_ready(vdev);
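
The virtio-iommu hunks above replace the multiplexed domain_alloc(type) callback with per-type hooks; a rough sketch of the resulting ops split (illustrative framing, example_ops is a made-up name — the callback assignments themselves are from the patch):

/*
 * Sketch: the core now picks the allocation path via dedicated hooks,
 * so the driver no longer switches on the requested domain type.
 */
static struct iommu_ops example_ops = {
	/* before: .domain_alloc = viommu_domain_alloc, (switched on type) */
	.domain_alloc_identity = viommu_domain_alloc_identity,
	.domain_alloc_paging   = viommu_domain_alloc_paging,
};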
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 08bb3b031f23..0d196e447142 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -166,6 +166,11 @@ config DW_APB_ICTL
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN_HIERARCHY
+config ECONET_EN751221_INTC
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
config FARADAY_FTINTC010
bool
select IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 365bcea9a61f..23ca4959e6ce 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
obj-$(CONFIG_ARCH_ACTIONS) += irq-owl-sirq.o
obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o
obj-$(CONFIG_EXYNOS_IRQ_COMBINER) += exynos-combiner.o
+obj-$(CONFIG_ECONET_EN751221_INTC) += irq-econet-en751221.o
obj-$(CONFIG_FARADAY_FTINTC010) += irq-ftintc010.o
obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index 552aa04ff063..e7dfcf0cda43 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -180,7 +180,7 @@ static void __init combiner_init(void __iomem *combiner_base,
if (!combiner_data)
return;
- combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
+ combiner_irq_domain = irq_domain_create_linear(of_fwnode_handle(np), nr_irq,
&combiner_irq_domain_ops, combiner_data);
if (WARN_ON(!combiner_irq_domain)) {
pr_warn("%s: irq domain init failed\n", __func__);
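
The irq_domain_add_* to irq_domain_create_* conversions recur across nearly every irqchip file below; a minimal sketch of the shape (illustrative, example_make_domain is a made-up name — the old helpers were thin wrappers taking a device_node, while the create_* variants take the embedded fwnode_handle directly):

#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *example_make_domain(struct device_node *np,
					      const struct irq_domain_ops *ops,
					      void *host_data)
{
	/* Old spelling: irq_domain_add_linear(np, 32, ops, host_data); */
	return irq_domain_create_linear(of_fwnode_handle(np), 32, ops, host_data);
}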
diff --git a/drivers/irqchip/irq-al-fic.c b/drivers/irqchip/irq-al-fic.c
index dfb761e86c9c..8f300843bbca 100644
--- a/drivers/irqchip/irq-al-fic.c
+++ b/drivers/irqchip/irq-al-fic.c
@@ -65,15 +65,13 @@ static int al_fic_irq_set_type(struct irq_data *data, unsigned int flow_type)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
struct al_fic *fic = gc->private;
enum al_fic_state new_state;
- int ret = 0;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
if (((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH) &&
((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)) {
pr_debug("fic doesn't support flow type %d\n", flow_type);
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
new_state = (flow_type & IRQ_TYPE_LEVEL_HIGH) ?
@@ -91,16 +89,10 @@ static int al_fic_irq_set_type(struct irq_data *data, unsigned int flow_type)
if (fic->state == AL_FIC_UNCONFIGURED) {
al_fic_set_trigger(fic, gc, new_state);
} else if (fic->state != new_state) {
- pr_debug("fic %s state already configured to %d\n",
- fic->name, fic->state);
- ret = -EINVAL;
- goto err;
+ pr_debug("fic %s state already configured to %d\n", fic->name, fic->state);
+ return -EINVAL;
}
-
-err:
- irq_gc_unlock(gc);
-
- return ret;
+ return 0;
}
static void al_fic_irq_handler(struct irq_desc *desc)
@@ -139,7 +131,7 @@ static int al_fic_register(struct device_node *node,
struct irq_chip_generic *gc;
int ret;
- fic->domain = irq_domain_add_linear(node,
+ fic->domain = irq_domain_create_linear(of_fwnode_handle(node),
NR_FIC_IRQS,
&irq_generic_chip_ops,
fic);
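
The irq_gc_lock()/irq_gc_unlock() pairs removed throughout this series all follow the same conversion to scope-based guards from <linux/cleanup.h>; a minimal before/after sketch, with illustrative function names and an illustrative check:

/* Before: manual lock/unlock forces a single exit path. */
static int example_set_type_old(struct irq_data *d, unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	int ret = 0;

	irq_gc_lock(gc);
	if (!(type & IRQ_TYPE_SENSE_MASK))	/* illustrative check */
		ret = -EINVAL;
	irq_gc_unlock(gc);
	return ret;
}

/* After: guard() releases gc->lock automatically on every return. */
static int example_set_type_new(struct irq_data *d, unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);

	guard(raw_spinlock)(&gc->lock);
	if (!(type & IRQ_TYPE_SENSE_MASK))	/* illustrative check */
		return -EINVAL;
	return 0;
}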
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
index a1430ab60a8a..a5289dc26dca 100644
--- a/drivers/irqchip/irq-alpine-msi.c
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -205,15 +205,14 @@ static int alpine_msix_init_domains(struct alpine_msix_data *priv,
return -ENXIO;
}
- middle_domain = irq_domain_add_hierarchy(gic_domain, 0, 0, NULL,
- &alpine_msix_middle_domain_ops,
- priv);
+ middle_domain = irq_domain_create_hierarchy(gic_domain, 0, 0, NULL,
+ &alpine_msix_middle_domain_ops, priv);
if (!middle_domain) {
pr_err("Failed to create the MSIX middle domain\n");
return -ENOMEM;
}
- msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
+ msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(node),
&alpine_msix_domain_info,
middle_domain);
if (!msi_domain) {
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 974dc088c853..032d66dceb8e 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -1014,7 +1014,7 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
irqc->info.die_stride = off - start_off;
- irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node),
+ irqc->hw_domain = irq_domain_create_tree(of_fwnode_handle(node),
&aic_irq_domain_ops, irqc);
if (WARN_ON(!irqc->hw_domain))
goto err_unmap;
@@ -1067,7 +1067,7 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
if (is_kernel_in_hyp_mode()) {
struct irq_fwspec mi = {
- .fwnode = of_node_to_fwnode(node),
+ .fwnode = of_fwnode_handle(node),
.param_count = 3,
.param = {
[0] = AIC_FIQ, /* This is a lie */
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 2aa6a51e05d0..67b672a78862 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -348,12 +348,12 @@ static int __init mpic_msi_init(struct mpic *mpic, struct device_node *node,
mpic->msi_doorbell_mask = PCI_MSI_FULL_DOORBELL_MASK;
}
- mpic->msi_inner_domain = irq_domain_add_linear(NULL, mpic->msi_doorbell_size,
+ mpic->msi_inner_domain = irq_domain_create_linear(NULL, mpic->msi_doorbell_size,
&mpic_msi_domain_ops, mpic);
if (!mpic->msi_inner_domain)
return -ENOMEM;
- mpic->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), &mpic_msi_domain_info,
+ mpic->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(node), &mpic_msi_domain_info,
mpic->msi_inner_domain);
if (!mpic->msi_domain) {
irq_domain_remove(mpic->msi_inner_domain);
@@ -492,7 +492,7 @@ static int __init mpic_ipi_init(struct mpic *mpic, struct device_node *node)
{
int base_ipi;
- mpic->ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node), IPI_DOORBELL_NR,
+ mpic->ipi_domain = irq_domain_create_linear(of_fwnode_handle(node), IPI_DOORBELL_NR,
&mpic_ipi_domain_ops, mpic);
if (WARN_ON(!mpic->ipi_domain))
return -ENOMEM;
@@ -546,7 +546,7 @@ static void mpic_reenable_percpu(struct mpic *mpic)
{
/* Re-enable per-CPU interrupts that were enabled before suspend */
for (irq_hw_number_t i = 0; i < MPIC_PER_CPU_IRQS_NR; i++) {
- unsigned int virq = irq_linear_revmap(mpic->domain, i);
+ unsigned int virq = irq_find_mapping(mpic->domain, i);
struct irq_data *d;
if (!virq || !irq_percpu_is_enabled(virq))
@@ -740,7 +740,7 @@ static void mpic_resume(void)
/* Re-enable interrupts */
for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) {
- unsigned int virq = irq_linear_revmap(mpic->domain, i);
+ unsigned int virq = irq_find_mapping(mpic->domain, i);
struct irq_data *d;
if (!virq)
@@ -861,7 +861,7 @@ static int __init mpic_of_init(struct device_node *node, struct device_node *par
if (!mpic_is_ipi_available(mpic))
nr_irqs = MPIC_PER_CPU_IRQS_NR;
- mpic->domain = irq_domain_add_linear(node, nr_irqs, &mpic_irq_ops, mpic);
+ mpic->domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs, &mpic_irq_ops, mpic);
if (!mpic->domain) {
pr_err("%pOF: Unable to add IRQ domain\n", node);
return -ENOMEM;
diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c
index 9c9fc3e2967e..87c1feb999ff 100644
--- a/drivers/irqchip/irq-aspeed-i2c-ic.c
+++ b/drivers/irqchip/irq-aspeed-i2c-ic.c
@@ -82,7 +82,7 @@ static int __init aspeed_i2c_ic_of_init(struct device_node *node,
goto err_iounmap;
}
- i2c_ic->irq_domain = irq_domain_add_linear(node, ASPEED_I2C_IC_NUM_BUS,
+ i2c_ic->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), ASPEED_I2C_IC_NUM_BUS,
&aspeed_i2c_ic_irq_domain_ops,
NULL);
if (!i2c_ic->irq_domain) {
diff --git a/drivers/irqchip/irq-aspeed-intc.c b/drivers/irqchip/irq-aspeed-intc.c
index bd3b759b4b2c..8330221799a0 100644
--- a/drivers/irqchip/irq-aspeed-intc.c
+++ b/drivers/irqchip/irq-aspeed-intc.c
@@ -102,7 +102,7 @@ static int __init aspeed_intc_ic_of_init(struct device_node *node,
writel(0xffffffff, intc_ic->base + INTC_INT_STATUS_REG);
writel(0x0, intc_ic->base + INTC_INT_ENABLE_REG);
- intc_ic->irq_domain = irq_domain_add_linear(node, INTC_IRQS_PER_WORD,
+ intc_ic->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), INTC_IRQS_PER_WORD,
&aspeed_intc_ic_irq_domain_ops, intc_ic);
if (!intc_ic->irq_domain) {
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
index 94a7223e95df..1c7045467c48 100644
--- a/drivers/irqchip/irq-aspeed-scu-ic.c
+++ b/drivers/irqchip/irq-aspeed-scu-ic.c
@@ -165,7 +165,7 @@ static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
goto err;
}
- scu_ic->irq_domain = irq_domain_add_linear(node, scu_ic->num_irqs,
+ scu_ic->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), scu_ic->num_irqs,
&aspeed_scu_ic_domain_ops,
scu_ic);
if (!scu_ic->irq_domain) {
diff --git a/drivers/irqchip/irq-aspeed-vic.c b/drivers/irqchip/irq-aspeed-vic.c
index 62ccf2c0c414..9b665b5bb531 100644
--- a/drivers/irqchip/irq-aspeed-vic.c
+++ b/drivers/irqchip/irq-aspeed-vic.c
@@ -211,8 +211,8 @@ static int __init avic_of_init(struct device_node *node,
set_handle_irq(avic_handle_irq);
/* Register our domain */
- vic->dom = irq_domain_add_simple(node, NUM_IRQS, 0,
- &avic_dom_ops, vic);
+ vic->dom = irq_domain_create_simple(of_fwnode_handle(node), NUM_IRQS, 0,
+ &avic_dom_ops, vic);
return 0;
}
diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c
index 92f001a5ff8d..268cc18b781f 100644
--- a/drivers/irqchip/irq-ath79-misc.c
+++ b/drivers/irqchip/irq-ath79-misc.c
@@ -147,7 +147,7 @@ static int __init ath79_misc_intc_of_init(
return -ENOMEM;
}
- domain = irq_domain_add_linear(node, ATH79_MISC_IRQ_COUNT,
+ domain = irq_domain_create_linear(of_fwnode_handle(node), ATH79_MISC_IRQ_COUNT,
&misc_irq_domain_ops, base);
if (!domain) {
pr_err("Failed to add MISC irqdomain\n");
@@ -188,7 +188,7 @@ void __init ath79_misc_irq_init(void __iomem *regs, int irq,
else
ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
- domain = irq_domain_add_legacy(NULL, ATH79_MISC_IRQ_COUNT,
+ domain = irq_domain_create_legacy(NULL, ATH79_MISC_IRQ_COUNT,
irq_base, 0, &misc_irq_domain_ops, regs);
if (!domain)
panic("Failed to create MISC irqdomain");
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index 4525366d16d6..3cad30a40c19 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -228,7 +228,7 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
goto err_iounmap;
}
- domain = irq_domain_add_linear(node, nchips * 32, ops, aic);
+ domain = irq_domain_create_linear(of_fwnode_handle(node), nchips * 32, ops, aic);
if (!domain) {
ret = -ENOMEM;
goto err_free_aic;
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 3839ad79ad31..03aeed39a4d2 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -78,9 +78,8 @@ static int aic_retrigger(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
/* Enable interrupt on AIC5 */
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, d->mask, AT91_AIC_ISCR);
- irq_gc_unlock(gc);
return 1;
}
@@ -106,30 +105,27 @@ static void aic_suspend(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IDCR);
irq_reg_writel(gc, gc->wake_active, AT91_AIC_IECR);
- irq_gc_unlock(gc);
}
static void aic_resume(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, gc->wake_active, AT91_AIC_IDCR);
irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IECR);
- irq_gc_unlock(gc);
}
static void aic_pm_shutdown(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR);
irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR);
- irq_gc_unlock(gc);
}
#else
#define aic_suspend NULL
@@ -175,10 +171,8 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
{
struct irq_domain_chip_generic *dgc = d->gc;
struct irq_chip_generic *gc;
- unsigned long flags;
unsigned smr;
- int idx;
- int ret;
+ int idx, ret;
if (!dgc)
return -EINVAL;
@@ -194,11 +188,10 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
gc = dgc->gc[idx];
- irq_gc_lock_irqsave(gc, flags);
+ guard(raw_spinlock_irq)(&gc->lock);
smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
aic_common_set_priority(intspec[2], &smr);
irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
- irq_gc_unlock_irqrestore(gc, flags);
return ret;
}
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index e98c2875af9e..60b00d2c3d7a 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -92,11 +92,10 @@ static void aic5_mask(struct irq_data *d)
* Disable interrupt on AIC5. We always take the lock of the
* first irq chip as all chips share the same registers.
*/
- irq_gc_lock(bgc);
+ guard(raw_spinlock)(&bgc->lock);
irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
gc->mask_cache &= ~d->mask;
- irq_gc_unlock(bgc);
}
static void aic5_unmask(struct irq_data *d)
@@ -109,11 +108,10 @@ static void aic5_unmask(struct irq_data *d)
* Enable interrupt on AIC5. We always take the lock of the
* first irq chip as all chips share the same registers.
*/
- irq_gc_lock(bgc);
+ guard(raw_spinlock)(&bgc->lock);
irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
irq_reg_writel(gc, 1, AT91_AIC5_IECR);
gc->mask_cache |= d->mask;
- irq_gc_unlock(bgc);
}
static int aic5_retrigger(struct irq_data *d)
@@ -122,11 +120,9 @@ static int aic5_retrigger(struct irq_data *d)
struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
/* Enable interrupt on AIC5 */
- irq_gc_lock(bgc);
+ guard(raw_spinlock)(&bgc->lock);
irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR);
irq_reg_writel(bgc, 1, AT91_AIC5_ISCR);
- irq_gc_unlock(bgc);
-
return 1;
}
@@ -137,14 +133,12 @@ static int aic5_set_type(struct irq_data *d, unsigned type)
unsigned int smr;
int ret;
- irq_gc_lock(bgc);
+ guard(raw_spinlock)(&bgc->lock);
irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR);
smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
ret = aic_common_set_type(d, type, &smr);
if (!ret)
irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
- irq_gc_unlock(bgc);
-
return ret;
}
@@ -166,7 +160,7 @@ static void aic5_suspend(struct irq_data *d)
smr_cache[i] = irq_reg_readl(bgc, AT91_AIC5_SMR);
}
- irq_gc_lock(bgc);
+ guard(raw_spinlock)(&bgc->lock);
for (i = 0; i < dgc->irqs_per_chip; i++) {
mask = 1 << i;
if ((mask & gc->mask_cache) == (mask & gc->wake_active))
@@ -178,7 +172,6 @@ static void aic5_suspend(struct irq_data *d)
else
irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
}
- irq_gc_unlock(bgc);
}
static void aic5_resume(struct irq_data *d)
@@ -190,7 +183,7 @@ static void aic5_resume(struct irq_data *d)
int i;
u32 mask;
- irq_gc_lock(bgc);
+ guard(raw_spinlock)(&bgc->lock);
if (smr_cache) {
irq_reg_writel(bgc, 0xffffffff, AT91_AIC5_SPU);
@@ -214,7 +207,6 @@ static void aic5_resume(struct irq_data *d)
else
irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
}
- irq_gc_unlock(bgc);
}
static void aic5_pm_shutdown(struct irq_data *d)
@@ -225,13 +217,12 @@ static void aic5_pm_shutdown(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
int i;
- irq_gc_lock(bgc);
+ guard(raw_spinlock)(&bgc->lock);
for (i = 0; i < dgc->irqs_per_chip; i++) {
irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
irq_reg_writel(bgc, 1, AT91_AIC5_ICCR);
}
- irq_gc_unlock(bgc);
}
#else
#define aic5_suspend NULL
@@ -277,7 +268,6 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
unsigned int *out_type)
{
struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0);
- unsigned long flags;
unsigned smr;
int ret;
@@ -289,13 +279,11 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
if (ret)
return ret;
- irq_gc_lock_irqsave(bgc, flags);
+ guard(raw_spinlock_irq)(&bgc->lock);
irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
aic_common_set_priority(intspec[2], &smr);
irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
- irq_gc_unlock_irqrestore(bgc, flags);
-
return ret;
}
diff --git a/drivers/irqchip/irq-bcm2712-mip.c b/drivers/irqchip/irq-bcm2712-mip.c
index 4cce24233f0f..63de5ef6cf2d 100644
--- a/drivers/irqchip/irq-bcm2712-mip.c
+++ b/drivers/irqchip/irq-bcm2712-mip.c
@@ -11,7 +11,7 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#define MIP_INT_RAISE 0x00
#define MIP_INT_CLEAR 0x10
@@ -174,8 +174,8 @@ static int mip_init_domains(struct mip_priv *mip, struct device_node *np)
{
struct irq_domain *middle;
- middle = irq_domain_add_hierarchy(mip->parent, 0, mip->num_msis, np,
- &mip_middle_domain_ops, mip);
+ middle = irq_domain_create_hierarchy(mip->parent, 0, mip->num_msis, of_fwnode_handle(np),
+ &mip_middle_domain_ops, mip);
if (!middle)
return -ENOMEM;
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index 6c20604c2242..1e384c870350 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -144,7 +144,7 @@ static int __init armctrl_of_init(struct device_node *node,
if (!base)
panic("%pOF: unable to map IC registers\n", node);
- intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0),
+ intc.domain = irq_domain_create_linear(of_fwnode_handle(node), MAKE_HWIRQ(NR_BANKS, 0),
&armctrl_ops, NULL);
if (!intc.domain)
panic("%pOF: unable to create IRQ domain\n", node);
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index e366257684b5..fafd1f71348e 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -325,7 +325,7 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
bcm2835_init_local_timer_frequency();
- intc.domain = irq_domain_add_linear(node, LAST_IRQ + 1,
+ intc.domain = irq_domain_create_linear(of_fwnode_handle(node), LAST_IRQ + 1,
&bcm2836_arm_irqchip_intc_ops,
NULL);
if (!intc.domain)
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index 90daa274ef23..ca4e141c5bc2 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -316,7 +316,7 @@ static int __init bcm6345_l1_of_init(struct device_node *dn,
raw_spin_lock_init(&intc->lock);
- intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
+ intc->domain = irq_domain_create_linear(of_fwnode_handle(dn), IRQS_PER_WORD * intc->n_words,
&bcm6345_l1_domain_ops,
intc);
if (!intc->domain) {
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 36e71af054e9..04fac0cc857f 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -416,7 +416,7 @@ static int __init bcm7038_l1_of_init(struct device_node *dn,
}
}
- intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
+ intc->domain = irq_domain_create_linear(of_fwnode_handle(dn), IRQS_PER_WORD * intc->n_words,
&bcm7038_l1_domain_ops,
intc);
if (!intc->domain) {
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 1e9dab6e0d86..ff22c3104401 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -63,16 +63,15 @@ static void bcm7120_l2_intc_irq_handle(struct irq_desc *desc)
for (idx = 0; idx < b->n_words; idx++) {
int base = idx * IRQS_PER_WORD;
- struct irq_chip_generic *gc =
- irq_get_domain_generic_chip(b->domain, base);
+ struct irq_chip_generic *gc;
unsigned long pending;
int hwirq;
- irq_gc_lock(gc);
- pending = irq_reg_readl(gc, b->stat_offset[idx]) &
- gc->mask_cache &
- data->irq_map_mask[idx];
- irq_gc_unlock(gc);
+ gc = irq_get_domain_generic_chip(b->domain, base);
+ scoped_guard (raw_spinlock, &gc->lock) {
+ pending = irq_reg_readl(gc, b->stat_offset[idx]) & gc->mask_cache &
+ data->irq_map_mask[idx];
+ }
for_each_set_bit(hwirq, &pending, IRQS_PER_WORD)
generic_handle_domain_irq(b->domain, base + hwirq);
@@ -86,11 +85,9 @@ static void bcm7120_l2_intc_suspend(struct irq_chip_generic *gc)
struct bcm7120_l2_intc_data *b = gc->private;
struct irq_chip_type *ct = gc->chip_types;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
if (b->can_wake)
- irq_reg_writel(gc, gc->mask_cache | gc->wake_active,
- ct->regs.mask);
- irq_gc_unlock(gc);
+ irq_reg_writel(gc, gc->mask_cache | gc->wake_active, ct->regs.mask);
}
static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc)
@@ -98,9 +95,8 @@ static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc)
struct irq_chip_type *ct = gc->chip_types;
/* Restore the saved mask */
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, gc->mask_cache, ct->regs.mask);
- irq_gc_unlock(gc);
}
static int bcm7120_l2_intc_init_one(struct device_node *dn,
@@ -264,7 +260,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
goto out_free_l1_data;
}
- data->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * data->n_words,
+ data->domain = irq_domain_create_linear(of_fwnode_handle(dn), IRQS_PER_WORD * data->n_words,
&irq_generic_chip_ops, NULL);
if (!data->domain) {
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index db4c9721fcf2..1bec5b2cd3f0 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -97,9 +97,8 @@ static void __brcmstb_l2_intc_suspend(struct irq_data *d, bool save)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct brcmstb_l2_intc_data *b = gc->private;
- unsigned long flags;
- irq_gc_lock_irqsave(gc, flags);
+ guard(raw_spinlock_irqsave)(&gc->lock);
/* Save the current mask */
if (save)
b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
@@ -109,7 +108,6 @@ static void __brcmstb_l2_intc_suspend(struct irq_data *d, bool save)
irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
}
- irq_gc_unlock_irqrestore(gc, flags);
}
static void brcmstb_l2_intc_shutdown(struct irq_data *d)
@@ -127,9 +125,8 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct brcmstb_l2_intc_data *b = gc->private;
- unsigned long flags;
- irq_gc_lock_irqsave(gc, flags);
+ guard(raw_spinlock_irqsave)(&gc->lock);
if (ct->chip.irq_ack) {
/* Clear unmasked non-wakeup interrupts */
irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
@@ -139,7 +136,6 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
/* Restore the saved mask */
irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
- irq_gc_unlock_irqrestore(gc, flags);
}
static int __init brcmstb_l2_intc_of_init(struct device_node *np,
@@ -182,7 +178,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
goto out_unmap;
}
- data->domain = irq_domain_add_linear(np, 32,
+ data->domain = irq_domain_create_linear(of_fwnode_handle(np), 32,
&irq_generic_chip_ops, NULL);
if (!data->domain) {
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index 48c73c948ddf..c4b73ba2323b 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -184,8 +184,8 @@ static int __init _clps711x_intc_init(struct device_node *np,
clps711x_intc->ops.map = clps711x_intc_irq_map;
clps711x_intc->ops.xlate = irq_domain_xlate_onecell;
clps711x_intc->domain =
- irq_domain_add_legacy(np, ARRAY_SIZE(clps711x_irqs),
- 0, 0, &clps711x_intc->ops, NULL);
+ irq_domain_create_legacy(of_fwnode_handle(np), ARRAY_SIZE(clps711x_irqs), 0, 0,
+ &clps711x_intc->ops, NULL);
if (!clps711x_intc->domain) {
err = -ENOMEM;
goto out_irqfree;
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index a05a7501e107..66bb39e24a52 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -351,10 +351,8 @@ static int __init irqcrossbar_init(struct device_node *node,
if (err)
return err;
- domain = irq_domain_add_hierarchy(parent_domain, 0,
- cb->max_crossbar_sources,
- node, &crossbar_domain_ops,
- NULL);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, cb->max_crossbar_sources,
+ of_fwnode_handle(node), &crossbar_domain_ops, NULL);
if (!domain) {
pr_err("%pOF: failed to allocate domain\n", node);
return -ENOMEM;
diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c
index 6710691e4c25..5b7150705d29 100644
--- a/drivers/irqchip/irq-csky-apb-intc.c
+++ b/drivers/irqchip/irq-csky-apb-intc.c
@@ -50,11 +50,10 @@ static void irq_ck_mask_set_bit(struct irq_data *d)
unsigned long ifr = ct->regs.mask - 8;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
*ct->mask_cache |= mask;
irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
irq_reg_writel(gc, irq_reg_readl(gc, ifr) & ~mask, ifr);
- irq_gc_unlock(gc);
}
static void __init ck_set_gc(struct device_node *node, void __iomem *reg_base,
@@ -114,7 +113,7 @@ ck_intc_init_comm(struct device_node *node, struct device_node *parent)
return -EINVAL;
}
- root_domain = irq_domain_add_linear(node, nr_irq,
+ root_domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irq,
&irq_generic_chip_ops, NULL);
if (!root_domain) {
pr_err("C-SKY Intc irq_domain_add failed.\n");
diff --git a/drivers/irqchip/irq-csky-mpintc.c b/drivers/irqchip/irq-csky-mpintc.c
index 4aebd67d4f8f..1d1f5091f26f 100644
--- a/drivers/irqchip/irq-csky-mpintc.c
+++ b/drivers/irqchip/irq-csky-mpintc.c
@@ -255,7 +255,7 @@ csky_mpintc_init(struct device_node *node, struct device_node *parent)
writel_relaxed(BIT(0), INTCG_base + INTCG_ICTLR);
}
- root_domain = irq_domain_add_linear(node, nr_irq, &csky_irqdomain_ops,
+ root_domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irq, &csky_irqdomain_ops,
NULL);
if (!root_domain)
return -ENXIO;
diff --git a/drivers/irqchip/irq-davinci-cp-intc.c b/drivers/irqchip/irq-davinci-cp-intc.c
index d7948c55f542..00cdcc90f614 100644
--- a/drivers/irqchip/irq-davinci-cp-intc.c
+++ b/drivers/irqchip/irq-davinci-cp-intc.c
@@ -204,8 +204,10 @@ static int __init davinci_cp_intc_do_init(struct resource *res, unsigned int num
return irq_base;
}
- davinci_cp_intc_irq_domain = irq_domain_add_legacy(node, num_irqs, irq_base, 0,
- &davinci_cp_intc_irq_domain_ops, NULL);
+ davinci_cp_intc_irq_domain = irq_domain_create_legacy(of_fwnode_handle(node), num_irqs,
+ irq_base, 0,
+ &davinci_cp_intc_irq_domain_ops,
+ NULL);
if (!davinci_cp_intc_irq_domain) {
pr_err("%s: unable to create an interrupt domain\n", __func__);
diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c
index 3b0d78aac13b..eb5a8de82751 100644
--- a/drivers/irqchip/irq-digicolor.c
+++ b/drivers/irqchip/irq-digicolor.c
@@ -95,7 +95,7 @@ static int __init digicolor_of_init(struct device_node *node,
regmap_write(ucregs, UC_IRQ_CONTROL, 1);
digicolor_irq_domain =
- irq_domain_add_linear(node, 64, &irq_generic_chip_ops, NULL);
+ irq_domain_create_linear(of_fwnode_handle(node), 64, &irq_generic_chip_ops, NULL);
if (!digicolor_irq_domain) {
pr_err("%pOF: unable to create IRQ domain\n", node);
return -ENOMEM;
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
index d5c1c750c8d2..4240a0dbf627 100644
--- a/drivers/irqchip/irq-dw-apb-ictl.c
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -101,10 +101,9 @@ static void dw_apb_ictl_resume(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
writel_relaxed(~0, gc->reg_base + ct->regs.enable);
writel_relaxed(*ct->mask_cache, gc->reg_base + ct->regs.mask);
- irq_gc_unlock(gc);
}
#else
#define dw_apb_ictl_resume NULL
@@ -173,7 +172,7 @@ static int __init dw_apb_ictl_init(struct device_node *np,
else
nrirqs = fls(readl_relaxed(iobase + APB_INT_ENABLE_L));
- domain = irq_domain_add_linear(np, nrirqs, domain_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(np), nrirqs, domain_ops, NULL);
if (!domain) {
pr_err("%pOF: unable to add irq domain\n", np);
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-econet-en751221.c b/drivers/irqchip/irq-econet-en751221.c
new file mode 100644
index 000000000000..d83d5eb12795
--- /dev/null
+++ b/drivers/irqchip/irq-econet-en751221.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * EN751221 Interrupt Controller Driver.
+ *
+ * The EcoNet EN751221 Interrupt Controller is a simple interrupt controller
+ * designed for the MIPS 34Kc MT SMP processor with 2 VPEs. Each interrupt can
+ * be routed to either VPE but not both, so to support per-CPU interrupts, a
+ * secondary IRQ number is allocated to control masking/unmasking on VPE#1. In
+ * this driver, these are called "shadow interrupts". The assignment of shadow
+ * interrupts is defined by the SoC integrator when wiring the interrupt lines,
+ * so they are configurable in the device tree.
+ *
+ * If an interrupt (say 30) needs per-CPU capability, the SoC integrator
+ * allocates another IRQ number (say 29) to be its shadow. The device tree
+ * reflects this by adding the pair <30 29> to the "econet,shadow-interrupts"
+ * property.
+ *
+ * When VPE#1 requests IRQ 30, the driver manipulates the mask bit for IRQ 29,
+ * telling the hardware to mask VPE#1's view of IRQ 30.
+ *
+ * Copyright (C) 2025 Caleb James DeLisle <cjd@cjdns.fr>
+ */
+
+#include <linux/cleanup.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+
+#define IRQ_COUNT 40
+
+#define NOT_PERCPU 0xff
+#define IS_SHADOW 0xfe
+
+#define REG_MASK0 0x04
+#define REG_MASK1 0x50
+#define REG_PENDING0 0x08
+#define REG_PENDING1 0x54
+
+/**
+ * @membase: Base address of the interrupt controller registers
+ * @interrupt_shadows: Array of all interrupts, for each value,
+ * - NOT_PERCPU: This interrupt is not per-cpu, so it has no shadow
+ * - IS_SHADOW: This interrupt is a shadow of another per-cpu interrupt
+ * - else: This is a per-cpu interrupt whose shadow is the value
+ */
+static struct {
+ void __iomem *membase;
+ u8 interrupt_shadows[IRQ_COUNT];
+} econet_intc __ro_after_init;
+
+static DEFINE_RAW_SPINLOCK(irq_lock);
+
+/* IRQs must be disabled */
+static void econet_wreg(u32 reg, u32 val, u32 mask)
+{
+ u32 v;
+
+ guard(raw_spinlock)(&irq_lock);
+
+ v = ioread32(econet_intc.membase + reg);
+ v &= ~mask;
+ v |= val & mask;
+ iowrite32(v, econet_intc.membase + reg);
+}
+
+/* IRQs must be disabled */
+static void econet_chmask(u32 hwirq, bool unmask)
+{
+ u32 reg, mask;
+ u8 shadow;
+
+ /*
+ * If the IRQ is a shadow, it should never be manipulated directly.
+ * It should only be masked/unmasked as a result of the "real" per-cpu
+ * irq being manipulated by a thread running on VPE#1.
+ * If it is per-cpu (has a shadow), and we're on VPE#1, the shadow is what we mask.
+ * This is single processor only, so smp_processor_id() never exceeds 1.
+ */
+ shadow = econet_intc.interrupt_shadows[hwirq];
+ if (WARN_ON_ONCE(shadow == IS_SHADOW))
+ return;
+ else if (shadow != NOT_PERCPU && smp_processor_id() == 1)
+ hwirq = shadow;
+
+ if (hwirq >= 32) {
+ reg = REG_MASK1;
+ mask = BIT(hwirq - 32);
+ } else {
+ reg = REG_MASK0;
+ mask = BIT(hwirq);
+ }
+
+ econet_wreg(reg, unmask ? mask : 0, mask);
+}
+
+/* IRQs must be disabled */
+static void econet_intc_mask(struct irq_data *d)
+{
+ econet_chmask(d->hwirq, false);
+}
+
+/* IRQs must be disabled */
+static void econet_intc_unmask(struct irq_data *d)
+{
+ econet_chmask(d->hwirq, true);
+}
+
+static void econet_mask_all(void)
+{
+ /* IRQs are generally disabled during init, but guarding here makes it non-obligatory. */
+ guard(irqsave)();
+ econet_wreg(REG_MASK0, 0, ~0);
+ econet_wreg(REG_MASK1, 0, ~0);
+}
+
+static void econet_intc_handle_pending(struct irq_domain *d, u32 pending, u32 offset)
+{
+ int hwirq;
+
+ while (pending) {
+ hwirq = fls(pending) - 1;
+ generic_handle_domain_irq(d, hwirq + offset);
+ pending &= ~BIT(hwirq);
+ }
+}
+
+static void econet_intc_from_parent(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_domain *domain;
+ u32 pending0, pending1;
+
+ chained_irq_enter(chip, desc);
+
+ pending0 = ioread32(econet_intc.membase + REG_PENDING0);
+ pending1 = ioread32(econet_intc.membase + REG_PENDING1);
+
+ if (unlikely(!(pending0 | pending1))) {
+ spurious_interrupt();
+ } else {
+ domain = irq_desc_get_handler_data(desc);
+ econet_intc_handle_pending(domain, pending0, 0);
+ econet_intc_handle_pending(domain, pending1, 32);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static const struct irq_chip econet_irq_chip;
+
+static int econet_intc_map(struct irq_domain *d, u32 irq, irq_hw_number_t hwirq)
+{
+ int ret;
+
+ if (hwirq >= IRQ_COUNT) {
+ pr_err("%s: hwirq %lu out of range\n", __func__, hwirq);
+ return -EINVAL;
+ } else if (econet_intc.interrupt_shadows[hwirq] == IS_SHADOW) {
+ pr_err("%s: can't map hwirq %lu, it is a shadow interrupt\n", __func__, hwirq);
+ return -EINVAL;
+ }
+
+ if (econet_intc.interrupt_shadows[hwirq] == NOT_PERCPU) {
+ irq_set_chip_and_handler(irq, &econet_irq_chip, handle_level_irq);
+ } else {
+ irq_set_chip_and_handler(irq, &econet_irq_chip, handle_percpu_devid_irq);
+ ret = irq_set_percpu_devid(irq);
+ if (ret)
+ pr_warn("%s: Failed irq_set_percpu_devid for %u: %d\n", d->name, irq, ret);
+ }
+
+ irq_set_chip_data(irq, NULL);
+ return 0;
+}
+
+static const struct irq_chip econet_irq_chip = {
+ .name = "en751221-intc",
+ .irq_unmask = econet_intc_unmask,
+ .irq_mask = econet_intc_mask,
+ .irq_mask_ack = econet_intc_mask,
+};
+
+static const struct irq_domain_ops econet_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = econet_intc_map
+};
+
+static int __init get_shadow_interrupts(struct device_node *node)
+{
+ const char *field = "econet,shadow-interrupts";
+ int num_shadows;
+
+ num_shadows = of_property_count_u32_elems(node, field);
+
+ memset(econet_intc.interrupt_shadows, NOT_PERCPU,
+ sizeof(econet_intc.interrupt_shadows));
+
+ if (num_shadows <= 0) {
+ return 0;
+ } else if (num_shadows % 2) {
+ pr_err("%pOF: %s count is odd, ignoring\n", node, field);
+ return 0;
+ }
+
+ u32 *shadows __free(kfree) = kmalloc_array(num_shadows, sizeof(u32), GFP_KERNEL);
+ if (!shadows)
+ return -ENOMEM;
+
+ if (of_property_read_u32_array(node, field, shadows, num_shadows)) {
+ pr_err("%pOF: Failed to read %s\n", node, field);
+ return -EINVAL;
+ }
+
+ for (int i = 0; i < num_shadows; i += 2) {
+ u32 shadow = shadows[i + 1];
+ u32 target = shadows[i];
+
+ if (shadow >= IRQ_COUNT) {
+ pr_err("%pOF: %s[%d] shadow(%d) out of range\n",
+ node, field, i + 1, shadow);
+ continue;
+ }
+
+ if (target >= IRQ_COUNT) {
+ pr_err("%pOF: %s[%d] target(%d) out of range\n", node, field, i, target);
+ continue;
+ }
+
+ if (econet_intc.interrupt_shadows[target] != NOT_PERCPU) {
+ pr_err("%pOF: %s[%d] target(%d) already has a shadow\n",
+ node, field, i, target);
+ continue;
+ }
+
+ if (econet_intc.interrupt_shadows[shadow] != NOT_PERCPU) {
+ pr_err("%pOF: %s[%d] shadow(%d) already has a target\n",
+ node, field, i + 1, shadow);
+ continue;
+ }
+
+ econet_intc.interrupt_shadows[target] = shadow;
+ econet_intc.interrupt_shadows[shadow] = IS_SHADOW;
+ }
+
+ return 0;
+}
+
+static int __init econet_intc_of_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *domain;
+ struct resource res;
+ int ret, irq;
+
+ ret = get_shadow_interrupts(node);
+ if (ret)
+ return ret;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ pr_err("%pOF: DT: Failed to get IRQ from 'interrupts'\n", node);
+ return -EINVAL;
+ }
+
+ if (of_address_to_resource(node, 0, &res)) {
+ pr_err("%pOF: DT: Failed to get 'reg'\n", node);
+ ret = -EINVAL;
+ goto err_dispose_mapping;
+ }
+
+ if (!request_mem_region(res.start, resource_size(&res), res.name)) {
+ pr_err("%pOF: Failed to request memory\n", node);
+ ret = -EBUSY;
+ goto err_dispose_mapping;
+ }
+
+ econet_intc.membase = ioremap(res.start, resource_size(&res));
+ if (!econet_intc.membase) {
+ pr_err("%pOF: Failed to remap membase\n", node);
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+ econet_mask_all();
+
+ domain = irq_domain_create_linear(of_fwnode_handle(node), IRQ_COUNT,
+ &econet_domain_ops, NULL);
+ if (!domain) {
+ pr_err("%pOF: Failed to add irqdomain\n", node);
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ irq_set_chained_handler_and_data(irq, econet_intc_from_parent, domain);
+
+ return 0;
+
+err_unmap:
+ iounmap(econet_intc.membase);
+err_release:
+ release_mem_region(res.start, resource_size(&res));
+err_dispose_mapping:
+ irq_dispose_mapping(irq);
+ return ret;
+}
+
+IRQCHIP_DECLARE(econet_en751221_intc, "econet,en751221-intc", econet_intc_of_init);
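
To make the shadow-interrupt table concrete: using the header comment's example pair <30 29> in econet,shadow-interrupts, get_shadow_interrupts() would fill the lookup table as sketched below (illustrative state, not driver code):

/* State after parsing a hypothetical econet,shadow-interrupts = <30 29>; */
u8 shadows[IRQ_COUNT];

memset(shadows, NOT_PERCPU, sizeof(shadows)); /* default: no shadow */
shadows[30] = 29;        /* IRQ 30 is per-CPU; IRQ 29 masks VPE#1's view */
shadows[29] = IS_SHADOW; /* IRQ 29 may not be mapped or touched directly */

/*
 * econet_chmask(30, ...) then flips mask bit 30 when running on VPE#0
 * but bit 29 on VPE#1, giving each VPE an independent mask for the line.
 */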
diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c
index b91c358ea6db..a59a66d79da6 100644
--- a/drivers/irqchip/irq-ftintc010.c
+++ b/drivers/irqchip/irq-ftintc010.c
@@ -180,8 +180,9 @@ static int __init ft010_of_init_irq(struct device_node *node,
writel(0, FT010_IRQ_MASK(f->base));
writel(0, FT010_FIQ_MASK(f->base));
- f->domain = irq_domain_add_simple(node, FT010_NUM_IRQS, 0,
- &ft010_irqdomain_ops, f);
+ f->domain = irq_domain_create_simple(of_fwnode_handle(node),
+ FT010_NUM_IRQS, 0,
+ &ft010_irqdomain_ops, f);
set_handle_irq(ft010_irqchip_handle_irq);
return 0;
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index cc6a6c1585d2..24ef5af569fe 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -26,7 +26,7 @@
#include <linux/irqchip/arm-gic.h>
#include <linux/irqchip/arm-gic-common.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
/*
* MSI_TYPER:
@@ -261,23 +261,23 @@ static struct msi_parent_ops gicv2m_msi_parent_ops = {
static __init int gicv2m_allocate_domains(struct irq_domain *parent)
{
- struct irq_domain *inner_domain;
+ struct irq_domain_info info = {
+ .ops = &gicv2m_domain_ops,
+ .parent = parent,
+ };
struct v2m_data *v2m;
v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
if (!v2m)
return 0;
- inner_domain = irq_domain_create_hierarchy(parent, 0, 0, v2m->fwnode,
- &gicv2m_domain_ops, v2m);
- if (!inner_domain) {
+ info.host_data = v2m;
+ info.fwnode = v2m->fwnode;
+
+ if (!msi_create_parent_irq_domain(&info, &gicv2m_msi_parent_ops)) {
pr_err("Failed to create GICv2m domain\n");
return -ENOMEM;
}
-
- irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
- inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
- inner_domain->msi_parent_ops = &gicv2m_msi_parent_ops;
return 0;
}
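
The GICv2m hunk above, the MBI hunk, and the ITS hunk further down share one shape: open-coded irq_domain_create_hierarchy() plus manual bus-token, flag, and msi_parent_ops assignment collapses into msi_create_parent_irq_domain(). A generic sketch, with example_* names as illustrative placeholders:

static const struct irq_domain_ops example_domain_ops;
static const struct msi_parent_ops example_msi_parent_ops;

static int example_allocate_msi_parent(struct irq_domain *parent)
{
	struct irq_domain_info info = {
		.fwnode = parent->fwnode,
		.ops    = &example_domain_ops,
		.parent = parent,
	};

	/*
	 * One call now creates the hierarchy and applies DOMAIN_BUS_NEXUS,
	 * IRQ_DOMAIN_FLAG_MSI_PARENT and the msi_parent_ops that each
	 * driver previously set by hand.
	 */
	if (!msi_create_parent_irq_domain(&info, &example_msi_parent_ops))
		return -ENOMEM;
	return 0;
}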
diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
index 8e87fc35f8aa..11549d85f23b 100644
--- a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
@@ -152,7 +152,7 @@ static void __init its_fsl_mc_of_msi_init(void)
if (!of_property_read_bool(np, "msi-controller"))
continue;
- its_fsl_mc_msi_init_one(of_node_to_fwnode(np),
+ its_fsl_mc_msi_init_one(of_fwnode_handle(np),
np->full_name);
}
}
diff --git a/drivers/irqchip/irq-gic-v3-its-msi-parent.c b/drivers/irqchip/irq-gic-v3-its-msi-parent.c
index c5a7eb1c0419..a5e110ffdd88 100644
--- a/drivers/irqchip/irq-gic-v3-its-msi-parent.c
+++ b/drivers/irqchip/irq-gic-v3-its-msi-parent.c
@@ -8,7 +8,7 @@
#include <linux/pci.h>
#include "irq-gic-common.h"
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#define ITS_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
MSI_FLAG_USE_DEF_CHIP_OPS | \
@@ -68,17 +68,6 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain->parent, pdev);
/*
- * @domain->msi_domain_info->hwsize contains the size of the
- * MSI[-X] domain, but vector allocation happens one by one. This
- * needs some thought when MSI comes into play as the size of MSI
- * might be unknown at domain creation time and therefore set to
- * MSI_MAX_INDEX.
- */
- msi_info = msi_get_domain_info(domain);
- if (msi_info->hwsize > nvec)
- nvec = msi_info->hwsize;
-
- /*
* Always allocate a power of 2, and special case device 0 for
* broken systems where the DevID is not wired (and all devices
* appear as DevID 0). For that reason, we generously allocate a
@@ -118,6 +107,14 @@ static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
index++;
} while (!ret);
+ if (ret) {
+ struct device_node *np = NULL;
+
+ ret = of_map_id(dev->of_node, dev->id, "msi-map", "msi-map-mask", &np, dev_id);
+ if (np)
+ of_node_put(np);
+ }
+
return ret;
}
@@ -143,14 +140,6 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
/* ITS specific DeviceID, as the core ITS ignores dev. */
info->scratchpad[0].ul = dev_id;
- /*
- * @domain->msi_domain_info->hwsize contains the size of the device
- * domain, but vector allocation happens one by one.
- */
- msi_info = msi_get_domain_info(domain);
- if (msi_info->hwsize > nvec)
- nvec = msi_info->hwsize;
-
/* Allocate at least 32 MSIs, and always as a power of 2 */
nvec = max_t(int, 32, roundup_pow_of_two(nvec));
@@ -159,6 +148,14 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
dev, nvec, info);
}
+static void its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info)
+{
+ struct msi_domain_info *msi_info;
+
+ msi_info = msi_get_domain_info(domain->parent);
+ msi_info->ops->msi_teardown(domain->parent, info);
+}
+
static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
struct irq_domain *real_parent, struct msi_domain_info *info)
{
@@ -182,6 +179,7 @@ static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
* %MSI_MAX_INDEX.
*/
info->ops->msi_prepare = its_pci_msi_prepare;
+ info->ops->msi_teardown = its_msi_teardown;
break;
case DOMAIN_BUS_DEVICE_MSI:
case DOMAIN_BUS_WIRED_TO_MSI:
@@ -190,6 +188,7 @@ static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
* size is also known at domain creation time.
*/
info->ops->msi_prepare = its_pmsi_prepare;
+ info->ops->msi_teardown = its_msi_teardown;
break;
default:
/* Confused. How did the lib return true? */
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 0115ad6c8259..d54fa0638dc4 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -41,7 +41,7 @@
#include <asm/exception.h>
#include "irq-gic-common.h"
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
@@ -125,6 +125,8 @@ struct its_node {
int vlpi_redist_offset;
};
+static DEFINE_PER_CPU(struct its_node *, local_4_1_its);
+
#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP))
#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
@@ -2778,6 +2780,7 @@ static u64 inherit_vpe_l1_table_from_its(void)
}
val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
+ *this_cpu_ptr(&local_4_1_its) = its;
return val;
}
@@ -2815,6 +2818,7 @@ static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
*mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
+ *this_cpu_ptr(&local_4_1_its) = *per_cpu_ptr(&local_4_1_its, cpu);
return val;
}
@@ -3620,8 +3624,33 @@ out:
return err;
}
+static void its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info)
+{
+ struct its_device *its_dev = info->scratchpad[0].ptr;
+
+ guard(mutex)(&its_dev->its->dev_alloc_lock);
+
+ /* If the device is shared, keep everything around */
+ if (its_dev->shared)
+ return;
+
+ /* LPIs should have been already unmapped at this stage */
+ if (WARN_ON_ONCE(!bitmap_empty(its_dev->event_map.lpi_map,
+ its_dev->event_map.nr_lpis)))
+ return;
+
+ its_lpi_free(its_dev->event_map.lpi_map,
+ its_dev->event_map.lpi_base,
+ its_dev->event_map.nr_lpis);
+
+ /* Unmap device/itt, and get rid of the tracking */
+ its_send_mapd(its_dev, 0);
+ its_free_device(its_dev);
+}
+
static struct msi_domain_ops its_msi_domain_ops = {
.msi_prepare = its_msi_prepare,
+ .msi_teardown = its_msi_teardown,
};
static int its_irq_gic_domain_alloc(struct irq_domain *domain,
@@ -3722,7 +3751,6 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- struct its_node *its = its_dev->its;
int i;
bitmap_release_region(its_dev->event_map.lpi_map,
@@ -3736,26 +3764,6 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
irq_domain_reset_irq_data(data);
}
- mutex_lock(&its->dev_alloc_lock);
-
- /*
- * If all interrupts have been freed, start mopping the
- * floor. This is conditioned on the device not being shared.
- */
- if (!its_dev->shared &&
- bitmap_empty(its_dev->event_map.lpi_map,
- its_dev->event_map.nr_lpis)) {
- its_lpi_free(its_dev->event_map.lpi_map,
- its_dev->event_map.lpi_base,
- its_dev->event_map.nr_lpis);
-
- /* Unmap device/itt */
- its_send_mapd(its_dev, 0);
- its_free_device(its_dev);
- }
-
- mutex_unlock(&its->dev_alloc_lock);
-
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
@@ -4180,7 +4188,7 @@ static struct irq_chip its_vpe_irq_chip = {
static struct its_node *find_4_1_its(void)
{
- static struct its_node *its = NULL;
+ struct its_node *its = *this_cpu_ptr(&local_4_1_its);
if (!its) {
list_for_each_entry(its, &its_nodes, entry) {
@@ -5118,7 +5126,12 @@ out_unmap:
static int its_init_domain(struct its_node *its)
{
- struct irq_domain *inner_domain;
+ struct irq_domain_info dom_info = {
+ .fwnode = its->fwnode_handle,
+ .ops = &its_domain_ops,
+ .domain_flags = its->msi_domain_flags,
+ .parent = its_parent,
+ };
struct msi_domain_info *info;
info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -5127,21 +5140,12 @@ static int its_init_domain(struct its_node *its)
info->ops = &its_msi_domain_ops;
info->data = its;
+ dom_info.host_data = info;
- inner_domain = irq_domain_create_hierarchy(its_parent,
- its->msi_domain_flags, 0,
- its->fwnode_handle, &its_domain_ops,
- info);
- if (!inner_domain) {
+ if (!msi_create_parent_irq_domain(&dom_info, &gic_v3_its_msi_parent_ops)) {
kfree(info);
return -ENOMEM;
}
-
- irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
-
- inner_domain->msi_parent_ops = &gic_v3_its_msi_parent_ops;
- inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
-
return 0;
}
@@ -5518,7 +5522,7 @@ static struct its_node __init *its_node_init(struct resource *res,
its->base = its_base;
its->phys_base = res->start;
its->get_msi_base = its_irq_get_msi_base;
- its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI;
+ its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI | IRQ_DOMAIN_FLAG_MSI_IMMUTABLE;
its->numa_node = numa_node;
its->fwnode_handle = handle;
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index 647b18e24e0c..aa11bbe8026a 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -18,7 +18,7 @@
#include <linux/irqchip/arm-gic-v3.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
struct mbi_range {
u32 spi_start;
@@ -206,17 +206,13 @@ static const struct msi_parent_ops gic_v3_mbi_msi_parent_ops = {
static int mbi_allocate_domain(struct irq_domain *parent)
{
- struct irq_domain *nexus_domain;
+ struct irq_domain_info info = {
+ .fwnode = parent->fwnode,
+ .ops = &mbi_domain_ops,
+ .parent = parent,
+ };
- nexus_domain = irq_domain_create_hierarchy(parent, 0, 0, parent->fwnode,
- &mbi_domain_ops, NULL);
- if (!nexus_domain)
- return -ENOMEM;
-
- irq_domain_update_bus_token(nexus_domain, DOMAIN_BUS_NEXUS);
- nexus_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
- nexus_domain->msi_parent_ops = &gic_v3_mbi_msi_parent_ops;
- return 0;
+ return msi_create_parent_irq_domain(&info, &gic_v3_mbi_msi_parent_ops) ? 0 : -ENOMEM;
}
int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 270d7a4d85a6..efc791c43d44 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1826,7 +1826,7 @@ static int partition_domain_translate(struct irq_domain *d,
ppi_idx = __gic_get_ppi_index(ppi_intid);
ret = partition_translate_id(gic_data.ppi_descs[ppi_idx],
- of_node_to_fwnode(np));
+ of_fwnode_handle(np));
if (ret < 0)
return ret;
@@ -2192,7 +2192,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
part = &parts[part_idx];
- part->partition_id = of_node_to_fwnode(child_part);
+ part->partition_id = of_fwnode_handle(child_part);
pr_info("GIC: PPI partition %pOFn[%d] { ",
child_part, part_idx);
diff --git a/drivers/irqchip/irq-goldfish-pic.c b/drivers/irqchip/irq-goldfish-pic.c
index 513f6edbbe95..a8b23b507ecd 100644
--- a/drivers/irqchip/irq-goldfish-pic.c
+++ b/drivers/irqchip/irq-goldfish-pic.c
@@ -101,10 +101,9 @@ static int __init goldfish_pic_of_init(struct device_node *of_node,
irq_setup_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS), 0,
IRQ_NOPROBE | IRQ_LEVEL, 0);
- gfpic->irq_domain = irq_domain_add_legacy(of_node, GFPIC_NR_IRQS,
- GFPIC_IRQ_BASE, 0,
- &goldfish_irq_domain_ops,
- NULL);
+ gfpic->irq_domain = irq_domain_create_legacy(of_fwnode_handle(of_node), GFPIC_NR_IRQS,
+ GFPIC_IRQ_BASE, 0, &goldfish_irq_domain_ops,
+ NULL);
if (!gfpic->irq_domain) {
pr_err("Failed to add irqdomain!\n");
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 31c3f70a5d5e..b7958c5a1221 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -386,10 +386,8 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
return -EINVAL;
}
- hip04_data.domain = irq_domain_add_legacy(node, nr_irqs, irq_base,
- 0,
- &hip04_irq_domain_ops,
- &hip04_data);
+ hip04_data.domain = irq_domain_create_legacy(of_fwnode_handle(node), nr_irqs, irq_base, 0,
+ &hip04_irq_domain_ops, &hip04_data);
if (WARN_ON(!hip04_data.domain))
return -EINVAL;
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index 115bdcffab24..91b2f587119c 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -313,8 +313,8 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
init_8259A(0);
- domain = irq_domain_add_legacy(node, 16, I8259A_IRQ_BASE, 0,
- &i8259A_ops, NULL);
+ domain = irq_domain_create_legacy(of_fwnode_handle(node), 16, I8259A_IRQ_BASE, 0,
+ &i8259A_ops, NULL);
if (!domain)
panic("Failed to add i8259 IRQ domain");
diff --git a/drivers/irqchip/irq-idt3243x.c b/drivers/irqchip/irq-idt3243x.c
index 0732a0e9af62..f8324fb1fe8f 100644
--- a/drivers/irqchip/irq-idt3243x.c
+++ b/drivers/irqchip/irq-idt3243x.c
@@ -72,7 +72,7 @@ static int idt_pic_init(struct device_node *of_node, struct device_node *parent)
goto out_unmap_irq;
}
- domain = irq_domain_add_linear(of_node, IDT_PIC_NR_IRQS,
+ domain = irq_domain_create_linear(of_fwnode_handle(of_node), IDT_PIC_NR_IRQS,
&irq_generic_chip_ops, NULL);
if (!domain) {
pr_err("Failed to add irqdomain!\n");
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
index 85f80bac0961..f0410d5d7315 100644
--- a/drivers/irqchip/irq-imgpdc.c
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -372,7 +372,7 @@ static int pdc_intc_probe(struct platform_device *pdev)
priv->syswake_irq = irq;
/* Set up an IRQ domain */
- priv->domain = irq_domain_add_linear(node, 16, &irq_generic_chip_ops,
+ priv->domain = irq_domain_create_linear(of_fwnode_handle(node), 16, &irq_generic_chip_ops,
priv);
if (unlikely(!priv->domain)) {
dev_err(&pdev->dev, "cannot add IRQ domain\n");
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 095ae8e3217e..b91f5c14b405 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -240,8 +240,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
return -ENOMEM;
}
- domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
- node, &gpcv2_irqchip_data_domain_ops, cd);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
+ of_fwnode_handle(node), &gpcv2_irqchip_data_domain_ops, cd);
if (!domain) {
iounmap(cd->gpc_base);
kfree(cd);
diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c
index 787543d07565..5f9b204d350b 100644
--- a/drivers/irqchip/irq-imx-intmux.c
+++ b/drivers/irqchip/irq-imx-intmux.c
@@ -254,7 +254,7 @@ static int imx_intmux_probe(struct platform_device *pdev)
goto out;
}
- domain = irq_domain_add_linear(np, 32, &imx_intmux_domain_ops,
+ domain = irq_domain_create_linear(of_fwnode_handle(np), 32, &imx_intmux_domain_ops,
&data->irqchip_data[i]);
if (!domain) {
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index afbfcce3b1e3..6dc9ac48fee5 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -212,7 +212,7 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
/* steer all IRQs into configured channel */
writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
- data->domain = irq_domain_add_linear(np, data->reg_num * 32,
+ data->domain = irq_domain_create_linear(of_fwnode_handle(np), data->reg_num * 32,
&imx_irqsteer_domain_ops, data);
if (!data->domain) {
dev_err(&pdev->dev, "failed to create IRQ domain\n");
diff --git a/drivers/irqchip/irq-imx-mu-msi.c b/drivers/irqchip/irq-imx-mu-msi.c
index 69aacdfc8bef..137da1927d14 100644
--- a/drivers/irqchip/irq-imx-mu-msi.c
+++ b/drivers/irqchip/irq-imx-mu-msi.c
@@ -24,7 +24,7 @@
#include <linux/pm_domain.h>
#include <linux/spinlock.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#define IMX_MU_CHANS 4
diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c
index 3363f83bd7e9..794ecba717c9 100644
--- a/drivers/irqchip/irq-ingenic-tcu.c
+++ b/drivers/irqchip/irq-ingenic-tcu.c
@@ -52,11 +52,10 @@ static void ingenic_tcu_gc_unmask_enable_reg(struct irq_data *d)
struct regmap *map = gc->private;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
regmap_write(map, ct->regs.ack, mask);
regmap_write(map, ct->regs.enable, mask);
*ct->mask_cache |= mask;
- irq_gc_unlock(gc);
}
static void ingenic_tcu_gc_mask_disable_reg(struct irq_data *d)
@@ -66,10 +65,9 @@ static void ingenic_tcu_gc_mask_disable_reg(struct irq_data *d)
struct regmap *map = gc->private;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
regmap_write(map, ct->regs.disable, mask);
*ct->mask_cache &= ~mask;
- irq_gc_unlock(gc);
}
static void ingenic_tcu_gc_mask_disable_reg_and_ack(struct irq_data *d)
@@ -79,10 +77,9 @@ static void ingenic_tcu_gc_mask_disable_reg_and_ack(struct irq_data *d)
struct regmap *map = gc->private;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
regmap_write(map, ct->regs.ack, mask);
regmap_write(map, ct->regs.disable, mask);
- irq_gc_unlock(gc);
}
static int __init ingenic_tcu_irq_init(struct device_node *np,
@@ -114,8 +111,8 @@ static int __init ingenic_tcu_irq_init(struct device_node *np,
tcu->nb_parent_irqs = irqs;
- tcu->domain = irq_domain_add_linear(np, 32, &irq_generic_chip_ops,
- NULL);
+ tcu->domain = irq_domain_create_linear(of_fwnode_handle(np), 32, &irq_generic_chip_ops,
+ NULL);
if (!tcu->domain) {
ret = -ENOMEM;
goto err_free_tcu;
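
The ingenic-tcu hunks show the other recurring conversion: open-coded irq_gc_lock()/irq_gc_unlock() pairs become guard(raw_spinlock)(&gc->lock), which drops gc->lock automatically at scope exit and lets error paths return directly instead of jumping to an unlock label. A sketch under the same assumptions (foo_* names and the register offset are made up):

#include <linux/cleanup.h>
#include <linux/irq.h>

#define FOO_REG_DISABLE	0x04	/* hypothetical register offset */

static void foo_gc_mask(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);

	/* Was: irq_gc_lock(gc); ... irq_gc_unlock(gc); */
	guard(raw_spinlock)(&gc->lock);
	irq_reg_writel(gc, d->mask, FOO_REG_DISABLE);
	*ct->mask_cache &= ~d->mask;
	/* gc->lock is released automatically here, on any return path */
}
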
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index cee839ca627e..52393724f213 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -90,8 +90,8 @@ static int __init ingenic_intc_of_init(struct device_node *node,
goto out_unmap_irq;
}
- domain = irq_domain_add_linear(node, num_chips * 32,
- &irq_generic_chip_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(node), num_chips * 32,
+ &irq_generic_chip_ops, NULL);
if (!domain) {
err = -ENOMEM;
goto out_unmap_base;
diff --git a/drivers/irqchip/irq-ixp4xx.c b/drivers/irqchip/irq-ixp4xx.c
index f23b02f62a5c..a9a5a52b818a 100644
--- a/drivers/irqchip/irq-ixp4xx.c
+++ b/drivers/irqchip/irq-ixp4xx.c
@@ -261,7 +261,7 @@ static int __init ixp4xx_of_init_irq(struct device_node *np,
pr_crit("IXP4XX: could not ioremap interrupt controller\n");
return -ENODEV;
}
- fwnode = of_node_to_fwnode(np);
+ fwnode = of_fwnode_handle(np);
/* These chip variants have 64 interrupts */
is_356 = of_device_is_compatible(np, "intel,ixp43x-interrupt") ||
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
index 1f613eb7b7f0..94c05cf974be 100644
--- a/drivers/irqchip/irq-jcore-aic.c
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -107,9 +107,8 @@ static int __init aic_irq_of_init(struct device_node *node,
if (ret < 0)
return ret;
- domain = irq_domain_add_legacy(node, dom_sz - min_irq, min_irq, min_irq,
- &jcore_aic_irqdomain_ops,
- &jcore_aic);
+ domain = irq_domain_create_legacy(of_fwnode_handle(node), dom_sz - min_irq, min_irq,
+ min_irq, &jcore_aic_irqdomain_ops, &jcore_aic);
if (!domain)
return -ENOMEM;
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index 37e1a03fcbb4..c9e902b7bf48 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -157,8 +157,8 @@ static int keystone_irq_probe(struct platform_device *pdev)
kirq->chip.irq_mask = keystone_irq_setmask;
kirq->chip.irq_unmask = keystone_irq_unmask;
- kirq->irqd = irq_domain_add_linear(np, KEYSTONE_N_IRQ,
- &keystone_irq_ops, kirq);
+ kirq->irqd = irq_domain_create_linear(of_fwnode_handle(np), KEYSTONE_N_IRQ,
+ &keystone_irq_ops, kirq);
if (!kirq->irqd) {
dev_err(dev, "IRQ domain registration failed\n");
return -ENODEV;
diff --git a/drivers/irqchip/irq-lan966x-oic.c b/drivers/irqchip/irq-lan966x-oic.c
index 41ac880e3b87..11d3a0ffa261 100644
--- a/drivers/irqchip/irq-lan966x-oic.c
+++ b/drivers/irqchip/irq-lan966x-oic.c
@@ -71,14 +71,12 @@ static unsigned int lan966x_oic_irq_startup(struct irq_data *data)
struct lan966x_oic_chip_regs *chip_regs = gc->private;
u32 map;
- irq_gc_lock(gc);
-
- /* Map the source interrupt to the destination */
- map = irq_reg_readl(gc, chip_regs->reg_off_map);
- map |= data->mask;
- irq_reg_writel(gc, map, chip_regs->reg_off_map);
-
- irq_gc_unlock(gc);
+ scoped_guard (raw_spinlock, &gc->lock) {
+ /* Map the source interrupt to the destination */
+ map = irq_reg_readl(gc, chip_regs->reg_off_map);
+ map |= data->mask;
+ irq_reg_writel(gc, map, chip_regs->reg_off_map);
+ }
ct->chip.irq_ack(data);
ct->chip.irq_unmask(data);
@@ -95,14 +93,12 @@ static void lan966x_oic_irq_shutdown(struct irq_data *data)
ct->chip.irq_mask(data);
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
/* Unmap the interrupt */
map = irq_reg_readl(gc, chip_regs->reg_off_map);
map &= ~data->mask;
irq_reg_writel(gc, map, chip_regs->reg_off_map);
-
- irq_gc_unlock(gc);
}
static int lan966x_oic_irq_set_type(struct irq_data *data,
@@ -224,7 +220,7 @@ static int lan966x_oic_probe(struct platform_device *pdev)
.exit = lan966x_oic_chip_exit,
};
struct irq_domain_info d_info = {
- .fwnode = of_node_to_fwnode(pdev->dev.of_node),
+ .fwnode = of_fwnode_handle(pdev->dev.of_node),
.domain_flags = IRQ_DOMAIN_FLAG_DESTROY_GC,
.size = LAN966X_OIC_NR_IRQ,
.hwirq_max = LAN966X_OIC_NR_IRQ,
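
Where only part of a function must run under the lock, as in lan966x_oic_irq_startup() above (the ack/unmask callbacks have to be invoked after the lock is dropped), scoped_guard() bounds the critical section explicitly. Sketch with hypothetical names:

static unsigned int foo_irq_startup(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);

	scoped_guard(raw_spinlock, &gc->lock) {
		/* register update under gc->lock */
		irq_reg_writel(gc, d->mask, 0x00 /* hypothetical map reg */);
	}
	/* lock already released; safe to call into other chip callbacks */
	ct->chip.irq_unmask(d);
	return 0;
}
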
diff --git a/drivers/irqchip/irq-loongarch-avec.c b/drivers/irqchip/irq-loongarch-avec.c
index 80e55955a29f..bf52dc8345f5 100644
--- a/drivers/irqchip/irq-loongarch-avec.c
+++ b/drivers/irqchip/irq-loongarch-avec.c
@@ -18,7 +18,7 @@
#include <asm/loongarch.h>
#include <asm/setup.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#include "irq-loongson.h"
#define VECTORS_PER_REG 64
diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c
index e62dab4c97fc..950bc087e388 100644
--- a/drivers/irqchip/irq-loongarch-cpu.c
+++ b/drivers/irqchip/irq-loongarch-cpu.c
@@ -100,7 +100,7 @@ static const struct irq_domain_ops loongarch_cpu_intc_irq_domain_ops = {
static int __init cpuintc_of_init(struct device_node *of_node,
struct device_node *parent)
{
- cpuintc_handle = of_node_to_fwnode(of_node);
+ cpuintc_handle = of_fwnode_handle(of_node);
irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM,
&loongarch_cpu_intc_irq_domain_ops, NULL);
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index bb79e19dfb59..b2860eb2d32c 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -554,7 +554,7 @@ static int __init eiointc_of_init(struct device_node *of_node,
priv->vec_count = VEC_COUNT;
priv->node = 0;
- priv->domain_handle = of_node_to_fwnode(of_node);
+ priv->domain_handle = of_fwnode_handle(of_node);
ret = eiointc_init(priv, parent_irq, 0);
if (ret < 0)
diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c
index 5da02c7ad0b3..d8558eb35044 100644
--- a/drivers/irqchip/irq-loongson-htvec.c
+++ b/drivers/irqchip/irq-loongson-htvec.c
@@ -248,7 +248,7 @@ static int htvec_of_init(struct device_node *node,
}
err = htvec_init(res.start, resource_size(&res),
- num_parents, parent_irq, of_node_to_fwnode(node));
+ num_parents, parent_irq, of_fwnode_handle(node));
if (err < 0)
return err;
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index 2b1bd4a96665..0033c2188abc 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -116,9 +116,8 @@ static int liointc_set_type(struct irq_data *data, unsigned int type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
u32 mask = data->mask;
- unsigned long flags;
- irq_gc_lock_irqsave(gc, flags);
+ guard(raw_spinlock)(&gc->lock);
switch (type) {
case IRQ_TYPE_LEVEL_HIGH:
liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
@@ -137,10 +136,8 @@ static int liointc_set_type(struct irq_data *data, unsigned int type)
liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
break;
default:
- irq_gc_unlock_irqrestore(gc, flags);
return -EINVAL;
}
- irq_gc_unlock_irqrestore(gc, flags);
irqd_set_trigger_type(data, type);
return 0;
@@ -157,10 +154,9 @@ static void liointc_suspend(struct irq_chip_generic *gc)
static void liointc_resume(struct irq_chip_generic *gc)
{
struct liointc_priv *priv = gc->private;
- unsigned long flags;
int i;
- irq_gc_lock_irqsave(gc, flags);
+ guard(raw_spinlock_irqsave)(&gc->lock);
/* Disable all at first */
writel(0xffffffff, gc->reg_base + LIOINTC_REG_INTC_DISABLE);
/* Restore map cache */
@@ -170,7 +166,6 @@ static void liointc_resume(struct irq_chip_generic *gc)
writel(priv->int_edge, gc->reg_base + LIOINTC_REG_INTC_EDGE);
/* Restore mask cache */
writel(gc->mask_cache, gc->reg_base + LIOINTC_REG_INTC_ENABLE);
- irq_gc_unlock_irqrestore(gc, flags);
}
static int parent_irq[LIOINTC_NUM_PARENT];
@@ -363,7 +358,7 @@ static int __init liointc_of_init(struct device_node *node,
}
err = liointc_init(res.start, resource_size(&res),
- revision, of_node_to_fwnode(node), node);
+ revision, of_fwnode_handle(node), node);
if (err < 0)
return err;
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index 9c62108b3ad5..a0257c7bef10 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -15,7 +15,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#include "irq-loongson.h"
static int nr_pics;
@@ -243,7 +243,7 @@ static int pch_msi_of_init(struct device_node *node, struct device_node *parent)
return -EINVAL;
}
- err = pch_msi_init(res.start, irq_base, irq_count, parent_domain, of_node_to_fwnode(node));
+ err = pch_msi_init(res.start, irq_base, irq_count, parent_domain, of_fwnode_handle(node));
if (err < 0)
return err;
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index 69efda35a8e7..62e6bf3a0611 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -392,7 +392,7 @@ static int pch_pic_of_init(struct device_node *node,
}
err = pch_pic_init(res.start, resource_size(&res), vec_base,
- parent_domain, of_node_to_fwnode(node), 0);
+ parent_domain, of_fwnode_handle(node), 0);
if (err < 0)
return err;
diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c
index 4d70a857133f..14cca44baa14 100644
--- a/drivers/irqchip/irq-lpc32xx.c
+++ b/drivers/irqchip/irq-lpc32xx.c
@@ -210,8 +210,8 @@ static int __init lpc32xx_of_ic_init(struct device_node *node,
return -EINVAL;
}
- irqc->domain = irq_domain_add_linear(node, NR_LPC32XX_IC_IRQS,
- &lpc32xx_irq_domain_ops, irqc);
+ irqc->domain = irq_domain_create_linear(of_fwnode_handle(node), NR_LPC32XX_IC_IRQS,
+ &lpc32xx_irq_domain_ops, irqc);
if (!irqc->domain) {
pr_err("unable to add irq domain\n");
iounmap(irqc->base);
diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c
index 139f26b0a6ef..50a7b38381b9 100644
--- a/drivers/irqchip/irq-ls-extirq.c
+++ b/drivers/irqchip/irq-ls-extirq.c
@@ -208,8 +208,8 @@ ls_extirq_of_init(struct device_node *node, struct device_node *parent)
of_device_is_compatible(node, "fsl,ls1043a-extirq");
raw_spin_lock_init(&priv->lock);
- domain = irq_domain_add_hierarchy(parent_domain, 0, priv->nirq, node,
- &extirq_domain_ops, priv);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, priv->nirq, of_fwnode_handle(node),
+ &extirq_domain_ops, priv);
if (!domain) {
ret = -ENOMEM;
goto err_add_hierarchy;
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index 3cb80796cc7c..84bc5e4b47cf 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -215,17 +215,17 @@ static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
{
/* Initialize MSI domain parent */
- msi_data->parent = irq_domain_add_linear(NULL,
- msi_data->irqs_num,
- &ls_scfg_msi_domain_ops,
- msi_data);
+ msi_data->parent = irq_domain_create_linear(NULL,
+ msi_data->irqs_num,
+ &ls_scfg_msi_domain_ops,
+ msi_data);
if (!msi_data->parent) {
dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
msi_data->msi_domain = pci_msi_create_irq_domain(
- of_node_to_fwnode(msi_data->pdev->dev.of_node),
+ of_fwnode_handle(msi_data->pdev->dev.of_node),
&ls_scfg_msi_domain_info,
msi_data->parent);
if (!msi_data->msi_domain) {
diff --git a/drivers/irqchip/irq-ls1x.c b/drivers/irqchip/irq-ls1x.c
index 77a3f7dfaaf0..589d32007fca 100644
--- a/drivers/irqchip/irq-ls1x.c
+++ b/drivers/irqchip/irq-ls1x.c
@@ -126,8 +126,8 @@ static int __init ls1x_intc_of_init(struct device_node *node,
}
/* Set up an IRQ domain */
- priv->domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops,
- NULL);
+ priv->domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &irq_generic_chip_ops,
+ NULL);
if (!priv->domain) {
pr_err("ls1x-irq: cannot add IRQ domain\n");
err = -ENOMEM;
diff --git a/drivers/irqchip/irq-mchp-eic.c b/drivers/irqchip/irq-mchp-eic.c
index 5dcd94c000a2..516a3a0e359c 100644
--- a/drivers/irqchip/irq-mchp-eic.c
+++ b/drivers/irqchip/irq-mchp-eic.c
@@ -248,8 +248,9 @@ static int mchp_eic_init(struct device_node *node, struct device_node *parent)
eic->irqs[i] = irq.args[1];
}
- eic->domain = irq_domain_add_hierarchy(parent_domain, 0, MCHP_EIC_NIRQ,
- node, &mchp_eic_domain_ops, eic);
+ eic->domain = irq_domain_create_hierarchy(parent_domain, 0, MCHP_EIC_NIRQ,
+ of_fwnode_handle(node), &mchp_eic_domain_ops,
+ eic);
if (!eic->domain) {
pr_err("%pOF: Failed to add domain\n", node);
ret = -ENODEV;
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index 0a25536a5d07..7d177626d64b 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -607,7 +607,7 @@ static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *
domain = irq_domain_create_hierarchy(parent_domain, 0,
ctl->params->nr_hwirq,
- of_node_to_fwnode(node),
+ of_fwnode_handle(node),
&meson_gpio_irq_domain_ops,
ctl);
if (!domain) {
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
index 0c7ae71a0af0..ac784ef3ed4b 100644
--- a/drivers/irqchip/irq-mips-cpu.c
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -238,11 +238,9 @@ static void mips_cpu_register_ipi_domain(struct device_node *of_node)
struct cpu_ipi_domain_state *ipi_domain_state;
ipi_domain_state = kzalloc(sizeof(*ipi_domain_state), GFP_KERNEL);
- ipi_domain = irq_domain_add_hierarchy(irq_domain,
- IRQ_DOMAIN_FLAG_IPI_SINGLE,
- 2, of_node,
- &mips_cpu_ipi_chip_ops,
- ipi_domain_state);
+ ipi_domain = irq_domain_create_hierarchy(irq_domain, IRQ_DOMAIN_FLAG_IPI_SINGLE, 2,
+ of_fwnode_handle(of_node),
+ &mips_cpu_ipi_chip_ops, ipi_domain_state);
if (!ipi_domain)
panic("Failed to add MIPS CPU IPI domain");
irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
@@ -260,9 +258,8 @@ static void __init __mips_cpu_irq_init(struct device_node *of_node)
clear_c0_status(ST0_IM);
clear_c0_cause(CAUSEF_IP);
- irq_domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
- &mips_cpu_intc_irq_domain_ops,
- NULL);
+ irq_domain = irq_domain_create_legacy(of_fwnode_handle(of_node), 8, MIPS_CPU_IRQ_BASE, 0,
+ &mips_cpu_intc_irq_domain_ops, NULL);
if (!irq_domain)
panic("Failed to add irqdomain for MIPS CPU");
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index bca8053864b2..34e8d09c12a0 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -841,10 +841,10 @@ static int gic_register_ipi_domain(struct device_node *node)
struct irq_domain *gic_ipi_domain;
unsigned int v[2], num_ipis;
- gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
- IRQ_DOMAIN_FLAG_IPI_PER_CPU,
- GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
- node, &gic_ipi_domain_ops, NULL);
+ gic_ipi_domain = irq_domain_create_hierarchy(gic_irq_domain, IRQ_DOMAIN_FLAG_IPI_PER_CPU,
+ GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
+ of_fwnode_handle(node), &gic_ipi_domain_ops,
+ NULL);
if (!gic_ipi_domain) {
pr_err("Failed to add IPI domain");
return -ENXIO;
@@ -963,9 +963,10 @@ static int __init gic_of_init(struct device_node *node,
gic_irq_dispatch);
}
- gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
- gic_shared_intrs, 0,
- &gic_irq_domain_ops, NULL);
+ gic_irq_domain = irq_domain_create_simple(of_fwnode_handle(node),
+ GIC_NUM_LOCAL_INTRS +
+ gic_shared_intrs, 0,
+ &gic_irq_domain_ops, NULL);
if (!gic_irq_domain) {
pr_err("Failed to add IRQ domain");
return -ENXIO;
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 25cf4f80e767..09e640430208 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -261,9 +261,9 @@ static int __init mmp_init_bases(struct device_node *node)
}
icu_data[0].virq_base = 0;
- icu_data[0].domain = irq_domain_add_linear(node, nr_irqs,
- &mmp_irq_domain_ops,
- &icu_data[0]);
+ icu_data[0].domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs,
+ &mmp_irq_domain_ops,
+ &icu_data[0]);
for (irq = 0; irq < nr_irqs; irq++) {
ret = irq_create_mapping(icu_data[0].domain, irq);
if (!ret) {
@@ -391,9 +391,9 @@ static int __init mmp2_mux_of_init(struct device_node *node,
return -EINVAL;
icu_data[i].virq_base = 0;
- icu_data[i].domain = irq_domain_add_linear(node, nr_irqs,
- &mmp_irq_domain_ops,
- &icu_data[i]);
+ icu_data[i].domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs,
+ &mmp_irq_domain_ops,
+ &icu_data[i]);
for (irq = 0; irq < nr_irqs; irq++) {
ret = irq_create_mapping(icu_data[i].domain, irq);
if (!ret) {
diff --git a/drivers/irqchip/irq-mscc-ocelot.c b/drivers/irqchip/irq-mscc-ocelot.c
index 3dc745b14caf..8cbc191f750b 100644
--- a/drivers/irqchip/irq-mscc-ocelot.c
+++ b/drivers/irqchip/irq-mscc-ocelot.c
@@ -83,7 +83,7 @@ static void ocelot_irq_unmask(struct irq_data *data)
unsigned int mask = data->mask;
u32 val;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
/*
* Clear sticky bits for edge mode interrupts.
* Serval has only one trigger register replication, but the adjacent
@@ -97,7 +97,6 @@ static void ocelot_irq_unmask(struct irq_data *data)
*ct->mask_cache &= ~mask;
irq_reg_writel(gc, mask, p->reg_off_ena_set);
- irq_gc_unlock(gc);
}
static void ocelot_irq_handler(struct irq_desc *desc)
@@ -132,8 +131,8 @@ static int __init vcoreiii_irq_init(struct device_node *node,
if (!parent_irq)
return -EINVAL;
- domain = irq_domain_add_linear(node, p->n_irq,
- &irq_generic_chip_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(node), p->n_irq,
+ &irq_generic_chip_ops, NULL);
if (!domain) {
pr_err("%pOFn: unable to add irq domain\n", node);
return -ENOMEM;
diff --git a/drivers/irqchip/irq-msi-lib.c b/drivers/irqchip/irq-msi-lib.c
index 51464c6257f3..246c30205af4 100644
--- a/drivers/irqchip/irq-msi-lib.c
+++ b/drivers/irqchip/irq-msi-lib.c
@@ -4,7 +4,7 @@
#include <linux/export.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
/**
* msi_lib_init_dev_msi_info - Domain info setup for MSI domains
@@ -105,8 +105,13 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
* MSI message into the hardware which is the whole purpose of the
* device MSI domain aside of mask/unmask which is provided e.g. by
* PCI/MSI device domains.
+ *
+ * The exception to the rule is when the underlying domain
+ * tells you that affinity is not a thing -- for example when
+ * everything is muxed behind a single interrupt.
*/
- chip->irq_set_affinity = msi_domain_set_affinity;
+ if (!chip->irq_set_affinity && !(info->flags & MSI_FLAG_NO_AFFINITY))
+ chip->irq_set_affinity = msi_domain_set_affinity;
return true;
}
EXPORT_SYMBOL_GPL(msi_lib_init_dev_msi_info);
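
With the new condition, msi-lib installs msi_domain_set_affinity() only when the chip does not already provide a callback and the domain has not declared affinity meaningless. A parent whose MSIs are all muxed behind a single upstream interrupt would opt out via MSI_FLAG_NO_AFFINITY; one plausible shape of such an opt-out, with hypothetical foo_* names (this is a sketch, not a driver from this series):

static const struct msi_parent_ops foo_msi_parent_ops = {
	.required_flags		= MSI_FLAG_USE_DEF_DOM_OPS |
				  MSI_FLAG_USE_DEF_CHIP_OPS |
				  MSI_FLAG_NO_AFFINITY,	/* all MSIs share one parent IRQ */
	.bus_select_token	= DOMAIN_BUS_NEXUS,
	.bus_select_mask	= MATCH_PLATFORM_MSI,
	.prefix			= "FOO-",
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};
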
diff --git a/drivers/irqchip/irq-msi-lib.h b/drivers/irqchip/irq-msi-lib.h
deleted file mode 100644
index 681ceabb7bc7..000000000000
--- a/drivers/irqchip/irq-msi-lib.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-// Copyright (C) 2022 Linutronix GmbH
-// Copyright (C) 2022 Intel
-
-#ifndef _DRIVERS_IRQCHIP_IRQ_MSI_LIB_H
-#define _DRIVERS_IRQCHIP_IRQ_MSI_LIB_H
-
-#include <linux/bits.h>
-#include <linux/irqdomain.h>
-#include <linux/msi.h>
-
-#ifdef CONFIG_PCI_MSI
-#define MATCH_PCI_MSI BIT(DOMAIN_BUS_PCI_MSI)
-#else
-#define MATCH_PCI_MSI (0)
-#endif
-
-#define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI)
-
-int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
- enum irq_domain_bus_token bus_token);
-
-bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
- struct irq_domain *real_parent,
- struct msi_domain_info *info);
-
-#endif /* _DRIVERS_IRQCHIP_IRQ_MSI_LIB_H */
diff --git a/drivers/irqchip/irq-mst-intc.c b/drivers/irqchip/irq-mst-intc.c
index f6133ae28155..9643cc3a77d7 100644
--- a/drivers/irqchip/irq-mst-intc.c
+++ b/drivers/irqchip/irq-mst-intc.c
@@ -273,8 +273,8 @@ static int __init mst_intc_of_init(struct device_node *dn,
raw_spin_lock_init(&cd->lock);
cd->irq_start = irq_start;
cd->nr_irqs = irq_end - irq_start + 1;
- domain = irq_domain_add_hierarchy(domain_parent, 0, cd->nr_irqs, dn,
- &mst_intc_domain_ops, cd);
+ domain = irq_domain_create_hierarchy(domain_parent, 0, cd->nr_irqs, of_fwnode_handle(dn),
+ &mst_intc_domain_ops, cd);
if (!domain) {
iounmap(cd->base);
kfree(cd);
diff --git a/drivers/irqchip/irq-mtk-cirq.c b/drivers/irqchip/irq-mtk-cirq.c
index 76bc0283e3b9..de481ba340f8 100644
--- a/drivers/irqchip/irq-mtk-cirq.c
+++ b/drivers/irqchip/irq-mtk-cirq.c
@@ -336,9 +336,8 @@ static int __init mtk_cirq_of_init(struct device_node *node,
cirq_data->offsets = match->data;
irq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
- domain = irq_domain_add_hierarchy(domain_parent, 0,
- irq_num, node,
- &cirq_domain_ops, cirq_data);
+ domain = irq_domain_create_hierarchy(domain_parent, 0, irq_num, of_fwnode_handle(node),
+ &cirq_domain_ops, cirq_data);
if (!domain) {
ret = -ENOMEM;
goto out_unmap;
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
index 586e52d5442b..6895e7096b27 100644
--- a/drivers/irqchip/irq-mtk-sysirq.c
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -207,8 +207,8 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
chip_data->which_word[i] = word;
}
- domain = irq_domain_add_hierarchy(domain_parent, 0, intpol_num, node,
- &sysirq_domain_ops, chip_data);
+ domain = irq_domain_create_hierarchy(domain_parent, 0, intpol_num, of_fwnode_handle(node),
+ &sysirq_domain_ops, chip_data);
if (!domain) {
ret = -ENOMEM;
goto out_free_which_word;
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
index 60b976286636..d3232d6d8dce 100644
--- a/drivers/irqchip/irq-mvebu-gicp.c
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -17,7 +17,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -170,9 +170,12 @@ static const struct msi_parent_ops gicp_msi_parent_ops = {
static int mvebu_gicp_probe(struct platform_device *pdev)
{
- struct irq_domain *inner_domain, *parent_domain;
struct device_node *node = pdev->dev.of_node;
struct device_node *irq_parent_dn;
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(node),
+ .ops = &gicp_domain_ops,
+ };
struct mvebu_gicp *gicp;
int ret, i;
@@ -217,30 +220,23 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
if (!gicp->spi_bitmap)
return -ENOMEM;
+ info.size = gicp->spi_cnt;
+ info.host_data = gicp;
+
irq_parent_dn = of_irq_find_parent(node);
if (!irq_parent_dn) {
dev_err(&pdev->dev, "failed to find parent IRQ node\n");
return -ENODEV;
}
- parent_domain = irq_find_host(irq_parent_dn);
+ info.parent = irq_find_host(irq_parent_dn);
of_node_put(irq_parent_dn);
- if (!parent_domain) {
+ if (!info.parent) {
dev_err(&pdev->dev, "failed to find parent IRQ domain\n");
return -ENODEV;
}
- inner_domain = irq_domain_create_hierarchy(parent_domain, 0,
- gicp->spi_cnt,
- of_node_to_fwnode(node),
- &gicp_domain_ops, gicp);
- if (!inner_domain)
- return -ENOMEM;
-
- irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_GENERIC_MSI);
- inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
- inner_domain->msi_parent_ops = &gicp_msi_parent_ops;
- return 0;
+ return msi_create_parent_irq_domain(&info, &gicp_msi_parent_ops) ? 0 : -ENOMEM;
}
static const struct of_device_id mvebu_gicp_of_match[] = {
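
The gicp conversion above collapses the four-step sequence — irq_domain_create_hierarchy(), irq_domain_update_bus_token(), setting IRQ_DOMAIN_FLAG_MSI_PARENT, assigning msi_parent_ops — into a single msi_create_parent_irq_domain() call driven by struct irq_domain_info. A sketch of the shape, assuming a hypothetical foo driver and ops:

#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_create_msi_parent(struct platform_device *pdev,
				 struct irq_domain *parent, void *priv)
{
	struct irq_domain_info info = {
		.fwnode		= of_fwnode_handle(pdev->dev.of_node),
		.ops		= &foo_domain_ops,	/* hypothetical */
		.size		= 32,
		.parent		= parent,
		.host_data	= priv,
	};

	/* Replaces irq_domain_create_hierarchy() plus the manual
	 * bus-token/flag/msi_parent_ops setup removed above. */
	if (!msi_create_parent_irq_domain(&info, &foo_msi_parent_ops))
		return -ENOMEM;
	return 0;
}
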
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index 4eebed39880a..db5dbc6e88b0 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -20,7 +20,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#include <dt-bindings/interrupt-controller/mvebu-icu.h>
diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c
index 54f6f0811573..e5b2bde3d933 100644
--- a/drivers/irqchip/irq-mvebu-odmi.c
+++ b/drivers/irqchip/irq-mvebu-odmi.c
@@ -18,7 +18,7 @@
#include <linux/of_address.h>
#include <linux/slab.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -167,7 +167,12 @@ static const struct msi_parent_ops odmi_msi_parent_ops = {
static int __init mvebu_odmi_init(struct device_node *node,
struct device_node *parent)
{
- struct irq_domain *parent_domain, *inner_domain;
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(node),
+ .ops = &odmi_domain_ops,
+ .size = odmis_count * NODMIS_PER_FRAME,
+ .parent = irq_find_host(parent),
+ };
int ret, i;
if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
@@ -203,22 +208,10 @@ static int __init mvebu_odmi_init(struct device_node *node,
}
}
- parent_domain = irq_find_host(parent);
+ if (msi_create_parent_irq_domain(&info, &odmi_msi_parent_ops))
+ return 0;
- inner_domain = irq_domain_create_hierarchy(parent_domain, 0,
- odmis_count * NODMIS_PER_FRAME,
- of_node_to_fwnode(node),
- &odmi_domain_ops, NULL);
- if (!inner_domain) {
- ret = -ENOMEM;
- goto err_unmap;
- }
-
- irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_GENERIC_MSI);
- inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
- inner_domain->msi_parent_ops = &odmi_msi_parent_ops;
-
- return 0;
+ ret = -ENOMEM;
err_unmap:
for (i = 0; i < odmis_count; i++) {
diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c
index 3888b7585981..8db638aa21d2 100644
--- a/drivers/irqchip/irq-mvebu-pic.c
+++ b/drivers/irqchip/irq-mvebu-pic.c
@@ -150,8 +150,8 @@ static int mvebu_pic_probe(struct platform_device *pdev)
return -EINVAL;
}
- pic->domain = irq_domain_add_linear(node, PIC_MAX_IRQS,
- &mvebu_pic_domain_ops, pic);
+ pic->domain = irq_domain_create_linear(of_fwnode_handle(node), PIC_MAX_IRQS,
+ &mvebu_pic_domain_ops, pic);
if (!pic->domain) {
dev_err(&pdev->dev, "Failed to allocate irq domain\n");
return -ENOMEM;
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
index ebd4a9014e8d..5822ea864765 100644
--- a/drivers/irqchip/irq-mvebu-sei.c
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -14,7 +14,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
/* Cause register */
#define GICP_SECR(idx) (0x0 + ((idx) * 0x4))
@@ -366,6 +366,10 @@ static const struct msi_parent_ops sei_msi_parent_ops = {
static int mvebu_sei_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(node),
+ .ops = &mvebu_sei_cp_domain_ops,
+ };
struct mvebu_sei *sei;
u32 parent_irq;
int ret;
@@ -402,7 +406,7 @@ static int mvebu_sei_probe(struct platform_device *pdev)
}
/* Create the root SEI domain */
- sei->sei_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+ sei->sei_domain = irq_domain_create_linear(of_fwnode_handle(node),
(sei->caps->ap_range.size +
sei->caps->cp_range.size),
&mvebu_sei_domain_ops,
@@ -418,7 +422,7 @@ static int mvebu_sei_probe(struct platform_device *pdev)
/* Create the 'wired' domain */
sei->ap_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
sei->caps->ap_range.size,
- of_node_to_fwnode(node),
+ of_fwnode_handle(node),
&mvebu_sei_ap_domain_ops,
sei);
if (!sei->ap_domain) {
@@ -430,21 +434,17 @@ static int mvebu_sei_probe(struct platform_device *pdev)
irq_domain_update_bus_token(sei->ap_domain, DOMAIN_BUS_WIRED);
/* Create the 'MSI' domain */
- sei->cp_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
- sei->caps->cp_range.size,
- of_node_to_fwnode(node),
- &mvebu_sei_cp_domain_ops,
- sei);
+ info.size = sei->caps->cp_range.size;
+ info.host_data = sei;
+ info.parent = sei->sei_domain;
+
+ sei->cp_domain = msi_create_parent_irq_domain(&info, &sei_msi_parent_ops);
if (!sei->cp_domain) {
pr_err("Failed to create CPs IRQ domain\n");
ret = -ENOMEM;
goto remove_ap_domain;
}
- irq_domain_update_bus_token(sei->cp_domain, DOMAIN_BUS_GENERIC_MSI);
- sei->cp_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
- sei->cp_domain->msi_parent_ops = &sei_msi_parent_ops;
-
mvebu_sei_reset(sei);
irq_set_chained_handler_and_data(parent_irq, mvebu_sei_handle_cascade_irq, sei);
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index d67b5da38982..0bb423dd5280 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -162,8 +162,8 @@ static const struct irq_domain_ops icoll_irq_domain_ops = {
static void __init icoll_add_domain(struct device_node *np,
int num)
{
- icoll_domain = irq_domain_add_linear(np, num,
- &icoll_irq_domain_ops, NULL);
+ icoll_domain = irq_domain_create_linear(of_fwnode_handle(np), num,
+ &icoll_irq_domain_ops, NULL);
if (!icoll_domain)
panic("%pOF: unable to create irq domain", np);
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index ba6332b00a0a..76e11cac9631 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -90,7 +90,7 @@ static int __init nvic_of_init(struct device_node *node,
irqs = NVIC_MAX_IRQ;
nvic_irq_domain =
- irq_domain_add_linear(node, irqs, &nvic_irq_domain_ops, NULL);
+ irq_domain_create_linear(of_fwnode_handle(node), irqs, &nvic_irq_domain_ops, NULL);
if (!nvic_irq_domain) {
pr_warn("Failed to allocate irq domain\n");
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index ad84a2f03368..16f00db570e7 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -248,7 +248,7 @@ static int __init omap_init_irq_of(struct device_node *node)
if (WARN_ON(!omap_irq_base))
return -ENOMEM;
- domain = irq_domain_add_linear(node, omap_nr_irqs,
+ domain = irq_domain_create_linear(of_fwnode_handle(node), omap_nr_irqs,
&irq_generic_chip_ops, NULL);
omap_irq_soft_reset();
@@ -274,7 +274,7 @@ static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
irq_base = 0;
}
- domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
+ domain = irq_domain_create_legacy(of_fwnode_handle(node), omap_nr_irqs, irq_base, 0,
&irq_domain_simple_ops, NULL);
omap_irq_soft_reset();
diff --git a/drivers/irqchip/irq-or1k-pic.c b/drivers/irqchip/irq-or1k-pic.c
index f289ccd95291..48126067c54b 100644
--- a/drivers/irqchip/irq-or1k-pic.c
+++ b/drivers/irqchip/irq-or1k-pic.c
@@ -144,8 +144,8 @@ static int __init or1k_pic_init(struct device_node *node,
/* Disable all interrupts until explicitly requested */
mtspr(SPR_PICMR, (0UL));
- root_domain = irq_domain_add_linear(node, 32, &or1k_irq_domain_ops,
- pic);
+ root_domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &or1k_irq_domain_ops,
+ pic);
set_handle_irq(or1k_pic_handle_irq);
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index 4e4e874e09a8..dddbc05917c0 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -59,7 +59,7 @@ static int __init orion_irq_init(struct device_node *np,
/* count number of irq chips by valid reg addresses */
num_chips = of_address_count(np);
- orion_irq_domain = irq_domain_add_linear(np,
+ orion_irq_domain = irq_domain_create_linear(of_fwnode_handle(np),
num_chips * ORION_IRQS_PER_CHIP,
&irq_generic_chip_ops, NULL);
if (!orion_irq_domain)
@@ -146,8 +146,8 @@ static int __init orion_bridge_irq_init(struct device_node *np,
/* get optional number of interrupts provided */
of_property_read_u32(np, "marvell,#interrupts", &nrirqs);
- domain = irq_domain_add_linear(np, nrirqs,
- &irq_generic_chip_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(np), nrirqs,
+ &irq_generic_chip_ops, NULL);
if (!domain) {
pr_err("%pOFn: unable to add irq domain\n", np);
return -ENOMEM;
diff --git a/drivers/irqchip/irq-owl-sirq.c b/drivers/irqchip/irq-owl-sirq.c
index 6e4127465094..3d93d21f6732 100644
--- a/drivers/irqchip/irq-owl-sirq.c
+++ b/drivers/irqchip/irq-owl-sirq.c
@@ -323,8 +323,8 @@ static int __init owl_sirq_init(const struct owl_sirq_params *params,
owl_sirq_clear_set_extctl(chip_data, 0, INTC_EXTCTL_CLK_SEL, i);
}
- domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_SIRQ, node,
- &owl_sirq_domain_ops, chip_data);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, NUM_SIRQ, of_fwnode_handle(node),
+ &owl_sirq_domain_ops, chip_data);
if (!domain) {
pr_err("%pOF: failed to add domain\n", node);
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c
index b546b1036e12..5dfda8e8df10 100644
--- a/drivers/irqchip/irq-pic32-evic.c
+++ b/drivers/irqchip/irq-pic32-evic.c
@@ -227,9 +227,9 @@ static int __init pic32_of_init(struct device_node *node,
goto err_iounmap;
}
- evic_irq_domain = irq_domain_add_linear(node, nchips * 32,
- &pic32_irq_domain_ops,
- priv);
+ evic_irq_domain = irq_domain_create_linear(of_fwnode_handle(node), nchips * 32,
+ &pic32_irq_domain_ops,
+ priv);
if (!evic_irq_domain) {
ret = -ENOMEM;
goto err_free_priv;
diff --git a/drivers/irqchip/irq-pruss-intc.c b/drivers/irqchip/irq-pruss-intc.c
index bee01980b463..87a5813fd835 100644
--- a/drivers/irqchip/irq-pruss-intc.c
+++ b/drivers/irqchip/irq-pruss-intc.c
@@ -555,8 +555,8 @@ static int pruss_intc_probe(struct platform_device *pdev)
mutex_init(&intc->lock);
- intc->domain = irq_domain_add_linear(dev->of_node, max_system_events,
- &pruss_intc_irq_domain_ops, intc);
+ intc->domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), max_system_events,
+ &pruss_intc_irq_domain_ops, intc);
if (!intc->domain)
return -ENOMEM;
@@ -581,8 +581,7 @@ static int pruss_intc_probe(struct platform_device *pdev)
host_data->intc = intc;
host_data->host_irq = i;
- irq_set_handler_data(irq, host_data);
- irq_set_chained_handler(irq, pruss_intc_irq_handler);
+ irq_set_chained_handler_and_data(irq, pruss_intc_irq_handler, host_data);
}
return 0;
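
irq_set_chained_handler_and_data() installs the handler and its data under one descriptor-lock acquisition, closing the window in which a chained handler could run before its data is in place if the two calls were ordered the other way. Usage is a one-liner; the handler name here is hypothetical:

/* Inside a probe routine, with "irq" already mapped:
 * Was:	irq_set_handler_data(irq, host_data);
 *	irq_set_chained_handler(irq, foo_demux_handler);
 */
irq_set_chained_handler_and_data(irq, foo_demux_handler, host_data);
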
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
index f772deb9cba5..8d569f7c5a7a 100644
--- a/drivers/irqchip/irq-qcom-mpm.c
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -450,7 +450,7 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
priv->domain = irq_domain_create_hierarchy(parent_domain,
IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP, pin_cnt,
- of_node_to_fwnode(np), &qcom_mpm_ops, priv);
+ of_fwnode_handle(np), &qcom_mpm_ops, priv);
if (!priv->domain) {
dev_err(dev, "failed to create MPM domain\n");
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c
index 2a349082af81..942c1f8c363d 100644
--- a/drivers/irqchip/irq-realtek-rtl.c
+++ b/drivers/irqchip/irq-realtek-rtl.c
@@ -162,7 +162,7 @@ static int __init realtek_rtl_of_init(struct device_node *node, struct device_no
else if (!parent_irq)
return -ENODEV;
- domain = irq_domain_add_linear(node, RTL_ICTL_NUM_INPUTS, &irq_domain_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(node), RTL_ICTL_NUM_INPUTS, &irq_domain_ops, NULL);
if (!domain)
return -ENOMEM;
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 954419f2460d..0959ed43b1a9 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -513,8 +513,10 @@ static int intc_irqpin_probe(struct platform_device *pdev)
irq_chip->irq_set_wake = intc_irqpin_irq_set_wake;
irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND;
- p->irq_domain = irq_domain_add_simple(dev->of_node, nirqs, 0,
- &intc_irqpin_irq_domain_ops, p);
+ p->irq_domain = irq_domain_create_simple(of_fwnode_handle(dev->of_node),
+ nirqs, 0,
+ &intc_irqpin_irq_domain_ops,
+ p);
if (!p->irq_domain) {
ret = -ENXIO;
dev_err(dev, "cannot initialize irq domain\n");
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index cbce8ffc7de4..5c3196e5a437 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -168,8 +168,8 @@ static int irqc_probe(struct platform_device *pdev)
p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */
- p->irq_domain = irq_domain_add_linear(dev->of_node, p->number_of_irqs,
- &irq_generic_chip_ops, p);
+ p->irq_domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), p->number_of_irqs,
+ &irq_generic_chip_ops, p);
if (!p->irq_domain) {
ret = -ENXIO;
dev_err(dev, "cannot initialize irq domain\n");
diff --git a/drivers/irqchip/irq-renesas-rza1.c b/drivers/irqchip/irq-renesas-rza1.c
index d4e6a68889ec..0a9640ba0adb 100644
--- a/drivers/irqchip/irq-renesas-rza1.c
+++ b/drivers/irqchip/irq-renesas-rza1.c
@@ -231,9 +231,9 @@ static int rza1_irqc_probe(struct platform_device *pdev)
priv->chip.irq_set_type = rza1_irqc_set_type;
priv->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;
- priv->irq_domain = irq_domain_add_hierarchy(parent, 0, IRQC_NUM_IRQ,
- np, &rza1_irqc_domain_ops,
- priv);
+ priv->irq_domain = irq_domain_create_hierarchy(parent, 0, IRQC_NUM_IRQ,
+ of_fwnode_handle(np), &rza1_irqc_domain_ops,
+ priv);
if (!priv->irq_domain) {
dev_err(dev, "cannot initialize irq domain\n");
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index 6a2e41f02446..1e861bd64f97 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -574,9 +574,9 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
raw_spin_lock_init(&rzg2l_irqc_data->lock);
- irq_domain = irq_domain_add_hierarchy(parent_domain, 0, IRQC_NUM_IRQ,
- node, &rzg2l_irqc_domain_ops,
- rzg2l_irqc_data);
+ irq_domain = irq_domain_create_hierarchy(parent_domain, 0, IRQC_NUM_IRQ,
+ of_fwnode_handle(node), &rzg2l_irqc_domain_ops,
+ rzg2l_irqc_data);
if (!irq_domain) {
pm_runtime_put(dev);
return dev_err_probe(dev, -ENOMEM, "failed to add irq domain\n");
diff --git a/drivers/irqchip/irq-renesas-rzv2h.c b/drivers/irqchip/irq-renesas-rzv2h.c
index 0f0fd7d4dfdf..1c12e6ec1370 100644
--- a/drivers/irqchip/irq-renesas-rzv2h.c
+++ b/drivers/irqchip/irq-renesas-rzv2h.c
@@ -522,8 +522,9 @@ static int rzv2h_icu_init_common(struct device_node *node, struct device_node *p
raw_spin_lock_init(&rzv2h_icu_data->lock);
- irq_domain = irq_domain_add_hierarchy(parent_domain, 0, ICU_NUM_IRQ, node,
- &rzv2h_icu_domain_ops, rzv2h_icu_data);
+ irq_domain = irq_domain_create_hierarchy(parent_domain, 0, ICU_NUM_IRQ,
+ of_fwnode_handle(node), &rzv2h_icu_domain_ops,
+ rzv2h_icu_data);
if (!irq_domain) {
dev_err(&pdev->dev, "failed to add irq domain\n");
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
index b8ae67c25b37..1b9fbfce9581 100644
--- a/drivers/irqchip/irq-riscv-imsic-platform.c
+++ b/drivers/irqchip/irq-riscv-imsic-platform.c
@@ -20,7 +20,7 @@
#include <linux/spinlock.h>
#include <linux/smp.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#include "irq-riscv-imsic-state.h"
static bool imsic_cpu_page_phys(unsigned int cpu, unsigned int guest_index,
diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c
index 62f76950a113..77670dd645ac 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.c
+++ b/drivers/irqchip/irq-riscv-imsic-state.c
@@ -564,7 +564,7 @@ void imsic_state_offline(void)
struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
raw_spin_lock_irqsave(&lpriv->lock, flags);
- WARN_ON_ONCE(try_to_del_timer_sync(&lpriv->timer) < 0);
+ WARN_ON_ONCE(timer_delete_sync_try(&lpriv->timer) < 0);
raw_spin_unlock_irqrestore(&lpriv->lock, flags);
#endif
}
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index f653c13de62b..e5805885394e 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -242,7 +242,7 @@ static int __init riscv_intc_init(struct device_node *node,
chip = &andes_intc_chip;
}
- return riscv_intc_init_common(of_node_to_fwnode(node), chip);
+ return riscv_intc_init_common(of_fwnode_handle(node), chip);
}
IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
diff --git a/drivers/irqchip/irq-sa11x0.c b/drivers/irqchip/irq-sa11x0.c
index 9d0b80271949..d8d4dff16276 100644
--- a/drivers/irqchip/irq-sa11x0.c
+++ b/drivers/irqchip/irq-sa11x0.c
@@ -162,7 +162,7 @@ void __init sa11x0_init_irq_nodt(int irq_start, resource_size_t io_start)
*/
writel_relaxed(1, iobase + ICCR);
- sa1100_normal_irqdomain = irq_domain_add_simple(NULL,
+ sa1100_normal_irqdomain = irq_domain_create_simple(NULL,
32, irq_start,
&sa1100_normal_irqdomain_ops, NULL);
diff --git a/drivers/irqchip/irq-sg2042-msi.c b/drivers/irqchip/irq-sg2042-msi.c
index 375b55aa0acd..af16bc5a3c8b 100644
--- a/drivers/irqchip/irq-sg2042-msi.c
+++ b/drivers/irqchip/irq-sg2042-msi.c
@@ -17,23 +17,38 @@
#include <linux/property.h>
#include <linux/slab.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
-#define SG2042_MAX_MSI_VECTOR 32
+struct sg204x_msi_chip_info {
+ const struct irq_chip *irqchip;
+ const struct msi_parent_ops *parent_ops;
+};
+
+/**
+ * struct sg204x_msi_chipdata - chip data for the SG204x MSI IRQ controller
+ * @reg_clr: clear reg, see TRM, 10.1.33, GP_INTR0_CLR
+ * @doorbell_addr: see TRM, 10.1.32, GP_INTR0_SET
+ * @irq_first: First vector number at which MSIs start
+ * @num_irqs: Number of vectors for MSIs
+ * @msi_map: Bitmap of allocated MSI vectors
+ * @msi_map_lock: Lock for msi_map
+ * @chip_info: chip-specific information
+ */
-struct sg2042_msi_chipdata {
-	void __iomem *reg_clr; // clear reg, see TRM, 10.1.33, GP_INTR0_CLR
-	phys_addr_t doorbell_addr; // see TRM, 10.1.32, GP_INTR0_SET
-	u32 irq_first; // The vector number that MSIs starts
-	u32 num_irqs; // The number of vectors for MSIs
-	DECLARE_BITMAP(msi_map, SG2042_MAX_MSI_VECTOR);
-	struct mutex msi_map_lock; // lock for msi_map
+struct sg204x_msi_chipdata {
+	void __iomem *reg_clr;
+	phys_addr_t doorbell_addr;
+	u32 irq_first;
+	u32 num_irqs;
+	unsigned long *msi_map;
+	struct mutex msi_map_lock;
+	const struct sg204x_msi_chip_info *chip_info;
};
-static int sg2042_msi_allocate_hwirq(struct sg2042_msi_chipdata *data, int num_req)
+static int sg204x_msi_allocate_hwirq(struct sg204x_msi_chipdata *data, int num_req)
{
int first;
@@ -43,7 +58,7 @@ static int sg2042_msi_allocate_hwirq(struct sg2042_msi_chipdata *data, int num_r
return first >= 0 ? first : -ENOSPC;
}
-static void sg2042_msi_free_hwirq(struct sg2042_msi_chipdata *data, int hwirq, int num_req)
+static void sg204x_msi_free_hwirq(struct sg204x_msi_chipdata *data, int hwirq, int num_req)
{
guard(mutex)(&data->msi_map_lock);
bitmap_release_region(data->msi_map, hwirq, get_count_order(num_req));
@@ -51,7 +66,7 @@ static void sg2042_msi_free_hwirq(struct sg2042_msi_chipdata *data, int hwirq, i
static void sg2042_msi_irq_ack(struct irq_data *d)
{
- struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+ struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d);
int bit_off = d->hwirq;
writel(1 << bit_off, data->reg_clr);
@@ -61,7 +76,7 @@ static void sg2042_msi_irq_ack(struct irq_data *d)
static void sg2042_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
- struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+ struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d);
msg->address_hi = upper_32_bits(data->doorbell_addr);
msg->address_lo = lower_32_bits(data->doorbell_addr);
@@ -79,9 +94,38 @@ static const struct irq_chip sg2042_msi_middle_irq_chip = {
.irq_compose_msi_msg = sg2042_msi_irq_compose_msi_msg,
};
-static int sg2042_msi_parent_domain_alloc(struct irq_domain *domain, unsigned int virq, int hwirq)
+static void sg2044_msi_irq_ack(struct irq_data *d)
+{
+ struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+
+ writel(0, (u32 __iomem *)data->reg_clr + d->hwirq);
+ irq_chip_ack_parent(d);
+}
+
+static void sg2044_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+ phys_addr_t doorbell = data->doorbell_addr + 4 * (d->hwirq / 32);
+
+ msg->address_lo = lower_32_bits(doorbell);
+ msg->address_hi = upper_32_bits(doorbell);
+ msg->data = d->hwirq % 32;
+}
+
+static struct irq_chip sg2044_msi_middle_irq_chip = {
+ .name = "SG2044 MSI",
+ .irq_ack = sg2044_msi_irq_ack,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+ .irq_compose_msi_msg = sg2044_msi_irq_compose_msi_msg,
+};
+
+static int sg204x_msi_parent_domain_alloc(struct irq_domain *domain, unsigned int virq, int hwirq)
{
- struct sg2042_msi_chipdata *data = domain->host_data;
+ struct sg204x_msi_chipdata *data = domain->host_data;
struct irq_fwspec fwspec;
struct irq_data *d;
int ret;
@@ -99,47 +143,45 @@ static int sg2042_msi_parent_domain_alloc(struct irq_domain *domain, unsigned in
return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
}
-static int sg2042_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq,
+static int sg204x_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
- struct sg2042_msi_chipdata *data = domain->host_data;
+ struct sg204x_msi_chipdata *data = domain->host_data;
int hwirq, err, i;
- hwirq = sg2042_msi_allocate_hwirq(data, nr_irqs);
+ hwirq = sg204x_msi_allocate_hwirq(data, nr_irqs);
if (hwirq < 0)
return hwirq;
for (i = 0; i < nr_irqs; i++) {
- err = sg2042_msi_parent_domain_alloc(domain, virq + i, hwirq + i);
+ err = sg204x_msi_parent_domain_alloc(domain, virq + i, hwirq + i);
if (err)
goto err_hwirq;
irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
- &sg2042_msi_middle_irq_chip, data);
+ data->chip_info->irqchip, data);
}
-
return 0;
err_hwirq:
- sg2042_msi_free_hwirq(data, hwirq, nr_irqs);
+ sg204x_msi_free_hwirq(data, hwirq, nr_irqs);
irq_domain_free_irqs_parent(domain, virq, i);
-
return err;
}
-static void sg2042_msi_middle_domain_free(struct irq_domain *domain, unsigned int virq,
+static void sg204x_msi_middle_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+ struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d);
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
- sg2042_msi_free_hwirq(data, d->hwirq, nr_irqs);
+ sg204x_msi_free_hwirq(data, d->hwirq, nr_irqs);
}
-static const struct irq_domain_ops sg2042_msi_middle_domain_ops = {
- .alloc = sg2042_msi_middle_domain_alloc,
- .free = sg2042_msi_middle_domain_free,
+static const struct irq_domain_ops sg204x_msi_middle_domain_ops = {
+ .alloc = sg204x_msi_middle_domain_alloc,
+ .free = sg204x_msi_middle_domain_free,
.select = msi_lib_irq_domain_select,
};
@@ -158,14 +200,30 @@ static const struct msi_parent_ops sg2042_msi_parent_ops = {
.init_dev_msi_info = msi_lib_init_dev_msi_info,
};
-static int sg2042_msi_init_domains(struct sg2042_msi_chipdata *data,
+#define SG2044_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS)
+
+#define SG2044_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
+
+static const struct msi_parent_ops sg2044_msi_parent_ops = {
+ .required_flags = SG2044_MSI_FLAGS_REQUIRED,
+ .supported_flags = SG2044_MSI_FLAGS_SUPPORTED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
+ .bus_select_mask = MATCH_PCI_MSI,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .prefix = "SG2044-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
+static int sg204x_msi_init_domains(struct sg204x_msi_chipdata *data,
struct irq_domain *plic_domain, struct device *dev)
{
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct irq_domain *middle_domain;
middle_domain = irq_domain_create_hierarchy(plic_domain, 0, data->num_irqs, fwnode,
- &sg2042_msi_middle_domain_ops, data);
+ &sg204x_msi_middle_domain_ops, data);
if (!middle_domain) {
pr_err("Failed to create the MSI middle domain\n");
return -ENOMEM;
@@ -174,24 +232,29 @@ static int sg2042_msi_init_domains(struct sg2042_msi_chipdata *data,
irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);
middle_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
- middle_domain->msi_parent_ops = &sg2042_msi_parent_ops;
-
+ middle_domain->msi_parent_ops = data->chip_info->parent_ops;
return 0;
}
static int sg2042_msi_probe(struct platform_device *pdev)
{
struct fwnode_reference_args args = { };
- struct sg2042_msi_chipdata *data;
+ struct sg204x_msi_chipdata *data;
struct device *dev = &pdev->dev;
struct irq_domain *plic_domain;
struct resource *res;
int ret;
- data = devm_kzalloc(dev, sizeof(struct sg2042_msi_chipdata), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct sg204x_msi_chipdata), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ data->chip_info = device_get_match_data(&pdev->dev);
+ if (!data->chip_info) {
+ dev_err(&pdev->dev, "Failed to get irqchip\n");
+ return -EINVAL;
+ }
+
data->reg_clr = devm_platform_ioremap_resource_byname(pdev, "clr");
if (IS_ERR(data->reg_clr)) {
dev_err(dev, "Failed to map clear register\n");
@@ -232,11 +295,28 @@ static int sg2042_msi_probe(struct platform_device *pdev)
mutex_init(&data->msi_map_lock);
- return sg2042_msi_init_domains(data, plic_domain, dev);
+ data->msi_map = devm_bitmap_zalloc(&pdev->dev, data->num_irqs, GFP_KERNEL);
+ if (!data->msi_map) {
+ dev_err(&pdev->dev, "Unable to allocate msi mapping\n");
+ return -ENOMEM;
+ }
+
+ return sg204x_msi_init_domains(data, plic_domain, dev);
}
+static const struct sg204x_msi_chip_info sg2042_chip_info = {
+ .irqchip = &sg2042_msi_middle_irq_chip,
+ .parent_ops = &sg2042_msi_parent_ops,
+};
+
+static const struct sg204x_msi_chip_info sg2044_chip_info = {
+ .irqchip = &sg2044_msi_middle_irq_chip,
+ .parent_ops = &sg2044_msi_parent_ops,
+};
+
static const struct of_device_id sg2042_msi_of_match[] = {
- { .compatible = "sophgo,sg2042-msi" },
+ { .compatible = "sophgo,sg2042-msi", .data = &sg2042_chip_info },
+ { .compatible = "sophgo,sg2044-msi", .data = &sg2044_chip_info },
{ }
};
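
The SG2042/SG2044 split keys all chip-specific behavior off OF match data: the .data pointer in the table above is recovered in probe via device_get_match_data() and then selects the middle irqchip and the msi_parent_ops. A minimal sketch of that retrieval (foo_probe is hypothetical; the struct matches the one added above):

#include <linux/platform_device.h>
#include <linux/property.h>

static int foo_probe(struct platform_device *pdev)
{
	const struct sg204x_msi_chip_info *info;

	/* Returns the .data member of the matching of_device_id entry */
	info = device_get_match_data(&pdev->dev);
	if (!info)
		return -EINVAL;

	/* info->irqchip and info->parent_ops now pick SG2042 vs SG2044 */
	return 0;
}
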
diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
index c7db617e1a2f..0cad68aa8388 100644
--- a/drivers/irqchip/irq-sni-exiu.c
+++ b/drivers/irqchip/irq-sni-exiu.c
@@ -249,12 +249,12 @@ static int __init exiu_dt_init(struct device_node *node,
return -ENXIO;
}
- data = exiu_init(of_node_to_fwnode(node), &res);
+ data = exiu_init(of_fwnode_handle(node), &res);
if (IS_ERR(data))
return PTR_ERR(data);
- domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_IRQS, node,
- &exiu_domain_ops, data);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, NUM_IRQS, of_fwnode_handle(node),
+ &exiu_domain_ops, data);
if (!domain) {
pr_err("%pOF: failed to allocate domain\n", node);
goto out_unmap;
diff --git a/drivers/irqchip/irq-sp7021-intc.c b/drivers/irqchip/irq-sp7021-intc.c
index bed78d1def3d..2a6eda9ab62e 100644
--- a/drivers/irqchip/irq-sp7021-intc.c
+++ b/drivers/irqchip/irq-sp7021-intc.c
@@ -256,8 +256,8 @@ static int __init sp_intc_init_dt(struct device_node *node, struct device_node *
writel_relaxed(~0, REG_INTR_CLEAR + i * 4);
}
- sp_intc.domain = irq_domain_add_linear(node, SP_INTC_NR_IRQS,
- &sp_intc_dm_ops, &sp_intc);
+ sp_intc.domain = irq_domain_create_linear(of_fwnode_handle(node), SP_INTC_NR_IRQS,
+ &sp_intc_dm_ops, &sp_intc);
if (!sp_intc.domain) {
ret = -ENOMEM;
goto out_unmap1;
diff --git a/drivers/irqchip/irq-starfive-jh8100-intc.c b/drivers/irqchip/irq-starfive-jh8100-intc.c
index 0f5837176e53..2460798ec158 100644
--- a/drivers/irqchip/irq-starfive-jh8100-intc.c
+++ b/drivers/irqchip/irq-starfive-jh8100-intc.c
@@ -158,8 +158,8 @@ static int __init starfive_intc_init(struct device_node *intc,
raw_spin_lock_init(&irqc->lock);
- irqc->domain = irq_domain_add_linear(intc, STARFIVE_INTC_SRC_IRQ_NUM,
- &starfive_intc_domain_ops, irqc);
+ irqc->domain = irq_domain_create_linear(of_fwnode_handle(intc), STARFIVE_INTC_SRC_IRQ_NUM,
+ &starfive_intc_domain_ops, irqc);
if (!irqc->domain) {
pr_err("Unable to create IRQ domain\n");
ret = -EINVAL;
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 7c6a0080c330..978811f2abe8 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -169,22 +169,18 @@ static int stm32_irq_set_type(struct irq_data *d, unsigned int type)
u32 rtsr, ftsr;
int err;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst);
ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst);
err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
if (err)
- goto unlock;
+ return err;
irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst);
irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst);
-
-unlock:
- irq_gc_unlock(gc);
-
- return err;
+ return 0;
}
static void stm32_chip_suspend(struct stm32_exti_chip_data *chip_data,
@@ -217,18 +213,16 @@ static void stm32_irq_suspend(struct irq_chip_generic *gc)
{
struct stm32_exti_chip_data *chip_data = gc->private;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
stm32_chip_suspend(chip_data, gc->wake_active);
- irq_gc_unlock(gc);
}
static void stm32_irq_resume(struct irq_chip_generic *gc)
{
struct stm32_exti_chip_data *chip_data = gc->private;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
stm32_chip_resume(chip_data, gc->mask_cache);
- irq_gc_unlock(gc);
}
static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq,
@@ -265,11 +259,8 @@ static void stm32_irq_ack(struct irq_data *d)
struct stm32_exti_chip_data *chip_data = gc->private;
const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
- irq_gc_lock(gc);
-
+ guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, d->mask, stm32_bank->rpr_ofst);
-
- irq_gc_unlock(gc);
}
static struct
@@ -344,8 +335,8 @@ static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data,
if (!host_data)
return -ENOMEM;
- domain = irq_domain_add_linear(node, drv_data->bank_nr * IRQS_PER_BANK,
- &irq_exti_domain_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(node), drv_data->bank_nr * IRQS_PER_BANK,
+ &irq_exti_domain_ops, NULL);
if (!domain) {
pr_err("%pOFn: Could not register interrupt domain.\n",
node);
diff --git a/drivers/irqchip/irq-stm32mp-exti.c b/drivers/irqchip/irq-stm32mp-exti.c
index cb83d6cc6113..c6b4407d05f9 100644
--- a/drivers/irqchip/irq-stm32mp-exti.c
+++ b/drivers/irqchip/irq-stm32mp-exti.c
@@ -531,7 +531,7 @@ static int stm32mp_exti_domain_alloc(struct irq_domain *dm,
if (ret)
return ret;
/* we only support one parent, so far */
- if (of_node_to_fwnode(out_irq.np) != dm->parent->fwnode)
+ if (of_fwnode_handle(out_irq.np) != dm->parent->fwnode)
return -EINVAL;
of_phandle_args_to_fwspec(out_irq.np, out_irq.args,
@@ -682,10 +682,9 @@ static int stm32mp_exti_probe(struct platform_device *pdev)
return -EINVAL;
}
- domain = irq_domain_add_hierarchy(parent_domain, 0,
- drv_data->bank_nr * IRQS_PER_BANK,
- np, &stm32mp_exti_domain_ops,
- host_data);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, drv_data->bank_nr * IRQS_PER_BANK,
+ of_fwnode_handle(np), &stm32mp_exti_domain_ops,
+ host_data);
if (!domain) {
dev_err(dev, "Could not register exti domain\n");
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index dd506ebfdacb..9c2c9caeca2a 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -133,7 +133,7 @@ static int __init sun4i_of_init(struct device_node *node,
/* Configure the external interrupt source type */
writel(0x00, irq_ic_data->irq_base + SUN4I_IRQ_NMI_CTRL_REG);
- irq_ic_data->irq_domain = irq_domain_add_linear(node, 3 * 32,
+ irq_ic_data->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), 3 * 32,
&sun4i_irq_ops, NULL);
if (!irq_ic_data->irq_domain)
panic("%pOF: unable to create IRQ domain\n", node);
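The same migration recurs throughout the rest of this series: the irq_domain_add_*() helpers, which take a struct device_node, are replaced by the irq_domain_create_*() variants, which take a struct fwnode_handle, with of_fwnode_handle() doing the conversion. Side by side (illustrative only, names assumed):

    /* Old, device_node based: */
    domain = irq_domain_add_linear(node, nr_irqs, &ops, host_data);

    /* New, fwnode based; of_fwnode_handle() wraps the device_node: */
    domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs,
                                      &ops, host_data);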
diff --git a/drivers/irqchip/irq-sun6i-r.c b/drivers/irqchip/irq-sun6i-r.c
index 99958d470d62..37d4b29763bc 100644
--- a/drivers/irqchip/irq-sun6i-r.c
+++ b/drivers/irqchip/irq-sun6i-r.c
@@ -338,8 +338,8 @@ static int __init sun6i_r_intc_init(struct device_node *node,
return PTR_ERR(base);
}
- domain = irq_domain_add_hierarchy(parent_domain, 0, 0, node,
- &sun6i_r_intc_domain_ops, NULL);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, 0, of_fwnode_handle(node),
+ &sun6i_r_intc_domain_ops, NULL);
if (!domain) {
pr_err("%pOF: Failed to allocate domain\n", node);
iounmap(base);
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 01b0d8321728..fe32dfdc2dd0 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -111,7 +111,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
unsigned int src_type;
unsigned int i;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
switch (flow_type & IRQF_TRIGGER_MASK) {
case IRQ_TYPE_EDGE_FALLING:
@@ -128,9 +128,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
break;
default:
- irq_gc_unlock(gc);
- pr_err("Cannot assign multiple trigger modes to IRQ %d.\n",
- data->irq);
+ pr_err("Cannot assign multiple trigger modes to IRQ %d.\n", data->irq);
return -EBADR;
}
@@ -145,9 +143,6 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
src_type_reg |= src_type;
sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);
-
- irq_gc_unlock(gc);
-
return IRQ_SET_MASK_OK;
}
@@ -159,7 +154,7 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
struct irq_domain *domain;
int ret;
- domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(node), 1, &irq_generic_chip_ops, NULL);
if (!domain) {
pr_err("Could not register interrupt domain.\n");
return -ENOMEM;
diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c
index d59bfbe8c6d0..94cbc5111d7e 100644
--- a/drivers/irqchip/irq-tb10x.c
+++ b/drivers/irqchip/irq-tb10x.c
@@ -41,11 +41,9 @@ static inline u32 ab_irqctl_readreg(struct irq_chip_generic *gc, u32 reg)
static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
- uint32_t im, mod, pol;
+ uint32_t mod, pol, im = data->mask;
- im = data->mask;
-
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
mod = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_MODE) | im;
pol = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_POLARITY) | im;
@@ -67,9 +65,7 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
case IRQ_TYPE_EDGE_RISING:
break;
default:
- irq_gc_unlock(gc);
- pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n",
- __func__, data->irq);
+ pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n", __func__, data->irq);
return -EBADR;
}
@@ -79,9 +75,6 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
ab_irqctl_writereg(gc, AB_IRQCTL_SRC_MODE, mod);
ab_irqctl_writereg(gc, AB_IRQCTL_SRC_POLARITY, pol);
ab_irqctl_writereg(gc, AB_IRQCTL_INT_STATUS, im);
-
- irq_gc_unlock(gc);
-
return IRQ_SET_MASK_OK;
}
@@ -121,13 +114,13 @@ static int __init of_tb10x_init_irq(struct device_node *ictl,
goto ioremap_fail;
}
- domain = irq_domain_add_linear(ictl, AB_IRQCTL_MAXIRQ,
- &irq_generic_chip_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(ictl), AB_IRQCTL_MAXIRQ,
+ &irq_generic_chip_ops, NULL);
if (!domain) {
ret = -ENOMEM;
pr_err("%pOFn: Could not register interrupt domain.\n",
ictl);
- goto irq_domain_add_fail;
+ goto irq_domain_create_fail;
}
ret = irq_alloc_domain_generic_chips(domain, AB_IRQCTL_MAXIRQ,
@@ -174,7 +167,7 @@ static int __init of_tb10x_init_irq(struct device_node *ictl,
gc_alloc_fail:
irq_domain_remove(domain);
-irq_domain_add_fail:
+irq_domain_create_fail:
iounmap(reg_base);
ioremap_fail:
release_mem_region(mem.start, resource_size(&mem));
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index ad3e2c1b3c87..66cbb9f77ff3 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -330,9 +330,8 @@ static int __init tegra_ictlr_init(struct device_node *node,
node, num_ictlrs, soc->num_ictlrs);
- domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32,
- node, &tegra_ictlr_domain_ops,
- lic);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, num_ictlrs * 32,
+ of_fwnode_handle(node), &tegra_ictlr_domain_ops, lic);
if (!domain) {
pr_err("%pOF: failed to allocate domain\n", node);
err = -ENOMEM;
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index a887efba262c..7de59238e6b0 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -233,7 +233,7 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom
INIT_LIST_HEAD(&vint_desc->list);
parent_node = of_irq_find_parent(dev_of_node(&inta->pdev->dev));
- parent_fwspec.fwnode = of_node_to_fwnode(parent_node);
+ parent_fwspec.fwnode = of_fwnode_handle(parent_node);
if (of_device_is_compatible(parent_node, "arm,gic-v3")) {
/* Parent is GIC */
@@ -701,15 +701,15 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
if (ret)
return ret;
- domain = irq_domain_add_linear(dev_of_node(dev),
- ti_sci_get_num_resources(inta->vint),
- &ti_sci_inta_irq_domain_ops, inta);
+ domain = irq_domain_create_linear(of_fwnode_handle(dev_of_node(dev)),
+ ti_sci_get_num_resources(inta->vint),
+ &ti_sci_inta_irq_domain_ops, inta);
if (!domain) {
dev_err(dev, "Failed to allocate IRQ domain\n");
return -ENOMEM;
}
- msi_domain = ti_sci_inta_msi_create_irq_domain(of_node_to_fwnode(node),
+ msi_domain = ti_sci_inta_msi_create_irq_domain(of_fwnode_handle(node),
&ti_sci_inta_msi_domain_info,
domain);
if (!msi_domain) {
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
index b49a73106c69..07fff5ae5ce0 100644
--- a/drivers/irqchip/irq-ti-sci-intr.c
+++ b/drivers/irqchip/irq-ti-sci-intr.c
@@ -149,7 +149,7 @@ static int ti_sci_intr_alloc_parent_irq(struct irq_domain *domain,
goto err_irqs;
parent_node = of_irq_find_parent(dev_of_node(intr->dev));
- fwspec.fwnode = of_node_to_fwnode(parent_node);
+ fwspec.fwnode = of_fwnode_handle(parent_node);
if (of_device_is_compatible(parent_node, "arm,gic-v3")) {
/* Parent is GIC */
@@ -274,8 +274,9 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
return PTR_ERR(intr->out_irqs);
}
- domain = irq_domain_add_hierarchy(parent_domain, 0, 0, dev_of_node(dev),
- &ti_sci_intr_irq_domain_ops, intr);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, 0,
+ of_fwnode_handle(dev_of_node(dev)),
+ &ti_sci_intr_irq_domain_ops, intr);
if (!domain) {
dev_err(dev, "Failed to allocate IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index 960c343d5781..e625f4fb2bb8 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -125,7 +125,7 @@ static int ts4800_ic_probe(struct platform_device *pdev)
return -EINVAL;
}
- data->domain = irq_domain_add_linear(node, 8, &ts4800_ic_ops, data);
+ data->domain = irq_domain_create_linear(of_fwnode_handle(node), 8, &ts4800_ic_ops, data);
if (!data->domain) {
dev_err(&pdev->dev, "cannot add IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/irqchip/irq-uniphier-aidet.c b/drivers/irqchip/irq-uniphier-aidet.c
index 601f9343d5b3..6005c2d28dd9 100644
--- a/drivers/irqchip/irq-uniphier-aidet.c
+++ b/drivers/irqchip/irq-uniphier-aidet.c
@@ -188,7 +188,7 @@ static int uniphier_aidet_probe(struct platform_device *pdev)
priv->domain = irq_domain_create_hierarchy(
parent_domain, 0,
UNIPHIER_AIDET_NR_IRQS,
- of_node_to_fwnode(dev->of_node),
+ of_fwnode_handle(dev->of_node),
&uniphier_aidet_domain_ops, priv);
if (!priv->domain)
return -ENOMEM;
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 0abc8934c2ee..034ce6afe170 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -176,8 +176,8 @@ static void __init fpga_irq_init(void __iomem *base, int parent_irq,
f);
}
- f->domain = irq_domain_add_linear(node, fls(valid),
- &fpga_irqdomain_ops, f);
+ f->domain = irq_domain_create_linear(of_fwnode_handle(node), fls(valid),
+ &fpga_irqdomain_ops, f);
/* This will allocate all valid descriptors in the linear case */
for (i = 0; i < fls(valid); i++)
diff --git a/drivers/irqchip/irq-vf610-mscm-ir.c b/drivers/irqchip/irq-vf610-mscm-ir.c
index 2b9a8ba58e26..5d9c7503aa7f 100644
--- a/drivers/irqchip/irq-vf610-mscm-ir.c
+++ b/drivers/irqchip/irq-vf610-mscm-ir.c
@@ -209,9 +209,9 @@ static int __init vf610_mscm_ir_of_init(struct device_node *node,
regmap_read(mscm_cp_regmap, MSCM_CPxNUM, &cpuid);
mscm_ir_data->cpu_mask = 0x1 << cpuid;
- domain = irq_domain_add_hierarchy(domain_parent, 0,
- MSCM_IRSPRC_NUM, node,
- &mscm_irq_domain_ops, mscm_ir_data);
+ domain = irq_domain_create_hierarchy(domain_parent, 0, MSCM_IRSPRC_NUM,
+ of_fwnode_handle(node), &mscm_irq_domain_ops,
+ mscm_ir_data);
if (!domain) {
ret = -ENOMEM;
goto out_unmap;
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index ea93e7236c4a..2bcdf216a000 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -289,8 +289,9 @@ static void __init vic_register(void __iomem *base, unsigned int parent_irq,
vic_handle_irq_cascaded, v);
}
- v->domain = irq_domain_add_simple(node, fls(valid_sources), irq,
- &vic_irqdomain_ops, v);
+ v->domain = irq_domain_create_simple(of_fwnode_handle(node),
+ fls(valid_sources), irq,
+ &vic_irqdomain_ops, v);
/* create an IRQ mapping for each valid IRQ */
for (i = 0; i < fls(valid_sources); i++)
if (valid_sources & (1 << i))
diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c
index e17dd3a8c2d5..3b742590aec8 100644
--- a/drivers/irqchip/irq-vt8500.c
+++ b/drivers/irqchip/irq-vt8500.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
@@ -63,29 +64,28 @@ struct vt8500_irq_data {
struct irq_domain *domain; /* Domain for this controller */
};
-/* Global variable for accessing io-mem addresses */
-static struct vt8500_irq_data intc[VT8500_INTC_MAX];
-static u32 active_cnt = 0;
+/* Primary interrupt controller data */
+static struct vt8500_irq_data *primary_intc;
-static void vt8500_irq_mask(struct irq_data *d)
+static void vt8500_irq_ack(struct irq_data *d)
{
struct vt8500_irq_data *priv = d->domain->host_data;
void __iomem *base = priv->base;
void __iomem *stat_reg = base + VT8500_ICIS + (d->hwirq < 32 ? 0 : 4);
- u8 edge, dctr;
- u32 status;
+ u32 status = (1 << (d->hwirq & 0x1f));
- edge = readb(base + VT8500_ICDC + d->hwirq) & VT8500_EDGE;
- if (edge) {
- status = readl(stat_reg);
+ writel(status, stat_reg);
+}
- status |= (1 << (d->hwirq & 0x1f));
- writel(status, stat_reg);
- } else {
- dctr = readb(base + VT8500_ICDC + d->hwirq);
- dctr &= ~VT8500_INT_ENABLE;
- writeb(dctr, base + VT8500_ICDC + d->hwirq);
- }
+static void vt8500_irq_mask(struct irq_data *d)
+{
+ struct vt8500_irq_data *priv = d->domain->host_data;
+ void __iomem *base = priv->base;
+ u8 dctr;
+
+ dctr = readb(base + VT8500_ICDC + d->hwirq);
+ dctr &= ~VT8500_INT_ENABLE;
+ writeb(dctr, base + VT8500_ICDC + d->hwirq);
}
static void vt8500_irq_unmask(struct irq_data *d)
@@ -130,11 +130,11 @@ static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type)
}
static struct irq_chip vt8500_irq_chip = {
- .name = "vt8500",
- .irq_ack = vt8500_irq_mask,
- .irq_mask = vt8500_irq_mask,
- .irq_unmask = vt8500_irq_unmask,
- .irq_set_type = vt8500_irq_set_type,
+ .name = "vt8500",
+ .irq_ack = vt8500_irq_ack,
+ .irq_mask = vt8500_irq_mask,
+ .irq_unmask = vt8500_irq_unmask,
+ .irq_set_type = vt8500_irq_set_type,
};
static void __init vt8500_init_irq_hw(void __iomem *base)
@@ -163,82 +163,89 @@ static const struct irq_domain_ops vt8500_irq_domain_ops = {
.xlate = irq_domain_xlate_onecell,
};
+static inline void vt8500_handle_irq_common(struct vt8500_irq_data *intc)
+{
+ unsigned long irqnr = readl_relaxed(intc->base) & 0x3F;
+ unsigned long stat;
+
+ /*
+	 * The Highest Priority register defaults to 63, so check that this
+	 * is a real interrupt by reading the status register
+ */
+ if (irqnr == 63) {
+ stat = readl_relaxed(intc->base + VT8500_ICIS + 4);
+ if (!(stat & BIT(31)))
+ return;
+ }
+
+ generic_handle_domain_irq(intc->domain, irqnr);
+}
+
static void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs)
{
- u32 stat, i;
- int irqnr;
- void __iomem *base;
-
- /* Loop through each active controller */
- for (i=0; i<active_cnt; i++) {
- base = intc[i].base;
- irqnr = readl_relaxed(base) & 0x3F;
- /*
- Highest Priority register default = 63, so check that this
- is a real interrupt by checking the status register
- */
- if (irqnr == 63) {
- stat = readl_relaxed(base + VT8500_ICIS + 4);
- if (!(stat & BIT(31)))
- continue;
- }
+ vt8500_handle_irq_common(primary_intc);
+}
- generic_handle_domain_irq(intc[i].domain, irqnr);
- }
+static void vt8500_handle_irq_chained(struct irq_desc *desc)
+{
+ struct irq_domain *d = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct vt8500_irq_data *intc = d->host_data;
+
+ chained_irq_enter(chip, desc);
+ vt8500_handle_irq_common(intc);
+ chained_irq_exit(chip, desc);
}
static int __init vt8500_irq_init(struct device_node *node,
struct device_node *parent)
{
- int irq, i;
- struct device_node *np = node;
+ struct vt8500_irq_data *intc;
+ int irq, i, ret = 0;
- if (active_cnt == VT8500_INTC_MAX) {
- pr_err("%s: Interrupt controllers > VT8500_INTC_MAX\n",
- __func__);
- goto out;
- }
+ intc = kzalloc(sizeof(*intc), GFP_KERNEL);
+ if (!intc)
+ return -ENOMEM;
- intc[active_cnt].base = of_iomap(np, 0);
- intc[active_cnt].domain = irq_domain_add_linear(node, 64,
- &vt8500_irq_domain_ops, &intc[active_cnt]);
-
- if (!intc[active_cnt].base) {
+ intc->base = of_iomap(node, 0);
+ if (!intc->base) {
pr_err("%s: Unable to map IO memory\n", __func__);
- goto out;
+ ret = -ENOMEM;
+ goto err_free;
}
- if (!intc[active_cnt].domain) {
+ intc->domain = irq_domain_create_linear(of_fwnode_handle(node), 64,
+ &vt8500_irq_domain_ops, intc);
+ if (!intc->domain) {
pr_err("%s: Unable to add irq domain!\n", __func__);
- goto out;
+ ret = -ENOMEM;
+ goto err_unmap;
}
- set_handle_irq(vt8500_handle_irq);
-
- vt8500_init_irq_hw(intc[active_cnt].base);
+ vt8500_init_irq_hw(intc->base);
pr_info("vt8500-irq: Added interrupt controller\n");
- active_cnt++;
-
- /* check if this is a slaved controller */
- if (of_irq_count(np) != 0) {
- /* check that we have the correct number of interrupts */
- if (of_irq_count(np) != 8) {
- pr_err("%s: Incorrect IRQ map for slaved controller\n",
- __func__);
- return -EINVAL;
- }
-
- for (i = 0; i < 8; i++) {
- irq = irq_of_parse_and_map(np, i);
- enable_irq(irq);
+ /* check if this is a chained controller */
+ if (of_irq_count(node) != 0) {
+ for (i = 0; i < of_irq_count(node); i++) {
+ irq = irq_of_parse_and_map(node, i);
+ irq_set_chained_handler_and_data(irq, vt8500_handle_irq_chained,
+ intc);
}
pr_info("vt8500-irq: Enabled slave->parent interrupts\n");
+ } else {
+ primary_intc = intc;
+ set_handle_irq(vt8500_handle_irq);
}
-out:
return 0;
+
+err_unmap:
+ iounmap(intc->base);
+err_free:
+ kfree(intc);
+ return ret;
}
IRQCHIP_DECLARE(vt8500_irq, "via,vt8500-intc", vt8500_irq_init);
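The rework above drops the fixed intc[] array in favour of per-controller allocations and registers secondary controllers as chained handlers instead of hand-enabling their parent IRQs. The chained_irq_enter()/chained_irq_exit() pair brackets the demux so the parent chip is acked and eoi'd correctly. The skeleton of that pattern, mirroring vt8500_handle_irq_chained() (a sketch, not from the patch):

    static void example_chained_handler(struct irq_desc *desc)
    {
            struct irq_chip *chip = irq_desc_get_chip(desc);

            chained_irq_enter(chip, desc);  /* ack/mask at the parent */
            /* ... read hw status, generic_handle_domain_irq() ... */
            chained_irq_exit(chip, desc);   /* eoi/unmask at the parent */
    }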
diff --git a/drivers/irqchip/irq-wpcm450-aic.c b/drivers/irqchip/irq-wpcm450-aic.c
index 91df62a64cd9..a8ed4894d29e 100644
--- a/drivers/irqchip/irq-wpcm450-aic.c
+++ b/drivers/irqchip/irq-wpcm450-aic.c
@@ -154,7 +154,7 @@ static int __init wpcm450_aic_of_init(struct device_node *node,
set_handle_irq(wpcm450_aic_handle_irq);
- aic->domain = irq_domain_add_linear(node, AIC_NUM_IRQS, &wpcm450_aic_ops, aic);
+ aic->domain = irq_domain_create_linear(of_fwnode_handle(node), AIC_NUM_IRQS, &wpcm450_aic_ops, aic);
return 0;
}
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index 38727e9cc713..92dcb9fdcb25 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -212,8 +212,8 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
xintc_write(irqc, MER, MER_HIE | MER_ME);
}
- irqc->root_domain = irq_domain_add_linear(intc, irqc->nr_irq,
- &xintc_irq_domain_ops, irqc);
+ irqc->root_domain = irq_domain_create_linear(of_fwnode_handle(intc), irqc->nr_irq,
+ &xintc_irq_domain_ops, irqc);
if (!irqc->root_domain) {
pr_err("irq-xilinx: Unable to create IRQ domain\n");
ret = -EINVAL;
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index 9b441d180299..9fdacbd89a63 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -167,8 +167,7 @@ static void __init xtensa_mx_init_common(struct irq_domain *root_domain)
int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
- irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
- &xtensa_mx_irq_domain_ops,
+ irq_domain_create_legacy(NULL, NR_IRQS - 1, 1, 0, &xtensa_mx_irq_domain_ops,
&xtensa_mx_irq_chip);
xtensa_mx_init_common(root_domain);
return 0;
@@ -178,7 +177,7 @@ static int __init xtensa_mx_init(struct device_node *np,
struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
- irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
+ irq_domain_create_linear(of_fwnode_handle(np), NR_IRQS, &xtensa_mx_irq_domain_ops,
&xtensa_mx_irq_chip);
xtensa_mx_init_common(root_domain);
return 0;
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index 9be7b7c5cd23..44e7be051a2e 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -85,7 +85,7 @@ static struct irq_chip xtensa_irq_chip = {
int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
- irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
+ irq_domain_create_legacy(NULL, NR_IRQS - 1, 1, 0,
&xtensa_irq_domain_ops, &xtensa_irq_chip);
irq_set_default_domain(root_domain);
return 0;
@@ -95,7 +95,7 @@ static int __init xtensa_pic_init(struct device_node *np,
struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
- irq_domain_add_linear(np, NR_IRQS, &xtensa_irq_domain_ops,
+ irq_domain_create_linear(of_fwnode_handle(np), NR_IRQS, &xtensa_irq_domain_ops,
&xtensa_irq_chip);
irq_set_default_domain(root_domain);
return 0;
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
index 7a72620fc478..22d46c246594 100644
--- a/drivers/irqchip/irq-zevio.c
+++ b/drivers/irqchip/irq-zevio.c
@@ -92,8 +92,8 @@ static int __init zevio_of_init(struct device_node *node,
zevio_init_irq_base(zevio_irq_io + IO_IRQ_BASE);
zevio_init_irq_base(zevio_irq_io + IO_FIQ_BASE);
- zevio_irq_domain = irq_domain_add_linear(node, MAX_INTRS,
- &irq_generic_chip_ops, NULL);
+ zevio_irq_domain = irq_domain_create_linear(of_fwnode_handle(node), MAX_INTRS,
+ &irq_generic_chip_ops, NULL);
BUG_ON(!zevio_irq_domain);
ret = irq_alloc_domain_generic_chips(zevio_irq_domain, MAX_INTRS, 1,
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index 7c17a6f643ef..576e55569d77 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -239,7 +239,7 @@ static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
goto err_unmap;
}
- shirq_domain = irq_domain_add_legacy(np, nr_irqs, virq_base, 0,
+ shirq_domain = irq_domain_create_legacy(of_fwnode_handle(np), nr_irqs, virq_base, 0,
&irq_domain_simple_ops, NULL);
if (WARN_ON(!shirq_domain)) {
pr_warn("%s: irq domain init failed\n", __func__);
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index 0b17a38ea6bf..ea44ffb5ce1a 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -312,8 +312,8 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
- ipcc->irq_domain = irq_domain_add_tree(pdev->dev.of_node,
- &qcom_ipcc_irq_ops, ipcc);
+ ipcc->irq_domain = irq_domain_create_tree(of_fwnode_handle(pdev->dev.of_node),
+ &qcom_ipcc_irq_ops, ipcc);
if (!ipcc->irq_domain)
return -ENOMEM;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index ed40d8600656..2cc2eb24dc8a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -36,6 +36,7 @@
#include <linux/sched/clock.h>
#include <linux/rculist.h>
#include <linux/delay.h>
+#include <linux/sort.h>
#include <trace/events/bcache.h>
/*
@@ -559,8 +560,6 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
}
}
-#define cmp_int(l, r) ((l > r) - (l < r))
-
#ifdef CONFIG_PROVE_LOCKING
static int btree_lock_cmp_fn(const struct lockdep_map *_a,
const struct lockdep_map *_b)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 813b38aec3e4..1a2ce1a4b456 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -293,8 +293,7 @@ static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;
bio->bi_iter.bi_sector = SB_SECTOR;
- __bio_add_page(bio, virt_to_page(out), SB_SIZE,
- offset_in_page(out));
+ bio_add_virt_nofail(bio, out, SB_SIZE);
out->offset = cpu_to_le64(sb->offset);
@@ -546,7 +545,8 @@ static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
- static const char zero_uuid[16] = { 0 };
+ static const char zero_uuid[16] __nonstring =
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
return uuid_find(c, zero_uuid);
}
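The __nonstring attribute marks zero_uuid as raw bytes rather than a NUL-terminated C string, keeping newer compilers (GCC 15's string-initialization diagnostics, presumably the motivation here) from treating the 16-byte array as a string.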
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index f0b5a6931161..d098e75e3461 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1364,7 +1364,7 @@ static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
ptr = (char *)b->data + offset;
len = n_sectors << SECTOR_SHIFT;
- __bio_add_page(bio, virt_to_page(ptr), len, offset_in_page(ptr));
+ bio_add_virt_nofail(bio, ptr, len);
submit_bio(bio);
}
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index cc3d3897ef42..1f626066e8cc 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2557,14 +2557,8 @@ static void dm_integrity_inline_recheck(struct work_struct *w)
char *mem;
outgoing_bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_READ, GFP_NOIO, &ic->recheck_bios);
-
- r = bio_add_page(outgoing_bio, virt_to_page(outgoing_data), ic->sectors_per_block << SECTOR_SHIFT, 0);
- if (unlikely(r != (ic->sectors_per_block << SECTOR_SHIFT))) {
- bio_put(outgoing_bio);
- bio->bi_status = BLK_STS_RESOURCE;
- bio_endio(bio);
- return;
- }
+ bio_add_virt_nofail(outgoing_bio, outgoing_data,
+ ic->sectors_per_block << SECTOR_SHIFT);
bip = bio_integrity_alloc(outgoing_bio, GFP_NOIO, 1);
if (IS_ERR(bip)) {
@@ -3211,7 +3205,8 @@ next_chunk:
bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_READ, GFP_NOIO, &ic->recalc_bios);
bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector;
- __bio_add_page(bio, virt_to_page(recalc_buffer), range.n_sectors << SECTOR_SHIFT, offset_in_page(recalc_buffer));
+ bio_add_virt_nofail(bio, recalc_buffer,
+ range.n_sectors << SECTOR_SHIFT);
r = submit_bio_wait(bio);
bio_put(bio);
if (unlikely(r)) {
@@ -3228,7 +3223,8 @@ next_chunk:
bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_WRITE, GFP_NOIO, &ic->recalc_bios);
bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector;
- __bio_add_page(bio, virt_to_page(recalc_buffer), range.n_sectors << SECTOR_SHIFT, offset_in_page(recalc_buffer));
+ bio_add_virt_nofail(bio, recalc_buffer,
+ range.n_sectors << SECTOR_SHIFT);
bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
if (unlikely(IS_ERR(bip))) {
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 6adc55fd90d3..127138c61be5 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -14,6 +14,7 @@
#include "raid5.h"
#include "raid10.h"
#include "md-bitmap.h"
+#include "dm-core.h"
#include <linux/device-mapper.h>
@@ -3308,6 +3309,7 @@ size_check:
/* Disable/enable discard support on raid set. */
configure_discard_support(rs);
+ rs->md.dm_gendisk = ti->table->md->disk;
mddev_unlock(&rs->md);
return 0;
@@ -3327,6 +3329,7 @@ static void raid_dtr(struct dm_target *ti)
mddev_lock_nointr(&rs->md);
md_stop(&rs->md);
+ rs->md.dm_gendisk = NULL;
mddev_unlock(&rs->md);
if (work_pending(&rs->md.event_work))
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9daa78c5fe33..0fde115e921f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -111,32 +111,48 @@ static void md_wakeup_thread_directly(struct md_thread __rcu *thread);
/* Default safemode delay: 200 msec */
#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
/*
- * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
- * is 1000 KB/sec, so the extra system load does not show up that much.
- * Increase it if you want to have more _guaranteed_ speed. Note that
- * the RAID driver will use the maximum available bandwidth if the IO
- * subsystem is idle. There is also an 'absolute maximum' reconstruction
- * speed limit - in case reconstruction slows down your system despite
- * idle IO detection.
+ * Current RAID-1,4,5,6,10 parallel reconstruction 'guaranteed speed limit'
+ * is sysctl_speed_limit_min, 1000 KB/sec by default, so the extra system load
+ * does not show up that much. Increase it if you want to have more guaranteed
+ * speed. Note that the RAID driver will use the maximum bandwidth
+ * sysctl_speed_limit_max, 200 MB/sec by default, if the IO subsystem is idle.
*
- * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
- * or /sys/block/mdX/md/sync_speed_{min,max}
+ * Background sync IO speed control:
+ *
+ * - below speed min:
+ * no limit;
+ * - above speed min and below speed max:
+ * a) if mddev is idle, then no limit;
+ * b) if mddev is busy handling normal IO, then limit inflight sync IO
+ * to sync_io_depth;
+ * - above speed max:
+ * sync IO can't be issued;
+ *
+ * Following configurations can be changed via /proc/sys/dev/raid/ for system
+ * or /sys/block/mdX/md/ for one array.
*/
-
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
-static inline int speed_min(struct mddev *mddev)
+static int sysctl_sync_io_depth = 32;
+
+static int speed_min(struct mddev *mddev)
{
return mddev->sync_speed_min ?
mddev->sync_speed_min : sysctl_speed_limit_min;
}
-static inline int speed_max(struct mddev *mddev)
+static int speed_max(struct mddev *mddev)
{
return mddev->sync_speed_max ?
mddev->sync_speed_max : sysctl_speed_limit_max;
}
+static int sync_io_depth(struct mddev *mddev)
+{
+ return mddev->sync_io_depth ?
+ mddev->sync_io_depth : sysctl_sync_io_depth;
+}
+
static void rdev_uninit_serial(struct md_rdev *rdev)
{
if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
@@ -293,14 +309,21 @@ static const struct ctl_table raid_table[] = {
.procname = "speed_limit_min",
.data = &sysctl_speed_limit_min,
.maxlen = sizeof(int),
- .mode = S_IRUGO|S_IWUSR,
+ .mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "speed_limit_max",
.data = &sysctl_speed_limit_max,
.maxlen = sizeof(int),
- .mode = S_IRUGO|S_IWUSR,
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "sync_io_depth",
+ .data = &sysctl_sync_io_depth,
+ .maxlen = sizeof(int),
+ .mode = 0644,
.proc_handler = proc_dointvec,
},
};
@@ -5091,7 +5114,7 @@ static ssize_t
sync_min_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", speed_min(mddev),
- mddev->sync_speed_min ? "local": "system");
+ mddev->sync_speed_min ? "local" : "system");
}
static ssize_t
@@ -5100,7 +5123,7 @@ sync_min_store(struct mddev *mddev, const char *buf, size_t len)
unsigned int min;
int rv;
- if (strncmp(buf, "system", 6)==0) {
+ if (strncmp(buf, "system", 6) == 0) {
min = 0;
} else {
rv = kstrtouint(buf, 10, &min);
@@ -5120,7 +5143,7 @@ static ssize_t
sync_max_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", speed_max(mddev),
- mddev->sync_speed_max ? "local": "system");
+ mddev->sync_speed_max ? "local" : "system");
}
static ssize_t
@@ -5129,7 +5152,7 @@ sync_max_store(struct mddev *mddev, const char *buf, size_t len)
unsigned int max;
int rv;
- if (strncmp(buf, "system", 6)==0) {
+ if (strncmp(buf, "system", 6) == 0) {
max = 0;
} else {
rv = kstrtouint(buf, 10, &max);
@@ -5146,6 +5169,35 @@ static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
static ssize_t
+sync_io_depth_show(struct mddev *mddev, char *page)
+{
+ return sprintf(page, "%d (%s)\n", sync_io_depth(mddev),
+ mddev->sync_io_depth ? "local" : "system");
+}
+
+static ssize_t
+sync_io_depth_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ unsigned int max;
+ int rv;
+
+ if (strncmp(buf, "system", 6) == 0) {
+ max = 0;
+ } else {
+ rv = kstrtouint(buf, 10, &max);
+ if (rv < 0)
+ return rv;
+ if (max == 0)
+ return -EINVAL;
+ }
+ mddev->sync_io_depth = max;
+ return len;
+}
+
+static struct md_sysfs_entry md_sync_io_depth =
+__ATTR_RW(sync_io_depth);
+
+static ssize_t
degraded_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d\n", mddev->degraded);
@@ -5671,6 +5723,7 @@ static struct attribute *md_redundancy_attrs[] = {
&md_mismatches.attr,
&md_sync_min.attr,
&md_sync_max.attr,
+ &md_sync_io_depth.attr,
&md_sync_speed.attr,
&md_sync_force_parallel.attr,
&md_sync_completed.attr,
@@ -8572,50 +8625,55 @@ void md_cluster_stop(struct mddev *mddev)
put_cluster_ops(mddev);
}
-static int is_mddev_idle(struct mddev *mddev, int init)
+static bool is_rdev_holder_idle(struct md_rdev *rdev, bool init)
{
+ unsigned long last_events = rdev->last_events;
+
+ if (!bdev_is_partition(rdev->bdev))
+ return true;
+
+ /*
+	 * If rdev is a partition, the array is still not idle if the user
+	 * issues IO to other partitions, even with no IO to the array itself.
+ */
+ rdev->last_events = part_stat_read_accum(rdev->bdev->bd_disk->part0,
+ sectors) -
+ part_stat_read_accum(rdev->bdev, sectors);
+
+ return init || rdev->last_events <= last_events;
+}
+
+/*
+ * mddev is idle if the following conditions have held since the last check:
+ * 1) mddev doesn't have normal IO completed;
+ * 2) mddev doesn't have inflight normal IO;
+ * 3) if any member disk is a partition, the other partitions don't have IO
+ *    completed;
+ *
+ * Note that this check relies on IO accounting being enabled.
+ */
+static bool is_mddev_idle(struct mddev *mddev, int init)
+{
+ unsigned long last_events = mddev->normal_io_events;
+ struct gendisk *disk;
struct md_rdev *rdev;
- int idle;
- int curr_events;
+ bool idle = true;
- idle = 1;
- rcu_read_lock();
- rdev_for_each_rcu(rdev, mddev) {
- struct gendisk *disk = rdev->bdev->bd_disk;
+ disk = mddev_is_dm(mddev) ? mddev->dm_gendisk : mddev->gendisk;
+ if (!disk)
+ return true;
- if (!init && !blk_queue_io_stat(disk->queue))
- continue;
+ mddev->normal_io_events = part_stat_read_accum(disk->part0, sectors);
+ if (!init && (mddev->normal_io_events > last_events ||
+ bdev_count_inflight(disk->part0)))
+ idle = false;
- curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
- atomic_read(&disk->sync_io);
- /* sync IO will cause sync_io to increase before the disk_stats
- * as sync_io is counted when a request starts, and
- * disk_stats is counted when it completes.
- * So resync activity will cause curr_events to be smaller than
- * when there was no such activity.
- * non-sync IO will cause disk_stat to increase without
- * increasing sync_io so curr_events will (eventually)
- * be larger than it was before. Once it becomes
- * substantially larger, the test below will cause
- * the array to appear non-idle, and resync will slow
- * down.
- * If there is a lot of outstanding resync activity when
- * we set last_event to curr_events, then all that activity
- * completing might cause the array to appear non-idle
- * and resync will be slowed down even though there might
- * not have been non-resync activity. This will only
- * happen once though. 'last_events' will soon reflect
- * the state where there is little or no outstanding
- * resync requests, and further resync activity will
- * always make curr_events less than last_events.
- *
- */
- if (init || curr_events - rdev->last_events > 64) {
- rdev->last_events = curr_events;
- idle = 0;
- }
- }
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev)
+ if (!is_rdev_holder_idle(rdev, init))
+ idle = false;
rcu_read_unlock();
+
return idle;
}
@@ -8927,6 +8985,23 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
}
}
+static bool sync_io_within_limit(struct mddev *mddev)
+{
+ int io_sectors;
+
+ /*
+	 * For raid456, sync IO is one stripe (4k) per IO; for other levels,
+	 * it's RESYNC_PAGES (64k) per IO.
+ */
+ if (mddev->level == 4 || mddev->level == 5 || mddev->level == 6)
+ io_sectors = 8;
+ else
+ io_sectors = 128;
+
+ return atomic_read(&mddev->recovery_active) <
+ io_sectors * sync_io_depth(mddev);
+}
+
#define SYNC_MARKS 10
#define SYNC_MARK_STEP (3*HZ)
#define UPDATE_FREQUENCY (5*60*HZ)
@@ -9195,7 +9270,8 @@ void md_do_sync(struct md_thread *thread)
msleep(500);
goto repeat;
}
- if (!is_mddev_idle(mddev, 0)) {
+ if (!sync_io_within_limit(mddev) &&
+ !is_mddev_idle(mddev, 0)) {
/*
* Give other IO more of a chance.
* The faster the devices, the less we wait.
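A worked example of the new throttle, using values from the hunks above: with the default sync_io_depth of 32, a raid4/5/6 array (io_sectors = 8) may keep up to 8 * 32 = 256 sectors (128 KiB) of sync IO in flight before idleness is even considered, while other levels (io_sectors = 128) get 128 * 32 = 4096 sectors (2 MiB). Only when recovery_active exceeds that budget and is_mddev_idle() reports normal IO activity does resync back off.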
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 1cf00a04bcdd..d45a9e6ead80 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -132,7 +132,7 @@ struct md_rdev {
sector_t sectors; /* Device size (in 512bytes sectors) */
struct mddev *mddev; /* RAID array if running */
- int last_events; /* IO event timestamp */
+ unsigned long last_events; /* IO event timestamp */
/*
* If meta_bdev is non-NULL, it means that a separate device is
@@ -404,7 +404,8 @@ struct mddev {
* are happening, so run/
* takeover/stop are not safe
*/
- struct gendisk *gendisk;
+ struct gendisk *gendisk; /* mdraid gendisk */
+ struct gendisk *dm_gendisk; /* dm-raid gendisk */
struct kobject kobj;
int hold_active;
@@ -483,6 +484,7 @@ struct mddev {
/* if zero, use the system-wide default */
int sync_speed_min;
int sync_speed_max;
+ int sync_io_depth;
/* resync even though the same disks are shared among md-devices */
int parallel_resync;
@@ -518,6 +520,7 @@ struct mddev {
* adding a spare
*/
+ unsigned long normal_io_events; /* IO event timestamp */
atomic_t recovery_active; /* blocks scheduled, but not written */
wait_queue_head_t recovery_wait;
sector_t recovery_cp;
@@ -714,17 +717,6 @@ static inline int mddev_trylock(struct mddev *mddev)
}
extern void mddev_unlock(struct mddev *mddev);
-static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
-{
- if (blk_queue_io_stat(bdev->bd_disk->queue))
- atomic_add(nr_sectors, &bdev->bd_disk->sync_io);
-}
-
-static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
-{
- md_sync_acct(bio->bi_bdev, nr_sectors);
-}
-
struct md_personality
{
struct md_submodule_head head;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index de9bccbe7337..657d481525be 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2382,7 +2382,6 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
wbio->bi_end_io = end_sync_write;
atomic_inc(&r1_bio->remaining);
- md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
submit_bio_noacct(wbio);
}
@@ -3055,7 +3054,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r1_bio->bios[i];
if (bio->bi_end_io == end_sync_read) {
read_targets--;
- md_sync_acct_bio(bio, nr_sectors);
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
submit_bio_noacct(bio);
@@ -3064,7 +3062,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
} else {
atomic_set(&r1_bio->remaining, 1);
bio = r1_bio->bios[r1_bio->read_disk];
- md_sync_acct_bio(bio, nr_sectors);
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
submit_bio_noacct(bio);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ba32bac975b8..dce06bf65016 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2426,7 +2426,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
atomic_inc(&r10_bio->remaining);
- md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
tbio->bi_opf |= MD_FAILFAST;
@@ -2448,8 +2447,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
bio_copy_data(tbio, fbio);
d = r10_bio->devs[i].devnum;
atomic_inc(&r10_bio->remaining);
- md_sync_acct(conf->mirrors[d].replacement->bdev,
- bio_sectors(tbio));
submit_bio_noacct(tbio);
}
@@ -2583,13 +2580,10 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
d = r10_bio->devs[1].devnum;
if (wbio->bi_end_io) {
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
- md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
submit_bio_noacct(wbio);
}
if (wbio2) {
atomic_inc(&conf->mirrors[d].replacement->nr_pending);
- md_sync_acct(conf->mirrors[d].replacement->bdev,
- bio_sectors(wbio2));
submit_bio_noacct(wbio2);
}
}
@@ -3757,7 +3751,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->sectors = nr_sectors;
if (bio->bi_end_io == end_sync_read) {
- md_sync_acct_bio(bio, nr_sectors);
bio->bi_status = 0;
submit_bio_noacct(bio);
}
@@ -4880,7 +4873,6 @@ read_more:
r10_bio->sectors = nr_sectors;
/* Now submit the read */
- md_sync_acct_bio(read_bio, r10_bio->sectors);
atomic_inc(&r10_bio->remaining);
read_bio->bi_next = NULL;
submit_bio_noacct(read_bio);
@@ -4940,7 +4932,6 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
continue;
atomic_inc(&rdev->nr_pending);
- md_sync_acct_bio(b, r10_bio->sectors);
atomic_inc(&r10_bio->remaining);
b->bi_next = NULL;
submit_bio_noacct(b);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 6389383166c0..ca5b0e8ba707 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1240,10 +1240,6 @@ again:
}
if (rdev) {
- if (s->syncing || s->expanding || s->expanded
- || s->replacing)
- md_sync_acct(rdev->bdev, RAID5_STRIPE_SECTORS(conf));
-
set_bit(STRIPE_IO_STARTED, &sh->state);
bio_init(bi, rdev->bdev, &dev->vec, 1, op | op_flags);
@@ -1300,10 +1296,6 @@ again:
submit_bio_noacct(bi);
}
if (rrdev) {
- if (s->syncing || s->expanding || s->expanded
- || s->replacing)
- md_sync_acct(rrdev->bdev, RAID5_STRIPE_SECTORS(conf));
-
set_bit(STRIPE_IO_STARTED, &sh->state);
bio_init(rbi, rrdev->bdev, &dev->rvec, 1, op | op_flags);
diff --git a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
index 12b73ea0f31d..419b9a7abcce 100644
--- a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
+++ b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
@@ -298,6 +298,7 @@ struct cec_dmi_match {
static const char *const port_b_conns[] = { "Port B", NULL };
static const char *const port_db_conns[] = { "Port D", "Port B", NULL };
static const char *const port_ba_conns[] = { "Port B", "Port A", NULL };
+static const char *const port_ab_conns[] = { "Port A", "Port B", NULL };
static const char *const port_d_conns[] = { "Port D", NULL };
static const struct cec_dmi_match cec_dmi_match_table[] = {
@@ -329,6 +330,10 @@ static const struct cec_dmi_match cec_dmi_match_table[] = {
{ "Google", "Dexi", "0000:00:02.0", port_db_conns },
/* Google Dita */
{ "Google", "Dita", "0000:00:02.0", port_db_conns },
+ /* Google Dirks */
+ { "Google", "Dirks", "0000:00:02.0", port_ab_conns },
+ /* Google Moxie */
+ { "Google", "Moxie", "0000:00:02.0", port_b_conns },
};
static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
index cfbfc4c1b2e6..41d019b01ec0 100644
--- a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
@@ -1002,8 +1002,8 @@ static int extron_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct extron_port *port = cec_get_drvdata(adap);
- char buf[CEC_MAX_MSG_SIZE * 3 + 1];
- char cmd[CEC_MAX_MSG_SIZE * 3 + 13];
+ char buf[(CEC_MAX_MSG_SIZE - 1) * 3 + 1];
+ char cmd[sizeof(buf) + 14];
unsigned int i;
if (port->disconnected)
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index c6ddf2357c58..b3bf2173c14e 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -469,7 +469,7 @@ vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
struct vb2_dma_sg_buf *buf = dbuf->priv;
struct sg_table *sgt = buf->dma_sgt;
- dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+ dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
return 0;
}
@@ -480,7 +480,7 @@ vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
struct vb2_dma_sg_buf *buf = dbuf->priv;
struct sg_table *sgt = buf->dma_sgt;
- dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+ dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
return 0;
}
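dma_sync_sgtable_for_cpu() and dma_sync_sgtable_for_device() are thin wrappers that pass sgt->sgl together with sgt->orig_nents to the underlying dma_sync_sg_*() calls. The open-coded versions above passed sgt->nents, the (possibly smaller) count returned by DMA mapping, rather than the entry count originally handed to dma_map_sg() that the sync API expects, so the wrappers also avoid a subtle nents/orig_nents mix-up.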
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 9201d854dbcc..1cd26faee503 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -903,6 +903,11 @@ EXPORT_SYMBOL_GPL(vb2_expbuf);
int vb2_queue_init_name(struct vb2_queue *q, const char *name)
{
+	/* vb2_memory should match v4l2_memory */
+ BUILD_BUG_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP);
+ BUILD_BUG_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR);
+ BUILD_BUG_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF);
+
/*
* Sanity check
*/
@@ -916,12 +921,6 @@ int vb2_queue_init_name(struct vb2_queue *q, const char *name)
WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
- /* Warn that vb2_memory should match with v4l2_memory */
- if (WARN_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP)
- || WARN_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR)
- || WARN_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF))
- return -EINVAL;
-
if (q->buf_struct_size == 0)
q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
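BUILD_BUG_ON(cond) breaks the compile when cond is true, so the vb2/v4l2 memory-enum equivalence is now enforced at build time; a mismatch can no longer slip through to a runtime WARN_ON() and -EINVAL on a running system.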
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index c5582d4fa5be..b40daf242046 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -2630,7 +2630,7 @@ static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode)
dib7090_configMpegMux(state, 3, 1, 1);
dib7090_setHostBusMux(state, MPEG_ON_HOSTBUS);
} else {/* Use Smooth block */
- dprintk("setting output mode TS_SERIAL using Smooth bloc\n");
+ dprintk("setting output mode TS_SERIAL using Smooth block\n");
dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (2<<6) | (0 << 1);
}
@@ -2654,7 +2654,7 @@ static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode)
outreg |= (1<<6);
break;
- case OUTMODE_MPEG2_FIFO: /* Using Smooth block because not supported by new Mpeg Mux bloc */
+ case OUTMODE_MPEG2_FIFO: /* Using Smooth block because not supported by new Mpeg Mux block */
dprintk("setting output mode TS_FIFO using Smooth block\n");
dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (5<<6);
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index cfe59c3255f7..d90f1b0b2051 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -1584,7 +1584,7 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
dib8096p_configMpegMux(state, 3, 1, 1);
dib8096p_setHostBusMux(state, MPEG_ON_HOSTBUS);
} else {/* Use Smooth block */
- dprintk("dib8096P setting output mode TS_SERIAL using Smooth bloc\n");
+ dprintk("dib8096P setting output mode TS_SERIAL using Smooth block\n");
dib8096p_setHostBusMux(state,
DEMOUT_ON_HOSTBUS);
outreg |= (2 << 6) | (0 << 1);
@@ -1612,7 +1612,8 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
case OUTMODE_MPEG2_FIFO:
/* Using Smooth block because not supported
- by new Mpeg Mux bloc */
+ * by new Mpeg Mux block
+ */
dprintk("dib8096P setting output mode TS_FIFO using Smooth block\n");
dib8096p_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (5 << 6);
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index e45ba127069f..e68202954a8f 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -217,6 +217,7 @@ config VIDEO_IMX319
config VIDEO_IMX334
tristate "Sony IMX334 sensor support"
depends on OF_GPIO
+ select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor driver for the Sony
IMX334 camera.
@@ -356,6 +357,26 @@ config VIDEO_OV02A10
To compile this driver as a module, choose M here: the
module will be called ov02a10.
+config VIDEO_OV02E10
+ tristate "OmniVision OV02E10 sensor support"
+ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV02E10 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov02e10.
+
+config VIDEO_OV02C10
+ tristate "OmniVision OV02C10 sensor support"
+ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV02C10 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov02c10.
+
config VIDEO_OV08D10
tristate "OmniVision OV08D10 sensor support"
help
@@ -691,6 +712,28 @@ config VIDEO_S5K6A3
This is a V4L2 sensor driver for Samsung S5K6A3 raw
camera sensor.
+config VIDEO_VD55G1
+ tristate "ST VD55G1 sensor support"
+ select V4L2_CCI_I2C
+ depends on GPIOLIB
+ help
+ This is a Video4Linux2 sensor driver for the ST VD55G1
+ camera sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vd55g1.
+
+config VIDEO_VD56G3
+ tristate "ST VD56G3 sensor support"
+ select V4L2_CCI_I2C
+ depends on GPIOLIB
+ help
+ This is a Video4Linux2 sensor driver for the ST VD56G3
+ camera sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vd56g3.
+
config VIDEO_VGXY61
tristate "ST VGXY61 sensor support"
select V4L2_CCI_I2C
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 6c23a4463527..5873d29433ee 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -83,6 +83,8 @@ obj-$(CONFIG_VIDEO_MT9V111) += mt9v111.o
obj-$(CONFIG_VIDEO_OG01A1B) += og01a1b.o
obj-$(CONFIG_VIDEO_OV01A10) += ov01a10.o
obj-$(CONFIG_VIDEO_OV02A10) += ov02a10.o
+obj-$(CONFIG_VIDEO_OV02C10) += ov02c10.o
+obj-$(CONFIG_VIDEO_OV02E10) += ov02e10.o
obj-$(CONFIG_VIDEO_OV08D10) += ov08d10.o
obj-$(CONFIG_VIDEO_OV08X40) += ov08x40.o
obj-$(CONFIG_VIDEO_OV13858) += ov13858.o
@@ -153,6 +155,8 @@ obj-$(CONFIG_VIDEO_TW9910) += tw9910.o
obj-$(CONFIG_VIDEO_UDA1342) += uda1342.o
obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o
obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
+obj-$(CONFIG_VIDEO_VD55G1) += vd55g1.o
+obj-$(CONFIG_VIDEO_VD56G3) += vd56g3.o
obj-$(CONFIG_VIDEO_VGXY61) += vgxy61.o
obj-$(CONFIG_VIDEO_VP27SMPX) += vp27smpx.o
obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index f95a99d85360..853c7806de92 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -1370,9 +1370,9 @@ static int adv7511_set_fmt(struct v4l2_subdev *sd,
case V4L2_COLORSPACE_BT2020:
c = HDMI_COLORIMETRY_EXTENDED;
if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
- ec = 5; /* Not yet available in hdmi.h */
+ ec = HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM;
else
- ec = 6; /* Not yet available in hdmi.h */
+ ec = HDMI_EXTENDED_COLORIMETRY_BT2020;
break;
default:
break;
diff --git a/drivers/media/i2c/ccs-pll.c b/drivers/media/i2c/ccs-pll.c
index 34ccda666524..4eb83636e102 100644
--- a/drivers/media/i2c/ccs-pll.c
+++ b/drivers/media/i2c/ccs-pll.c
@@ -123,10 +123,15 @@ static void print_pll(struct device *dev, const struct ccs_pll *pll)
pll->pixel_rate_pixel_array);
dev_dbg(dev, "pixel rate on CSI-2 bus:\t%u\n",
pll->pixel_rate_csi);
+}
- dev_dbg(dev, "flags%s%s%s%s%s%s%s%s%s\n",
+static void print_pll_flags(struct device *dev, struct ccs_pll *pll)
+{
+ dev_dbg(dev, "PLL flags%s%s%s%s%s%s%s%s%s%s%s\n",
+ pll->flags & PLL_FL(OP_PIX_CLOCK_PER_LANE) ? " op-pix-clock-per-lane" : "",
+ pll->flags & PLL_FL(EVEN_PLL_MULTIPLIER) ? " even-pll-multiplier" : "",
+ pll->flags & PLL_FL(NO_OP_CLOCKS) ? " no-op-clocks" : "",
pll->flags & PLL_FL(LANE_SPEED_MODEL) ? " lane-speed" : "",
- pll->flags & PLL_FL(LINK_DECOUPLED) ? " link-decoupled" : "",
pll->flags & PLL_FL(EXT_IP_PLL_DIVIDER) ?
" ext-ip-pll-divider" : "",
pll->flags & PLL_FL(FLEXIBLE_OP_PIX_CLK_DIV) ?
@@ -311,14 +316,24 @@ __ccs_pll_calculate_vt_tree(struct device *dev,
more_mul *= DIV_ROUND_UP(lim_fr->min_pll_multiplier, mul * more_mul);
dev_dbg(dev, "more_mul2: %u\n", more_mul);
- pll_fr->pll_multiplier = mul * more_mul;
+ if (pll->flags & CCS_PLL_FLAG_EVEN_PLL_MULTIPLIER &&
+ (mul & 1) && (more_mul & 1))
+ more_mul <<= 1;
- if (pll_fr->pll_multiplier * pll_fr->pll_ip_clk_freq_hz >
- lim_fr->max_pll_op_clk_freq_hz)
+ pll_fr->pll_multiplier = mul * more_mul;
+ if (pll_fr->pll_multiplier > lim_fr->max_pll_multiplier) {
+ dev_dbg(dev, "pll multiplier %u too high\n",
+ pll_fr->pll_multiplier);
return -EINVAL;
+ }
pll_fr->pll_op_clk_freq_hz =
pll_fr->pll_ip_clk_freq_hz * pll_fr->pll_multiplier;
+ if (pll_fr->pll_op_clk_freq_hz > lim_fr->max_pll_op_clk_freq_hz) {
+ dev_dbg(dev, "too high OP clock %u\n",
+ pll_fr->pll_op_clk_freq_hz);
+ return -EINVAL;
+ }
vt_div = div * more_mul;
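A quick check of the even-multiplier handling above: mul * more_mul is odd only when both factors are odd (e.g. mul = 9, more_mul = 3 gives 27), so doubling more_mul in exactly that case (to 6, for a product of 54) is the minimal adjustment that satisfies CCS_PLL_FLAG_EVEN_PLL_MULTIPLIER.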
@@ -397,6 +412,8 @@ static int ccs_pll_calculate_vt_tree(struct device *dev,
min_pre_pll_clk_div = max_t(u16, min_pre_pll_clk_div,
pll->ext_clk_freq_hz /
lim_fr->max_pll_ip_clk_freq_hz);
+ if (!(pll->flags & CCS_PLL_FLAG_EXT_IP_PLL_DIVIDER))
+ min_pre_pll_clk_div = clk_div_even(min_pre_pll_clk_div);
dev_dbg(dev, "vt min/max_pre_pll_clk_div: %u,%u\n",
min_pre_pll_clk_div, max_pre_pll_clk_div);
@@ -432,10 +449,11 @@ static int ccs_pll_calculate_vt_tree(struct device *dev,
return 0;
}
+ dev_dbg(dev, "unable to compute VT pre_pll divisor\n");
return -EINVAL;
}
-static void
+static int
ccs_pll_calculate_vt(struct device *dev, const struct ccs_pll_limits *lim,
const struct ccs_pll_branch_limits_bk *op_lim_bk,
struct ccs_pll *pll, struct ccs_pll_branch_fr *pll_fr,
@@ -558,6 +576,8 @@ ccs_pll_calculate_vt(struct device *dev, const struct ccs_pll_limits *lim,
if (best_pix_div < SHRT_MAX >> 1)
break;
}
+ if (best_pix_div == SHRT_MAX >> 1)
+ return -EINVAL;
pll->vt_bk.sys_clk_div = DIV_ROUND_UP(vt_div, best_pix_div);
pll->vt_bk.pix_clk_div = best_pix_div;
@@ -570,6 +590,8 @@ ccs_pll_calculate_vt(struct device *dev, const struct ccs_pll_limits *lim,
out_calc_pixel_rate:
pll->pixel_rate_pixel_array =
pll->vt_bk.pix_clk_freq_hz * pll->vt_lanes;
+
+ return 0;
}
/*
@@ -659,6 +681,10 @@ ccs_pll_calculate_op(struct device *dev, const struct ccs_pll_limits *lim,
if (!is_one_or_even(i))
i <<= 1;
+ if (pll->flags & CCS_PLL_FLAG_EVEN_PLL_MULTIPLIER &&
+ mul & 1 && i & 1)
+ i <<= 1;
+
dev_dbg(dev, "final more_mul: %u\n", i);
if (i > more_mul_max) {
dev_dbg(dev, "final more_mul is bad, max %u\n", more_mul_max);
@@ -716,6 +742,8 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
u32 i;
int rval = -EINVAL;
+ print_pll_flags(dev, pll);
+
if (!(pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL)) {
pll->op_lanes = 1;
pll->vt_lanes = 1;
@@ -792,7 +820,7 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
op_lim_fr->min_pre_pll_clk_div, op_lim_fr->max_pre_pll_clk_div);
max_op_pre_pll_clk_div =
min_t(u16, op_lim_fr->max_pre_pll_clk_div,
- clk_div_even(pll->ext_clk_freq_hz /
+ DIV_ROUND_UP(pll->ext_clk_freq_hz,
op_lim_fr->min_pll_ip_clk_freq_hz));
min_op_pre_pll_clk_div =
max_t(u16, op_lim_fr->min_pre_pll_clk_div,
@@ -815,6 +843,8 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
one_or_more(
DIV_ROUND_UP(op_lim_fr->max_pll_op_clk_freq_hz,
pll->ext_clk_freq_hz))));
+ if (!(pll->flags & CCS_PLL_FLAG_EXT_IP_PLL_DIVIDER))
+ min_op_pre_pll_clk_div = clk_div_even(min_op_pre_pll_clk_div);
dev_dbg(dev, "pll_op check: min / max op_pre_pll_clk_div: %u / %u\n",
min_op_pre_pll_clk_div, max_op_pre_pll_clk_div);
@@ -843,8 +873,10 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
if (pll->flags & CCS_PLL_FLAG_DUAL_PLL)
break;
- ccs_pll_calculate_vt(dev, lim, op_lim_bk, pll, op_pll_fr,
- op_pll_bk, cphy, phy_const);
+ rval = ccs_pll_calculate_vt(dev, lim, op_lim_bk, pll, op_pll_fr,
+ op_pll_bk, cphy, phy_const);
+ if (rval)
+ continue;
rval = check_bk_bounds(dev, lim, pll, PLL_VT);
if (rval)
@@ -857,8 +889,7 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
}
if (rval) {
- dev_dbg(dev, "unable to compute pre_pll divisor\n");
-
+ dev_dbg(dev, "unable to compute OP pre_pll divisor\n");
return rval;
}
diff --git a/drivers/media/i2c/ccs-pll.h b/drivers/media/i2c/ccs-pll.h
index 6eb1b1c68e1e..e22903931e72 100644
--- a/drivers/media/i2c/ccs-pll.h
+++ b/drivers/media/i2c/ccs-pll.h
@@ -18,19 +18,40 @@
#define CCS_PLL_BUS_TYPE_CSI2_DPHY 0x00
#define CCS_PLL_BUS_TYPE_CSI2_CPHY 0x01
-/* Old SMIA and implementation specific flags */
-/* op pix clock is for all lanes in total normally */
+/* Old SMIA and implementation specific flags. */
+/* OP PIX clock is for all lanes in total normally. */
#define CCS_PLL_FLAG_OP_PIX_CLOCK_PER_LANE BIT(0)
-#define CCS_PLL_FLAG_NO_OP_CLOCKS BIT(1)
+/* If set, the PLL multipliers are required to be even. */
+#define CCS_PLL_FLAG_EVEN_PLL_MULTIPLIER BIT(3)
+
/* CCS PLL flags */
+
+/* The sensor doesn't have OP clocks at all. */
+#define CCS_PLL_FLAG_NO_OP_CLOCKS BIT(1)
+/* System speed model if this flag is unset. */
#define CCS_PLL_FLAG_LANE_SPEED_MODEL BIT(2)
-#define CCS_PLL_FLAG_LINK_DECOUPLED BIT(3)
+/* If set, the pre-PLL divider may have odd values, too. */
#define CCS_PLL_FLAG_EXT_IP_PLL_DIVIDER BIT(4)
+/*
+ * If set, the OP PIX clock doesn't have to exactly match the data rate; it
+ * may be higher. See "OP Domain Formulas" in the MIPI CCS 1.1 spec.
+ */
#define CCS_PLL_FLAG_FLEXIBLE_OP_PIX_CLK_DIV BIT(5)
+/* If set, the VT domain may run faster than the OP domain. */
#define CCS_PLL_FLAG_FIFO_DERATING BIT(6)
+/* If set, the VT domain may run slower than the OP domain. */
#define CCS_PLL_FLAG_FIFO_OVERRATING BIT(7)
+/* If set, the PLL tree has two PLLs instead of one. */
#define CCS_PLL_FLAG_DUAL_PLL BIT(8)
+/*
+ * If set, the OP SYS clock is a dual data rate clock, transferring two bits per
+ * cycle instead of one.
+ */
#define CCS_PLL_FLAG_OP_SYS_DDR BIT(9)
+/*
+ * If set, the OP PIX clock is a dual data rate clock, transferring two pixels
+ * per cycle instead of one.
+ */
#define CCS_PLL_FLAG_OP_PIX_DDR BIT(10)
/**
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index 004d28c33287..487bcabb4a19 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -1354,8 +1354,10 @@ static int ccs_change_cci_addr(struct ccs_sensor *sensor)
client->addr = sensor->hwcfg.i2c_addr_dfl;
- rval = ccs_write(sensor, CCI_ADDRESS_CTRL,
- sensor->hwcfg.i2c_addr_alt << 1);
+ rval = read_poll_timeout(ccs_write, rval, !rval, CCS_RESET_DELAY_US,
+ CCS_RESET_TIMEOUT_US, false, sensor,
+ CCI_ADDRESS_CTRL,
+ sensor->hwcfg.i2c_addr_alt << 1);
if (rval)
return rval;
@@ -1575,44 +1577,38 @@ static int ccs_power_on(struct device *dev)
if (ccsdev->flags & CCS_DEVICE_FLAG_IS_SMIA)
sleep = SMIAPP_RESET_DELAY(sensor->hwcfg.ext_clk);
else
- sleep = 5000;
+ sleep = CCS_RESET_DELAY_US;
usleep_range(sleep, sleep);
}
/*
- * Failures to respond to the address change command have been noticed.
- * Those failures seem to be caused by the sensor requiring a longer
- * boot time than advertised. An additional 10ms delay seems to work
- * around the issue, but the SMIA++ I2C write retry hack makes the delay
- * unnecessary. The failures need to be investigated to find a proper
- * fix, and a delay will likely need to be added here if the I2C write
- * retry hack is reverted before the root cause of the boot time issue
- * is found.
+ * Some devices take longer than the spec-defined time to respond
+	 * after reset. Keep trying until enough time has passed before
+	 * flagging it as an error.
*/
-
if (!sensor->reset && !sensor->xshutdown) {
- u8 retry = 100;
u32 reset;
- rval = ccs_write(sensor, SOFTWARE_RESET, CCS_SOFTWARE_RESET_ON);
+ rval = read_poll_timeout(ccs_write, rval, !rval,
+ CCS_RESET_DELAY_US,
+ CCS_RESET_TIMEOUT_US,
+ false, sensor, SOFTWARE_RESET,
+ CCS_SOFTWARE_RESET_ON);
if (rval < 0) {
dev_err(dev, "software reset failed\n");
goto out_cci_addr_fail;
}
- do {
- rval = ccs_read(sensor, SOFTWARE_RESET, &reset);
- reset = !rval && reset == CCS_SOFTWARE_RESET_OFF;
- if (reset)
- break;
-
- usleep_range(1000, 2000);
- } while (--retry);
-
- if (!reset) {
- dev_err(dev, "software reset failed\n");
- rval = -EIO;
+ rval = read_poll_timeout(ccs_read, rval,
+ !rval &&
+ reset == CCS_SOFTWARE_RESET_OFF,
+ CCS_RESET_DELAY_US,
+ CCS_RESET_TIMEOUT_US, false, sensor,
+ SOFTWARE_RESET, &reset);
+ if (rval < 0) {
+ dev_err_probe(dev, rval,
+ "failed to respond after reset\n");
goto out_cci_addr_fail;
}
}
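Note (illustration, not part of the patch): read_poll_timeout() from <linux/iopoll.h> repeatedly invokes its first argument, stores the return value in the second, and stops once the condition holds or the timeout expires, returning -ETIMEDOUT in the latter case. A minimal sketch of the retry idiom used above:

	#include <linux/iopoll.h>

	/* Sketch: retry the software-reset write until the sensor ACKs it. */
	static int example_retry_reset_write(struct ccs_sensor *sensor)
	{
		int ret;

		/* Re-invoke ccs_write() every CCS_RESET_DELAY_US until it
		 * returns 0; give up after CCS_RESET_TIMEOUT_US. */
		ret = read_poll_timeout(ccs_write, ret, !ret, CCS_RESET_DELAY_US,
					CCS_RESET_TIMEOUT_US, false, sensor,
					SOFTWARE_RESET, CCS_SOFTWARE_RESET_ON);

		return ret;
	}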
@@ -2857,10 +2853,6 @@ static int ccs_identify_module(struct ccs_sensor *sensor)
break;
}
- if (i >= ARRAY_SIZE(ccs_module_idents))
- dev_warn(&client->dev,
- "no quirks for this module; let's hope it's fully compliant\n");
-
dev_dbg(&client->dev, "the sensor is called %s\n", minfo->name);
return 0;
@@ -3131,8 +3123,6 @@ static int ccs_get_hwconfig(struct ccs_sensor *sensor, struct device *dev)
rval = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
&hwcfg->ext_clk);
- if (rval)
- dev_info(dev, "can't get clock-frequency\n");
dev_dbg(dev, "clk %u, mode %u\n", hwcfg->ext_clk,
hwcfg->csi_signalling_mode);
@@ -3451,7 +3441,6 @@ static int ccs_probe(struct i2c_client *client)
CCS_LIM(sensor, NUM_OF_VT_LANES) + 1;
sensor->pll.op_lanes =
CCS_LIM(sensor, NUM_OF_OP_LANES) + 1;
- sensor->pll.flags |= CCS_PLL_FLAG_LINK_DECOUPLED;
} else {
sensor->pll.vt_lanes = sensor->pll.csi2.lanes;
sensor->pll.op_lanes = sensor->pll.csi2.lanes;
diff --git a/drivers/media/i2c/ccs/ccs-quirk.c b/drivers/media/i2c/ccs/ccs-quirk.c
index e3d4c7a275bc..e48a4fa1f5dd 100644
--- a/drivers/media/i2c/ccs/ccs-quirk.c
+++ b/drivers/media/i2c/ccs/ccs-quirk.c
@@ -190,8 +190,7 @@ static int jt8ev1_post_streamoff(struct ccs_sensor *sensor)
static int jt8ev1_init(struct ccs_sensor *sensor)
{
- sensor->pll.flags |= CCS_PLL_FLAG_LANE_SPEED_MODEL |
- CCS_PLL_FLAG_LINK_DECOUPLED;
+ sensor->pll.flags |= CCS_PLL_FLAG_LANE_SPEED_MODEL;
sensor->pll.vt_lanes = 1;
sensor->pll.op_lanes = sensor->pll.csi2.lanes;
diff --git a/drivers/media/i2c/ccs/ccs-reg-access.c b/drivers/media/i2c/ccs/ccs-reg-access.c
index a696a0ec8ff5..fd36889ccc1d 100644
--- a/drivers/media/i2c/ccs/ccs-reg-access.c
+++ b/drivers/media/i2c/ccs/ccs-reg-access.c
@@ -210,7 +210,6 @@ int ccs_read_addr_noconv(struct ccs_sensor *sensor, u32 reg, u32 *val)
*/
int ccs_write_addr(struct ccs_sensor *sensor, u32 reg, u32 val)
{
- unsigned int retries = 10;
int rval;
rval = ccs_call_quirk(sensor, reg_access, true, &reg, &val);
@@ -219,13 +218,7 @@ int ccs_write_addr(struct ccs_sensor *sensor, u32 reg, u32 val)
if (rval < 0)
return rval;
- rval = 0;
- do {
- if (cci_write(sensor->regmap, reg, val, &rval))
- fsleep(1000);
- } while (rval && --retries);
-
- return rval;
+ return cci_write(sensor->regmap, reg, val, NULL);
}
#define MAX_WRITE_LEN 32U
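Note (illustration, not part of the patch): cci_write() from <media/v4l2-cci.h> takes the same optional error pointer; passing NULL, as above, just returns the status, while passing &ret accumulates the first failure across a sequence of writes:

	#include <media/v4l2-cci.h>

	/* Sketch: two chained writes, one error check at the end.
	 * The register addresses are placeholders. */
	static int example_cci_chain(struct regmap *map)
	{
		int ret = 0;

		cci_write(map, CCI_REG8(0x0100), 0x01, &ret);
		cci_write(map, CCI_REG8(0x0101), 0x02, &ret);	/* skipped if ret != 0 */

		return ret;
	}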
diff --git a/drivers/media/i2c/ccs/ccs.h b/drivers/media/i2c/ccs/ccs.h
index 096573845a10..0726c4687f0f 100644
--- a/drivers/media/i2c/ccs/ccs.h
+++ b/drivers/media/i2c/ccs/ccs.h
@@ -43,6 +43,8 @@
#define SMIAPP_RESET_DELAY(clk) \
(1000 + (SMIAPP_RESET_DELAY_CLOCKS * 1000 \
+ (clk) / 1000 - 1) / ((clk) / 1000))
+#define CCS_RESET_DELAY_US 5000
+#define CCS_RESET_TIMEOUT_US 1000000
#define CCS_COLOUR_COMPONENTS 4
diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
index fd2d2d5272bf..6d3f8617ef13 100644
--- a/drivers/media/i2c/ds90ub913.c
+++ b/drivers/media/i2c/ds90ub913.c
@@ -12,7 +12,6 @@
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/fwnode.h>
#include <linux/gpio/driver.h>
#include <linux/i2c-atr.h>
#include <linux/i2c.h>
@@ -119,44 +118,66 @@ static const struct ub913_format_info *ub913_find_format(u32 incode)
return NULL;
}
-static int ub913_read(const struct ub913_data *priv, u8 reg, u8 *val)
+static int ub913_read(const struct ub913_data *priv, u8 reg, u8 *val,
+ int *err)
{
unsigned int v;
int ret;
+ if (err && *err)
+ return *err;
+
ret = regmap_read(priv->regmap, reg, &v);
- if (ret < 0) {
+ if (ret) {
dev_err(&priv->client->dev,
"Cannot read register 0x%02x: %d!\n", reg, ret);
- return ret;
+ goto out;
}
*val = v;
- return 0;
+
+out:
+ if (ret && err)
+ *err = ret;
+
+ return ret;
}
-static int ub913_write(const struct ub913_data *priv, u8 reg, u8 val)
+static int ub913_write(const struct ub913_data *priv, u8 reg, u8 val,
+ int *err)
{
int ret;
+ if (err && *err)
+ return *err;
+
ret = regmap_write(priv->regmap, reg, val);
if (ret < 0)
dev_err(&priv->client->dev,
"Cannot write register 0x%02x: %d!\n", reg, ret);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
static int ub913_update_bits(const struct ub913_data *priv, u8 reg, u8 mask,
- u8 val)
+ u8 val, int *err)
{
int ret;
+ if (err && *err)
+ return *err;
+
ret = regmap_update_bits(priv->regmap, reg, mask, val);
if (ret < 0)
dev_err(&priv->client->dev,
"Cannot update register 0x%02x %d!\n", reg, ret);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
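Note (illustration, not part of the patch): with the new err parameter the helpers become no-ops once *err is non-zero, which lets call sites chain register accesses and test the outcome once, as ub913_log_status() below does. A minimal sketch:

	/* Sketch: accumulate the first error across several writes. */
	static int example_ub913_chain(const struct ub913_data *priv)
	{
		int ret = 0;

		ub913_write(priv, UB913_REG_SCL_HIGH_TIME, 0x7f, &ret);
		ub913_write(priv, UB913_REG_SCL_LOW_TIME, 0x7f, &ret);

		return ret;	/* 0, or the first failure */
	}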
@@ -204,7 +225,7 @@ static int ub913_gpiochip_probe(struct ub913_data *priv)
int ret;
/* Initialize GPIOs 0 and 1 to local control, tri-state */
- ub913_write(priv, UB913_REG_GPIO_CFG(0), 0);
+ ub913_write(priv, UB913_REG_GPIO_CFG(0), 0, NULL);
gc->label = dev_name(dev);
gc->parent = dev;
@@ -450,10 +471,10 @@ static int ub913_set_fmt(struct v4l2_subdev *sd,
if (!fmt)
return -EINVAL;
- format->format.code = finfo->outcode;
-
*fmt = format->format;
+ fmt->code = finfo->outcode;
+
return 0;
}
@@ -482,25 +503,41 @@ static int ub913_log_status(struct v4l2_subdev *sd)
{
struct ub913_data *priv = sd_to_ub913(sd);
struct device *dev = &priv->client->dev;
- u8 v = 0, v1 = 0, v2 = 0;
+ u8 v, v1, v2;
+ int ret;
+
+ ret = ub913_read(priv, UB913_REG_MODE_SEL, &v, NULL);
+ if (ret)
+ return ret;
- ub913_read(priv, UB913_REG_MODE_SEL, &v);
dev_info(dev, "MODE_SEL %#02x\n", v);
- ub913_read(priv, UB913_REG_CRC_ERRORS_LSB, &v1);
- ub913_read(priv, UB913_REG_CRC_ERRORS_MSB, &v2);
+ ub913_read(priv, UB913_REG_CRC_ERRORS_LSB, &v1, &ret);
+ ub913_read(priv, UB913_REG_CRC_ERRORS_MSB, &v2, &ret);
+ if (ret)
+ return ret;
+
dev_info(dev, "CRC errors %u\n", v1 | (v2 << 8));
/* clear CRC errors */
- ub913_read(priv, UB913_REG_GENERAL_CFG, &v);
+ ub913_read(priv, UB913_REG_GENERAL_CFG, &v, &ret);
ub913_write(priv, UB913_REG_GENERAL_CFG,
- v | UB913_REG_GENERAL_CFG_CRC_ERR_RESET);
- ub913_write(priv, UB913_REG_GENERAL_CFG, v);
+ v | UB913_REG_GENERAL_CFG_CRC_ERR_RESET, &ret);
+ ub913_write(priv, UB913_REG_GENERAL_CFG, v, &ret);
+
+ if (ret)
+ return ret;
+
+ ret = ub913_read(priv, UB913_REG_GENERAL_STATUS, &v, NULL);
+ if (ret)
+ return ret;
- ub913_read(priv, UB913_REG_GENERAL_STATUS, &v);
dev_info(dev, "GENERAL_STATUS %#02x\n", v);
- ub913_read(priv, UB913_REG_PLL_OVR, &v);
+ ret = ub913_read(priv, UB913_REG_PLL_OVR, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "PLL_OVR %#02x\n", v);
return 0;
@@ -656,11 +693,11 @@ static int ub913_i2c_master_init(struct ub913_data *priv)
scl_high = div64_u64((u64)scl_high * ref, 1000000000);
scl_low = div64_u64((u64)scl_low * ref, 1000000000);
- ret = ub913_write(priv, UB913_REG_SCL_HIGH_TIME, scl_high);
+ ret = ub913_write(priv, UB913_REG_SCL_HIGH_TIME, scl_high, NULL);
if (ret)
return ret;
- ret = ub913_write(priv, UB913_REG_SCL_LOW_TIME, scl_low);
+ ret = ub913_write(priv, UB913_REG_SCL_LOW_TIME, scl_low, NULL);
if (ret)
return ret;
@@ -670,6 +707,7 @@ static int ub913_i2c_master_init(struct ub913_data *priv)
static int ub913_add_i2c_adapter(struct ub913_data *priv)
{
struct device *dev = &priv->client->dev;
+ struct i2c_atr_adap_desc desc = { };
struct fwnode_handle *i2c_handle;
int ret;
@@ -677,8 +715,12 @@ static int ub913_add_i2c_adapter(struct ub913_data *priv)
if (!i2c_handle)
return 0;
- ret = i2c_atr_add_adapter(priv->plat_data->atr, priv->plat_data->port,
- dev, i2c_handle);
+ desc.chan_id = priv->plat_data->port;
+ desc.parent = dev;
+ desc.bus_handle = i2c_handle;
+ desc.num_aliases = 0;
+
+ ret = i2c_atr_add_adapter(priv->plat_data->atr, &desc);
fwnode_handle_put(i2c_handle);
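Note (illustration, not part of the patch): i2c_atr_add_adapter() now takes a struct i2c_atr_adap_desc rather than separate arguments; num_aliases = 0 means no fixed per-channel alias list is reserved. A sketch of the call, assuming a valid ATR handle:

	static int example_add_atr_adapter(struct i2c_atr *atr, u32 port,
					   struct device *dev,
					   struct fwnode_handle *i2c_handle)
	{
		struct i2c_atr_adap_desc desc = {
			.chan_id = port,
			.parent = dev,
			.bus_handle = i2c_handle,
			.num_aliases = 0,
		};

		return i2c_atr_add_adapter(atr, &desc);
	}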
@@ -729,7 +771,7 @@ static int ub913_hw_init(struct ub913_data *priv)
int ret;
u8 v;
- ret = ub913_read(priv, UB913_REG_MODE_SEL, &v);
+ ret = ub913_read(priv, UB913_REG_MODE_SEL, &v, NULL);
if (ret)
return ret;
@@ -750,7 +792,7 @@ static int ub913_hw_init(struct ub913_data *priv)
ret = ub913_update_bits(priv, UB913_REG_GENERAL_CFG,
UB913_REG_GENERAL_CFG_PCLK_RISING,
FIELD_PREP(UB913_REG_GENERAL_CFG_PCLK_RISING,
- priv->pclk_polarity_rising));
+ priv->pclk_polarity_rising), NULL);
if (ret)
return ret;
diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c
index 46569381b332..59bd92388845 100644
--- a/drivers/media/i2c/ds90ub953.c
+++ b/drivers/media/i2c/ds90ub953.c
@@ -11,7 +11,6 @@
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/fwnode.h>
#include <linux/gpio/driver.h>
#include <linux/i2c-atr.h>
#include <linux/i2c.h>
@@ -28,6 +27,8 @@
#include <media/v4l2-mediabus.h>
#include <media/v4l2-subdev.h>
+#include "ds90ub953.h"
+
#define UB953_PAD_SINK 0
#define UB953_PAD_SOURCE 1
@@ -35,89 +36,6 @@
#define UB953_DEFAULT_CLKOUT_RATE 25000000UL
-#define UB953_REG_RESET_CTL 0x01
-#define UB953_REG_RESET_CTL_DIGITAL_RESET_1 BIT(1)
-#define UB953_REG_RESET_CTL_DIGITAL_RESET_0 BIT(0)
-
-#define UB953_REG_GENERAL_CFG 0x02
-#define UB953_REG_GENERAL_CFG_CONT_CLK BIT(6)
-#define UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT 4
-#define UB953_REG_GENERAL_CFG_CSI_LANE_SEL_MASK GENMASK(5, 4)
-#define UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE BIT(1)
-#define UB953_REG_GENERAL_CFG_I2C_STRAP_MODE BIT(0)
-
-#define UB953_REG_MODE_SEL 0x03
-#define UB953_REG_MODE_SEL_MODE_DONE BIT(3)
-#define UB953_REG_MODE_SEL_MODE_OVERRIDE BIT(4)
-#define UB953_REG_MODE_SEL_MODE_MASK GENMASK(2, 0)
-
-#define UB953_REG_CLKOUT_CTRL0 0x06
-#define UB953_REG_CLKOUT_CTRL1 0x07
-
-#define UB953_REG_SCL_HIGH_TIME 0x0b
-#define UB953_REG_SCL_LOW_TIME 0x0c
-
-#define UB953_REG_LOCAL_GPIO_DATA 0x0d
-#define UB953_REG_LOCAL_GPIO_DATA_GPIO_RMTEN(n) BIT(4 + (n))
-#define UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(n) BIT(0 + (n))
-
-#define UB953_REG_GPIO_INPUT_CTRL 0x0e
-#define UB953_REG_GPIO_INPUT_CTRL_OUT_EN(n) BIT(4 + (n))
-#define UB953_REG_GPIO_INPUT_CTRL_INPUT_EN(n) BIT(0 + (n))
-
-#define UB953_REG_BC_CTRL 0x49
-#define UB953_REG_BC_CTRL_CRC_ERR_CLR BIT(3)
-
-#define UB953_REG_REV_MASK_ID 0x50
-#define UB953_REG_GENERAL_STATUS 0x52
-
-#define UB953_REG_GPIO_PIN_STS 0x53
-#define UB953_REG_GPIO_PIN_STS_GPIO_STS(n) BIT(0 + (n))
-
-#define UB953_REG_BIST_ERR_CNT 0x54
-#define UB953_REG_CRC_ERR_CNT1 0x55
-#define UB953_REG_CRC_ERR_CNT2 0x56
-
-#define UB953_REG_CSI_ERR_CNT 0x5c
-#define UB953_REG_CSI_ERR_STATUS 0x5d
-#define UB953_REG_CSI_ERR_DLANE01 0x5e
-#define UB953_REG_CSI_ERR_DLANE23 0x5f
-#define UB953_REG_CSI_ERR_CLK_LANE 0x60
-#define UB953_REG_CSI_PKT_HDR_VC_ID 0x61
-#define UB953_REG_PKT_HDR_WC_LSB 0x62
-#define UB953_REG_PKT_HDR_WC_MSB 0x63
-#define UB953_REG_CSI_ECC 0x64
-
-#define UB953_REG_IND_ACC_CTL 0xb0
-#define UB953_REG_IND_ACC_ADDR 0xb1
-#define UB953_REG_IND_ACC_DATA 0xb2
-
-#define UB953_REG_FPD3_RX_ID(n) (0xf0 + (n))
-#define UB953_REG_FPD3_RX_ID_LEN 6
-
-/* Indirect register blocks */
-#define UB953_IND_TARGET_PAT_GEN 0x00
-#define UB953_IND_TARGET_FPD3_TX 0x01
-#define UB953_IND_TARGET_DIE_ID 0x02
-
-#define UB953_IND_PGEN_CTL 0x01
-#define UB953_IND_PGEN_CTL_PGEN_ENABLE BIT(0)
-#define UB953_IND_PGEN_CFG 0x02
-#define UB953_IND_PGEN_CSI_DI 0x03
-#define UB953_IND_PGEN_LINE_SIZE1 0x04
-#define UB953_IND_PGEN_LINE_SIZE0 0x05
-#define UB953_IND_PGEN_BAR_SIZE1 0x06
-#define UB953_IND_PGEN_BAR_SIZE0 0x07
-#define UB953_IND_PGEN_ACT_LPF1 0x08
-#define UB953_IND_PGEN_ACT_LPF0 0x09
-#define UB953_IND_PGEN_TOT_LPF1 0x0a
-#define UB953_IND_PGEN_TOT_LPF0 0x0b
-#define UB953_IND_PGEN_LINE_PD1 0x0c
-#define UB953_IND_PGEN_LINE_PD0 0x0d
-#define UB953_IND_PGEN_VBP 0x0e
-#define UB953_IND_PGEN_VFP 0x0f
-#define UB953_IND_PGEN_COLOR(n) (0x10 + (n)) /* n <= 15 */
-
/* Note: Only sync mode supported for now */
enum ub953_mode {
/* FPD-Link III CSI-2 synchronous mode */
@@ -185,11 +103,14 @@ static inline struct ub953_data *sd_to_ub953(struct v4l2_subdev *sd)
* HW Access
*/
-static int ub953_read(struct ub953_data *priv, u8 reg, u8 *val)
+static int ub953_read(struct ub953_data *priv, u8 reg, u8 *val, int *err)
{
unsigned int v;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = regmap_read(priv->regmap, reg, &v);
@@ -204,13 +125,19 @@ static int ub953_read(struct ub953_data *priv, u8 reg, u8 *val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
-static int ub953_write(struct ub953_data *priv, u8 reg, u8 val)
+static int ub953_write(struct ub953_data *priv, u8 reg, u8 val, int *err)
{
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = regmap_write(priv->regmap, reg, val);
@@ -220,6 +147,9 @@ static int ub953_write(struct ub953_data *priv, u8 reg, u8 val)
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
@@ -244,11 +174,15 @@ static int ub953_select_ind_reg_block(struct ub953_data *priv, u8 block)
}
__maybe_unused
-static int ub953_read_ind(struct ub953_data *priv, u8 block, u8 reg, u8 *val)
+static int ub953_read_ind(struct ub953_data *priv, u8 block, u8 reg, u8 *val,
+ int *err)
{
unsigned int v;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub953_select_ind_reg_block(priv, block);
@@ -258,7 +192,7 @@ static int ub953_read_ind(struct ub953_data *priv, u8 block, u8 reg, u8 *val)
ret = regmap_write(priv->regmap, UB953_REG_IND_ACC_ADDR, reg);
if (ret) {
dev_err(&priv->client->dev,
- "Write to IND_ACC_ADDR failed when reading %u:%x02x: %d\n",
+ "Write to IND_ACC_ADDR failed when reading %u:0x%02x: %d\n",
block, reg, ret);
goto out_unlock;
}
@@ -266,7 +200,7 @@ static int ub953_read_ind(struct ub953_data *priv, u8 block, u8 reg, u8 *val)
ret = regmap_read(priv->regmap, UB953_REG_IND_ACC_DATA, &v);
if (ret) {
dev_err(&priv->client->dev,
- "Write to IND_ACC_DATA failed when reading %u:%x02x: %d\n",
+ "Write to IND_ACC_DATA failed when reading %u:0x%02x: %d\n",
block, reg, ret);
goto out_unlock;
}
@@ -276,14 +210,21 @@ static int ub953_read_ind(struct ub953_data *priv, u8 block, u8 reg, u8 *val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
__maybe_unused
-static int ub953_write_ind(struct ub953_data *priv, u8 block, u8 reg, u8 val)
+static int ub953_write_ind(struct ub953_data *priv, u8 block, u8 reg, u8 val,
+ int *err)
{
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub953_select_ind_reg_block(priv, block);
@@ -293,7 +234,7 @@ static int ub953_write_ind(struct ub953_data *priv, u8 block, u8 reg, u8 val)
ret = regmap_write(priv->regmap, UB953_REG_IND_ACC_ADDR, reg);
if (ret) {
dev_err(&priv->client->dev,
- "Write to IND_ACC_ADDR failed when writing %u:%x02x: %d\n",
+ "Write to IND_ACC_ADDR failed when writing %u:0x%02x: %d\n",
block, reg, ret);
goto out_unlock;
}
@@ -301,13 +242,16 @@ static int ub953_write_ind(struct ub953_data *priv, u8 block, u8 reg, u8 val)
ret = regmap_write(priv->regmap, UB953_REG_IND_ACC_DATA, val);
if (ret) {
dev_err(&priv->client->dev,
- "Write to IND_ACC_DATA failed when writing %u:%x02x\n: %d\n",
+ "Write to IND_ACC_DATA failed when writing %u:0x%02x: %d\n",
block, reg, ret);
}
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
@@ -320,7 +264,7 @@ static int ub953_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
int ret;
u8 v;
- ret = ub953_read(priv, UB953_REG_GPIO_INPUT_CTRL, &v);
+ ret = ub953_read(priv, UB953_REG_GPIO_INPUT_CTRL, &v, NULL);
if (ret)
return ret;
@@ -366,7 +310,7 @@ static int ub953_gpio_get(struct gpio_chip *gc, unsigned int offset)
int ret;
u8 v;
- ret = ub953_read(priv, UB953_REG_GPIO_PIN_STS, &v);
+ ret = ub953_read(priv, UB953_REG_GPIO_PIN_STS, &v, NULL);
if (ret)
return ret;
@@ -400,11 +344,11 @@ static int ub953_gpiochip_probe(struct ub953_data *priv)
int ret;
/* Set all GPIOs to local input mode */
- ret = ub953_write(priv, UB953_REG_LOCAL_GPIO_DATA, 0);
+ ret = ub953_write(priv, UB953_REG_LOCAL_GPIO_DATA, 0, NULL);
if (ret)
return ret;
- ret = ub953_write(priv, UB953_REG_GPIO_INPUT_CTRL, 0xf);
+ ret = ub953_write(priv, UB953_REG_GPIO_INPUT_CTRL, 0xf, NULL);
if (ret)
return ret;
@@ -607,23 +551,33 @@ static int ub953_log_status(struct v4l2_subdev *sd)
{
struct ub953_data *priv = sd_to_ub953(sd);
struct device *dev = &priv->client->dev;
- u8 v = 0, v1 = 0, v2 = 0;
- unsigned int i;
char id[UB953_REG_FPD3_RX_ID_LEN];
- u8 gpio_local_data = 0;
- u8 gpio_input_ctrl = 0;
- u8 gpio_pin_sts = 0;
+ u8 gpio_local_data;
+ u8 gpio_input_ctrl;
+ u8 gpio_pin_sts;
+ unsigned int i;
+ u8 v, v1, v2;
+ int ret;
- for (i = 0; i < sizeof(id); i++)
- ub953_read(priv, UB953_REG_FPD3_RX_ID(i), &id[i]);
+ for (i = 0; i < sizeof(id); i++) {
+ ret = ub953_read(priv, UB953_REG_FPD3_RX_ID(i), &id[i], NULL);
+ if (ret)
+ return ret;
+ }
dev_info(dev, "ID '%.*s'\n", (int)sizeof(id), id);
- ub953_read(priv, UB953_REG_GENERAL_STATUS, &v);
+ ret = ub953_read(priv, UB953_REG_GENERAL_STATUS, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "GENERAL_STATUS %#02x\n", v);
- ub953_read(priv, UB953_REG_CRC_ERR_CNT1, &v1);
- ub953_read(priv, UB953_REG_CRC_ERR_CNT2, &v2);
+ ub953_read(priv, UB953_REG_CRC_ERR_CNT1, &v1, &ret);
+ ub953_read(priv, UB953_REG_CRC_ERR_CNT2, &v2, &ret);
+ if (ret)
+ return ret;
+
dev_info(dev, "CRC error count %u\n", v1 | (v2 << 8));
/* Clear CRC error counter */
@@ -632,34 +586,60 @@ static int ub953_log_status(struct v4l2_subdev *sd)
UB953_REG_BC_CTRL_CRC_ERR_CLR,
UB953_REG_BC_CTRL_CRC_ERR_CLR);
- ub953_read(priv, UB953_REG_CSI_ERR_CNT, &v);
+ ret = ub953_read(priv, UB953_REG_CSI_ERR_CNT, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "CSI error count %u\n", v);
- ub953_read(priv, UB953_REG_CSI_ERR_STATUS, &v);
+ ret = ub953_read(priv, UB953_REG_CSI_ERR_STATUS, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "CSI_ERR_STATUS %#02x\n", v);
- ub953_read(priv, UB953_REG_CSI_ERR_DLANE01, &v);
+ ret = ub953_read(priv, UB953_REG_CSI_ERR_DLANE01, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "CSI_ERR_DLANE01 %#02x\n", v);
- ub953_read(priv, UB953_REG_CSI_ERR_DLANE23, &v);
+ ret = ub953_read(priv, UB953_REG_CSI_ERR_DLANE23, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "CSI_ERR_DLANE23 %#02x\n", v);
- ub953_read(priv, UB953_REG_CSI_ERR_CLK_LANE, &v);
+ ret = ub953_read(priv, UB953_REG_CSI_ERR_CLK_LANE, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "CSI_ERR_CLK_LANE %#02x\n", v);
- ub953_read(priv, UB953_REG_CSI_PKT_HDR_VC_ID, &v);
+ ret = ub953_read(priv, UB953_REG_CSI_PKT_HDR_VC_ID, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "CSI packet header VC %u ID %u\n", v >> 6, v & 0x3f);
- ub953_read(priv, UB953_REG_PKT_HDR_WC_LSB, &v1);
- ub953_read(priv, UB953_REG_PKT_HDR_WC_MSB, &v2);
+ ub953_read(priv, UB953_REG_PKT_HDR_WC_LSB, &v1, &ret);
+ ub953_read(priv, UB953_REG_PKT_HDR_WC_MSB, &v2, &ret);
+ if (ret)
+ return ret;
+
dev_info(dev, "CSI packet header WC %u\n", (v2 << 8) | v1);
- ub953_read(priv, UB953_REG_CSI_ECC, &v);
+ ret = ub953_read(priv, UB953_REG_CSI_ECC, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "CSI ECC %#02x\n", v);
- ub953_read(priv, UB953_REG_LOCAL_GPIO_DATA, &gpio_local_data);
- ub953_read(priv, UB953_REG_GPIO_INPUT_CTRL, &gpio_input_ctrl);
- ub953_read(priv, UB953_REG_GPIO_PIN_STS, &gpio_pin_sts);
+ ub953_read(priv, UB953_REG_LOCAL_GPIO_DATA, &gpio_local_data, &ret);
+ ub953_read(priv, UB953_REG_GPIO_INPUT_CTRL, &gpio_input_ctrl, &ret);
+ ub953_read(priv, UB953_REG_GPIO_PIN_STS, &gpio_pin_sts, &ret);
+ if (ret)
+ return ret;
for (i = 0; i < UB953_NUM_GPIOS; i++) {
dev_info(dev,
@@ -843,11 +823,11 @@ static int ub953_i2c_master_init(struct ub953_data *priv)
scl_high = div64_u64((u64)scl_high * ref, 1000000000) - 5;
scl_low = div64_u64((u64)scl_low * ref, 1000000000) - 5;
- ret = ub953_write(priv, UB953_REG_SCL_HIGH_TIME, scl_high);
+ ret = ub953_write(priv, UB953_REG_SCL_HIGH_TIME, scl_high, NULL);
if (ret)
return ret;
- ret = ub953_write(priv, UB953_REG_SCL_LOW_TIME, scl_low);
+ ret = ub953_write(priv, UB953_REG_SCL_LOW_TIME, scl_low, NULL);
if (ret)
return ret;
@@ -986,11 +966,11 @@ static int ub953_write_clkout_regs(struct ub953_data *priv,
clkout_ctrl1 = clkout_data->n;
- ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL0, clkout_ctrl0);
+ ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL0, clkout_ctrl0, NULL);
if (ret)
return ret;
- ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL1, clkout_ctrl1);
+ ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL1, clkout_ctrl1, NULL);
if (ret)
return ret;
@@ -1009,13 +989,13 @@ static unsigned long ub953_clkout_recalc_rate(struct clk_hw *hw,
u64 rate;
int ret;
- ret = ub953_read(priv, UB953_REG_CLKOUT_CTRL0, &ctrl0);
+ ret = ub953_read(priv, UB953_REG_CLKOUT_CTRL0, &ctrl0, NULL);
if (ret) {
dev_err(dev, "Failed to read CLKOUT_CTRL0: %d\n", ret);
return 0;
}
- ret = ub953_read(priv, UB953_REG_CLKOUT_CTRL1, &ctrl1);
+ ret = ub953_read(priv, UB953_REG_CLKOUT_CTRL1, &ctrl1, NULL);
if (ret) {
dev_err(dev, "Failed to read CLKOUT_CTRL1: %d\n", ret);
return 0;
@@ -1122,6 +1102,7 @@ static int ub953_register_clkout(struct ub953_data *priv)
static int ub953_add_i2c_adapter(struct ub953_data *priv)
{
struct device *dev = &priv->client->dev;
+ struct i2c_atr_adap_desc desc = { };
struct fwnode_handle *i2c_handle;
int ret;
@@ -1129,8 +1110,12 @@ static int ub953_add_i2c_adapter(struct ub953_data *priv)
if (!i2c_handle)
return 0;
- ret = i2c_atr_add_adapter(priv->plat_data->atr, priv->plat_data->port,
- dev, i2c_handle);
+ desc.chan_id = priv->plat_data->port;
+ desc.parent = dev;
+ desc.bus_handle = i2c_handle;
+ desc.num_aliases = 0;
+
+ ret = i2c_atr_add_adapter(priv->plat_data->atr, &desc);
fwnode_handle_put(i2c_handle);
@@ -1191,7 +1176,7 @@ static int ub953_hw_init(struct ub953_data *priv)
int ret;
u8 v;
- ret = ub953_read(priv, UB953_REG_MODE_SEL, &v);
+ ret = ub953_read(priv, UB953_REG_MODE_SEL, &v, NULL);
if (ret)
return ret;
@@ -1231,13 +1216,13 @@ static int ub953_hw_init(struct ub953_data *priv)
return dev_err_probe(dev, -EINVAL,
"clkin required for non-sync ext mode\n");
- ret = ub953_read(priv, UB953_REG_REV_MASK_ID, &v);
+ ret = ub953_read(priv, UB953_REG_REV_MASK_ID, &v, NULL);
if (ret)
return dev_err_probe(dev, ret, "Failed to read revision");
dev_info(dev, "Found %s rev/mask %#04x\n", priv->hw_data->model, v);
- ret = ub953_read(priv, UB953_REG_GENERAL_CFG, &v);
+ ret = ub953_read(priv, UB953_REG_GENERAL_CFG, &v, NULL);
if (ret)
return ret;
@@ -1254,11 +1239,16 @@ static int ub953_hw_init(struct ub953_data *priv)
UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT;
v |= UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE;
- ret = ub953_write(priv, UB953_REG_GENERAL_CFG, v);
+ ret = ub953_write(priv, UB953_REG_GENERAL_CFG, v, NULL);
if (ret)
return ret;
- return 0;
+ v = 1U << UB953_REG_I2C_CONTROL2_SDA_OUTPUT_SETUP_SHIFT;
+ v |= UB953_REG_I2C_CONTROL2_BUS_SPEEDUP;
+
+ ret = ub953_write(priv, UB953_REG_I2C_CONTROL2, v, NULL);
+
+ return ret;
}
static int ub953_subdev_init(struct ub953_data *priv)
diff --git a/drivers/media/i2c/ds90ub953.h b/drivers/media/i2c/ds90ub953.h
new file mode 100644
index 000000000000..97a6b3af326e
--- /dev/null
+++ b/drivers/media/i2c/ds90ub953.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __MEDIA_I2C_DS90UB953_H__
+#define __MEDIA_I2C_DS90UB953_H__
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#define UB953_REG_RESET_CTL 0x01
+#define UB953_REG_RESET_CTL_DIGITAL_RESET_1 BIT(1)
+#define UB953_REG_RESET_CTL_DIGITAL_RESET_0 BIT(0)
+
+#define UB953_REG_GENERAL_CFG 0x02
+#define UB953_REG_GENERAL_CFG_CONT_CLK BIT(6)
+#define UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT 4
+#define UB953_REG_GENERAL_CFG_CSI_LANE_SEL_MASK GENMASK(5, 4)
+#define UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE BIT(1)
+#define UB953_REG_GENERAL_CFG_I2C_STRAP_MODE BIT(0)
+
+#define UB953_REG_MODE_SEL 0x03
+#define UB953_REG_MODE_SEL_MODE_DONE BIT(3)
+#define UB953_REG_MODE_SEL_MODE_OVERRIDE BIT(4)
+#define UB953_REG_MODE_SEL_MODE_MASK GENMASK(2, 0)
+
+#define UB953_REG_CLKOUT_CTRL0 0x06
+#define UB953_REG_CLKOUT_CTRL1 0x07
+
+#define UB953_REG_I2C_CONTROL2 0x0a
+#define UB953_REG_I2C_CONTROL2_SDA_OUTPUT_SETUP_SHIFT 4
+#define UB953_REG_I2C_CONTROL2_BUS_SPEEDUP BIT(1)
+
+#define UB953_REG_SCL_HIGH_TIME 0x0b
+#define UB953_REG_SCL_LOW_TIME 0x0c
+
+#define UB953_REG_LOCAL_GPIO_DATA 0x0d
+#define UB953_REG_LOCAL_GPIO_DATA_GPIO_RMTEN(n) BIT(4 + (n))
+#define UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(n) BIT(0 + (n))
+
+#define UB953_REG_GPIO_INPUT_CTRL 0x0e
+#define UB953_REG_GPIO_INPUT_CTRL_OUT_EN(n) BIT(4 + (n))
+#define UB953_REG_GPIO_INPUT_CTRL_INPUT_EN(n) BIT(0 + (n))
+
+#define UB953_REG_BC_CTRL 0x49
+#define UB953_REG_BC_CTRL_CRC_ERR_CLR BIT(3)
+
+#define UB953_REG_REV_MASK_ID 0x50
+#define UB953_REG_GENERAL_STATUS 0x52
+
+#define UB953_REG_GPIO_PIN_STS 0x53
+#define UB953_REG_GPIO_PIN_STS_GPIO_STS(n) BIT(0 + (n))
+
+#define UB953_REG_BIST_ERR_CNT 0x54
+#define UB953_REG_CRC_ERR_CNT1 0x55
+#define UB953_REG_CRC_ERR_CNT2 0x56
+
+#define UB953_REG_CSI_ERR_CNT 0x5c
+#define UB953_REG_CSI_ERR_STATUS 0x5d
+#define UB953_REG_CSI_ERR_DLANE01 0x5e
+#define UB953_REG_CSI_ERR_DLANE23 0x5f
+#define UB953_REG_CSI_ERR_CLK_LANE 0x60
+#define UB953_REG_CSI_PKT_HDR_VC_ID 0x61
+#define UB953_REG_PKT_HDR_WC_LSB 0x62
+#define UB953_REG_PKT_HDR_WC_MSB 0x63
+#define UB953_REG_CSI_ECC 0x64
+
+#define UB953_REG_IND_ACC_CTL 0xb0
+#define UB953_REG_IND_ACC_ADDR 0xb1
+#define UB953_REG_IND_ACC_DATA 0xb2
+
+#define UB953_REG_FPD3_RX_ID(n) (0xf0 + (n))
+#define UB953_REG_FPD3_RX_ID_LEN 6
+
+/* Indirect register blocks */
+#define UB953_IND_TARGET_PAT_GEN 0x00
+#define UB953_IND_TARGET_ANALOG 0x01
+#define UB953_IND_TARGET_DIE_ID 0x02
+
+#define UB953_IND_PGEN_CTL 0x01
+#define UB953_IND_PGEN_CTL_PGEN_ENABLE BIT(0)
+#define UB953_IND_PGEN_CFG 0x02
+#define UB953_IND_PGEN_CSI_DI 0x03
+#define UB953_IND_PGEN_LINE_SIZE1 0x04
+#define UB953_IND_PGEN_LINE_SIZE0 0x05
+#define UB953_IND_PGEN_BAR_SIZE1 0x06
+#define UB953_IND_PGEN_BAR_SIZE0 0x07
+#define UB953_IND_PGEN_ACT_LPF1 0x08
+#define UB953_IND_PGEN_ACT_LPF0 0x09
+#define UB953_IND_PGEN_TOT_LPF1 0x0a
+#define UB953_IND_PGEN_TOT_LPF0 0x0b
+#define UB953_IND_PGEN_LINE_PD1 0x0c
+#define UB953_IND_PGEN_LINE_PD0 0x0d
+#define UB953_IND_PGEN_VBP 0x0e
+#define UB953_IND_PGEN_VFP 0x0f
+#define UB953_IND_PGEN_COLOR(n) (0x10 + (n)) /* n <= 15 */
+
+#define UB953_IND_ANA_TEMP_DYNAMIC_CFG 0x4b
+#define UB953_IND_ANA_TEMP_DYNAMIC_CFG_OV BIT(5)
+#define UB953_IND_ANA_TEMP_STATIC_CFG 0x4c
+#define UB953_IND_ANA_TEMP_STATIC_CFG_MASK GENMASK(6, 4)
+
+/* UB971 Registers */
+
+#define UB971_ENH_BC_CHK 0x4b
+
+#endif /* __MEDIA_I2C_DS90UB953_H__ */
diff --git a/drivers/media/i2c/ds90ub960.c b/drivers/media/i2c/ds90ub960.c
index 5dde8452739b..082fc62b0f5b 100644
--- a/drivers/media/i2c/ds90ub960.c
+++ b/drivers/media/i2c/ds90ub960.c
@@ -27,6 +27,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/fwnode.h>
@@ -52,6 +53,8 @@
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
+#include "ds90ub953.h"
+
#define MHZ(v) ((u32)((v) * HZ_PER_MHZ))
/*
@@ -243,13 +246,17 @@
#define UB960_RR_BIST_ERR_COUNT 0x57
#define UB960_RR_BCC_CONFIG 0x58
+#define UB960_RR_BCC_CONFIG_BC_ALWAYS_ON BIT(4)
+#define UB960_RR_BCC_CONFIG_AUTO_ACK_ALL BIT(5)
#define UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH BIT(6)
#define UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK GENMASK(2, 0)
#define UB960_RR_DATAPATH_CTL1 0x59
#define UB960_RR_DATAPATH_CTL2 0x5a
#define UB960_RR_SER_ID 0x5b
+#define UB960_RR_SER_ID_FREEZE_DEVICE_ID BIT(0)
#define UB960_RR_SER_ALIAS_ID 0x5c
+#define UB960_RR_SER_ALIAS_ID_AUTO_ACK BIT(0)
/* For these two register sets: n < UB960_MAX_PORT_ALIASES */
#define UB960_RR_SLAVE_ID(n) (0x5d + (n))
@@ -307,8 +314,6 @@
#define UB960_XR_REFCLK_FREQ 0xa5 /* UB960 */
-#define UB960_RR_VC_ID_MAP(x) (0xa0 + (x)) /* UB9702 */
-
#define UB960_SR_IND_ACC_CTL 0xb0
#define UB960_SR_IND_ACC_CTL_IA_AUTO_INC BIT(1)
@@ -321,9 +326,6 @@
#define UB960_SR_FV_MIN_TIME 0xbc
#define UB960_SR_GPIO_PD_CTL 0xbe
-#define UB960_SR_FPD_RATE_CFG 0xc2 /* UB9702 */
-#define UB960_SR_CSI_PLL_DIV 0xc9 /* UB9702 */
-
#define UB960_RR_PORT_DEBUG 0xd0
#define UB960_RR_AEQ_CTL2 0xd2
#define UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR BIT(2)
@@ -354,15 +356,12 @@
#define UB960_RR_SEN_INT_RISE_STS 0xde
#define UB960_RR_SEN_INT_FALL_STS 0xdf
-#define UB960_RR_CHANNEL_MODE 0xe4 /* UB9702 */
#define UB960_SR_FPD3_RX_ID(n) (0xf0 + (n))
#define UB960_SR_FPD3_RX_ID_LEN 6
#define UB960_SR_I2C_RX_ID(n) (0xf8 + (n))
-#define UB9702_SR_REFCLK_FREQ 0x3d
-
/* Indirect register blocks */
#define UB960_IND_TARGET_PAT_GEN 0x00
#define UB960_IND_TARGET_RX_ANA(n) (0x01 + (n))
@@ -397,6 +396,49 @@
#define UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY BIT(3)
#define UB960_IR_RX_ANA_STROBE_SET_DATA_DELAY_MASK GENMASK(2, 0)
+/* UB9702 Registers */
+
+#define UB9702_SR_CSI_EXCLUSIVE_FWD2 0x3c
+#define UB9702_SR_REFCLK_FREQ 0x3d
+#define UB9702_RR_RX_CTL_1 0x80
+#define UB9702_RR_RX_CTL_2 0x87
+#define UB9702_RR_VC_ID_MAP(x) (0xa0 + (x))
+#define UB9702_SR_FPD_RATE_CFG 0xc2
+#define UB9702_SR_CSI_PLL_DIV 0xc9
+#define UB9702_RR_RX_SM_SEL_2 0xd4
+#define UB9702_RR_CHANNEL_MODE 0xe4
+
+#define UB9702_IND_TARGET_SAR_ADC 0x0a
+
+#define UB9702_IR_RX_ANA_FPD_BC_CTL0 0x04
+#define UB9702_IR_RX_ANA_FPD_BC_CTL1 0x0d
+#define UB9702_IR_RX_ANA_FPD_BC_CTL2 0x1b
+#define UB9702_IR_RX_ANA_SYSTEM_INIT_REG0 0x21
+#define UB9702_IR_RX_ANA_AEQ_ALP_SEL6 0x27
+#define UB9702_IR_RX_ANA_AEQ_ALP_SEL7 0x28
+#define UB9702_IR_RX_ANA_AEQ_ALP_SEL10 0x2b
+#define UB9702_IR_RX_ANA_AEQ_ALP_SEL11 0x2c
+#define UB9702_IR_RX_ANA_EQ_ADAPT_CTRL 0x2e
+#define UB9702_IR_RX_ANA_AEQ_CFG_1 0x34
+#define UB9702_IR_RX_ANA_AEQ_CFG_2 0x4d
+#define UB9702_IR_RX_ANA_GAIN_CTRL_0 0x71
+#define UB9702_IR_RX_ANA_VGA_CTRL_SEL_1 0x72
+#define UB9702_IR_RX_ANA_VGA_CTRL_SEL_2 0x73
+#define UB9702_IR_RX_ANA_VGA_CTRL_SEL_3 0x74
+#define UB9702_IR_RX_ANA_VGA_CTRL_SEL_6 0x77
+#define UB9702_IR_RX_ANA_AEQ_CFG_3 0x79
+#define UB9702_IR_RX_ANA_AEQ_CFG_4 0x85
+#define UB9702_IR_RX_ANA_EQ_CTRL_SEL_15 0x87
+#define UB9702_IR_RX_ANA_EQ_CTRL_SEL_24 0x90
+#define UB9702_IR_RX_ANA_EQ_CTRL_SEL_38 0x9e
+#define UB9702_IR_RX_ANA_FPD3_CDR_CTRL_SEL_5 0xa5
+#define UB9702_IR_RX_ANA_FPD3_AEQ_CTRL_SEL_1 0xa8
+#define UB9702_IR_RX_ANA_EQ_OVERRIDE_CTRL 0xf0
+#define UB9702_IR_RX_ANA_VGA_CTRL_SEL_8 0xf1
+
+#define UB9702_IR_CSI_ANA_CSIPLL_REG_1 0x92
+
/* EQ related */
#define UB960_MIN_AEQ_STROBE_POS -7
@@ -450,7 +492,9 @@ struct ub960_rxport {
struct fwnode_handle *fwnode;
struct i2c_client *client;
unsigned short alias; /* I2C alias (lower 7 bits) */
+ short addr; /* Local I2C address (lower 7 bits) */
struct ds90ub9xx_platform_data pdata;
+ struct regmap *regmap;
} ser;
enum ub960_rxport_mode rx_mode;
@@ -478,7 +522,9 @@ struct ub960_rxport {
};
} eq;
- const struct i2c_client *aliased_clients[UB960_MAX_PORT_ALIASES];
+ /* lock for aliased_addrs and associated registers */
+ struct mutex aliased_addrs_lock;
+ u16 aliased_addrs[UB960_MAX_PORT_ALIASES];
};
struct ub960_asd {
@@ -614,16 +660,76 @@ static const struct ub960_format_info *ub960_find_format(u32 code)
return NULL;
}
+struct ub960_rxport_iter {
+ unsigned int nport;
+ struct ub960_rxport *rxport;
+};
+
+enum ub960_iter_flags {
+ UB960_ITER_ACTIVE_ONLY = BIT(0),
+ UB960_ITER_FPD4_ONLY = BIT(1),
+};
+
+static struct ub960_rxport_iter ub960_iter_rxport(struct ub960_data *priv,
+ struct ub960_rxport_iter it,
+ enum ub960_iter_flags flags)
+{
+ for (; it.nport < priv->hw_data->num_rxports; it.nport++) {
+ it.rxport = priv->rxports[it.nport];
+
+ if ((flags & UB960_ITER_ACTIVE_ONLY) && !it.rxport)
+ continue;
+
+ if ((flags & UB960_ITER_FPD4_ONLY) &&
+ it.rxport->cdr_mode != RXPORT_CDR_FPD4)
+ continue;
+
+ return it;
+ }
+
+ it.rxport = NULL;
+
+ return it;
+}
+
+#define for_each_rxport(priv, it) \
+ for (struct ub960_rxport_iter it = \
+ ub960_iter_rxport(priv, (struct ub960_rxport_iter){ 0 }, \
+ 0); \
+ it.nport < (priv)->hw_data->num_rxports; \
+ it.nport++, it = ub960_iter_rxport(priv, it, 0))
+
+#define for_each_active_rxport(priv, it) \
+ for (struct ub960_rxport_iter it = \
+ ub960_iter_rxport(priv, (struct ub960_rxport_iter){ 0 }, \
+ UB960_ITER_ACTIVE_ONLY); \
+ it.nport < (priv)->hw_data->num_rxports; \
+ it.nport++, it = ub960_iter_rxport(priv, it, \
+ UB960_ITER_ACTIVE_ONLY))
+
+#define for_each_active_rxport_fpd4(priv, it) \
+ for (struct ub960_rxport_iter it = \
+ ub960_iter_rxport(priv, (struct ub960_rxport_iter){ 0 }, \
+ UB960_ITER_ACTIVE_ONLY | \
+ UB960_ITER_FPD4_ONLY); \
+ it.nport < (priv)->hw_data->num_rxports; \
+ it.nport++, it = ub960_iter_rxport(priv, it, \
+ UB960_ITER_ACTIVE_ONLY | \
+ UB960_ITER_FPD4_ONLY))
+
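Note (illustration, not part of the patch): the iterator carries both the port index and the port pointer, so loop bodies no longer need the rxport-NULL check. A minimal sketch using the FPD4-only variant:

	/* Sketch: count active FPD-Link IV ports. */
	static unsigned int example_count_fpd4(struct ub960_data *priv)
	{
		unsigned int n = 0;

		for_each_active_rxport_fpd4(priv, it)
			n++;	/* it.nport and it.rxport are both valid here */

		return n;
	}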
/* -----------------------------------------------------------------------------
* Basic device access
*/
-static int ub960_read(struct ub960_data *priv, u8 reg, u8 *val)
+static int ub960_read(struct ub960_data *priv, u8 reg, u8 *val, int *err)
{
struct device *dev = &priv->client->dev;
unsigned int v;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = regmap_read(priv->regmap, reg, &v);
@@ -638,14 +744,20 @@ static int ub960_read(struct ub960_data *priv, u8 reg, u8 *val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
-static int ub960_write(struct ub960_data *priv, u8 reg, u8 val)
+static int ub960_write(struct ub960_data *priv, u8 reg, u8 val, int *err)
{
struct device *dev = &priv->client->dev;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = regmap_write(priv->regmap, reg, val);
@@ -655,14 +767,21 @@ static int ub960_write(struct ub960_data *priv, u8 reg, u8 val)
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
-static int ub960_update_bits(struct ub960_data *priv, u8 reg, u8 mask, u8 val)
+static int ub960_update_bits(struct ub960_data *priv, u8 reg, u8 mask, u8 val,
+ int *err)
{
struct device *dev = &priv->client->dev;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = regmap_update_bits(priv->regmap, reg, mask, val);
@@ -672,15 +791,21 @@ static int ub960_update_bits(struct ub960_data *priv, u8 reg, u8 mask, u8 val)
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
-static int ub960_read16(struct ub960_data *priv, u8 reg, u16 *val)
+static int ub960_read16(struct ub960_data *priv, u8 reg, u16 *val, int *err)
{
struct device *dev = &priv->client->dev;
__be16 __v;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = regmap_bulk_read(priv->regmap, reg, &__v, sizeof(__v));
@@ -695,6 +820,9 @@ static int ub960_read16(struct ub960_data *priv, u8 reg, u16 *val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
@@ -721,12 +849,16 @@ static int ub960_rxport_select(struct ub960_data *priv, u8 nport)
return 0;
}
-static int ub960_rxport_read(struct ub960_data *priv, u8 nport, u8 reg, u8 *val)
+static int ub960_rxport_read(struct ub960_data *priv, u8 nport, u8 reg,
+ u8 *val, int *err)
{
struct device *dev = &priv->client->dev;
unsigned int v;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_rxport_select(priv, nport);
@@ -745,14 +877,21 @@ static int ub960_rxport_read(struct ub960_data *priv, u8 nport, u8 reg, u8 *val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
-static int ub960_rxport_write(struct ub960_data *priv, u8 nport, u8 reg, u8 val)
+static int ub960_rxport_write(struct ub960_data *priv, u8 nport, u8 reg,
+ u8 val, int *err)
{
struct device *dev = &priv->client->dev;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_rxport_select(priv, nport);
@@ -767,15 +906,21 @@ static int ub960_rxport_write(struct ub960_data *priv, u8 nport, u8 reg, u8 val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
static int ub960_rxport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
- u8 mask, u8 val)
+ u8 mask, u8 val, int *err)
{
struct device *dev = &priv->client->dev;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_rxport_select(priv, nport);
@@ -790,16 +935,22 @@ static int ub960_rxport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
static int ub960_rxport_read16(struct ub960_data *priv, u8 nport, u8 reg,
- u16 *val)
+ u16 *val, int *err)
{
struct device *dev = &priv->client->dev;
__be16 __v;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_rxport_select(priv, nport);
@@ -818,6 +969,9 @@ static int ub960_rxport_read16(struct ub960_data *priv, u8 nport, u8 reg,
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
@@ -844,12 +998,16 @@ static int ub960_txport_select(struct ub960_data *priv, u8 nport)
return 0;
}
-static int ub960_txport_read(struct ub960_data *priv, u8 nport, u8 reg, u8 *val)
+static int ub960_txport_read(struct ub960_data *priv, u8 nport, u8 reg,
+ u8 *val, int *err)
{
struct device *dev = &priv->client->dev;
unsigned int v;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_txport_select(priv, nport);
@@ -868,14 +1026,21 @@ static int ub960_txport_read(struct ub960_data *priv, u8 nport, u8 reg, u8 *val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
-static int ub960_txport_write(struct ub960_data *priv, u8 nport, u8 reg, u8 val)
+static int ub960_txport_write(struct ub960_data *priv, u8 nport, u8 reg,
+ u8 val, int *err)
{
struct device *dev = &priv->client->dev;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_txport_select(priv, nport);
@@ -890,15 +1055,21 @@ static int ub960_txport_write(struct ub960_data *priv, u8 nport, u8 reg, u8 val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
static int ub960_txport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
- u8 mask, u8 val)
+ u8 mask, u8 val, int *err)
{
struct device *dev = &priv->client->dev;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_txport_select(priv, nport);
@@ -913,6 +1084,9 @@ static int ub960_txport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
@@ -938,12 +1112,16 @@ static int ub960_select_ind_reg_block(struct ub960_data *priv, u8 block)
return 0;
}
-static int ub960_read_ind(struct ub960_data *priv, u8 block, u8 reg, u8 *val)
+static int ub960_read_ind(struct ub960_data *priv, u8 block, u8 reg, u8 *val,
+ int *err)
{
struct device *dev = &priv->client->dev;
unsigned int v;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_select_ind_reg_block(priv, block);
@@ -971,14 +1149,21 @@ static int ub960_read_ind(struct ub960_data *priv, u8 block, u8 reg, u8 *val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
-static int ub960_write_ind(struct ub960_data *priv, u8 block, u8 reg, u8 val)
+static int ub960_write_ind(struct ub960_data *priv, u8 block, u8 reg, u8 val,
+ int *err)
{
struct device *dev = &priv->client->dev;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_select_ind_reg_block(priv, block);
@@ -1004,15 +1189,21 @@ static int ub960_write_ind(struct ub960_data *priv, u8 block, u8 reg, u8 val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
static int ub960_ind_update_bits(struct ub960_data *priv, u8 block, u8 reg,
- u8 mask, u8 val)
+ u8 mask, u8 val, int *err)
{
struct device *dev = &priv->client->dev;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_select_ind_reg_block(priv, block);
@@ -1039,6 +1230,36 @@ static int ub960_ind_update_bits(struct ub960_data *priv, u8 block, u8 reg,
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
+ return ret;
+}
+
+static int ub960_reset(struct ub960_data *priv, bool reset_regs)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned int v;
+ int ret;
+ u8 bit;
+
+ bit = reset_regs ? UB960_SR_RESET_DIGITAL_RESET1 :
+ UB960_SR_RESET_DIGITAL_RESET0;
+
+ ret = ub960_write(priv, UB960_SR_RESET, bit, NULL);
+ if (ret)
+ return ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = regmap_read_poll_timeout(priv->regmap, UB960_SR_RESET, v,
+ (v & bit) == 0, 2000, 100000);
+
+ mutex_unlock(&priv->reg_lock);
+
+ if (ret)
+ dev_err(dev, "reset failed: %d\n", ret);
+
return ret;
}
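Note (illustration, not part of the patch): the reset bit is self-clearing, and regmap_read_poll_timeout() re-reads the register until the condition holds, here every 2 ms with a 100 ms ceiling. The generic shape of the idiom:

	#include <linux/regmap.h>

	/* Sketch: wait for a self-clearing bit; -ETIMEDOUT on failure. */
	static int example_wait_bit_clear(struct regmap *map, unsigned int reg,
					  unsigned int bit)
	{
		unsigned int v;

		return regmap_read_poll_timeout(map, reg, v, !(v & bit),
						2000, 100000);
	}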
@@ -1046,67 +1267,82 @@ out_unlock:
* I2C-ATR (address translator)
*/
-static int ub960_atr_attach_client(struct i2c_atr *atr, u32 chan_id,
- const struct i2c_client *client, u16 alias)
+static int ub960_atr_attach_addr(struct i2c_atr *atr, u32 chan_id,
+ u16 addr, u16 alias)
{
struct ub960_data *priv = i2c_atr_get_driver_data(atr);
struct ub960_rxport *rxport = priv->rxports[chan_id];
struct device *dev = &priv->client->dev;
unsigned int reg_idx;
+ int ret = 0;
+
+ guard(mutex)(&rxport->aliased_addrs_lock);
- for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_clients); reg_idx++) {
- if (!rxport->aliased_clients[reg_idx])
+ for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_addrs); reg_idx++) {
+ if (!rxport->aliased_addrs[reg_idx])
break;
}
- if (reg_idx == ARRAY_SIZE(rxport->aliased_clients)) {
+ if (reg_idx == ARRAY_SIZE(rxport->aliased_addrs)) {
dev_err(dev, "rx%u: alias pool exhausted\n", rxport->nport);
return -EADDRNOTAVAIL;
}
- rxport->aliased_clients[reg_idx] = client;
+ rxport->aliased_addrs[reg_idx] = addr;
ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ID(reg_idx),
- client->addr << 1);
+ addr << 1, &ret);
ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx),
- alias << 1);
+ alias << 1, &ret);
+
+ if (ret)
+ return ret;
dev_dbg(dev, "rx%u: client 0x%02x assigned alias 0x%02x at slot %u\n",
- rxport->nport, client->addr, alias, reg_idx);
+ rxport->nport, addr, alias, reg_idx);
return 0;
}
-static void ub960_atr_detach_client(struct i2c_atr *atr, u32 chan_id,
- const struct i2c_client *client)
+static void ub960_atr_detach_addr(struct i2c_atr *atr, u32 chan_id,
+ u16 addr)
{
struct ub960_data *priv = i2c_atr_get_driver_data(atr);
struct ub960_rxport *rxport = priv->rxports[chan_id];
struct device *dev = &priv->client->dev;
unsigned int reg_idx;
+ int ret;
- for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_clients); reg_idx++) {
- if (rxport->aliased_clients[reg_idx] == client)
+ guard(mutex)(&rxport->aliased_addrs_lock);
+
+ for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_addrs); reg_idx++) {
+ if (rxport->aliased_addrs[reg_idx] == addr)
break;
}
- if (reg_idx == ARRAY_SIZE(rxport->aliased_clients)) {
+ if (reg_idx == ARRAY_SIZE(rxport->aliased_addrs)) {
dev_err(dev, "rx%u: client 0x%02x is not mapped!\n",
- rxport->nport, client->addr);
+ rxport->nport, addr);
return;
}
- rxport->aliased_clients[reg_idx] = NULL;
+ rxport->aliased_addrs[reg_idx] = 0;
- ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx), 0);
+ ret = ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx),
+ 0, NULL);
+ if (ret) {
+ dev_err(dev, "rx%u: unable to fully unmap client 0x%02x: %d\n",
+ rxport->nport, addr, ret);
+ return;
+ }
dev_dbg(dev, "rx%u: client 0x%02x released at slot %u\n", rxport->nport,
- client->addr, reg_idx);
+ addr, reg_idx);
}
static const struct i2c_atr_ops ub960_atr_ops = {
- .attach_client = ub960_atr_attach_client,
- .detach_client = ub960_atr_detach_client,
+ .attach_addr = ub960_atr_attach_addr,
+ .detach_addr = ub960_atr_detach_addr,
};
static int ub960_init_atr(struct ub960_data *priv)
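Note (illustration, not part of the patch): guard(mutex) from <linux/cleanup.h> locks immediately and unlocks on every exit path, which is why the early returns in the attach/detach handlers above need no explicit unlock. A minimal sketch:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	/* Sketch: scope-based locking; the mutex is released on any return. */
	static int example_guarded_update(struct mutex *lock, int *counter)
	{
		guard(mutex)(lock);

		if (*counter < 0)
			return -EINVAL;	/* still unlocks */

		(*counter)++;
		return 0;
	}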
@@ -1115,7 +1351,7 @@ static int ub960_init_atr(struct ub960_data *priv)
struct i2c_adapter *parent_adap = priv->client->adapter;
priv->atr = i2c_atr_new(parent_adap, dev, &ub960_atr_ops,
- priv->hw_data->num_rxports);
+ priv->hw_data->num_rxports, 0);
if (IS_ERR(priv->atr))
return PTR_ERR(priv->atr);
@@ -1193,21 +1429,24 @@ err_free_txport:
return ret;
}
-static void ub960_csi_handle_events(struct ub960_data *priv, u8 nport)
+static int ub960_csi_handle_events(struct ub960_data *priv, u8 nport)
{
struct device *dev = &priv->client->dev;
u8 csi_tx_isr;
int ret;
- ret = ub960_txport_read(priv, nport, UB960_TR_CSI_TX_ISR, &csi_tx_isr);
+ ret = ub960_txport_read(priv, nport, UB960_TR_CSI_TX_ISR, &csi_tx_isr,
+ NULL);
if (ret)
- return;
+ return ret;
if (csi_tx_isr & UB960_TR_CSI_TX_ISR_IS_CSI_SYNC_ERROR)
dev_warn(dev, "TX%u: CSI_SYNC_ERROR\n", nport);
if (csi_tx_isr & UB960_TR_CSI_TX_ISR_IS_CSI_PASS_ERROR)
dev_warn(dev, "TX%u: CSI_PASS_ERROR\n", nport);
+
+ return 0;
}
/* -----------------------------------------------------------------------------
@@ -1216,25 +1455,25 @@ static void ub960_csi_handle_events(struct ub960_data *priv, u8 nport)
static int ub960_rxport_enable_vpocs(struct ub960_data *priv)
{
- unsigned int nport;
+ unsigned int failed_nport;
int ret;
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
-
- if (!rxport || !rxport->vpoc)
+ for_each_active_rxport(priv, it) {
+ if (!it.rxport->vpoc)
continue;
- ret = regulator_enable(rxport->vpoc);
- if (ret)
+ ret = regulator_enable(it.rxport->vpoc);
+ if (ret) {
+ failed_nport = it.nport;
goto err_disable_vpocs;
+ }
}
return 0;
err_disable_vpocs:
- while (nport--) {
- struct ub960_rxport *rxport = priv->rxports[nport];
+ while (failed_nport--) {
+ struct ub960_rxport *rxport = priv->rxports[failed_nport];
if (!rxport || !rxport->vpoc)
continue;
@@ -1247,40 +1486,44 @@ err_disable_vpocs:
static void ub960_rxport_disable_vpocs(struct ub960_data *priv)
{
- unsigned int nport;
-
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
-
- if (!rxport || !rxport->vpoc)
+ for_each_active_rxport(priv, it) {
+ if (!it.rxport->vpoc)
continue;
- regulator_disable(rxport->vpoc);
+ regulator_disable(it.rxport->vpoc);
}
}
-static void ub960_rxport_clear_errors(struct ub960_data *priv,
- unsigned int nport)
+static int ub960_rxport_clear_errors(struct ub960_data *priv,
+ unsigned int nport)
{
+ int ret = 0;
u8 v;
- ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v);
- ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v);
- ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &v);
- ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &v);
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v, &ret);
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v, &ret);
+ ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &v, &ret);
+ ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &v, &ret);
- ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_HI, &v);
- ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_LO, &v);
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_HI, &v, &ret);
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_LO, &v, &ret);
- ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v);
+ ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v, &ret);
+
+ return ret;
}
-static void ub960_clear_rx_errors(struct ub960_data *priv)
+static int ub960_clear_rx_errors(struct ub960_data *priv)
{
- unsigned int nport;
+ int ret;
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++)
- ub960_rxport_clear_errors(priv, nport);
+ for_each_rxport(priv, it) {
+ ret = ub960_rxport_clear_errors(priv, it.nport);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int ub960_rxport_get_strobe_pos(struct ub960_data *priv,
@@ -1290,25 +1533,29 @@ static int ub960_rxport_get_strobe_pos(struct ub960_data *priv,
u8 clk_delay, data_delay;
int ret;
- ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
- UB960_IR_RX_ANA_STROBE_SET_CLK, &v);
+ ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB960_IR_RX_ANA_STROBE_SET_CLK, &v, NULL);
+ if (ret)
+ return ret;
clk_delay = (v & UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY) ?
0 : UB960_MANUAL_STROBE_EXTRA_DELAY;
- ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
- UB960_IR_RX_ANA_STROBE_SET_DATA, &v);
+ ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB960_IR_RX_ANA_STROBE_SET_DATA, &v, NULL);
+ if (ret)
+ return ret;
data_delay = (v & UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY) ?
0 : UB960_MANUAL_STROBE_EXTRA_DELAY;
- ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_0, &v);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_0, &v, NULL);
if (ret)
return ret;
clk_delay += v & UB960_IR_RX_ANA_STROBE_SET_CLK_DELAY_MASK;
- ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_1, &v);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_1, &v, NULL);
if (ret)
return ret;
@@ -1319,10 +1566,11 @@ static int ub960_rxport_get_strobe_pos(struct ub960_data *priv,
return 0;
}
-static void ub960_rxport_set_strobe_pos(struct ub960_data *priv,
- unsigned int nport, s8 strobe_pos)
+static int ub960_rxport_set_strobe_pos(struct ub960_data *priv,
+ unsigned int nport, s8 strobe_pos)
{
u8 clk_delay, data_delay;
+ int ret = 0;
clk_delay = UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY;
data_delay = UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY;
@@ -1337,22 +1585,25 @@ static void ub960_rxport_set_strobe_pos(struct ub960_data *priv,
data_delay = strobe_pos | UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY;
ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
- UB960_IR_RX_ANA_STROBE_SET_CLK, clk_delay);
+ UB960_IR_RX_ANA_STROBE_SET_CLK, clk_delay, &ret);
ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
- UB960_IR_RX_ANA_STROBE_SET_DATA, data_delay);
+ UB960_IR_RX_ANA_STROBE_SET_DATA, data_delay, &ret);
+
+ return ret;
}
-static void ub960_rxport_set_strobe_range(struct ub960_data *priv,
- s8 strobe_min, s8 strobe_max)
+static int ub960_rxport_set_strobe_range(struct ub960_data *priv, s8 strobe_min,
+ s8 strobe_max)
{
/* Convert the signed strobe pos to positive zero based value */
strobe_min -= UB960_MIN_AEQ_STROBE_POS;
strobe_max -= UB960_MIN_AEQ_STROBE_POS;
- ub960_write(priv, UB960_XR_SFILTER_CFG,
- ((u8)strobe_min << UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) |
- ((u8)strobe_max << UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT));
+ return ub960_write(priv, UB960_XR_SFILTER_CFG,
+ ((u8)strobe_min << UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) |
+ ((u8)strobe_max << UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT),
+ NULL);
}
static int ub960_rxport_get_eq_level(struct ub960_data *priv,
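Note (illustration, not part of the patch): the subtraction above maps the signed strobe position onto the zero-based register field, since UB960_MIN_AEQ_STROBE_POS is -7:

	/* Sketch: -7 maps to 0, 0 to 7, +7 to 14. */
	static u8 example_strobe_field(s8 pos)
	{
		return (u8)(pos - UB960_MIN_AEQ_STROBE_POS);
	}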
@@ -1361,7 +1612,7 @@ static int ub960_rxport_get_eq_level(struct ub960_data *priv,
int ret;
u8 v;
- ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_STATUS, &v);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_STATUS, &v, NULL);
if (ret)
return ret;
@@ -1371,11 +1622,12 @@ static int ub960_rxport_get_eq_level(struct ub960_data *priv,
return 0;
}
-static void ub960_rxport_set_eq_level(struct ub960_data *priv,
- unsigned int nport, u8 eq_level)
+static int ub960_rxport_set_eq_level(struct ub960_data *priv,
+ unsigned int nport, u8 eq_level)
{
u8 eq_stage_1_select_value, eq_stage_2_select_value;
const unsigned int eq_stage_max = 7;
+ int ret;
u8 v;
if (eq_level <= eq_stage_max) {
@@ -1386,7 +1638,9 @@ static void ub960_rxport_set_eq_level(struct ub960_data *priv,
eq_stage_2_select_value = eq_level - eq_stage_max;
}
- ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v, NULL);
+ if (ret)
+ return ret;
v &= ~(UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_MASK |
UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_MASK);
@@ -1394,67 +1648,102 @@ static void ub960_rxport_set_eq_level(struct ub960_data *priv,
v |= eq_stage_2_select_value << UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_SHIFT;
v |= UB960_RR_AEQ_BYPASS_ENABLE;
- ub960_rxport_write(priv, nport, UB960_RR_AEQ_BYPASS, v);
+	return ub960_rxport_write(priv, nport, UB960_RR_AEQ_BYPASS, v, NULL);
}
-static void ub960_rxport_set_eq_range(struct ub960_data *priv,
- unsigned int nport, u8 eq_min, u8 eq_max)
+static int ub960_rxport_set_eq_range(struct ub960_data *priv,
+ unsigned int nport, u8 eq_min, u8 eq_max)
{
+ int ret = 0;
+
ub960_rxport_write(priv, nport, UB960_RR_AEQ_MIN_MAX,
(eq_min << UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) |
- (eq_max << UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT));
+ (eq_max << UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT),
+ &ret);
/* Enable AEQ min setting */
ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_CTL2,
UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR,
- UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR);
+ UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR, &ret);
+
+ return ret;
}
-static void ub960_rxport_config_eq(struct ub960_data *priv, unsigned int nport)
+static int ub960_rxport_config_eq(struct ub960_data *priv, unsigned int nport)
{
struct ub960_rxport *rxport = priv->rxports[nport];
+ int ret;
/* We also set common settings here. Should be moved elsewhere. */
if (priv->strobe.manual) {
/* Disable AEQ_SFILTER_EN */
- ub960_update_bits(priv, UB960_XR_AEQ_CTL1,
- UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN, 0);
+ ret = ub960_update_bits(priv, UB960_XR_AEQ_CTL1,
+ UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN, 0,
+ NULL);
+ if (ret)
+ return ret;
} else {
/* Enable SFILTER and error control */
- ub960_write(priv, UB960_XR_AEQ_CTL1,
- UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_MASK |
- UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN);
+ ret = ub960_write(priv, UB960_XR_AEQ_CTL1,
+ UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_MASK |
+ UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN,
+ NULL);
+
+ if (ret)
+ return ret;
/* Set AEQ strobe range */
- ub960_rxport_set_strobe_range(priv, priv->strobe.min,
- priv->strobe.max);
+ ret = ub960_rxport_set_strobe_range(priv, priv->strobe.min,
+ priv->strobe.max);
+ if (ret)
+ return ret;
}
/* The rest are port specific */
if (priv->strobe.manual)
- ub960_rxport_set_strobe_pos(priv, nport, rxport->eq.strobe_pos);
+ ret = ub960_rxport_set_strobe_pos(priv, nport,
+ rxport->eq.strobe_pos);
else
- ub960_rxport_set_strobe_pos(priv, nport, 0);
+ ret = ub960_rxport_set_strobe_pos(priv, nport, 0);
+
+ if (ret)
+ return ret;
if (rxport->eq.manual_eq) {
- ub960_rxport_set_eq_level(priv, nport,
- rxport->eq.manual.eq_level);
+ ret = ub960_rxport_set_eq_level(priv, nport,
+ rxport->eq.manual.eq_level);
+ if (ret)
+ return ret;
/* Enable AEQ Bypass */
- ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS,
- UB960_RR_AEQ_BYPASS_ENABLE,
- UB960_RR_AEQ_BYPASS_ENABLE);
+ ret = ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS,
+ UB960_RR_AEQ_BYPASS_ENABLE,
+ UB960_RR_AEQ_BYPASS_ENABLE,
+ NULL);
+ if (ret)
+ return ret;
} else {
- ub960_rxport_set_eq_range(priv, nport,
- rxport->eq.aeq.eq_level_min,
- rxport->eq.aeq.eq_level_max);
+ ret = ub960_rxport_set_eq_range(priv, nport,
+ rxport->eq.aeq.eq_level_min,
+ rxport->eq.aeq.eq_level_max);
+ if (ret)
+ return ret;
/* Disable AEQ Bypass */
- ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS,
- UB960_RR_AEQ_BYPASS_ENABLE, 0);
+ ret = ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS,
+ UB960_RR_AEQ_BYPASS_ENABLE, 0,
+ NULL);
+ if (ret)
+ return ret;
}
+
+ return 0;
}
static int ub960_rxport_link_ok(struct ub960_data *priv, unsigned int nport,
@@ -1469,7 +1758,7 @@ static int ub960_rxport_link_ok(struct ub960_data *priv, unsigned int nport,
bool errors;
ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1,
- &rx_port_sts1);
+ &rx_port_sts1, NULL);
if (ret)
return ret;
@@ -1479,25 +1768,27 @@ static int ub960_rxport_link_ok(struct ub960_data *priv, unsigned int nport,
}
ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2,
- &rx_port_sts2);
+ &rx_port_sts2, NULL);
if (ret)
return ret;
- ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &csi_rx_sts);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &csi_rx_sts,
+ NULL);
if (ret)
return ret;
ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER,
- &csi_err_cnt);
+ &csi_err_cnt, NULL);
if (ret)
return ret;
- ret = ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &bcc_sts);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &bcc_sts,
+ NULL);
if (ret)
return ret;
ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI,
- &parity_errors);
+ &parity_errors, NULL);
if (ret)
return ret;
@@ -1512,6 +1803,23 @@ static int ub960_rxport_link_ok(struct ub960_data *priv, unsigned int nport,
return 0;
}
+static int ub960_rxport_lockup_wa_ub9702(struct ub960_data *priv)
+{
+ int ret;
+
+ /* Toggle PI_MODE to avoid possible FPD RX lockup */
+
+ ret = ub960_update_bits(priv, UB9702_RR_CHANNEL_MODE, GENMASK(4, 3),
+ 2 << 3, NULL);
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 5000);
+
+ return ub960_update_bits(priv, UB9702_RR_CHANNEL_MODE, GENMASK(4, 3),
+ 0, NULL);
+}
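The workaround above writes field value 2 into CHANNEL_MODE bits 4:3 and then clears the field. For readers of the raw shift, `2 << 3` is the open-coded form of a field insert; an equivalent spelling with <linux/bitfield.h> (illustrative only, not part of the patch):

#include <linux/bitfield.h>

u8 pi_on  = FIELD_PREP(GENMASK(4, 3), 2);	/* 2 << 3 == 0x10 */
u8 pi_off = FIELD_PREP(GENMASK(4, 3), 0);	/* back to normal mode */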
+
/*
* Wait for the RX ports to lock, have no errors and have stable strobe position
* and EQ level.
@@ -1542,6 +1850,7 @@ static int ub960_rxport_wait_locks(struct ub960_data *priv,
link_ok_mask = 0;
while (time_before(jiffies, timeout)) {
+ bool fpd4_wa = false;
missing = 0;
for_each_set_bit(nport, &port_mask,
@@ -1556,6 +1865,9 @@ static int ub960_rxport_wait_locks(struct ub960_data *priv,
if (ret)
return ret;
+ if (!ok && rxport->cdr_mode == RXPORT_CDR_FPD4)
+ fpd4_wa = true;
+
/*
* We want the link to be ok for two consecutive loops,
* as a link could get established just before our test
@@ -1575,6 +1887,12 @@ static int ub960_rxport_wait_locks(struct ub960_data *priv,
if (missing == 0)
break;
+ if (fpd4_wa) {
+ ret = ub960_rxport_lockup_wa_ub9702(priv);
+ if (ret)
+ return ret;
+ }
+
/*
* The sleep time of 10 ms was found by testing to give a lock
* with a few iterations. It can be decreased if on some setups
@@ -1600,7 +1918,11 @@ static int ub960_rxport_wait_locks(struct ub960_data *priv,
continue;
}
- ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH, &v);
+ ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH,
+ &v, NULL);
+
+ if (ret)
+ return ret;
if (priv->hw_data->is_ub9702) {
dev_dbg(dev, "\trx%u: locked, freq %llu Hz\n",
@@ -1676,13 +1998,188 @@ static unsigned long ub960_calc_bc_clk_rate_ub9702(struct ub960_data *priv,
}
}
+static int ub960_rxport_serializer_write(struct ub960_rxport *rxport, u8 reg,
+ u8 val, int *err)
+{
+ struct ub960_data *priv = rxport->priv;
+ struct device *dev = &priv->client->dev;
+ union i2c_smbus_data data;
+ int ret;
+
+ if (err && *err)
+ return *err;
+
+ data.byte = val;
+
+ ret = i2c_smbus_xfer(priv->client->adapter, rxport->ser.alias, 0,
+ I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE_DATA, &data);
+ if (ret)
+ dev_err(dev,
+ "rx%u: cannot write serializer register 0x%02x (%d)!\n",
+ rxport->nport, reg, ret);
+
+ if (ret && err)
+ *err = ret;
+
+ return ret;
+}
+
+static int ub960_rxport_serializer_read(struct ub960_rxport *rxport, u8 reg,
+ u8 *val, int *err)
+{
+ struct ub960_data *priv = rxport->priv;
+ struct device *dev = &priv->client->dev;
+ union i2c_smbus_data data = { 0 };
+ int ret;
+
+ if (err && *err)
+ return *err;
+
+ ret = i2c_smbus_xfer(priv->client->adapter, rxport->ser.alias,
+ priv->client->flags, I2C_SMBUS_READ, reg,
+ I2C_SMBUS_BYTE_DATA, &data);
+ if (ret)
+ dev_err(dev,
+ "rx%u: cannot read serializer register 0x%02x (%d)!\n",
+ rxport->nport, reg, ret);
+ else
+ *val = data.byte;
+
+ if (ret && err)
+ *err = ret;
+
+ return ret;
+}
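With both serializer helpers latching into a shared `ret`, a read-modify-write needs only one check at the end; if the read fails, the accumulator also skips the dependent write. A hypothetical usage sketch (the function name and register are placeholders):

static int ub960_ser_set_bit0(struct ub960_rxport *rxport, u8 reg)
{
	u8 v = 0;	/* initialized; the write is skipped if the read failed */
	int ret = 0;

	ub960_rxport_serializer_read(rxport, reg, &v, &ret);
	ub960_rxport_serializer_write(rxport, reg, v | BIT(0), &ret);

	return ret;
}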
+
+static int ub960_serializer_temp_ramp(struct ub960_rxport *rxport)
+{
+ struct ub960_data *priv = rxport->priv;
+ short temp_dynamic_offset[] = {-1, -1, 0, 0, 1, 1, 1, 3};
+ u8 temp_dynamic_cfg;
+ u8 nport = rxport->nport;
+ u8 ser_temp_code;
+ int ret = 0;
+
+ /* Configure temp ramp only on UB953 */
+ if (!fwnode_device_is_compatible(rxport->ser.fwnode, "ti,ds90ub953-q1"))
+ return 0;
+
+ /* Read current serializer die temperature */
+ ub960_rxport_read(priv, nport, UB960_RR_SENSOR_STS_2, &ser_temp_code,
+ &ret);
+
+ /* Enable I2C passthrough on back channel */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH, &ret);
+
+ if (ret)
+ return ret;
+
+ /* Select indirect page for analog regs on the serializer */
+ ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_CTL,
+ UB953_IND_TARGET_ANALOG << 2, &ret);
+
+ /* Set temperature ramp dynamic and static config */
+ ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_ADDR,
+ UB953_IND_ANA_TEMP_DYNAMIC_CFG, &ret);
+ ub960_rxport_serializer_read(rxport, UB953_REG_IND_ACC_DATA,
+ &temp_dynamic_cfg, &ret);
+
+ if (ret)
+ return ret;
+
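+ /*
+ * Note: the offset table above has eight entries; ser_temp_code read
+ * from SENSOR_STS_2 is assumed to be a 3-bit code (0..7).
+ */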
+ temp_dynamic_cfg |= UB953_IND_ANA_TEMP_DYNAMIC_CFG_OV;
+ temp_dynamic_cfg += temp_dynamic_offset[ser_temp_code];
+
+ /* Update temp static config */
+ ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_ADDR,
+ UB953_IND_ANA_TEMP_STATIC_CFG, &ret);
+ ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_DATA,
+ UB953_IND_ANA_TEMP_STATIC_CFG_MASK, &ret);
+
+ /* Update temperature ramp dynamic config */
+ ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_ADDR,
+ UB953_IND_ANA_TEMP_DYNAMIC_CFG, &ret);
+
+ /* Enable I2C auto ack on BC before we set dynamic cfg and reset */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
+ UB960_RR_BCC_CONFIG_AUTO_ACK_ALL, &ret);
+
+ ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_DATA,
+ temp_dynamic_cfg, &ret);
+
+ if (ret)
+ return ret;
+
+ /* Soft reset to apply PLL updates */
+ ub960_rxport_serializer_write(rxport, UB953_REG_RESET_CTL,
+ UB953_REG_RESET_CTL_DIGITAL_RESET_0,
+ &ret);
+ msleep(20);
+
+ /* Disable I2C passthrough and auto-ack on BC */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
+ UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
+ 0x0, &ret);
+
+ return ret;
+}
+
+static int ub960_rxport_bc_ser_config(struct ub960_rxport *rxport)
+{
+ struct ub960_data *priv = rxport->priv;
+ struct device *dev = &priv->client->dev;
+ u8 nport = rxport->nport;
+ int ret = 0;
+
+ /* Skip port if serializer's address is not known */
+ if (rxport->ser.addr < 0) {
+ dev_dbg(dev,
+ "rx%u: serializer address missing, skip configuration\n",
+ nport);
+ return 0;
+ }
+
+ /*
+ * Note: the code here probably only works for CSI-2 serializers in
+ * sync mode. To support other serializers the BC related configuration
+ * should be done before calling this function.
+ */
+
+ /* Enable I2C passthrough and auto-ack on BC */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
+ UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
+ UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
+ &ret);
+
+ if (ret)
+ return ret;
+
+ /* Disable BC alternate mode auto detect */
+ ub960_rxport_serializer_write(rxport, UB971_ENH_BC_CHK, 0x02, &ret);
+ /* Decrease link detect timer */
+ ub960_rxport_serializer_write(rxport, UB953_REG_BC_CTRL, 0x06, &ret);
+
+ /* Disable I2C passthrough and auto-ack on BC */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
+ UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
+ 0x0, &ret);
+
+ return ret;
+}
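The serializer writes above are bracketed between enabling and disabling I2C passthrough plus auto-ack on the back channel (with auto-ack, the deserializer acknowledges the transfer itself, so the write completes even if the serializer cannot respond in time). ub960_serializer_temp_ramp() above uses the same bracket. A hypothetical helper shape for the idiom, which always drops passthrough but reports the first failure:

static int ub960_bc_passthrough_bracket(struct ub960_data *priv, u8 nport,
					int (*body)(struct ub960_rxport *))
{
	u8 bits = UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
		  UB960_RR_BCC_CONFIG_AUTO_ACK_ALL;
	int ret, ret2;

	ret = ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
				       bits, bits, NULL);
	if (ret)
		return ret;

	ret = body(priv->rxports[nport]);

	/* Always undo passthrough and auto-ack, keep the first error. */
	ret2 = ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
					bits, 0, NULL);

	return ret ?: ret2;
}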
+
static int ub960_rxport_add_serializer(struct ub960_data *priv, u8 nport)
{
struct ub960_rxport *rxport = priv->rxports[nport];
struct device *dev = &priv->client->dev;
struct ds90ub9xx_platform_data *ser_pdata = &rxport->ser.pdata;
struct i2c_board_info ser_info = {
- .of_node = to_of_node(rxport->ser.fwnode),
.fwnode = rxport->ser.fwnode,
.platform_data = ser_pdata,
};
@@ -1726,30 +2223,27 @@ static void ub960_rxport_remove_serializer(struct ub960_data *priv, u8 nport)
/* Add serializer i2c devices for all initialized ports */
static int ub960_rxport_add_serializers(struct ub960_data *priv)
{
- unsigned int nport;
+ unsigned int failed_nport;
int ret;
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
-
- if (!rxport)
- continue;
-
- ret = ub960_rxport_add_serializer(priv, nport);
- if (ret)
+ for_each_active_rxport(priv, it) {
+ ret = ub960_rxport_add_serializer(priv, it.nport);
+ if (ret) {
+ failed_nport = it.nport;
goto err_remove_sers;
+ }
}
return 0;
err_remove_sers:
- while (nport--) {
- struct ub960_rxport *rxport = priv->rxports[nport];
+ while (failed_nport--) {
+ struct ub960_rxport *rxport = priv->rxports[failed_nport];
if (!rxport)
continue;
- ub960_rxport_remove_serializer(priv, nport);
+ ub960_rxport_remove_serializer(priv, failed_nport);
}
return ret;
@@ -1757,20 +2251,12 @@ err_remove_sers:
static void ub960_rxport_remove_serializers(struct ub960_data *priv)
{
- unsigned int nport;
-
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
-
- if (!rxport)
- continue;
-
- ub960_rxport_remove_serializer(priv, nport);
- }
+ for_each_active_rxport(priv, it)
+ ub960_rxport_remove_serializer(priv, it.nport);
}
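These hunks replace the open-coded port loops with for_each_active_rxport(), whose definition is introduced elsewhere in this series. From its use here it yields an iterator exposing it.nport and it.rxport and skips ports that were never initialized. A plausible shape (helper name hypothetical; the series' actual definition may differ):

struct ub960_rxport_iter {
	unsigned int nport;
	struct ub960_rxport *rxport;
};

static struct ub960_rxport_iter
ub960_next_active_rxport(struct ub960_data *priv, struct ub960_rxport_iter it)
{
	for (; it.nport < priv->hw_data->num_rxports; it.nport++) {
		it.rxport = priv->rxports[it.nport];
		if (it.rxport)
			break;
	}

	return it;
}

#define for_each_active_rxport(priv, it)				\
	for (struct ub960_rxport_iter it = ub960_next_active_rxport(	\
		     (priv), (struct ub960_rxport_iter){ .nport = 0 });	\
	     it.nport < (priv)->hw_data->num_rxports;			\
	     it.nport++, it = ub960_next_active_rxport((priv), it))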
-static void ub960_init_tx_port(struct ub960_data *priv,
- struct ub960_txport *txport)
+static int ub960_init_tx_port(struct ub960_data *priv,
+ struct ub960_txport *txport)
{
unsigned int nport = txport->nport;
u8 csi_ctl = 0;
@@ -1787,76 +2273,114 @@ static void ub960_init_tx_port(struct ub960_data *priv,
if (!txport->non_continous_clk)
csi_ctl |= UB960_TR_CSI_CTL_CSI_CONTS_CLOCK;
- ub960_txport_write(priv, nport, UB960_TR_CSI_CTL, csi_ctl);
+ return ub960_txport_write(priv, nport, UB960_TR_CSI_CTL, csi_ctl, NULL);
}
-static int ub960_init_tx_ports(struct ub960_data *priv)
+static int ub960_init_tx_ports_ub960(struct ub960_data *priv)
{
- unsigned int nport;
u8 speed_select;
- u8 pll_div;
-
- /* TX ports */
switch (priv->tx_data_rate) {
+ case MHZ(400):
+ speed_select = 3;
+ break;
+ case MHZ(800):
+ speed_select = 2;
+ break;
+ case MHZ(1200):
+ speed_select = 1;
+ break;
case MHZ(1600):
default:
speed_select = 0;
+ break;
+ }
+
+ return ub960_write(priv, UB960_SR_CSI_PLL_CTL, speed_select, NULL);
+}
+
+static int ub960_init_tx_ports_ub9702(struct ub960_data *priv)
+{
+ u8 speed_select;
+ u8 ana_pll_div;
+ u8 pll_div;
+ int ret = 0;
+
+ switch (priv->tx_data_rate) {
+ case MHZ(400):
+ speed_select = 3;
+ pll_div = 0x10;
+ ana_pll_div = 0xa2;
+ break;
+ case MHZ(800):
+ speed_select = 2;
pll_div = 0x10;
+ ana_pll_div = 0x92;
break;
case MHZ(1200):
speed_select = 1;
pll_div = 0x18;
+ ana_pll_div = 0x90;
break;
- case MHZ(800):
- speed_select = 2;
- pll_div = 0x10;
+ case MHZ(1500):
+ speed_select = 0;
+ pll_div = 0x0f;
+ ana_pll_div = 0x82;
break;
- case MHZ(400):
- speed_select = 3;
+ case MHZ(1600):
+ default:
+ speed_select = 0;
pll_div = 0x10;
+ ana_pll_div = 0x82;
+ break;
+ case MHZ(2500):
+ speed_select = 0x10;
+ pll_div = 0x19;
+ ana_pll_div = 0x80;
break;
}
- ub960_write(priv, UB960_SR_CSI_PLL_CTL, speed_select);
+ ub960_write(priv, UB960_SR_CSI_PLL_CTL, speed_select, &ret);
+ ub960_write(priv, UB9702_SR_CSI_PLL_DIV, pll_div, &ret);
+ ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA,
+ UB9702_IR_CSI_ANA_CSIPLL_REG_1, ana_pll_div, &ret);
- if (priv->hw_data->is_ub9702) {
- ub960_write(priv, UB960_SR_CSI_PLL_DIV, pll_div);
+ return ret;
+}
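The switch above is effectively a small rate table. Restated as data for readability (values copied from the cases; the struct itself is hypothetical). Note that MHZ(2500) uses speed_select 0x10, a wider encoding than the small values used for the slower rates:

static const struct {
	u32 rate;
	u8 speed_select;
	u8 pll_div;
	u8 ana_pll_div;
} ub9702_tx_rates[] = {
	{ MHZ(400),  0x03, 0x10, 0xa2 },
	{ MHZ(800),  0x02, 0x10, 0x92 },
	{ MHZ(1200), 0x01, 0x18, 0x90 },
	{ MHZ(1500), 0x00, 0x0f, 0x82 },
	{ MHZ(1600), 0x00, 0x10, 0x82 },	/* also the default */
	{ MHZ(2500), 0x10, 0x19, 0x80 },
};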
- switch (priv->tx_data_rate) {
- case MHZ(1600):
- default:
- ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x92, 0x80);
- ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x4b, 0x2a);
- break;
- case MHZ(800):
- ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x92, 0x90);
- ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x4f, 0x2a);
- ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x4b, 0x2a);
- break;
- case MHZ(400):
- ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x92, 0xa0);
- break;
- }
- }
+static int ub960_init_tx_ports(struct ub960_data *priv)
+{
+ int ret;
- for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
+ if (priv->hw_data->is_ub9702)
+ ret = ub960_init_tx_ports_ub9702(priv);
+ else
+ ret = ub960_init_tx_ports_ub960(priv);
+
+ if (ret)
+ return ret;
+
+ for (unsigned int nport = 0; nport < priv->hw_data->num_txports;
+ nport++) {
struct ub960_txport *txport = priv->txports[nport];
if (!txport)
continue;
- ub960_init_tx_port(priv, txport);
+ ret = ub960_init_tx_port(priv, txport);
+ if (ret)
+ return ret;
}
return 0;
}
-static void ub960_init_rx_port_ub960(struct ub960_data *priv,
- struct ub960_rxport *rxport)
+static int ub960_init_rx_port_ub960(struct ub960_data *priv,
+ struct ub960_rxport *rxport)
{
unsigned int nport = rxport->nport;
u32 bc_freq_val;
+ int ret = 0;
/*
* Back channel frequency select.
@@ -1885,306 +2409,870 @@ static void ub960_init_rx_port_ub960(struct ub960_data *priv,
break;
default:
- return;
+ return -EINVAL;
}
ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK,
- bc_freq_val);
+ bc_freq_val, &ret);
switch (rxport->rx_mode) {
case RXPORT_MODE_RAW10:
/* FPD3_MODE = RAW10 Mode (DS90UB913A-Q1 / DS90UB933-Q1 compatible) */
ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG,
UB960_RR_PORT_CONFIG_FPD3_MODE_MASK,
- 0x3);
+ 0x3, &ret);
/*
* RAW10_8BIT_CTL = 0b10 : 8-bit processing using upper 8 bits
*/
ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2,
UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_MASK,
- 0x2 << UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_SHIFT);
+ 0x2 << UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_SHIFT,
+ &ret);
break;
case RXPORT_MODE_RAW12_HF:
case RXPORT_MODE_RAW12_LF:
/* Not implemented */
- return;
+ return -EINVAL;
case RXPORT_MODE_CSI2_SYNC:
case RXPORT_MODE_CSI2_NONSYNC:
/* CSI-2 Mode (DS90UB953-Q1 compatible) */
ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG, 0x3,
- 0x0);
+ 0x0, &ret);
break;
}
/* LV_POLARITY & FV_POLARITY */
ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3,
- rxport->lv_fv_pol);
+ rxport->lv_fv_pol, &ret);
/* Enable all interrupt sources from this port */
- ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_HI, 0x07);
- ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_LO, 0x7f);
+ ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_HI, 0x07, &ret);
+ ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_LO, 0x7f, &ret);
/* Enable I2C_PASS_THROUGH */
ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
- UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH);
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH, &ret);
/* Enable I2C communication to the serializer via the alias addr */
ub960_rxport_write(priv, nport, UB960_RR_SER_ALIAS_ID,
- rxport->ser.alias << 1);
+ rxport->ser.alias << 1, &ret);
/* Configure EQ related settings */
ub960_rxport_config_eq(priv, nport);
/* Enable RX port */
- ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport));
+ ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
+ &ret);
+
+ return ret;
}
-static void ub960_init_rx_port_ub9702_fpd3(struct ub960_data *priv,
- struct ub960_rxport *rxport)
+static int ub960_init_rx_ports_ub960(struct ub960_data *priv)
{
- unsigned int nport = rxport->nport;
- u8 bc_freq_val;
- u8 fpd_func_mode;
+ struct device *dev = &priv->client->dev;
+ unsigned int port_lock_mask;
+ unsigned int port_mask;
+ int ret;
- switch (rxport->rx_mode) {
- case RXPORT_MODE_RAW10:
- bc_freq_val = 0;
- fpd_func_mode = 5;
- break;
+ for_each_active_rxport(priv, it) {
+ ret = ub960_init_rx_port_ub960(priv, it.rxport);
+ if (ret)
+ return ret;
+ }
- case RXPORT_MODE_RAW12_HF:
- bc_freq_val = 0;
- fpd_func_mode = 4;
- break;
+ ret = ub960_reset(priv, false);
+ if (ret)
+ return ret;
- case RXPORT_MODE_RAW12_LF:
- bc_freq_val = 0;
- fpd_func_mode = 6;
- break;
+ port_mask = 0;
- case RXPORT_MODE_CSI2_SYNC:
- bc_freq_val = 6;
- fpd_func_mode = 2;
- break;
+ for_each_active_rxport(priv, it)
+ port_mask |= BIT(it.nport);
- case RXPORT_MODE_CSI2_NONSYNC:
- bc_freq_val = 2;
- fpd_func_mode = 2;
- break;
+ ret = ub960_rxport_wait_locks(priv, port_mask, &port_lock_mask);
+ if (ret)
+ return ret;
- default:
- return;
+ if (port_mask != port_lock_mask) {
+ ret = -EIO;
+ dev_err_probe(dev, ret, "Failed to lock all RX ports\n");
+ return ret;
}
- ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, 0x7,
- bc_freq_val);
- ub960_rxport_write(priv, nport, UB960_RR_CHANNEL_MODE, fpd_func_mode);
+ /* Set temperature ramp on serializer */
+ for_each_active_rxport(priv, it) {
+ ret = ub960_serializer_temp_ramp(it.rxport);
+ if (ret)
+ return ret;
+
+ ub960_rxport_update_bits(priv, it.nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
+ &ret);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Clear any errors caused by switching the RX port settings while
+ * probing.
+ */
+ ret = ub960_clear_rx_errors(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * UB9702 specific initial RX port configuration
+ */
+
+static int ub960_turn_off_rxport_ub9702(struct ub960_data *priv,
+ unsigned int nport)
+{
+ int ret = 0;
+
+ /* Disable RX port */
+ ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), 0, &ret);
+
+ /* Disable FPD Rx and FPD BC CMR */
+ ub960_rxport_write(priv, nport, UB9702_RR_RX_CTL_2, 0x1b, &ret);
- /* set serdes_eq_mode = 1 */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xa8, 0x80);
+ /* Disable FPD BC Tx */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, BIT(4), 0,
+ &ret);
- /* enable serdes driver */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x0d, 0x7f);
+ /* Disable internal RX blocks */
+ ub960_rxport_write(priv, nport, UB9702_RR_RX_CTL_1, 0x15, &ret);
- /* set serdes_eq_offset=4 */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2b, 0x04);
+ /* Disable AEQ */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_CFG_2, 0x03, &ret);
+
+ /* PI disabled and oDAC disabled */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_CFG_4, 0x09, &ret);
- /* init default serdes_eq_max in 0xa9 */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xa9, 0x23);
+ /* AEQ configured for disabled link */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_CFG_1, 0x20, &ret);
- /* init serdes_eq_min in 0xaa */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xaa, 0);
+ /* disable AEQ clock and DFE */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_CFG_3, 0x45, &ret);
- /* serdes_driver_ctl2 control: DS90UB953-Q1/DS90UB933-Q1/DS90UB913A-Q1 */
- ub960_ind_update_bits(priv, UB960_IND_TARGET_RX_ANA(nport), 0x1b,
- BIT(3), BIT(3));
+ /* Powerdown FPD3 CDR */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_FPD3_CDR_CTRL_SEL_5, 0x82, &ret);
- /* RX port to half-rate */
- ub960_update_bits(priv, UB960_SR_FPD_RATE_CFG, 0x3 << (nport * 2),
- BIT(nport * 2));
+ return ret;
}
-static void ub960_init_rx_port_ub9702_fpd4_aeq(struct ub960_data *priv,
- struct ub960_rxport *rxport)
+static int ub960_set_bc_drv_config_ub9702(struct ub960_data *priv,
+ unsigned int nport)
{
- unsigned int nport = rxport->nport;
- bool first_time_power_up = true;
-
- if (first_time_power_up) {
- u8 v;
+ u8 fpd_bc_ctl0;
+ u8 fpd_bc_ctl1;
+ u8 fpd_bc_ctl2;
+ int ret = 0;
- /* AEQ init */
- ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2c, &v);
+ if (priv->rxports[nport]->cdr_mode == RXPORT_CDR_FPD4) {
+ /* Set FPD PBC drv into FPD IV mode */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x27, v);
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x28, v + 1);
+ fpd_bc_ctl0 = 0;
+ fpd_bc_ctl1 = 0;
+ fpd_bc_ctl2 = 0;
+ } else {
+ /* Set FPD PBC drv into FPD III mode */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2b, 0x00);
+ fpd_bc_ctl0 = 2;
+ fpd_bc_ctl1 = 1;
+ fpd_bc_ctl2 = 5;
}
- /* enable serdes_eq_ctl2 */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x9e, 0x00);
+ ub960_ind_update_bits(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_FPD_BC_CTL0, GENMASK(7, 5),
+ fpd_bc_ctl0 << 5, &ret);
- /* enable serdes_eq_ctl1 */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x90, 0x40);
+ ub960_ind_update_bits(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_FPD_BC_CTL1, BIT(6),
+ fpd_bc_ctl1 << 6, &ret);
- /* enable serdes_eq_en */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2e, 0x40);
+ ub960_ind_update_bits(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_FPD_BC_CTL2, GENMASK(6, 3),
+ fpd_bc_ctl2 << 3, &ret);
- /* disable serdes_eq_override */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xf0, 0x00);
+ return ret;
+}
- /* disable serdes_gain_override */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x71, 0x00);
+static int ub960_set_fpd4_sync_mode_ub9702(struct ub960_data *priv,
+ unsigned int nport)
+{
+ int ret = 0;
+
+ /* FPD4 Sync Mode */
+ ub960_rxport_write(priv, nport, UB9702_RR_CHANNEL_MODE, 0x0, &ret);
+
+ /* BC_FREQ_SELECT = (PLL_FREQ/3200) Mbps */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK, 6, &ret);
+
+ if (ret)
+ return ret;
+
+ ret = ub960_set_bc_drv_config_ub9702(priv, nport);
+ if (ret)
+ return ret;
+
+ /* Set AEQ timer to 400us/step */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_SYSTEM_INIT_REG0, 0x2f, &ret);
+
+ /* Disable FPD4 Auto Recovery */
+ ub960_update_bits(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, GENMASK(5, 4), 0,
+ &ret);
+
+ /* Enable RX port */
+ ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
+ &ret);
+
+ /* Enable FPD4 Auto Recovery */
+ ub960_update_bits(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, GENMASK(5, 4),
+ BIT(4), &ret);
+
+ return ret;
}
-static void ub960_init_rx_port_ub9702_fpd4(struct ub960_data *priv,
- struct ub960_rxport *rxport)
+static int ub960_set_fpd4_async_mode_ub9702(struct ub960_data *priv,
+ unsigned int nport)
{
- unsigned int nport = rxport->nport;
- u8 bc_freq_val;
+ int ret = 0;
- switch (rxport->rx_mode) {
- case RXPORT_MODE_RAW10:
- bc_freq_val = 0;
- break;
+ /* FPD4 ASync Mode */
+ ub960_rxport_write(priv, nport, UB9702_RR_CHANNEL_MODE, 0x1, &ret);
- case RXPORT_MODE_RAW12_HF:
- bc_freq_val = 0;
- break;
+ /* 10Mbps w/ BC enabled */
+ /* BC_FREQ_SELECT=(PLL_FREQ/3200) Mbps */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK, 2, &ret);
- case RXPORT_MODE_RAW12_LF:
- bc_freq_val = 0;
- break;
+ if (ret)
+ return ret;
- case RXPORT_MODE_CSI2_SYNC:
- bc_freq_val = 6;
- break;
+ ret = ub960_set_bc_drv_config_ub9702(priv, nport);
+ if (ret)
+ return ret;
- case RXPORT_MODE_CSI2_NONSYNC:
- bc_freq_val = 2;
- break;
+ /* Set AEQ timer to 400us/step */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_SYSTEM_INIT_REG0, 0x2f, &ret);
- default:
- return;
- }
+ /* Disable FPD4 Auto Recovery */
+ ub960_update_bits(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, GENMASK(5, 4), 0,
+ &ret);
- ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, 0x7,
- bc_freq_val);
+ /* Enable RX port */
+ ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
+ &ret);
- /* FPD4 Sync Mode */
- ub960_rxport_write(priv, nport, UB960_RR_CHANNEL_MODE, 0);
+ /* Enable FPD4 Auto Recovery */
+ ub960_update_bits(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, GENMASK(5, 4),
+ BIT(4), &ret);
- /* add serdes_eq_offset of 4 */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2b, 0x04);
+ return ret;
+}
+
+static int ub960_set_fpd3_sync_mode_ub9702(struct ub960_data *priv,
+ unsigned int nport)
+{
+ int ret = 0;
- /* FPD4 serdes_start_eq in 0x27: assign default */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x27, 0x0);
- /* FPD4 serdes_end_eq in 0x28: assign default */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x28, 0x23);
+ /* FPD3 Sync Mode */
+ ub960_rxport_write(priv, nport, UB9702_RR_CHANNEL_MODE, 0x2, &ret);
- /* set serdes_driver_mode into FPD IV mode */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x04, 0x00);
- /* set FPD PBC drv into FPD IV mode */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x1b, 0x00);
+ /* BC_FREQ_SELECT=(PLL_FREQ/3200) Mbps */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK, 6, &ret);
- /* set serdes_system_init to 0x2f */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x21, 0x2f);
- /* set serdes_system_rst in reset mode */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x25, 0xc1);
+ /* Set AEQ_LOCK_MODE = 1 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_FPD3_AEQ_CTRL_SEL_1, BIT(7), &ret);
- /* RX port to 7.55G mode */
- ub960_update_bits(priv, UB960_SR_FPD_RATE_CFG, 0x3 << (nport * 2),
- 0 << (nport * 2));
+ if (ret)
+ return ret;
- ub960_init_rx_port_ub9702_fpd4_aeq(priv, rxport);
+ ret = ub960_set_bc_drv_config_ub9702(priv, nport);
+ if (ret)
+ return ret;
+
+ /* Enable RX port */
+ ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
+ &ret);
+
+ return ret;
}
-static void ub960_init_rx_port_ub9702(struct ub960_data *priv,
- struct ub960_rxport *rxport)
+static int ub960_set_raw10_dvp_mode_ub9702(struct ub960_data *priv,
+ unsigned int nport)
{
- unsigned int nport = rxport->nport;
+ int ret = 0;
- if (rxport->cdr_mode == RXPORT_CDR_FPD3)
- ub960_init_rx_port_ub9702_fpd3(priv, rxport);
- else /* RXPORT_CDR_FPD4 */
- ub960_init_rx_port_ub9702_fpd4(priv, rxport);
+ /* FPD3 RAW10 Mode */
+ ub960_rxport_write(priv, nport, UB9702_RR_CHANNEL_MODE, 0x5, &ret);
- switch (rxport->rx_mode) {
- case RXPORT_MODE_RAW10:
- /*
- * RAW10_8BIT_CTL = 0b11 : 8-bit processing using lower 8 bits
- * 0b10 : 8-bit processing using upper 8 bits
- */
- ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2,
- 0x3 << 6, 0x2 << 6);
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK, 0, &ret);
+
+ /* Set AEQ_LOCK_MODE = 1 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_FPD3_AEQ_CTRL_SEL_1, BIT(7), &ret);
+
+ /*
+ * RAW10_8BIT_CTL = 0b11 : 8-bit processing using lower 8 bits
+ * 0b10 : 8-bit processing using upper 8 bits
+ */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3 << 6,
+ 0x2 << 6, &ret);
+
+ /* LV_POLARITY & FV_POLARITY */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3,
+ priv->rxports[nport]->lv_fv_pol, &ret);
+
+ if (ret)
+ return ret;
+
+ ret = ub960_set_bc_drv_config_ub9702(priv, nport);
+ if (ret)
+ return ret;
+
+ /* Enable RX port */
+ ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
+ &ret);
+
+ return ret;
+}
+
+static int ub960_configure_rx_port_ub9702(struct ub960_data *priv,
+ unsigned int nport)
+{
+ struct device *dev = &priv->client->dev;
+ struct ub960_rxport *rxport = priv->rxports[nport];
+ int ret;
+
+ if (!rxport) {
+ ret = ub960_turn_off_rxport_ub9702(priv, nport);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "rx%u: disabled\n", nport);
+ return 0;
+ }
+
+ switch (rxport->cdr_mode) {
+ case RXPORT_CDR_FPD4:
+ switch (rxport->rx_mode) {
+ case RXPORT_MODE_CSI2_SYNC:
+ ret = ub960_set_fpd4_sync_mode_ub9702(priv, nport);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "rx%u: FPD-Link IV SYNC mode\n", nport);
+ break;
+ case RXPORT_MODE_CSI2_NONSYNC:
+ ret = ub960_set_fpd4_async_mode_ub9702(priv, nport);
+ if (ret)
+ return ret;
+ dev_dbg(dev, "rx%u: FPD-Link IV ASYNC mode\n", nport);
+ break;
+ default:
+ dev_err(dev, "rx%u: unsupported FPD4 mode %u\n", nport,
+ rxport->rx_mode);
+ return -EINVAL;
+ }
break;
- case RXPORT_MODE_RAW12_HF:
- case RXPORT_MODE_RAW12_LF:
- /* Not implemented */
- return;
+ case RXPORT_CDR_FPD3:
+ switch (rxport->rx_mode) {
+ case RXPORT_MODE_CSI2_SYNC:
+ ret = ub960_set_fpd3_sync_mode_ub9702(priv, nport);
+ if (ret)
+ return ret;
- case RXPORT_MODE_CSI2_SYNC:
- case RXPORT_MODE_CSI2_NONSYNC:
+ dev_dbg(dev, "rx%u: FPD-Link III SYNC mode\n", nport);
+ break;
+ case RXPORT_MODE_RAW10:
+ ret = ub960_set_raw10_dvp_mode_ub9702(priv, nport);
+ if (ret)
+ return ret;
+ dev_dbg(dev, "rx%u: FPD-Link III RAW10 DVP mode\n",
+ nport);
+ break;
+ default:
+ dev_err(&priv->client->dev,
+ "rx%u: unsupported FPD3 mode %u\n", nport,
+ rxport->rx_mode);
+ return -EINVAL;
+ }
break;
+
+ default:
+ dev_err(&priv->client->dev, "rx%u: unsupported CDR mode %u\n",
+ nport, rxport->cdr_mode);
+ return -EINVAL;
}
- /* LV_POLARITY & FV_POLARITY */
- ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3,
- rxport->lv_fv_pol);
+ return 0;
+}
- /* Enable all interrupt sources from this port */
- ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_HI, 0x07);
- ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_LO, 0x7f);
+static int ub960_lock_recovery_ub9702(struct ub960_data *priv,
+ unsigned int nport)
+{
+ struct device *dev = &priv->client->dev;
+ /* Assumption: the AEQ level of a healthy link stays under 16 */
+ const u8 rx_aeq_limit = 16;
+ u8 prev_aeq = 0xff;
+ bool rx_lock;
+
+ for (unsigned int retry = 0; retry < 3; ++retry) {
+ u8 port_sts1;
+ u8 rx_aeq;
+ int ret;
- /* Enable I2C_PASS_THROUGH */
- ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
- UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
- UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1,
+ &port_sts1, NULL);
+ if (ret)
+ return ret;
- /* Enable I2C communication to the serializer via the alias addr */
- ub960_rxport_write(priv, nport, UB960_RR_SER_ALIAS_ID,
- rxport->ser.alias << 1);
+ rx_lock = port_sts1 & UB960_RR_RX_PORT_STS1_PORT_PASS;
- /* Enable RX port */
- ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport));
+ if (!rx_lock) {
+ ret = ub960_rxport_lockup_wa_ub9702(priv);
+ if (ret)
+ return ret;
+
+ /* Restart AEQ by toggling the max from 0 to 0x23 */
+ ret = ub960_write_ind(priv,
+ UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL7, 0,
+ NULL);
+ if (ret)
+ return ret;
+
+ msleep(20);
+
+ /* AEQ Restart */
+ ret = ub960_write_ind(priv,
+ UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL7,
+ 0x23, NULL);
+
+ if (ret)
+ return ret;
+
+ msleep(20);
+ dev_dbg(dev, "rx%u: no lock, retry = %u\n", nport,
+ retry);
+
+ continue;
+ }
+
+ ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL11, &rx_aeq,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (rx_aeq < rx_aeq_limit) {
+ dev_dbg(dev,
+ "rx%u: locked and AEQ normal before setting AEQ window\n",
+ nport);
+ return 0;
+ }
+
+ if (rx_aeq != prev_aeq) {
+ ret = ub960_rxport_lockup_wa_ub9702(priv);
+ if (ret)
+ return ret;
+
+ /* Restart AEQ by toggling the max from 0 to 0x23 */
+ ret = ub960_write_ind(priv,
+ UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL7,
+ 0, NULL);
+ if (ret)
+ return ret;
+
+ msleep(20);
- if (rxport->cdr_mode == RXPORT_CDR_FPD4) {
- /* unreset 960 AEQ */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x25, 0x41);
+ /* AEQ Restart */
+ ret = ub960_write_ind(priv,
+ UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL7,
+ 0x23, NULL);
+ if (ret)
+ return ret;
+
+ msleep(20);
+
+ dev_dbg(dev,
+ "rx%u: high AEQ at initial check recovery loop, retry=%u\n",
+ nport, retry);
+
+ prev_aeq = rx_aeq;
+ } else {
+ dev_dbg(dev,
+ "rx%u: lossy cable detected, RX_AEQ %#x, RX_AEQ_LIMIT %#x, retry %u\n",
+ nport, rx_aeq, rx_aeq_limit, retry);
+ dev_dbg(dev,
+ "rx%u: will continue with initiation sequence but high AEQ\n",
+ nport);
+ return 0;
+ }
}
+
+ dev_err(dev, "rx%u: max number of retries: %s\n", nport,
+ rx_lock ? "unstable AEQ" : "no lock");
+
+ return -EIO;
}
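The write-0 / sleep / write-0x23 sequence appears twice in the recovery loop above; a hypothetical helper that factors out the repeated AEQ restart (register names taken from this hunk):

static int ub960_aeq_restart_ub9702(struct ub960_data *priv,
				    unsigned int nport)
{
	int ret;

	ret = ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			      UB9702_IR_RX_ANA_AEQ_ALP_SEL7, 0, NULL);
	if (ret)
		return ret;

	msleep(20);

	/* Raising the max back to 0x23 restarts adaptation */
	ret = ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
			      UB9702_IR_RX_ANA_AEQ_ALP_SEL7, 0x23, NULL);
	if (ret)
		return ret;

	msleep(20);

	return 0;
}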
-static int ub960_init_rx_ports(struct ub960_data *priv)
+static int ub960_enable_aeq_lms_ub9702(struct ub960_data *priv,
+ unsigned int nport)
{
- unsigned int nport;
+ struct device *dev = &priv->client->dev;
+ u8 read_aeq_init;
+ int ret;
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
+ ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL11, &read_aeq_init,
+ NULL);
+ if (ret)
+ return ret;
- if (!rxport)
- continue;
+ dev_dbg(dev, "rx%u: initial AEQ = %#x\n", nport, read_aeq_init);
- if (priv->hw_data->is_ub9702)
- ub960_init_rx_port_ub9702(priv, rxport);
- else
- ub960_init_rx_port_ub960(priv, rxport);
+ /* Set AEQ Min */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL6, read_aeq_init, &ret);
+ /* Set AEQ Max */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL7, read_aeq_init + 1, &ret);
+ /* Set AEQ offset to 0 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL10, 0x0, &ret);
+
+ /* Enable AEQ tap2 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_EQ_CTRL_SEL_38, 0x00, &ret);
+ /* Set VGA Gain 1 Gain 2 override to 0 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_VGA_CTRL_SEL_8, 0x00, &ret);
+ /* Set VGA Initial Sweep Gain to 0 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_VGA_CTRL_SEL_6, 0x80, &ret);
+ /* Set VGA_Adapt (VGA Gain) override to 0 (thermometer encoded) */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_VGA_CTRL_SEL_3, 0x00, &ret);
+ /* Enable VGA_SWEEP */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_EQ_ADAPT_CTRL, 0x40, &ret);
+ /* Disable VGA_SWEEP_GAIN_OV, disable VGA_TUNE_OV */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_EQ_OVERRIDE_CTRL, 0x00, &ret);
+
+ /* Set VGA HIGH Threshold to 43 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_VGA_CTRL_SEL_1, 0x2b, &ret);
+ /* Set VGA LOW Threshold to 18 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_VGA_CTRL_SEL_2, 0x12, &ret);
+ /* Set vga_sweep_th to 32 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_EQ_CTRL_SEL_15, 0x20, &ret);
+ /* Set AEQ timer to 400us/step and parity threshold to 7 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_SYSTEM_INIT_REG0, 0xef, &ret);
+
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "rx%u: enable FPD-Link IV AEQ LMS\n", nport);
+
+ return 0;
+}
+
+static int ub960_enable_dfe_lms_ub9702(struct ub960_data *priv,
+ unsigned int nport)
+{
+ struct device *dev = &priv->client->dev;
+ int ret = 0;
+
+ /* Enable DFE LMS */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_EQ_CTRL_SEL_24, 0x40, &ret);
+ /* Disable VGA Gain1 override */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_GAIN_CTRL_0, 0x20, &ret);
+
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 5000);
+
+ /* Disable VGA Gain2 override */
+ ret = ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_GAIN_CTRL_0, 0x00, NULL);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "rx%u: enabled FPD-Link IV DFE LMS", nport);
+
+ return 0;
+}
+
+static int ub960_init_rx_ports_ub9702(struct ub960_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned int port_lock_mask;
+ unsigned int port_mask = 0;
+ bool have_fpd4 = false;
+ int ret;
+
+ for_each_active_rxport(priv, it) {
+ ret = ub960_rxport_update_bits(priv, it.nport,
+ UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_BC_ALWAYS_ON,
+ UB960_RR_BCC_CONFIG_BC_ALWAYS_ON,
+ NULL);
+ if (ret)
+ return ret;
+ }
+
+ /* Disable FPD4 Auto Recovery */
+ ret = ub960_write(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, 0x0f, NULL);
+ if (ret)
+ return ret;
+
+ for_each_active_rxport(priv, it) {
+ if (it.rxport->ser.addr >= 0) {
+ /*
+ * Set serializer's I2C address if set in the dts file,
+ * and freeze it to prevent updates from the FC.
+ */
+ ub960_rxport_write(priv, it.nport, UB960_RR_SER_ID,
+ it.rxport->ser.addr << 1 |
+ UB960_RR_SER_ID_FREEZE_DEVICE_ID,
+ &ret);
+ }
+
+ /* Set serializer I2C alias with auto-ack */
+ ub960_rxport_write(priv, it.nport, UB960_RR_SER_ALIAS_ID,
+ it.rxport->ser.alias << 1 |
+ UB960_RR_SER_ALIAS_ID_AUTO_ACK, &ret);
+
+ if (ret)
+ return ret;
+ }
+
+ for_each_active_rxport(priv, it) {
+ if (fwnode_device_is_compatible(it.rxport->ser.fwnode,
+ "ti,ds90ub971-q1")) {
+ ret = ub960_rxport_bc_ser_config(it.rxport);
+ if (ret)
+ return ret;
+ }
+ }
+
+ for_each_active_rxport_fpd4(priv, it) {
+ /* Hold state machine in reset */
+ ub960_rxport_write(priv, it.nport, UB9702_RR_RX_SM_SEL_2, 0x10,
+ &ret);
+
+ /* Set AEQ max to 0 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(it.nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL7, 0, &ret);
+
+ if (ret)
+ return ret;
+
+ dev_dbg(dev,
+ "rx%u: holding state machine and adjusting AEQ max to 0",
+ it.nport);
+ }
+
+ for_each_active_rxport(priv, it) {
+ port_mask |= BIT(it.nport);
+
+ if (it.rxport->cdr_mode == RXPORT_CDR_FPD4)
+ have_fpd4 = true;
+ }
+
+ for_each_rxport(priv, it) {
+ ret = ub960_configure_rx_port_ub9702(priv, it.nport);
+ if (ret)
+ return ret;
+ }
+
+ ret = ub960_reset(priv, false);
+ if (ret)
+ return ret;
+
+ if (have_fpd4) {
+ for_each_active_rxport_fpd4(priv, it) {
+ /* Release state machine */
+ ret = ub960_rxport_write(priv, it.nport,
+ UB9702_RR_RX_SM_SEL_2, 0x0,
+ NULL);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "rx%u: state machine released\n",
+ it.nport);
+ }
+
+ /* Wait for SM to resume */
+ fsleep(5000);
+
+ for_each_active_rxport_fpd4(priv, it) {
+ ret = ub960_write_ind(priv,
+ UB960_IND_TARGET_RX_ANA(it.nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL7,
+ 0x23, NULL);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "rx%u: AEQ restart\n", it.nport);
+ }
+
+ /* Wait for lock */
+ fsleep(20000);
+
+ for_each_active_rxport_fpd4(priv, it) {
+ ret = ub960_lock_recovery_ub9702(priv, it.nport);
+ if (ret)
+ return ret;
+ }
+
+ for_each_active_rxport_fpd4(priv, it) {
+ ret = ub960_enable_aeq_lms_ub9702(priv, it.nport);
+ if (ret)
+ return ret;
+ }
+
+ for_each_active_rxport_fpd4(priv, it) {
+ /* Hold state machine in reset */
+ ret = ub960_rxport_write(priv, it.nport,
+ UB9702_RR_RX_SM_SEL_2, 0x10,
+ NULL);
+ if (ret)
+ return ret;
+ }
+
+ ret = ub960_reset(priv, false);
+ if (ret)
+ return ret;
+
+ for_each_active_rxport_fpd4(priv, it) {
+ /* Release state machine */
+ ret = ub960_rxport_write(priv, it.nport,
+ UB9702_RR_RX_SM_SEL_2, 0,
+ NULL);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Wait time for stable lock */
+ fsleep(15000);
+
+ /* Set temperature ramp on serializer */
+ for_each_active_rxport(priv, it) {
+ ret = ub960_serializer_temp_ramp(it.rxport);
+ if (ret)
+ return ret;
+ }
+
+ for_each_active_rxport_fpd4(priv, it) {
+ ret = ub960_enable_dfe_lms_ub9702(priv, it.nport);
+ if (ret)
+ return ret;
+ }
+
+ /* Wait for the DFE LMS to adapt */
+ fsleep(5000);
+
+ ret = ub960_rxport_wait_locks(priv, port_mask, &port_lock_mask);
+ if (ret)
+ return ret;
+
+ if (port_mask != port_lock_mask) {
+ ret = -EIO;
+ dev_err_probe(dev, ret, "Failed to lock all RX ports\n");
+ return ret;
+ }
+
+ for_each_active_rxport(priv, it) {
+ /* Enable all interrupt sources from this port */
+ ub960_rxport_write(priv, it.nport, UB960_RR_PORT_ICR_HI, 0x07,
+ &ret);
+ ub960_rxport_write(priv, it.nport, UB960_RR_PORT_ICR_LO, 0x7f,
+ &ret);
+
+ /* Clear serializer I2C alias auto-ack */
+ ub960_rxport_update_bits(priv, it.nport, UB960_RR_SER_ALIAS_ID,
+ UB960_RR_SER_ALIAS_ID_AUTO_ACK, 0,
+ &ret);
+
+ /* Enable I2C_PASS_THROUGH */
+ ub960_rxport_update_bits(priv, it.nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
+ &ret);
+
+ if (ret)
+ return ret;
}
+ /* Enable FPD4 Auto Recovery, Recovery loop active */
+ ret = ub960_write(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, 0x18, NULL);
+ if (ret)
+ return ret;
+
+ for_each_active_rxport_fpd4(priv, it) {
+ u8 final_aeq;
+
+ ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(it.nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL11, &final_aeq,
+ NULL);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "rx%u: final AEQ = %#x\n", it.nport, final_aeq);
+ }
+
+ /*
+ * Clear any errors caused by switching the RX port settings while
+ * probing.
+ */
+
+ ret = ub960_clear_rx_errors(priv);
+ if (ret)
+ return ret;
+
return 0;
}
-static void ub960_rxport_handle_events(struct ub960_data *priv, u8 nport)
+static int ub960_rxport_handle_events(struct ub960_data *priv, u8 nport)
{
struct device *dev = &priv->client->dev;
u8 rx_port_sts1;
@@ -2194,27 +3282,21 @@ static void ub960_rxport_handle_events(struct ub960_data *priv, u8 nport)
int ret = 0;
/* Read interrupts (also clears most of them) */
- if (!ret)
- ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1,
- &rx_port_sts1);
- if (!ret)
- ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2,
- &rx_port_sts2);
- if (!ret)
- ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS,
- &csi_rx_sts);
- if (!ret)
- ret = ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS,
- &bcc_sts);
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &rx_port_sts1,
+ &ret);
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &rx_port_sts2,
+ &ret);
+ ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &csi_rx_sts, &ret);
+ ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &bcc_sts, &ret);
if (ret)
- return;
+ return ret;
if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_PARITY_ERROR) {
u16 v;
ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI,
- &v);
+ &v, NULL);
if (!ret)
dev_err(dev, "rx%u parity errors: %u\n", nport, v);
}
@@ -2273,7 +3355,8 @@ static void ub960_rxport_handle_events(struct ub960_data *priv, u8 nport)
if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_LINE_LEN_CHG) {
u16 v;
- ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_LEN_1, &v);
+ ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_LEN_1,
+ &v, NULL);
if (!ret)
dev_dbg(dev, "rx%u line len changed: %u\n", nport, v);
}
@@ -2282,7 +3365,7 @@ static void ub960_rxport_handle_events(struct ub960_data *priv, u8 nport)
u16 v;
ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_COUNT_HI,
- &v);
+ &v, NULL);
if (!ret)
dev_dbg(dev, "rx%u line count changed: %u\n", nport, v);
}
@@ -2302,6 +3385,8 @@ static void ub960_rxport_handle_events(struct ub960_data *priv, u8 nport)
"stable freq" :
"unstable freq");
}
+
+ return 0;
}
/* -----------------------------------------------------------------------------
@@ -2354,17 +3439,17 @@ static int ub960_enable_tx_port(struct ub960_data *priv, unsigned int nport)
return ub960_txport_update_bits(priv, nport, UB960_TR_CSI_CTL,
UB960_TR_CSI_CTL_CSI_ENABLE,
- UB960_TR_CSI_CTL_CSI_ENABLE);
+ UB960_TR_CSI_CTL_CSI_ENABLE, NULL);
}
-static void ub960_disable_tx_port(struct ub960_data *priv, unsigned int nport)
+static int ub960_disable_tx_port(struct ub960_data *priv, unsigned int nport)
{
struct device *dev = &priv->client->dev;
dev_dbg(dev, "disable TX port %u\n", nport);
- ub960_txport_update_bits(priv, nport, UB960_TR_CSI_CTL,
- UB960_TR_CSI_CTL_CSI_ENABLE, 0);
+ return ub960_txport_update_bits(priv, nport, UB960_TR_CSI_CTL,
+ UB960_TR_CSI_CTL_CSI_ENABLE, 0, NULL);
}
static int ub960_enable_rx_port(struct ub960_data *priv, unsigned int nport)
@@ -2375,19 +3460,19 @@ static int ub960_enable_rx_port(struct ub960_data *priv, unsigned int nport)
/* Enable forwarding */
return ub960_update_bits(priv, UB960_SR_FWD_CTL1,
- UB960_SR_FWD_CTL1_PORT_DIS(nport), 0);
+ UB960_SR_FWD_CTL1_PORT_DIS(nport), 0, NULL);
}
-static void ub960_disable_rx_port(struct ub960_data *priv, unsigned int nport)
+static int ub960_disable_rx_port(struct ub960_data *priv, unsigned int nport)
{
struct device *dev = &priv->client->dev;
dev_dbg(dev, "disable RX port %u\n", nport);
/* Disable forwarding */
- ub960_update_bits(priv, UB960_SR_FWD_CTL1,
- UB960_SR_FWD_CTL1_PORT_DIS(nport),
- UB960_SR_FWD_CTL1_PORT_DIS(nport));
+ return ub960_update_bits(priv, UB960_SR_FWD_CTL1,
+ UB960_SR_FWD_CTL1_PORT_DIS(nport),
+ UB960_SR_FWD_CTL1_PORT_DIS(nport), NULL);
}
/*
@@ -2396,20 +3481,14 @@ static void ub960_disable_rx_port(struct ub960_data *priv, unsigned int nport)
*/
static int ub960_validate_stream_vcs(struct ub960_data *priv)
{
- unsigned int nport;
- unsigned int i;
-
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
+ for_each_active_rxport(priv, it) {
struct v4l2_mbus_frame_desc desc;
int ret;
u8 vc;
- if (!rxport)
- continue;
-
- ret = v4l2_subdev_call(rxport->source.sd, pad, get_frame_desc,
- rxport->source.pad, &desc);
+ ret = v4l2_subdev_call(it.rxport->source.sd, pad,
+ get_frame_desc, it.rxport->source.pad,
+ &desc);
if (ret)
return ret;
@@ -2421,13 +3500,13 @@ static int ub960_validate_stream_vcs(struct ub960_data *priv)
vc = desc.entry[0].bus.csi2.vc;
- for (i = 1; i < desc.num_entries; i++) {
+ for (unsigned int i = 1; i < desc.num_entries; i++) {
if (vc == desc.entry[i].bus.csi2.vc)
continue;
dev_err(&priv->client->dev,
"rx%u: source with multiple virtual-channels is not supported\n",
- nport);
+ it.nport);
return -ENODEV;
}
}
@@ -2517,23 +3596,24 @@ static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
*/
fwd_ctl = GENMASK(7, 4);
- for (unsigned int nport = 0; nport < priv->hw_data->num_rxports;
- nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
+ for_each_active_rxport(priv, it) {
+ unsigned long nport = it.nport;
+
u8 vc = vc_map[nport];
if (rx_data[nport].num_streams == 0)
continue;
- switch (rxport->rx_mode) {
+ switch (it.rxport->rx_mode) {
case RXPORT_MODE_RAW10:
ub960_rxport_write(priv, nport, UB960_RR_RAW10_ID,
- rx_data[nport].pixel_dt | (vc << UB960_RR_RAW10_ID_VC_SHIFT));
+ rx_data[nport].pixel_dt | (vc << UB960_RR_RAW10_ID_VC_SHIFT),
+ &ret);
- ub960_rxport_write(priv, rxport->nport,
+ ub960_rxport_write(priv, nport,
UB960_RR_RAW_EMBED_DTYPE,
(rx_data[nport].meta_lines << UB960_RR_RAW_EMBED_DTYPE_LINES_SHIFT) |
- rx_data[nport].meta_dt);
+ rx_data[nport].meta_dt, &ret);
break;
@@ -2550,15 +3630,17 @@ static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
(vc << UB960_RR_CSI_VC_MAP_SHIFT(3)) |
(vc << UB960_RR_CSI_VC_MAP_SHIFT(2)) |
(vc << UB960_RR_CSI_VC_MAP_SHIFT(1)) |
- (vc << UB960_RR_CSI_VC_MAP_SHIFT(0)));
+ (vc << UB960_RR_CSI_VC_MAP_SHIFT(0)),
+ &ret);
} else {
unsigned int i;
/* Map all VCs from this port to VC(nport) */
for (i = 0; i < 8; i++)
ub960_rxport_write(priv, nport,
- UB960_RR_VC_ID_MAP(i),
- (nport << 4) | nport);
+ UB9702_RR_VC_ID_MAP(i),
+ (nport << 4) | nport,
+ &ret);
}
break;
@@ -2570,9 +3652,9 @@ static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
fwd_ctl &= ~BIT(nport); /* forward to TX0 */
}
- ub960_write(priv, UB960_SR_FWD_CTL1, fwd_ctl);
+ ub960_write(priv, UB960_SR_FWD_CTL1, fwd_ctl, &ret);
- return 0;
+ return ret;
}
static void ub960_update_streaming_status(struct ub960_data *priv)
@@ -2596,7 +3678,6 @@ static int ub960_enable_streams(struct v4l2_subdev *sd,
u64 sink_streams[UB960_MAX_RX_NPORTS] = {};
struct v4l2_subdev_route *route;
unsigned int failed_port;
- unsigned int nport;
int ret;
if (!priv->streaming) {
@@ -2618,6 +3699,8 @@ static int ub960_enable_streams(struct v4l2_subdev *sd,
/* Collect sink streams per pad which we need to enable */
for_each_active_route(&state->routing, route) {
+ unsigned int nport;
+
if (route->source_pad != source_pad)
continue;
@@ -2629,7 +3712,9 @@ static int ub960_enable_streams(struct v4l2_subdev *sd,
sink_streams[nport] |= BIT_ULL(route->sink_stream);
}
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ for_each_rxport(priv, it) {
+ unsigned int nport = it.nport;
+
if (!sink_streams[nport])
continue;
@@ -2667,7 +3752,7 @@ static int ub960_enable_streams(struct v4l2_subdev *sd,
return 0;
err:
- for (nport = 0; nport < failed_port; nport++) {
+ for (unsigned int nport = 0; nport < failed_port; nport++) {
if (!sink_streams[nport])
continue;
@@ -2707,11 +3792,12 @@ static int ub960_disable_streams(struct v4l2_subdev *sd,
struct device *dev = &priv->client->dev;
u64 sink_streams[UB960_MAX_RX_NPORTS] = {};
struct v4l2_subdev_route *route;
- unsigned int nport;
int ret;
/* Collect sink streams per pad which we need to disable */
for_each_active_route(&state->routing, route) {
+ unsigned int nport;
+
if (route->source_pad != source_pad)
continue;
@@ -2723,7 +3809,9 @@ static int ub960_disable_streams(struct v4l2_subdev *sd,
sink_streams[nport] |= BIT_ULL(route->sink_stream);
}
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ for_each_rxport(priv, it) {
+ unsigned int nport = it.nport;
+
if (!sink_streams[nport])
continue;
@@ -2975,8 +4063,8 @@ static const struct v4l2_subdev_pad_ops ub960_pad_ops = {
.set_fmt = ub960_set_fmt,
};
-static void ub960_log_status_ub960_sp_eq(struct ub960_data *priv,
- unsigned int nport)
+static int ub960_log_status_ub960_sp_eq(struct ub960_data *priv,
+ unsigned int nport)
{
struct device *dev = &priv->client->dev;
u8 eq_level;
@@ -2986,18 +4074,18 @@ static void ub960_log_status_ub960_sp_eq(struct ub960_data *priv,
/* Strobe */
- ret = ub960_read(priv, UB960_XR_AEQ_CTL1, &v);
+ ret = ub960_read(priv, UB960_XR_AEQ_CTL1, &v, NULL);
if (ret)
- return;
+ return ret;
dev_info(dev, "\t%s strobe\n",
(v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) ? "Adaptive" :
"Manual");
if (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) {
- ret = ub960_read(priv, UB960_XR_SFILTER_CFG, &v);
+ ret = ub960_read(priv, UB960_XR_SFILTER_CFG, &v, NULL);
if (ret)
- return;
+ return ret;
dev_info(dev, "\tStrobe range [%d, %d]\n",
((v >> UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) & 0xf) - 7,
@@ -3006,32 +4094,38 @@ static void ub960_log_status_ub960_sp_eq(struct ub960_data *priv,
ret = ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
if (ret)
- return;
+ return ret;
dev_info(dev, "\tStrobe pos %d\n", strobe_pos);
/* EQ */
- ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v, NULL);
if (ret)
- return;
+ return ret;
dev_info(dev, "\t%s EQ\n",
(v & UB960_RR_AEQ_BYPASS_ENABLE) ? "Manual" :
"Adaptive");
if (!(v & UB960_RR_AEQ_BYPASS_ENABLE)) {
- ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_MIN_MAX, &v);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_MIN_MAX, &v,
+ NULL);
if (ret)
- return;
+ return ret;
dev_info(dev, "\tEQ range [%u, %u]\n",
(v >> UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) & 0xf,
(v >> UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT) & 0xf);
}
- if (ub960_rxport_get_eq_level(priv, nport, &eq_level) == 0)
- dev_info(dev, "\tEQ level %u\n", eq_level);
+ ret = ub960_rxport_get_eq_level(priv, nport, &eq_level);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "\tEQ level %u\n", eq_level);
+
+ return 0;
}
static int ub960_log_status(struct v4l2_subdev *sd)
@@ -3039,19 +4133,23 @@ static int ub960_log_status(struct v4l2_subdev *sd)
struct ub960_data *priv = sd_to_ub960(sd);
struct device *dev = &priv->client->dev;
struct v4l2_subdev_state *state;
- unsigned int nport;
u16 v16 = 0;
u8 v = 0;
u8 id[UB960_SR_FPD3_RX_ID_LEN];
+ int ret = 0;
state = v4l2_subdev_lock_and_get_active_state(sd);
- for (unsigned int i = 0; i < sizeof(id); i++)
- ub960_read(priv, UB960_SR_FPD3_RX_ID(i), &id[i]);
+ for (unsigned int i = 0; i < sizeof(id); i++) {
+ ret = ub960_read(priv, UB960_SR_FPD3_RX_ID(i), &id[i], NULL);
+ if (ret)
+ return ret;
+ }
dev_info(dev, "ID '%.*s'\n", (int)sizeof(id), id);
- for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
+ for (unsigned int nport = 0; nport < priv->hw_data->num_txports;
+ nport++) {
struct ub960_txport *txport = priv->txports[nport];
dev_info(dev, "TX %u\n", nport);
@@ -3061,34 +4159,56 @@ static int ub960_log_status(struct v4l2_subdev *sd)
continue;
}
- ub960_txport_read(priv, nport, UB960_TR_CSI_STS, &v);
+ ret = ub960_txport_read(priv, nport, UB960_TR_CSI_STS, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tsync %u, pass %u\n", v & (u8)BIT(1),
v & (u8)BIT(0));
- ub960_read16(priv, UB960_SR_CSI_FRAME_COUNT_HI(nport), &v16);
+ ret = ub960_read16(priv, UB960_SR_CSI_FRAME_COUNT_HI(nport),
+ &v16, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tframe counter %u\n", v16);
- ub960_read16(priv, UB960_SR_CSI_FRAME_ERR_COUNT_HI(nport), &v16);
+ ret = ub960_read16(priv, UB960_SR_CSI_FRAME_ERR_COUNT_HI(nport),
+ &v16, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tframe error counter %u\n", v16);
- ub960_read16(priv, UB960_SR_CSI_LINE_COUNT_HI(nport), &v16);
+ ret = ub960_read16(priv, UB960_SR_CSI_LINE_COUNT_HI(nport),
+ &v16, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tline counter %u\n", v16);
- ub960_read16(priv, UB960_SR_CSI_LINE_ERR_COUNT_HI(nport), &v16);
+ ret = ub960_read16(priv, UB960_SR_CSI_LINE_ERR_COUNT_HI(nport),
+ &v16, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tline error counter %u\n", v16);
}
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
+ for_each_rxport(priv, it) {
+ unsigned int nport = it.nport;
dev_info(dev, "RX %u\n", nport);
- if (!rxport) {
+ if (!it.rxport) {
dev_info(dev, "\tNot initialized\n");
continue;
}
- ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v,
+ NULL);
+ if (ret)
+ return ret;
if (v & UB960_RR_RX_PORT_STS1_LOCK_STS)
dev_info(dev, "\tLocked\n");
@@ -3096,26 +4216,53 @@ static int ub960_log_status(struct v4l2_subdev *sd)
dev_info(dev, "\tNot locked\n");
dev_info(dev, "\trx_port_sts1 %#02x\n", v);
- ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v,
+ NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\trx_port_sts2 %#02x\n", v);
- ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH, &v16);
+ ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH,
+ &v16, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tlink freq %llu Hz\n", ((u64)v16 * HZ_PER_MHZ) >> 8);
- ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI, &v16);
+ ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI,
+ &v16, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tparity errors %u\n", v16);
- ub960_rxport_read16(priv, nport, UB960_RR_LINE_COUNT_HI, &v16);
+ ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_COUNT_HI,
+ &v16, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tlines per frame %u\n", v16);
- ub960_rxport_read16(priv, nport, UB960_RR_LINE_LEN_1, &v16);
+ ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_LEN_1,
+ &v16, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tbytes per line %u\n", v16);
- ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER,
+ &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tcsi_err_counter %u\n", v);
- if (!priv->hw_data->is_ub9702)
- ub960_log_status_ub960_sp_eq(priv, nport);
+ if (!priv->hw_data->is_ub9702) {
+ ret = ub960_log_status_ub960_sp_eq(priv, nport);
+ if (ret)
+ return ret;
+ }
/* GPIOs */
for (unsigned int i = 0; i < UB960_NUM_BC_GPIOS; i++) {
@@ -3125,7 +4272,9 @@ static int ub960_log_status(struct v4l2_subdev *sd)
ctl_reg = UB960_RR_BC_GPIO_CTL(i / 2);
ctl_shift = (i % 2) * 4;
- ub960_rxport_read(priv, nport, ctl_reg, &v);
+ ret = ub960_rxport_read(priv, nport, ctl_reg, &v, NULL);
+ if (ret)
+ return ret;
dev_info(dev, "\tGPIO%u: mode %u\n", i,
(v >> ctl_shift) & 0xf);
@@ -3163,34 +4312,36 @@ static const struct media_entity_operations ub960_entity_ops = {
static irqreturn_t ub960_handle_events(int irq, void *arg)
{
struct ub960_data *priv = arg;
- unsigned int i;
u8 int_sts;
u8 fwd_sts;
int ret;
- ret = ub960_read(priv, UB960_SR_INTERRUPT_STS, &int_sts);
+ ret = ub960_read(priv, UB960_SR_INTERRUPT_STS, &int_sts, NULL);
if (ret || !int_sts)
return IRQ_NONE;
dev_dbg(&priv->client->dev, "INTERRUPT_STS %x\n", int_sts);
- ret = ub960_read(priv, UB960_SR_FWD_STS, &fwd_sts);
+ ret = ub960_read(priv, UB960_SR_FWD_STS, &fwd_sts, NULL);
if (ret)
return IRQ_NONE;
dev_dbg(&priv->client->dev, "FWD_STS %#02x\n", fwd_sts);
- for (i = 0; i < priv->hw_data->num_txports; i++) {
- if (int_sts & UB960_SR_INTERRUPT_STS_IS_CSI_TX(i))
- ub960_csi_handle_events(priv, i);
+ for (unsigned int i = 0; i < priv->hw_data->num_txports; i++) {
+ if (int_sts & UB960_SR_INTERRUPT_STS_IS_CSI_TX(i)) {
+ ret = ub960_csi_handle_events(priv, i);
+ if (ret)
+ return IRQ_NONE;
+ }
}
- for (i = 0; i < priv->hw_data->num_rxports; i++) {
- if (!priv->rxports[i])
- continue;
-
- if (int_sts & UB960_SR_INTERRUPT_STS_IS_RX(i))
- ub960_rxport_handle_events(priv, i);
+ for_each_active_rxport(priv, it) {
+ if (int_sts & UB960_SR_INTERRUPT_STS_IS_RX(it.nport)) {
+ ret = ub960_rxport_handle_events(priv, it.nport);
+ if (ret)
+ return IRQ_NONE;
+ }
}
return IRQ_HANDLED;
@@ -3225,19 +4376,14 @@ static void ub960_txport_free_ports(struct ub960_data *priv)
static void ub960_rxport_free_ports(struct ub960_data *priv)
{
- unsigned int nport;
+ for_each_active_rxport(priv, it) {
+ fwnode_handle_put(it.rxport->source.ep_fwnode);
+ fwnode_handle_put(it.rxport->ser.fwnode);
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
+ mutex_destroy(&it.rxport->aliased_addrs_lock);
- if (!rxport)
- continue;
-
- fwnode_handle_put(rxport->source.ep_fwnode);
- fwnode_handle_put(rxport->ser.fwnode);
-
- kfree(rxport);
- priv->rxports[nport] = NULL;
+ kfree(it.rxport);
+ priv->rxports[it.nport] = NULL;
}
}
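
For readers tracking the conversion: a minimal sketch of what the new for_each_active_rxport() iterator could expand to. The real definition is introduced elsewhere in this patch and may differ; the iterator struct shown here is an assumption, and a production macro would also guard against the dangling-else hazard.

struct ub960_rxport_iter {
	unsigned int nport;
	struct ub960_rxport *rxport;
};

/* Visit each populated RX port, skipping NULL slots in priv->rxports[]. */
#define for_each_active_rxport(priv, it)				\
	for (struct ub960_rxport_iter it = { .nport = 0 };		\
	     it.nport < (priv)->hw_data->num_rxports; it.nport++)	\
		if ((it.rxport = (priv)->rxports[it.nport]))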
@@ -3253,6 +4399,7 @@ ub960_parse_dt_rxport_link_properties(struct ub960_data *priv,
s32 strobe_pos;
u32 eq_level;
u32 ser_i2c_alias;
+ u32 ser_i2c_addr;
int ret;
cdr_mode = RXPORT_CDR_FPD3;
@@ -3364,6 +4511,13 @@ ub960_parse_dt_rxport_link_properties(struct ub960_data *priv,
return -EINVAL;
}
+ ret = fwnode_property_read_u32(rxport->ser.fwnode, "reg",
+ &ser_i2c_addr);
+ if (ret)
+ rxport->ser.addr = -EINVAL;
+ else
+ rxport->ser.addr = ser_i2c_addr;
+
return 0;
}
@@ -3456,6 +4610,8 @@ static int ub960_parse_dt_rxport(struct ub960_data *priv, unsigned int nport,
if (ret)
goto err_put_remote_fwnode;
+ mutex_init(&rxport->aliased_addrs_lock);
+
return 0;
err_put_remote_fwnode:
@@ -3496,7 +4652,6 @@ static int ub960_parse_dt_rxports(struct ub960_data *priv)
{
struct device *dev = &priv->client->dev;
struct fwnode_handle *links_fwnode;
- unsigned int nport;
int ret;
links_fwnode = fwnode_get_named_child_node(dev_fwnode(dev), "links");
@@ -3511,9 +4666,10 @@ static int ub960_parse_dt_rxports(struct ub960_data *priv)
priv->strobe.manual = fwnode_property_read_bool(links_fwnode, "ti,manual-strobe");
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ for_each_rxport(priv, it) {
struct fwnode_handle *link_fwnode;
struct fwnode_handle *ep_fwnode;
+ unsigned int nport = it.nport;
link_fwnode = ub960_fwnode_get_link_by_regs(links_fwnode, nport);
if (!link_fwnode)
@@ -3602,7 +4758,6 @@ static int ub960_notify_bound(struct v4l2_async_notifier *notifier,
struct ub960_rxport *rxport = to_ub960_asd(asd)->rxport;
struct device *dev = &priv->client->dev;
u8 nport = rxport->nport;
- unsigned int i;
int ret;
ret = media_entity_get_fwnode_pad(&subdev->entity,
@@ -3627,8 +4782,8 @@ static int ub960_notify_bound(struct v4l2_async_notifier *notifier,
return ret;
}
- for (i = 0; i < priv->hw_data->num_rxports; i++) {
- if (priv->rxports[i] && !priv->rxports[i]->source.sd) {
+ for_each_active_rxport(priv, it) {
+ if (!it.rxport->source.sd) {
dev_dbg(dev, "Waiting for more subdevs to be bound\n");
return 0;
}
@@ -3654,29 +4809,24 @@ static const struct v4l2_async_notifier_operations ub960_notify_ops = {
static int ub960_v4l2_notifier_register(struct ub960_data *priv)
{
struct device *dev = &priv->client->dev;
- unsigned int i;
int ret;
v4l2_async_subdev_nf_init(&priv->notifier, &priv->sd);
- for (i = 0; i < priv->hw_data->num_rxports; i++) {
- struct ub960_rxport *rxport = priv->rxports[i];
+ for_each_active_rxport(priv, it) {
struct ub960_asd *asd;
- if (!rxport)
- continue;
-
asd = v4l2_async_nf_add_fwnode(&priv->notifier,
- rxport->source.ep_fwnode,
+ it.rxport->source.ep_fwnode,
struct ub960_asd);
if (IS_ERR(asd)) {
dev_err(dev, "Failed to add subdev for source %u: %pe",
- i, asd);
+ it.nport, asd);
v4l2_async_nf_cleanup(&priv->notifier);
return PTR_ERR(asd);
}
- asd->rxport = rxport;
+ asd->rxport = it.rxport;
}
priv->notifier.ops = &ub960_notify_ops;
@@ -3794,29 +4944,6 @@ static const struct regmap_config ub960_regmap_config = {
.disable_locking = true,
};
-static void ub960_reset(struct ub960_data *priv, bool reset_regs)
-{
- struct device *dev = &priv->client->dev;
- unsigned int v;
- int ret;
- u8 bit;
-
- bit = reset_regs ? UB960_SR_RESET_DIGITAL_RESET1 :
- UB960_SR_RESET_DIGITAL_RESET0;
-
- ub960_write(priv, UB960_SR_RESET, bit);
-
- mutex_lock(&priv->reg_lock);
-
- ret = regmap_read_poll_timeout(priv->regmap, UB960_SR_RESET, v,
- (v & bit) == 0, 2000, 100000);
-
- mutex_unlock(&priv->reg_lock);
-
- if (ret)
- dev_err(dev, "reset failed: %d\n", ret);
-}
-
static int ub960_get_hw_resources(struct ub960_data *priv)
{
struct device *dev = &priv->client->dev;
@@ -3873,10 +5000,12 @@ static int ub960_enable_core_hw(struct ub960_data *priv)
fsleep(2000);
}
- ub960_reset(priv, true);
+ ret = ub960_reset(priv, true);
+ if (ret)
+ goto err_pd_gpio;
/* Runtime check register accessibility */
- ret = ub960_read(priv, UB960_SR_REV_MASK, &rev_mask);
+ ret = ub960_read(priv, UB960_SR_REV_MASK, &rev_mask, NULL);
if (ret) {
dev_err_probe(dev, ret, "Cannot read first register, abort\n");
goto err_pd_gpio;
@@ -3885,14 +5014,16 @@ static int ub960_enable_core_hw(struct ub960_data *priv)
dev_dbg(dev, "Found %s (rev/mask %#04x)\n", priv->hw_data->model,
rev_mask);
- ret = ub960_read(priv, UB960_SR_DEVICE_STS, &dev_sts);
+ ret = ub960_read(priv, UB960_SR_DEVICE_STS, &dev_sts, NULL);
if (ret)
goto err_pd_gpio;
if (priv->hw_data->is_ub9702)
- ret = ub960_read(priv, UB9702_SR_REFCLK_FREQ, &refclk_freq);
+ ret = ub960_read(priv, UB9702_SR_REFCLK_FREQ, &refclk_freq,
+ NULL);
else
- ret = ub960_read(priv, UB960_XR_REFCLK_FREQ, &refclk_freq);
+ ret = ub960_read(priv, UB960_XR_REFCLK_FREQ, &refclk_freq,
+ NULL);
if (ret)
goto err_pd_gpio;
@@ -3901,7 +5032,7 @@ static int ub960_enable_core_hw(struct ub960_data *priv)
clk_get_rate(priv->refclk) / HZ_PER_MHZ);
/* Disable all RX ports by default */
- ret = ub960_write(priv, UB960_SR_RX_PORT_CTL, 0);
+ ret = ub960_write(priv, UB960_SR_RX_PORT_CTL, 0, NULL);
if (ret)
goto err_pd_gpio;
@@ -3909,7 +5040,8 @@ static int ub960_enable_core_hw(struct ub960_data *priv)
if (priv->hw_data->is_ub9702) {
ret = ub960_update_bits(priv, UB960_SR_RESET,
UB960_SR_RESET_GPIO_LOCK_RELEASE,
- UB960_SR_RESET_GPIO_LOCK_RELEASE);
+ UB960_SR_RESET_GPIO_LOCK_RELEASE,
+ NULL);
if (ret)
goto err_pd_gpio;
}
@@ -3936,9 +5068,6 @@ static int ub960_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ub960_data *priv;
- unsigned int port_lock_mask;
- unsigned int port_mask;
- unsigned int nport;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -3981,39 +5110,14 @@ static int ub960_probe(struct i2c_client *client)
if (ret)
goto err_free_ports;
- ret = ub960_init_rx_ports(priv);
- if (ret)
- goto err_disable_vpocs;
-
- ub960_reset(priv, false);
-
- port_mask = 0;
-
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
-
- if (!rxport)
- continue;
-
- port_mask |= BIT(nport);
- }
+ if (priv->hw_data->is_ub9702)
+ ret = ub960_init_rx_ports_ub9702(priv);
+ else
+ ret = ub960_init_rx_ports_ub960(priv);
- ret = ub960_rxport_wait_locks(priv, port_mask, &port_lock_mask);
if (ret)
goto err_disable_vpocs;
- if (port_mask != port_lock_mask) {
- ret = -EIO;
- dev_err_probe(dev, ret, "Failed to lock all RX ports\n");
- goto err_disable_vpocs;
- }
-
- /*
- * Clear any errors caused by switching the RX port settings while
- * probing.
- */
- ub960_clear_rx_errors(priv);
-
ret = ub960_init_atr(priv);
if (ret)
goto err_disable_vpocs;
@@ -4033,9 +5137,9 @@ static int ub960_probe(struct i2c_client *client)
msecs_to_jiffies(UB960_POLL_TIME_MS));
#ifdef UB960_DEBUG_I2C_RX_ID
- for (unsigned int i = 0; i < priv->hw_data->num_rxports; i++)
- ub960_write(priv, UB960_SR_I2C_RX_ID(i),
- (UB960_DEBUG_I2C_RX_ID + i) << 1);
+ for_each_rxport(priv, it)
+ ub960_write(priv, UB960_SR_I2C_RX_ID(it.nport),
+ (UB960_DEBUG_I2C_RX_ID + it.nport) << 1, NULL);
#endif
return 0;
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index 04262bbf6306..3b4f68543342 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -718,9 +718,11 @@ static int imx219_configure_lanes(struct imx219 *imx219)
ARRAY_SIZE(imx219_4lane_regs), NULL);
};
-static int imx219_start_streaming(struct imx219 *imx219,
- struct v4l2_subdev_state *state)
+static int imx219_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
{
+ struct imx219 *imx219 = to_imx219(sd);
struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
int ret;
@@ -769,12 +771,16 @@ static int imx219_start_streaming(struct imx219 *imx219,
return 0;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
return ret;
}
-static void imx219_stop_streaming(struct imx219 *imx219)
+static int imx219_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
{
+ struct imx219 *imx219 = to_imx219(sd);
struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
int ret;
@@ -787,23 +793,9 @@ static void imx219_stop_streaming(struct imx219 *imx219)
__v4l2_ctrl_grab(imx219->vflip, false);
__v4l2_ctrl_grab(imx219->hflip, false);
- pm_runtime_put(&client->dev);
-}
-
-static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
-{
- struct imx219 *imx219 = to_imx219(sd);
- struct v4l2_subdev_state *state;
- int ret = 0;
-
- state = v4l2_subdev_lock_and_get_active_state(sd);
-
- if (enable)
- ret = imx219_start_streaming(imx219, state);
- else
- imx219_stop_streaming(imx219);
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
- v4l2_subdev_unlock_state(state);
return ret;
}
@@ -995,7 +987,7 @@ static int imx219_init_state(struct v4l2_subdev *sd,
}
static const struct v4l2_subdev_video_ops imx219_video_ops = {
- .s_stream = imx219_set_stream,
+ .s_stream = v4l2_subdev_s_stream_helper,
};
static const struct v4l2_subdev_pad_ops imx219_pad_ops = {
@@ -1004,6 +996,8 @@ static const struct v4l2_subdev_pad_ops imx219_pad_ops = {
.set_fmt = imx219_set_pad_format,
.get_selection = imx219_get_selection,
.enum_frame_size = imx219_enum_frame_size,
+ .enable_streams = imx219_enable_streams,
+ .disable_streams = imx219_disable_streams,
};
static const struct v4l2_subdev_ops imx219_subdev_ops = {
@@ -1280,6 +1274,8 @@ static int imx219_probe(struct i2c_client *client)
}
pm_runtime_idle(dev);
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
return 0;
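
The probe and put changes above adopt runtime-PM autosuspend so rapid stream stop/start cycles do not power-cycle the sensor. A generic sketch of the pattern (not imx219-specific):

#include <linux/pm_runtime.h>

/* Probe side: keep the device powered for 1 s of inactivity. */
static void example_enable_autosuspend(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 1000);	/* milliseconds */
	pm_runtime_use_autosuspend(dev);
}

/* Idle side: restart the inactivity timer, then drop the reference. */
static void example_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}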
diff --git a/drivers/media/i2c/imx283.c b/drivers/media/i2c/imx283.c
index beb9169f93ad..da618c8cbadc 100644
--- a/drivers/media/i2c/imx283.c
+++ b/drivers/media/i2c/imx283.c
@@ -1082,7 +1082,7 @@ static int imx283_start_streaming(struct imx283 *imx283,
cci_write(imx283->cci, IMX283_REG_SVR, 0x00, &ret);
dev_dbg(imx283->dev, "Mode: Size %d x %d\n", mode->width, mode->height);
- dev_dbg(imx283->dev, "Analogue Crop (in the mode) %d,%d %dx%d\n",
+ dev_dbg(imx283->dev, "Analogue Crop (in the mode) (%d,%d)/%ux%u\n",
mode->crop.left,
mode->crop.top,
mode->crop.width,
diff --git a/drivers/media/i2c/imx334.c b/drivers/media/i2c/imx334.c
index a544fc3df39c..846b9928d4e8 100644
--- a/drivers/media/i2c/imx334.c
+++ b/drivers/media/i2c/imx334.c
@@ -12,77 +12,125 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
/* Streaming Mode */
-#define IMX334_REG_MODE_SELECT 0x3000
-#define IMX334_MODE_STANDBY 0x01
-#define IMX334_MODE_STREAMING 0x00
+#define IMX334_REG_MODE_SELECT CCI_REG8(0x3000)
+#define IMX334_MODE_STANDBY 0x01
+#define IMX334_MODE_STREAMING 0x00
/* Lines per frame */
-#define IMX334_REG_LPFR 0x3030
+#define IMX334_REG_VMAX CCI_REG24_LE(0x3030)
+
+#define IMX334_REG_HMAX CCI_REG16_LE(0x3034)
+
+#define IMX334_REG_OPB_SIZE_V CCI_REG8(0x304c)
+#define IMX334_REG_ADBIT CCI_REG8(0x3050)
+#define IMX334_REG_MDBIT CCI_REG8(0x319d)
+#define IMX334_REG_ADBIT1 CCI_REG16_LE(0x341c)
+#define IMX334_REG_Y_OUT_SIZE CCI_REG16_LE(0x3308)
+#define IMX334_REG_XVS_XHS_OUTSEL CCI_REG8(0x31a0)
+#define IMX334_REG_XVS_XHS_DRV CCI_REG8(0x31a1)
/* Chip ID */
-#define IMX334_REG_ID 0x3044
-#define IMX334_ID 0x1e
+#define IMX334_REG_ID CCI_REG8(0x3044)
+#define IMX334_ID 0x1e
/* Exposure control */
-#define IMX334_REG_SHUTTER 0x3058
-#define IMX334_EXPOSURE_MIN 1
-#define IMX334_EXPOSURE_OFFSET 5
-#define IMX334_EXPOSURE_STEP 1
-#define IMX334_EXPOSURE_DEFAULT 0x0648
+#define IMX334_REG_SHUTTER CCI_REG24_LE(0x3058)
+#define IMX334_EXPOSURE_MIN 1
+#define IMX334_EXPOSURE_OFFSET 5
+#define IMX334_EXPOSURE_STEP 1
+#define IMX334_EXPOSURE_DEFAULT 0x0648
+
+#define IMX334_REG_LANEMODE CCI_REG8(0x3a01)
+#define IMX334_CSI_4_LANE_MODE 3
+#define IMX334_CSI_8_LANE_MODE 7
+
+/* Window cropping settings */
+#define IMX334_REG_AREA3_ST_ADR_1 CCI_REG16_LE(0x3074)
+#define IMX334_REG_AREA3_ST_ADR_2 CCI_REG16_LE(0x308e)
+#define IMX334_REG_UNREAD_PARAM5 CCI_REG16_LE(0x30b6)
+#define IMX334_REG_AREA3_WIDTH_1 CCI_REG16_LE(0x3076)
+#define IMX334_REG_AREA3_WIDTH_2 CCI_REG16_LE(0x3090)
+#define IMX334_REG_BLACK_OFFSET_ADR CCI_REG16_LE(0x30c6)
+#define IMX334_REG_UNRD_LINE_MAX CCI_REG16_LE(0x30ce)
+#define IMX334_REG_UNREAD_ED_ADR CCI_REG16_LE(0x30d8)
+#define IMX334_REG_UNREAD_PARAM6 CCI_REG16_LE(0x3116)
+
+#define IMX334_REG_VREVERSE CCI_REG8(0x304f)
+#define IMX334_REG_HREVERSE CCI_REG8(0x304e)
+
+/* Binning Settings */
+#define IMX334_REG_HADD_VADD CCI_REG8(0x3199)
+#define IMX334_REG_VALID_EXPAND CCI_REG8(0x31dd)
+#define IMX334_REG_TCYCLE CCI_REG8(0x3300)
/* Analog gain control */
-#define IMX334_REG_AGAIN 0x30e8
-#define IMX334_AGAIN_MIN 0
-#define IMX334_AGAIN_MAX 240
-#define IMX334_AGAIN_STEP 1
-#define IMX334_AGAIN_DEFAULT 0
+#define IMX334_REG_AGAIN CCI_REG16_LE(0x30e8)
+#define IMX334_AGAIN_MIN 0
+#define IMX334_AGAIN_MAX 240
+#define IMX334_AGAIN_STEP 1
+#define IMX334_AGAIN_DEFAULT 0
/* Group hold register */
-#define IMX334_REG_HOLD 0x3001
+#define IMX334_REG_HOLD CCI_REG8(0x3001)
+
+#define IMX334_REG_MASTER_MODE CCI_REG8(0x3002)
+#define IMX334_REG_WINMODE CCI_REG8(0x3018)
+#define IMX334_REG_HTRIMMING_START CCI_REG16_LE(0x302c)
+#define IMX334_REG_HNUM CCI_REG16_LE(0x302e)
/* Input clock rate */
-#define IMX334_INCLK_RATE 24000000
+#define IMX334_INCLK_RATE 24000000
+
+/* INCK Setting Register */
+#define IMX334_REG_BCWAIT_TIME CCI_REG8(0x300c)
+#define IMX334_REG_CPWAIT_TIME CCI_REG8(0x300d)
+#define IMX334_REG_INCKSEL1 CCI_REG16_LE(0x314c)
+#define IMX334_REG_INCKSEL2 CCI_REG8(0x315a)
+#define IMX334_REG_INCKSEL3 CCI_REG8(0x3168)
+#define IMX334_REG_INCKSEL4 CCI_REG8(0x316a)
+#define IMX334_REG_SYS_MODE CCI_REG8(0x319e)
+
+#define IMX334_REG_TCLKPOST CCI_REG16_LE(0x3a18)
+#define IMX334_REG_TCLKPREPARE CCI_REG16_LE(0x3a1a)
+#define IMX334_REG_TCLKTRAIL CCI_REG16_LE(0x3a1c)
+#define IMX334_REG_TCLKZERO CCI_REG16_LE(0x3a1e)
+#define IMX334_REG_THSPREPARE CCI_REG16_LE(0x3a20)
+#define IMX334_REG_THSZERO CCI_REG16_LE(0x3a22)
+#define IMX334_REG_THSTRAIL CCI_REG16_LE(0x3a24)
+#define IMX334_REG_THSEXIT CCI_REG16_LE(0x3a26)
+#define IMX334_REG_TPLX CCI_REG16_LE(0x3a28)
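
These CCI_REG*() wrappers fold the register width and byte order into the address constant, which is what allows the mode tables below to collapse the old byte-at-a-time writes into single entries. A hedged illustration using the VMAX define:

/*
 * Old style: three 8-bit writes, 0x3030 = 0xca, 0x3031 = 0x08, 0x3032 = 0x00.
 * New style: one call; cci_write() derives a 3-byte little-endian transfer
 * from the CCI_REG24_LE() encoding carried by IMX334_REG_VMAX.
 */
cci_write(imx334->cci, IMX334_REG_VMAX, 0x0008ca, NULL);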
/* CSI2 HW configuration */
-#define IMX334_LINK_FREQ_891M 891000000
-#define IMX334_LINK_FREQ_445M 445500000
-#define IMX334_NUM_DATA_LANES 4
+#define IMX334_LINK_FREQ_891M 891000000
+#define IMX334_LINK_FREQ_445M 445500000
+#define IMX334_NUM_DATA_LANES 4
-#define IMX334_REG_MIN 0x00
-#define IMX334_REG_MAX 0xfffff
+#define IMX334_REG_MIN 0x00
+#define IMX334_REG_MAX 0xfffff
/* Test Pattern Control */
-#define IMX334_REG_TP 0x329e
-#define IMX334_TP_COLOR_HBARS 0xA
-#define IMX334_TP_COLOR_VBARS 0xB
+#define IMX334_REG_TP CCI_REG8(0x329e)
+#define IMX334_TP_COLOR_HBARS 0xa
+#define IMX334_TP_COLOR_VBARS 0xb
-#define IMX334_TPG_EN_DOUT 0x329c
-#define IMX334_TP_ENABLE 0x1
-#define IMX334_TP_DISABLE 0x0
+#define IMX334_TPG_EN_DOUT CCI_REG8(0x329c)
+#define IMX334_TP_ENABLE 0x1
+#define IMX334_TP_DISABLE 0x0
-#define IMX334_TPG_COLORW 0x32a0
-#define IMX334_TPG_COLORW_120P 0x13
+#define IMX334_TPG_COLORW CCI_REG8(0x32a0)
+#define IMX334_TPG_COLORW_120P 0x13
-#define IMX334_TP_CLK_EN 0x3148
-#define IMX334_TP_CLK_EN_VAL 0x10
-#define IMX334_TP_CLK_DIS_VAL 0x0
+#define IMX334_TP_CLK_EN CCI_REG8(0x3148)
+#define IMX334_TP_CLK_EN_VAL 0x10
+#define IMX334_TP_CLK_DIS_VAL 0x0
-#define IMX334_DIG_CLP_MODE 0x3280
-
-/**
- * struct imx334_reg - imx334 sensor register
- * @address: Register address
- * @val: Register value
- */
-struct imx334_reg {
- u16 address;
- u8 val;
-};
+#define IMX334_DIG_CLP_MODE CCI_REG8(0x3280)
/**
* struct imx334_reg_list - imx334 sensor register list
@@ -91,7 +139,7 @@ struct imx334_reg {
*/
struct imx334_reg_list {
u32 num_of_regs;
- const struct imx334_reg *regs;
+ const struct cci_reg_sequence *regs;
};
/**
@@ -121,6 +169,7 @@ struct imx334_mode {
/**
* struct imx334 - imx334 sensor device structure
* @dev: Pointer to generic device
+ * @cci: CCI register map
* @client: Pointer to i2c client
* @sd: V4L2 sub-device
* @pad: Media pad. Only one pad supported
@@ -135,12 +184,12 @@ struct imx334_mode {
* @again_ctrl: Pointer to analog gain control
* @vblank: Vertical blanking in lines
* @cur_mode: Pointer to current selected sensor mode
- * @mutex: Mutex for serializing sensor controls
* @link_freq_bitmap: Menu bitmap for link_freq_ctrl
* @cur_code: current selected format code
*/
struct imx334 {
struct device *dev;
+ struct regmap *cci;
struct i2c_client *client;
struct v4l2_subdev sd;
struct media_pad pad;
@@ -157,7 +206,6 @@ struct imx334 {
};
u32 vblank;
const struct imx334_mode *cur_mode;
- struct mutex mutex;
unsigned long link_freq_bitmap;
u32 cur_code;
};
@@ -167,283 +215,183 @@ static const s64 link_freq[] = {
IMX334_LINK_FREQ_445M,
};
+/* Sensor common mode registers values */
+static const struct cci_reg_sequence common_mode_regs[] = {
+ { IMX334_REG_MODE_SELECT, IMX334_MODE_STANDBY },
+ { IMX334_REG_WINMODE, 0x04 },
+ { IMX334_REG_VMAX, 0x0008ca },
+ { IMX334_REG_HMAX, 0x044c },
+ { IMX334_REG_BLACK_OFFSET_ADR, 0x0000 },
+ { IMX334_REG_UNRD_LINE_MAX, 0x0000 },
+ { IMX334_REG_OPB_SIZE_V, 0x00 },
+ { IMX334_REG_HREVERSE, 0x00 },
+ { IMX334_REG_VREVERSE, 0x00 },
+ { IMX334_REG_UNREAD_PARAM5, 0x0000 },
+ { IMX334_REG_UNREAD_PARAM6, 0x0008 },
+ { IMX334_REG_XVS_XHS_OUTSEL, 0x20 },
+ { IMX334_REG_XVS_XHS_DRV, 0x0f },
+ { IMX334_REG_BCWAIT_TIME, 0x3b },
+ { IMX334_REG_CPWAIT_TIME, 0x2a },
+ { IMX334_REG_INCKSEL1, 0x0129 },
+ { IMX334_REG_INCKSEL2, 0x06 },
+ { IMX334_REG_INCKSEL3, 0xa0 },
+ { IMX334_REG_INCKSEL4, 0x7e },
+ { IMX334_REG_SYS_MODE, 0x02 },
+ { IMX334_REG_HADD_VADD, 0x00 },
+ { IMX334_REG_VALID_EXPAND, 0x03 },
+ { IMX334_REG_TCYCLE, 0x00 },
+ { IMX334_REG_TCLKPOST, 0x007f },
+ { IMX334_REG_TCLKPREPARE, 0x0037 },
+ { IMX334_REG_TCLKTRAIL, 0x0037 },
+ { IMX334_REG_TCLKZERO, 0x00f7 },
+ { IMX334_REG_THSPREPARE, 0x002f },
+ { CCI_REG8(0x3078), 0x02 },
+ { CCI_REG8(0x3079), 0x00 },
+ { CCI_REG8(0x307a), 0x00 },
+ { CCI_REG8(0x307b), 0x00 },
+ { CCI_REG8(0x3080), 0x02 },
+ { CCI_REG8(0x3081), 0x00 },
+ { CCI_REG8(0x3082), 0x00 },
+ { CCI_REG8(0x3083), 0x00 },
+ { CCI_REG8(0x3088), 0x02 },
+ { CCI_REG8(0x3094), 0x00 },
+ { CCI_REG8(0x3095), 0x00 },
+ { CCI_REG8(0x3096), 0x00 },
+ { CCI_REG8(0x309b), 0x02 },
+ { CCI_REG8(0x309c), 0x00 },
+ { CCI_REG8(0x309d), 0x00 },
+ { CCI_REG8(0x309e), 0x00 },
+ { CCI_REG8(0x30a4), 0x00 },
+ { CCI_REG8(0x30a5), 0x00 },
+ { CCI_REG8(0x3288), 0x21 },
+ { CCI_REG8(0x328a), 0x02 },
+ { CCI_REG8(0x3414), 0x05 },
+ { CCI_REG8(0x3416), 0x18 },
+ { CCI_REG8(0x35ac), 0x0e },
+ { CCI_REG8(0x3648), 0x01 },
+ { CCI_REG8(0x364a), 0x04 },
+ { CCI_REG8(0x364c), 0x04 },
+ { CCI_REG8(0x3678), 0x01 },
+ { CCI_REG8(0x367c), 0x31 },
+ { CCI_REG8(0x367e), 0x31 },
+ { CCI_REG8(0x3708), 0x02 },
+ { CCI_REG8(0x3714), 0x01 },
+ { CCI_REG8(0x3715), 0x02 },
+ { CCI_REG8(0x3716), 0x02 },
+ { CCI_REG8(0x3717), 0x02 },
+ { CCI_REG8(0x371c), 0x3d },
+ { CCI_REG8(0x371d), 0x3f },
+ { CCI_REG8(0x372c), 0x00 },
+ { CCI_REG8(0x372d), 0x00 },
+ { CCI_REG8(0x372e), 0x46 },
+ { CCI_REG8(0x372f), 0x00 },
+ { CCI_REG8(0x3730), 0x89 },
+ { CCI_REG8(0x3731), 0x00 },
+ { CCI_REG8(0x3732), 0x08 },
+ { CCI_REG8(0x3733), 0x01 },
+ { CCI_REG8(0x3734), 0xfe },
+ { CCI_REG8(0x3735), 0x05 },
+ { CCI_REG8(0x375d), 0x00 },
+ { CCI_REG8(0x375e), 0x00 },
+ { CCI_REG8(0x375f), 0x61 },
+ { CCI_REG8(0x3760), 0x06 },
+ { CCI_REG8(0x3768), 0x1b },
+ { CCI_REG8(0x3769), 0x1b },
+ { CCI_REG8(0x376a), 0x1a },
+ { CCI_REG8(0x376b), 0x19 },
+ { CCI_REG8(0x376c), 0x18 },
+ { CCI_REG8(0x376d), 0x14 },
+ { CCI_REG8(0x376e), 0x0f },
+ { CCI_REG8(0x3776), 0x00 },
+ { CCI_REG8(0x3777), 0x00 },
+ { CCI_REG8(0x3778), 0x46 },
+ { CCI_REG8(0x3779), 0x00 },
+ { CCI_REG8(0x377a), 0x08 },
+ { CCI_REG8(0x377b), 0x01 },
+ { CCI_REG8(0x377c), 0x45 },
+ { CCI_REG8(0x377d), 0x01 },
+ { CCI_REG8(0x377e), 0x23 },
+ { CCI_REG8(0x377f), 0x02 },
+ { CCI_REG8(0x3780), 0xd9 },
+ { CCI_REG8(0x3781), 0x03 },
+ { CCI_REG8(0x3782), 0xf5 },
+ { CCI_REG8(0x3783), 0x06 },
+ { CCI_REG8(0x3784), 0xa5 },
+ { CCI_REG8(0x3788), 0x0f },
+ { CCI_REG8(0x378a), 0xd9 },
+ { CCI_REG8(0x378b), 0x03 },
+ { CCI_REG8(0x378c), 0xeb },
+ { CCI_REG8(0x378d), 0x05 },
+ { CCI_REG8(0x378e), 0x87 },
+ { CCI_REG8(0x378f), 0x06 },
+ { CCI_REG8(0x3790), 0xf5 },
+ { CCI_REG8(0x3792), 0x43 },
+ { CCI_REG8(0x3794), 0x7a },
+ { CCI_REG8(0x3796), 0xa1 },
+ { CCI_REG8(0x37b0), 0x37 },
+ { CCI_REG8(0x3e04), 0x0e },
+ { IMX334_REG_AGAIN, 0x0050 },
+ { IMX334_REG_MASTER_MODE, 0x00 },
+};
+
+/* Sensor mode registers for 640x480@30fps */
+static const struct cci_reg_sequence mode_640x480_regs[] = {
+ { IMX334_REG_HTRIMMING_START, 0x0670 },
+ { IMX334_REG_HNUM, 0x0280 },
+ { IMX334_REG_AREA3_ST_ADR_1, 0x0748 },
+ { IMX334_REG_AREA3_ST_ADR_2, 0x0749 },
+ { IMX334_REG_AREA3_WIDTH_1, 0x01e0 },
+ { IMX334_REG_AREA3_WIDTH_2, 0x01e0 },
+ { IMX334_REG_Y_OUT_SIZE, 0x01e0 },
+ { IMX334_REG_UNREAD_ED_ADR, 0x0b30 },
+};
+
+/* Sensor mode registers for 1280x720@30fps */
+static const struct cci_reg_sequence mode_1280x720_regs[] = {
+ { IMX334_REG_HTRIMMING_START, 0x0530 },
+ { IMX334_REG_HNUM, 0x0500 },
+ { IMX334_REG_AREA3_ST_ADR_1, 0x0384 },
+ { IMX334_REG_AREA3_ST_ADR_2, 0x0385 },
+ { IMX334_REG_AREA3_WIDTH_1, 0x02d0 },
+ { IMX334_REG_AREA3_WIDTH_2, 0x02d0 },
+ { IMX334_REG_Y_OUT_SIZE, 0x02d0 },
+ { IMX334_REG_UNREAD_ED_ADR, 0x0b30 },
+};
+
/* Sensor mode registers for 1920x1080@30fps */
-static const struct imx334_reg mode_1920x1080_regs[] = {
- {0x3000, 0x01},
- {0x3018, 0x04},
- {0x3030, 0xca},
- {0x3031, 0x08},
- {0x3032, 0x00},
- {0x3034, 0x4c},
- {0x3035, 0x04},
- {0x302c, 0xf0},
- {0x302d, 0x03},
- {0x302e, 0x80},
- {0x302f, 0x07},
- {0x3074, 0xcc},
- {0x3075, 0x02},
- {0x308e, 0xcd},
- {0x308f, 0x02},
- {0x3076, 0x38},
- {0x3077, 0x04},
- {0x3090, 0x38},
- {0x3091, 0x04},
- {0x3308, 0x38},
- {0x3309, 0x04},
- {0x30C6, 0x00},
- {0x30c7, 0x00},
- {0x30ce, 0x00},
- {0x30cf, 0x00},
- {0x30d8, 0x18},
- {0x30d9, 0x0a},
- {0x304c, 0x00},
- {0x304e, 0x00},
- {0x304f, 0x00},
- {0x3050, 0x00},
- {0x30b6, 0x00},
- {0x30b7, 0x00},
- {0x3116, 0x08},
- {0x3117, 0x00},
- {0x31a0, 0x20},
- {0x31a1, 0x0f},
- {0x300c, 0x3b},
- {0x300d, 0x29},
- {0x314c, 0x29},
- {0x314d, 0x01},
- {0x315a, 0x06},
- {0x3168, 0xa0},
- {0x316a, 0x7e},
- {0x319e, 0x02},
- {0x3199, 0x00},
- {0x319d, 0x00},
- {0x31dd, 0x03},
- {0x3300, 0x00},
- {0x341c, 0xff},
- {0x341d, 0x01},
- {0x3a01, 0x03},
- {0x3a18, 0x7f},
- {0x3a19, 0x00},
- {0x3a1a, 0x37},
- {0x3a1b, 0x00},
- {0x3a1c, 0x37},
- {0x3a1d, 0x00},
- {0x3a1e, 0xf7},
- {0x3a1f, 0x00},
- {0x3a20, 0x3f},
- {0x3a21, 0x00},
- {0x3a20, 0x6f},
- {0x3a21, 0x00},
- {0x3a20, 0x3f},
- {0x3a21, 0x00},
- {0x3a20, 0x5f},
- {0x3a21, 0x00},
- {0x3a20, 0x2f},
- {0x3a21, 0x00},
- {0x3078, 0x02},
- {0x3079, 0x00},
- {0x307a, 0x00},
- {0x307b, 0x00},
- {0x3080, 0x02},
- {0x3081, 0x00},
- {0x3082, 0x00},
- {0x3083, 0x00},
- {0x3088, 0x02},
- {0x3094, 0x00},
- {0x3095, 0x00},
- {0x3096, 0x00},
- {0x309b, 0x02},
- {0x309c, 0x00},
- {0x309d, 0x00},
- {0x309e, 0x00},
- {0x30a4, 0x00},
- {0x30a5, 0x00},
- {0x3288, 0x21},
- {0x328a, 0x02},
- {0x3414, 0x05},
- {0x3416, 0x18},
- {0x35Ac, 0x0e},
- {0x3648, 0x01},
- {0x364a, 0x04},
- {0x364c, 0x04},
- {0x3678, 0x01},
- {0x367c, 0x31},
- {0x367e, 0x31},
- {0x3708, 0x02},
- {0x3714, 0x01},
- {0x3715, 0x02},
- {0x3716, 0x02},
- {0x3717, 0x02},
- {0x371c, 0x3d},
- {0x371d, 0x3f},
- {0x372c, 0x00},
- {0x372d, 0x00},
- {0x372e, 0x46},
- {0x372f, 0x00},
- {0x3730, 0x89},
- {0x3731, 0x00},
- {0x3732, 0x08},
- {0x3733, 0x01},
- {0x3734, 0xfe},
- {0x3735, 0x05},
- {0x375d, 0x00},
- {0x375e, 0x00},
- {0x375f, 0x61},
- {0x3760, 0x06},
- {0x3768, 0x1b},
- {0x3769, 0x1b},
- {0x376a, 0x1a},
- {0x376b, 0x19},
- {0x376c, 0x18},
- {0x376d, 0x14},
- {0x376e, 0x0f},
- {0x3776, 0x00},
- {0x3777, 0x00},
- {0x3778, 0x46},
- {0x3779, 0x00},
- {0x377a, 0x08},
- {0x377b, 0x01},
- {0x377c, 0x45},
- {0x377d, 0x01},
- {0x377e, 0x23},
- {0x377f, 0x02},
- {0x3780, 0xd9},
- {0x3781, 0x03},
- {0x3782, 0xf5},
- {0x3783, 0x06},
- {0x3784, 0xa5},
- {0x3788, 0x0f},
- {0x378a, 0xd9},
- {0x378b, 0x03},
- {0x378c, 0xeb},
- {0x378d, 0x05},
- {0x378e, 0x87},
- {0x378f, 0x06},
- {0x3790, 0xf5},
- {0x3792, 0x43},
- {0x3794, 0x7a},
- {0x3796, 0xa1},
- {0x37b0, 0x37},
- {0x3e04, 0x0e},
- {0x30e8, 0x50},
- {0x30e9, 0x00},
- {0x3e04, 0x0e},
- {0x3002, 0x00},
+static const struct cci_reg_sequence mode_1920x1080_regs[] = {
+ { IMX334_REG_HTRIMMING_START, 0x03f0 },
+ { IMX334_REG_HNUM, 0x0780 },
+ { IMX334_REG_AREA3_ST_ADR_1, 0x02cc },
+ { IMX334_REG_AREA3_ST_ADR_2, 0x02cd },
+ { IMX334_REG_AREA3_WIDTH_1, 0x0438 },
+ { IMX334_REG_AREA3_WIDTH_2, 0x0438 },
+ { IMX334_REG_Y_OUT_SIZE, 0x0438 },
+ { IMX334_REG_UNREAD_ED_ADR, 0x0a18 },
};
/* Sensor mode registers for 3840x2160@30fps */
-static const struct imx334_reg mode_3840x2160_regs[] = {
- {0x3000, 0x01},
- {0x3002, 0x00},
- {0x3018, 0x04},
- {0x37b0, 0x36},
- {0x304c, 0x00},
- {0x300c, 0x3b},
- {0x300d, 0x2a},
- {0x3034, 0x26},
- {0x3035, 0x02},
- {0x314c, 0x29},
- {0x314d, 0x01},
- {0x315a, 0x02},
- {0x3168, 0xa0},
- {0x316a, 0x7e},
- {0x3288, 0x21},
- {0x328a, 0x02},
- {0x302c, 0x3c},
- {0x302d, 0x00},
- {0x302e, 0x00},
- {0x302f, 0x0f},
- {0x3076, 0x70},
- {0x3077, 0x08},
- {0x3090, 0x70},
- {0x3091, 0x08},
- {0x30d8, 0x20},
- {0x30d9, 0x12},
- {0x3308, 0x70},
- {0x3309, 0x08},
- {0x3414, 0x05},
- {0x3416, 0x18},
- {0x35ac, 0x0e},
- {0x3648, 0x01},
- {0x364a, 0x04},
- {0x364c, 0x04},
- {0x3678, 0x01},
- {0x367c, 0x31},
- {0x367e, 0x31},
- {0x3708, 0x02},
- {0x3714, 0x01},
- {0x3715, 0x02},
- {0x3716, 0x02},
- {0x3717, 0x02},
- {0x371c, 0x3d},
- {0x371d, 0x3f},
- {0x372c, 0x00},
- {0x372d, 0x00},
- {0x372e, 0x46},
- {0x372f, 0x00},
- {0x3730, 0x89},
- {0x3731, 0x00},
- {0x3732, 0x08},
- {0x3733, 0x01},
- {0x3734, 0xfe},
- {0x3735, 0x05},
- {0x375d, 0x00},
- {0x375e, 0x00},
- {0x375f, 0x61},
- {0x3760, 0x06},
- {0x3768, 0x1b},
- {0x3769, 0x1b},
- {0x376a, 0x1a},
- {0x376b, 0x19},
- {0x376c, 0x18},
- {0x376d, 0x14},
- {0x376e, 0x0f},
- {0x3776, 0x00},
- {0x3777, 0x00},
- {0x3778, 0x46},
- {0x3779, 0x00},
- {0x377a, 0x08},
- {0x377b, 0x01},
- {0x377c, 0x45},
- {0x377d, 0x01},
- {0x377e, 0x23},
- {0x377f, 0x02},
- {0x3780, 0xd9},
- {0x3781, 0x03},
- {0x3782, 0xf5},
- {0x3783, 0x06},
- {0x3784, 0xa5},
- {0x3788, 0x0f},
- {0x378a, 0xd9},
- {0x378b, 0x03},
- {0x378c, 0xeb},
- {0x378d, 0x05},
- {0x378e, 0x87},
- {0x378f, 0x06},
- {0x3790, 0xf5},
- {0x3792, 0x43},
- {0x3794, 0x7a},
- {0x3796, 0xa1},
- {0x3e04, 0x0e},
- {0x319e, 0x00},
- {0x3a00, 0x01},
- {0x3a18, 0xbf},
- {0x3a19, 0x00},
- {0x3a1a, 0x67},
- {0x3a1b, 0x00},
- {0x3a1c, 0x6f},
- {0x3a1d, 0x00},
- {0x3a1e, 0xd7},
- {0x3a1f, 0x01},
- {0x3a20, 0x6f},
- {0x3a21, 0x00},
- {0x3a22, 0xcf},
- {0x3a23, 0x00},
- {0x3a24, 0x6f},
- {0x3a25, 0x00},
- {0x3a26, 0xb7},
- {0x3a27, 0x00},
- {0x3a28, 0x5f},
- {0x3a29, 0x00},
+static const struct cci_reg_sequence mode_3840x2160_regs[] = {
+ { IMX334_REG_HMAX, 0x0226 },
+ { IMX334_REG_INCKSEL2, 0x02 },
+ { IMX334_REG_HTRIMMING_START, 0x003c },
+ { IMX334_REG_HNUM, 0x0f00 },
+ { IMX334_REG_AREA3_ST_ADR_1, 0x00b0 },
+ { IMX334_REG_AREA3_ST_ADR_2, 0x00b1 },
+ { IMX334_REG_UNREAD_ED_ADR, 0x1220 },
+ { IMX334_REG_AREA3_WIDTH_1, 0x0870 },
+ { IMX334_REG_AREA3_WIDTH_2, 0x0870 },
+ { IMX334_REG_Y_OUT_SIZE, 0x0870 },
+ { IMX334_REG_SYS_MODE, 0x00 },
+ { IMX334_REG_TCLKPOST, 0x00bf },
+ { IMX334_REG_TCLKPREPARE, 0x0067 },
+ { IMX334_REG_TCLKTRAIL, 0x006f },
+ { IMX334_REG_TCLKZERO, 0x01d7 },
+ { IMX334_REG_THSPREPARE, 0x006f },
+ { IMX334_REG_THSZERO, 0x00cf },
+ { IMX334_REG_THSTRAIL, 0x006f },
+ { IMX334_REG_THSEXIT, 0x00b7 },
+ { IMX334_REG_TPLX, 0x005f },
};
static const char * const imx334_test_pattern_menu[] = {
@@ -458,18 +406,16 @@ static const int imx334_test_pattern_val[] = {
IMX334_TP_COLOR_VBARS,
};
-static const struct imx334_reg raw10_framefmt_regs[] = {
- {0x3050, 0x00},
- {0x319d, 0x00},
- {0x341c, 0xff},
- {0x341d, 0x01},
+static const struct cci_reg_sequence raw10_framefmt_regs[] = {
+ { IMX334_REG_ADBIT, 0x00 },
+ { IMX334_REG_MDBIT, 0x00 },
+ { IMX334_REG_ADBIT1, 0x01ff },
};
-static const struct imx334_reg raw12_framefmt_regs[] = {
- {0x3050, 0x01},
- {0x319d, 0x01},
- {0x341c, 0x47},
- {0x341d, 0x00},
+static const struct cci_reg_sequence raw12_framefmt_regs[] = {
+ { IMX334_REG_ADBIT, 0x01 },
+ { IMX334_REG_MDBIT, 0x01 },
+ { IMX334_REG_ADBIT1, 0x0047 },
};
static const u32 imx334_mbus_codes[] = {
@@ -505,6 +451,32 @@ static const struct imx334_mode supported_modes[] = {
.num_of_regs = ARRAY_SIZE(mode_1920x1080_regs),
.regs = mode_1920x1080_regs,
},
+ }, {
+ .width = 1280,
+ .height = 720,
+ .hblank = 2480,
+ .vblank = 1170,
+ .vblank_min = 45,
+ .vblank_max = 132840,
+ .pclk = 297000000,
+ .link_freq_idx = 1,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1280x720_regs),
+ .regs = mode_1280x720_regs,
+ },
+ }, {
+ .width = 640,
+ .height = 480,
+ .hblank = 2480,
+ .vblank = 1170,
+ .vblank_min = 45,
+ .vblank_max = 132840,
+ .pclk = 297000000,
+ .link_freq_idx = 1,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_640x480_regs),
+ .regs = mode_640x480_regs,
+ },
},
};
@@ -520,101 +492,6 @@ static inline struct imx334 *to_imx334(struct v4l2_subdev *subdev)
}
/**
- * imx334_read_reg() - Read registers.
- * @imx334: pointer to imx334 device
- * @reg: register address
- * @len: length of bytes to read. Max supported bytes is 4
- * @val: pointer to register value to be filled.
- *
- * Big endian register addresses with little endian values.
- *
- * Return: 0 if successful, error code otherwise.
- */
-static int imx334_read_reg(struct imx334 *imx334, u16 reg, u32 len, u32 *val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&imx334->sd);
- struct i2c_msg msgs[2] = {0};
- u8 addr_buf[2] = {0};
- u8 data_buf[4] = {0};
- int ret;
-
- if (WARN_ON(len > 4))
- return -EINVAL;
-
- put_unaligned_be16(reg, addr_buf);
-
- /* Write register address */
- msgs[0].addr = client->addr;
- msgs[0].flags = 0;
- msgs[0].len = ARRAY_SIZE(addr_buf);
- msgs[0].buf = addr_buf;
-
- /* Read data from register */
- msgs[1].addr = client->addr;
- msgs[1].flags = I2C_M_RD;
- msgs[1].len = len;
- msgs[1].buf = data_buf;
-
- ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret != ARRAY_SIZE(msgs))
- return -EIO;
-
- *val = get_unaligned_le32(data_buf);
-
- return 0;
-}
-
-/**
- * imx334_write_reg() - Write register
- * @imx334: pointer to imx334 device
- * @reg: register address
- * @len: length of bytes. Max supported bytes is 4
- * @val: register value
- *
- * Big endian register addresses with little endian values.
- *
- * Return: 0 if successful, error code otherwise.
- */
-static int imx334_write_reg(struct imx334 *imx334, u16 reg, u32 len, u32 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&imx334->sd);
- u8 buf[6] = {0};
-
- if (WARN_ON(len > 4))
- return -EINVAL;
-
- put_unaligned_be16(reg, buf);
- put_unaligned_le32(val, buf + 2);
- if (i2c_master_send(client, buf, len + 2) != len + 2)
- return -EIO;
-
- return 0;
-}
-
-/**
- * imx334_write_regs() - Write a list of registers
- * @imx334: pointer to imx334 device
- * @regs: list of registers to be written
- * @len: length of registers array
- *
- * Return: 0 if successful, error code otherwise.
- */
-static int imx334_write_regs(struct imx334 *imx334,
- const struct imx334_reg *regs, u32 len)
-{
- unsigned int i;
- int ret;
-
- for (i = 0; i < len; i++) {
- ret = imx334_write_reg(imx334, regs[i].address, 1, regs[i].val);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-/**
* imx334_update_controls() - Update control ranges based on streaming mode
* @imx334: pointer to imx334 device
* @mode: pointer to imx334_mode sensor mode
@@ -659,30 +536,23 @@ static int imx334_update_controls(struct imx334 *imx334,
static int imx334_update_exp_gain(struct imx334 *imx334, u32 exposure, u32 gain)
{
u32 lpfr, shutter;
- int ret;
+ int ret_hold;
+ int ret = 0;
lpfr = imx334->vblank + imx334->cur_mode->height;
shutter = lpfr - exposure;
- dev_dbg(imx334->dev, "Set long exp %u analog gain %u sh0 %u lpfr %u",
+ dev_dbg(imx334->dev, "Set long exp %u analog gain %u sh0 %u lpfr %u\n",
exposure, gain, shutter, lpfr);
- ret = imx334_write_reg(imx334, IMX334_REG_HOLD, 1, 1);
- if (ret)
- return ret;
-
- ret = imx334_write_reg(imx334, IMX334_REG_LPFR, 3, lpfr);
- if (ret)
- goto error_release_group_hold;
-
- ret = imx334_write_reg(imx334, IMX334_REG_SHUTTER, 3, shutter);
- if (ret)
- goto error_release_group_hold;
-
- ret = imx334_write_reg(imx334, IMX334_REG_AGAIN, 1, gain);
+ cci_write(imx334->cci, IMX334_REG_HOLD, 1, &ret);
+ cci_write(imx334->cci, IMX334_REG_VMAX, lpfr, &ret);
+ cci_write(imx334->cci, IMX334_REG_SHUTTER, shutter, &ret);
+ cci_write(imx334->cci, IMX334_REG_AGAIN, gain, &ret);
-error_release_group_hold:
- imx334_write_reg(imx334, IMX334_REG_HOLD, 1, 0);
+ ret_hold = cci_write(imx334->cci, IMX334_REG_HOLD, 0, NULL);
+ if (ret_hold)
+ return ret_hold;
return ret;
}
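
The rewrite above relies on the CCI chained-error convention: each helper takes an optional int *err, becomes a no-op once *err is non-zero, and latches the first failure, so a register sequence needs a single check at the end. A generic sketch (REG_A and REG_B are placeholders):

int ret = 0;

cci_write(map, REG_A, 0x01, &ret);
cci_write(map, REG_B, 0x02, &ret);	/* skipped if the first write failed */
if (ret)
	return ret;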
@@ -707,11 +577,10 @@ static int imx334_set_ctrl(struct v4l2_ctrl *ctrl)
u32 exposure;
int ret;
- switch (ctrl->id) {
- case V4L2_CID_VBLANK:
+ if (ctrl->id == V4L2_CID_VBLANK) {
imx334->vblank = imx334->vblank_ctrl->val;
- dev_dbg(imx334->dev, "Received vblank %u, new lpfr %u",
+ dev_dbg(imx334->dev, "Received vblank %u, new lpfr %u\n",
imx334->vblank,
imx334->vblank + imx334->cur_mode->height);
@@ -721,23 +590,32 @@ static int imx334_set_ctrl(struct v4l2_ctrl *ctrl)
imx334->cur_mode->height -
IMX334_EXPOSURE_OFFSET,
1, IMX334_EXPOSURE_DEFAULT);
+ if (ret)
+ return ret;
+ }
+
+ /* Set controls only if sensor is in power on state */
+ if (!pm_runtime_get_if_in_use(imx334->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ exposure = imx334->exp_ctrl->val;
+ analog_gain = imx334->again_ctrl->val;
+
+ ret = imx334_update_exp_gain(imx334, exposure, analog_gain);
+
break;
case V4L2_CID_EXPOSURE:
- /* Set controls only if sensor is in power on state */
- if (!pm_runtime_get_if_in_use(imx334->dev))
- return 0;
-
exposure = ctrl->val;
analog_gain = imx334->again_ctrl->val;
- dev_dbg(imx334->dev, "Received exp %u analog gain %u",
+ dev_dbg(imx334->dev, "Received exp %u analog gain %u\n",
exposure, analog_gain);
ret = imx334_update_exp_gain(imx334, exposure, analog_gain);
- pm_runtime_put(imx334->dev);
-
break;
case V4L2_CID_PIXEL_RATE:
case V4L2_CID_LINK_FREQ:
@@ -746,29 +624,31 @@ static int imx334_set_ctrl(struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_TEST_PATTERN:
if (ctrl->val) {
- imx334_write_reg(imx334, IMX334_TP_CLK_EN, 1,
- IMX334_TP_CLK_EN_VAL);
- imx334_write_reg(imx334, IMX334_DIG_CLP_MODE, 1, 0x0);
- imx334_write_reg(imx334, IMX334_TPG_COLORW, 1,
- IMX334_TPG_COLORW_120P);
- imx334_write_reg(imx334, IMX334_REG_TP, 1,
- imx334_test_pattern_val[ctrl->val]);
- imx334_write_reg(imx334, IMX334_TPG_EN_DOUT, 1,
- IMX334_TP_ENABLE);
+ cci_write(imx334->cci, IMX334_TP_CLK_EN,
+ IMX334_TP_CLK_EN_VAL, NULL);
+ cci_write(imx334->cci, IMX334_DIG_CLP_MODE, 0x0, NULL);
+ cci_write(imx334->cci, IMX334_TPG_COLORW,
+ IMX334_TPG_COLORW_120P, NULL);
+ cci_write(imx334->cci, IMX334_REG_TP,
+ imx334_test_pattern_val[ctrl->val], NULL);
+ cci_write(imx334->cci, IMX334_TPG_EN_DOUT,
+ IMX334_TP_ENABLE, NULL);
} else {
- imx334_write_reg(imx334, IMX334_DIG_CLP_MODE, 1, 0x1);
- imx334_write_reg(imx334, IMX334_TP_CLK_EN, 1,
- IMX334_TP_CLK_DIS_VAL);
- imx334_write_reg(imx334, IMX334_TPG_EN_DOUT, 1,
- IMX334_TP_DISABLE);
+ cci_write(imx334->cci, IMX334_DIG_CLP_MODE, 0x1, NULL);
+ cci_write(imx334->cci, IMX334_TP_CLK_EN,
+ IMX334_TP_CLK_DIS_VAL, NULL);
+ cci_write(imx334->cci, IMX334_TPG_EN_DOUT,
+ IMX334_TP_DISABLE, NULL);
}
ret = 0;
break;
default:
- dev_err(imx334->dev, "Invalid control %d", ctrl->id);
+ dev_err(imx334->dev, "Invalid control %d\n", ctrl->id);
ret = -EINVAL;
}
+ pm_runtime_put(imx334->dev);
+
return ret;
}
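
Hoisting the pm_runtime_get_if_in_use() check above the switch implements the standard V4L2 control pattern: write to the hardware only while it is powered, and otherwise let the cached control value be replayed by __v4l2_ctrl_handler_setup() on the stream-on path. In outline (the hardware write is a placeholder):

if (!pm_runtime_get_if_in_use(dev))
	return 0;	/* powered down: cached value applied at stream-on */

ret = write_value_to_hw(ctrl->val);	/* placeholder */

pm_runtime_put(dev);
return ret;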
@@ -874,8 +754,6 @@ static int imx334_get_pad_format(struct v4l2_subdev *sd,
{
struct imx334 *imx334 = to_imx334(sd);
- mutex_lock(&imx334->mutex);
-
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *framefmt;
@@ -886,8 +764,6 @@ static int imx334_get_pad_format(struct v4l2_subdev *sd,
imx334_fill_pad_format(imx334, imx334->cur_mode, fmt);
}
- mutex_unlock(&imx334->mutex);
-
return 0;
}
@@ -907,8 +783,6 @@ static int imx334_set_pad_format(struct v4l2_subdev *sd,
const struct imx334_mode *mode;
int ret = 0;
- mutex_lock(&imx334->mutex);
-
mode = v4l2_find_nearest_size(supported_modes,
ARRAY_SIZE(supported_modes),
width, height,
@@ -929,8 +803,6 @@ static int imx334_set_pad_format(struct v4l2_subdev *sd,
imx334->cur_mode = mode;
}
- mutex_unlock(&imx334->mutex);
-
return ret;
}
@@ -949,8 +821,6 @@ static int imx334_init_state(struct v4l2_subdev *sd,
fmt.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- mutex_lock(&imx334->mutex);
-
imx334_fill_pad_format(imx334, imx334->cur_mode, &fmt);
__v4l2_ctrl_modify_range(imx334->link_freq_ctrl, 0,
@@ -958,8 +828,6 @@ static int imx334_init_state(struct v4l2_subdev *sd,
~(imx334->link_freq_bitmap),
__ffs(imx334->link_freq_bitmap));
- mutex_unlock(&imx334->mutex);
-
return imx334_set_pad_format(sd, sd_state, &fmt);
}
@@ -967,109 +835,113 @@ static int imx334_set_framefmt(struct imx334 *imx334)
{
switch (imx334->cur_code) {
case MEDIA_BUS_FMT_SRGGB10_1X10:
- return imx334_write_regs(imx334, raw10_framefmt_regs,
- ARRAY_SIZE(raw10_framefmt_regs));
+ return cci_multi_reg_write(imx334->cci, raw10_framefmt_regs,
+ ARRAY_SIZE(raw10_framefmt_regs), NULL);
+
case MEDIA_BUS_FMT_SRGGB12_1X12:
- return imx334_write_regs(imx334, raw12_framefmt_regs,
- ARRAY_SIZE(raw12_framefmt_regs));
+ return cci_multi_reg_write(imx334->cci, raw12_framefmt_regs,
+ ARRAY_SIZE(raw12_framefmt_regs), NULL);
}
return -EINVAL;
}
/**
- * imx334_start_streaming() - Start sensor stream
- * @imx334: pointer to imx334 device
+ * imx334_enable_streams() - Enable specified streams for the sensor
+ * @sd: pointer to the V4L2 subdevice
+ * @state: pointer to the subdevice state
+ * @pad: pad number for which streams are enabled
+ * @streams_mask: bitmask specifying the streams to enable
*
* Return: 0 if successful, error code otherwise.
*/
-static int imx334_start_streaming(struct imx334 *imx334)
+static int imx334_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
{
+ struct imx334 *imx334 = to_imx334(sd);
const struct imx334_reg_list *reg_list;
int ret;
+ ret = pm_runtime_resume_and_get(imx334->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = cci_multi_reg_write(imx334->cci, common_mode_regs,
+ ARRAY_SIZE(common_mode_regs), NULL);
+ if (ret) {
+ dev_err(imx334->dev, "fail to write common registers\n");
+ goto err_rpm_put;
+ }
+
/* Write sensor mode registers */
reg_list = &imx334->cur_mode->reg_list;
- ret = imx334_write_regs(imx334, reg_list->regs,
- reg_list->num_of_regs);
+ ret = cci_multi_reg_write(imx334->cci, reg_list->regs,
+ reg_list->num_of_regs, NULL);
if (ret) {
- dev_err(imx334->dev, "fail to write initial registers");
- return ret;
+ dev_err(imx334->dev, "fail to write initial registers\n");
+ goto err_rpm_put;
+ }
+
+ ret = cci_write(imx334->cci, IMX334_REG_LANEMODE,
+ IMX334_CSI_4_LANE_MODE, NULL);
+ if (ret) {
+ dev_err(imx334->dev, "failed to configure lanes\n");
+ goto err_rpm_put;
}
ret = imx334_set_framefmt(imx334);
if (ret) {
dev_err(imx334->dev, "%s failed to set frame format: %d\n",
__func__, ret);
- return ret;
+ goto err_rpm_put;
}
/* Setup handler will write actual exposure and gain */
ret = __v4l2_ctrl_handler_setup(imx334->sd.ctrl_handler);
if (ret) {
- dev_err(imx334->dev, "fail to setup handler");
- return ret;
+ dev_err(imx334->dev, "fail to setup handler\n");
+ goto err_rpm_put;
}
/* Start streaming */
- ret = imx334_write_reg(imx334, IMX334_REG_MODE_SELECT,
- 1, IMX334_MODE_STREAMING);
+ ret = cci_write(imx334->cci, IMX334_REG_MODE_SELECT,
+ IMX334_MODE_STREAMING, NULL);
if (ret) {
- dev_err(imx334->dev, "fail to start streaming");
- return ret;
+ dev_err(imx334->dev, "fail to start streaming\n");
+ goto err_rpm_put;
}
return 0;
-}
-/**
- * imx334_stop_streaming() - Stop sensor stream
- * @imx334: pointer to imx334 device
- *
- * Return: 0 if successful, error code otherwise.
- */
-static int imx334_stop_streaming(struct imx334 *imx334)
-{
- return imx334_write_reg(imx334, IMX334_REG_MODE_SELECT,
- 1, IMX334_MODE_STANDBY);
+err_rpm_put:
+ pm_runtime_put(imx334->dev);
+ return ret;
}
/**
- * imx334_set_stream() - Enable sensor streaming
- * @sd: pointer to imx334 subdevice
- * @enable: set to enable sensor streaming
+ * imx334_disable_streams() - Disable specified streams for the sensor
+ * @sd: pointer to the V4L2 subdevice
+ * @state: pointer to the subdevice state
+ * @pad: pad number for which streams are disabled
+ * @streams_mask: bitmask specifying the streams to disable
*
* Return: 0 if successful, error code otherwise.
*/
-static int imx334_set_stream(struct v4l2_subdev *sd, int enable)
+static int imx334_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
{
struct imx334 *imx334 = to_imx334(sd);
int ret;
- mutex_lock(&imx334->mutex);
-
- if (enable) {
- ret = pm_runtime_resume_and_get(imx334->dev);
- if (ret < 0)
- goto error_unlock;
-
- ret = imx334_start_streaming(imx334);
- if (ret)
- goto error_power_off;
- } else {
- imx334_stop_streaming(imx334);
- pm_runtime_put(imx334->dev);
- }
-
- mutex_unlock(&imx334->mutex);
-
- return 0;
+ ret = cci_write(imx334->cci, IMX334_REG_MODE_SELECT,
+ IMX334_MODE_STANDBY, NULL);
+ if (ret)
+ dev_err(imx334->dev, "%s failed to stop stream\n", __func__);
-error_power_off:
pm_runtime_put(imx334->dev);
-error_unlock:
- mutex_unlock(&imx334->mutex);
return ret;
}
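
With .s_stream pointed at v4l2_subdev_s_stream_helper, legacy s_stream() calls are routed to the new pad ops. Roughly, as a simplified sketch of the helper's behaviour (the real helper locates the source pad itself; pad 0 is assumed here):

static int s_stream_bridge(struct v4l2_subdev *sd, int enable)
{
	/* single source pad, single implicit stream 0 */
	if (enable)
		return v4l2_subdev_enable_streams(sd, 0, BIT_ULL(0));

	return v4l2_subdev_disable_streams(sd, 0, BIT_ULL(0));
}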
@@ -1083,14 +955,14 @@ error_unlock:
static int imx334_detect(struct imx334 *imx334)
{
int ret;
- u32 val;
+ u64 val;
- ret = imx334_read_reg(imx334, IMX334_REG_ID, 2, &val);
+ ret = cci_read(imx334->cci, IMX334_REG_ID, &val, NULL);
if (ret)
return ret;
if (val != IMX334_ID) {
- dev_err(imx334->dev, "chip id mismatch: %x!=%x",
+ dev_err(imx334->dev, "chip id mismatch: %x!=%llx\n",
IMX334_ID, val);
return -ENXIO;
}
@@ -1120,24 +992,20 @@ static int imx334_parse_hw_config(struct imx334 *imx334)
/* Request optional reset pin */
imx334->reset_gpio = devm_gpiod_get_optional(imx334->dev, "reset",
GPIOD_OUT_LOW);
- if (IS_ERR(imx334->reset_gpio)) {
- dev_err(imx334->dev, "failed to get reset gpio %ld",
- PTR_ERR(imx334->reset_gpio));
- return PTR_ERR(imx334->reset_gpio);
- }
+ if (IS_ERR(imx334->reset_gpio))
+ return dev_err_probe(imx334->dev, PTR_ERR(imx334->reset_gpio),
+ "failed to get reset gpio\n");
/* Get sensor input clock */
imx334->inclk = devm_clk_get(imx334->dev, NULL);
- if (IS_ERR(imx334->inclk)) {
- dev_err(imx334->dev, "could not get inclk");
- return PTR_ERR(imx334->inclk);
- }
+ if (IS_ERR(imx334->inclk))
+ return dev_err_probe(imx334->dev, PTR_ERR(imx334->inclk),
+ "could not get inclk\n");
rate = clk_get_rate(imx334->inclk);
- if (rate != IMX334_INCLK_RATE) {
- dev_err(imx334->dev, "inclk frequency mismatch");
- return -EINVAL;
- }
+ if (rate != IMX334_INCLK_RATE)
+ return dev_err_probe(imx334->dev, -EINVAL,
+ "inclk frequency mismatch\n");
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
@@ -1150,7 +1018,7 @@ static int imx334_parse_hw_config(struct imx334 *imx334)
if (bus_cfg.bus.mipi_csi2.num_data_lanes != IMX334_NUM_DATA_LANES) {
dev_err(imx334->dev,
- "number of CSI2 data lanes %d is not supported",
+ "number of CSI2 data lanes %d is not supported\n",
bus_cfg.bus.mipi_csi2.num_data_lanes);
ret = -EINVAL;
goto done_endpoint_free;
@@ -1169,7 +1037,7 @@ done_endpoint_free:
/* V4l2 subdevice ops */
static const struct v4l2_subdev_video_ops imx334_video_ops = {
- .s_stream = imx334_set_stream,
+ .s_stream = v4l2_subdev_s_stream_helper,
};
static const struct v4l2_subdev_pad_ops imx334_pad_ops = {
@@ -1177,6 +1045,8 @@ static const struct v4l2_subdev_pad_ops imx334_pad_ops = {
.enum_frame_size = imx334_enum_frame_size,
.get_fmt = imx334_get_pad_format,
.set_fmt = imx334_set_pad_format,
+ .enable_streams = imx334_enable_streams,
+ .disable_streams = imx334_disable_streams,
};
static const struct v4l2_subdev_ops imx334_subdev_ops = {
@@ -1204,7 +1074,7 @@ static int imx334_power_on(struct device *dev)
ret = clk_prepare_enable(imx334->inclk);
if (ret) {
- dev_err(imx334->dev, "fail to enable inclk");
+ dev_err(imx334->dev, "fail to enable inclk\n");
goto error_reset;
}
@@ -1253,9 +1123,6 @@ static int imx334_init_controls(struct imx334 *imx334)
if (ret)
return ret;
- /* Serialize controls with sensor device */
- ctrl_hdlr->lock = &imx334->mutex;
-
/* Initialize exposure and gain */
lpfr = mode->vblank + mode->height;
imx334->exp_ctrl = v4l2_ctrl_new_std(ctrl_hdlr,
@@ -1342,29 +1209,31 @@ static int imx334_probe(struct i2c_client *client)
return -ENOMEM;
imx334->dev = &client->dev;
+ imx334->cci = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(imx334->cci)) {
+ dev_err(imx334->dev, "Unable to initialize I2C\n");
+ return -ENODEV;
+ }
/* Initialize subdev */
v4l2_i2c_subdev_init(&imx334->sd, client, &imx334_subdev_ops);
imx334->sd.internal_ops = &imx334_internal_ops;
ret = imx334_parse_hw_config(imx334);
- if (ret) {
- dev_err(imx334->dev, "HW configuration is not supported");
- return ret;
- }
-
- mutex_init(&imx334->mutex);
+ if (ret)
+ return dev_err_probe(imx334->dev, ret,
+ "HW configuration is not supported\n");
ret = imx334_power_on(imx334->dev);
if (ret) {
- dev_err(imx334->dev, "failed to power-on the sensor");
- goto error_mutex_destroy;
+ dev_err_probe(imx334->dev, ret, "failed to power-on the sensor\n");
+ return ret;
}
/* Check module identity */
ret = imx334_detect(imx334);
if (ret) {
- dev_err(imx334->dev, "failed to find sensor: %d", ret);
+ dev_err(imx334->dev, "failed to find sensor: %d\n", ret);
goto error_power_off;
}
@@ -1375,7 +1244,7 @@ static int imx334_probe(struct i2c_client *client)
ret = imx334_init_controls(imx334);
if (ret) {
- dev_err(imx334->dev, "failed to init controls: %d", ret);
+ dev_err(imx334->dev, "failed to init controls: %d\n", ret);
goto error_power_off;
}
@@ -1387,31 +1256,44 @@ static int imx334_probe(struct i2c_client *client)
imx334->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&imx334->sd.entity, 1, &imx334->pad);
if (ret) {
- dev_err(imx334->dev, "failed to init entity pads: %d", ret);
+ dev_err(imx334->dev, "failed to init entity pads: %d\n", ret);
goto error_handler_free;
}
- ret = v4l2_async_register_subdev_sensor(&imx334->sd);
+ imx334->sd.state_lock = imx334->ctrl_handler.lock;
+ ret = v4l2_subdev_init_finalize(&imx334->sd);
if (ret < 0) {
- dev_err(imx334->dev,
- "failed to register async subdev: %d", ret);
+ dev_err(imx334->dev, "subdev init error: %d\n", ret);
goto error_media_entity;
}
pm_runtime_set_active(imx334->dev);
pm_runtime_enable(imx334->dev);
+
+ ret = v4l2_async_register_subdev_sensor(&imx334->sd);
+ if (ret < 0) {
+ dev_err(imx334->dev,
+ "failed to register async subdev: %d\n", ret);
+ goto error_subdev_cleanup;
+ }
+
pm_runtime_idle(imx334->dev);
return 0;
+error_subdev_cleanup:
+ v4l2_subdev_cleanup(&imx334->sd);
+ pm_runtime_disable(imx334->dev);
+ pm_runtime_set_suspended(imx334->dev);
+
error_media_entity:
media_entity_cleanup(&imx334->sd.entity);
+
error_handler_free:
v4l2_ctrl_handler_free(imx334->sd.ctrl_handler);
+
error_power_off:
imx334_power_off(imx334->dev);
-error_mutex_destroy:
- mutex_destroy(&imx334->mutex);
return ret;
}
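
The reordering in probe follows from v4l2_subdev_init_finalize() allocating the active state that the stream ops and control handler now lock against: it must exist, and runtime PM must be live, before async registration makes the subdev externally visible. The new error labels unwind in exactly the reverse order. Compressed sketch of the required sequence:

ret = v4l2_subdev_init_finalize(sd);		/* 1: allocate active state */
if (ret)
	goto err_entity;

pm_runtime_set_active(dev);			/* 2: runtime PM ready */
pm_runtime_enable(dev);

ret = v4l2_async_register_subdev_sensor(sd);	/* 3: now externally visible */
if (ret)
	goto err_subdev_cleanup;		/* unwinds 2, then 1 */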
@@ -1425,16 +1307,17 @@ error_mutex_destroy:
static void imx334_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx334 *imx334 = to_imx334(sd);
v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
- pm_runtime_suspended(&client->dev);
-
- mutex_destroy(&imx334->mutex);
+ if (!pm_runtime_status_suspended(&client->dev)) {
+ imx334_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ }
}
static const struct dev_pm_ops imx334_pm_ops = {
diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
index 0beb80b8c458..9b4db4cd4929 100644
--- a/drivers/media/i2c/imx335.c
+++ b/drivers/media/i2c/imx335.c
@@ -31,7 +31,7 @@
#define IMX335_REG_CPWAIT_TIME CCI_REG8(0x300d)
#define IMX335_REG_WINMODE CCI_REG8(0x3018)
#define IMX335_REG_HTRIMMING_START CCI_REG16_LE(0x302c)
-#define IMX335_REG_HNUM CCI_REG8(0x302e)
+#define IMX335_REG_HNUM CCI_REG16_LE(0x302e)
/* Lines per frame */
#define IMX335_REG_VMAX CCI_REG24_LE(0x3030)
@@ -660,7 +660,8 @@ static int imx335_enum_frame_size(struct v4l2_subdev *sd,
struct imx335 *imx335 = to_imx335(sd);
u32 code;
- if (fsize->index > ARRAY_SIZE(imx335_mbus_codes))
+ /* Only a single supported_mode available. */
+ if (fsize->index > 0)
return -EINVAL;
code = imx335_get_format_code(imx335, fsize->code);
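
The bound fix above corrects a classic off-by-one: with N table entries, index > N still admits index == N. The conventional guard for an array-backed enumeration is:

if (fsize->index >= ARRAY_SIZE(supported_modes))
	return -EINVAL;

With a single supported mode, the patch's index > 0 test is equivalent.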
diff --git a/drivers/media/i2c/lt6911uxe.c b/drivers/media/i2c/lt6911uxe.c
index c5b40bb58a37..24857d683fcf 100644
--- a/drivers/media/i2c/lt6911uxe.c
+++ b/drivers/media/i2c/lt6911uxe.c
@@ -605,10 +605,10 @@ static int lt6911uxe_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(lt6911uxe->reset_gpio),
"failed to get reset gpio\n");
- lt6911uxe->irq_gpio = devm_gpiod_get(dev, "readystat", GPIOD_IN);
+ lt6911uxe->irq_gpio = devm_gpiod_get(dev, "hpd", GPIOD_IN);
if (IS_ERR(lt6911uxe->irq_gpio))
return dev_err_probe(dev, PTR_ERR(lt6911uxe->irq_gpio),
- "failed to get ready_stat gpio\n");
+ "failed to get hpd gpio\n");
ret = lt6911uxe_fwnode_parse(lt6911uxe, dev);
if (ret)
diff --git a/drivers/media/i2c/max96714.c b/drivers/media/i2c/max96714.c
index 159753b13777..3cc1b1ae47d1 100644
--- a/drivers/media/i2c/max96714.c
+++ b/drivers/media/i2c/max96714.c
@@ -7,11 +7,11 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
-#include <linux/fwnode.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/media/i2c/max96717.c b/drivers/media/i2c/max96717.c
index 9259d58ba734..3746729366ac 100644
--- a/drivers/media/i2c/max96717.c
+++ b/drivers/media/i2c/max96717.c
@@ -9,10 +9,10 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
-#include <linux/fwnode.h>
#include <linux/gpio/driver.h>
#include <linux/i2c-mux.h>
#include <linux/i2c.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <media/v4l2-cci.h>
diff --git a/drivers/media/i2c/ov02c10.c b/drivers/media/i2c/ov02c10.c
new file mode 100644
index 000000000000..089a4fd9627c
--- /dev/null
+++ b/drivers/media/i2c/ov02c10.c
@@ -0,0 +1,1013 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2022 Intel Corporation.
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/version.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OV02C10_LINK_FREQ_400MHZ 400000000ULL
+#define OV02C10_MCLK 19200000
+#define OV02C10_RGB_DEPTH 10
+
+#define OV02C10_REG_CHIP_ID CCI_REG16(0x300a)
+#define OV02C10_CHIP_ID 0x5602
+
+#define OV02C10_REG_STREAM_CONTROL CCI_REG8(0x0100)
+
+#define OV02C10_REG_HTS CCI_REG16(0x380c)
+
+/* vertical-timings from sensor */
+#define OV02C10_REG_VTS CCI_REG16(0x380e)
+#define OV02C10_VTS_MAX 0xffff
+
+/* Exposure controls from sensor */
+#define OV02C10_REG_EXPOSURE CCI_REG16(0x3501)
+#define OV02C10_EXPOSURE_MIN 4
+#define OV02C10_EXPOSURE_MAX_MARGIN 8
+#define OV02C10_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define OV02C10_REG_ANALOG_GAIN CCI_REG16(0x3508)
+#define OV02C10_ANAL_GAIN_MIN 0x10
+#define OV02C10_ANAL_GAIN_MAX 0xf8
+#define OV02C10_ANAL_GAIN_STEP 1
+#define OV02C10_ANAL_GAIN_DEFAULT 0x10
+
+/* Digital gain controls from sensor */
+#define OV02C10_REG_DIGITAL_GAIN CCI_REG24(0x350a)
+#define OV02C10_DGTL_GAIN_MIN 0x0400
+#define OV02C10_DGTL_GAIN_MAX 0x3fff
+#define OV02C10_DGTL_GAIN_STEP 1
+#define OV02C10_DGTL_GAIN_DEFAULT 0x0400
+
+/* Rotate */
+#define OV02C10_ROTATE_CONTROL CCI_REG8(0x3820)
+#define OV02C10_ISP_X_WIN_CONTROL CCI_REG16(0x3810)
+#define OV02C10_ISP_Y_WIN_CONTROL CCI_REG16(0x3812)
+#define OV02C10_CONFIG_ROTATE 0x18
+
+/* Test Pattern Control */
+#define OV02C10_REG_TEST_PATTERN CCI_REG8(0x4503)
+#define OV02C10_TEST_PATTERN_ENABLE BIT(7)
+
+struct ov02c10_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Horizontal timing size */
+ u32 hts;
+
+ /* Min vertical timing size */
+ u32 vts_min;
+
+ /* Sensor register settings for this resolution */
+ const struct reg_sequence *reg_sequence;
+ const int sequence_length;
+ /* Sensor register settings for 1 or 2 lane config */
+ const struct reg_sequence *lane_settings[2];
+ const int lane_settings_length[2];
+};
+
+static const struct reg_sequence sensor_1928x1092_30fps_setting[] = {
+ {0x0301, 0x08},
+ {0x0303, 0x06},
+ {0x0304, 0x01},
+ {0x0305, 0xe0},
+ {0x0313, 0x40},
+ {0x031c, 0x4f},
+ {0x3020, 0x97},
+ {0x3022, 0x01},
+ {0x3026, 0xb4},
+ {0x303b, 0x00},
+ {0x303c, 0x4f},
+ {0x303d, 0xe6},
+ {0x303e, 0x00},
+ {0x303f, 0x03},
+ {0x3021, 0x23},
+ {0x3501, 0x04},
+ {0x3502, 0x6c},
+ {0x3504, 0x0c},
+ {0x3507, 0x00},
+ {0x3508, 0x08},
+ {0x3509, 0x00},
+ {0x350a, 0x01},
+ {0x350b, 0x00},
+ {0x350c, 0x41},
+ {0x3600, 0x84},
+ {0x3603, 0x08},
+ {0x3610, 0x57},
+ {0x3611, 0x1b},
+ {0x3613, 0x78},
+ {0x3623, 0x00},
+ {0x3632, 0xa0},
+ {0x3642, 0xe8},
+ {0x364c, 0x70},
+ {0x365f, 0x0f},
+ {0x3708, 0x30},
+ {0x3714, 0x24},
+ {0x3725, 0x02},
+ {0x3737, 0x08},
+ {0x3739, 0x28},
+ {0x3749, 0x32},
+ {0x374a, 0x32},
+ {0x374b, 0x32},
+ {0x374c, 0x32},
+ {0x374d, 0x81},
+ {0x374e, 0x81},
+ {0x374f, 0x81},
+ {0x3752, 0x36},
+ {0x3753, 0x36},
+ {0x3754, 0x36},
+ {0x3761, 0x00},
+ {0x376c, 0x81},
+ {0x3774, 0x18},
+ {0x3776, 0x08},
+ {0x377c, 0x81},
+ {0x377d, 0x81},
+ {0x377e, 0x81},
+ {0x37a0, 0x44},
+ {0x37a6, 0x44},
+ {0x37aa, 0x0d},
+ {0x37ae, 0x00},
+ {0x37cb, 0x03},
+ {0x37cc, 0x01},
+ {0x37d8, 0x02},
+ {0x37d9, 0x10},
+ {0x37e1, 0x10},
+ {0x37e2, 0x18},
+ {0x37e3, 0x08},
+ {0x37e4, 0x08},
+ {0x37e5, 0x02},
+ {0x37e6, 0x08},
+
+ /* 1928x1092 */
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x07},
+ {0x3805, 0x8f},
+ {0x3806, 0x04},
+ {0x3807, 0x47},
+ {0x3808, 0x07},
+ {0x3809, 0x88},
+ {0x380a, 0x04},
+ {0x380b, 0x44},
+ {0x3810, 0x00},
+ {0x3811, 0x02},
+ {0x3812, 0x00},
+ {0x3813, 0x02},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3816, 0x01},
+ {0x3817, 0x01},
+
+ {0x3820, 0xb0},
+ {0x3821, 0x00},
+ {0x3822, 0x80},
+ {0x3823, 0x08},
+ {0x3824, 0x00},
+ {0x3825, 0x20},
+ {0x3826, 0x00},
+ {0x3827, 0x08},
+ {0x382a, 0x00},
+ {0x382b, 0x08},
+ {0x382d, 0x00},
+ {0x382e, 0x00},
+ {0x382f, 0x23},
+ {0x3834, 0x00},
+ {0x3839, 0x00},
+ {0x383a, 0xd1},
+ {0x383e, 0x03},
+ {0x393d, 0x29},
+ {0x393f, 0x6e},
+ {0x394b, 0x06},
+ {0x394c, 0x06},
+ {0x394d, 0x08},
+ {0x394f, 0x01},
+ {0x3950, 0x01},
+ {0x3951, 0x01},
+ {0x3952, 0x01},
+ {0x3953, 0x01},
+ {0x3954, 0x01},
+ {0x3955, 0x01},
+ {0x3956, 0x01},
+ {0x3957, 0x0e},
+ {0x3958, 0x08},
+ {0x3959, 0x08},
+ {0x395a, 0x08},
+ {0x395b, 0x13},
+ {0x395c, 0x09},
+ {0x395d, 0x05},
+ {0x395e, 0x02},
+ {0x395f, 0x00},
+ {0x395f, 0x00},
+ {0x3960, 0x00},
+ {0x3961, 0x00},
+ {0x3962, 0x00},
+ {0x3963, 0x00},
+ {0x3964, 0x00},
+ {0x3965, 0x00},
+ {0x3966, 0x00},
+ {0x3967, 0x00},
+ {0x3968, 0x01},
+ {0x3969, 0x01},
+ {0x396a, 0x01},
+ {0x396b, 0x01},
+ {0x396c, 0x10},
+ {0x396d, 0xf0},
+ {0x396e, 0x11},
+ {0x396f, 0x00},
+ {0x3970, 0x37},
+ {0x3971, 0x37},
+ {0x3972, 0x37},
+ {0x3973, 0x37},
+ {0x3974, 0x00},
+ {0x3975, 0x3c},
+ {0x3976, 0x3c},
+ {0x3977, 0x3c},
+ {0x3978, 0x3c},
+ {0x3c00, 0x0f},
+ {0x3c20, 0x01},
+ {0x3c21, 0x08},
+ {0x3f00, 0x8b},
+ {0x3f02, 0x0f},
+ {0x4000, 0xc3},
+ {0x4001, 0xe0},
+ {0x4002, 0x00},
+ {0x4003, 0x40},
+ {0x4008, 0x04},
+ {0x4009, 0x23},
+ {0x400a, 0x04},
+ {0x400b, 0x01},
+ {0x4077, 0x06},
+ {0x4078, 0x00},
+ {0x4079, 0x1a},
+ {0x407a, 0x7f},
+ {0x407b, 0x01},
+ {0x4080, 0x03},
+ {0x4081, 0x84},
+ {0x4308, 0x03},
+ {0x4309, 0xff},
+ {0x430d, 0x00},
+ {0x4806, 0x00},
+ {0x4813, 0x00},
+ {0x4837, 0x10},
+ {0x4857, 0x05},
+ {0x4500, 0x07},
+ {0x4501, 0x00},
+ {0x4503, 0x00},
+ {0x450a, 0x04},
+ {0x450e, 0x00},
+ {0x450f, 0x00},
+ {0x4900, 0x00},
+ {0x4901, 0x00},
+ {0x4902, 0x01},
+ {0x5001, 0x50},
+ {0x5006, 0x00},
+ {0x5080, 0x40},
+ {0x5181, 0x2b},
+ {0x5202, 0xa3},
+ {0x5206, 0x01},
+ {0x5207, 0x00},
+ {0x520a, 0x01},
+ {0x520b, 0x00},
+ {0x365d, 0x00},
+ {0x4815, 0x40},
+ {0x4816, 0x12},
+ {0x4f00, 0x01},
+};
+
+static const struct reg_sequence sensor_1928x1092_30fps_1lane_setting[] = {
+ {0x301b, 0xd2},
+ {0x3027, 0xe1},
+ {0x380c, 0x08},
+ {0x380d, 0xe8},
+ {0x380e, 0x04},
+ {0x380f, 0x8c},
+ {0x394e, 0x0b},
+ {0x4800, 0x24},
+ {0x5000, 0xf5},
+ /* plls */
+ {0x0303, 0x05},
+ {0x0305, 0x90},
+ {0x0316, 0x90},
+ {0x3016, 0x12},
+};
+
+static const struct reg_sequence sensor_1928x1092_30fps_2lane_setting[] = {
+ {0x301b, 0xf0},
+ {0x3027, 0xf1},
+ {0x380c, 0x04},
+ {0x380d, 0x74},
+ {0x380e, 0x09},
+ {0x380f, 0x18},
+ {0x394e, 0x0a},
+ {0x4041, 0x20},
+ {0x4884, 0x04},
+ {0x4800, 0x64},
+ {0x4d00, 0x03},
+ {0x4d01, 0xd8},
+ {0x4d02, 0xba},
+ {0x4d03, 0xa0},
+ {0x4d04, 0xb7},
+ {0x4d05, 0x34},
+ {0x4d0d, 0x00},
+ {0x5000, 0xfd},
+ {0x481f, 0x30},
+ /* plls */
+ {0x0303, 0x05},
+ {0x0305, 0x90},
+ {0x0316, 0x90},
+ {0x3016, 0x32},
+};
+
+static const char * const ov02c10_test_pattern_menu[] = {
+ "Disabled",
+ "Color Bar",
+ "Top-Bottom Darker Color Bar",
+ "Right-Left Darker Color Bar",
+ "Color Bar type 4",
+};
+
+static const s64 link_freq_menu_items[] = {
+ OV02C10_LINK_FREQ_400MHZ,
+};
+
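+/*
+ * Each mode pairs a shared register sequence with per-lane-count tables;
+ * ov02c10_enable_streams() writes the shared sequence first, then the
+ * lane table indexed by (mipi_lanes - 1).
+ */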
+static const struct ov02c10_mode supported_modes[] = {
+ {
+ .width = 1928,
+ .height = 1092,
+ .hts = 2280,
+ .vts_min = 1164,
+ .reg_sequence = sensor_1928x1092_30fps_setting,
+ .sequence_length = ARRAY_SIZE(sensor_1928x1092_30fps_setting),
+ .lane_settings = {
+ sensor_1928x1092_30fps_1lane_setting,
+ sensor_1928x1092_30fps_2lane_setting
+ },
+ .lane_settings_length = {
+ ARRAY_SIZE(sensor_1928x1092_30fps_1lane_setting),
+ ARRAY_SIZE(sensor_1928x1092_30fps_2lane_setting),
+ },
+ },
+};
+
+static const char * const ov02c10_supply_names[] = {
+ "dovdd", /* Digital I/O power */
+ "avdd", /* Analog power */
+ "dvdd", /* Digital core power */
+};
+
+struct ov02c10 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct regmap *regmap;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ struct clk *img_clk;
+ struct gpio_desc *reset;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(ov02c10_supply_names)];
+
+ /* MIPI lane info */
+ u32 link_freq_index;
+ u8 mipi_lanes;
+};
+
+static inline struct ov02c10 *to_ov02c10(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct ov02c10, sd);
+}
+
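+/*
+ * Menu index 0 disables the test pattern generator; indexes 1-4 select
+ * bar types 0-3 via bits [1:0] of the test pattern register, with BIT(7)
+ * acting as the enable flag.
+ */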
+static int ov02c10_test_pattern(struct ov02c10 *ov02c10, int pattern)
+{
+ int ret = 0;
+
+ if (!pattern)
+ return cci_update_bits(ov02c10->regmap, OV02C10_REG_TEST_PATTERN,
+ BIT(7), 0, NULL);
+
+ cci_update_bits(ov02c10->regmap, OV02C10_REG_TEST_PATTERN,
+ 0x03, pattern - 1, &ret);
+ cci_update_bits(ov02c10->regmap, OV02C10_REG_TEST_PATTERN,
+ BIT(7), OV02C10_TEST_PATTERN_ENABLE, &ret);
+ return ret;
+}
+
+static int ov02c10_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov02c10 *ov02c10 = container_of(ctrl->handler,
+ struct ov02c10, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd);
+ const u32 height = supported_modes[0].height;
+ s64 exposure_max;
+ int ret = 0;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = height + ctrl->val - OV02C10_EXPOSURE_MAX_MARGIN;
+ __v4l2_ctrl_modify_range(ov02c10->exposure,
+ ov02c10->exposure->minimum,
+ exposure_max, ov02c10->exposure->step,
+ exposure_max);
+ }
+
+ /* V4L2 controls values will be applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ cci_write(ov02c10->regmap, OV02C10_REG_ANALOG_GAIN,
+ ctrl->val << 4, &ret);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ cci_write(ov02c10->regmap, OV02C10_REG_DIGITAL_GAIN,
+ ctrl->val << 6, &ret);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ cci_write(ov02c10->regmap, OV02C10_REG_EXPOSURE,
+ ctrl->val, &ret);
+ break;
+
+ case V4L2_CID_VBLANK:
+ cci_write(ov02c10->regmap, OV02C10_REG_VTS, height + ctrl->val,
+ &ret);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov02c10_test_pattern(ov02c10, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov02c10_ctrl_ops = {
+ .s_ctrl = ov02c10_set_ctrl,
+};
+
+static int ov02c10_init_controls(struct ov02c10 *ov02c10)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd);
+ struct v4l2_ctrl_handler *ctrl_hdlr = &ov02c10->ctrl_handler;
+ const struct ov02c10_mode *mode = &supported_modes[0];
+ u32 vblank_min, vblank_max, vblank_default, vts_def;
+ struct v4l2_fwnode_device_properties props;
+ s64 exposure_max, h_blank, pixel_rate;
+ int ret;
+
+ v4l2_ctrl_handler_init(ctrl_hdlr, 10);
+
+ ov02c10->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
+ &ov02c10_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ov02c10->link_freq_index, 0,
+ link_freq_menu_items);
+ if (ov02c10->link_freq)
+ ov02c10->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ /* MIPI lanes are DDR -> use link-freq * 2 */
+ pixel_rate = div_u64(link_freq_menu_items[ov02c10->link_freq_index] *
+ 2 * ov02c10->mipi_lanes, OV02C10_RGB_DEPTH);
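+ /*
+ * E.g. assuming the 400 MHz link frequency implied by the macro name
+ * and 2 data lanes: 400000000 * 2 * 2 / 10 = 160000000 pixels per
+ * second (half that in the 1-lane case).
+ */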
+
+ ov02c10->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ pixel_rate, 1, pixel_rate);
+
+ /*
+ * For the default VTS, multiply the minimum VTS by the number of
+ * lanes to keep the default FPS the same independent of the lane
+ * count.
+ */
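+ /*
+ * Frame period is VTS * HTS / pixel rate, and the pixel rate above
+ * scales linearly with the lane count, so scaling VTS by the lane
+ * count leaves the frame period, and thus the FPS, unchanged.
+ */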
+ vts_def = mode->vts_min * ov02c10->mipi_lanes;
+
+ vblank_min = mode->vts_min - mode->height;
+ vblank_max = OV02C10_VTS_MAX - mode->height;
+ vblank_default = vts_def - mode->height;
+ ov02c10->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops,
+ V4L2_CID_VBLANK, vblank_min,
+ vblank_max, 1, vblank_default);
+
+ h_blank = mode->hts - mode->width;
+ ov02c10->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank,
+ 1, h_blank);
+ if (ov02c10->hblank)
+ ov02c10->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV02C10_ANAL_GAIN_MIN, OV02C10_ANAL_GAIN_MAX,
+ OV02C10_ANAL_GAIN_STEP, OV02C10_ANAL_GAIN_DEFAULT);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ OV02C10_DGTL_GAIN_MIN, OV02C10_DGTL_GAIN_MAX,
+ OV02C10_DGTL_GAIN_STEP, OV02C10_DGTL_GAIN_DEFAULT);
+ exposure_max = vts_def - OV02C10_EXPOSURE_MAX_MARGIN;
+ ov02c10->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV02C10_EXPOSURE_MIN,
+ exposure_max,
+ OV02C10_EXPOSURE_STEP,
+ exposure_max);
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov02c10_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov02c10_test_pattern_menu) - 1,
+ 0, 0, ov02c10_test_pattern_menu);
+
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov02c10_ctrl_ops, &props);
+
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ ov02c10->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
+static void ov02c10_update_pad_format(const struct ov02c10_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+static int ov02c10_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ const struct ov02c10_mode *mode = &supported_modes[0];
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov02c10 *ov02c10 = to_ov02c10(sd);
+ const struct reg_sequence *reg_sequence;
+ int ret, sequence_length;
+
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret)
+ return ret;
+
+ reg_sequence = mode->reg_sequence;
+ sequence_length = mode->sequence_length;
+ ret = regmap_multi_reg_write(ov02c10->regmap,
+ reg_sequence, sequence_length);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode\n");
+ goto out;
+ }
+
+ reg_sequence = mode->lane_settings[ov02c10->mipi_lanes - 1];
+ sequence_length = mode->lane_settings_length[ov02c10->mipi_lanes - 1];
+ ret = regmap_multi_reg_write(ov02c10->regmap,
+ reg_sequence, sequence_length);
+ if (ret) {
+ dev_err(&client->dev, "failed to write lane settings\n");
+ goto out;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(ov02c10->sd.ctrl_handler);
+ if (ret)
+ goto out;
+
+ ret = cci_write(ov02c10->regmap, OV02C10_REG_STREAM_CONTROL, 1, NULL);
+out:
+ if (ret)
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static int ov02c10_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov02c10 *ov02c10 = to_ov02c10(sd);
+
+ cci_write(ov02c10->regmap, OV02C10_REG_STREAM_CONTROL, 0, NULL);
+ pm_runtime_put(&client->dev);
+
+ return 0;
+}
+
+/* Acquire the reset GPIO and supply regulators used for power control */
+static int ov02c10_get_pm_resources(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov02c10 *ov02c10 = to_ov02c10(sd);
+ int i;
+
+ ov02c10->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ov02c10->reset))
+ return dev_err_probe(dev, PTR_ERR(ov02c10->reset),
+ "failed to get reset gpio\n");
+
+ for (i = 0; i < ARRAY_SIZE(ov02c10_supply_names); i++)
+ ov02c10->supplies[i].supply = ov02c10_supply_names[i];
+
+ return devm_regulator_bulk_get(dev, ARRAY_SIZE(ov02c10_supply_names),
+ ov02c10->supplies);
+}
+
+static int ov02c10_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov02c10 *ov02c10 = to_ov02c10(sd);
+
+ gpiod_set_value_cansleep(ov02c10->reset, 1);
+
+ regulator_bulk_disable(ARRAY_SIZE(ov02c10_supply_names),
+ ov02c10->supplies);
+
+ clk_disable_unprepare(ov02c10->img_clk);
+
+ return 0;
+}
+
+static int ov02c10_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov02c10 *ov02c10 = to_ov02c10(sd);
+ int ret;
+
+ ret = clk_prepare_enable(ov02c10->img_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable imaging clock: %d", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ov02c10_supply_names),
+ ov02c10->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable regulators: %d", ret);
+ clk_disable_unprepare(ov02c10->img_clk);
+ return ret;
+ }
+
+ if (ov02c10->reset) {
+ /* Assert reset for at least 2 ms on back-to-back off-on transitions */
+ usleep_range(2000, 2200);
+ gpiod_set_value_cansleep(ov02c10->reset, 0);
+ usleep_range(5000, 5100);
+ }
+
+ return 0;
+}
+
+static int ov02c10_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ const struct ov02c10_mode *mode = &supported_modes[0];
+ struct ov02c10 *ov02c10 = to_ov02c10(sd);
+ s32 vblank_def, h_blank;
+
+ ov02c10_update_pad_format(mode, &fmt->format);
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ /* Update limits and set FPS to default */
+ vblank_def = mode->vts_min * ov02c10->mipi_lanes - mode->height;
+ __v4l2_ctrl_modify_range(ov02c10->vblank, mode->vts_min - mode->height,
+ OV02C10_VTS_MAX - mode->height, 1, vblank_def);
+ __v4l2_ctrl_s_ctrl(ov02c10->vblank, vblank_def);
+ h_blank = mode->hts - mode->width;
+ __v4l2_ctrl_modify_range(ov02c10->hblank, h_blank, h_blank, 1, h_blank);
+
+ return 0;
+}
+
+static int ov02c10_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int ov02c10_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int ov02c10_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ ov02c10_update_pad_format(&supported_modes[0],
+ v4l2_subdev_state_get_format(sd_state, 0));
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov02c10_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_pad_ops ov02c10_pad_ops = {
+ .set_fmt = ov02c10_set_format,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .enum_mbus_code = ov02c10_enum_mbus_code,
+ .enum_frame_size = ov02c10_enum_frame_size,
+ .enable_streams = ov02c10_enable_streams,
+ .disable_streams = ov02c10_disable_streams,
+};
+
+static const struct v4l2_subdev_ops ov02c10_subdev_ops = {
+ .video = &ov02c10_video_ops,
+ .pad = &ov02c10_pad_ops,
+};
+
+static const struct media_entity_operations ov02c10_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops ov02c10_internal_ops = {
+ .init_state = ov02c10_init_state,
+};
+
+static int ov02c10_identify_module(struct ov02c10 *ov02c10)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd);
+ u64 chip_id;
+ int ret;
+
+ ret = cci_read(ov02c10->regmap, OV02C10_REG_CHIP_ID, &chip_id, NULL);
+ if (ret)
+ return ret;
+
+ if (chip_id != OV02C10_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%llx",
+ OV02C10_CHIP_ID, chip_id);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int ov02c10_check_hwcfg(struct device *dev, struct ov02c10 *ov02c10)
+{
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ struct fwnode_handle *ep, *fwnode = dev_fwnode(dev);
+ unsigned long link_freq_bitmap;
+ u32 mclk;
+ int ret;
+
+ /*
+ * Sometimes the fwnode graph is initialized by the bridge driver,
+ * wait for this.
+ */
+ ep = fwnode_graph_get_endpoint_by_id(fwnode, 0, 0, 0);
+ if (!ep)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "waiting for fwnode graph endpoint\n");
+
+ ov02c10->img_clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(ov02c10->img_clk)) {
+ fwnode_handle_put(ep);
+ return dev_err_probe(dev, PTR_ERR(ov02c10->img_clk),
+ "failed to get imaging clock\n");
+ }
+
+ if (ov02c10->img_clk) {
+ mclk = clk_get_rate(ov02c10->img_clk);
+ } else {
+ ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
+ if (ret) {
+ fwnode_handle_put(ep);
+ return dev_err_probe(dev, ret,
+ "reading clock-frequency property\n");
+ }
+ }
+
+ if (mclk != OV02C10_MCLK) {
+ fwnode_handle_put(ep);
+ return dev_err_probe(dev, -EINVAL,
+ "external clock %u is not supported\n",
+ mclk);
+ }
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return dev_err_probe(dev, ret, "parsing endpoint failed\n");
+
+ ret = v4l2_link_freq_to_bitmap(dev, bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ link_freq_menu_items,
+ ARRAY_SIZE(link_freq_menu_items),
+ &link_freq_bitmap);
+ if (ret)
+ goto check_hwcfg_error;
+
+ /* v4l2_link_freq_to_bitmap() guarantees at least 1 bit is set */
+ ov02c10->link_freq_index = ffs(link_freq_bitmap) - 1;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 1 &&
+ bus_cfg.bus.mipi_csi2.num_data_lanes != 2) {
+ ret = dev_err_probe(dev, -EINVAL,
+ "number of CSI2 data lanes %u is not supported\n",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ goto check_hwcfg_error;
+ }
+
+ ov02c10->mipi_lanes = bus_cfg.bus.mipi_csi2.num_data_lanes;
+
+check_hwcfg_error:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+ return ret;
+}
+
+static void ov02c10_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev)) {
+ ov02c10_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ }
+}
+
+static int ov02c10_probe(struct i2c_client *client)
+{
+ struct ov02c10 *ov02c10;
+ int ret;
+
+ ov02c10 = devm_kzalloc(&client->dev, sizeof(*ov02c10), GFP_KERNEL);
+ if (!ov02c10)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&ov02c10->sd, client, &ov02c10_subdev_ops);
+
+ /* Check HW config */
+ ret = ov02c10_check_hwcfg(&client->dev, ov02c10);
+ if (ret)
+ return ret;
+
+ ret = ov02c10_get_pm_resources(&client->dev);
+ if (ret)
+ return ret;
+
+ ov02c10->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(ov02c10->regmap))
+ return PTR_ERR(ov02c10->regmap);
+
+ ret = ov02c10_power_on(&client->dev);
+ if (ret) {
+ dev_err_probe(&client->dev, ret, "failed to power on\n");
+ return ret;
+ }
+
+ ret = ov02c10_identify_module(ov02c10);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ goto probe_error_power_off;
+ }
+
+ ret = ov02c10_init_controls(ov02c10);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ov02c10->sd.internal_ops = &ov02c10_internal_ops;
+ ov02c10->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov02c10->sd.entity.ops = &ov02c10_subdev_entity_ops;
+ ov02c10->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ov02c10->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&ov02c10->sd.entity, 1, &ov02c10->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ov02c10->sd.state_lock = ov02c10->ctrl_handler.lock;
+ ret = v4l2_subdev_init_finalize(&ov02c10->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to init subdev: %d", ret);
+ goto probe_error_media_entity_cleanup;
+ }
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+
+ ret = v4l2_async_register_subdev_sensor(&ov02c10->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto probe_error_v4l2_subdev_cleanup;
+ }
+
+ pm_runtime_idle(&client->dev);
+ return 0;
+
+probe_error_v4l2_subdev_cleanup:
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ v4l2_subdev_cleanup(&ov02c10->sd);
+
+probe_error_media_entity_cleanup:
+ media_entity_cleanup(&ov02c10->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(ov02c10->sd.ctrl_handler);
+
+probe_error_power_off:
+ ov02c10_power_off(&client->dev);
+
+ return ret;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(ov02c10_pm_ops, ov02c10_power_off,
+ ov02c10_power_on, NULL);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ov02c10_acpi_ids[] = {
+ { "OVTI02C1" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(acpi, ov02c10_acpi_ids);
+#endif
+
+static const struct of_device_id ov02c10_of_match[] = {
+ { .compatible = "ovti,ov02c10" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ov02c10_of_match);
+
+static struct i2c_driver ov02c10_i2c_driver = {
+ .driver = {
+ .name = "ov02c10",
+ .pm = pm_sleep_ptr(&ov02c10_pm_ops),
+ .acpi_match_table = ACPI_PTR(ov02c10_acpi_ids),
+ .of_match_table = ov02c10_of_match,
+ },
+ .probe = ov02c10_probe,
+ .remove = ov02c10_remove,
+};
+
+module_i2c_driver(ov02c10_i2c_driver);
+
+MODULE_AUTHOR("Hao Yao <hao.yao@intel.com>");
+MODULE_AUTHOR("Heimir Thor Sverrisson <heimir.sverrisson@gmail.com>");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_DESCRIPTION("OmniVision OV02C10 sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov02e10.c b/drivers/media/i2c/ov02e10.c
new file mode 100644
index 000000000000..d74dc62e189d
--- /dev/null
+++ b/drivers/media/i2c/ov02e10.c
@@ -0,0 +1,969 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2023 Intel Corporation.
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OV02E10_LINK_FREQ_360MHZ 360000000ULL
+#define OV02E10_SCLK 36000000LL
+#define OV02E10_MCLK 19200000
+#define OV02E10_DATA_LANES 2
+#define OV02E10_RGB_DEPTH 10
+
+#define OV02E10_REG_PAGE_FLAG CCI_REG8(0xfd)
+#define OV02E10_PAGE_0 0x0
+#define OV02E10_PAGE_1 0x1
+#define OV02E10_PAGE_2 0x2
+#define OV02E10_PAGE_3 0x3
+#define OV02E10_PAGE_5 0x4
+#define OV02E10_PAGE_7 0x5
+#define OV02E10_PAGE_8 0x6
+#define OV02E10_PAGE_9 0xF
+#define OV02E10_PAGE_D 0x8
+#define OV02E10_PAGE_E 0x9
+#define OV02E10_PAGE_F 0xA
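+/*
+ * The register map is banked: a bank is selected by writing one of the
+ * page values above to OV02E10_REG_PAGE_FLAG before accessing any
+ * register within it.
+ */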
+
+#define OV02E10_REG_CHIP_ID CCI_REG32(0x00)
+#define OV02E10_CHIP_ID 0x45025610
+
+/* Horizontal and vertical flip */
+#define OV02E10_REG_ORIENTATION CCI_REG8(0x32)
+
+/* vertical-timings from sensor */
+#define OV02E10_REG_VTS CCI_REG16(0x35)
+#define OV02E10_VTS_DEF 2244
+#define OV02E10_VTS_MIN 2244
+#define OV02E10_VTS_MAX 0x7fff
+
+/* horizontal-timings from sensor */
+#define OV02E10_REG_HTS CCI_REG16(0x37)
+
+/* Exposure controls from sensor */
+#define OV02E10_REG_EXPOSURE CCI_REG16(0x03)
+#define OV02E10_EXPOSURE_MIN 1
+#define OV02E10_EXPOSURE_MAX_MARGIN 2
+#define OV02E10_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define OV02E10_REG_ANALOG_GAIN CCI_REG8(0x24)
+#define OV02E10_ANAL_GAIN_MIN 0x10
+#define OV02E10_ANAL_GAIN_MAX 0xf8
+#define OV02E10_ANAL_GAIN_STEP 1
+
+/* Digital gain controls from sensor */
+#define OV02E10_REG_DIGITAL_GAIN CCI_REG16(0x21)
+#define OV02E10_DGTL_GAIN_MIN 256
+#define OV02E10_DGTL_GAIN_MAX 1020
+#define OV02E10_DGTL_GAIN_STEP 1
+#define OV02E10_DGTL_GAIN_DEFAULT 256
+
+/* Register update control */
+#define OV02E10_REG_COMMAND_UPDATE CCI_REG8(0xe7)
+#define OV02E10_COMMAND_UPDATE 0x00
+#define OV02E10_COMMAND_HOLD 0x01
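+/*
+ * Writes issued between OV02E10_COMMAND_HOLD and OV02E10_COMMAND_UPDATE
+ * are latched as a group (see ov02e10_set_ctrl()), so related controls
+ * such as exposure and gain appear to take effect on the same frame.
+ */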
+
+/* Test Pattern Control */
+#define OV02E10_REG_TEST_PATTERN CCI_REG8(0x12)
+#define OV02E10_TEST_PATTERN_ENABLE BIT(0)
+#define OV02E10_TEST_PATTERN_BAR_SHIFT 1
+
+struct reg_sequence_list {
+ u32 num_regs;
+ const struct reg_sequence *regs;
+};
+
+struct ov02e10_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Horizontal timing size */
+ u32 hts;
+
+ /* Default vertical timing */
+ u32 vts_def;
+
+ /* Min vertical timing size */
+ u32 vts_min;
+
+ /* Sensor register settings for this resolution */
+ const struct reg_sequence_list reg_list;
+};
+
+static const struct reg_sequence mode_1928x1088_30fps_2lane[] = {
+ { 0xfd, 0x00 },
+ { 0x20, 0x00 },
+ { 0x20, 0x0b },
+ { 0x21, 0x02 },
+ { 0x10, 0x23 },
+ { 0xc5, 0x04 },
+ { 0x21, 0x00 },
+ { 0x14, 0x96 },
+ { 0x17, 0x01 },
+ { 0xfd, 0x01 },
+ { 0x03, 0x00 },
+ { 0x04, 0x04 },
+ { 0x05, 0x04 },
+ { 0x06, 0x62 },
+ { 0x07, 0x01 },
+ { 0x22, 0x80 },
+ { 0x24, 0xff },
+ { 0x40, 0xc6 },
+ { 0x41, 0x18 },
+ { 0x45, 0x3f },
+ { 0x48, 0x0c },
+ { 0x4c, 0x08 },
+ { 0x51, 0x12 },
+ { 0x52, 0x10 },
+ { 0x57, 0x98 },
+ { 0x59, 0x06 },
+ { 0x5a, 0x04 },
+ { 0x5c, 0x38 },
+ { 0x5e, 0x10 },
+ { 0x67, 0x11 },
+ { 0x7b, 0x04 },
+ { 0x81, 0x12 },
+ { 0x90, 0x51 },
+ { 0x91, 0x09 },
+ { 0x92, 0x21 },
+ { 0x93, 0x28 },
+ { 0x95, 0x54 },
+ { 0x9d, 0x20 },
+ { 0x9e, 0x04 },
+ { 0xb1, 0x9a },
+ { 0xb2, 0x86 },
+ { 0xb6, 0x3f },
+ { 0xb9, 0x30 },
+ { 0xc1, 0x01 },
+ { 0xc5, 0xa0 },
+ { 0xc6, 0x73 },
+ { 0xc7, 0x04 },
+ { 0xc8, 0x25 },
+ { 0xc9, 0x05 },
+ { 0xca, 0x28 },
+ { 0xcb, 0x00 },
+ { 0xcf, 0x16 },
+ { 0xd2, 0xd0 },
+ { 0xd7, 0x3f },
+ { 0xd8, 0x40 },
+ { 0xd9, 0x40 },
+ { 0xda, 0x44 },
+ { 0xdb, 0x3d },
+ { 0xdc, 0x3d },
+ { 0xdd, 0x3d },
+ { 0xde, 0x3d },
+ { 0xdf, 0xf0 },
+ { 0xea, 0x0f },
+ { 0xeb, 0x04 },
+ { 0xec, 0x29 },
+ { 0xee, 0x47 },
+ { 0xfd, 0x01 },
+ { 0x31, 0x01 },
+ { 0x27, 0x00 },
+ { 0x2f, 0x41 },
+ { 0xfd, 0x02 },
+ { 0xa1, 0x01 },
+ { 0xfd, 0x02 },
+ { 0x9a, 0x03 },
+ { 0xfd, 0x03 },
+ { 0x9d, 0x0f },
+ { 0xfd, 0x07 },
+ { 0x42, 0x00 },
+ { 0x43, 0xad },
+ { 0x44, 0x00 },
+ { 0x45, 0xa8 },
+ { 0x46, 0x00 },
+ { 0x47, 0xa8 },
+ { 0x48, 0x00 },
+ { 0x49, 0xad },
+ { 0xfd, 0x00 },
+ { 0xc4, 0x01 },
+ { 0xfd, 0x01 },
+ { 0x33, 0x03 },
+ { 0xfd, 0x00 },
+ { 0x20, 0x1f },
+};
+
+static const char *const ov02e10_test_pattern_menu[] = {
+ "Disabled",
+ "Color Bar",
+};
+
+static const s64 link_freq_menu_items[] = {
+ OV02E10_LINK_FREQ_360MHZ,
+};
+
+static const struct ov02e10_mode supported_modes[] = {
+ {
+ .width = 1928,
+ .height = 1088,
+ .hts = 534,
+ .vts_def = 2244,
+ .vts_min = 2244,
+ .reg_list = {
+ .num_regs = ARRAY_SIZE(mode_1928x1088_30fps_2lane),
+ .regs = mode_1928x1088_30fps_2lane,
+ },
+ },
+};
+
+static const char * const ov02e10_supply_names[] = {
+ "dovdd", /* Digital I/O power */
+ "avdd", /* Analog power */
+ "dvdd", /* Digital core power */
+};
+
+struct ov02e10 {
+ struct regmap *regmap;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *hflip;
+
+ struct clk *img_clk;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(ov02e10_supply_names)];
+ struct gpio_desc *reset;
+
+ /* Current mode */
+ const struct ov02e10_mode *cur_mode;
+
+ /* MIPI lanes info */
+ u32 link_freq_index;
+ u8 mipi_lanes;
+};
+
+static inline struct ov02e10 *to_ov02e10(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct ov02e10, sd);
+}
+
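+/*
+ * Pixel rate = link frequency * 2 (DDR) * lane count / bits per pixel.
+ * For the single supported 360 MHz, 2-lane, 10-bit configuration this
+ * gives 360000000 * 2 * 2 / 10 = 144000000 pixels per second.
+ */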
+static u64 to_pixel_rate(u32 f_index)
+{
+ u64 pixel_rate = link_freq_menu_items[f_index] * 2 * OV02E10_DATA_LANES;
+
+ do_div(pixel_rate, OV02E10_RGB_DEPTH);
+
+ return pixel_rate;
+}
+
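+/*
+ * The mode's hts value is expressed in SCLK cycles rather than in pixels
+ * (note hts < width for the supported mode), so convert it to pixels at
+ * the derived pixel rate before computing the horizontal blanking.
+ */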
+static u64 to_pixels_per_line(u32 hts, u32 f_index)
+{
+ u64 ppl = hts * to_pixel_rate(f_index);
+
+ do_div(ppl, OV02E10_SCLK);
+
+ return ppl;
+}
+
+static void ov02e10_test_pattern(struct ov02e10 *ov02e10, u32 pattern, int *pret)
+{
+ if (pattern)
+ pattern = pattern << OV02E10_TEST_PATTERN_BAR_SHIFT |
+ OV02E10_TEST_PATTERN_ENABLE;
+
+ cci_write(ov02e10->regmap, OV02E10_REG_TEST_PATTERN, pattern, pret);
+}
+
+static int ov02e10_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov02e10 *ov02e10 = container_of(ctrl->handler,
+ struct ov02e10, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd);
+ s64 exposure_max;
+ int ret;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = ov02e10->cur_mode->height + ctrl->val -
+ OV02E10_EXPOSURE_MAX_MARGIN;
+ ret = __v4l2_ctrl_modify_range(ov02e10->exposure,
+ ov02e10->exposure->minimum,
+ exposure_max,
+ ov02e10->exposure->step,
+ exposure_max);
+ if (ret)
+ return ret;
+ }
+
+ /* V4L2 controls values will be applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ ret = cci_write(ov02e10->regmap, OV02E10_REG_COMMAND_UPDATE,
+ OV02E10_COMMAND_HOLD, NULL);
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ cci_write(ov02e10->regmap, OV02E10_REG_PAGE_FLAG,
+ OV02E10_PAGE_1, &ret);
+ cci_write(ov02e10->regmap, OV02E10_REG_ANALOG_GAIN,
+ ctrl->val, &ret);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ cci_write(ov02e10->regmap, OV02E10_REG_PAGE_FLAG,
+ OV02E10_PAGE_1, &ret);
+ cci_write(ov02e10->regmap, OV02E10_REG_DIGITAL_GAIN,
+ ctrl->val, &ret);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ cci_write(ov02e10->regmap, OV02E10_REG_PAGE_FLAG,
+ OV02E10_PAGE_1, &ret);
+ cci_write(ov02e10->regmap, OV02E10_REG_EXPOSURE,
+ ctrl->val, &ret);
+ break;
+
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ cci_write(ov02e10->regmap, OV02E10_REG_PAGE_FLAG,
+ OV02E10_PAGE_1, &ret);
+ cci_write(ov02e10->regmap, OV02E10_REG_ORIENTATION,
+ ov02e10->hflip->val | ov02e10->vflip->val << 1, &ret);
+ break;
+ case V4L2_CID_VBLANK:
+ cci_write(ov02e10->regmap, OV02E10_REG_PAGE_FLAG,
+ OV02E10_PAGE_1, &ret);
+ cci_write(ov02e10->regmap, OV02E10_REG_VTS,
+ ov02e10->cur_mode->height + ctrl->val, &ret);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ cci_write(ov02e10->regmap, OV02E10_REG_PAGE_FLAG,
+ OV02E10_PAGE_1, &ret);
+ ov02e10_test_pattern(ov02e10, ctrl->val, &ret);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ cci_write(ov02e10->regmap, OV02E10_REG_COMMAND_UPDATE,
+ OV02E10_COMMAND_UPDATE, &ret);
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov02e10_ctrl_ops = {
+ .s_ctrl = ov02e10_set_ctrl,
+};
+
+static int ov02e10_init_controls(struct ov02e10 *ov02e10)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd);
+ struct v4l2_ctrl_handler *ctrl_hdlr = &ov02e10->ctrl_handler;
+ const struct ov02e10_mode *mode = ov02e10->cur_mode;
+ u32 vblank_min, vblank_max, vblank_def;
+ struct v4l2_fwnode_device_properties props;
+ s64 exposure_max, h_blank, pixel_rate;
+ int ret;
+
+ v4l2_ctrl_handler_init(ctrl_hdlr, 12);
+
+ ov02e10->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
+ &ov02e10_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ov02e10->link_freq_index,
+ 0, link_freq_menu_items);
+ if (ov02e10->link_freq)
+ ov02e10->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ pixel_rate = to_pixel_rate(ov02e10->link_freq_index);
+ ov02e10->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ pixel_rate, 1, pixel_rate);
+
+ vblank_min = mode->vts_min - mode->height;
+ vblank_max = OV02E10_VTS_MAX - mode->height;
+ vblank_def = mode->vts_def - mode->height;
+ ov02e10->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops,
+ V4L2_CID_VBLANK, vblank_min,
+ vblank_max, 1, vblank_def);
+
+ h_blank = mode->hts - mode->width;
+ ov02e10->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank,
+ 1, h_blank);
+ if (ov02e10->hblank)
+ ov02e10->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV02E10_ANAL_GAIN_MIN, OV02E10_ANAL_GAIN_MAX,
+ OV02E10_ANAL_GAIN_STEP, OV02E10_ANAL_GAIN_MIN);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ OV02E10_DGTL_GAIN_MIN, OV02E10_DGTL_GAIN_MAX,
+ OV02E10_DGTL_GAIN_STEP, OV02E10_DGTL_GAIN_DEFAULT);
+
+ exposure_max = mode->vts_def - OV02E10_EXPOSURE_MAX_MARGIN;
+ ov02e10->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV02E10_EXPOSURE_MIN,
+ exposure_max,
+ OV02E10_EXPOSURE_STEP,
+ exposure_max);
+
+ ov02e10->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ if (ov02e10->hflip)
+ ov02e10->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ ov02e10->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (ov02e10->vflip)
+ ov02e10->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov02e10_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov02e10_test_pattern_menu) - 1,
+ 0, 0, ov02e10_test_pattern_menu);
+
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov02e10_ctrl_ops, &props);
+
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ ov02e10->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
+static void ov02e10_update_pad_format(const struct ov02e10_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
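+/*
+ * Stream on/off uses two raw writes taken from the vendor sequence:
+ * register 0xa0 on page 0 selects the streaming state and the write of
+ * 0x02 to register 0x01 on page 1 then applies it.
+ */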
+static int ov02e10_set_stream_mode(struct ov02e10 *ov02e10, u8 val)
+{
+ int ret = 0;
+
+ cci_write(ov02e10->regmap, OV02E10_REG_PAGE_FLAG, OV02E10_PAGE_0, &ret);
+ cci_write(ov02e10->regmap, CCI_REG8(0xa0), val, &ret);
+ cci_write(ov02e10->regmap, OV02E10_REG_PAGE_FLAG, OV02E10_PAGE_1, &ret);
+ cci_write(ov02e10->regmap, CCI_REG8(0x01), 0x02, &ret);
+
+ return ret;
+}
+
+static int ov02e10_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov02e10 *ov02e10 = to_ov02e10(sd);
+ const struct reg_sequence_list *reg_list;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret)
+ return ret;
+
+ reg_list = &ov02e10->cur_mode->reg_list;
+ ret = regmap_multi_reg_write(ov02e10->regmap, reg_list->regs,
+ reg_list->num_regs);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode\n");
+ goto out;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(ov02e10->sd.ctrl_handler);
+ if (ret)
+ goto out;
+
+ ret = ov02e10_set_stream_mode(ov02e10, 1);
+
+out:
+ if (ret)
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static int ov02e10_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov02e10 *ov02e10 = to_ov02e10(sd);
+
+ ov02e10_set_stream_mode(ov02e10, 0);
+ pm_runtime_put(&client->dev);
+
+ return 0;
+}
+
+static int ov02e10_get_pm_resources(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov02e10 *ov02e10 = to_ov02e10(sd);
+ int i;
+
+ ov02e10->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ov02e10->reset))
+ return dev_err_probe(dev, PTR_ERR(ov02e10->reset),
+ "failed to get reset gpio\n");
+
+ for (i = 0; i < ARRAY_SIZE(ov02e10_supply_names); i++)
+ ov02e10->supplies[i].supply = ov02e10_supply_names[i];
+
+ return devm_regulator_bulk_get(dev, ARRAY_SIZE(ov02e10_supply_names),
+ ov02e10->supplies);
+}
+
+static int ov02e10_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov02e10 *ov02e10 = to_ov02e10(sd);
+
+ if (ov02e10->reset)
+ gpiod_set_value_cansleep(ov02e10->reset, 1);
+
+ regulator_bulk_disable(ARRAY_SIZE(ov02e10_supply_names),
+ ov02e10->supplies);
+
+ clk_disable_unprepare(ov02e10->img_clk);
+
+ return 0;
+}
+
+static int ov02e10_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov02e10 *ov02e10 = to_ov02e10(sd);
+ int ret;
+
+ ret = clk_prepare_enable(ov02e10->img_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable imaging clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ov02e10_supply_names),
+ ov02e10->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable regulators\n");
+ goto disable_clk;
+ }
+
+ if (ov02e10->reset) {
+ usleep_range(5000, 5100);
+ gpiod_set_value_cansleep(ov02e10->reset, 0);
+ usleep_range(8000, 8100);
+ }
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(ov02e10->img_clk);
+
+ return ret;
+}
+
+static int ov02e10_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov02e10 *ov02e10 = to_ov02e10(sd);
+ const struct ov02e10_mode *mode;
+ s32 vblank_def, h_blank;
+ int ret = 0;
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height, fmt->format.width,
+ fmt->format.height);
+
+ ov02e10_update_pad_format(mode, &fmt->format);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format;
+ } else {
+ ov02e10->cur_mode = mode;
+ ret = __v4l2_ctrl_s_ctrl(ov02e10->link_freq,
+ ov02e10->link_freq_index);
+ if (ret)
+ return ret;
+
+ ret = __v4l2_ctrl_s_ctrl_int64(ov02e10->pixel_rate,
+ to_pixel_rate(ov02e10->link_freq_index));
+ if (ret)
+ return ret;
+
+ /* Update limits and set FPS to default */
+ vblank_def = mode->vts_def - mode->height;
+ ret = __v4l2_ctrl_modify_range(ov02e10->vblank,
+ mode->vts_min - mode->height,
+ OV02E10_VTS_MAX - mode->height,
+ 1, vblank_def);
+ if (ret)
+ return ret;
+
+ ret = __v4l2_ctrl_s_ctrl(ov02e10->vblank, vblank_def);
+ if (ret)
+ return ret;
+
+ h_blank = to_pixels_per_line(mode->hts, ov02e10->link_freq_index);
+ h_blank -= mode->width;
+ ret = __v4l2_ctrl_modify_range(ov02e10->hblank, h_blank,
+ h_blank, 1, h_blank);
+ }
+
+ return ret;
+}
+
+static int ov02e10_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov02e10 *ov02e10 = to_ov02e10(sd);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_state_get_format(sd_state, fmt->pad);
+ else
+ ov02e10_update_pad_format(ov02e10->cur_mode, &fmt->format);
+
+ return 0;
+}
+
+static int ov02e10_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int ov02e10_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int ov02e10_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ ov02e10_update_pad_format(&supported_modes[0],
+ v4l2_subdev_state_get_format(sd_state, 0));
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov02e10_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_pad_ops ov02e10_pad_ops = {
+ .set_fmt = ov02e10_set_format,
+ .get_fmt = ov02e10_get_format,
+ .enum_mbus_code = ov02e10_enum_mbus_code,
+ .enum_frame_size = ov02e10_enum_frame_size,
+ .enable_streams = ov02e10_enable_streams,
+ .disable_streams = ov02e10_disable_streams,
+};
+
+static const struct v4l2_subdev_ops ov02e10_subdev_ops = {
+ .video = &ov02e10_video_ops,
+ .pad = &ov02e10_pad_ops,
+};
+
+static const struct media_entity_operations ov02e10_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops ov02e10_internal_ops = {
+ .init_state = ov02e10_init_state,
+};
+
+static int ov02e10_identify_module(struct ov02e10 *ov02e10)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd);
+ int ret;
+ u64 val;
+
+ ret = cci_write(ov02e10->regmap, OV02E10_REG_PAGE_FLAG,
+ OV02E10_PAGE_0, NULL);
+ cci_read(ov02e10->regmap, OV02E10_REG_CHIP_ID, &val, &ret);
+ if (ret)
+ return ret;
+
+ if (val != OV02E10_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ OV02E10_CHIP_ID, (u32)val);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int ov02e10_check_hwcfg(struct device *dev, struct ov02e10 *ov02e10)
+{
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ unsigned long link_freq_bitmap;
+ u32 ext_clk;
+ int ret;
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "waiting for fwnode graph endpoint\n");
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return dev_err_probe(dev, ret, "parsing endpoint failed\n");
+
+ ov02e10->img_clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(ov02e10->img_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(ov02e10->img_clk),
+ "failed to get imaging clock\n");
+ goto out_err;
+ }
+
+ if (ov02e10->img_clk) {
+ ext_clk = clk_get_rate(ov02e10->img_clk);
+ } else {
+ ret = fwnode_property_read_u32(fwnode, "clock-frequency",
+ &ext_clk);
+ if (ret) {
+ dev_err(dev, "can't get clock frequency\n");
+ goto out_err;
+ }
+ }
+
+ if (ext_clk != OV02E10_MCLK) {
+ dev_err(dev, "external clock %d is not supported\n",
+ ext_clk);
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV02E10_DATA_LANES) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported\n",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequencies defined\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ ret = v4l2_link_freq_to_bitmap(dev, bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ link_freq_menu_items,
+ ARRAY_SIZE(link_freq_menu_items),
+ &link_freq_bitmap);
+ if (ret)
+ goto out_err;
+
+ /* v4l2_link_freq_to_bitmap() guarantees at least 1 bit is set */
+ ov02e10->link_freq_index = ffs(link_freq_bitmap) - 1;
+ ov02e10->mipi_lanes = bus_cfg.bus.mipi_csi2.num_data_lanes;
+
+out_err:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static void ov02e10_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
+
+ if (!pm_runtime_status_suspended(&client->dev)) {
+ ov02e10_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ }
+}
+
+static int ov02e10_probe(struct i2c_client *client)
+{
+ struct ov02e10 *ov02e10;
+ int ret;
+
+ ov02e10 = devm_kzalloc(&client->dev, sizeof(*ov02e10), GFP_KERNEL);
+ if (!ov02e10)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&ov02e10->sd, client, &ov02e10_subdev_ops);
+
+ /* Check HW config */
+ ret = ov02e10_check_hwcfg(&client->dev, ov02e10);
+ if (ret)
+ return ret;
+
+ /* Initialize subdev */
+ ov02e10->regmap = devm_cci_regmap_init_i2c(client, 8);
+ if (IS_ERR(ov02e10->regmap))
+ return PTR_ERR(ov02e10->regmap);
+
+ ret = ov02e10_get_pm_resources(&client->dev);
+ if (ret)
+ return ret;
+
+ ret = ov02e10_power_on(&client->dev);
+ if (ret) {
+ dev_err_probe(&client->dev, ret, "failed to power on\n");
+ return ret;
+ }
+
+ /* Check module identity */
+ ret = ov02e10_identify_module(ov02e10);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ goto probe_error_power_off;
+ }
+
+ ov02e10->cur_mode = &supported_modes[0];
+ ret = ov02e10_init_controls(ov02e10);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d\n", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ /* Initialize subdev */
+ ov02e10->sd.internal_ops = &ov02e10_internal_ops;
+ ov02e10->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov02e10->sd.entity.ops = &ov02e10_subdev_entity_ops;
+ ov02e10->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ /* Initialize source pad */
+ ov02e10->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&ov02e10->sd.entity, 1, &ov02e10->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ov02e10->sd.state_lock = ov02e10->ctrl_handler.lock;
+ ret = v4l2_subdev_init_finalize(&ov02e10->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to init subdev: %d", ret);
+ goto probe_error_media_entity_cleanup;
+ }
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+
+ ret = v4l2_async_register_subdev_sensor(&ov02e10->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto probe_error_v4l2_subdev_cleanup;
+ }
+
+ pm_runtime_idle(&client->dev);
+ return 0;
+
+probe_error_v4l2_subdev_cleanup:
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ v4l2_subdev_cleanup(&ov02e10->sd);
+
+probe_error_media_entity_cleanup:
+ media_entity_cleanup(&ov02e10->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(ov02e10->sd.ctrl_handler);
+
+probe_error_power_off:
+ ov02e10_power_off(&client->dev);
+
+ return ret;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(ov02e10_pm_ops, ov02e10_power_off,
+ ov02e10_power_on, NULL);
+
+static const struct acpi_device_id ov02e10_acpi_ids[] = {
+ { "OVTI02E1" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(acpi, ov02e10_acpi_ids);
+
+static const struct of_device_id ov02e10_of_match[] = {
+ { .compatible = "ovti,ov02e10" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ov02e10_of_match);
+
+static struct i2c_driver ov02e10_i2c_driver = {
+ .driver = {
+ .name = "ov02e10",
+ .pm = pm_sleep_ptr(&ov02e10_pm_ops),
+ .acpi_match_table = ov02e10_acpi_ids,
+ .of_match_table = ov02e10_of_match,
+ },
+ .probe = ov02e10_probe,
+ .remove = ov02e10_remove,
+};
+
+module_i2c_driver(ov02e10_i2c_driver);
+
+MODULE_AUTHOR("Jingjing Xiong <jingjing.xiong@intel.com>");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_AUTHOR("Alan Stern <stern@rowland.harvard.edu>");
+MODULE_AUTHOR("Bryan O'Donoghue <bryan.odonoghue@linaro.org>");
+MODULE_DESCRIPTION("OmniVision OV02E10 sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c
index cf0e41fc3071..e0094305ca2a 100644
--- a/drivers/media/i2c/ov08x40.c
+++ b/drivers/media/i2c/ov08x40.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -107,6 +108,7 @@
enum {
OV08X40_LINK_FREQ_400MHZ_INDEX,
+ OV08X40_LINK_FREQ_749MHZ_INDEX,
};
struct ov08x40_reg {
@@ -150,67 +152,7 @@ struct ov08x40_mode {
u16 exposure_shift;
};
-static const struct ov08x40_reg mipi_data_rate_800mbps[] = {
- {0x0103, 0x01},
- {0x1000, 0x00},
- {0x1601, 0xd0},
- {0x1001, 0x04},
- {0x5004, 0x53},
- {0x5110, 0x00},
- {0x5111, 0x14},
- {0x5112, 0x01},
- {0x5113, 0x7b},
- {0x5114, 0x00},
- {0x5152, 0xa3},
- {0x5a52, 0x1f},
- {0x5a1a, 0x0e},
- {0x5a1b, 0x10},
- {0x5a1f, 0x0e},
- {0x5a27, 0x0e},
- {0x6002, 0x2e},
-};
-
-static const struct ov08x40_reg mode_3856x2416_regs[] = {
- {0x5000, 0x5d},
- {0x5001, 0x20},
- {0x5008, 0xb0},
- {0x50c1, 0x00},
- {0x53c1, 0x00},
- {0x5f40, 0x00},
- {0x5f41, 0x40},
- {0x0300, 0x3a},
- {0x0301, 0xc8},
- {0x0302, 0x31},
- {0x0303, 0x03},
- {0x0304, 0x01},
- {0x0305, 0xa1},
- {0x0306, 0x04},
- {0x0307, 0x01},
- {0x0308, 0x03},
- {0x0309, 0x03},
- {0x0310, 0x0a},
- {0x0311, 0x02},
- {0x0312, 0x01},
- {0x0313, 0x08},
- {0x0314, 0x66},
- {0x0315, 0x00},
- {0x0316, 0x34},
- {0x0320, 0x02},
- {0x0321, 0x03},
- {0x0323, 0x05},
- {0x0324, 0x01},
- {0x0325, 0xb8},
- {0x0326, 0x4a},
- {0x0327, 0x04},
- {0x0329, 0x00},
- {0x032a, 0x05},
- {0x032b, 0x00},
- {0x032c, 0x00},
- {0x032d, 0x00},
- {0x032e, 0x02},
- {0x032f, 0xa0},
- {0x0350, 0x00},
- {0x0360, 0x01},
+static const struct ov08x40_reg ov08x40_global_regs[] = {
{0x1216, 0x60},
{0x1217, 0x5b},
{0x1218, 0x00},
@@ -220,7 +162,6 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x198e, 0x00},
{0x198f, 0x01},
{0x3009, 0x04},
- {0x3012, 0x41},
{0x3015, 0x00},
{0x3016, 0xb0},
{0x3017, 0xf0},
@@ -245,11 +186,10 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x3058, 0x80},
{0x3059, 0x00},
{0x3107, 0x86},
- {0x3400, 0x1c},
{0x3401, 0x80},
{0x3402, 0x8c},
- {0x3419, 0x13},
- {0x341a, 0x89},
+ {0x3404, 0x01},
+ {0x3407, 0x01},
{0x341b, 0x30},
{0x3420, 0x00},
{0x3421, 0x00},
@@ -257,7 +197,7 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x3423, 0x00},
{0x3424, 0x00},
{0x3425, 0x00},
- {0x3426, 0x00},
+ {0x3426, 0x10},
{0x3427, 0x00},
{0x3428, 0x0f},
{0x3429, 0x00},
@@ -286,27 +226,28 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x3455, 0x80},
{0x3456, 0x08},
{0x3500, 0x00},
- {0x3501, 0x02},
- {0x3502, 0x00},
+ {0x3502, 0x10},
{0x3504, 0x4c},
{0x3506, 0x30},
{0x3507, 0x00},
- {0x3508, 0x01},
- {0x3509, 0x00},
{0x350a, 0x01},
{0x350b, 0x00},
{0x350c, 0x00},
{0x3540, 0x00},
- {0x3541, 0x01},
- {0x3542, 0x00},
{0x3544, 0x4c},
{0x3546, 0x30},
{0x3547, 0x00},
- {0x3548, 0x01},
{0x3549, 0x00},
{0x354a, 0x01},
{0x354b, 0x00},
{0x354c, 0x00},
+ {0x3601, 0x40},
+ {0x3602, 0x90},
+ {0x3608, 0x0a},
+ {0x3609, 0x08},
+ {0x360f, 0x99},
+ {0x3680, 0xa4},
+ {0x3682, 0x80},
{0x3688, 0x02},
{0x368a, 0x2e},
{0x368e, 0x71},
@@ -316,9 +257,7 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x36a4, 0x00},
{0x36a6, 0x00},
{0x3711, 0x00},
- {0x3712, 0x51},
{0x3713, 0x00},
- {0x3714, 0x24},
{0x3716, 0x00},
{0x3718, 0x07},
{0x371a, 0x1c},
@@ -327,7 +266,6 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x3725, 0x32},
{0x3727, 0x05},
{0x3760, 0x02},
- {0x3761, 0x17},
{0x3762, 0x02},
{0x3763, 0x02},
{0x3764, 0x02},
@@ -337,41 +275,19 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x3768, 0x02},
{0x3769, 0x00},
{0x376b, 0x20},
- {0x376e, 0x03},
- {0x37b0, 0x00},
- {0x37b1, 0xab},
{0x37b2, 0x01},
- {0x37b3, 0x82},
- {0x37b4, 0x00},
- {0x37b5, 0xe4},
- {0x37b6, 0x01},
- {0x37b7, 0xee},
{0x3800, 0x00},
{0x3801, 0x00},
{0x3802, 0x00},
- {0x3803, 0x00},
{0x3804, 0x0f},
{0x3805, 0x1f},
{0x3806, 0x09},
- {0x3807, 0x7f},
- {0x3808, 0x0f},
- {0x3809, 0x10},
- {0x380a, 0x09},
- {0x380b, 0x70},
{0x380c, 0x02},
- {0x380d, 0x80},
- {0x380e, 0x13},
- {0x380f, 0x88},
{0x3810, 0x00},
- {0x3811, 0x08},
{0x3812, 0x00},
- {0x3813, 0x07},
{0x3814, 0x11},
{0x3815, 0x11},
- {0x3820, 0x00},
- {0x3821, 0x04},
{0x3822, 0x00},
- {0x3823, 0x04},
{0x3828, 0x0f},
{0x382a, 0x80},
{0x382e, 0x41},
@@ -386,7 +302,6 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x3847, 0x00},
{0x384a, 0x00},
{0x384c, 0x02},
- {0x384d, 0x80},
{0x3856, 0x50},
{0x3857, 0x30},
{0x3858, 0x80},
@@ -400,9 +315,45 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x388d, 0x00},
{0x388e, 0x00},
{0x388f, 0x00},
- {0x3894, 0x00},
{0x3895, 0x00},
+ {0x3911, 0x90},
+ {0x3913, 0x90},
+ {0x3921, 0x0f},
+ {0x3928, 0x15},
+ {0x3929, 0x2a},
+ {0x392c, 0x02},
+ {0x392e, 0x04},
+ {0x392f, 0x03},
+ {0x3931, 0x07},
+ {0x3932, 0x10},
+ {0x3938, 0x09},
+ {0x3a1f, 0x8a},
+ {0x3a22, 0x91},
+ {0x3a23, 0x15},
+ {0x3a25, 0x96},
+ {0x3a28, 0xb4},
+ {0x3a29, 0x26},
+ {0x3a2b, 0xba},
+ {0x3a2e, 0xbf},
+ {0x3a2f, 0x18},
+ {0x3a31, 0xc1},
+ {0x3a74, 0x84},
+ {0x3a99, 0x84},
+ {0x3ab9, 0xa6},
+ {0x3aba, 0xba},
+ {0x3b0a, 0x01},
+ {0x3b0b, 0x00},
+ {0x3b0e, 0x01},
+ {0x3b0f, 0x00},
+ {0x3b12, 0x84},
+ {0x3b14, 0xbb},
+ {0x3b15, 0xbf},
+ {0x3b1b, 0xc9},
+ {0x3b21, 0xc9},
+ {0x3b3f, 0x9d},
+ {0x3b45, 0x9d},
{0x3c84, 0x00},
+ {0x3d84, 0x04},
{0x3d85, 0x8b},
{0x3daa, 0x80},
{0x3dab, 0x14},
@@ -424,44 +375,21 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x4008, 0x00},
{0x4009, 0x05},
{0x400a, 0x00},
- {0x400b, 0x08},
{0x400c, 0x00},
- {0x400d, 0x08},
{0x400e, 0x14},
{0x4010, 0xf4},
{0x4011, 0x03},
{0x4012, 0x55},
{0x4015, 0x00},
- {0x4016, 0x2d},
{0x4017, 0x00},
{0x4018, 0x0f},
+ {0x4019, 0x00},
+ {0x401a, 0x40},
{0x401b, 0x08},
{0x401c, 0x00},
{0x401d, 0x10},
{0x401e, 0x02},
{0x401f, 0x00},
- {0x4050, 0x06},
- {0x4051, 0xff},
- {0x4052, 0xff},
- {0x4053, 0xff},
- {0x4054, 0xff},
- {0x4055, 0xff},
- {0x4056, 0xff},
- {0x4057, 0x7f},
- {0x4058, 0x00},
- {0x4059, 0x00},
- {0x405a, 0x00},
- {0x405b, 0x00},
- {0x405c, 0x07},
- {0x405d, 0xff},
- {0x405e, 0x07},
- {0x405f, 0xff},
- {0x4080, 0x78},
- {0x4081, 0x78},
- {0x4082, 0x78},
- {0x4083, 0x78},
- {0x4019, 0x00},
- {0x401a, 0x40},
{0x4020, 0x04},
{0x4021, 0x00},
{0x4022, 0x04},
@@ -486,6 +414,22 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x4045, 0x80},
{0x4046, 0x00},
{0x4047, 0x80},
+ {0x4050, 0x06},
+ {0x4051, 0xff},
+ {0x4052, 0xff},
+ {0x4053, 0xff},
+ {0x4054, 0xff},
+ {0x4055, 0xff},
+ {0x4056, 0xff},
+ {0x4057, 0x7f},
+ {0x4058, 0x00},
+ {0x4059, 0x00},
+ {0x405a, 0x00},
+ {0x405b, 0x00},
+ {0x405c, 0x07},
+ {0x405d, 0xff},
+ {0x405e, 0x07},
+ {0x405f, 0xff},
{0x4060, 0x00},
{0x4061, 0x00},
{0x4062, 0x00},
@@ -518,6 +462,10 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x407d, 0x00},
{0x407e, 0x00},
{0x407f, 0x00},
+ {0x4080, 0x78},
+ {0x4081, 0x78},
+ {0x4082, 0x78},
+ {0x4083, 0x78},
{0x40e0, 0x00},
{0x40e1, 0x00},
{0x40e2, 0x00},
@@ -560,7 +508,6 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x431b, 0x00},
{0x431c, 0x00},
{0x4500, 0x07},
- {0x4501, 0x00},
{0x4502, 0x00},
{0x4503, 0x0f},
{0x4504, 0x80},
@@ -573,7 +520,6 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x4510, 0x00},
{0x4523, 0x00},
{0x4526, 0x00},
- {0x4542, 0x00},
{0x4543, 0x00},
{0x4544, 0x00},
{0x4545, 0x00},
@@ -592,8 +538,6 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x480c, 0x80},
{0x480f, 0x32},
{0x4813, 0xe4},
- {0x4837, 0x14},
- {0x4850, 0x42},
{0x4884, 0x04},
{0x4c00, 0xf8},
{0x4c01, 0x44},
@@ -604,93 +548,37 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x4d05, 0x00},
{0x4d06, 0x0c},
{0x4d07, 0x00},
- {0x3d84, 0x04},
- {0x3680, 0xa4},
- {0x3682, 0x80},
- {0x3601, 0x40},
- {0x3602, 0x90},
- {0x3608, 0x0a},
- {0x3938, 0x09},
- {0x3a74, 0x84},
- {0x3a99, 0x84},
- {0x3ab9, 0xa6},
- {0x3aba, 0xba},
- {0x3b12, 0x84},
- {0x3b14, 0xbb},
- {0x3b15, 0xbf},
- {0x3a29, 0x26},
- {0x3a1f, 0x8a},
- {0x3a22, 0x91},
- {0x3a25, 0x96},
- {0x3a28, 0xb4},
- {0x3a2b, 0xba},
- {0x3a2e, 0xbf},
- {0x3a31, 0xc1},
- {0x3a20, 0x00},
- {0x3939, 0x9d},
- {0x3902, 0x0e},
- {0x3903, 0x0e},
- {0x3904, 0x0e},
- {0x3905, 0x0e},
- {0x3906, 0x07},
- {0x3907, 0x0d},
- {0x3908, 0x11},
- {0x3909, 0x12},
- {0x360f, 0x99},
- {0x390c, 0x33},
- {0x390d, 0x66},
- {0x390e, 0xaa},
- {0x3911, 0x90},
- {0x3913, 0x90},
- {0x3915, 0x90},
- {0x3917, 0x90},
- {0x3b3f, 0x9d},
- {0x3b45, 0x9d},
- {0x3b1b, 0xc9},
- {0x3b21, 0xc9},
- {0x3440, 0xa4},
- {0x3a23, 0x15},
- {0x3a26, 0x1d},
- {0x3a2c, 0x4a},
- {0x3a2f, 0x18},
- {0x3a32, 0x55},
- {0x3b0a, 0x01},
- {0x3b0b, 0x00},
- {0x3b0e, 0x01},
- {0x3b0f, 0x00},
- {0x392c, 0x02},
- {0x392d, 0x02},
- {0x392e, 0x04},
- {0x392f, 0x03},
- {0x3930, 0x08},
- {0x3931, 0x07},
- {0x3932, 0x10},
- {0x3933, 0x0c},
- {0x3609, 0x08},
- {0x3921, 0x0f},
- {0x3928, 0x15},
- {0x3929, 0x2a},
- {0x392a, 0x54},
- {0x392b, 0xa8},
- {0x3426, 0x10},
- {0x3407, 0x01},
- {0x3404, 0x01},
- {0x3500, 0x00},
- {0x3501, 0x10},
- {0x3502, 0x10},
- {0x3508, 0x0f},
- {0x3509, 0x80},
-};
-
-static const struct ov08x40_reg mode_1928x1208_regs[] = {
- {0x5000, 0x55},
- {0x5001, 0x00},
{0x5008, 0xb0},
{0x50c1, 0x00},
{0x53c1, 0x00},
{0x5f40, 0x00},
{0x5f41, 0x40},
- {0x0300, 0x3a},
+};
+
+static const struct ov08x40_reg_list ov08x40_global_setting = {
+ .num_of_regs = ARRAY_SIZE(ov08x40_global_regs),
+ .regs = ov08x40_global_regs,
+};
+
+static const struct ov08x40_reg mipi_data_rate_800mbps[] = {
+ {0x0103, 0x01},
+ {0x1000, 0x00},
+ {0x1601, 0xd0},
+ {0x1001, 0x04},
+ {0x5004, 0x53},
+ {0x5110, 0x00},
+ {0x5111, 0x14},
+ {0x5112, 0x01},
+ {0x5113, 0x7b},
+ {0x5114, 0x00},
+ {0x5152, 0xa3},
+ {0x5a52, 0x1f},
+ {0x5a1a, 0x0e},
+ {0x5a1b, 0x10},
+ {0x5a1f, 0x0e},
+ {0x5a27, 0x0e},
+ {0x6002, 0x2e},
+ {0x0300, 0x3a}, /* PLL CTRL */
{0x0301, 0xc8},
{0x0302, 0x31},
{0x0303, 0x03},
@@ -723,421 +611,522 @@ static const struct ov08x40_reg mode_1928x1208_regs[] = {
{0x032f, 0xa0},
{0x0350, 0x00},
{0x0360, 0x01},
- {0x1216, 0x60},
- {0x1217, 0x5b},
- {0x1218, 0x00},
- {0x1220, 0x24},
- {0x198a, 0x00},
- {0x198b, 0x01},
- {0x198e, 0x00},
- {0x198f, 0x01},
- {0x3009, 0x04},
+ {0x3012, 0x41}, /* MIPI SC Lanes */
+};
+
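+/*
+ * Unlike the 4-lane 800 Mbps setup above (0x3012 = 0x41), the 1500 Mbps
+ * per-lane configuration appears to use two data lanes (0x3012 = 0x21),
+ * matching the "DPHY1500M-2L" naming of the mode tables below.
+ */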
+static const struct ov08x40_reg mipi_data_rate_1500mbps[] = {
+ {0x0103, 0x01},
+ {0x1000, 0x00},
+ {0x1601, 0xd0},
+ {0x1001, 0x04},
+ {0x5004, 0x53},
+ {0x5110, 0x00},
+ {0x5111, 0x14},
+ {0x5112, 0x01},
+ {0x5113, 0x7b},
+ {0x5114, 0x00},
+ {0x5152, 0xa3},
+ {0x5a52, 0x1f},
+ {0x5a1a, 0x0e},
+ {0x5a1b, 0x10},
+ {0x5a1f, 0x0e},
+ {0x5a27, 0x0e},
+ {0x6002, 0x2e},
+ {0x0300, 0x3a}, /* PLL */
+ {0x0301, 0x88},
+ {0x0302, 0x31},
+ {0x0303, 0x05},
+ {0x0304, 0x01},
+ {0x0305, 0x38},
+ {0x0306, 0x04},
+ {0x0307, 0x00},
+ {0x0308, 0x03},
+ {0x0309, 0x02},
+ {0x0310, 0x0a},
+ {0x0311, 0x02},
+ {0x0312, 0x01},
+ {0x0313, 0x08},
+ {0x0314, 0x00},
+ {0x0315, 0x00},
+ {0x0316, 0x2c},
+ {0x0320, 0x02},
+ {0x0321, 0x03},
+ {0x0323, 0x05},
+ {0x0324, 0x01},
+ {0x0325, 0xb8},
+ {0x0326, 0x4a},
+ {0x0327, 0x04},
+ {0x0329, 0x00},
+ {0x032a, 0x05},
+ {0x032b, 0x00},
+ {0x032c, 0x00},
+ {0x032d, 0x00},
+ {0x032e, 0x02},
+ {0x032f, 0xa0},
+ {0x0350, 0x00},
+ {0x0360, 0x01},
+ {0x3012, 0x21}, /* MIPI SC Lanes */
+};
+
+static const struct ov08x40_reg mode_3856x2176_regs_800mbps[] = {
+ {0x5000, 0x5d},
+ {0x5001, 0x20},
+ {0x3012, 0x41},
+ {0x3400, 0x1c},
+ {0x3419, 0x13},
+ {0x341a, 0x89},
+ {0x3426, 0x00},
+ {0x3501, 0x02},
+ {0x3502, 0x00},
+ {0x3508, 0x01},
+ {0x3509, 0x00},
+ {0x3541, 0x01},
+ {0x3542, 0x00},
+ {0x3548, 0x01},
+ {0x3712, 0x51},
+ {0x3714, 0x24},
+ {0x3761, 0x17},
+ {0x376e, 0x03},
+ {0x37b0, 0x00},
+ {0x37b1, 0xab},
+ {0x37b3, 0x82},
+ {0x37b4, 0x00},
+ {0x37b5, 0xe4},
+ {0x37b6, 0x01},
+ {0x37b7, 0xee},
+ {0x3820, 0x00},
+ {0x3821, 0x04},
+ {0x3823, 0x04},
+ {0x384d, 0x80},
+ {0x3894, 0x00},
+ {0x400b, 0x08},
+ {0x400d, 0x08},
+ {0x4016, 0x2d},
+ {0x4501, 0x00},
+ {0x4542, 0x00},
+ {0x4837, 0x14},
+ {0x4850, 0x42},
+ {0x3a20, 0x00},
+ {0x3939, 0x9d},
+ {0x3902, 0x0e},
+ {0x3903, 0x0e},
+ {0x3904, 0x0e},
+ {0x3905, 0x0e},
+ {0x3906, 0x07},
+ {0x3907, 0x0d},
+ {0x3908, 0x11},
+ {0x3909, 0x12},
+ {0x390c, 0x33},
+ {0x390d, 0x66},
+ {0x390e, 0xaa},
+ {0x3915, 0x90},
+ {0x3917, 0x90},
+ {0x3440, 0xa4},
+ {0x3a26, 0x1d},
+ {0x3a2c, 0x4a},
+ {0x3a32, 0x55},
+ {0x392d, 0x02},
+ {0x3930, 0x08},
+ {0x3933, 0x0c},
+ {0x392a, 0x54},
+ {0x392b, 0xa8},
+ {0x380d, 0x80},
+ {0x380e, 0x13},
+ {0x380f, 0x88},
+ {0x3803, 0x70},
+ {0x3807, 0x0f},
+ {0x3808, 0x0f},
+ {0x3809, 0x10},
+ {0x380a, 0x08},
+ {0x380b, 0x80},
+ {0x3811, 0x08},
+ {0x3813, 0x10},
+ {0x3501, 0x10},
+ {0x3508, 0x0f},
+ {0x3509, 0x80},
+ {0x3813, 0x0f},
+};
+
+/* OV08X 1C 3856x2176_DPHY1500M-2L */
+static const struct ov08x40_reg mode_3856x2176_regs_1500mbps[] = {
+ {0x5000, 0x5d},
+ {0x5001, 0x20},
+ {0x3012, 0x21},
+ {0x3400, 0x1c},
+ {0x3419, 0x12},
+ {0x341a, 0x99},
+ {0x3426, 0x00},
+ {0x3501, 0x02},
+ {0x3502, 0x00},
+ {0x3508, 0x01},
+ {0x3509, 0x00},
+ {0x3541, 0x01},
+ {0x3542, 0x00},
+ {0x3548, 0x01},
+ {0x3712, 0x51},
+ {0x3714, 0x24},
+ {0x3761, 0x17},
+ {0x376e, 0x03},
+ {0x37b0, 0x00},
+ {0x37b1, 0xab},
+ {0x37b3, 0x82},
+ {0x37b4, 0x00},
+ {0x37b5, 0xe4},
+ {0x37b6, 0x01},
+ {0x37b7, 0xee},
+ {0x3803, 0x70},
+ {0x3807, 0x0f},
+ {0x3808, 0x0f},
+ {0x3809, 0x10},
+ {0x380a, 0x08},
+ {0x380b, 0x80},
+ {0x380d, 0xa0},
+ {0x380e, 0x12},
+ {0x380f, 0x98},
+ {0x3811, 0x08},
+ {0x3813, 0x10},
+ {0x3820, 0x00},
+ {0x3821, 0x04},
+ {0x3823, 0x04},
+ {0x384d, 0xa0},
+ {0x3894, 0x00},
+ {0x400b, 0x08},
+ {0x400d, 0x08},
+ {0x4016, 0x2d},
+ {0x4501, 0x00},
+ {0x4542, 0x00},
+ {0x4837, 0x0a},
+ {0x4850, 0x47},
+ {0x3a20, 0x00},
+ {0x3939, 0x9d},
+ {0x3902, 0x0e},
+ {0x3903, 0x0e},
+ {0x3904, 0x0e},
+ {0x3905, 0x0e},
+ {0x3906, 0x07},
+ {0x3907, 0x0d},
+ {0x3908, 0x11},
+ {0x3909, 0x12},
+ {0x390c, 0x33},
+ {0x390d, 0x66},
+ {0x390e, 0xaa},
+ {0x3915, 0x90},
+ {0x3917, 0x90},
+ {0x3440, 0xa4},
+ {0x3a26, 0x1d},
+ {0x3a2c, 0x4a},
+ {0x3a32, 0x55},
+ {0x392d, 0x02},
+ {0x3930, 0x08},
+ {0x3933, 0x0c},
+ {0x392a, 0x54},
+ {0x392b, 0xa8},
+ {0x3501, 0x10},
+ {0x3508, 0x0f},
+ {0x3509, 0x80},
+ {0x3813, 0x0f},
+};
+
+/* OV08X 4C1stg 1928x1088_DPHY1500M-2L 30fps */
+static const struct ov08x40_reg mode_1928x1088_regs_1500mbps[] = {
+ {0x5000, 0x55},
+ {0x5001, 0x00},
+ {0x3012, 0x21},
+ {0x3400, 0x30},
+ {0x3419, 0x08},
+ {0x341a, 0x4f},
+ {0x3426, 0x00},
+ {0x3501, 0x02},
+ {0x3502, 0x00},
+ {0x3508, 0x01},
+ {0x3509, 0x00},
+ {0x3541, 0x01},
+ {0x3542, 0x00},
+ {0x3548, 0x01},
+ {0x3712, 0x50},
+ {0x3714, 0x21},
+ {0x3761, 0x28},
+ {0x376e, 0x07},
+ {0x37b0, 0x01},
+ {0x37b1, 0x0f},
+ {0x37b3, 0xd6},
+ {0x37b4, 0x01},
+ {0x37b5, 0x48},
+ {0x37b6, 0x02},
+ {0x37b7, 0x40},
+ {0x3803, 0x78},
+ {0x3807, 0x07},
+ {0x3808, 0x07},
+ {0x3809, 0x88},
+ {0x380a, 0x04},
+ {0x380b, 0x40},
+ {0x380d, 0xf0},
+ {0x380e, 0x08},
+ {0x380f, 0x4e},
+ {0x3811, 0x04},
+ {0x3813, 0x03},
+ {0x3820, 0x02},
+ {0x3821, 0x14},
+ {0x3823, 0x84},
+ {0x384d, 0xf0},
+ {0x3894, 0x03},
+ {0x400b, 0x04},
+ {0x400d, 0x04},
+ {0x4016, 0x27},
+ {0x4501, 0x10},
+ {0x4542, 0x01},
+ {0x4837, 0x0a},
+ {0x4850, 0x47},
+ {0x4911, 0x00},
+ {0x4919, 0x00},
+ {0x491a, 0x40},
+ {0x4920, 0x04},
+ {0x4921, 0x00},
+ {0x4922, 0x04},
+ {0x4923, 0x00},
+ {0x4924, 0x04},
+ {0x4925, 0x00},
+ {0x4926, 0x04},
+ {0x4927, 0x00},
+ {0x4930, 0x00},
+ {0x4931, 0x00},
+ {0x4932, 0x00},
+ {0x4933, 0x00},
+ {0x4934, 0x00},
+ {0x4935, 0x00},
+ {0x4936, 0x00},
+ {0x4937, 0x00},
+ {0x4940, 0x00},
+ {0x4941, 0x80},
+ {0x4942, 0x00},
+ {0x4943, 0x80},
+ {0x4944, 0x00},
+ {0x4945, 0x80},
+ {0x4946, 0x00},
+ {0x4947, 0x80},
+ {0x4960, 0x00},
+ {0x4961, 0x00},
+ {0x4962, 0x00},
+ {0x4963, 0x00},
+ {0x4964, 0x00},
+ {0x4965, 0x00},
+ {0x4966, 0x00},
+ {0x4967, 0x00},
+ {0x4968, 0x00},
+ {0x4969, 0x00},
+ {0x496a, 0x00},
+ {0x496b, 0x00},
+ {0x496c, 0x00},
+ {0x496d, 0x00},
+ {0x496e, 0x00},
+ {0x496f, 0x00},
+ {0x4970, 0x00},
+ {0x4971, 0x00},
+ {0x4972, 0x00},
+ {0x4973, 0x00},
+ {0x4974, 0x00},
+ {0x4975, 0x00},
+ {0x4976, 0x00},
+ {0x4977, 0x00},
+ {0x4978, 0x00},
+ {0x4979, 0x00},
+ {0x497a, 0x00},
+ {0x497b, 0x00},
+ {0x497c, 0x00},
+ {0x497d, 0x00},
+ {0x497e, 0x00},
+ {0x497f, 0x00},
+ {0x49e0, 0x00},
+ {0x49e1, 0x00},
+ {0x49e2, 0x00},
+ {0x49e3, 0x00},
+ {0x49e4, 0x00},
+ {0x49e5, 0x00},
+ {0x49e6, 0x00},
+ {0x49e7, 0x00},
+ {0x49e8, 0x00},
+ {0x49e9, 0x80},
+ {0x49ea, 0x00},
+ {0x49eb, 0x80},
+ {0x49ec, 0x00},
+ {0x49ed, 0x80},
+ {0x49ee, 0x00},
+ {0x49ef, 0x80},
+ {0x49f0, 0x02},
+ {0x49f1, 0x04},
+ {0x3a20, 0x05},
+ {0x3939, 0x6b},
+ {0x3902, 0x10},
+ {0x3903, 0x10},
+ {0x3904, 0x10},
+ {0x3905, 0x10},
+ {0x3906, 0x01},
+ {0x3907, 0x0b},
+ {0x3908, 0x10},
+ {0x3909, 0x13},
+ {0x390b, 0x11},
+ {0x390c, 0x21},
+ {0x390d, 0x32},
+ {0x390e, 0x76},
+ {0x3a1a, 0x1c},
+ {0x3a26, 0x17},
+ {0x3a2c, 0x50},
+ {0x3a32, 0x4f},
+ {0x3ace, 0x01},
+ {0x3ad2, 0x01},
+ {0x3ad6, 0x01},
+ {0x3ada, 0x01},
+ {0x3ade, 0x01},
+ {0x3ae2, 0x01},
+ {0x3aee, 0x01},
+ {0x3af2, 0x01},
+ {0x3af6, 0x01},
+ {0x3afa, 0x01},
+ {0x3afe, 0x01},
+ {0x3b02, 0x01},
+ {0x3b06, 0x01},
+ {0x392d, 0x01},
+ {0x3930, 0x09},
+ {0x3933, 0x0d},
+ {0x392a, 0x52},
+ {0x392b, 0xa3},
+ {0x340b, 0x1b},
+ {0x3501, 0x01},
+ {0x3508, 0x0f},
+ {0x3509, 0x00},
+ {0x3541, 0x00},
+ {0x3542, 0x80},
+ {0x3548, 0x0f},
+ {0x3813, 0x03},
+};
+
+static const struct ov08x40_reg mode_3856x2416_regs[] = {
+ {0x5000, 0x5d},
+ {0x5001, 0x20},
+ {0x3012, 0x41},
+ {0x3400, 0x1c},
+ {0x3419, 0x13},
+ {0x341a, 0x89},
+ {0x3426, 0x00},
+ {0x3501, 0x02},
+ {0x3502, 0x00},
+ {0x3508, 0x01},
+ {0x3509, 0x00},
+ {0x3541, 0x01},
+ {0x3542, 0x00},
+ {0x3548, 0x01},
+ {0x3712, 0x51},
+ {0x3714, 0x24},
+ {0x3761, 0x17},
+ {0x376e, 0x03},
+ {0x37b0, 0x00},
+ {0x37b1, 0xab},
+ {0x37b3, 0x82},
+ {0x37b4, 0x00},
+ {0x37b5, 0xe4},
+ {0x37b6, 0x01},
+ {0x37b7, 0xee},
+ {0x3803, 0x00},
+ {0x3807, 0x7f},
+ {0x3808, 0x0f},
+ {0x3809, 0x10},
+ {0x380a, 0x09},
+ {0x380b, 0x70},
+ {0x380d, 0x80},
+ {0x380e, 0x13},
+ {0x380f, 0x88},
+ {0x3811, 0x08},
+ {0x3813, 0x07},
+ {0x3820, 0x00},
+ {0x3821, 0x04},
+ {0x3823, 0x04},
+ {0x384d, 0x80},
+ {0x3894, 0x00},
+ {0x400b, 0x08},
+ {0x400d, 0x08},
+ {0x4016, 0x2d},
+ {0x4501, 0x00},
+ {0x4542, 0x00},
+ {0x4837, 0x14},
+ {0x4850, 0x42},
+ {0x3a20, 0x00},
+ {0x3939, 0x9d},
+ {0x3902, 0x0e},
+ {0x3903, 0x0e},
+ {0x3904, 0x0e},
+ {0x3905, 0x0e},
+ {0x3906, 0x07},
+ {0x3907, 0x0d},
+ {0x3908, 0x11},
+ {0x3909, 0x12},
+ {0x390c, 0x33},
+ {0x390d, 0x66},
+ {0x390e, 0xaa},
+ {0x3915, 0x90},
+ {0x3917, 0x90},
+ {0x3440, 0xa4},
+ {0x3a26, 0x1d},
+ {0x3a2c, 0x4a},
+ {0x3a32, 0x55},
+ {0x392d, 0x02},
+ {0x3930, 0x08},
+ {0x3933, 0x0c},
+ {0x392a, 0x54},
+ {0x392b, 0xa8},
+ {0x3501, 0x10},
+ {0x3508, 0x0f},
+ {0x3509, 0x80},
+};
+
+static const struct ov08x40_reg mode_1928x1208_regs[] = {
+ {0x5000, 0x55},
+ {0x5001, 0x00},
{0x3012, 0x41},
- {0x3015, 0x00},
- {0x3016, 0xb0},
- {0x3017, 0xf0},
- {0x3018, 0xf0},
- {0x3019, 0xd2},
- {0x301a, 0xb0},
- {0x301c, 0x81},
- {0x301d, 0x02},
- {0x301e, 0x80},
- {0x3022, 0xf0},
- {0x3025, 0x89},
- {0x3030, 0x03},
- {0x3044, 0xc2},
- {0x3050, 0x35},
- {0x3051, 0x60},
- {0x3052, 0x25},
- {0x3053, 0x00},
- {0x3054, 0x00},
- {0x3055, 0x02},
- {0x3056, 0x80},
- {0x3057, 0x80},
- {0x3058, 0x80},
- {0x3059, 0x00},
- {0x3107, 0x86},
{0x3400, 0x1c},
- {0x3401, 0x80},
- {0x3402, 0x8c},
{0x3419, 0x08},
{0x341a, 0xaf},
- {0x341b, 0x30},
- {0x3420, 0x00},
- {0x3421, 0x00},
- {0x3422, 0x00},
- {0x3423, 0x00},
- {0x3424, 0x00},
- {0x3425, 0x00},
{0x3426, 0x00},
- {0x3427, 0x00},
- {0x3428, 0x0f},
- {0x3429, 0x00},
- {0x342a, 0x00},
- {0x342b, 0x00},
- {0x342c, 0x00},
- {0x342d, 0x00},
- {0x342e, 0x00},
- {0x342f, 0x11},
- {0x3430, 0x11},
- {0x3431, 0x10},
- {0x3432, 0x00},
- {0x3433, 0x00},
- {0x3434, 0x00},
- {0x3435, 0x00},
- {0x3436, 0x00},
- {0x3437, 0x00},
- {0x3442, 0x02},
- {0x3443, 0x02},
- {0x3444, 0x07},
- {0x3450, 0x00},
- {0x3451, 0x00},
- {0x3452, 0x18},
- {0x3453, 0x18},
- {0x3454, 0x00},
- {0x3455, 0x80},
- {0x3456, 0x08},
- {0x3500, 0x00},
{0x3501, 0x02},
{0x3502, 0x00},
- {0x3504, 0x4c},
- {0x3506, 0x30},
- {0x3507, 0x00},
{0x3508, 0x01},
{0x3509, 0x00},
- {0x350a, 0x01},
- {0x350b, 0x00},
- {0x350c, 0x00},
- {0x3540, 0x00},
{0x3541, 0x01},
{0x3542, 0x00},
- {0x3544, 0x4c},
- {0x3546, 0x30},
- {0x3547, 0x00},
{0x3548, 0x01},
- {0x3549, 0x00},
- {0x354a, 0x01},
- {0x354b, 0x00},
- {0x354c, 0x00},
- {0x3688, 0x02},
- {0x368a, 0x2e},
- {0x368e, 0x71},
- {0x3696, 0xd1},
- {0x3699, 0x00},
- {0x369a, 0x00},
- {0x36a4, 0x00},
- {0x36a6, 0x00},
- {0x3711, 0x00},
{0x3712, 0x50},
- {0x3713, 0x00},
{0x3714, 0x21},
- {0x3716, 0x00},
- {0x3718, 0x07},
- {0x371a, 0x1c},
- {0x371b, 0x00},
- {0x3720, 0x08},
- {0x3725, 0x32},
- {0x3727, 0x05},
- {0x3760, 0x02},
{0x3761, 0x28},
- {0x3762, 0x02},
- {0x3763, 0x02},
- {0x3764, 0x02},
- {0x3765, 0x2c},
- {0x3766, 0x04},
- {0x3767, 0x2c},
- {0x3768, 0x02},
- {0x3769, 0x00},
- {0x376b, 0x20},
{0x376e, 0x07},
{0x37b0, 0x01},
{0x37b1, 0x0f},
- {0x37b2, 0x01},
{0x37b3, 0xd6},
{0x37b4, 0x01},
{0x37b5, 0x48},
{0x37b6, 0x02},
{0x37b7, 0x40},
- {0x3800, 0x00},
- {0x3801, 0x00},
- {0x3802, 0x00},
{0x3803, 0x00},
- {0x3804, 0x0f},
- {0x3805, 0x1f},
- {0x3806, 0x09},
{0x3807, 0x7f},
{0x3808, 0x07},
{0x3809, 0x88},
{0x380a, 0x04},
{0x380b, 0xb8},
- {0x380c, 0x02},
{0x380d, 0xd0},
{0x380e, 0x11},
{0x380f, 0x5c},
- {0x3810, 0x00},
{0x3811, 0x04},
- {0x3812, 0x00},
{0x3813, 0x03},
- {0x3814, 0x11},
- {0x3815, 0x11},
{0x3820, 0x02},
{0x3821, 0x14},
- {0x3822, 0x00},
{0x3823, 0x04},
- {0x3828, 0x0f},
- {0x382a, 0x80},
- {0x382e, 0x41},
- {0x3837, 0x08},
- {0x383a, 0x81},
- {0x383b, 0x81},
- {0x383c, 0x11},
- {0x383d, 0x11},
- {0x383e, 0x00},
- {0x383f, 0x38},
- {0x3840, 0x00},
- {0x3847, 0x00},
- {0x384a, 0x00},
- {0x384c, 0x02},
{0x384d, 0xd0},
- {0x3856, 0x50},
- {0x3857, 0x30},
- {0x3858, 0x80},
- {0x3859, 0x40},
- {0x3860, 0x00},
- {0x3888, 0x00},
- {0x3889, 0x00},
- {0x388a, 0x00},
- {0x388b, 0x00},
- {0x388c, 0x00},
- {0x388d, 0x00},
- {0x388e, 0x00},
- {0x388f, 0x00},
{0x3894, 0x00},
- {0x3895, 0x00},
- {0x3c84, 0x00},
- {0x3d85, 0x8b},
- {0x3daa, 0x80},
- {0x3dab, 0x14},
- {0x3dac, 0x80},
- {0x3dad, 0xc8},
- {0x3dae, 0x81},
- {0x3daf, 0x7b},
- {0x3f00, 0x10},
- {0x3f01, 0x11},
- {0x3f06, 0x0d},
- {0x3f07, 0x0b},
- {0x3f08, 0x0d},
- {0x3f09, 0x0b},
- {0x3f0a, 0x01},
- {0x3f0b, 0x11},
- {0x3f0c, 0x33},
- {0x4001, 0x07},
- {0x4007, 0x20},
- {0x4008, 0x00},
- {0x4009, 0x05},
- {0x400a, 0x00},
{0x400b, 0x04},
- {0x400c, 0x00},
{0x400d, 0x04},
- {0x400e, 0x14},
- {0x4010, 0xf4},
- {0x4011, 0x03},
- {0x4012, 0x55},
- {0x4015, 0x00},
{0x4016, 0x27},
- {0x4017, 0x00},
- {0x4018, 0x0f},
- {0x401b, 0x08},
- {0x401c, 0x00},
- {0x401d, 0x10},
- {0x401e, 0x02},
- {0x401f, 0x00},
- {0x4050, 0x06},
- {0x4051, 0xff},
- {0x4052, 0xff},
- {0x4053, 0xff},
- {0x4054, 0xff},
- {0x4055, 0xff},
- {0x4056, 0xff},
- {0x4057, 0x7f},
- {0x4058, 0x00},
- {0x4059, 0x00},
- {0x405a, 0x00},
- {0x405b, 0x00},
- {0x405c, 0x07},
- {0x405d, 0xff},
- {0x405e, 0x07},
- {0x405f, 0xff},
- {0x4080, 0x78},
- {0x4081, 0x78},
- {0x4082, 0x78},
- {0x4083, 0x78},
- {0x4019, 0x00},
- {0x401a, 0x40},
- {0x4020, 0x04},
- {0x4021, 0x00},
- {0x4022, 0x04},
- {0x4023, 0x00},
- {0x4024, 0x04},
- {0x4025, 0x00},
- {0x4026, 0x04},
- {0x4027, 0x00},
- {0x4030, 0x00},
- {0x4031, 0x00},
- {0x4032, 0x00},
- {0x4033, 0x00},
- {0x4034, 0x00},
- {0x4035, 0x00},
- {0x4036, 0x00},
- {0x4037, 0x00},
- {0x4040, 0x00},
- {0x4041, 0x80},
- {0x4042, 0x00},
- {0x4043, 0x80},
- {0x4044, 0x00},
- {0x4045, 0x80},
- {0x4046, 0x00},
- {0x4047, 0x80},
- {0x4060, 0x00},
- {0x4061, 0x00},
- {0x4062, 0x00},
- {0x4063, 0x00},
- {0x4064, 0x00},
- {0x4065, 0x00},
- {0x4066, 0x00},
- {0x4067, 0x00},
- {0x4068, 0x00},
- {0x4069, 0x00},
- {0x406a, 0x00},
- {0x406b, 0x00},
- {0x406c, 0x00},
- {0x406d, 0x00},
- {0x406e, 0x00},
- {0x406f, 0x00},
- {0x4070, 0x00},
- {0x4071, 0x00},
- {0x4072, 0x00},
- {0x4073, 0x00},
- {0x4074, 0x00},
- {0x4075, 0x00},
- {0x4076, 0x00},
- {0x4077, 0x00},
- {0x4078, 0x00},
- {0x4079, 0x00},
- {0x407a, 0x00},
- {0x407b, 0x00},
- {0x407c, 0x00},
- {0x407d, 0x00},
- {0x407e, 0x00},
- {0x407f, 0x00},
- {0x40e0, 0x00},
- {0x40e1, 0x00},
- {0x40e2, 0x00},
- {0x40e3, 0x00},
- {0x40e4, 0x00},
- {0x40e5, 0x00},
- {0x40e6, 0x00},
- {0x40e7, 0x00},
- {0x40e8, 0x00},
- {0x40e9, 0x80},
- {0x40ea, 0x00},
- {0x40eb, 0x80},
- {0x40ec, 0x00},
- {0x40ed, 0x80},
- {0x40ee, 0x00},
- {0x40ef, 0x80},
- {0x40f0, 0x02},
- {0x40f1, 0x04},
- {0x4300, 0x00},
- {0x4301, 0x00},
- {0x4302, 0x00},
- {0x4303, 0x00},
- {0x4304, 0x00},
- {0x4305, 0x00},
- {0x4306, 0x00},
- {0x4307, 0x00},
- {0x4308, 0x00},
- {0x4309, 0x00},
- {0x430a, 0x00},
- {0x430b, 0xff},
- {0x430c, 0xff},
- {0x430d, 0x00},
- {0x430e, 0x00},
- {0x4315, 0x00},
- {0x4316, 0x00},
- {0x4317, 0x00},
- {0x4318, 0x00},
- {0x4319, 0x00},
- {0x431a, 0x00},
- {0x431b, 0x00},
- {0x431c, 0x00},
- {0x4500, 0x07},
{0x4501, 0x10},
- {0x4502, 0x00},
- {0x4503, 0x0f},
- {0x4504, 0x80},
- {0x4506, 0x01},
- {0x4509, 0x05},
- {0x450c, 0x00},
- {0x450d, 0x20},
- {0x450e, 0x00},
- {0x450f, 0x00},
- {0x4510, 0x00},
- {0x4523, 0x00},
- {0x4526, 0x00},
{0x4542, 0x00},
- {0x4543, 0x00},
- {0x4544, 0x00},
- {0x4545, 0x00},
- {0x4546, 0x00},
- {0x4547, 0x10},
- {0x4602, 0x00},
- {0x4603, 0x15},
- {0x460b, 0x07},
- {0x4680, 0x11},
- {0x4686, 0x00},
- {0x4687, 0x00},
- {0x4700, 0x00},
- {0x4800, 0x64},
- {0x4806, 0x40},
- {0x480b, 0x10},
- {0x480c, 0x80},
- {0x480f, 0x32},
- {0x4813, 0xe4},
{0x4837, 0x14},
{0x4850, 0x42},
- {0x4884, 0x04},
- {0x4c00, 0xf8},
- {0x4c01, 0x44},
- {0x4c03, 0x00},
- {0x4d00, 0x00},
- {0x4d01, 0x16},
- {0x4d04, 0x10},
- {0x4d05, 0x00},
- {0x4d06, 0x0c},
- {0x4d07, 0x00},
- {0x3d84, 0x04},
- {0x3680, 0xa4},
- {0x3682, 0x80},
- {0x3601, 0x40},
- {0x3602, 0x90},
- {0x3608, 0x0a},
- {0x3938, 0x09},
- {0x3a74, 0x84},
- {0x3a99, 0x84},
- {0x3ab9, 0xa6},
- {0x3aba, 0xba},
- {0x3b12, 0x84},
- {0x3b14, 0xbb},
- {0x3b15, 0xbf},
- {0x3a29, 0x26},
- {0x3a1f, 0x8a},
- {0x3a22, 0x91},
- {0x3a25, 0x96},
- {0x3a28, 0xb4},
- {0x3a2b, 0xba},
- {0x3a2e, 0xbf},
- {0x3a31, 0xc1},
{0x3a20, 0x05},
{0x3939, 0x6b},
{0x3902, 0x10},
@@ -1148,22 +1137,13 @@ static const struct ov08x40_reg mode_1928x1208_regs[] = {
{0x3907, 0x0b},
{0x3908, 0x10},
{0x3909, 0x13},
- {0x360f, 0x99},
{0x390b, 0x11},
{0x390c, 0x21},
{0x390d, 0x32},
{0x390e, 0x76},
- {0x3911, 0x90},
- {0x3913, 0x90},
- {0x3b3f, 0x9d},
- {0x3b45, 0x9d},
- {0x3b1b, 0xc9},
- {0x3b21, 0xc9},
{0x3a1a, 0x1c},
- {0x3a23, 0x15},
{0x3a26, 0x17},
{0x3a2c, 0x50},
- {0x3a2f, 0x18},
{0x3a32, 0x4f},
{0x3ace, 0x01},
{0x3ad2, 0x01},
@@ -1178,31 +1158,13 @@ static const struct ov08x40_reg mode_1928x1208_regs[] = {
{0x3afe, 0x01},
{0x3b02, 0x01},
{0x3b06, 0x01},
- {0x3b0a, 0x01},
- {0x3b0b, 0x00},
- {0x3b0e, 0x01},
- {0x3b0f, 0x00},
- {0x392c, 0x02},
{0x392d, 0x01},
- {0x392e, 0x04},
- {0x392f, 0x03},
{0x3930, 0x09},
- {0x3931, 0x07},
- {0x3932, 0x10},
{0x3933, 0x0d},
- {0x3609, 0x08},
- {0x3921, 0x0f},
- {0x3928, 0x15},
- {0x3929, 0x2a},
{0x392a, 0x52},
{0x392b, 0xa3},
{0x340b, 0x1b},
- {0x3426, 0x10},
- {0x3407, 0x01},
- {0x3404, 0x01},
- {0x3500, 0x00},
{0x3501, 0x08},
- {0x3502, 0x10},
{0x3508, 0x04},
{0x3509, 0x00},
};
@@ -1217,6 +1179,7 @@ static const char * const ov08x40_test_pattern_menu[] = {
/* Configurations for supported link frequencies */
#define OV08X40_LINK_FREQ_400MHZ 400000000ULL
+#define OV08X40_LINK_FREQ_749MHZ 749000000ULL
#define OV08X40_SCLK_96MHZ 96000000ULL
#define OV08X40_XVCLK 19200000
#define OV08X40_DATA_LANES 4
@@ -1236,6 +1199,7 @@ static u64 link_freq_to_pixel_rate(u64 f)
/* Menu items for LINK_FREQ V4L2 control */
static const s64 link_freq_menu_items[] = {
OV08X40_LINK_FREQ_400MHZ,
+ OV08X40_LINK_FREQ_749MHZ,
};
/* Link frequency configs */
@@ -1246,6 +1210,12 @@ static const struct ov08x40_link_freq_config link_freq_configs[] = {
.regs = mipi_data_rate_800mbps,
}
},
+ [OV08X40_LINK_FREQ_749MHZ_INDEX] = {
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_1500mbps),
+ .regs = mipi_data_rate_1500mbps,
+ }
+ },
};
/* Mode configs */
@@ -1266,6 +1236,22 @@ static const struct ov08x40_mode supported_modes[] = {
.exposure_margin = OV08X40_EXPOSURE_MAX_MARGIN,
},
{
+ .width = 3856,
+ .height = 2176,
+ .vts_def = OV08X40_VTS_30FPS,
+ .vts_min = OV08X40_VTS_30FPS,
+ .llp = 0x10aa, /* in normal mode, tline time = 2 * HTS / SCLK */
+ .lanes = 4,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_3856x2176_regs_800mbps),
+ .regs = mode_3856x2176_regs_800mbps,
+ },
+ .link_freq_index = OV08X40_LINK_FREQ_400MHZ_INDEX,
+ .exposure_shift = 1,
+ .exposure_margin = OV08X40_EXPOSURE_MAX_MARGIN,
+ },
+ {
.width = 1928,
.height = 1208,
.vts_def = OV08X40_VTS_BIN_30FPS,
@@ -1280,6 +1266,36 @@ static const struct ov08x40_mode supported_modes[] = {
.exposure_shift = 0,
.exposure_margin = OV08X40_EXPOSURE_BIN_MAX_MARGIN,
},
+ {
+ .width = 3856,
+ .height = 2176,
+ .vts_def = OV08X40_VTS_30FPS,
+ .vts_min = OV08X40_VTS_30FPS,
+ .llp = 0x10aa, /* in normal mode, tline time = 2 * HTS / SCLK */
+ .lanes = 2,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_3856x2176_regs_1500mbps),
+ .regs = mode_3856x2176_regs_1500mbps,
+ },
+ .link_freq_index = OV08X40_LINK_FREQ_749MHZ_INDEX,
+ .exposure_shift = 1,
+ .exposure_margin = OV08X40_EXPOSURE_MAX_MARGIN,
+ },
+ {
+ .width = 1928,
+ .height = 1088,
+ .vts_def = OV08X40_VTS_BIN_30FPS,
+ .vts_min = OV08X40_VTS_BIN_30FPS,
+ .llp = 0x960,
+ .lanes = 2,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1928x1088_regs_1500mbps),
+ .regs = mode_1928x1088_regs_1500mbps,
+ },
+ .link_freq_index = OV08X40_LINK_FREQ_749MHZ_INDEX,
+ .exposure_shift = 0,
+ .exposure_margin = OV08X40_EXPOSURE_MAX_MARGIN,
+ },
};
static const char * const ov08x40_supply_names[] = {
@@ -1310,8 +1326,13 @@ struct ov08x40 {
/* Mutex for serialized access */
struct mutex mutex;
+	/* Number of MIPI CSI-2 data lanes in use */
+ u8 mipi_lanes;
+
/* True if the device has been identified */
bool identified;
+
+ unsigned long link_freq_bitmap;
};
#define to_ov08x40(_sd) container_of(_sd, struct ov08x40, sd)
@@ -1341,7 +1362,7 @@ static int ov08x40_power_on(struct device *dev)
}
gpiod_set_value_cansleep(ov08x->reset_gpio, 0);
- usleep_range(1500, 1800);
+ usleep_range(5000, 5500);
return 0;
@@ -1727,6 +1748,15 @@ static int ov08x40_set_ctrl(struct v4l2_ctrl *ctrl)
return ret;
}
+static bool filter_by_mipi_lanes(const void *array, size_t index,
+ const void *context)
+{
+ const struct ov08x40_mode *mode = array;
+ const struct ov08x40 *ov08x = context;
+
+ return mode->lanes == ov08x->mipi_lanes;
+}
+
static const struct v4l2_ctrl_ops ov08x40_ctrl_ops = {
.s_ctrl = ov08x40_set_ctrl,
};
@@ -1748,18 +1778,28 @@ static int ov08x40_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- if (fse->index >= ARRAY_SIZE(supported_modes))
- return -EINVAL;
+ struct ov08x40 *ov08x = to_ov08x40(sd);
+ size_t i, count = 0;
if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
return -EINVAL;
- fse->min_width = supported_modes[fse->index].width;
- fse->max_width = fse->min_width;
- fse->min_height = supported_modes[fse->index].height;
- fse->max_height = fse->min_height;
+ for (i = 0; i < ARRAY_SIZE(supported_modes); i++) {
+ if (!filter_by_mipi_lanes(&supported_modes[i], i, ov08x))
+ continue;
- return 0;
+ if (count == fse->index) {
+ fse->min_width = supported_modes[i].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[i].height;
+ fse->max_height = fse->min_height;
+ return 0;
+ }
+
+ count++;
+ }
+
+ return -EINVAL;
}
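The rewritten ov08x40_enum_frame_size() maps the dense fse->index that userspace iterates onto the sparse subset of supported_modes whose lane count matches the hardware. A minimal standalone sketch of that index-remapping pattern, with hypothetical struct and callback names rather than the driver's types:

#include <stdbool.h>
#include <stddef.h>

struct mode { unsigned int width, height, lanes; };

/* Return the index-th mode accepted by the predicate, or NULL once the
 * dense index runs past the last match (the -EINVAL case above). */
static const struct mode *nth_filtered(const struct mode *modes, size_t n,
				       size_t index,
				       bool (*fit)(const struct mode *m,
						   const void *ctx),
				       const void *ctx)
{
	size_t i, count = 0;

	for (i = 0; i < n; i++) {
		if (!fit(&modes[i], ctx))
			continue;
		if (count++ == index)
			return &modes[i];
	}
	return NULL;
}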
static void ov08x40_update_pad_format(const struct ov08x40_mode *mode,
@@ -1822,10 +1862,13 @@ ov08x40_set_pad_format(struct v4l2_subdev *sd,
if (fmt->format.code != MEDIA_BUS_FMT_SGRBG10_1X10)
fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
- mode = v4l2_find_nearest_size(supported_modes,
- ARRAY_SIZE(supported_modes),
- width, height,
- fmt->format.width, fmt->format.height);
+ mode = v4l2_find_nearest_size_conditional(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width,
+ fmt->format.height,
+ filter_by_mipi_lanes,
+ ov08x);
ov08x40_update_pad_format(mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
@@ -1891,6 +1934,14 @@ static int ov08x40_start_streaming(struct ov08x40 *ov08x)
return ret;
}
+ reg_list = &ov08x40_global_setting;
+ ret = ov08x40_write_reg_list(ov08x, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set global setting\n",
+ __func__);
+ return ret;
+ }
+
/* Apply default values of current mode */
reg_list = &ov08x->cur_mode->reg_list;
ret = ov08x40_write_reg_list(ov08x, reg_list);
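Every mode in these sensor drivers is a table of {address, value} pairs wrapped in a reg_list, and the hunk above makes ov08x40_start_streaming() play back the shared global table before the per-mode one. A hedged sketch of the playback loop such a reg_list implies; the struct and writer callback here are illustrative, not the driver's I2C plumbing:

#include <stdint.h>
#include <stddef.h>

struct reg { uint16_t addr; uint8_t val; };

/* Write each register in order, stopping at the first bus error. */
static int write_reg_list(const struct reg *regs, size_t num,
			  int (*write_reg)(uint16_t addr, uint8_t val))
{
	size_t i;
	int ret;

	for (i = 0; i < num; i++) {
		ret = write_reg(regs[i].addr, regs[i].val);
		if (ret)
			return ret;
	}
	return 0;
}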
@@ -2038,7 +2089,6 @@ static int ov08x40_init_controls(struct ov08x40 *ov08x)
s64 pixel_rate_min;
s64 pixel_rate_max;
const struct ov08x40_mode *mode;
- u32 max;
int ret;
ctrl_hdlr = &ov08x->ctrl_handler;
@@ -2048,12 +2098,11 @@ static int ov08x40_init_controls(struct ov08x40 *ov08x)
mutex_init(&ov08x->mutex);
ctrl_hdlr->lock = &ov08x->mutex;
- max = ARRAY_SIZE(link_freq_menu_items) - 1;
ov08x->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
&ov08x40_ctrl_ops,
V4L2_CID_LINK_FREQ,
- max,
- 0,
+ __fls(ov08x->link_freq_bitmap),
+ __ffs(ov08x->link_freq_bitmap),
link_freq_menu_items);
if (ov08x->link_freq)
ov08x->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
@@ -2149,7 +2198,7 @@ static int ov08x40_check_hwcfg(struct ov08x40 *ov08x, struct device *dev)
};
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
- unsigned int i, j;
+ unsigned int i;
int ret;
u32 xvclk_rate;
@@ -2207,7 +2256,12 @@ static int ov08x40_check_hwcfg(struct ov08x40 *ov08x, struct device *dev)
goto out_err;
}
- if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV08X40_DATA_LANES) {
+ switch (bus_cfg.bus.mipi_csi2.num_data_lanes) {
+ case 2:
+ case 4:
+ ov08x->mipi_lanes = bus_cfg.bus.mipi_csi2.num_data_lanes;
+ break;
+ default:
dev_err(dev, "number of CSI2 data lanes %d is not supported\n",
bus_cfg.bus.mipi_csi2.num_data_lanes);
ret = -EINVAL;
@@ -2219,21 +2273,11 @@ static int ov08x40_check_hwcfg(struct ov08x40 *ov08x, struct device *dev)
ret = -EINVAL;
goto out_err;
}
-
- for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) {
- for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) {
- if (link_freq_menu_items[i] ==
- bus_cfg.link_frequencies[j])
- break;
- }
-
- if (j == bus_cfg.nr_of_link_frequencies) {
- dev_err(dev, "no link frequency %lld supported\n",
- link_freq_menu_items[i]);
- ret = -EINVAL;
- goto out_err;
- }
- }
+ ret = v4l2_link_freq_to_bitmap(dev, bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ link_freq_menu_items,
+ ARRAY_SIZE(link_freq_menu_items),
+ &ov08x->link_freq_bitmap);
out_err:
v4l2_fwnode_endpoint_free(&bus_cfg);
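v4l2_link_freq_to_bitmap() collapses the old hand-rolled double loop into a bitmap of menu entries that the firmware-provided frequency list supports; __fls() and __ffs() of that bitmap then supply the maximum and default index for the V4L2_CID_LINK_FREQ control in ov08x40_init_controls(). A hedged userspace sketch of the same idea (names are hypothetical, and the kernel helper additionally validates and logs):

#include <stdint.h>
#include <stddef.h>

static unsigned long freqs_to_bitmap(const uint64_t *fw_freqs, size_t nfw,
				     const int64_t *menu, size_t nmenu)
{
	unsigned long bitmap = 0;
	size_t i, j;

	for (i = 0; i < nmenu; i++)
		for (j = 0; j < nfw; j++)
			if ((uint64_t)menu[i] == fw_freqs[j])
				bitmap |= 1UL << i;	/* menu entry i usable */
	return bitmap;
}

/* Lowest usable index: __builtin_ctzl(bitmap), the analogue of __ffs();
 * highest: 8 * sizeof(long) - 1 - __builtin_clzl(bitmap), i.e. __fls(). */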
diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
index 73c844aa5697..e85c7d33a670 100644
--- a/drivers/media/i2c/ov13b10.c
+++ b/drivers/media/i2c/ov13b10.c
@@ -34,9 +34,6 @@
#define OV13B10_VTS_120FPS 0x0320
#define OV13B10_VTS_MAX 0x7fff
-/* HBLANK control - read only */
-#define OV13B10_PPL_560MHZ 4704
-
/* Exposure control */
#define OV13B10_REG_EXPOSURE 0x3500
#define OV13B10_EXPOSURE_MIN 4
@@ -95,7 +92,7 @@ struct ov13b10_reg_list {
/* Link frequency config */
struct ov13b10_link_freq_config {
- u32 pixels_per_line;
+ u64 link_freq;
/* registers for this link frequency */
struct ov13b10_reg_list reg_list;
@@ -114,6 +111,10 @@ struct ov13b10_mode {
/* Index of Link frequency config to be used */
u32 link_freq_index;
+
+ /* Pixels per line in current mode */
+ u32 ppl;
+
/* Default register values */
struct ov13b10_reg_list reg_list;
};
@@ -513,6 +514,52 @@ static const struct ov13b10_reg mode_1364x768_120fps_regs[] = {
{0x5001, 0x0d},
};
+static const struct ov13b10_reg mode_2lanes_2104x1560_60fps_regs[] = {
+ {0x3016, 0x32},
+ {0x3106, 0x29},
+ {0x0305, 0xaf},
+ {0x3501, 0x06},
+ {0x3662, 0x88},
+ {0x3714, 0x28},
+ {0x3739, 0x10},
+ {0x37c2, 0x14},
+ {0x37d9, 0x06},
+ {0x37e2, 0x0c},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x08},
+ {0x3809, 0x38},
+ {0x380a, 0x06},
+ {0x380b, 0x18},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x06},
+ {0x380f, 0x3e},
+ {0x3810, 0x00},
+ {0x3811, 0x07},
+ {0x3812, 0x00},
+ {0x3813, 0x05},
+ {0x3814, 0x03},
+ {0x3816, 0x03},
+ {0x3820, 0x8b},
+ {0x3c8c, 0x18},
+ {0x4008, 0x00},
+ {0x4009, 0x05},
+ {0x4050, 0x00},
+ {0x4051, 0x05},
+ {0x4501, 0x08},
+ {0x4505, 0x00},
+ {0x4837, 0x0e},
+ {0x5000, 0xfd},
+ {0x5001, 0x0d},
+};
+
static const char * const ov13b10_test_pattern_menu[] = {
"Disabled",
"Vertical Color Bar Type 1",
@@ -526,15 +573,16 @@ static const char * const ov13b10_test_pattern_menu[] = {
#define OV13B10_LINK_FREQ_INDEX_0 0
#define OV13B10_EXT_CLK 19200000
-#define OV13B10_DATA_LANES 4
+#define OV13B10_4_DATA_LANES 4
+#define OV13B10_2_DATA_LANES 2
/*
- * pixel_rate = link_freq * data-rate * nr_of_lanes / bits_per_sample
- * data rate => double data rate; number of lanes => 4; bits per pixel => 10
+ * pixel_rate = data_rate * nr_of_lanes / bits_per_pixel
+ * data_rate => link_freq * 2; number of lanes => 4 or 2; bits per pixel => 10
*/
-static u64 link_freq_to_pixel_rate(u64 f)
+static u64 link_freq_to_pixel_rate(u64 f, u8 lanes)
{
- f *= 2 * OV13B10_DATA_LANES;
+ f *= 2 * lanes;
do_div(f, 10);
return f;
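Plugging the single supported link frequency into the comment's formula makes the lane count's effect concrete (assuming OV13B10_LINK_FREQ_560MHZ is 560 MHz, as the 1120 Mbps register list implies):

/* pixel_rate = link_freq * 2 (DDR) * lanes / 10 bits per pixel:
 *   4 lanes: 560000000 * 2 * 4 / 10 = 448000000 pix/s
 *   2 lanes: 560000000 * 2 * 2 / 10 = 224000000 pix/s
 */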
@@ -549,7 +597,7 @@ static const s64 link_freq_menu_items[] = {
static const struct ov13b10_link_freq_config
link_freq_configs[] = {
{
- .pixels_per_line = OV13B10_PPL_560MHZ,
+ .link_freq = OV13B10_LINK_FREQ_560MHZ,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mipi_data_rate_1120mbps),
.regs = mipi_data_rate_1120mbps,
@@ -558,12 +606,14 @@ static const struct ov13b10_link_freq_config
};
/* Mode configs */
-static const struct ov13b10_mode supported_modes[] = {
+static const struct ov13b10_mode supported_4_lanes_modes[] = {
+ /* 4 data lanes */
{
.width = 4208,
.height = 3120,
.vts_def = OV13B10_VTS_30FPS,
.vts_min = OV13B10_VTS_30FPS,
+ .ppl = 4704,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_4208x3120_regs),
.regs = mode_4208x3120_regs,
@@ -575,6 +625,7 @@ static const struct ov13b10_mode supported_modes[] = {
.height = 3120,
.vts_def = OV13B10_VTS_30FPS,
.vts_min = OV13B10_VTS_30FPS,
+ .ppl = 4704,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_4160x3120_regs),
.regs = mode_4160x3120_regs,
@@ -586,6 +637,7 @@ static const struct ov13b10_mode supported_modes[] = {
.height = 2340,
.vts_def = OV13B10_VTS_30FPS,
.vts_min = OV13B10_VTS_30FPS,
+ .ppl = 4704,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_4160x2340_regs),
.regs = mode_4160x2340_regs,
@@ -597,6 +649,7 @@ static const struct ov13b10_mode supported_modes[] = {
.height = 1560,
.vts_def = OV13B10_VTS_60FPS,
.vts_min = OV13B10_VTS_60FPS,
+ .ppl = 4704,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_2104x1560_regs),
.regs = mode_2104x1560_regs,
@@ -608,6 +661,7 @@ static const struct ov13b10_mode supported_modes[] = {
.height = 1170,
.vts_def = OV13B10_VTS_60FPS,
.vts_min = OV13B10_VTS_60FPS,
+ .ppl = 4704,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_2080x1170_regs),
.regs = mode_2080x1170_regs,
@@ -620,6 +674,7 @@ static const struct ov13b10_mode supported_modes[] = {
.vts_def = OV13B10_VTS_120FPS,
.vts_min = OV13B10_VTS_120FPS,
.link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ .ppl = 4664,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_1364x768_120fps_regs),
.regs = mode_1364x768_120fps_regs,
@@ -627,6 +682,23 @@ static const struct ov13b10_mode supported_modes[] = {
},
};
+static const struct ov13b10_mode supported_2_lanes_modes[] = {
+ /* 2 data lanes */
+ {
+ .width = 2104,
+ .height = 1560,
+ .vts_def = OV13B10_VTS_60FPS,
+ .vts_min = OV13B10_VTS_60FPS,
+ .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ .ppl = 2352,
+ .reg_list = {
+ .num_of_regs =
+ ARRAY_SIZE(mode_2lanes_2104x1560_60fps_regs),
+ .regs = mode_2lanes_2104x1560_60fps_regs,
+ },
+ },
+};
+
struct ov13b10 {
struct v4l2_subdev sd;
struct media_pad pad;
@@ -644,12 +716,20 @@ struct ov13b10 {
struct v4l2_ctrl *hblank;
struct v4l2_ctrl *exposure;
+ /* Supported modes */
+ const struct ov13b10_mode *supported_modes;
+
/* Current mode */
const struct ov13b10_mode *cur_mode;
/* Mutex for serialized access */
struct mutex mutex;
+ u8 supported_modes_num;
+
+ /* Data lanes used */
+ u8 data_lanes;
+
/* True if the device has been identified */
bool identified;
};
@@ -753,8 +833,8 @@ static int ov13b10_write_reg_list(struct ov13b10 *ov13b,
/* Open sub-device */
static int ov13b10_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- const struct ov13b10_mode *default_mode = &supported_modes[0];
struct ov13b10 *ov13b = to_ov13b10(sd);
+ const struct ov13b10_mode *default_mode = ov13b->supported_modes;
struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_state_get_format(fh->state,
0);
@@ -973,7 +1053,10 @@ static int ov13b10_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- if (fse->index >= ARRAY_SIZE(supported_modes))
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+ const struct ov13b10_mode *supported_modes = ov13b->supported_modes;
+
+ if (fse->index >= ov13b->supported_modes_num)
return -EINVAL;
if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
@@ -1033,6 +1116,7 @@ ov13b10_set_pad_format(struct v4l2_subdev *sd,
{
struct ov13b10 *ov13b = to_ov13b10(sd);
const struct ov13b10_mode *mode;
+ const struct ov13b10_mode *supported_modes = ov13b->supported_modes;
struct v4l2_mbus_framefmt *framefmt;
s32 vblank_def;
s32 vblank_min;
@@ -1047,7 +1131,7 @@ ov13b10_set_pad_format(struct v4l2_subdev *sd,
fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
mode = v4l2_find_nearest_size(supported_modes,
- ARRAY_SIZE(supported_modes),
+ ov13b->supported_modes_num,
width, height,
fmt->format.width, fmt->format.height);
ov13b10_update_pad_format(mode, fmt);
@@ -1058,23 +1142,18 @@ ov13b10_set_pad_format(struct v4l2_subdev *sd,
ov13b->cur_mode = mode;
__v4l2_ctrl_s_ctrl(ov13b->link_freq, mode->link_freq_index);
link_freq = link_freq_menu_items[mode->link_freq_index];
- pixel_rate = link_freq_to_pixel_rate(link_freq);
+ pixel_rate = link_freq_to_pixel_rate(link_freq,
+ ov13b->data_lanes);
__v4l2_ctrl_s_ctrl_int64(ov13b->pixel_rate, pixel_rate);
/* Update limits and set FPS to default */
- vblank_def = ov13b->cur_mode->vts_def -
- ov13b->cur_mode->height;
- vblank_min = ov13b->cur_mode->vts_min -
- ov13b->cur_mode->height;
+ vblank_def = mode->vts_def - mode->height;
+ vblank_min = mode->vts_min - mode->height;
__v4l2_ctrl_modify_range(ov13b->vblank, vblank_min,
- OV13B10_VTS_MAX
- - ov13b->cur_mode->height,
- 1,
- vblank_def);
+ OV13B10_VTS_MAX - mode->height,
+ 1, vblank_def);
__v4l2_ctrl_s_ctrl(ov13b->vblank, vblank_def);
- h_blank =
- link_freq_configs[mode->link_freq_index].pixels_per_line
- - ov13b->cur_mode->width;
+ h_blank = mode->ppl - mode->width;
__v4l2_ctrl_modify_range(ov13b->hblank, h_blank,
h_blank, 1, h_blank);
}
@@ -1311,7 +1390,8 @@ static int ov13b10_init_controls(struct ov13b10 *ov13b)
if (ov13b->link_freq)
ov13b->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
- pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0]);
+ pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0],
+ ov13b->data_lanes);
pixel_rate_min = 0;
/* By default, PIXEL_RATE is read only */
ov13b->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
@@ -1328,8 +1408,7 @@ static int ov13b10_init_controls(struct ov13b10 *ov13b)
OV13B10_VTS_MAX - mode->height, 1,
vblank_def);
- hblank = link_freq_configs[mode->link_freq_index].pixels_per_line -
- mode->width;
+ hblank = mode->ppl - mode->width;
ov13b->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
V4L2_CID_HBLANK,
hblank, hblank, 1, hblank);
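With horizontal blanking now derived from the per-mode ppl field instead of a per-link-frequency constant, the control value follows directly from the mode tables above:

/* hblank = mode->ppl - mode->width:
 *   4-lane 4208x3120: 4704 - 4208 = 496 pixels
 *   2-lane 2104x1560: 2352 - 2104 = 248 pixels
 */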
@@ -1423,7 +1502,7 @@ static int ov13b10_get_pm_resources(struct device *dev)
return 0;
}
-static int ov13b10_check_hwcfg(struct device *dev)
+static int ov13b10_check_hwcfg(struct device *dev, struct ov13b10 *ov13b)
{
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
@@ -1433,6 +1512,7 @@ static int ov13b10_check_hwcfg(struct device *dev)
unsigned int i, j;
int ret;
u32 ext_clk;
+ u8 dlane;
if (!fwnode)
return -ENXIO;
@@ -1459,13 +1539,32 @@ static int ov13b10_check_hwcfg(struct device *dev)
if (ret)
return ret;
- if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV13B10_DATA_LANES) {
+ dlane = bus_cfg.bus.mipi_csi2.num_data_lanes;
+ switch (dlane) {
+ case OV13B10_4_DATA_LANES:
+ ov13b->supported_modes = supported_4_lanes_modes;
+ ov13b->supported_modes_num =
+ ARRAY_SIZE(supported_4_lanes_modes);
+ break;
+
+ case OV13B10_2_DATA_LANES:
+ ov13b->supported_modes = supported_2_lanes_modes;
+ ov13b->supported_modes_num =
+ ARRAY_SIZE(supported_2_lanes_modes);
+ break;
+
+ default:
dev_err(dev, "number of CSI2 data lanes %d is not supported",
- bus_cfg.bus.mipi_csi2.num_data_lanes);
+ dlane);
ret = -EINVAL;
goto out_err;
}
+ ov13b->data_lanes = dlane;
+ ov13b->cur_mode = ov13b->supported_modes;
+ dev_dbg(dev, "%u lanes with %u modes selected\n",
+ ov13b->data_lanes, ov13b->supported_modes_num);
+
if (!bus_cfg.nr_of_link_frequencies) {
dev_err(dev, "no link frequencies defined");
ret = -EINVAL;
@@ -1499,17 +1598,17 @@ static int ov13b10_probe(struct i2c_client *client)
bool full_power;
int ret;
+ ov13b = devm_kzalloc(&client->dev, sizeof(*ov13b), GFP_KERNEL);
+ if (!ov13b)
+ return -ENOMEM;
+
/* Check HW config */
- ret = ov13b10_check_hwcfg(&client->dev);
+ ret = ov13b10_check_hwcfg(&client->dev, ov13b);
if (ret) {
dev_err(&client->dev, "failed to check hwcfg: %d", ret);
return ret;
}
- ov13b = devm_kzalloc(&client->dev, sizeof(*ov13b), GFP_KERNEL);
- if (!ov13b)
- return -ENOMEM;
-
/* Initialize subdev */
v4l2_i2c_subdev_init(&ov13b->sd, client, &ov13b10_subdev_ops);
@@ -1533,9 +1632,6 @@ static int ov13b10_probe(struct i2c_client *client)
}
}
- /* Set default mode to max resolution */
- ov13b->cur_mode = &supported_modes[0];
-
ret = ov13b10_init_controls(ov13b);
if (ret)
goto error_power_off;
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 80d151e8ae29..6cf461e3373c 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -1456,12 +1456,12 @@ static int ov2740_probe(struct i2c_client *client)
return 0;
probe_error_v4l2_subdev_cleanup:
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
v4l2_subdev_cleanup(&ov2740->sd);
probe_error_media_entity_cleanup:
media_entity_cleanup(&ov2740->sd.entity);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
probe_error_v4l2_ctrl_handler_free:
v4l2_ctrl_handler_free(ov2740->sd.ctrl_handler);
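The ov2740 hunk reorders the probe error path so resources are released in the reverse order of acquisition: pm_runtime was enabled after the subdev was set up, so it must be disabled before v4l2_subdev_cleanup() runs. A generic sketch of the goto-unwind pattern this restores, with purely illustrative stub names:

#include <stdbool.h>

static bool setup_a(void)    { return true; }
static bool setup_b(void)    { return true; }
static bool setup_c(void)    { return true; }
static void teardown_a(void) { }
static void teardown_b(void) { }

static int probe(void)
{
	if (!setup_a())
		return -1;
	if (!setup_b())
		goto err_a;
	if (!setup_c())
		goto err_b;
	return 0;

err_b:			/* unwind newest-first */
	teardown_b();
err_a:
	teardown_a();
	return -1;
}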
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index c1081deffc2f..e7aec281e9a4 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -1295,11 +1295,8 @@ static int ov5675_probe(struct i2c_client *client)
return -ENOMEM;
ret = ov5675_get_hwcfg(ov5675, &client->dev);
- if (ret) {
- dev_err(&client->dev, "failed to get HW configuration: %d",
- ret);
+ if (ret)
return ret;
- }
v4l2_i2c_subdev_init(&ov5675->sd, client, &ov5675_subdev_ops);
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index e6704d018248..4b6874d2a104 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -2276,8 +2276,8 @@ static int ov8856_get_hwcfg(struct ov8856 *ov8856, struct device *dev)
if (!is_acpi_node(fwnode)) {
ov8856->xvclk = devm_clk_get(dev, "xvclk");
if (IS_ERR(ov8856->xvclk)) {
- dev_err(dev, "could not get xvclk clock (%pe)\n",
- ov8856->xvclk);
+ dev_err_probe(dev, PTR_ERR(ov8856->xvclk),
+ "could not get xvclk clock\n");
return PTR_ERR(ov8856->xvclk);
}
@@ -2382,11 +2382,8 @@ static int ov8856_probe(struct i2c_client *client)
return -ENOMEM;
ret = ov8856_get_hwcfg(ov8856, &client->dev);
- if (ret) {
- dev_err(&client->dev, "failed to get HW configuration: %d",
- ret);
+ if (ret)
return ret;
- }
v4l2_i2c_subdev_init(&ov8856->sd, client, &ov8856_subdev_ops);
diff --git a/drivers/media/i2c/rdacm20.c b/drivers/media/i2c/rdacm20.c
index b8bd8354d100..52e8e2620b4d 100644
--- a/drivers/media/i2c/rdacm20.c
+++ b/drivers/media/i2c/rdacm20.c
@@ -16,10 +16,10 @@
*/
#include <linux/delay.h>
-#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
@@ -575,10 +575,9 @@ static int rdacm20_probe(struct i2c_client *client)
dev->dev = &client->dev;
dev->serializer.client = client;
- ret = of_property_read_u32_array(client->dev.of_node, "reg",
- dev->addrs, 2);
+ ret = device_property_read_u32_array(dev->dev, "reg", dev->addrs, 2);
if (ret < 0) {
- dev_err(dev->dev, "Invalid DT reg property: %d\n", ret);
+ dev_err(dev->dev, "Invalid FW reg property: %d\n", ret);
return -EINVAL;
}
diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
index 3e22df36354f..bcab462708c7 100644
--- a/drivers/media/i2c/rdacm21.c
+++ b/drivers/media/i2c/rdacm21.c
@@ -11,10 +11,10 @@
*/
#include <linux/delay.h>
-#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
@@ -551,10 +551,9 @@ static int rdacm21_probe(struct i2c_client *client)
dev->dev = &client->dev;
dev->serializer.client = client;
- ret = of_property_read_u32_array(client->dev.of_node, "reg",
- dev->addrs, 2);
+ ret = device_property_read_u32_array(dev->dev, "reg", dev->addrs, 2);
if (ret < 0) {
- dev_err(dev->dev, "Invalid DT reg property: %d\n", ret);
+ dev_err(dev->dev, "Invalid FW reg property: %d\n", ret);
return -EINVAL;
}
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 2d5f42f11158..dcef93e1a3bc 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -313,6 +313,10 @@ static int tc358743_get_detected_timings(struct v4l2_subdev *sd,
memset(timings, 0, sizeof(struct v4l2_dv_timings));
+ /* if HPD is low, ignore any video */
+ if (!(i2c_rd8(sd, HPD_CTL) & MASK_HPD_OUT0))
+ return -ENOLINK;
+
if (no_signal(sd)) {
v4l2_dbg(1, debug, sd, "%s: no valid signal\n", __func__);
return -ENOLINK;
diff --git a/drivers/media/i2c/vd55g1.c b/drivers/media/i2c/vd55g1.c
new file mode 100644
index 000000000000..25e2fc88a036
--- /dev/null
+++ b/drivers/media/i2c/vd55g1.c
@@ -0,0 +1,1965 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the VD55G1 global shutter sensor family
+ *
+ * Copyright (C) 2025 STMicroelectronics SA
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+#include <linux/units.h>
+
+#include <media/mipi-csi2.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+/* Register Map */
+#define VD55G1_REG_MODEL_ID CCI_REG32_LE(0x0000)
+#define VD55G1_MODEL_ID 0x53354731
+#define VD55G1_REG_REVISION CCI_REG16_LE(0x0004)
+#define VD55G1_REVISION_CCB 0x2020
+#define VD55G1_REG_FWPATCH_REVISION CCI_REG16_LE(0x0012)
+#define VD55G1_REG_FWPATCH_START_ADDR CCI_REG8(0x2000)
+#define VD55G1_REG_SYSTEM_FSM CCI_REG8(0x001c)
+#define VD55G1_SYSTEM_FSM_READY_TO_BOOT 0x01
+#define VD55G1_SYSTEM_FSM_SW_STBY 0x02
+#define VD55G1_SYSTEM_FSM_STREAMING 0x03
+#define VD55G1_REG_BOOT CCI_REG8(0x0200)
+#define VD55G1_BOOT_PATCH_SETUP 2
+#define VD55G1_REG_STBY CCI_REG8(0x0201)
+#define VD55G1_STBY_START_STREAM 1
+#define VD55G1_REG_STREAMING CCI_REG8(0x0202)
+#define VD55G1_STREAMING_STOP_STREAM 1
+#define VD55G1_REG_EXT_CLOCK CCI_REG32_LE(0x0220)
+#define VD55G1_REG_LINE_LENGTH CCI_REG16_LE(0x0300)
+#define VD55G1_REG_ORIENTATION CCI_REG8(0x0302)
+#define VD55G1_REG_FORMAT_CTRL CCI_REG8(0x030a)
+#define VD55G1_REG_OIF_CTRL CCI_REG16_LE(0x030c)
+#define VD55G1_REG_ISL_ENABLE				CCI_REG16_LE(0x0326)
+#define VD55G1_REG_OIF_IMG_CTRL CCI_REG8(0x030f)
+#define VD55G1_REG_MIPI_DATA_RATE CCI_REG32_LE(0x0224)
+#define VD55G1_REG_PATGEN_CTRL CCI_REG16_LE(0x0304)
+#define VD55G1_PATGEN_TYPE_SHIFT 4
+#define VD55G1_PATGEN_ENABLE BIT(0)
+#define VD55G1_REG_MANUAL_ANALOG_GAIN CCI_REG8(0x0501)
+#define VD55G1_REG_MANUAL_COARSE_EXPOSURE CCI_REG16_LE(0x0502)
+#define VD55G1_REG_MANUAL_DIGITAL_GAIN CCI_REG16_LE(0x0504)
+#define VD55G1_REG_APPLIED_COARSE_EXPOSURE CCI_REG16_LE(0x00e8)
+#define VD55G1_REG_APPLIED_ANALOG_GAIN CCI_REG16_LE(0x00ea)
+#define VD55G1_REG_APPLIED_DIGITAL_GAIN CCI_REG16_LE(0x00ec)
+#define VD55G1_REG_AE_FORCE_COLDSTART CCI_REG8(0x0308)
+#define VD55G1_REG_AE_COLDSTART_EXP_TIME CCI_REG32_LE(0x0374)
+#define VD55G1_REG_READOUT_CTRL CCI_REG8(0x052e)
+#define VD55G1_READOUT_CTRL_BIN_MODE_NORMAL 0
+#define VD55G1_READOUT_CTRL_BIN_MODE_DIGITAL_X2 1
+#define VD55G1_REG_DUSTER_CTRL CCI_REG8(0x03ea)
+#define VD55G1_DUSTER_ENABLE BIT(0)
+#define VD55G1_DUSTER_DISABLE 0
+#define VD55G1_DUSTER_DYN_ENABLE BIT(1)
+#define VD55G1_DUSTER_RING_ENABLE BIT(4)
+#define VD55G1_REG_AE_TARGET_PERCENTAGE CCI_REG8(0x0486)
+#define VD55G1_REG_NEXT_CTX CCI_REG16_LE(0x03e4)
+#define VD55G1_REG_EXPOSURE_USE_CASES CCI_REG8(0x0312)
+#define VD55G1_EXPOSURE_USE_CASES_MULTI_CONTEXT BIT(2)
+#define VD55G1_REG_EXPOSURE_MAX_COARSE CCI_REG16_LE(0x0372)
+#define VD55G1_EXPOSURE_MAX_COARSE_DEF 0x7fff
+#define VD55G1_EXPOSURE_MAX_COARSE_SUB 446
+#define VD55G1_REG_CTX_REPEAT_COUNT_CTX0 CCI_REG16_LE(0x03dc)
+#define VD55G1_REG_CTX_REPEAT_COUNT_CTX1 CCI_REG16_LE(0x03de)
+
+#define VD55G1_REG_EXP_MODE(ctx) \
+ CCI_REG8(0x0500 + VD55G1_CTX_OFFSET * (ctx))
+#define VD55G1_REG_FRAME_LENGTH(ctx) \
+ CCI_REG32_LE(0x050c + VD55G1_CTX_OFFSET * (ctx))
+#define VD55G1_REG_X_START(ctx) \
+ CCI_REG16_LE(0x0514 + VD55G1_CTX_OFFSET * (ctx))
+#define VD55G1_REG_X_WIDTH(ctx) \
+ CCI_REG16_LE(0x0516 + VD55G1_CTX_OFFSET * (ctx))
+#define VD55G1_REG_Y_START(ctx) \
+ CCI_REG16_LE(0x0510 + VD55G1_CTX_OFFSET * (ctx))
+#define VD55G1_REG_Y_HEIGHT(ctx) \
+ CCI_REG16_LE(0x0512 + VD55G1_CTX_OFFSET * (ctx))
+#define VD55G1_REG_GPIO_0_CTRL(ctx) \
+ CCI_REG8(0x051d + VD55G1_CTX_OFFSET * (ctx))
+#define VD55G1_GPIO_MODE_FSYNC_OUT 0x00
+#define VD55G1_GPIO_MODE_IN 0x01
+#define VD55G1_GPIO_MODE_STROBE 0x02
+#define VD55G1_REG_VT_MODE(ctx) \
+ CCI_REG8(0x0536 + VD55G1_CTX_OFFSET * (ctx))
+#define VD55G1_VT_MODE_NORMAL 0
+#define VD55G1_VT_MODE_SUBTRACTION 1
+#define VD55G1_REG_MASK_FRAME_CTRL(ctx) \
+ CCI_REG8(0x0537 + VD55G1_CTX_OFFSET * (ctx))
+#define VD55G1_MASK_FRAME_CTRL_OUTPUT 0
+#define VD55G1_MASK_FRAME_CTRL_MASK 1
+#define VD55G1_REG_EXPOSURE_INSTANCE(ctx) \
+	CCI_REG32_LE(0x052d + VD55G1_CTX_OFFSET * (ctx))
+
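The (ctx) macros above instantiate one register layout for each of the sensor's two contexts, which are identical banks spaced VD55G1_CTX_OFFSET (0x50) apart. For instance, taking X_WIDTH from the list:

/* With VD55G1_CTX_OFFSET == 0x50:
 *   VD55G1_REG_X_WIDTH(0) -> CCI_REG16_LE(0x0516)
 *   VD55G1_REG_X_WIDTH(1) -> CCI_REG16_LE(0x0566)
 * so context 1 mirrors context 0 at a fixed stride.
 */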
+#define VD55G1_WIDTH 804
+#define VD55G1_HEIGHT 704
+#define VD55G1_DEFAULT_MODE 0
+#define VD55G1_NB_GPIOS 4
+#define VD55G1_MEDIA_BUS_FMT_DEF MEDIA_BUS_FMT_Y8_1X8
+#define VD55G1_DGAIN_DEF 256
+#define VD55G1_AGAIN_DEF 19
+#define VD55G1_EXPO_MAX_TERM 64
+#define VD55G1_EXPO_DEF 500
+#define VD55G1_LINE_LENGTH_MIN 1128
+#define VD55G1_LINE_LENGTH_SUB_MIN 1344
+#define VD55G1_VBLANK_MIN 86
+#define VD55G1_VBLANK_MAX 0xffff
+#define VD55G1_FRAME_LENGTH_DEF 1860 /* 60 fps */
+#define VD55G1_MIPI_MARGIN 900
+#define VD55G1_CTX_OFFSET 0x50
+#define VD55G1_FWPATCH_REVISION_MAJOR 2
+#define VD55G1_FWPATCH_REVISION_MINOR 9
+#define VD55G1_XCLK_FREQ_MIN (6 * HZ_PER_MHZ)
+#define VD55G1_XCLK_FREQ_MAX (27 * HZ_PER_MHZ)
+#define VD55G1_MIPI_RATE_MIN (250 * HZ_PER_MHZ)
+#define VD55G1_MIPI_RATE_MAX (1200 * HZ_PER_MHZ)
+
+static const u8 patch_array[] = {
+ 0x44, 0x03, 0x09, 0x02, 0xe6, 0x01, 0x42, 0x00, 0xea, 0x01, 0x42, 0x00,
+ 0xf0, 0x01, 0x42, 0x00, 0xe6, 0x01, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x4c, 0x00, 0x00, 0xfa, 0x68, 0x40, 0x00, 0xe8,
+ 0x09, 0xbe, 0x4c, 0x08, 0x00, 0xf2, 0x93, 0xdd, 0x1c, 0x00, 0xc0, 0xe2,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x0c, 0x04, 0x00, 0xfa, 0x6b, 0x80, 0x98, 0x7f,
+ 0xfc, 0xef, 0x11, 0xc1, 0x0f, 0x82, 0x69, 0xbe, 0x0f, 0xac, 0x58, 0x40,
+ 0x00, 0xe8, 0x0c, 0x0c, 0x00, 0xf2, 0x93, 0xdd, 0x1c, 0x00, 0x40, 0xe3,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x0c, 0x04, 0x84, 0xfa, 0x46, 0x0e, 0xe8, 0xe0,
+ 0x08, 0xde, 0x4a, 0x40, 0x84, 0xe0, 0xa5, 0x86, 0xa8, 0x7d, 0xfc, 0xef,
+ 0x6b, 0x80, 0x01, 0xbf, 0x28, 0x77, 0x0c, 0xef, 0x0b, 0x0e, 0x21, 0x78,
+ 0x06, 0xc0, 0x0b, 0xa5, 0xb5, 0x84, 0x06, 0x42, 0x98, 0xe1, 0x01, 0x81,
+ 0x01, 0x42, 0x38, 0xe0, 0x0c, 0xc4, 0x0e, 0x84, 0x46, 0x02, 0x84, 0xe0,
+ 0x0c, 0x84, 0x11, 0x81, 0x21, 0x81, 0x31, 0x81, 0x41, 0x81, 0x51, 0x81,
+ 0xc1, 0x81, 0x05, 0x83, 0x0c, 0x0c, 0x84, 0xf2, 0x93, 0xdd, 0x06, 0x40,
+ 0x98, 0xe1, 0xc8, 0x80, 0x58, 0x82, 0x48, 0xc0, 0x38, 0xc2, 0x29, 0x00,
+ 0x10, 0xe0, 0x19, 0x00, 0x14, 0xe0, 0x09, 0x00, 0x38, 0xe0, 0x5f, 0xb8,
+ 0x5f, 0xa8, 0x5f, 0xa6, 0x5f, 0xa4, 0x5f, 0xa2, 0x5f, 0xa0, 0x56, 0x41,
+ 0x98, 0xe1, 0x18, 0x82, 0x28, 0x80, 0x38, 0xc0, 0x5f, 0xa2, 0x19, 0x00,
+ 0x20, 0xf8, 0x5f, 0xa4, 0x28, 0xc2, 0x5f, 0xa6, 0x39, 0x00, 0x10, 0xe0,
+ 0x5f, 0xa2, 0x19, 0x00, 0x14, 0xe0, 0x5f, 0xa4, 0x29, 0x00, 0x18, 0xe0,
+ 0x5f, 0xa6, 0x39, 0x00, 0x40, 0xe0, 0x5f, 0xa2, 0x19, 0x00, 0x44, 0xe0,
+ 0x5f, 0xa4, 0x29, 0x00, 0x1c, 0xe0, 0x5f, 0xa6, 0x39, 0x00, 0x38, 0xe0,
+ 0x5f, 0xa2, 0x19, 0x00, 0x20, 0xe0, 0x5f, 0xa4, 0x29, 0x00, 0x24, 0xe0,
+ 0x5f, 0xa6, 0x39, 0x00, 0x28, 0xe0, 0x5f, 0xa2, 0x19, 0x00, 0x2c, 0xe0,
+ 0x5f, 0xa4, 0x29, 0x00, 0x30, 0xe0, 0x5f, 0xa6, 0x09, 0x00, 0x34, 0xe0,
+ 0x5f, 0xa2, 0x5f, 0xa4, 0x5f, 0xa0, 0x4a, 0x0a, 0xfc, 0xfb, 0xe5, 0x82,
+ 0x08, 0xde, 0x4a, 0x40, 0x88, 0xe0, 0xf6, 0x40, 0x00, 0xe0, 0x01, 0x4e,
+ 0x99, 0x78, 0x0a, 0xc0, 0x85, 0x80, 0x98, 0x40, 0x00, 0xe8, 0x35, 0x81,
+ 0xa8, 0x40, 0x00, 0xe8, 0x0b, 0x8c, 0x0c, 0x0c, 0x84, 0xf2, 0xd5, 0xed,
+ 0x83, 0xc1, 0x13, 0xc5, 0x93, 0xdd, 0xc3, 0xc1, 0x83, 0xc1, 0x13, 0xc3,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x4c, 0x04, 0x04, 0xfa, 0xc6, 0x0f, 0x94, 0xe0,
+ 0x19, 0x0e, 0xc9, 0x65, 0x01, 0xc0, 0x28, 0xde, 0x0a, 0x42, 0x80, 0xe0,
+ 0x24, 0x02, 0x00, 0xfc, 0x16, 0xde, 0xa5, 0x8a, 0x19, 0x00, 0xb8, 0xe0,
+ 0x10, 0x02, 0x0c, 0xec, 0x1d, 0xe6, 0x14, 0x02, 0x88, 0x80, 0x4e, 0x04,
+ 0x01, 0x00, 0x10, 0x80, 0x25, 0x02, 0x08, 0x9c, 0x86, 0x02, 0x00, 0x80,
+ 0x08, 0x44, 0x00, 0x98, 0x55, 0x81, 0x11, 0x85, 0x45, 0x81, 0x11, 0x89,
+ 0x25, 0x81, 0x11, 0x83, 0x2b, 0x00, 0x24, 0xe0, 0x64, 0xc2, 0x0b, 0x84,
+ 0x08, 0x51, 0x00, 0xef, 0x2b, 0x80, 0x01, 0x83, 0x1b, 0x8c, 0x38, 0x7d,
+ 0x5c, 0xef, 0x18, 0xde, 0x0b, 0xa1, 0x25, 0x82, 0x0b, 0x0e, 0x88, 0xf9,
+ 0x0a, 0x00, 0x00, 0xe8, 0x10, 0x42, 0x04, 0x9c, 0x11, 0x4e, 0x0c, 0x80,
+ 0x10, 0x40, 0x04, 0xf0, 0x4e, 0x05, 0x01, 0x60, 0x10, 0xc0, 0x06, 0x88,
+ 0x10, 0x40, 0xf8, 0xf3, 0x06, 0xde, 0x4c, 0x0c, 0x04, 0xf2, 0x93, 0xdd,
+ 0x0c, 0x04, 0x1c, 0xfe, 0xf6, 0x0f, 0x94, 0xe0, 0x38, 0x9c, 0x46, 0x51,
+ 0xfc, 0xe0, 0x46, 0x49, 0x38, 0xe2, 0x30, 0x46, 0xf8, 0xf3, 0x36, 0x9c,
+ 0xc6, 0x46, 0x0c, 0xe1, 0x34, 0x8c, 0x94, 0xa0, 0x4e, 0xa0, 0x39, 0x06,
+ 0x80, 0xe0, 0x4a, 0x46, 0x94, 0xe0, 0x05, 0x8c, 0x6a, 0x40, 0x80, 0xe0,
+ 0x2c, 0x0c, 0x00, 0xe2, 0x0b, 0x8c, 0xb8, 0x7c, 0x5c, 0xef, 0x0b, 0x8c,
+ 0x9e, 0xa0, 0xf8, 0x40, 0x60, 0xef, 0x0b, 0xa1, 0x5a, 0x40, 0x80, 0xe0,
+ 0x65, 0x88, 0x28, 0x02, 0x01, 0x40, 0x00, 0x80, 0x2a, 0x42, 0x9c, 0xe1,
+ 0x28, 0x49, 0x60, 0xef, 0x96, 0x4d, 0x9c, 0xe1, 0x01, 0x81, 0x06, 0x98,
+ 0xd5, 0x81, 0x09, 0x0e, 0xa1, 0x64, 0x01, 0xc0, 0x4a, 0x40, 0x88, 0xe0,
+ 0x85, 0x80, 0xb8, 0x77, 0xfc, 0xef, 0x35, 0x81, 0xc8, 0x77, 0xfc, 0xef,
+ 0x08, 0x98, 0x4a, 0x00, 0xfc, 0xfb, 0x55, 0xfc, 0xe8, 0x4a, 0x60, 0xef,
+ 0x1a, 0x44, 0x9c, 0xe1, 0x35, 0x81, 0x1a, 0x4e, 0x9c, 0xe9, 0x1c, 0x00,
+ 0x00, 0xe2, 0x0c, 0x0c, 0x1c, 0xf6, 0x93, 0xdd, 0x0d, 0xc3, 0x1a, 0x41,
+ 0x08, 0xe4, 0x0a, 0x40, 0x84, 0xe1, 0x0c, 0x00, 0x00, 0xe2, 0x93, 0xdd,
+ 0x4c, 0x04, 0x1c, 0xfa, 0x86, 0x52, 0xec, 0xe1, 0x08, 0xa6, 0x65, 0x12,
+ 0x24, 0xf8, 0x0e, 0x02, 0x99, 0x7a, 0x00, 0xc0, 0x00, 0x40, 0xa0, 0xf3,
+ 0x06, 0xa6, 0x0b, 0x8c, 0x08, 0x49, 0x00, 0xef, 0x85, 0x12, 0x28, 0xf8,
+ 0x02, 0x02, 0xfc, 0xed, 0xf6, 0x47, 0xfd, 0x6f, 0xe0, 0xff, 0x04, 0xe2,
+ 0x14, 0x04, 0xc0, 0xe0, 0x0f, 0x86, 0x2f, 0xa0, 0x0b, 0x8c, 0x2e, 0xe2,
+ 0x08, 0x48, 0x00, 0xef, 0x86, 0x02, 0x84, 0xfe, 0x0e, 0x05, 0x09, 0x7d,
+ 0x00, 0xc0, 0x05, 0x52, 0x08, 0xf8, 0x18, 0x7d, 0xfc, 0xef, 0x4a, 0x40,
+ 0x80, 0xe0, 0x09, 0x12, 0x04, 0xc0, 0x65, 0x12, 0x20, 0xf8, 0x00, 0x40,
+ 0x40, 0xdc, 0x01, 0x52, 0x04, 0xc0, 0x0e, 0x00, 0x41, 0x78, 0xf5, 0xc5,
+ 0x6d, 0xc0, 0xb5, 0x82, 0x05, 0x10, 0x10, 0xe0, 0x11, 0xf1, 0x0f, 0x82,
+ 0x05, 0x50, 0x10, 0xe0, 0x05, 0x10, 0x10, 0xe0, 0xfe, 0x02, 0xf0, 0xff,
+ 0x0f, 0x82, 0x85, 0x83, 0x15, 0x10, 0x10, 0xe0, 0x16, 0x00, 0x91, 0x6e,
+ 0x69, 0xcd, 0x21, 0xf1, 0x6d, 0xc1, 0x01, 0x83, 0x2f, 0x82, 0x26, 0x00,
+ 0x00, 0x80, 0x2f, 0xa0, 0x25, 0x50, 0x10, 0xe0, 0x05, 0x10, 0x10, 0xe0,
+ 0x11, 0xa1, 0xfe, 0x04, 0xf0, 0xff, 0x06, 0x42, 0x00, 0x80, 0x0f, 0x84,
+ 0x0f, 0xa2, 0x05, 0x50, 0x10, 0xe0, 0x16, 0x00, 0x91, 0x6e, 0x69, 0xcd,
+ 0x6d, 0xc1, 0x71, 0x8d, 0x16, 0x00, 0x79, 0x61, 0x2d, 0xcb, 0x86, 0x0e,
+ 0x00, 0x80, 0x6d, 0xc1, 0x56, 0x0e, 0x00, 0xc0, 0x0b, 0x8c, 0x1b, 0x8e,
+ 0x71, 0x52, 0x0c, 0xf8, 0x08, 0x43, 0x00, 0xef, 0x05, 0x52, 0x14, 0xf8,
+ 0x15, 0x10, 0x28, 0xe0, 0x70, 0x04, 0x04, 0xec, 0x31, 0xe1, 0x29, 0x9e,
+ 0x1f, 0x86, 0x1f, 0xa4, 0x15, 0x50, 0x28, 0xe0, 0x86, 0x42, 0x3c, 0xe0,
+ 0x0e, 0x04, 0x9d, 0x64, 0x9b, 0xc2, 0x05, 0x52, 0x1c, 0xf8, 0x78, 0xa6,
+ 0x48, 0x77, 0xfc, 0xef, 0x4a, 0x40, 0x80, 0xe0, 0x70, 0x4e, 0x10, 0xdc,
+ 0x1e, 0x00, 0x81, 0x70, 0xeb, 0xcb, 0x70, 0x4e, 0xec, 0x93, 0x6d, 0xc1,
+ 0x11, 0x85, 0x36, 0x02, 0x00, 0x80, 0x76, 0xa6, 0x11, 0x52, 0x10, 0xf8,
+ 0x05, 0x10, 0x40, 0xe0, 0xfe, 0x47, 0x0c, 0xff, 0x14, 0x04, 0xa0, 0xe0,
+ 0x0f, 0x86, 0x0f, 0xa4, 0x05, 0x50, 0x40, 0xe0, 0x05, 0x10, 0x28, 0xe0,
+ 0xfe, 0x47, 0xfd, 0x7f, 0xe3, 0xff, 0x14, 0x04, 0xd0, 0xe0, 0x0f, 0x86,
+ 0x2f, 0xa0, 0x20, 0x00, 0x01, 0x6c, 0x00, 0xd0, 0x05, 0x50, 0x28, 0xe0,
+ 0x0b, 0x8c, 0xf8, 0x7e, 0xfc, 0xee, 0x0e, 0x03, 0x59, 0x78, 0xf5, 0xc5,
+ 0x0d, 0xc2, 0x05, 0x52, 0x0c, 0xf8, 0x08, 0xa6, 0x46, 0x42, 0xb4, 0xe0,
+ 0x18, 0x84, 0x00, 0x40, 0xf4, 0x93, 0x00, 0x40, 0x08, 0xdc, 0x1b, 0xa1,
+ 0x06, 0xa6, 0x05, 0x10, 0x40, 0x80, 0x04, 0x00, 0x50, 0x9c, 0x65, 0x8a,
+ 0x05, 0x10, 0x44, 0xe0, 0xf6, 0x43, 0xfd, 0x6f, 0x00, 0xf8, 0x0f, 0x82,
+ 0x06, 0x02, 0x01, 0x60, 0x1e, 0xc0, 0x0f, 0xa2, 0x05, 0x50, 0x44, 0xe0,
+ 0x05, 0x10, 0x44, 0xe0, 0x0e, 0x02, 0x00, 0xf8, 0x0f, 0x82, 0x09, 0xf6,
+ 0x05, 0x50, 0x44, 0xe0, 0x05, 0x10, 0x40, 0xe0, 0x04, 0x00, 0x54, 0xfc,
+ 0x05, 0x50, 0x40, 0xe0, 0x05, 0x10, 0x40, 0xe0, 0x04, 0x00, 0xcc, 0xfc,
+ 0x05, 0x50, 0x40, 0xe0, 0x05, 0x10, 0x40, 0xe0, 0x04, 0x00, 0x4c, 0xfc,
+ 0x05, 0x50, 0x40, 0xe0, 0x05, 0x10, 0x40, 0xe0, 0x04, 0x00, 0xd0, 0xfc,
+ 0x05, 0x50, 0x40, 0xe0, 0x4c, 0x0c, 0x1c, 0xf2, 0x93, 0xdd, 0xc3, 0xc1,
+ 0xc6, 0x40, 0xfc, 0xe0, 0x04, 0x80, 0xc6, 0x44, 0x0c, 0xe1, 0x15, 0x04,
+ 0x0c, 0xf8, 0x0a, 0x80, 0x06, 0x07, 0x04, 0xe0, 0x03, 0x42, 0x48, 0xe1,
+ 0x46, 0x02, 0x40, 0xe2, 0x08, 0xc6, 0x44, 0x88, 0x06, 0x46, 0x0e, 0xe0,
+ 0x86, 0x01, 0x84, 0xe0, 0x33, 0x80, 0x39, 0x06, 0xd8, 0xef, 0x0a, 0x46,
+ 0x80, 0xe0, 0x31, 0xbf, 0x06, 0x06, 0x00, 0xc0, 0x31, 0x48, 0x60, 0xe0,
+ 0x34, 0x88, 0x49, 0x06, 0x40, 0xe1, 0x40, 0x48, 0x7c, 0xf3, 0x41, 0x46,
+ 0x40, 0xe1, 0x24, 0x8a, 0x39, 0x04, 0x10, 0xe0, 0x39, 0xc2, 0x31, 0x44,
+ 0x10, 0xe0, 0x14, 0xc4, 0x1b, 0xa5, 0x11, 0x83, 0x11, 0x40, 0x25, 0x6a,
+ 0x01, 0xc0, 0x08, 0x5c, 0x00, 0xda, 0x15, 0x00, 0xcc, 0xe0, 0x25, 0x00,
+ 0xf8, 0xe0, 0x1b, 0x85, 0x08, 0x5c, 0x00, 0x9a, 0x4e, 0x03, 0x01, 0x60,
+ 0x10, 0xc0, 0x29, 0x00, 0x1c, 0xe4, 0x18, 0x84, 0x20, 0x44, 0xf8, 0xf3,
+ 0x2f, 0xa2, 0x21, 0x40, 0x1c, 0xe4, 0x93, 0xdd, 0x0c, 0x00, 0x80, 0xfa,
+ 0x15, 0x00, 0x3c, 0xe0, 0x21, 0x81, 0x31, 0x85, 0x21, 0x42, 0x60, 0xe0,
+ 0x15, 0x00, 0x44, 0xe0, 0x31, 0x42, 0x40, 0xe1, 0x15, 0x00, 0x34, 0xe0,
+ 0x21, 0x42, 0x20, 0xe0, 0x15, 0x00, 0x34, 0xe0, 0xd6, 0x04, 0x10, 0xe0,
+ 0x23, 0x42, 0x30, 0xe0, 0x15, 0x00, 0x34, 0xe0, 0x86, 0x44, 0x04, 0xe0,
+ 0x23, 0x42, 0x38, 0xe0, 0x05, 0x00, 0x30, 0xe0, 0xc6, 0x02, 0x08, 0xe0,
+ 0x13, 0x40, 0x10, 0xe3, 0xe8, 0x56, 0x40, 0xef, 0x06, 0x40, 0x0c, 0xe1,
+ 0x04, 0x80, 0x06, 0x02, 0x94, 0xe0, 0x2b, 0x02, 0xc4, 0xea, 0x3b, 0x00,
+ 0x78, 0xe2, 0x20, 0x44, 0xfd, 0x73, 0x07, 0xc0, 0x30, 0x46, 0x01, 0x70,
+ 0xf8, 0xc0, 0x3f, 0xa4, 0x33, 0x40, 0x78, 0xe2, 0x0a, 0x84, 0x0c, 0x08,
+ 0x80, 0xf2, 0xf8, 0x3b, 0x3c, 0xff, 0xc3, 0xc1, 0x06, 0x40, 0x0c, 0xe1,
+ 0x04, 0x80, 0x1b, 0x00, 0x40, 0xe4, 0x19, 0xc2, 0x13, 0x40, 0x40, 0xe4,
+ 0x1b, 0x00, 0x40, 0xe4, 0x19, 0xc4, 0x13, 0x40, 0x40, 0xe4, 0x93, 0xdd,
+ 0xc6, 0x43, 0xec, 0xe0, 0x46, 0x41, 0xfc, 0xe0, 0x24, 0x84, 0x04, 0x80,
+ 0x31, 0x81, 0x4a, 0x44, 0x80, 0xe0, 0x86, 0x44, 0x0c, 0xe1, 0x09, 0x00,
+ 0x6c, 0xe0, 0xc4, 0x8a, 0x8e, 0x47, 0xfc, 0x9f, 0x01, 0x42, 0x51, 0x78,
+ 0x0c, 0xc0, 0x31, 0x58, 0x90, 0xe0, 0x34, 0x8a, 0x41, 0xbf, 0x06, 0x08,
+ 0x00, 0xc0, 0x41, 0x46, 0xa0, 0xe0, 0x34, 0x8a, 0x51, 0x81, 0xf6, 0x0b,
+ 0x00, 0xc0, 0x51, 0x46, 0xd0, 0xe0, 0x34, 0x8a, 0x01, 0xbf, 0x51, 0x46,
+ 0xe0, 0xe0, 0x44, 0x84, 0x0a, 0x48, 0x84, 0xe0, 0x75, 0x86, 0x54, 0xca,
+ 0x49, 0x88, 0x44, 0x06, 0x88, 0xe1, 0x36, 0x94, 0x4a, 0x46, 0x80, 0xe0,
+ 0x34, 0xca, 0x47, 0xc6, 0x11, 0x8d, 0x41, 0x46, 0xd0, 0xe0, 0x34, 0x88,
+ 0x76, 0x02, 0x00, 0xc0, 0x06, 0x00, 0x00, 0xc0, 0x16, 0x8c, 0x14, 0x88,
+ 0x01, 0x42, 0xc0, 0xe1, 0x01, 0x42, 0xe0, 0xe1, 0x01, 0x42, 0xf0, 0xe1,
+ 0x93, 0xdd, 0x34, 0xca, 0x41, 0x85, 0x46, 0x8c, 0x34, 0xca, 0x06, 0x48,
+ 0x00, 0xe0, 0x41, 0x46, 0xd0, 0xe0, 0x34, 0x88, 0x41, 0x83, 0x46, 0x8c,
+ 0x34, 0x88, 0x01, 0x46, 0xc0, 0xe1, 0x01, 0x46, 0xe0, 0xe1, 0x01, 0x46,
+ 0xf0, 0xe1, 0x09, 0x02, 0x20, 0xe0, 0x14, 0xca, 0x03, 0x42, 0x58, 0xe0,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x4c, 0x04, 0x04, 0xfa, 0x46, 0x4e, 0x08, 0xe1,
+ 0x06, 0x4c, 0x0c, 0xe1, 0x0a, 0x9e, 0x14, 0x98, 0x05, 0x42, 0x44, 0xe0,
+ 0x10, 0x00, 0xe1, 0x65, 0x03, 0xc0, 0x78, 0x41, 0x00, 0xe8, 0x08, 0x9c,
+ 0x0b, 0xa1, 0x04, 0x98, 0x06, 0x02, 0x10, 0x80, 0x13, 0x40, 0xf8, 0x86,
+ 0x65, 0x82, 0x00, 0x00, 0xe1, 0x65, 0x03, 0xc0, 0xa8, 0x40, 0x00, 0xe8,
+ 0x14, 0x98, 0x04, 0x00, 0xa0, 0xfc, 0x03, 0x42, 0x00, 0xe7, 0x4c, 0x0c,
+ 0x04, 0xf2, 0x93, 0xdd, 0x0a, 0x80, 0x93, 0xdd, 0x0c, 0x04, 0x00, 0xfa,
+ 0x06, 0x02, 0xec, 0xe1, 0x64, 0x84, 0x15, 0x0c, 0x2c, 0xe0, 0x14, 0x02,
+ 0xa0, 0xfc, 0x15, 0x4c, 0x2c, 0xe0, 0xd8, 0x40, 0x00, 0xe8, 0x14, 0xd8,
+ 0x09, 0x82, 0x14, 0x02, 0x00, 0xfc, 0x1f, 0xa0, 0x1e, 0xd8, 0x01, 0x85,
+ 0x0c, 0x0c, 0x00, 0xf2, 0xe8, 0x32, 0x2c, 0xff, 0x93, 0xdd, 0xc3, 0xc1,
+ 0x0c, 0x04, 0x00, 0xfa, 0x6b, 0x80, 0xf6, 0x01, 0x94, 0xe0, 0x08, 0x80,
+ 0x4a, 0x40, 0x80, 0xe0, 0x45, 0x86, 0x06, 0x40, 0x0c, 0xe1, 0x04, 0x80,
+ 0xc6, 0x02, 0x40, 0xe2, 0x09, 0x00, 0xd0, 0xe0, 0x14, 0x84, 0x1b, 0xa5,
+ 0x15, 0x84, 0x07, 0xc5, 0x09, 0x82, 0x18, 0x41, 0x00, 0xe8, 0x46, 0x43,
+ 0xfc, 0xe0, 0x14, 0x84, 0x19, 0x02, 0xd8, 0xe0, 0x19, 0x82, 0x0b, 0x83,
+ 0x16, 0x00, 0x00, 0xc0, 0x01, 0x4c, 0x00, 0xc0, 0x0c, 0x0c, 0x00, 0xf2,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x4a, 0x00, 0x00, 0xe0, 0x0c, 0x00, 0x00, 0xe2,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x46, 0x40, 0x84, 0xe0, 0x11, 0xaf, 0x13, 0x40,
+ 0x6c, 0xec, 0x11, 0xb3, 0x13, 0x40, 0x70, 0xec, 0xc6, 0x43, 0xf0, 0xe0,
+ 0x13, 0x40, 0xdc, 0xec, 0xc6, 0x02, 0x24, 0xe0, 0x1c, 0x80, 0x93, 0xdd,
+ 0x4c, 0x00, 0x00, 0xfa, 0xc8, 0x60, 0x7c, 0xef, 0xe8, 0x61, 0x7c, 0xef,
+ 0x28, 0x7e, 0x80, 0xef, 0xc6, 0x40, 0x98, 0xe1, 0x11, 0x83, 0x16, 0x80,
+ 0x46, 0x01, 0x10, 0xe1, 0x11, 0x81, 0x16, 0x80, 0x4c, 0x08, 0x00, 0xf2,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x0c, 0x04, 0x0c, 0xfa, 0x6b, 0x80, 0x04, 0x98,
+ 0x7b, 0x82, 0x56, 0x42, 0xb4, 0xe0, 0x88, 0x84, 0x05, 0x00, 0x10, 0xe0,
+ 0x09, 0x86, 0x0b, 0xa5, 0x46, 0x02, 0x00, 0x80, 0x06, 0x05, 0x00, 0x80,
+ 0x25, 0x82, 0x0b, 0xa3, 0xa5, 0x80, 0x0b, 0xa1, 0x06, 0x00, 0xf4, 0xef,
+ 0xd5, 0x84, 0x11, 0x85, 0x21, 0x91, 0x0b, 0x8e, 0x88, 0x74, 0x10, 0xef,
+ 0x0b, 0xa1, 0xf5, 0x82, 0x0a, 0x9e, 0x1a, 0x9c, 0x24, 0x98, 0x07, 0xe0,
+ 0x0f, 0xa2, 0x0e, 0xca, 0x0a, 0xde, 0x1a, 0xdc, 0x24, 0x98, 0x03, 0xb0,
+ 0x07, 0xe0, 0x0f, 0xa2, 0x0e, 0xc8, 0x01, 0x81, 0x0c, 0x0c, 0x0c, 0xf2,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x0c, 0x04, 0x7c, 0xfa, 0x46, 0x42, 0x9c, 0xe0,
+ 0x0b, 0x02, 0x04, 0xe3, 0xf0, 0x1e, 0x30, 0xec, 0x0b, 0xa3, 0x35, 0x96,
+ 0x8e, 0x01, 0x01, 0x60, 0x10, 0xc0, 0x0e, 0xfc, 0xc6, 0x05, 0xd0, 0xe1,
+ 0x0b, 0x82, 0x31, 0x81, 0x10, 0x16, 0x00, 0xe5, 0x20, 0x10, 0x20, 0xe7,
+ 0x0e, 0xbe, 0xb5, 0x85, 0x94, 0xfc, 0xa4, 0xbe, 0x82, 0x4c, 0x9c, 0xf0,
+ 0x05, 0x0c, 0x40, 0xe0, 0x11, 0x89, 0x93, 0x8e, 0xa3, 0x8e, 0x58, 0x44,
+ 0x00, 0xe8, 0x15, 0x0c, 0xc0, 0xf8, 0x04, 0x0c, 0x80, 0xfb, 0x0c, 0xed,
+ 0x0b, 0x82, 0x1b, 0x8c, 0x48, 0x44, 0x00, 0xe8, 0x15, 0x10, 0x1c, 0xfc,
+ 0x0e, 0xa8, 0x0b, 0x82, 0x1b, 0x8c, 0xd8, 0x43, 0x00, 0xe8, 0x71, 0x88,
+ 0x0e, 0xa4, 0x0a, 0x0e, 0x40, 0xe0, 0x35, 0xf8, 0x04, 0xbe, 0x14, 0xbc,
+ 0x81, 0xa0, 0x03, 0x8e, 0x0e, 0xbe, 0x04, 0xfc, 0x11, 0x82, 0x3b, 0x82,
+ 0x03, 0x8e, 0x0e, 0xfc, 0x3b, 0xa9, 0x06, 0x0e, 0x00, 0xc0, 0x35, 0x5e,
+ 0x00, 0xc0, 0xd5, 0xfa, 0xc6, 0x01, 0xd0, 0xe1, 0x7b, 0x80, 0x04, 0x9e,
+ 0x11, 0x91, 0x98, 0x41, 0x00, 0xe8, 0x24, 0x9c, 0x46, 0x42, 0x9c, 0xe0,
+ 0x6b, 0x82, 0x03, 0x4c, 0xc4, 0xe0, 0x11, 0x91, 0x0b, 0x84, 0xf8, 0x40,
+ 0x00, 0xe8, 0x19, 0x0e, 0x20, 0xe5, 0x03, 0x4c, 0xc0, 0xe0, 0x0b, 0x82,
+ 0x08, 0x72, 0xfc, 0xef, 0x01, 0x4c, 0x24, 0xf9, 0xf1, 0x98, 0x0c, 0x0c,
+ 0x7c, 0xf2, 0x93, 0xdd, 0x4c, 0x00, 0x00, 0xfa, 0x48, 0x65, 0x2c, 0xef,
+ 0x4c, 0x08, 0x00, 0xf2, 0x93, 0xdd, 0xc3, 0xc1, 0x0c, 0x04, 0x00, 0xfa,
+ 0x6b, 0x82, 0x78, 0x6e, 0xfc, 0xee, 0x46, 0x42, 0xec, 0xe0, 0x24, 0x84,
+ 0x24, 0x02, 0x80, 0xfa, 0x1d, 0xcc, 0x11, 0x83, 0xf5, 0x82, 0x24, 0x02,
+ 0xa0, 0xe1, 0x14, 0x02, 0x80, 0xfa, 0x1d, 0xcc, 0x11, 0x85, 0x15, 0x82,
+ 0x27, 0xe1, 0x24, 0x02, 0x80, 0xfa, 0x1d, 0xcc, 0x11, 0x89, 0x86, 0x02,
+ 0x00, 0x80, 0x0c, 0x0c, 0x00, 0xf2, 0x18, 0x17, 0xfc, 0xfe, 0xc3, 0xc1,
+ 0x0c, 0x04, 0x00, 0xfa, 0x06, 0x41, 0x8c, 0xe0, 0x1b, 0x00, 0xec, 0xe4,
+ 0x1b, 0xa3, 0x75, 0x84, 0x11, 0x81, 0x8e, 0x05, 0x01, 0x60, 0x10, 0xc0,
+ 0x00, 0x06, 0xc0, 0xe5, 0x95, 0x81, 0x44, 0x88, 0x1d, 0xee, 0x75, 0x80,
+ 0x4e, 0xc1, 0x25, 0x81, 0x4e, 0xcd, 0x21, 0x88, 0x11, 0x82, 0x0a, 0x02,
+ 0x40, 0xe0, 0xd5, 0xfc, 0x56, 0x00, 0x00, 0xe1, 0x18, 0x80, 0x1b, 0xa1,
+ 0xc5, 0x84, 0x08, 0x82, 0x4a, 0x00, 0xfc, 0xfb, 0x45, 0x84, 0x86, 0x4d,
+ 0x84, 0xe1, 0x04, 0x98, 0x05, 0x00, 0x10, 0xe0, 0x4a, 0x40, 0x80, 0xe0,
+ 0x45, 0x82, 0x11, 0x81, 0x0b, 0x8c, 0x58, 0x76, 0x28, 0xef, 0x0b, 0x8c,
+ 0x0c, 0x0c, 0x00, 0xf2, 0x88, 0x35, 0x28, 0xff, 0x0c, 0x0c, 0x00, 0xf2,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x46, 0x41, 0xfc, 0xe0, 0x04, 0x80, 0x09, 0x00,
+ 0x80, 0xe0, 0x09, 0x9e, 0x0b, 0xa3, 0x75, 0x82, 0x46, 0x41, 0x80, 0xe1,
+ 0x04, 0x80, 0xc6, 0x42, 0x8c, 0xe0, 0x04, 0xc2, 0x00, 0x40, 0x00, 0xf2,
+ 0x07, 0xcf, 0x06, 0x84, 0x06, 0x40, 0x84, 0xe0, 0x15, 0x00, 0x28, 0xe5,
+ 0x1c, 0xc2, 0x93, 0xdd, 0x0b, 0xa1, 0xc6, 0x00, 0xa0, 0xe1, 0x15, 0x00,
+ 0x04, 0xf8, 0x05, 0x84, 0x21, 0x8b, 0x2c, 0x84, 0x14, 0x80, 0x2c, 0x84,
+ 0x14, 0x82, 0x2c, 0x84, 0x15, 0x00, 0x10, 0xe0, 0x21, 0xa1, 0x21, 0x42,
+ 0x10, 0xe0, 0x05, 0x00, 0x14, 0xe0, 0x01, 0x88, 0x75, 0x83, 0x21, 0x85,
+ 0x2c, 0x84, 0x14, 0x80, 0x06, 0x46, 0x00, 0xe0, 0x2c, 0x84, 0x14, 0x82,
+ 0x2c, 0x84, 0x14, 0xc0, 0x21, 0xa1, 0x21, 0x42, 0x20, 0xe0, 0x14, 0xc2,
+ 0x31, 0x42, 0x20, 0xe0, 0x15, 0x00, 0x10, 0xe0, 0x21, 0x42, 0x20, 0xe0,
+ 0x05, 0x00, 0x14, 0xe0, 0x01, 0x90, 0x06, 0x42, 0x00, 0xe0, 0x16, 0x80,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x0c, 0x04, 0x7c, 0xfa, 0x4a, 0x40, 0x80, 0xe0,
+ 0xf0, 0x1e, 0x30, 0xec, 0xe5, 0x82, 0xa6, 0x40, 0x00, 0xe1, 0x1a, 0x80,
+ 0x2a, 0xc0, 0x3a, 0xc2, 0x13, 0x40, 0x10, 0xe0, 0x1a, 0x82, 0x23, 0x40,
+ 0x18, 0xe0, 0x33, 0x40, 0x1c, 0xe0, 0x13, 0x40, 0x14, 0xe0, 0xf8, 0x61,
+ 0x68, 0xef, 0xc6, 0x13, 0x00, 0xe1, 0x15, 0x12, 0x28, 0xf8, 0x0b, 0x02,
+ 0x2c, 0xe0, 0x1b, 0x02, 0x24, 0xe0, 0x8a, 0x00, 0xa5, 0x64, 0x03, 0xc0,
+ 0x35, 0x82, 0x0a, 0x4e, 0x9c, 0xe1, 0x1a, 0x03, 0x11, 0x6f, 0x02, 0xc0,
+ 0xe8, 0x13, 0x01, 0x20, 0x00, 0xc0, 0x1f, 0xa0, 0x5a, 0x42, 0x80, 0xe0,
+ 0x0a, 0x4e, 0x9c, 0xe1, 0x68, 0x13, 0x00, 0xa0, 0x09, 0x12, 0x78, 0xf8,
+ 0xa1, 0x81, 0xf0, 0x02, 0x10, 0xe4, 0x07, 0xc4, 0x0c, 0xfc, 0xf0, 0x00,
+ 0x20, 0xe4, 0xa6, 0x91, 0xa8, 0x53, 0x74, 0xef, 0x05, 0x12, 0x30, 0xf8,
+ 0x25, 0x12, 0x28, 0xf8, 0x61, 0x87, 0x09, 0x00, 0x48, 0xe0, 0x81, 0x85,
+ 0x09, 0x86, 0x0b, 0xa7, 0x26, 0x0c, 0x00, 0xc0, 0x0b, 0xa1, 0x0b, 0x04,
+ 0x28, 0xe0, 0x16, 0x0c, 0x00, 0x80, 0x03, 0x52, 0x04, 0xf8, 0x0b, 0x04,
+ 0x20, 0xe0, 0x0c, 0xa6, 0x1b, 0x04, 0x2c, 0xe0, 0x3b, 0x04, 0x28, 0xe0,
+ 0x4b, 0x04, 0x20, 0xe0, 0x13, 0x86, 0x3b, 0x04, 0x24, 0xe0, 0x10, 0x0a,
+ 0x04, 0xec, 0x1a, 0xfc, 0x33, 0x88, 0x30, 0x06, 0x04, 0xec, 0x12, 0x4e,
+ 0x94, 0xf0, 0x32, 0x48, 0x84, 0xf0, 0x4c, 0xe4, 0x7c, 0xa4, 0xcb, 0x04,
+ 0x28, 0xe0, 0x14, 0x08, 0x84, 0xe1, 0xcd, 0xc9, 0xc2, 0x58, 0x90, 0x91,
+ 0x42, 0x4e, 0x94, 0x90, 0xc3, 0x52, 0x04, 0x98, 0x73, 0x52, 0x00, 0x80,
+ 0x5b, 0x04, 0x20, 0xe0, 0x5d, 0xc9, 0x52, 0x40, 0x90, 0x91, 0x42, 0x48,
+ 0x8c, 0x90, 0x03, 0x52, 0x04, 0x80, 0x43, 0x52, 0x08, 0x80, 0x3b, 0x04,
+ 0x2c, 0xe0, 0x49, 0x04, 0xb8, 0xe0, 0x33, 0x52, 0x1c, 0xf8, 0x2b, 0x04,
+ 0x24, 0xe0, 0x4b, 0xab, 0x23, 0x52, 0x18, 0xf8, 0x65, 0x8a, 0x4b, 0xa9,
+ 0xe5, 0x90, 0x4b, 0xa7, 0x22, 0x44, 0x84, 0xd0, 0x32, 0x46, 0x84, 0xd0,
+ 0x33, 0x52, 0x1c, 0xd8, 0x23, 0x52, 0x18, 0xd8, 0x95, 0x96, 0x20, 0x44,
+ 0xf9, 0x73, 0xff, 0xc0, 0x27, 0xc3, 0x23, 0x82, 0x23, 0x52, 0x18, 0xf8,
+ 0x24, 0x02, 0x80, 0xfb, 0x04, 0x00, 0x80, 0xfb, 0x2b, 0x8c, 0x58, 0x52,
+ 0x74, 0xef, 0x1b, 0x12, 0x1c, 0xf8, 0x2a, 0xfc, 0x0c, 0xe4, 0x17, 0xc3,
+ 0x13, 0x84, 0x13, 0x52, 0x1c, 0xf8, 0x0b, 0x12, 0x04, 0xf8, 0x14, 0x02,
+ 0x80, 0xfb, 0x2b, 0x8c, 0x68, 0x51, 0x74, 0xef, 0xc5, 0x87, 0x20, 0x44,
+ 0xe1, 0x73, 0xff, 0xc0, 0x27, 0xc7, 0x23, 0x82, 0x23, 0x52, 0x18, 0xf8,
+ 0x24, 0x02, 0x80, 0xfb, 0x04, 0x00, 0x80, 0xfb, 0x2b, 0x8c, 0x78, 0x57,
+ 0x74, 0xef, 0x1b, 0x12, 0x1c, 0xf8, 0x2a, 0xfc, 0x0c, 0xe4, 0x17, 0xc7,
+ 0x13, 0x84, 0x13, 0x52, 0x1c, 0xf8, 0x0b, 0x12, 0x04, 0xf8, 0x14, 0x02,
+ 0x80, 0xfb, 0x2b, 0x8c, 0x88, 0x56, 0x74, 0xef, 0xe5, 0x83, 0x20, 0x44,
+ 0xf1, 0x73, 0xff, 0xc0, 0x27, 0xc5, 0x23, 0x82, 0x23, 0x52, 0x18, 0xf8,
+ 0x24, 0x02, 0x80, 0xfb, 0x04, 0x00, 0x80, 0xfb, 0x2b, 0x8c, 0x18, 0x52,
+ 0x74, 0xef, 0x1b, 0x12, 0x1c, 0xf8, 0x2a, 0xfc, 0x0c, 0xe4, 0x17, 0xc5,
+ 0x13, 0x84, 0x13, 0x52, 0x1c, 0xf8, 0x0b, 0x12, 0x04, 0xf8, 0x14, 0x02,
+ 0x80, 0xfb, 0x2b, 0x8c, 0x28, 0x51, 0x74, 0xef, 0x7b, 0x80, 0x7c, 0xa4,
+ 0x08, 0x91, 0xa3, 0x52, 0x1c, 0xe0, 0xa3, 0x52, 0x24, 0xe0, 0x0b, 0xa1,
+ 0x83, 0x52, 0x1c, 0x80, 0x83, 0x52, 0x24, 0x80, 0x89, 0x12, 0x78, 0xf8,
+ 0xf6, 0x57, 0xfc, 0xef, 0x6b, 0x12, 0x1c, 0xf8, 0xab, 0x12, 0x18, 0xf8,
+ 0xd6, 0x57, 0xfc, 0x8f, 0x8b, 0xa3, 0xa0, 0x40, 0x00, 0x9c, 0xa5, 0x86,
+ 0x64, 0x00, 0x80, 0xfb, 0x1b, 0x90, 0xf8, 0x7d, 0xf8, 0xee, 0x6b, 0x80,
+ 0xa4, 0x00, 0x80, 0xfb, 0x1b, 0x90, 0x98, 0x7d, 0xf8, 0xee, 0x15, 0x12,
+ 0x28, 0xf8, 0x19, 0x02, 0xb8, 0xe0, 0x1b, 0xad, 0x95, 0x82, 0x1a, 0xa6,
+ 0xa0, 0x44, 0xf9, 0x73, 0xff, 0xc0, 0x27, 0xc3, 0x13, 0x94, 0x10, 0x02,
+ 0x08, 0xec, 0x1c, 0xe4, 0x23, 0x52, 0x18, 0xf8, 0x1b, 0x12, 0x04, 0xf8,
+ 0x03, 0x96, 0x03, 0x52, 0x28, 0xe0, 0x1c, 0xe6, 0x0a, 0xa6, 0x1a, 0xe4,
+ 0x63, 0x96, 0x63, 0x52, 0x20, 0xe0, 0x73, 0x52, 0x10, 0xe0, 0x03, 0x52,
+ 0x14, 0xe0, 0x13, 0x52, 0x18, 0xe0, 0x98, 0x52, 0x74, 0xef, 0x09, 0x12,
+ 0x8c, 0xe0, 0x0b, 0xa1, 0x01, 0x81, 0x01, 0x52, 0x90, 0xe0, 0x65, 0x82,
+ 0x05, 0x12, 0x30, 0xf8, 0x09, 0x00, 0xa8, 0xe0, 0x0a, 0x00, 0x0c, 0xf8,
+ 0x16, 0x00, 0x00, 0xc0, 0x01, 0x52, 0x90, 0xc0, 0x46, 0x41, 0x84, 0xe0,
+ 0x0a, 0x80, 0x0a, 0x4e, 0x9c, 0xe9, 0x1a, 0x00, 0x08, 0xe0, 0x38, 0x01,
+ 0x01, 0x20, 0x00, 0xc0, 0x0b, 0x12, 0x1c, 0xe0, 0x1b, 0x12, 0x24, 0xe0,
+ 0x2b, 0x12, 0x28, 0xe0, 0x03, 0x52, 0x2c, 0xe0, 0x0b, 0x12, 0x20, 0xe0,
+ 0x13, 0x52, 0x34, 0xe0, 0x23, 0x52, 0x38, 0xe0, 0x03, 0x52, 0x30, 0xe0,
+ 0x0c, 0x00, 0x00, 0xe2, 0xf1, 0x98, 0x0c, 0x0c, 0x7c, 0xf2, 0x93, 0xdd,
+ 0x13, 0xa9, 0x00, 0x00, 0xa8, 0xc1, 0x40, 0x00, 0x68, 0x04, 0xa0, 0xe0,
+ 0x40, 0x6c, 0x40, 0x00, 0xe8, 0x34, 0xc8, 0xe0, 0xfc, 0x91, 0x40, 0x00,
+ 0x68, 0x1f, 0xb8, 0xe0, 0x30, 0x16, 0x41, 0x00, 0x28, 0x39, 0x74, 0xe0,
+ 0xb0, 0x7e, 0x40, 0x00, 0xe8, 0x38, 0xc0, 0xe0, 0x30, 0x04, 0x41, 0x00,
+ 0x48, 0x1b, 0x80, 0xe0, 0x30, 0x2e, 0x40, 0x00, 0x88, 0x0c, 0xec, 0xe0,
+ 0x10, 0x9f, 0x40, 0x00, 0x88, 0x08, 0xb4, 0xe0, 0x10, 0x01, 0x41, 0x00,
+ 0x68, 0x01, 0x84, 0xe0, 0x54, 0xd6, 0x40, 0x00, 0xc8, 0x1a, 0x98, 0xe0,
+ 0xd0, 0xc8, 0x40, 0x00, 0x68, 0x08, 0xa0, 0xe0, 0x80, 0xdb, 0x40, 0x00,
+ 0xe8, 0x35, 0x94, 0xe0, 0x74, 0xff, 0x40, 0x00, 0xa8, 0x11, 0x80, 0xe0,
+ 0xf8, 0x89, 0x40, 0x00, 0x88, 0x16, 0xbc, 0xe0, 0x00, 0x90, 0x40, 0x00,
+ 0x08, 0x35, 0xb8, 0xe0, 0x7c, 0x73, 0x40, 0x00, 0x88, 0x1b, 0xc8, 0xe0,
+ 0xf4, 0xff, 0x40, 0x00, 0x68, 0x39, 0x80, 0xe0, 0xa4, 0xa4, 0x40, 0x00,
+ 0xa8, 0x16, 0xb0, 0xe0, 0x50, 0xc9, 0x40, 0x00, 0x28, 0x3a, 0x98, 0xe0,
+ 0x00, 0xb9, 0x00, 0x00, 0xb6, 0x85, 0x00, 0x00,
+};
+
+static const char * const vd55g1_tp_menu[] = {
+ "Disabled",
+ "Diagonal Grey Scale",
+ "Pseudo-random Noise",
+};
+
+static const s64 vd55g1_ev_bias_menu[] = {
+ -3000, -2500, -2000, -1500, -1000, -500,
+ 0,
+ 500, 1000, 1500, 2000, 2500, 3000,
+};
+
+static const char * const vd55g1_hdr_menu[] = {
+ "No HDR",
+ /*
+	 * This mode acquires two frames on the sensor: the first is discarded
+	 * and only used to gather auto exposure data, while the second is
+	 * output to the host.
+ */
+ "Internal subtraction",
+};
+
+static const char * const vd55g1_supply_name[] = {
+ "vcore",
+ "vddio",
+ "vana",
+};
+
+enum vd55g1_hdr_mode {
+ VD55G1_NO_HDR,
+ VD55G1_HDR_SUB,
+};
+
+struct vd55g1_mode {
+ u32 width;
+ u32 height;
+};
+
+struct vd55g1_fmt_desc {
+ u32 code;
+ u8 bpp;
+ u8 data_type;
+};
+
+static const struct vd55g1_fmt_desc vd55g1_mbus_codes[] = {
+ {
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .bpp = 8,
+ .data_type = MIPI_CSI2_DT_RAW8,
+ },
+ {
+ .code = MEDIA_BUS_FMT_Y10_1X10,
+ .bpp = 10,
+ .data_type = MIPI_CSI2_DT_RAW10,
+ },
+};
+
+static const struct vd55g1_mode vd55g1_supported_modes[] = {
+ {
+ .width = VD55G1_WIDTH,
+ .height = VD55G1_HEIGHT,
+ },
+ {
+ .width = 800,
+ .height = VD55G1_HEIGHT,
+ },
+ {
+ .width = 800,
+ .height = 600,
+ },
+ {
+ .width = 640,
+ .height = 480,
+ },
+ {
+ .width = 320,
+ .height = 240,
+ },
+};
+
+enum vd55g1_expo_state {
+ VD55G1_EXP_AUTO,
+ VD55G1_EXP_FREEZE,
+ VD55G1_EXP_MANUAL,
+ VD55G1_EXP_SINGLE_STEP,
+ VD55G1_EXP_BYPASS,
+};
+
+struct vd55g1_vblank_limits {
+ u16 min;
+ u16 def;
+ u16 max;
+};
+
+struct vd55g1 {
+ struct device *dev;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(vd55g1_supply_name)];
+ struct gpio_desc *reset_gpio;
+ struct clk *xclk;
+ struct regmap *regmap;
+ u32 xclk_freq;
+ u16 oif_ctrl;
+ u8 gpios[VD55G1_NB_GPIOS];
+ unsigned long ext_leds_mask;
+ u32 mipi_rate;
+ u32 pixel_clock;
+ u64 link_freq;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *pixel_rate_ctrl;
+ struct v4l2_ctrl *vblank_ctrl;
+ struct v4l2_ctrl *hblank_ctrl;
+ struct {
+ struct v4l2_ctrl *hflip_ctrl;
+ struct v4l2_ctrl *vflip_ctrl;
+ };
+ struct v4l2_ctrl *patgen_ctrl;
+ struct {
+ struct v4l2_ctrl *ae_ctrl;
+ struct v4l2_ctrl *expo_ctrl;
+ struct v4l2_ctrl *again_ctrl;
+ struct v4l2_ctrl *dgain_ctrl;
+ };
+ struct v4l2_ctrl *ae_lock_ctrl;
+ struct v4l2_ctrl *ae_bias_ctrl;
+ struct v4l2_ctrl *led_ctrl;
+ struct v4l2_ctrl *hdr_ctrl;
+};
+
+static inline struct vd55g1 *to_vd55g1(struct v4l2_subdev *sd)
+{
+ return container_of_const(sd, struct vd55g1, sd);
+}
+
+static inline struct vd55g1 *ctrl_to_vd55g1(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = &container_of_const(ctrl->handler,
+ struct vd55g1,
+ ctrl_handler)->sd;
+
+ return to_vd55g1(sd);
+}
+
+static const struct vd55g1_fmt_desc *vd55g1_get_fmt_desc(struct vd55g1 *sensor,
+ u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vd55g1_mbus_codes); i++) {
+ if (vd55g1_mbus_codes[i].code == code)
+ return &vd55g1_mbus_codes[i];
+ }
+
+ /* Should never happen */
+	dev_warn(sensor->dev, "Unsupported code %d, defaulting to 8 bpp\n", code);
+
+ return &vd55g1_mbus_codes[0];
+}
+
+static s32 vd55g1_get_pixel_rate(struct vd55g1 *sensor,
+ struct v4l2_mbus_framefmt *format)
+{
+ return sensor->mipi_rate /
+ vd55g1_get_fmt_desc(sensor, format->code)->bpp;
+}
+
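+/*
+ * The minimum horizontal blanking is set by the slower of two constraints:
+ * the time the CSI-2 link needs to transmit one line and the absolute ADC
+ * conversion time. As a rough illustration (example figures, not datasheet
+ * values), a 640-pixel line at 8 bpp on a 600 Mbps link needs about
+ * (640 * 8 + VD55G1_MIPI_MARGIN) / 600 us, which the code below converts to
+ * a line length in pixels using the pixel clock.
+ */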
+static unsigned int vd55g1_get_hblank_min(struct vd55g1 *sensor,
+ struct v4l2_mbus_framefmt *format,
+ struct v4l2_rect *crop)
+{
+ u32 mipi_req_line_time;
+ u32 mipi_req_line_length;
+ u32 min_line_length;
+
+	/* Time required by the MIPI link to transfer one line */
+ mipi_req_line_time = (crop->width *
+ vd55g1_get_fmt_desc(sensor, format->code)->bpp +
+ VD55G1_MIPI_MARGIN) /
+ (sensor->mipi_rate / MEGA);
+ mipi_req_line_length = mipi_req_line_time * sensor->pixel_clock /
+ HZ_PER_MHZ;
+
+ /* Absolute time required for ADCs to convert pixels */
+ min_line_length = VD55G1_LINE_LENGTH_MIN;
+ if (sensor->hdr_ctrl->val == VD55G1_HDR_SUB)
+ min_line_length = VD55G1_LINE_LENGTH_SUB_MIN;
+
+	/* Respect both constraints */
+ min_line_length = max(min_line_length, mipi_req_line_length);
+
+ return min_line_length - crop->width;
+}
+
+static void vd55g1_get_vblank_limits(struct vd55g1 *sensor,
+ struct v4l2_rect *crop,
+ struct vd55g1_vblank_limits *limits)
+{
+ limits->min = VD55G1_VBLANK_MIN;
+ limits->def = VD55G1_FRAME_LENGTH_DEF - crop->height;
+ limits->max = VD55G1_VBLANK_MAX - crop->height;
+}
+
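+/*
+ * Read/write helpers following the v4l2-cci error accumulation convention:
+ * if the optional 'err' argument already holds a non-zero value, the access
+ * is skipped and that value is returned, so a chain of register accesses
+ * only needs a single error check at the end. A minimal usage sketch
+ * (REG_A and REG_B are hypothetical registers):
+ *
+ *	int ret = 0;
+ *
+ *	vd55g1_write(sensor, REG_A, 1, &ret);
+ *	vd55g1_write(sensor, REG_B, 2, &ret);	(skipped if REG_A failed)
+ *	if (ret)
+ *		return ret;
+ */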
+#define vd55g1_read(sensor, reg, val, err) \
+ cci_read((sensor)->regmap, reg, val, err)
+
+#define vd55g1_write(sensor, reg, val, err) \
+ cci_write((sensor)->regmap, reg, val, err)
+
+static int vd55g1_write_array(struct vd55g1 *sensor, u32 reg, unsigned int len,
+ const u8 *array, int *err)
+{
+ unsigned int chunk_sz = 1024;
+ unsigned int sz;
+ int ret = 0;
+
+ if (err && *err)
+ return *err;
+
+ /*
+	 * Chunking isn't strictly necessary, but under certain conditions
+	 * (platform, CPU load, etc.) a single large bulk write has been
+	 * observed to time out.
+ */
+ while (len) {
+ sz = min(len, chunk_sz);
+ ret = regmap_bulk_write(sensor->regmap, reg, array, sz);
+ if (ret < 0)
+ goto out;
+ len -= sz;
+ reg += sz;
+ array += sz;
+ }
+
+out:
+ if (ret && err)
+ *err = ret;
+
+ return ret;
+}
+
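+/*
+ * Poll an 8-bit register until it reads back 'poll_val', sampling every
+ * 2 ms with a 500 ms timeout. The helper follows the same error chaining
+ * convention as the read/write helpers above.
+ */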
+static int vd55g1_poll_reg(struct vd55g1 *sensor, u32 reg, u8 poll_val,
+ int *err)
+{
+ unsigned int val = 0;
+ int ret;
+
+ if (err && *err)
+ return *err;
+
+ ret = regmap_read_poll_timeout(sensor->regmap, CCI_REG_ADDR(reg), val,
+ (val == poll_val), 2000,
+ 500 * USEC_PER_MSEC);
+
+ if (ret && err)
+ *err = ret;
+
+ return ret;
+}
+
+static int vd55g1_wait_state(struct vd55g1 *sensor, int state, int *err)
+{
+ return vd55g1_poll_reg(sensor, VD55G1_REG_SYSTEM_FSM, state, err);
+}
+
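+/*
+ * Illustrative walk through the divider selection below (example figures,
+ * not a validated configuration): a 150 MHz link frequency gives a 300 Mbps
+ * DDR MIPI rate, hence mipi_div = 4 and sys_clk = 1200 MHz; 1200 MHz being
+ * above 900 MHz selects pixel_div = 8, for a 150 MHz pixel clock.
+ */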
+static int vd55g1_prepare_clock_tree(struct vd55g1 *sensor)
+{
+ u32 sys_clk, mipi_div, pixel_div;
+
+ if (sensor->xclk_freq < VD55G1_XCLK_FREQ_MIN ||
+ sensor->xclk_freq > VD55G1_XCLK_FREQ_MAX) {
+ dev_err(sensor->dev,
+			"Only %luMHz-%luMHz clock range supported. Provided %lu MHz\n",
+ VD55G1_XCLK_FREQ_MIN / HZ_PER_MHZ,
+ VD55G1_XCLK_FREQ_MAX / HZ_PER_MHZ,
+ sensor->xclk_freq / HZ_PER_MHZ);
+ return -EINVAL;
+ }
+
+ /* MIPI bus is double data rate */
+ sensor->mipi_rate = sensor->link_freq * 2;
+
+ if (sensor->mipi_rate < VD55G1_MIPI_RATE_MIN ||
+ sensor->mipi_rate > VD55G1_MIPI_RATE_MAX) {
+ dev_err(sensor->dev,
+ "Only %luMbps-%luMbps data rate range supported. Provided %lu Mbps\n",
+ VD55G1_MIPI_RATE_MIN / MEGA,
+ VD55G1_MIPI_RATE_MAX / MEGA,
+ sensor->mipi_rate / MEGA);
+ return -EINVAL;
+ }
+
+ if (sensor->mipi_rate <= 300 * MEGA)
+ mipi_div = 4;
+ else if (sensor->mipi_rate <= 600 * MEGA)
+ mipi_div = 2;
+ else
+ mipi_div = 1;
+
+ sys_clk = sensor->mipi_rate * mipi_div;
+
+ if (sys_clk <= 780 * HZ_PER_MHZ)
+ pixel_div = 5;
+ else if (sys_clk <= 900 * HZ_PER_MHZ)
+ pixel_div = 6;
+ else
+ pixel_div = 8;
+
+ sensor->pixel_clock = sys_clk / pixel_div;
+
+ return 0;
+}
+
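+/*
+ * index2val maps the vd55g1_tp_menu entries to PATGEN type codes: 0x00
+ * disables the generator, 0x22 selects the diagonal grey scale and 0x28 the
+ * pseudo-random noise pattern, matching the menu order checked by the
+ * BUILD_BUG_ON() below.
+ */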
+static int vd55g1_update_patgen(struct vd55g1 *sensor, u32 patgen_index)
+{
+ static const u8 index2val[] = {
+ 0x0, 0x22, 0x28
+ };
+ u32 pattern = index2val[patgen_index];
+ u32 reg = pattern << VD55G1_PATGEN_TYPE_SHIFT;
+ u8 duster = VD55G1_DUSTER_RING_ENABLE | VD55G1_DUSTER_DYN_ENABLE |
+ VD55G1_DUSTER_ENABLE;
+ int ret = 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(index2val) != ARRAY_SIZE(vd55g1_tp_menu));
+
+ if (pattern != 0) {
+ reg |= VD55G1_PATGEN_ENABLE;
+		/* Disable the duster so it does not alter the test pattern output */
+ duster = VD55G1_DUSTER_DISABLE;
+ }
+
+ vd55g1_write(sensor, VD55G1_REG_DUSTER_CTRL, duster, &ret);
+ vd55g1_write(sensor, VD55G1_REG_PATGEN_CTRL, reg, &ret);
+
+ return ret;
+}
+
+static int vd55g1_update_expo_cluster(struct vd55g1 *sensor, bool is_auto)
+{
+ enum vd55g1_expo_state expo_state = is_auto ? VD55G1_EXP_AUTO :
+ VD55G1_EXP_MANUAL;
+ int ret = 0;
+
+ if (sensor->ae_ctrl->is_new)
+ vd55g1_write(sensor, VD55G1_REG_EXP_MODE(0), expo_state, &ret);
+
+ if (sensor->hdr_ctrl->val == VD55G1_HDR_SUB &&
+ sensor->hdr_ctrl->is_new) {
+ vd55g1_write(sensor, VD55G1_REG_EXP_MODE(1), VD55G1_EXP_BYPASS,
+ &ret);
+ if (ret)
+ return ret;
+ }
+
+ if (!is_auto && sensor->expo_ctrl->is_new)
+ vd55g1_write(sensor, VD55G1_REG_MANUAL_COARSE_EXPOSURE,
+ sensor->expo_ctrl->val, &ret);
+
+ if (!is_auto && sensor->again_ctrl->is_new)
+ vd55g1_write(sensor, VD55G1_REG_MANUAL_ANALOG_GAIN,
+ sensor->again_ctrl->val, &ret);
+
+ if (!is_auto && sensor->dgain_ctrl->is_new)
+ vd55g1_write(sensor, VD55G1_REG_MANUAL_DIGITAL_GAIN,
+ sensor->dgain_ctrl->val, &ret);
+
+ return ret;
+}
+
+static int vd55g1_lock_exposure(struct vd55g1 *sensor, u32 lock_val)
+{
+ bool ae_lock = lock_val & V4L2_LOCK_EXPOSURE;
+ enum vd55g1_expo_state expo_state = ae_lock ? VD55G1_EXP_FREEZE :
+ VD55G1_EXP_AUTO;
+ int ret = 0;
+
+ if (sensor->ae_ctrl->val == V4L2_EXPOSURE_AUTO)
+ vd55g1_write(sensor, VD55G1_REG_EXP_MODE(0), expo_state, &ret);
+
+ return ret;
+}
+
+static int vd55g1_read_expo_cluster(struct vd55g1 *sensor)
+{
+ u64 exposure = 0;
+ u64 again = 0;
+ u64 dgain = 0;
+ int ret = 0;
+
+ vd55g1_read(sensor, VD55G1_REG_APPLIED_COARSE_EXPOSURE, &exposure,
+ &ret);
+ vd55g1_read(sensor, VD55G1_REG_APPLIED_ANALOG_GAIN, &again, &ret);
+ vd55g1_read(sensor, VD55G1_REG_APPLIED_DIGITAL_GAIN, &dgain, &ret);
+ if (ret)
+ return ret;
+
+ sensor->expo_ctrl->cur.val = exposure;
+ sensor->again_ctrl->cur.val = again;
+ sensor->dgain_ctrl->cur.val = dgain;
+
+ return 0;
+}
+
+static int vd55g1_update_frame_length(struct vd55g1 *sensor,
+ unsigned int frame_length)
+{
+ int ret = 0;
+
+ if (sensor->hdr_ctrl->val == VD55G1_HDR_SUB)
+ vd55g1_write(sensor, VD55G1_REG_FRAME_LENGTH(1), frame_length,
+ &ret);
+ vd55g1_write(sensor, VD55G1_REG_FRAME_LENGTH(0), frame_length, &ret);
+
+ return ret;
+}
+
+static int vd55g1_update_exposure_target(struct vd55g1 *sensor, int index)
+{
+ /*
+	 * Compute the auto exposure target as: default target exposure * 2^EV,
+	 * the default target exposure being 27 for this sensor.
+ */
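+	/*
+	 * For instance, index 0 corresponds to EV -3: 27 * 2^-3 = 3.375,
+	 * rounded to 3, and the last index to EV +3: 27 * 2^3 = 216.
+	 */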
+ static const unsigned int index2exposure_target[] = {
+ 3, 5, 7, 10, 14, 19, 27, 38, 54, 76, 108, 153, 216,
+ };
+ int exposure_target = index2exposure_target[index];
+
+ return vd55g1_write(sensor, VD55G1_REG_AE_TARGET_PERCENTAGE,
+ exposure_target, NULL);
+}
+
+static int vd55g1_apply_cold_start(struct vd55g1 *sensor,
+ struct v4l2_rect *crop)
+{
+ /*
+	 * The cold start register is a single register expressing the exposure
+	 * time in us. This differs from the status registers, which combine
+	 * exposure, digital gain, and analog gain, hence the following format
+	 * conversion.
+ */
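+	/*
+	 * Illustrative conversion (example figures only): with a 125 MHz
+	 * pixel clock and a 1250-pixel line, line_time_us = 10; 100 lines of
+	 * exposure at analog gain code 16 (x2) and digital gain code 256 (x1)
+	 * program expo_us = 100 * 1 * 2 * 10 = 2000 us.
+	 */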
+ unsigned int line_length = crop->width + sensor->hblank_ctrl->val;
+ unsigned int line_time_us = DIV_ROUND_UP(line_length * MEGA,
+ sensor->pixel_clock);
+ u8 d_gain = DIV_ROUND_CLOSEST(sensor->dgain_ctrl->val, 1 << 8);
+ u8 a_gain = DIV_ROUND_CLOSEST(32, (32 - sensor->again_ctrl->val));
+ unsigned int expo_us = sensor->expo_ctrl->val * d_gain * a_gain *
+ line_time_us;
+ int ret = 0;
+
+ vd55g1_write(sensor, VD55G1_REG_AE_FORCE_COLDSTART, 1, &ret);
+ vd55g1_write(sensor, VD55G1_REG_AE_COLDSTART_EXP_TIME, expo_us, &ret);
+
+ return ret;
+}
+
+static void vd55g1_update_img_pad_format(struct vd55g1 *sensor,
+ const struct vd55g1_mode *mode,
+ u32 code,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->code = code;
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
+ fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
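+/*
+ * In HDR subtraction mode the sensor cycles between two contexts: context 0
+ * produces a masked frame used only for auto exposure, while context 1
+ * outputs the subtracted frame to the host, as reflected by the VT_MODE and
+ * MASK_FRAME_CTRL programming below.
+ */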
+static int vd55g1_update_hdr_mode(struct vd55g1 *sensor)
+{
+ int ret = 0;
+
+ switch (sensor->hdr_ctrl->val) {
+ case VD55G1_NO_HDR:
+ vd55g1_write(sensor, VD55G1_REG_EXPOSURE_MAX_COARSE,
+ VD55G1_EXPOSURE_MAX_COARSE_DEF, &ret);
+ vd55g1_write(sensor, VD55G1_REG_EXPOSURE_USE_CASES, 0, &ret);
+ vd55g1_write(sensor, VD55G1_REG_NEXT_CTX, 0x0, &ret);
+
+ vd55g1_write(sensor, VD55G1_REG_CTX_REPEAT_COUNT_CTX0, 0, &ret);
+
+ vd55g1_write(sensor, VD55G1_REG_VT_MODE(0),
+ VD55G1_VT_MODE_NORMAL, &ret);
+ vd55g1_write(sensor, VD55G1_REG_MASK_FRAME_CTRL(0),
+ VD55G1_MASK_FRAME_CTRL_OUTPUT, &ret);
+ break;
+ case VD55G1_HDR_SUB:
+ vd55g1_write(sensor, VD55G1_REG_EXPOSURE_MAX_COARSE,
+ VD55G1_EXPOSURE_MAX_COARSE_SUB, &ret);
+ vd55g1_write(sensor, VD55G1_REG_EXPOSURE_USE_CASES,
+ VD55G1_EXPOSURE_USE_CASES_MULTI_CONTEXT, &ret);
+ vd55g1_write(sensor, VD55G1_REG_NEXT_CTX, 0x0001, &ret);
+
+ vd55g1_write(sensor, VD55G1_REG_CTX_REPEAT_COUNT_CTX0, 1, &ret);
+ vd55g1_write(sensor, VD55G1_REG_CTX_REPEAT_COUNT_CTX1, 1, &ret);
+
+ vd55g1_write(sensor, VD55G1_REG_VT_MODE(0),
+ VD55G1_VT_MODE_NORMAL, &ret);
+ vd55g1_write(sensor, VD55G1_REG_MASK_FRAME_CTRL(0),
+ VD55G1_MASK_FRAME_CTRL_MASK, &ret);
+ vd55g1_write(sensor, VD55G1_REG_EXPOSURE_INSTANCE(0), 0, &ret);
+ vd55g1_write(sensor, VD55G1_REG_VT_MODE(1),
+ VD55G1_VT_MODE_SUBTRACTION, &ret);
+ vd55g1_write(sensor, VD55G1_REG_MASK_FRAME_CTRL(1),
+ VD55G1_MASK_FRAME_CTRL_OUTPUT, &ret);
+ vd55g1_write(sensor, VD55G1_REG_EXPOSURE_INSTANCE(1), 1, &ret);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int vd55g1_set_framefmt(struct vd55g1 *sensor,
+ struct v4l2_mbus_framefmt *format,
+ struct v4l2_rect *crop)
+{
+ u8 binning;
+ int ret = 0;
+
+ vd55g1_write(sensor, VD55G1_REG_FORMAT_CTRL,
+ vd55g1_get_fmt_desc(sensor, format->code)->bpp, &ret);
+ vd55g1_write(sensor, VD55G1_REG_OIF_IMG_CTRL,
+ vd55g1_get_fmt_desc(sensor, format->code)->data_type,
+ &ret);
+
+ switch (crop->width / format->width) {
+ case 1:
+ default:
+ binning = VD55G1_READOUT_CTRL_BIN_MODE_NORMAL;
+ break;
+ case 2:
+ binning = VD55G1_READOUT_CTRL_BIN_MODE_DIGITAL_X2;
+ break;
+ }
+ vd55g1_write(sensor, VD55G1_REG_READOUT_CTRL, binning, &ret);
+
+ vd55g1_write(sensor, VD55G1_REG_X_START(0), crop->left, &ret);
+ vd55g1_write(sensor, VD55G1_REG_X_WIDTH(0), crop->width, &ret);
+ vd55g1_write(sensor, VD55G1_REG_Y_START(0), crop->top, &ret);
+ vd55g1_write(sensor, VD55G1_REG_Y_HEIGHT(0), crop->height, &ret);
+
+ vd55g1_write(sensor, VD55G1_REG_X_START(1), crop->left, &ret);
+ vd55g1_write(sensor, VD55G1_REG_X_WIDTH(1), crop->width, &ret);
+ vd55g1_write(sensor, VD55G1_REG_Y_START(1), crop->top, &ret);
+ vd55g1_write(sensor, VD55G1_REG_Y_HEIGHT(1), crop->height, &ret);
+
+ return ret;
+}
+
+static int vd55g1_update_gpios(struct vd55g1 *sensor, unsigned long gpio_mask)
+{
+ unsigned long io;
+ u8 gpio_val;
+ int ret = 0;
+
+ for_each_set_bit(io, &gpio_mask, VD55G1_NB_GPIOS) {
+ gpio_val = sensor->gpios[io];
+
+ if (gpio_val == VD55G1_GPIO_MODE_STROBE &&
+ sensor->led_ctrl->val == V4L2_FLASH_LED_MODE_NONE) {
+ gpio_val = VD55G1_GPIO_MODE_IN;
+ if (sensor->hdr_ctrl->val == VD55G1_HDR_SUB) {
+ /* Make its context 1 counterpart strobe too */
+ vd55g1_write(sensor,
+ VD55G1_REG_GPIO_0_CTRL(1) + io,
+ gpio_val, &ret);
+ }
+ }
+
+		vd55g1_write(sensor, VD55G1_REG_GPIO_0_CTRL(0) + io,
+			     gpio_val, &ret);
+ }
+
+ return ret;
+}
+
+static int vd55g1_ro_ctrls_setup(struct vd55g1 *sensor, struct v4l2_rect *crop)
+{
+ return vd55g1_write(sensor, VD55G1_REG_LINE_LENGTH,
+ crop->width + sensor->hblank_ctrl->val, NULL);
+}
+
+static void vd55g1_grab_ctrls(struct vd55g1 *sensor, bool enable)
+{
+ /* These settings cannot change during stream */
+ v4l2_ctrl_grab(sensor->hflip_ctrl, enable);
+ v4l2_ctrl_grab(sensor->vflip_ctrl, enable);
+ v4l2_ctrl_grab(sensor->patgen_ctrl, enable);
+ v4l2_ctrl_grab(sensor->hdr_ctrl, enable);
+}
+
+static int vd55g1_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct vd55g1 *sensor = to_vd55g1(sd);
+ struct v4l2_rect *crop =
+ v4l2_subdev_state_get_crop(state, 0);
+ struct v4l2_mbus_framefmt *format =
+ v4l2_subdev_state_get_format(state, 0);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(sensor->dev);
+ if (ret < 0)
+ return ret;
+
+ vd55g1_write(sensor, VD55G1_REG_EXT_CLOCK, sensor->xclk_freq, &ret);
+
+ /* Configure output */
+ vd55g1_write(sensor, VD55G1_REG_MIPI_DATA_RATE,
+ sensor->mipi_rate, &ret);
+ vd55g1_write(sensor, VD55G1_REG_OIF_CTRL, sensor->oif_ctrl, &ret);
+ vd55g1_write(sensor, VD55G1_REG_ISL_ENABLE, 0, &ret);
+ if (ret)
+ goto err_rpm_put;
+
+ ret = vd55g1_set_framefmt(sensor, format, crop);
+ if (ret)
+ goto err_rpm_put;
+
+	/* Set up default GPIO values; they may be overridden by V4L2 ctrls */
+ ret = vd55g1_update_gpios(sensor, GENMASK(VD55G1_NB_GPIOS - 1, 0));
+ if (ret)
+ goto err_rpm_put;
+
+ ret = vd55g1_apply_cold_start(sensor, crop);
+ if (ret)
+ goto err_rpm_put;
+
+ /* Apply settings from V4L2 ctrls */
+ ret = __v4l2_ctrl_handler_setup(&sensor->ctrl_handler);
+ if (ret)
+ goto err_rpm_put;
+
+ /* Also apply settings from read-only V4L2 ctrls */
+ ret = vd55g1_ro_ctrls_setup(sensor, crop);
+ if (ret)
+ goto err_rpm_put;
+
+ /* Start streaming */
+ vd55g1_write(sensor, VD55G1_REG_STBY, VD55G1_STBY_START_STREAM, &ret);
+ vd55g1_poll_reg(sensor, VD55G1_REG_STBY, 0, &ret);
+ vd55g1_wait_state(sensor, VD55G1_SYSTEM_FSM_STREAMING, &ret);
+ if (ret)
+ goto err_rpm_put;
+
+ vd55g1_grab_ctrls(sensor, true);
+
+ return 0;
+
+err_rpm_put:
+ pm_runtime_put(sensor->dev);
+	return ret;
+}
+
+static int vd55g1_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct vd55g1 *sensor = to_vd55g1(sd);
+ int ret = 0;
+
+	/* Read back the expo cluster to enable AE cold start */
+ ret = vd55g1_read_expo_cluster(sensor);
+
+ vd55g1_write(sensor, VD55G1_REG_STREAMING, VD55G1_STREAMING_STOP_STREAM,
+ &ret);
+ vd55g1_poll_reg(sensor, VD55G1_REG_STREAMING, 0, &ret);
+ vd55g1_wait_state(sensor, VD55G1_SYSTEM_FSM_SW_STBY, &ret);
+
+ if (ret)
+ dev_warn(sensor->dev, "Can't disable stream\n");
+
+ vd55g1_grab_ctrls(sensor, false);
+
+ pm_runtime_mark_last_busy(sensor->dev);
+ pm_runtime_put_autosuspend(sensor->dev);
+
+ return ret;
+}
+
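+/*
+ * Upload the firmware patch, then check that the sensor reports the
+ * expected revision, encoded with the major number in the high byte and the
+ * minor number in the low byte of VD55G1_REG_FWPATCH_REVISION.
+ */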
+static int vd55g1_patch(struct vd55g1 *sensor)
+{
+ u64 patch;
+ int ret = 0;
+
+ vd55g1_write_array(sensor, VD55G1_REG_FWPATCH_START_ADDR,
+ sizeof(patch_array), patch_array, &ret);
+ vd55g1_write(sensor, VD55G1_REG_BOOT, VD55G1_BOOT_PATCH_SETUP, &ret);
+ vd55g1_poll_reg(sensor, VD55G1_REG_BOOT, 0, &ret);
+ if (ret) {
+ dev_err(sensor->dev, "Failed to apply patch\n");
+ return ret;
+ }
+
+ vd55g1_read(sensor, VD55G1_REG_FWPATCH_REVISION, &patch, &ret);
+ if (patch != (VD55G1_FWPATCH_REVISION_MAJOR << 8) +
+ VD55G1_FWPATCH_REVISION_MINOR) {
+		dev_err(sensor->dev, "Bad patch version: expected %d.%d, got %d.%d\n",
+ VD55G1_FWPATCH_REVISION_MAJOR,
+ VD55G1_FWPATCH_REVISION_MINOR,
+ (u8)(patch >> 8), (u8)(patch & 0xff));
+ return -ENODEV;
+ }
+ dev_dbg(sensor->dev, "patch %d.%d applied\n",
+ (u8)(patch >> 8), (u8)(patch & 0xff));
+
+ return 0;
+}
+
+static int vd55g1_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ const struct v4l2_rect *crop = v4l2_subdev_state_get_crop(sd_state, 0);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *crop;
+ return 0;
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = VD55G1_WIDTH;
+ sel->r.height = VD55G1_HEIGHT;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int vd55g1_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(vd55g1_mbus_codes))
+ return -EINVAL;
+
+ code->code = vd55g1_mbus_codes[code->index].code;
+
+ return 0;
+}
+
+static int vd55g1_new_format_change_controls(struct vd55g1 *sensor,
+ struct v4l2_mbus_framefmt *format,
+ struct v4l2_rect *crop)
+{
+ struct vd55g1_vblank_limits vblank;
+ unsigned int hblank;
+ unsigned int frame_length = 0;
+ unsigned int expo_max;
+ int ret;
+
+ /* Reset vblank and frame length to default */
+ vd55g1_get_vblank_limits(sensor, crop, &vblank);
+ ret = __v4l2_ctrl_modify_range(sensor->vblank_ctrl, vblank.min,
+ vblank.max, 1, vblank.def);
+ if (ret)
+ return ret;
+
+ /* Max exposure changes with vblank */
+ frame_length = crop->height + sensor->vblank_ctrl->val;
+ expo_max = frame_length - VD55G1_EXPO_MAX_TERM;
+ ret = __v4l2_ctrl_modify_range(sensor->expo_ctrl, 0, expo_max, 1,
+ VD55G1_EXPO_DEF);
+ if (ret)
+ return ret;
+
+ /* Update pixel rate to reflect new bpp */
+ ret = __v4l2_ctrl_s_ctrl_int64(sensor->pixel_rate_ctrl,
+ vd55g1_get_pixel_rate(sensor, format));
+ if (ret)
+ return ret;
+
+ /* Update hblank according to new width */
+ hblank = vd55g1_get_hblank_min(sensor, format, crop);
+ ret = __v4l2_ctrl_modify_range(sensor->hblank_ctrl, hblank, hblank, 1,
+ hblank);
+
+ return ret;
+}
+
+static int vd55g1_set_pad_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sd_fmt)
+{
+ struct vd55g1 *sensor = to_vd55g1(sd);
+ const struct vd55g1_mode *new_mode;
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect pad_crop;
+ unsigned int binning;
+
+ new_mode = v4l2_find_nearest_size(vd55g1_supported_modes,
+ ARRAY_SIZE(vd55g1_supported_modes),
+ width, height, sd_fmt->format.width,
+ sd_fmt->format.height);
+
+ vd55g1_update_img_pad_format(sensor, new_mode, sd_fmt->format.code,
+ &sd_fmt->format);
+
+ /*
+ * Use binning to maximize the crop rectangle size, and centre it in the
+ * sensor.
+ */
+ binning = min(VD55G1_WIDTH / sd_fmt->format.width,
+ VD55G1_HEIGHT / sd_fmt->format.height);
+ binning = min(binning, 2U);
+ pad_crop.width = sd_fmt->format.width * binning;
+ pad_crop.height = sd_fmt->format.height * binning;
+ pad_crop.left = (VD55G1_WIDTH - pad_crop.width) / 2;
+ pad_crop.top = (VD55G1_HEIGHT - pad_crop.height) / 2;
+
+ format = v4l2_subdev_state_get_format(sd_state, sd_fmt->pad);
+
+ *format = sd_fmt->format;
+
+ *v4l2_subdev_state_get_crop(sd_state, sd_fmt->pad) = pad_crop;
+ if (sd_fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ return vd55g1_new_format_change_controls(sensor,
+ &sd_fmt->format,
+ &pad_crop);
+
+ return 0;
+}
+
+static int vd55g1_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ unsigned int def_mode = VD55G1_DEFAULT_MODE;
+ struct vd55g1 *sensor = to_vd55g1(sd);
+ struct v4l2_subdev_format fmt = { 0 };
+ struct v4l2_subdev_route routes[] = {
+ { .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE }
+ };
+ struct v4l2_subdev_krouting routing = {
+ .num_routes = ARRAY_SIZE(routes),
+ .routes = routes,
+ };
+ int ret;
+
+ /* Needed by v4l2_subdev_s_stream_helper(), even with 1 stream only */
+ ret = v4l2_subdev_set_routing(sd, sd_state, &routing);
+ if (ret)
+ return ret;
+
+ vd55g1_update_img_pad_format(sensor, &vd55g1_supported_modes[def_mode],
+ VD55G1_MEDIA_BUS_FMT_DEF, &fmt.format);
+
+ return vd55g1_set_pad_fmt(sd, sd_state, &fmt);
+}
+
+static int vd55g1_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(vd55g1_supported_modes))
+ return -EINVAL;
+
+ fse->min_width = vd55g1_supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = vd55g1_supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops vd55g1_internal_ops = {
+ .init_state = vd55g1_init_state,
+};
+
+static const struct v4l2_subdev_pad_ops vd55g1_pad_ops = {
+ .enum_mbus_code = vd55g1_enum_mbus_code,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = vd55g1_set_pad_fmt,
+ .get_selection = vd55g1_get_selection,
+ .enum_frame_size = vd55g1_enum_frame_size,
+ .enable_streams = vd55g1_enable_streams,
+ .disable_streams = vd55g1_disable_streams,
+};
+
+static const struct v4l2_subdev_video_ops vd55g1_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_ops vd55g1_subdev_ops = {
+ .video = &vd55g1_video_ops,
+ .pad = &vd55g1_pad_ops,
+};
+
+static int vd55g1_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vd55g1 *sensor = ctrl_to_vd55g1(ctrl);
+ int ret = 0;
+
+ /* Interact with HW only when it is powered ON */
+ if (!pm_runtime_get_if_in_use(sensor->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = vd55g1_read_expo_cluster(sensor);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_mark_last_busy(sensor->dev);
+ pm_runtime_put_autosuspend(sensor->dev);
+
+ return ret;
+}
+
+static int vd55g1_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vd55g1 *sensor = ctrl_to_vd55g1(ctrl);
+ unsigned int frame_length = 0;
+ unsigned int expo_max;
+ struct v4l2_subdev_state *state =
+ v4l2_subdev_get_locked_active_state(&sensor->sd);
+ struct v4l2_rect *crop =
+ v4l2_subdev_state_get_crop(state, 0);
+ struct v4l2_mbus_framefmt *format =
+ v4l2_subdev_state_get_format(state, 0);
+ unsigned int hblank = vd55g1_get_hblank_min(sensor, format, crop);
+ bool is_auto = false;
+ int ret = 0;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
+ return 0;
+
+	/* Update control state, ranges, etc. regardless of the HW state */
+ switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ frame_length = crop->height + ctrl->val;
+ expo_max = frame_length - VD55G1_EXPO_MAX_TERM;
+ ret = __v4l2_ctrl_modify_range(sensor->expo_ctrl, 0, expo_max,
+ 1, VD55G1_EXPO_DEF);
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ is_auto = (ctrl->val == V4L2_EXPOSURE_AUTO);
+ __v4l2_ctrl_grab(sensor->ae_lock_ctrl, !is_auto);
+ __v4l2_ctrl_grab(sensor->ae_bias_ctrl, !is_auto);
+ break;
+ case V4L2_CID_HDR_SENSOR_MODE:
+		/* Only act if userspace actually changed the control value */
+ if (ctrl->val != ctrl->cur.val) {
+ /* Max horizontal blanking changes with hdr mode */
+ ret = __v4l2_ctrl_modify_range(sensor->hblank_ctrl,
+ hblank, hblank, 1,
+ hblank);
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Don't modify hardware if controls modification failed */
+ if (ret)
+ return ret;
+
+ /* Interact with HW only when it is powered ON */
+ if (!pm_runtime_get_if_in_use(sensor->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ret = vd55g1_write(sensor, VD55G1_REG_ORIENTATION,
+ sensor->hflip_ctrl->val |
+ (sensor->vflip_ctrl->val << 1),
+ NULL);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = vd55g1_update_patgen(sensor, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = vd55g1_update_expo_cluster(sensor, is_auto);
+ break;
+ case V4L2_CID_3A_LOCK:
+ ret = vd55g1_lock_exposure(sensor, ctrl->val);
+ break;
+ case V4L2_CID_AUTO_EXPOSURE_BIAS:
+ /*
+		 * Use the auto exposure target percentage register to control
+		 * the exposure bias, as it offers better precision.
+ */
+ ret = vd55g1_update_exposure_target(sensor, ctrl->val);
+ break;
+ case V4L2_CID_VBLANK:
+ ret = vd55g1_update_frame_length(sensor, frame_length);
+ break;
+ case V4L2_CID_FLASH_LED_MODE:
+ ret = vd55g1_update_gpios(sensor, sensor->ext_leds_mask);
+ break;
+ case V4L2_CID_HDR_SENSOR_MODE:
+ ret = vd55g1_update_hdr_mode(sensor);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_mark_last_busy(sensor->dev);
+ pm_runtime_put_autosuspend(sensor->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops vd55g1_ctrl_ops = {
+ .g_volatile_ctrl = vd55g1_g_volatile_ctrl,
+ .s_ctrl = vd55g1_s_ctrl,
+};
+
+static int vd55g1_init_ctrls(struct vd55g1 *sensor)
+{
+ const struct v4l2_ctrl_ops *ops = &vd55g1_ctrl_ops;
+ struct v4l2_ctrl_handler *hdl = &sensor->ctrl_handler;
+ struct v4l2_ctrl *ctrl;
+ struct v4l2_fwnode_device_properties fwnode_props;
+ struct vd55g1_vblank_limits vblank;
+ unsigned int hblank;
+ struct v4l2_subdev_state *state =
+ v4l2_subdev_lock_and_get_active_state(&sensor->sd);
+ struct v4l2_rect *crop =
+ v4l2_subdev_state_get_crop(state, 0);
+ struct v4l2_mbus_framefmt *format =
+ v4l2_subdev_state_get_format(state, 0);
+ s32 pixel_rate = vd55g1_get_pixel_rate(sensor, format);
+ int ret;
+
+ v4l2_ctrl_handler_init(hdl, 16);
+
+ /* Flip cluster */
+ sensor->hflip_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ sensor->vflip_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_cluster(2, &sensor->hflip_ctrl);
+
+	/* Exposure cluster */
+ sensor->ae_ctrl = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_EXPOSURE_AUTO, 1,
+ ~0x3, V4L2_EXPOSURE_AUTO);
+ sensor->again_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN,
+ 0, 0x1c, 1, VD55G1_AGAIN_DEF);
+ sensor->dgain_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_DIGITAL_GAIN,
+ 256, 0xffff, 1,
+ VD55G1_DGAIN_DEF);
+ sensor->expo_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE, 0,
+ VD55G1_FRAME_LENGTH_DEF -
+ VD55G1_EXPO_MAX_TERM,
+ 1, VD55G1_EXPO_DEF);
+ v4l2_ctrl_auto_cluster(4, &sensor->ae_ctrl, V4L2_EXPOSURE_MANUAL, true);
+
+ sensor->patgen_ctrl =
+ v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(vd55g1_tp_menu) - 1, 0,
+ 0, vd55g1_tp_menu);
+ ctrl = v4l2_ctrl_new_int_menu(hdl, ops, V4L2_CID_LINK_FREQ,
+ 0, 0, &sensor->link_freq);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ sensor->pixel_rate_ctrl = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_PIXEL_RATE, 1,
+ INT_MAX, 1,
+ pixel_rate);
+ if (sensor->pixel_rate_ctrl)
+ sensor->pixel_rate_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ sensor->ae_lock_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_3A_LOCK,
+ 0, 1, 0, 0);
+ sensor->ae_bias_ctrl =
+ v4l2_ctrl_new_int_menu(hdl, ops,
+ V4L2_CID_AUTO_EXPOSURE_BIAS,
+ ARRAY_SIZE(vd55g1_ev_bias_menu) - 1,
+ ARRAY_SIZE(vd55g1_ev_bias_menu) / 2,
+ vd55g1_ev_bias_menu);
+ sensor->hdr_ctrl =
+ v4l2_ctrl_new_std_menu_items(hdl, ops,
+ V4L2_CID_HDR_SENSOR_MODE,
+ ARRAY_SIZE(vd55g1_hdr_menu) - 1, 0,
+ VD55G1_NO_HDR, vd55g1_hdr_menu);
+ hblank = vd55g1_get_hblank_min(sensor, format, crop);
+ sensor->hblank_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HBLANK,
+ hblank, hblank, 1, hblank);
+ if (sensor->hblank_ctrl)
+ sensor->hblank_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ vd55g1_get_vblank_limits(sensor, crop, &vblank);
+ sensor->vblank_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VBLANK,
+ vblank.min, vblank.max,
+ 1, vblank.def);
+
+ /* Additional controls based on device tree properties */
+ if (sensor->ext_leds_mask) {
+ sensor->led_ctrl =
+ v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_FLASH_LED_MODE,
+ V4L2_FLASH_LED_MODE_FLASH, 0,
+ V4L2_FLASH_LED_MODE_NONE);
+ }
+
+ ret = v4l2_fwnode_device_parse(sensor->dev, &fwnode_props);
+ if (ret)
+ goto free_ctrls;
+
+ ret = v4l2_ctrl_new_fwnode_properties(hdl, ops, &fwnode_props);
+ if (ret)
+ goto free_ctrls;
+
+ sensor->sd.ctrl_handler = hdl;
+ goto unlock_state;
+
+free_ctrls:
+ v4l2_ctrl_handler_free(hdl);
+unlock_state:
+ v4l2_subdev_unlock_state(state);
+ return ret;
+}
+
+static int vd55g1_detect(struct vd55g1 *sensor)
+{
+ u64 device_rev;
+ u64 id;
+ int ret;
+
+ ret = vd55g1_read(sensor, VD55G1_REG_MODEL_ID, &id, NULL);
+ if (ret)
+ return ret;
+
+ if (id != VD55G1_MODEL_ID) {
+ dev_warn(sensor->dev, "Unsupported sensor id %x\n", (u32)id);
+ return -ENODEV;
+ }
+
+ ret = vd55g1_read(sensor, VD55G1_REG_REVISION, &device_rev, NULL);
+ if (ret)
+ return ret;
+
+ if (device_rev != VD55G1_REVISION_CCB) {
+ dev_err(sensor->dev, "Unsupported sensor revision (0x%x)\n",
+ (u16)device_rev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int vd55g1_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct vd55g1 *sensor = to_vd55g1(sd);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(vd55g1_supply_name),
+ sensor->supplies);
+ if (ret) {
+ dev_err(dev, "Failed to enable regulators %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(sensor->xclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock %d\n", ret);
+ goto disable_bulk;
+ }
+
+ gpiod_set_value_cansleep(sensor->reset_gpio, 0);
+ usleep_range(5000, 10000);
+ ret = vd55g1_wait_state(sensor, VD55G1_SYSTEM_FSM_READY_TO_BOOT, NULL);
+ if (ret) {
+ dev_err(dev, "Sensor reset failed %d\n", ret);
+ goto disable_clock;
+ }
+
+ ret = vd55g1_detect(sensor);
+ if (ret) {
+ dev_err(dev, "Sensor detect failed %d\n", ret);
+ goto disable_clock;
+ }
+
+ ret = vd55g1_patch(sensor);
+ if (ret) {
+ dev_err(dev, "Sensor patch failed %d\n", ret);
+ goto disable_clock;
+ }
+
+ ret = vd55g1_wait_state(sensor, VD55G1_SYSTEM_FSM_SW_STBY, NULL);
+ if (ret) {
+ dev_err(dev, "Sensor waiting after patch failed %d\n",
+ ret);
+ goto disable_clock;
+ }
+
+ return 0;
+
+disable_clock:
+ gpiod_set_value_cansleep(sensor->reset_gpio, 1);
+ clk_disable_unprepare(sensor->xclk);
+disable_bulk:
+ regulator_bulk_disable(ARRAY_SIZE(vd55g1_supply_name),
+ sensor->supplies);
+
+ return ret;
+}
+
+static int vd55g1_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct vd55g1 *sensor = to_vd55g1(sd);
+
+ gpiod_set_value_cansleep(sensor->reset_gpio, 1);
+ clk_disable_unprepare(sensor->xclk);
+ regulator_bulk_disable(ARRAY_SIZE(sensor->supplies), sensor->supplies);
+
+ return 0;
+}
+
+static int vd55g1_check_csi_conf(struct vd55g1 *sensor,
+ struct fwnode_handle *endpoint)
+{
+ struct v4l2_fwnode_endpoint ep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
+ u8 n_lanes;
+ int ret;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &ep);
+ if (ret)
+ return -EINVAL;
+
+ /* Check lanes number */
+ n_lanes = ep.bus.mipi_csi2.num_data_lanes;
+ if (n_lanes != 1) {
+ dev_err(sensor->dev, "Sensor only supports 1 lane, found %d\n",
+ n_lanes);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Clock lane must be first */
+ if (ep.bus.mipi_csi2.clock_lane != 0) {
+ dev_err(sensor->dev, "Clock lane must be mapped to lane 0\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+	/* Propagate clock and data lane polarities to the sensor configuration */
+ sensor->oif_ctrl = (ep.bus.mipi_csi2.lane_polarities[0] << 3) |
+ (ep.bus.mipi_csi2.lane_polarities[1] << 6);
+
+ /* Check the link frequency set in device tree */
+ if (!ep.nr_of_link_frequencies) {
+ dev_err(sensor->dev, "link-frequency property not found in DT\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ if (ep.nr_of_link_frequencies != 1) {
+ dev_err(sensor->dev, "Multiple link frequencies not supported\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ sensor->link_freq = ep.link_frequencies[0];
+
+done:
+ v4l2_fwnode_endpoint_free(&ep);
+
+ return ret;
+}
+
+static int vd55g1_parse_dt_gpios_array(struct vd55g1 *sensor,
+ char *prop_name, u32 *array, int *nb)
+{
+ unsigned int i;
+ int ret;
+
+ *nb = device_property_count_u32(sensor->dev, prop_name);
+ if (*nb == -EINVAL) {
+ /* Property not found */
+ *nb = 0;
+ return 0;
+ }
+
+ ret = device_property_read_u32_array(sensor->dev,
+ prop_name, array, *nb);
+ if (ret) {
+ dev_err(sensor->dev, "Failed to read %s prop\n", prop_name);
+ return ret;
+ }
+ for (i = 0; i < *nb; i++) {
+ if (array[i] >= VD55G1_NB_GPIOS) {
+ dev_err(sensor->dev, "Invalid GPIO number %d\n",
+ array[i]);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int vd55g1_parse_dt_gpios(struct vd55g1 *sensor)
+{
+ u32 led_gpios[VD55G1_NB_GPIOS];
+ int nb_gpios_leds;
+ unsigned int i;
+ int ret;
+
+ /* Initialize GPIOs to default */
+ for (i = 0; i < VD55G1_NB_GPIOS; i++)
+ sensor->gpios[i] = VD55G1_GPIO_MODE_IN;
+ sensor->ext_leds_mask = 0;
+
+ /* Take into account optional 'st,leds' output for GPIOs */
+ ret = vd55g1_parse_dt_gpios_array(sensor, "st,leds", led_gpios,
+ &nb_gpios_leds);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nb_gpios_leds; i++) {
+ sensor->gpios[led_gpios[i]] = VD55G1_GPIO_MODE_STROBE;
+ set_bit(led_gpios[i], &sensor->ext_leds_mask);
+ }
+
+ return 0;
+}
+
+static int vd55g1_parse_dt(struct vd55g1 *sensor)
+{
+ struct fwnode_handle *endpoint;
+ int ret;
+
+ endpoint = fwnode_graph_get_endpoint_by_id(dev_fwnode(sensor->dev),
+ 0, 0, 0);
+ if (!endpoint) {
+ dev_err(sensor->dev, "Endpoint node not found\n");
+ return -EINVAL;
+ }
+
+ ret = vd55g1_check_csi_conf(sensor, endpoint);
+ fwnode_handle_put(endpoint);
+ if (ret)
+ return ret;
+
+ return vd55g1_parse_dt_gpios(sensor);
+}
+
+static int vd55g1_subdev_init(struct vd55g1 *sensor)
+{
+ int ret;
+
+ /* Init sub device */
+ sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sensor->sd.internal_ops = &vd55g1_internal_ops;
+
+ /* Init source pad */
+ sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad);
+ if (ret) {
+ dev_err(sensor->dev, "Failed to init media entity: %d\n", ret);
+ return ret;
+ }
+
+ sensor->sd.state_lock = sensor->ctrl_handler.lock;
+ ret = v4l2_subdev_init_finalize(&sensor->sd);
+ if (ret) {
+ dev_err(sensor->dev, "Subdev init error: %d\n", ret);
+ goto err_ctrls;
+ }
+
+ /*
+ * Initialize controls after v4l2_subdev_init_finalize() to make sure
+ * active state is set
+ */
+ ret = vd55g1_init_ctrls(sensor);
+ if (ret) {
+ dev_err(sensor->dev, "Controls initialization failed %d\n",
+ ret);
+ goto err_media;
+ }
+
+ return 0;
+
+err_ctrls:
+ v4l2_ctrl_handler_free(sensor->sd.ctrl_handler);
+
+err_media:
+ media_entity_cleanup(&sensor->sd.entity);
+ return ret;
+}
+
+static void vd55g1_subdev_cleanup(struct vd55g1 *sensor)
+{
+ v4l2_async_unregister_subdev(&sensor->sd);
+ v4l2_subdev_cleanup(&sensor->sd);
+ media_entity_cleanup(&sensor->sd.entity);
+ v4l2_ctrl_handler_free(sensor->sd.ctrl_handler);
+}
+
+static int vd55g1_get_regulators(struct vd55g1 *sensor)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vd55g1_supply_name); i++)
+ sensor->supplies[i].supply = vd55g1_supply_name[i];
+
+ return devm_regulator_bulk_get(sensor->dev,
+ ARRAY_SIZE(vd55g1_supply_name),
+ sensor->supplies);
+}
+
+static int vd55g1_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct vd55g1 *sensor;
+ int ret;
+
+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+ sensor->dev = &client->dev;
+
+ v4l2_i2c_subdev_init(&sensor->sd, client, &vd55g1_subdev_ops);
+
+ ret = vd55g1_parse_dt(sensor);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to parse Device Tree\n");
+
+	/* Get (and check) resources: power regulators, external clock, reset GPIO */
+ ret = vd55g1_get_regulators(sensor);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ sensor->xclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(sensor->xclk))
+ return dev_err_probe(dev, PTR_ERR(sensor->xclk),
+ "Failed to get xclk\n");
+
+ sensor->xclk_freq = clk_get_rate(sensor->xclk);
+ ret = vd55g1_prepare_clock_tree(sensor);
+ if (ret)
+ return ret;
+
+ sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(sensor->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(sensor->reset_gpio),
+ "Failed to get reset gpio\n");
+
+ sensor->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(sensor->regmap))
+ return dev_err_probe(dev, PTR_ERR(sensor->regmap),
+ "Failed to init regmap\n");
+
+ /* Detect if sensor is present and if its revision is supported */
+ ret = vd55g1_power_on(dev);
+ if (ret)
+ return ret;
+
+	/*
+	 * Enable pm_runtime with autosuspend: the sensor is already powered
+	 * on, so mark it active and let autosuspend power it off after the
+	 * 4 s delay if it stays unused.
+	 */
+ pm_runtime_set_active(dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, 4000);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ ret = vd55g1_subdev_init(sensor);
+ if (ret) {
+ dev_err(dev, "V4l2 init failed: %d\n", ret);
+ goto err_power_off;
+ }
+
+ ret = v4l2_async_register_subdev(&sensor->sd);
+ if (ret) {
+ dev_err(dev, "async subdev register failed %d\n", ret);
+ goto err_subdev;
+ }
+
+ return 0;
+
+err_subdev:
+ vd55g1_subdev_cleanup(sensor);
+err_power_off:
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ vd55g1_power_off(dev);
+
+ return ret;
+}
+
+static void vd55g1_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct vd55g1 *sensor = to_vd55g1(sd);
+
+ vd55g1_subdev_cleanup(sensor);
+
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ vd55g1_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_dont_use_autosuspend(&client->dev);
+}
+
+static const struct of_device_id vd55g1_dt_ids[] = {
+ { .compatible = "st,vd55g1" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vd55g1_dt_ids);
+
+static const struct dev_pm_ops vd55g1_pm_ops = {
+ SET_RUNTIME_PM_OPS(vd55g1_power_off, vd55g1_power_on, NULL)
+};
+
+static struct i2c_driver vd55g1_i2c_driver = {
+ .driver = {
+ .name = "vd55g1",
+ .of_match_table = vd55g1_dt_ids,
+ .pm = &vd55g1_pm_ops,
+ },
+ .probe = vd55g1_probe,
+ .remove = vd55g1_remove,
+};
+
+module_i2c_driver(vd55g1_i2c_driver);
+
+MODULE_AUTHOR("Benjamin Mugnier <benjamin.mugnier@foss.st.com>");
+MODULE_AUTHOR("Sylvain Petinot <sylvain.petinot@foss.st.com>");
+MODULE_DESCRIPTION("VD55G1 camera subdev driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/vd56g3.c b/drivers/media/i2c/vd56g3.c
new file mode 100644
index 000000000000..5d951ad0b478
--- /dev/null
+++ b/drivers/media/i2c/vd56g3.c
@@ -0,0 +1,1586 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A V4L2 driver for ST VD56G3 (Mono) and VD66GY (RGB) global shutter cameras.
+ * Copyright (C) 2024, STMicroelectronics SA
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+#include <linux/units.h>
+
+#include <media/mipi-csi2.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+/* Register Map */
+#define VD56G3_REG_MODEL_ID CCI_REG16_LE(0x0000)
+#define VD56G3_MODEL_ID 0x5603
+#define VD56G3_REG_REVISION CCI_REG16_LE(0x0002)
+#define VD56G3_REVISION_CUT3 0x31
+#define VD56G3_REG_OPTICAL_REVISION CCI_REG8(0x001a)
+#define VD56G3_OPTICAL_REVISION_MONO 0
+#define VD56G3_OPTICAL_REVISION_BAYER 1
+#define VD56G3_REG_SYSTEM_FSM CCI_REG8(0x0028)
+#define VD56G3_SYSTEM_FSM_READY_TO_BOOT 0x01
+#define VD56G3_SYSTEM_FSM_SW_STBY 0x02
+#define VD56G3_SYSTEM_FSM_STREAMING 0x03
+#define VD56G3_REG_APPLIED_COARSE_EXPOSURE CCI_REG16_LE(0x0064)
+#define VD56G3_REG_APPLIED_ANALOG_GAIN CCI_REG8(0x0068)
+#define VD56G3_REG_APPLIED_DIGITAL_GAIN CCI_REG16_LE(0x006a)
+#define VD56G3_REG_BOOT CCI_REG8(0x0200)
+#define VD56G3_CMD_ACK 0
+#define VD56G3_CMD_BOOT 1
+#define VD56G3_REG_STBY CCI_REG8(0x0201)
+#define VD56G3_CMD_START_STREAM 1
+#define VD56G3_REG_STREAMING CCI_REG8(0x0202)
+#define VD56G3_CMD_STOP_STREAM 1
+#define VD56G3_REG_EXT_CLOCK CCI_REG32_LE(0x0220)
+#define VD56G3_REG_CLK_PLL_PREDIV CCI_REG8(0x0224)
+#define VD56G3_REG_CLK_SYS_PLL_MULT CCI_REG8(0x0226)
+#define VD56G3_REG_ORIENTATION CCI_REG8(0x0302)
+#define VD56G3_REG_FORMAT_CTRL CCI_REG8(0x030a)
+#define VD56G3_REG_OIF_CTRL CCI_REG16_LE(0x030c)
+#define VD56G3_REG_OIF_IMG_CTRL CCI_REG8(0x030f)
+#define VD56G3_REG_OIF_CSI_BITRATE CCI_REG16_LE(0x0312)
+#define VD56G3_REG_DUSTER_CTRL CCI_REG8(0x0318)
+#define VD56G3_DUSTER_DISABLE 0
+#define VD56G3_DUSTER_ENABLE_DEF_MODULES 0x13
+#define VD56G3_REG_ISL_ENABLE CCI_REG8(0x0333)
+#define VD56G3_REG_DARKCAL_CTRL CCI_REG8(0x0340)
+#define VD56G3_DARKCAL_ENABLE 1
+#define VD56G3_DARKCAL_DISABLE_DARKAVG 2
+#define VD56G3_REG_PATGEN_CTRL CCI_REG16_LE(0x0400)
+#define VD56G3_PATGEN_ENABLE 1
+#define VD56G3_PATGEN_TYPE_SHIFT 4
+#define VD56G3_REG_AE_COLDSTART_COARSE_EXPOSURE CCI_REG16_LE(0x042a)
+#define VD56G3_REG_AE_COLDSTART_ANALOG_GAIN CCI_REG8(0x042c)
+#define VD56G3_REG_AE_COLDSTART_DIGITAL_GAIN CCI_REG16_LE(0x042e)
+#define VD56G3_REG_AE_ROI_START_H CCI_REG16_LE(0x0432)
+#define VD56G3_REG_AE_ROI_START_V CCI_REG16_LE(0x0434)
+#define VD56G3_REG_AE_ROI_END_H CCI_REG16_LE(0x0436)
+#define VD56G3_REG_AE_ROI_END_V CCI_REG16_LE(0x0438)
+#define VD56G3_REG_AE_COMPENSATION CCI_REG16_LE(0x043a)
+#define VD56G3_REG_EXP_MODE CCI_REG8(0x044c)
+#define VD56G3_EXP_MODE_AUTO 0
+#define VD56G3_EXP_MODE_FREEZE 1
+#define VD56G3_EXP_MODE_MANUAL 2
+#define VD56G3_REG_MANUAL_ANALOG_GAIN CCI_REG8(0x044d)
+#define VD56G3_REG_MANUAL_COARSE_EXPOSURE CCI_REG16_LE(0x044e)
+#define VD56G3_REG_MANUAL_DIGITAL_GAIN_CH0 CCI_REG16_LE(0x0450)
+#define VD56G3_REG_MANUAL_DIGITAL_GAIN_CH1 CCI_REG16_LE(0x0452)
+#define VD56G3_REG_MANUAL_DIGITAL_GAIN_CH2 CCI_REG16_LE(0x0454)
+#define VD56G3_REG_MANUAL_DIGITAL_GAIN_CH3 CCI_REG16_LE(0x0456)
+#define VD56G3_REG_FRAME_LENGTH CCI_REG16_LE(0x0458)
+#define VD56G3_REG_Y_START CCI_REG16_LE(0x045a)
+#define VD56G3_REG_Y_END CCI_REG16_LE(0x045c)
+#define VD56G3_REG_OUT_ROI_X_START CCI_REG16_LE(0x045e)
+#define VD56G3_REG_OUT_ROI_X_END CCI_REG16_LE(0x0460)
+#define VD56G3_REG_OUT_ROI_Y_START CCI_REG16_LE(0x0462)
+#define VD56G3_REG_OUT_ROI_Y_END CCI_REG16_LE(0x0464)
+#define VD56G3_REG_GPIO_0_CTRL CCI_REG8(0x0467)
+#define VD56G3_GPIOX_GPIO_IN 0x01
+#define VD56G3_GPIOX_STROBE_MODE 0x02
+#define VD56G3_REG_READOUT_CTRL CCI_REG8(0x047e)
+#define READOUT_NORMAL 0x00
+#define READOUT_DIGITAL_BINNING_X2 0x01
+
+/* The VD56G3 is a portrait image sensor with native resolution of 1124x1364. */
+#define VD56G3_NATIVE_WIDTH 1124
+#define VD56G3_NATIVE_HEIGHT 1364
+#define VD56G3_DEFAULT_MODE 0
+
+/* PLL settings */
+#define VD56G3_TARGET_PLL 804000000UL
+#define VD56G3_VT_CLOCK_DIV 5
+
+/* External clock must be in [6 MHz - 27 MHz] */
+#define VD56G3_XCLK_FREQ_MIN (6 * HZ_PER_MHZ)
+#define VD56G3_XCLK_FREQ_MAX (27 * HZ_PER_MHZ)
+
+/* Line length and Frame length (settings are for standard 10bits ADC mode) */
+#define VD56G3_LINE_LENGTH_MIN 1236
+#define VD56G3_VBLANK_MIN 110
+#define VD56G3_FRAME_LENGTH_DEF_60FPS 2168
+#define VD56G3_FRAME_LENGTH_MAX 0xffff
+
+/* Exposure settings */
+#define VD56G3_EXPOSURE_MARGIN 75
+#define VD56G3_EXPOSURE_MIN 5
+#define VD56G3_EXPOSURE_DEFAULT 1420
+
+/* Output Interface settings */
+#define VD56G3_MAX_CSI_DATA_LANES 2
+#define VD56G3_LINK_FREQ_DEF_1LANE 750000000UL
+#define VD56G3_LINK_FREQ_DEF_2LANES 402000000UL
+
+/* GPIOs */
+#define VD56G3_NB_GPIOS 8
+
+/* regulator supplies */
+static const char *const vd56g3_supply_names[] = {
+ "vcore",
+ "vddio",
+ "vana",
+};
+
+/* -----------------------------------------------------------------------------
+ * Models (VD56G3: Mono, VD66GY: Bayer RGB), Modes and formats
+ */
+
+enum vd56g3_models {
+ VD56G3_MODEL_VD56G3,
+ VD56G3_MODEL_VD66GY,
+};
+
+struct vd56g3_mode {
+ u32 width;
+ u32 height;
+};
+
+static const struct vd56g3_mode vd56g3_supported_modes[] = {
+ {
+ .width = VD56G3_NATIVE_WIDTH,
+ .height = VD56G3_NATIVE_HEIGHT,
+ },
+ {
+ .width = 1120,
+ .height = 1360,
+ },
+ {
+ .width = 1024,
+ .height = 1280,
+ },
+ {
+ .width = 1024,
+ .height = 768,
+ },
+ {
+ .width = 768,
+ .height = 1024,
+ },
+ {
+ .width = 720,
+ .height = 1280,
+ },
+ {
+ .width = 640,
+ .height = 480,
+ },
+ {
+ .width = 480,
+ .height = 640,
+ },
+ {
+ .width = 320,
+ .height = 240,
+ },
+};
+
+/*
+ * Both sensor variants support 8-bit and 10-bit output:
+ * - Monochrome
+ * - RGB (with all H/V flip variations)
+ */
+static const unsigned int vd56g3_mbus_codes[2][5] = {
+ {
+ MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ },
+ {
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ },
+};
+
+struct vd56g3 {
+ struct device *dev;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(vd56g3_supply_names)];
+ struct gpio_desc *reset_gpio;
+ struct clk *xclk;
+ struct regmap *regmap;
+ u32 xclk_freq;
+ u32 pll_prediv;
+ u32 pll_mult;
+ u32 pixel_clock;
+ u16 oif_ctrl;
+ u8 nb_of_lane;
+ u32 gpios[VD56G3_NB_GPIOS];
+ unsigned long ext_leds_mask;
+ bool is_mono;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *hblank_ctrl;
+ struct v4l2_ctrl *vblank_ctrl;
+ struct {
+ struct v4l2_ctrl *hflip_ctrl;
+ struct v4l2_ctrl *vflip_ctrl;
+ };
+ struct v4l2_ctrl *patgen_ctrl;
+ struct {
+ struct v4l2_ctrl *ae_ctrl;
+ struct v4l2_ctrl *expo_ctrl;
+ struct v4l2_ctrl *again_ctrl;
+ struct v4l2_ctrl *dgain_ctrl;
+ };
+ struct v4l2_ctrl *ae_lock_ctrl;
+ struct v4l2_ctrl *ae_bias_ctrl;
+ struct v4l2_ctrl *led_ctrl;
+};
+
+static inline struct vd56g3 *to_vd56g3(struct v4l2_subdev *sd)
+{
+ return container_of_const(sd, struct vd56g3, sd);
+}
+
+static inline struct vd56g3 *ctrl_to_vd56g3(struct v4l2_ctrl *ctrl)
+{
+ return container_of_const(ctrl->handler, struct vd56g3, ctrl_handler);
+}
+
+/* -----------------------------------------------------------------------------
+ * Additional i2c register helpers
+ */
+
+static int vd56g3_poll_reg(struct vd56g3 *sensor, u32 reg, u8 poll_val,
+ int *err)
+{
+ unsigned int val = 0;
+ int ret;
+
+ if (err && *err)
+ return *err;
+
+	/*
+	 * Timeout must be higher than the longest frame duration. With the
+	 * current blanking constraints, a frame can take up to 504 ms
+	 * (0xffff lines * 1236 pixels / 160.8 MHz).
+	 */
+ ret = regmap_read_poll_timeout(sensor->regmap, CCI_REG_ADDR(reg), val,
+ (val == poll_val), 2000,
+ 600 * USEC_PER_MSEC);
+
+ if (ret && err)
+ *err = ret;
+
+ return ret;
+}
+
+static int vd56g3_wait_state(struct vd56g3 *sensor, int state, int *err)
+{
+ return vd56g3_poll_reg(sensor, VD56G3_REG_SYSTEM_FSM, state, err);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls: definitions, helpers and handlers
+ */
+
+static const char *const vd56g3_tp_menu[] = { "Disabled",
+ "Solid Color",
+ "Vertical Color Bars",
+ "Horizontal Gray Scale",
+ "Vertical Gray Scale",
+ "Diagonal Gray Scale",
+ "Pseudo Random" };
+
+static const s64 vd56g3_ev_bias_qmenu[] = { -4000, -3500, -3000, -2500, -2000,
+ -1500, -1000, -500, 0, 500,
+ 1000, 1500, 2000, 2500, 3000,
+ 3500, 4000 };
+
+static const s64 vd56g3_link_freq_1lane[] = { VD56G3_LINK_FREQ_DEF_1LANE };
+
+static const s64 vd56g3_link_freq_2lanes[] = { VD56G3_LINK_FREQ_DEF_2LANES };
+
+static u8 vd56g3_get_bpp(u32 code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ default:
+ return 8;
+ case MEDIA_BUS_FMT_Y10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ return 10;
+ }
+}
+
+static u8 vd56g3_get_datatype(u32 code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ default:
+ return MIPI_CSI2_DT_RAW8;
+ case MEDIA_BUS_FMT_Y10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ return MIPI_CSI2_DT_RAW10;
+ }
+}
+
+static int vd56g3_read_expo_cluster(struct vd56g3 *sensor, bool force_cur_val)
+{
+ u64 exposure;
+ u64 again;
+ u64 dgain;
+ int ret = 0;
+
+ /*
+	 * When 'force_cur_val' is enabled, save the ctrl value in 'cur.val'
+	 * instead of the normal 'val'. This is used during poweroff to cache
+	 * volatile ctrls and enable AE coldstart.
+ */
+ cci_read(sensor->regmap, VD56G3_REG_APPLIED_COARSE_EXPOSURE, &exposure,
+ &ret);
+ cci_read(sensor->regmap, VD56G3_REG_APPLIED_ANALOG_GAIN, &again, &ret);
+ cci_read(sensor->regmap, VD56G3_REG_APPLIED_DIGITAL_GAIN, &dgain, &ret);
+ if (ret)
+ return ret;
+
+ if (force_cur_val) {
+ sensor->expo_ctrl->cur.val = exposure;
+ sensor->again_ctrl->cur.val = again;
+ sensor->dgain_ctrl->cur.val = dgain;
+ } else {
+ sensor->expo_ctrl->val = exposure;
+ sensor->again_ctrl->val = again;
+ sensor->dgain_ctrl->val = dgain;
+ }
+
+ return ret;
+}
+
+static int vd56g3_update_patgen(struct vd56g3 *sensor, u32 patgen_index)
+{
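+	/*
+	 * Test pattern menu entries 0..2 map directly to HW pattern codes;
+	 * the grayscale and PRBS entries (3..6) live at HW codes 16..19.
+	 */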
+ u32 pattern = patgen_index <= 2 ? patgen_index : patgen_index + 13;
+ u16 patgen = pattern << VD56G3_PATGEN_TYPE_SHIFT;
+ u8 duster = VD56G3_DUSTER_ENABLE_DEF_MODULES;
+ u8 darkcal = VD56G3_DARKCAL_ENABLE;
+ int ret = 0;
+
+ if (patgen_index) {
+ patgen |= VD56G3_PATGEN_ENABLE;
+ duster = VD56G3_DUSTER_DISABLE;
+ darkcal = VD56G3_DARKCAL_DISABLE_DARKAVG;
+ }
+
+ cci_write(sensor->regmap, VD56G3_REG_DUSTER_CTRL, duster, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_DARKCAL_CTRL, darkcal, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_PATGEN_CTRL, patgen, &ret);
+
+ return ret;
+}
+
+static int vd56g3_update_expo_cluster(struct vd56g3 *sensor, bool is_auto)
+{
+ u8 expo_state = is_auto ? VD56G3_EXP_MODE_AUTO : VD56G3_EXP_MODE_MANUAL;
+ int ret = 0;
+
+ if (sensor->ae_ctrl->is_new)
+ cci_write(sensor->regmap, VD56G3_REG_EXP_MODE, expo_state,
+ &ret);
+
+ /* In Auto expo, set coldstart parameters */
+ if (is_auto && sensor->ae_ctrl->is_new) {
+ cci_write(sensor->regmap,
+ VD56G3_REG_AE_COLDSTART_COARSE_EXPOSURE,
+ sensor->expo_ctrl->val, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_AE_COLDSTART_ANALOG_GAIN,
+ sensor->again_ctrl->val, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_AE_COLDSTART_DIGITAL_GAIN,
+ sensor->dgain_ctrl->val, &ret);
+ }
+
+ /* In Manual expo, set exposure, analog and digital gains */
+ if (!is_auto && sensor->expo_ctrl->is_new)
+ cci_write(sensor->regmap, VD56G3_REG_MANUAL_COARSE_EXPOSURE,
+ sensor->expo_ctrl->val, &ret);
+
+ if (!is_auto && sensor->again_ctrl->is_new)
+ cci_write(sensor->regmap, VD56G3_REG_MANUAL_ANALOG_GAIN,
+ sensor->again_ctrl->val, &ret);
+
+ if (!is_auto && sensor->dgain_ctrl->is_new) {
+ cci_write(sensor->regmap, VD56G3_REG_MANUAL_DIGITAL_GAIN_CH0,
+ sensor->dgain_ctrl->val, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_MANUAL_DIGITAL_GAIN_CH1,
+ sensor->dgain_ctrl->val, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_MANUAL_DIGITAL_GAIN_CH2,
+ sensor->dgain_ctrl->val, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_MANUAL_DIGITAL_GAIN_CH3,
+ sensor->dgain_ctrl->val, &ret);
+ }
+
+ return ret;
+}
+
+static int vd56g3_lock_exposure(struct vd56g3 *sensor, u32 lock_val)
+{
+ bool ae_lock = lock_val & V4L2_LOCK_EXPOSURE;
+ u8 expo_state = ae_lock ? VD56G3_EXP_MODE_FREEZE : VD56G3_EXP_MODE_AUTO;
+
+ if (sensor->ae_ctrl->val == V4L2_EXPOSURE_AUTO)
+ return cci_write(sensor->regmap, VD56G3_REG_EXP_MODE,
+ expo_state, NULL);
+
+ return 0;
+}
+
+static int vd56g3_write_gpiox(struct vd56g3 *sensor, unsigned long gpio_mask)
+{
+ unsigned long io;
+ u32 gpio_val;
+ int ret = 0;
+
+ for_each_set_bit(io, &gpio_mask, VD56G3_NB_GPIOS) {
+ gpio_val = sensor->gpios[io];
+
+ if (gpio_val == VD56G3_GPIOX_STROBE_MODE &&
+ sensor->led_ctrl->val == V4L2_FLASH_LED_MODE_NONE)
+ gpio_val = VD56G3_GPIOX_GPIO_IN;
+
+ cci_write(sensor->regmap, VD56G3_REG_GPIO_0_CTRL + io, gpio_val,
+ &ret);
+ }
+
+ return ret;
+}
+
+static int vd56g3_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vd56g3 *sensor = ctrl_to_vd56g3(ctrl);
+ int ret = 0;
+
+ /* Interact with HW only when it is powered ON */
+ if (!pm_runtime_get_if_in_use(sensor->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = vd56g3_read_expo_cluster(sensor, false);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_mark_last_busy(sensor->dev);
+ pm_runtime_put_autosuspend(sensor->dev);
+
+ return ret;
+}
+
+static int vd56g3_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vd56g3 *sensor = ctrl_to_vd56g3(ctrl);
+ struct v4l2_subdev_state *state;
+ const struct v4l2_rect *crop;
+ unsigned int frame_length = 0;
+ unsigned int expo_max;
+ unsigned int ae_compensation;
+ bool is_auto = false;
+ int ret = 0;
+
+ state = v4l2_subdev_get_locked_active_state(&sensor->sd);
+ crop = v4l2_subdev_state_get_crop(state, 0);
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
+ return 0;
+
+	/* Update control states, ranges, etc., regardless of the HW power state */
+ switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ frame_length = crop->height + ctrl->val;
+ expo_max = frame_length - VD56G3_EXPOSURE_MARGIN;
+ ret = __v4l2_ctrl_modify_range(sensor->expo_ctrl,
+ VD56G3_EXPOSURE_MIN, expo_max, 1,
+ min(VD56G3_EXPOSURE_DEFAULT,
+ expo_max));
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ is_auto = (ctrl->val == V4L2_EXPOSURE_AUTO);
+ __v4l2_ctrl_grab(sensor->ae_lock_ctrl, !is_auto);
+ __v4l2_ctrl_grab(sensor->ae_bias_ctrl, !is_auto);
+ break;
+ default:
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Interact with HW only when it is powered ON */
+ if (!pm_runtime_get_if_in_use(sensor->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ret = cci_write(sensor->regmap, VD56G3_REG_ORIENTATION,
+ sensor->hflip_ctrl->val |
+ (sensor->vflip_ctrl->val << 1),
+ NULL);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = vd56g3_update_patgen(sensor, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = vd56g3_update_expo_cluster(sensor, is_auto);
+ break;
+ case V4L2_CID_3A_LOCK:
+ ret = vd56g3_lock_exposure(sensor, ctrl->val);
+ break;
+ case V4L2_CID_AUTO_EXPOSURE_BIAS:
+ ae_compensation =
+ DIV_ROUND_CLOSEST((int)vd56g3_ev_bias_qmenu[ctrl->val] *
+ 256, 1000);
+ ret = cci_write(sensor->regmap, VD56G3_REG_AE_COMPENSATION,
+ ae_compensation, NULL);
+ break;
+ case V4L2_CID_VBLANK:
+ ret = cci_write(sensor->regmap, VD56G3_REG_FRAME_LENGTH,
+ frame_length, NULL);
+ break;
+ case V4L2_CID_FLASH_LED_MODE:
+ ret = vd56g3_write_gpiox(sensor, sensor->ext_leds_mask);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_mark_last_busy(sensor->dev);
+ pm_runtime_put_autosuspend(sensor->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops vd56g3_ctrl_ops = {
+ .g_volatile_ctrl = vd56g3_g_volatile_ctrl,
+ .s_ctrl = vd56g3_s_ctrl,
+};
+
+static int vd56g3_update_controls(struct vd56g3 *sensor)
+{
+ struct v4l2_subdev_state *state;
+ const struct v4l2_rect *crop;
+ unsigned int hblank;
+ unsigned int vblank_min, vblank, vblank_max;
+ unsigned int frame_length;
+ unsigned int expo_max;
+ int ret;
+
+ state = v4l2_subdev_get_locked_active_state(&sensor->sd);
+ crop = v4l2_subdev_state_get_crop(state, 0);
+ hblank = VD56G3_LINE_LENGTH_MIN - crop->width;
+ vblank_min = VD56G3_VBLANK_MIN;
+ vblank = VD56G3_FRAME_LENGTH_DEF_60FPS - crop->height;
+ vblank_max = VD56G3_FRAME_LENGTH_MAX - crop->height;
+ frame_length = crop->height + vblank;
+ expo_max = frame_length - VD56G3_EXPOSURE_MARGIN;
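+	/*
+	 * e.g. for the full 1124x1364 frame: hblank = 1236 - 1124 = 112,
+	 * vblank = 2168 - 1364 = 804, and a 2168 lines x 1236 pixels frame
+	 * at 160.8 MHz lasts ~16.66 ms, i.e. ~60 fps.
+	 */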
+
+ /* Update blanking and exposure (ranges + values) */
+ ret = __v4l2_ctrl_modify_range(sensor->hblank_ctrl, hblank, hblank, 1,
+ hblank);
+ if (ret)
+ return ret;
+
+ ret = __v4l2_ctrl_modify_range(sensor->vblank_ctrl, vblank_min,
+ vblank_max, 1, vblank);
+ if (ret)
+ return ret;
+
+ ret = __v4l2_ctrl_s_ctrl(sensor->vblank_ctrl, vblank);
+ if (ret)
+ return ret;
+
+ ret = __v4l2_ctrl_modify_range(sensor->expo_ctrl, VD56G3_EXPOSURE_MIN,
+ expo_max, 1, VD56G3_EXPOSURE_DEFAULT);
+ if (ret)
+ return ret;
+
+ return __v4l2_ctrl_s_ctrl(sensor->expo_ctrl, VD56G3_EXPOSURE_DEFAULT);
+}
+
+static int vd56g3_init_controls(struct vd56g3 *sensor)
+{
+ const struct v4l2_ctrl_ops *ops = &vd56g3_ctrl_ops;
+ struct v4l2_ctrl_handler *hdl = &sensor->ctrl_handler;
+ struct v4l2_fwnode_device_properties fwnode_props;
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ v4l2_ctrl_handler_init(hdl, 25);
+
+ /* Horizontal & vertical flips modify bayer code on RGB variant */
+ sensor->hflip_ctrl =
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ if (sensor->hflip_ctrl)
+ sensor->hflip_ctrl->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ sensor->vflip_ctrl =
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (sensor->vflip_ctrl)
+ sensor->vflip_ctrl->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ sensor->patgen_ctrl =
+ v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(vd56g3_tp_menu) - 1, 0,
+ 0, vd56g3_tp_menu);
+
+ ctrl = v4l2_ctrl_new_int_menu(hdl, ops, V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(vd56g3_link_freq_1lane) - 1, 0,
+ (sensor->nb_of_lane == 2) ?
+ vd56g3_link_freq_2lanes :
+ vd56g3_link_freq_1lane);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_PIXEL_RATE,
+ sensor->pixel_clock, sensor->pixel_clock, 1,
+ sensor->pixel_clock);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ sensor->ae_ctrl = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_EXPOSURE_AUTO,
+ V4L2_EXPOSURE_MANUAL, 0,
+ V4L2_EXPOSURE_AUTO);
+
+ sensor->ae_lock_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_3A_LOCK, 0,
+ GENMASK(2, 0), 0, 0);
+
+ sensor->ae_bias_ctrl =
+ v4l2_ctrl_new_int_menu(hdl, ops, V4L2_CID_AUTO_EXPOSURE_BIAS,
+ ARRAY_SIZE(vd56g3_ev_bias_qmenu) - 1,
+ ARRAY_SIZE(vd56g3_ev_bias_qmenu) / 2,
+ vd56g3_ev_bias_qmenu);
+
+ /*
+	 * Analog gain [x1, x8] follows the formula:
+	 * 32 / (32 - again_reg), with again_reg in the range [0, 28].
+	 * Digital gain [x1.00, x8.00] is coded as fixed point 5.8.
+ */
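+	/*
+	 * e.g. again_reg = 0 -> x1, 16 -> x2, 28 -> x8;
+	 * dgain = 0x100 -> x1.00, 0x180 -> x1.50, 0x800 -> x8.00.
+	 */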
+ sensor->again_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN,
+ 0, 28, 1, 0);
+ sensor->dgain_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_DIGITAL_GAIN,
+ 0x100, 0x800, 1, 0x100);
+
+ /*
+	 * Set the exposure, horizontal and vertical blanking ctrls to
+	 * hardcoded values; they will be updated in vd56g3_update_controls().
+	 * As exposure is part of an auto-cluster, set a meaningful value here.
+ */
+ sensor->expo_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
+ VD56G3_EXPOSURE_DEFAULT,
+ VD56G3_EXPOSURE_DEFAULT, 1,
+ VD56G3_EXPOSURE_DEFAULT);
+ sensor->hblank_ctrl =
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HBLANK, 1, 1, 1, 1);
+ if (sensor->hblank_ctrl)
+ sensor->hblank_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ sensor->vblank_ctrl =
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VBLANK, 1, 1, 1, 1);
+
+ /* Additional control based on device tree properties */
+ if (sensor->ext_leds_mask)
+ sensor->led_ctrl =
+ v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_FLASH_LED_MODE,
+ V4L2_FLASH_LED_MODE_FLASH, 0,
+ V4L2_FLASH_LED_MODE_NONE);
+
+ if (hdl->error) {
+ ret = hdl->error;
+ goto free_ctrls;
+ }
+
+ v4l2_ctrl_cluster(2, &sensor->hflip_ctrl);
+ v4l2_ctrl_auto_cluster(4, &sensor->ae_ctrl, V4L2_EXPOSURE_MANUAL, true);
+
+ /* Optional controls coming from fwnode (e.g. rotation, orientation). */
+ ret = v4l2_fwnode_device_parse(sensor->dev, &fwnode_props);
+ if (ret)
+ goto free_ctrls;
+
+ ret = v4l2_ctrl_new_fwnode_properties(hdl, ops, &fwnode_props);
+ if (ret)
+ goto free_ctrls;
+
+ sensor->sd.ctrl_handler = hdl;
+
+ return 0;
+
+free_ctrls:
+ v4l2_ctrl_handler_free(hdl);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pad ops
+ */
+
+/*
+ * The media bus code depends on:
+ * - 8-bit or 10-bit output
+ * - the variant: Mono or RGB
+ * - the H/V flip settings in the RGB case
+ */
+static u32 vd56g3_get_mbus_code(struct vd56g3 *sensor, u32 code)
+{
+ unsigned int i_bpp;
+ unsigned int j;
+
+ for (i_bpp = 0; i_bpp < ARRAY_SIZE(vd56g3_mbus_codes); i_bpp++) {
+ for (j = 0; j < ARRAY_SIZE(vd56g3_mbus_codes[i_bpp]); j++) {
+ if (vd56g3_mbus_codes[i_bpp][j] == code)
+ goto endloops;
+ }
+ }
+
+endloops:
+ if (i_bpp >= ARRAY_SIZE(vd56g3_mbus_codes))
+ i_bpp = 0;
+
+ if (sensor->is_mono)
+ j = 0;
+ else
+ j = 1 + (sensor->hflip_ctrl->val ? 1 : 0) +
+ (sensor->vflip_ctrl->val ? 2 : 0);
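+	/*
+	 * e.g. RGB variant, 10-bit output, hflip = 1, vflip = 0: j = 2,
+	 * which selects MEDIA_BUS_FMT_SRGGB10_1X10.
+	 */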
+
+ return vd56g3_mbus_codes[i_bpp][j];
+}
+
+static int vd56g3_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct vd56g3 *sensor = to_vd56g3(sd);
+
+ if (code->index >= ARRAY_SIZE(vd56g3_mbus_codes))
+ return -EINVAL;
+
+ code->code =
+ vd56g3_get_mbus_code(sensor, vd56g3_mbus_codes[code->index][0]);
+
+ return 0;
+}
+
+static int vd56g3_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(vd56g3_supported_modes))
+ return -EINVAL;
+
+ fse->min_width = vd56g3_supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = vd56g3_supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static void vd56g3_update_img_pad_format(struct vd56g3 *sensor,
+ const struct vd56g3_mode *mode,
+ u32 mbus_code,
+ struct v4l2_mbus_framefmt *mbus_fmt)
+{
+ mbus_fmt->width = mode->width;
+ mbus_fmt->height = mode->height;
+ mbus_fmt->code = vd56g3_get_mbus_code(sensor, mbus_code);
+ mbus_fmt->colorspace = V4L2_COLORSPACE_RAW;
+ mbus_fmt->field = V4L2_FIELD_NONE;
+ mbus_fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ mbus_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ mbus_fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+}
+
+static int vd56g3_set_pad_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sd_fmt)
+{
+ struct vd56g3 *sensor = to_vd56g3(sd);
+ const struct vd56g3_mode *new_mode;
+ struct v4l2_rect pad_crop;
+ unsigned int binning;
+
+ new_mode = v4l2_find_nearest_size(vd56g3_supported_modes,
+ ARRAY_SIZE(vd56g3_supported_modes),
+ width, height, sd_fmt->format.width,
+ sd_fmt->format.height);
+
+ vd56g3_update_img_pad_format(sensor, new_mode, sd_fmt->format.code,
+ &sd_fmt->format);
+ *v4l2_subdev_state_get_format(sd_state, sd_fmt->pad) = sd_fmt->format;
+
+ /* Compute and update crop rectangle (maximized via binning) */
+ binning = min(VD56G3_NATIVE_WIDTH / sd_fmt->format.width,
+ VD56G3_NATIVE_HEIGHT / sd_fmt->format.height);
+ binning = min(binning, 2U);
+ pad_crop.width = sd_fmt->format.width * binning;
+ pad_crop.height = sd_fmt->format.height * binning;
+ pad_crop.left = (VD56G3_NATIVE_WIDTH - pad_crop.width) / 2;
+ pad_crop.top = (VD56G3_NATIVE_HEIGHT - pad_crop.height) / 2;
+ *v4l2_subdev_state_get_crop(sd_state, sd_fmt->pad) = pad_crop;
+
+ /* Update controls in case of active state */
+ if (sd_fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ return vd56g3_update_controls(sensor);
+
+ return 0;
+}
+
+static int vd56g3_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *v4l2_subdev_state_get_crop(sd_state, 0);
+ break;
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = VD56G3_NATIVE_WIDTH;
+ sel->r.height = VD56G3_NATIVE_HEIGHT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vd56g3_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct v4l2_subdev_state *state;
+ const struct v4l2_mbus_framefmt *format;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ format = v4l2_subdev_state_get_format(state, pad);
+ v4l2_subdev_unlock_state(state);
+
+ fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
+ fd->num_entries = 1;
+ fd->entry[0].pixelcode = format->code;
+ fd->entry[0].stream = 0;
+ fd->entry[0].bus.csi2.vc = 0;
+ fd->entry[0].bus.csi2.dt = vd56g3_get_datatype(format->code);
+
+ return 0;
+}
+
+static int vd56g3_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct vd56g3 *sensor = to_vd56g3(sd);
+ const struct v4l2_mbus_framefmt *format =
+ v4l2_subdev_state_get_format(state, 0);
+ const struct v4l2_rect *crop = v4l2_subdev_state_get_crop(state, 0);
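+	/*
+	 * The CSI-2 D-PHY link is double data rate, so the per-lane bitrate
+	 * is twice the link frequency, e.g. 402 MHz on 2 lanes -> 804 Mbps
+	 * per lane.
+	 */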
+ unsigned int csi_mbps = ((sensor->nb_of_lane == 2) ?
+ VD56G3_LINK_FREQ_DEF_2LANES :
+ VD56G3_LINK_FREQ_DEF_1LANE) *
+ 2 / MEGA;
+ unsigned int binning;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(sensor->dev);
+ if (ret < 0)
+ return ret;
+
+ /* configure clocks */
+ cci_write(sensor->regmap, VD56G3_REG_EXT_CLOCK, sensor->xclk_freq,
+ &ret);
+ cci_write(sensor->regmap, VD56G3_REG_CLK_PLL_PREDIV, sensor->pll_prediv,
+ &ret);
+ cci_write(sensor->regmap, VD56G3_REG_CLK_SYS_PLL_MULT, sensor->pll_mult,
+ &ret);
+
+ /* configure output */
+ cci_write(sensor->regmap, VD56G3_REG_FORMAT_CTRL,
+ vd56g3_get_bpp(format->code), &ret);
+ cci_write(sensor->regmap, VD56G3_REG_OIF_CTRL, sensor->oif_ctrl, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_OIF_CSI_BITRATE, csi_mbps, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_OIF_IMG_CTRL,
+ vd56g3_get_datatype(format->code), &ret);
+ cci_write(sensor->regmap, VD56G3_REG_ISL_ENABLE, 0, &ret);
+
+ /* configure binning mode */
+ switch (crop->width / format->width) {
+ case 1:
+ default:
+ binning = READOUT_NORMAL;
+ break;
+ case 2:
+ binning = READOUT_DIGITAL_BINNING_X2;
+ break;
+ }
+ cci_write(sensor->regmap, VD56G3_REG_READOUT_CTRL, binning, &ret);
+
+ /* configure ROIs */
+ cci_write(sensor->regmap, VD56G3_REG_Y_START, crop->top, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_Y_END,
+ crop->top + crop->height - 1, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_OUT_ROI_X_START, crop->left, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_OUT_ROI_X_END,
+ crop->left + crop->width - 1, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_OUT_ROI_Y_START, 0, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_OUT_ROI_Y_END, crop->height - 1,
+ &ret);
+ cci_write(sensor->regmap, VD56G3_REG_AE_ROI_START_H, crop->left, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_AE_ROI_END_H,
+ crop->left + crop->width - 1, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_AE_ROI_START_V, 0, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_AE_ROI_END_V, crop->height - 1,
+ &ret);
+ if (ret)
+ goto rpm_put;
+
+ /* Setup default GPIO values; could be overridden by V4L2 ctrl setup */
+ ret = vd56g3_write_gpiox(sensor, GENMASK(VD56G3_NB_GPIOS - 1, 0));
+ if (ret)
+ goto rpm_put;
+
+ /* Apply settings from V4L2 ctrls */
+ ret = __v4l2_ctrl_handler_setup(&sensor->ctrl_handler);
+ if (ret)
+ goto rpm_put;
+
+ /* start streaming */
+ cci_write(sensor->regmap, VD56G3_REG_STBY, VD56G3_CMD_START_STREAM,
+ &ret);
+ vd56g3_poll_reg(sensor, VD56G3_REG_STBY, VD56G3_CMD_ACK, &ret);
+ vd56g3_wait_state(sensor, VD56G3_SYSTEM_FSM_STREAMING, &ret);
+ if (ret)
+ goto rpm_put;
+
+ /* some controls are locked during streaming */
+ __v4l2_ctrl_grab(sensor->hflip_ctrl, true);
+ __v4l2_ctrl_grab(sensor->vflip_ctrl, true);
+ __v4l2_ctrl_grab(sensor->patgen_ctrl, true);
+
+ return ret;
+
+rpm_put:
+ dev_err(sensor->dev, "Failed to start streaming\n");
+ pm_runtime_put_sync(sensor->dev);
+
+ return ret;
+}
+
+static int vd56g3_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct vd56g3 *sensor = to_vd56g3(sd);
+ int ret;
+
+	/* Retrieve the expo cluster to enable AE coldstart */
+ ret = vd56g3_read_expo_cluster(sensor, true);
+
+ cci_write(sensor->regmap, VD56G3_REG_STREAMING, VD56G3_CMD_STOP_STREAM,
+ &ret);
+ vd56g3_poll_reg(sensor, VD56G3_REG_STREAMING, VD56G3_CMD_ACK, &ret);
+ vd56g3_wait_state(sensor, VD56G3_SYSTEM_FSM_SW_STBY, &ret);
+
+ /* locked controls must be unlocked */
+ __v4l2_ctrl_grab(sensor->hflip_ctrl, false);
+ __v4l2_ctrl_grab(sensor->vflip_ctrl, false);
+ __v4l2_ctrl_grab(sensor->patgen_ctrl, false);
+
+ pm_runtime_mark_last_busy(sensor->dev);
+ pm_runtime_put_autosuspend(sensor->dev);
+
+ return ret;
+}
+
+static int vd56g3_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ unsigned int def_mode = VD56G3_DEFAULT_MODE;
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ .pad = 0,
+ .format = {
+ .code = vd56g3_mbus_codes[0][0],
+ .width = vd56g3_supported_modes[def_mode].width,
+ .height = vd56g3_supported_modes[def_mode].height,
+ },
+ };
+
+ return vd56g3_set_pad_fmt(sd, sd_state, &fmt);
+}
+
+static const struct v4l2_subdev_video_ops vd56g3_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_pad_ops vd56g3_pad_ops = {
+ .enum_mbus_code = vd56g3_enum_mbus_code,
+ .enum_frame_size = vd56g3_enum_frame_size,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = vd56g3_set_pad_fmt,
+ .get_selection = vd56g3_get_selection,
+ .get_frame_desc = vd56g3_get_frame_desc,
+ .enable_streams = vd56g3_enable_streams,
+ .disable_streams = vd56g3_disable_streams,
+};
+
+static const struct v4l2_subdev_ops vd56g3_subdev_ops = {
+ .video = &vd56g3_video_ops,
+ .pad = &vd56g3_pad_ops,
+};
+
+static const struct media_entity_operations vd56g3_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops vd56g3_internal_ops = {
+ .init_state = vd56g3_init_state,
+};
+
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
+
+static int vd56g3_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct vd56g3 *sensor = to_vd56g3(sd);
+ int ret;
+
+ /* power on */
+ ret = regulator_bulk_enable(ARRAY_SIZE(sensor->supplies),
+ sensor->supplies);
+ if (ret) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(sensor->xclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock: %d\n", ret);
+ goto disable_reg;
+ }
+
+ gpiod_set_value_cansleep(sensor->reset_gpio, 0);
+ usleep_range(3500, 4000);
+ ret = vd56g3_wait_state(sensor, VD56G3_SYSTEM_FSM_READY_TO_BOOT, NULL);
+ if (ret) {
+ dev_err(dev, "Sensor reset failed: %d\n", ret);
+ goto disable_clock;
+ }
+
+ /* boot sensor */
+ cci_write(sensor->regmap, VD56G3_REG_BOOT, VD56G3_CMD_BOOT, &ret);
+ vd56g3_poll_reg(sensor, VD56G3_REG_BOOT, VD56G3_CMD_ACK, &ret);
+ vd56g3_wait_state(sensor, VD56G3_SYSTEM_FSM_SW_STBY, &ret);
+ if (ret) {
+ dev_err(dev, "Sensor boot failed: %d\n", ret);
+ goto disable_clock;
+ }
+
+ return 0;
+
+disable_clock:
+ gpiod_set_value_cansleep(sensor->reset_gpio, 1);
+ clk_disable_unprepare(sensor->xclk);
+disable_reg:
+ regulator_bulk_disable(ARRAY_SIZE(sensor->supplies), sensor->supplies);
+
+ return ret;
+}
+
+static int vd56g3_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct vd56g3 *sensor = to_vd56g3(sd);
+
+ gpiod_set_value_cansleep(sensor->reset_gpio, 1);
+ clk_disable_unprepare(sensor->xclk);
+ regulator_bulk_disable(ARRAY_SIZE(sensor->supplies), sensor->supplies);
+
+ return 0;
+}
+
+static const struct dev_pm_ops vd56g3_pm_ops = {
+ SET_RUNTIME_PM_OPS(vd56g3_power_off, vd56g3_power_on, NULL)
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe and initialization
+ */
+
+static int vd56g3_check_csi_conf(struct vd56g3 *sensor,
+ struct fwnode_handle *endpoint)
+{
+ struct v4l2_fwnode_endpoint ep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
+ u32 phy_data_lanes[VD56G3_MAX_CSI_DATA_LANES] = { ~0, ~0 };
+ u8 n_lanes;
+ u64 frequency;
+ int p, l;
+ int ret = 0;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &ep);
+ if (ret)
+ return -EINVAL;
+
+ /* Check lanes number */
+ n_lanes = ep.bus.mipi_csi2.num_data_lanes;
+ if (n_lanes != 1 && n_lanes != 2) {
+ dev_err(sensor->dev, "Invalid data lane number: %d\n", n_lanes);
+ ret = -EINVAL;
+ goto done;
+ }
+ sensor->nb_of_lane = n_lanes;
+
+ /* Clock lane must be first */
+ if (ep.bus.mipi_csi2.clock_lane != 0) {
+ dev_err(sensor->dev, "Clock lane must be mapped to lane 0\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /*
+	 * Prepare the Output Interface conf based on the lane settings:
+	 * logical to physical lane conversion (+ pad the remaining slots)
+ */
+ for (l = 0; l < n_lanes; l++)
+ phy_data_lanes[ep.bus.mipi_csi2.data_lanes[l] - 1] = l;
+ for (p = 0; p < VD56G3_MAX_CSI_DATA_LANES; p++) {
+ if (phy_data_lanes[p] != ~0)
+ continue;
+ phy_data_lanes[p] = l;
+ l++;
+ }
+ sensor->oif_ctrl = n_lanes |
+ (ep.bus.mipi_csi2.lane_polarities[0] << 3) |
+ ((phy_data_lanes[0]) << 4) |
+ (ep.bus.mipi_csi2.lane_polarities[1] << 6) |
+ ((phy_data_lanes[1]) << 7) |
+ (ep.bus.mipi_csi2.lane_polarities[2] << 9);
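+	/*
+	 * e.g. data-lanes = <2 1> with default polarities: logical lane 0
+	 * sits on physical lane 2 and logical lane 1 on physical lane 1,
+	 * so oif_ctrl = 2 | (1 << 4) | (0 << 7) = 0x12.
+	 */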
+
+ /* Check link frequency */
+ if (!ep.nr_of_link_frequencies) {
+ dev_err(sensor->dev, "link-frequency not found in DT\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ frequency = (n_lanes == 2) ? VD56G3_LINK_FREQ_DEF_2LANES :
+ VD56G3_LINK_FREQ_DEF_1LANE;
+ if (ep.nr_of_link_frequencies != 1 ||
+ ep.link_frequencies[0] != frequency) {
+ dev_err(sensor->dev, "Link frequency not supported: %lld\n",
+ ep.link_frequencies[0]);
+ ret = -EINVAL;
+ goto done;
+ }
+
+done:
+ v4l2_fwnode_endpoint_free(&ep);
+
+ return ret;
+}
+
+static int vd56g3_parse_dt_gpios_array(struct vd56g3 *sensor, char *prop_name,
+ u32 *array, unsigned int *nb)
+{
+ struct device *dev = sensor->dev;
+ unsigned int i;
+ int ret;
+
+ if (!device_property_present(dev, prop_name)) {
+ *nb = 0;
+ return 0;
+ }
+
+ ret = device_property_count_u32(dev, prop_name);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read %s count\n", prop_name);
+ return ret;
+ }
+
+ *nb = ret;
+ ret = device_property_read_u32_array(dev, prop_name, array, *nb);
+ if (ret) {
+ dev_err(dev, "Failed to read %s prop\n", prop_name);
+ return ret;
+ }
+
+ for (i = 0; i < *nb; i++) {
+ if (array[i] >= VD56G3_NB_GPIOS) {
+ dev_err(dev, "Invalid GPIO: %d\n", array[i]);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int vd56g3_parse_dt_gpios(struct vd56g3 *sensor)
+{
+ u32 led_gpios[VD56G3_NB_GPIOS];
+ unsigned int nb_gpios_leds;
+ unsigned int i;
+ int ret;
+
+ /* Initialize GPIOs to default */
+ for (i = 0; i < VD56G3_NB_GPIOS; i++)
+ sensor->gpios[i] = VD56G3_GPIOX_GPIO_IN;
+ sensor->ext_leds_mask = 0;
+
+ /* Take into account optional 'st,leds' output for GPIOs */
+ ret = vd56g3_parse_dt_gpios_array(sensor, "st,leds", led_gpios,
+ &nb_gpios_leds);
+ if (ret)
+ return ret;
+ for (i = 0; i < nb_gpios_leds; i++) {
+ sensor->gpios[led_gpios[i]] = VD56G3_GPIOX_STROBE_MODE;
+ set_bit(led_gpios[i], &sensor->ext_leds_mask);
+ }
+
+ return 0;
+}
+
+static int vd56g3_parse_dt(struct vd56g3 *sensor)
+{
+ struct fwnode_handle *endpoint;
+ int ret;
+
+ endpoint = fwnode_graph_get_endpoint_by_id(dev_fwnode(sensor->dev), 0,
+ 0, 0);
+ if (!endpoint) {
+ dev_err(sensor->dev, "Endpoint node not found\n");
+ return -EINVAL;
+ }
+
+ ret = vd56g3_check_csi_conf(sensor, endpoint);
+ fwnode_handle_put(endpoint);
+ if (ret)
+ return ret;
+
+ return vd56g3_parse_dt_gpios(sensor);
+}
+
+static int vd56g3_get_regulators(struct vd56g3 *sensor)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(sensor->supplies); i++)
+ sensor->supplies[i].supply = vd56g3_supply_names[i];
+
+ return devm_regulator_bulk_get(sensor->dev,
+ ARRAY_SIZE(sensor->supplies),
+ sensor->supplies);
+}
+
+static int vd56g3_prepare_clock_tree(struct vd56g3 *sensor)
+{
+ const unsigned int predivs[] = { 1, 2, 4 };
+ u32 pll_out;
+ int i;
+
+	/* External clock must be in the [6 MHz - 27 MHz] range */
+ if (sensor->xclk_freq < VD56G3_XCLK_FREQ_MIN ||
+ sensor->xclk_freq > VD56G3_XCLK_FREQ_MAX) {
+		dev_err(sensor->dev,
+			"Only the 6 MHz - 27 MHz clock range is supported, got %lu MHz\n",
+			sensor->xclk_freq / HZ_PER_MHZ);
+ return -EINVAL;
+ }
+
+	/* PLL input must be in the [6 MHz, 12 MHz) range */
+ for (i = 0; i < ARRAY_SIZE(predivs); i++) {
+ sensor->pll_prediv = predivs[i];
+ if (sensor->xclk_freq / sensor->pll_prediv < 12 * HZ_PER_MHZ)
+ break;
+ }
+
+	/* The PLL output clock must be as close as possible to 804 MHz */
+ sensor->pll_mult = (VD56G3_TARGET_PLL * sensor->pll_prediv +
+ sensor->xclk_freq / 2) /
+ sensor->xclk_freq;
+ pll_out = sensor->xclk_freq * sensor->pll_mult / sensor->pll_prediv;
+
+	/* Target pixel clock for the standard 10-bit ADC mode: 160.8 MHz */
+ sensor->pixel_clock = pll_out / VD56G3_VT_CLOCK_DIV;
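+	/*
+	 * e.g. with a 24 MHz external clock: prediv = 4 (6 MHz PLL input),
+	 * mult = 134, PLL output = 24 MHz * 134 / 4 = 804 MHz and
+	 * pixel clock = 804 MHz / 5 = 160.8 MHz.
+	 */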
+
+ return 0;
+}
+
+static int vd56g3_detect(struct vd56g3 *sensor)
+{
+ struct device *dev = sensor->dev;
+ unsigned int model;
+ u64 model_id;
+ u64 device_revision;
+ u64 optical_revision;
+ int ret = 0;
+
+ model = (uintptr_t)device_get_match_data(dev);
+
+ ret = cci_read(sensor->regmap, VD56G3_REG_MODEL_ID, &model_id, NULL);
+ if (ret)
+ return ret;
+
+ if (model_id != VD56G3_MODEL_ID) {
+ dev_err(dev, "Unsupported sensor id: %x\n", (u16)model_id);
+ return -ENODEV;
+ }
+
+ ret = cci_read(sensor->regmap, VD56G3_REG_REVISION, &device_revision,
+ NULL);
+ if (ret)
+ return ret;
+
+ if ((device_revision >> 8) != VD56G3_REVISION_CUT3) {
+ dev_err(dev, "Unsupported version: %x\n", (u16)device_revision);
+ return -ENODEV;
+ }
+
+ ret = cci_read(sensor->regmap, VD56G3_REG_OPTICAL_REVISION,
+ &optical_revision, NULL);
+ if (ret)
+ return ret;
+
+ sensor->is_mono =
+ ((optical_revision & 1) == VD56G3_OPTICAL_REVISION_MONO);
+ if ((sensor->is_mono && model == VD56G3_MODEL_VD66GY) ||
+ (!sensor->is_mono && model == VD56G3_MODEL_VD56G3)) {
+ dev_err(dev, "Found %s sensor, while %s model is defined in DT\n",
+ (sensor->is_mono) ? "Mono" : "Bayer",
+ (model == VD56G3_MODEL_VD56G3) ? "vd56g3" : "vd66gy");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int vd56g3_subdev_init(struct vd56g3 *sensor)
+{
+ struct v4l2_subdev_state *state;
+ int ret;
+
+ /* Init remaining sub device ops */
+ sensor->sd.internal_ops = &vd56g3_internal_ops;
+ sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sensor->sd.entity.ops = &vd56g3_subdev_entity_ops;
+
+ /* Init source pad */
+ sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad);
+ if (ret) {
+ dev_err(sensor->dev, "Failed to init media entity: %d\n", ret);
+ return ret;
+ }
+
+ /* Init controls */
+ ret = vd56g3_init_controls(sensor);
+ if (ret) {
+ dev_err(sensor->dev, "Controls initialization failed: %d\n",
+ ret);
+ goto err_media;
+ }
+
+	/* Init vd56g3 struct: default resolution + raw8 */
+ sensor->sd.state_lock = sensor->ctrl_handler.lock;
+ ret = v4l2_subdev_init_finalize(&sensor->sd);
+ if (ret) {
+ dev_err(sensor->dev, "Subdev init failed: %d\n", ret);
+ goto err_ctrls;
+ }
+
+ /* Update controls according to the resolution set */
+ state = v4l2_subdev_lock_and_get_active_state(&sensor->sd);
+ ret = vd56g3_update_controls(sensor);
+ v4l2_subdev_unlock_state(state);
+ if (ret) {
+ dev_err(sensor->dev, "Controls update failed: %d\n", ret);
+ goto err_ctrls;
+ }
+
+ return 0;
+
+err_ctrls:
+ v4l2_ctrl_handler_free(sensor->sd.ctrl_handler);
+
+err_media:
+ media_entity_cleanup(&sensor->sd.entity);
+
+ return ret;
+}
+
+static void vd56g3_subdev_cleanup(struct vd56g3 *sensor)
+{
+ v4l2_async_unregister_subdev(&sensor->sd);
+ v4l2_subdev_cleanup(&sensor->sd);
+ media_entity_cleanup(&sensor->sd.entity);
+ v4l2_ctrl_handler_free(sensor->sd.ctrl_handler);
+}
+
+static int vd56g3_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct vd56g3 *sensor;
+ int ret;
+
+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&sensor->sd, client, &vd56g3_subdev_ops);
+ sensor->dev = dev;
+
+ ret = vd56g3_parse_dt(sensor);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to parse Device Tree\n");
+
+	/* Get (and check) resources: regulators, ext clock, reset gpio */
+ ret = vd56g3_get_regulators(sensor);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ sensor->xclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(sensor->xclk))
+ return dev_err_probe(dev, PTR_ERR(sensor->xclk),
+ "Failed to get xclk\n");
+ sensor->xclk_freq = clk_get_rate(sensor->xclk);
+ ret = vd56g3_prepare_clock_tree(sensor);
+ if (ret)
+ return ret;
+
+ sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(sensor->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(sensor->reset_gpio),
+ "Failed to get reset gpio\n");
+
+ sensor->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(sensor->regmap))
+ return dev_err_probe(dev, PTR_ERR(sensor->regmap),
+ "Failed to init regmap\n");
+
+ /* Power ON */
+ ret = vd56g3_power_on(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Sensor power on failed\n");
+
+ /* Enable PM runtime with autosuspend (sensor being ON, set active) */
+ pm_runtime_set_active(dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+
+ /* Check HW model/version */
+ ret = vd56g3_detect(sensor);
+ if (ret) {
+ dev_err(dev, "Sensor detect failed: %d\n", ret);
+ goto err_power_off;
+ }
+
+ /* Initialize & register subdev (v4l2_i2c subdev already initialized) */
+ ret = vd56g3_subdev_init(sensor);
+ if (ret) {
+ dev_err(dev, "V4l2 init failed: %d\n", ret);
+ goto err_power_off;
+ }
+
+ ret = v4l2_async_register_subdev(&sensor->sd);
+ if (ret) {
+ dev_err(dev, "Async subdev register failed: %d\n", ret);
+ goto err_subdev;
+ }
+
+	/* The sensor can now be powered off (after the autosuspend delay) */
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ dev_dbg(dev, "Successfully probe %s sensor\n",
+ (sensor->is_mono) ? "vd56g3" : "vd66gy");
+
+ return 0;
+
+err_subdev:
+ vd56g3_subdev_cleanup(sensor);
+err_power_off:
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ vd56g3_power_off(dev);
+
+ return ret;
+}
+
+static void vd56g3_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct vd56g3 *sensor = to_vd56g3(sd);
+
+ vd56g3_subdev_cleanup(sensor);
+
+ pm_runtime_disable(sensor->dev);
+ if (!pm_runtime_status_suspended(sensor->dev))
+ vd56g3_power_off(sensor->dev);
+ pm_runtime_set_suspended(sensor->dev);
+ pm_runtime_dont_use_autosuspend(sensor->dev);
+}
+
+static const struct of_device_id vd56g3_dt_ids[] = {
+ { .compatible = "st,vd56g3", .data = (void *)VD56G3_MODEL_VD56G3 },
+ { .compatible = "st,vd66gy", .data = (void *)VD56G3_MODEL_VD66GY },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vd56g3_dt_ids);
+
+static struct i2c_driver vd56g3_i2c_driver = {
+ .driver = {
+ .name = "vd56g3",
+ .of_match_table = vd56g3_dt_ids,
+ .pm = &vd56g3_pm_ops,
+ },
+ .probe = vd56g3_probe,
+ .remove = vd56g3_remove,
+};
+
+module_i2c_driver(vd56g3_i2c_driver);
+
+MODULE_AUTHOR("Benjamin Mugnier <benjamin.mugnier@foss.st.com>");
+MODULE_AUTHOR("Mickael Guene <mickael.guene@st.com>");
+MODULE_AUTHOR("Sylvain Petinot <sylvain.petinot@foss.st.com>");
+MODULE_DESCRIPTION("ST VD56G3 sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index 7f65aa609388..eebb16c58f3d 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -15,7 +15,6 @@ if MEDIA_CAMERA_SUPPORT
source "drivers/media/pci/mgb4/Kconfig"
source "drivers/media/pci/solo6x10/Kconfig"
-source "drivers/media/pci/sta2x11/Kconfig"
source "drivers/media/pci/tw5864/Kconfig"
source "drivers/media/pci/tw68/Kconfig"
source "drivers/media/pci/tw686x/Kconfig"
diff --git a/drivers/media/pci/Makefile b/drivers/media/pci/Makefile
index f18c7e15abe3..02763ad88511 100644
--- a/drivers/media/pci/Makefile
+++ b/drivers/media/pci/Makefile
@@ -22,8 +22,6 @@ obj-y += ttpci/ \
# Please keep it alphabetically sorted by Kconfig name
# (e. g. LC_ALL=C sort Makefile)
-obj-$(CONFIG_STA2X11_VIP) += sta2x11/
-
obj-$(CONFIG_VIDEO_BT848) += bt8xx/
obj-$(CONFIG_VIDEO_COBALT) += cobalt/
obj-$(CONFIG_VIDEO_CX18) += cx18/
diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
index 1cb745855600..83e682e1a4b7 100644
--- a/drivers/media/pci/intel/ipu-bridge.c
+++ b/drivers/media/pci/intel/ipu-bridge.c
@@ -66,6 +66,8 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = {
IPU_SENSOR_CONFIG("INT347E", 1, 319200000),
/* Hynix Hi-556 */
IPU_SENSOR_CONFIG("INT3537", 1, 437000000),
+ /* Lontium lt6911uxe */
+ IPU_SENSOR_CONFIG("INTC10C5", 0),
/* Omnivision OV01A10 / OV01A1S */
IPU_SENSOR_CONFIG("OVTI01A0", 1, 400000000),
IPU_SENSOR_CONFIG("OVTI01AS", 1, 400000000),
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 0c365eb59085..16fde96c9fb2 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -1702,14 +1702,13 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
dev_info(dev, "device 0x%x (rev: 0x%x)\n",
pci_dev->device, pci_dev->revision);
- r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
+ cio2->base = pcim_iomap_region(pci_dev, CIO2_PCI_BAR, CIO2_NAME);
+ r = PTR_ERR_OR_ZERO(cio2->base);
if (r) {
dev_err(dev, "failed to remap I/O memory (%d)\n", r);
return -ENODEV;
}
- cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
-
pci_set_drvdata(pci_dev, cio2);
pci_set_master(pci_dev);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-bus.c b/drivers/media/pci/intel/ipu6/ipu6-bus.c
index 37d88ddb6ee7..5cee2748983b 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-bus.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-bus.c
@@ -82,7 +82,7 @@ static void ipu6_bus_release(struct device *dev)
struct ipu6_bus_device *
ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent,
- void *pdata, struct ipu6_buttress_ctrl *ctrl,
+ void *pdata, const struct ipu6_buttress_ctrl *ctrl,
char *name)
{
struct auxiliary_device *auxdev;
diff --git a/drivers/media/pci/intel/ipu6/ipu6-bus.h b/drivers/media/pci/intel/ipu6/ipu6-bus.h
index bb4926dfdf08..a08c5468d536 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-bus.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-bus.h
@@ -15,8 +15,6 @@
struct firmware;
struct pci_dev;
-#define IPU6_BUS_NAME IPU6_NAME "-bus"
-
struct ipu6_buttress_ctrl;
struct ipu6_bus_device {
@@ -27,8 +25,7 @@ struct ipu6_bus_device {
void *pdata;
struct ipu6_mmu *mmu;
struct ipu6_device *isp;
- struct ipu6_buttress_ctrl *ctrl;
- u64 dma_mask;
+ const struct ipu6_buttress_ctrl *ctrl;
const struct firmware *fw;
struct sg_table fw_sgt;
u64 *pkg_dir;
@@ -50,7 +47,7 @@ struct ipu6_auxdrv_data {
struct ipu6_bus_device *
ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent,
- void *pdata, struct ipu6_buttress_ctrl *ctrl,
+ void *pdata, const struct ipu6_buttress_ctrl *ctrl,
char *name);
int ipu6_bus_add_device(struct ipu6_bus_device *adev);
void ipu6_bus_del_devices(struct pci_dev *pdev);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/drivers/media/pci/intel/ipu6/ipu6-buttress.c
index d8db5aa5d528..103386c4f6ae 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-buttress.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-buttress.c
@@ -443,8 +443,8 @@ irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr)
return ret;
}
-int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl,
- bool on)
+int ipu6_buttress_power(struct device *dev,
+ const struct ipu6_buttress_ctrl *ctrl, bool on)
{
struct ipu6_device *isp = to_ipu6_bus_device(dev)->isp;
u32 pwr_sts, val;
@@ -478,8 +478,6 @@ int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl,
dev_err(&isp->pdev->dev,
"Change power status timeout with 0x%x\n", val);
- ctrl->started = !ret && on;
-
mutex_unlock(&isp->buttress.power_mutex);
return ret;
diff --git a/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/drivers/media/pci/intel/ipu6/ipu6-buttress.h
index 482978c2a09d..51e5ad48db82 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-buttress.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-buttress.h
@@ -26,7 +26,6 @@ struct ipu6_buttress_ctrl {
u32 freq_ctl, pwr_sts_shift, pwr_sts_mask, pwr_sts_on, pwr_sts_off;
unsigned int ratio;
unsigned int qos_floor;
- bool started;
};
struct ipu6_buttress_ipc {
@@ -66,8 +65,8 @@ int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys,
struct sg_table *sgt);
void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys,
struct sg_table *sgt);
-int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl,
- bool on);
+int ipu6_buttress_power(struct device *dev,
+ const struct ipu6_buttress_ctrl *ctrl, bool on);
bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp);
int ipu6_buttress_authenticate(struct ipu6_device *isp);
int ipu6_buttress_reset_authentication(struct ipu6_device *isp);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-dma.c b/drivers/media/pci/intel/ipu6/ipu6-dma.c
index 1ca60ca79dba..7296373d36b0 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-dma.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-dma.c
@@ -172,7 +172,7 @@ void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size,
count = PHYS_PFN(size);
iova = alloc_iova(&mmu->dmap->iovad, count,
- PHYS_PFN(dma_get_mask(dev)), 0);
+ PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
if (!iova)
goto out_kfree;
@@ -398,7 +398,7 @@ int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist,
nents, npages);
iova = alloc_iova(&mmu->dmap->iovad, npages,
- PHYS_PFN(dma_get_mask(dev)), 0);
+ PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
if (!iova)
return 0;
diff --git a/drivers/media/pci/intel/ipu6/ipu6-dma.h b/drivers/media/pci/intel/ipu6/ipu6-dma.h
index 2882850d9366..ae9b9a5df57f 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-dma.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-dma.h
@@ -4,9 +4,6 @@
#ifndef IPU6_DMA_H
#define IPU6_DMA_H
-#include <linux/dma-map-ops.h>
-#include <linux/dma-mapping.h>
-#include <linux/iova.h>
#include <linux/iova.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h
index bc8594c94f99..ce8eed91065c 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h
@@ -14,7 +14,6 @@ struct v4l2_mbus_frame_desc_entry;
struct ipu6_isys_video;
struct ipu6_isys;
-struct ipu6_isys_csi2_pdata;
struct ipu6_isys_stream;
#define NR_OF_CSI2_VC 16
@@ -37,7 +36,6 @@ struct ipu6_isys_stream;
struct ipu6_isys_csi2 {
struct ipu6_isys_subdev asd;
- struct ipu6_isys_csi2_pdata *pdata;
struct ipu6_isys *isys;
struct ipu6_isys_video av[NR_OF_CSI2_SRC_PADS];
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
index 72f5f987ef48..aa2cf7287477 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
@@ -652,10 +652,8 @@ static void stop_streaming(struct vb2_queue *q)
}
static unsigned int
-get_sof_sequence_by_timestamp(struct ipu6_isys_stream *stream,
- struct ipu6_fw_isys_resp_info_abi *info)
+get_sof_sequence_by_timestamp(struct ipu6_isys_stream *stream, u64 time)
{
- u64 time = (u64)info->timestamp[1] << 32 | info->timestamp[0];
struct ipu6_isys *isys = stream->isys;
struct device *dev = &isys->adev->auxdev.dev;
unsigned int i;
@@ -681,8 +679,7 @@ get_sof_sequence_by_timestamp(struct ipu6_isys_stream *stream,
return 0;
}
-static u64 get_sof_ns_delta(struct ipu6_isys_video *av,
- struct ipu6_fw_isys_resp_info_abi *info)
+static u64 get_sof_ns_delta(struct ipu6_isys_video *av, u64 timestamp)
{
struct ipu6_bus_device *adev = av->isys->adev;
struct ipu6_device *isp = adev->isp;
@@ -692,13 +689,13 @@ static u64 get_sof_ns_delta(struct ipu6_isys_video *av,
if (!tsc_now)
return 0;
- delta = tsc_now - ((u64)info->timestamp[1] << 32 | info->timestamp[0]);
+ delta = tsc_now - timestamp;
return ipu6_buttress_tsc_ticks_to_ns(delta, isp);
}
-void ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib,
- struct ipu6_fw_isys_resp_info_abi *info)
+static void
+ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib, u64 time)
{
struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
@@ -709,8 +706,8 @@ void ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib,
u64 ns;
u32 sequence;
- ns = ktime_get_ns() - get_sof_ns_delta(av, info);
- sequence = get_sof_sequence_by_timestamp(stream, info);
+ ns = ktime_get_ns() - get_sof_ns_delta(av, time);
+ sequence = get_sof_sequence_by_timestamp(stream, time);
vbuf->vb2_buf.timestamp = ns;
vbuf->sequence = sequence;
@@ -721,7 +718,7 @@ void ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib,
vbuf->vb2_buf.timestamp);
}
-void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib)
+static void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib)
{
struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib);
@@ -737,10 +734,11 @@ void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib)
}
}
-void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
- struct ipu6_fw_isys_resp_info_abi *info)
+static void
+ipu6_stream_buf_ready(struct ipu6_isys_stream *stream, u8 pin_id, u32 pin_addr,
+ u64 time, bool error_check)
{
- struct ipu6_isys_queue *aq = stream->output_pins[info->pin_id].aq;
+ struct ipu6_isys_queue *aq = stream->output_pins_queue[pin_id];
struct ipu6_isys *isys = stream->isys;
struct device *dev = &isys->adev->auxdev.dev;
struct ipu6_isys_buffer *ib;
@@ -766,7 +764,7 @@ void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
ivb = vb2_buffer_to_ipu6_isys_video_buffer(vvb);
addr = ivb->dma_addr;
- if (info->pin.addr != addr) {
+ if (pin_addr != addr) {
if (first)
dev_err(dev, "Unexpected buffer address %pad\n",
&addr);
@@ -774,8 +772,7 @@ void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
continue;
}
- if (info->error_info.error ==
- IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO) {
+ if (error_check) {
/*
* Check for error message:
* 'IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO'
@@ -790,18 +787,27 @@ void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
list_del(&ib->head);
spin_unlock_irqrestore(&aq->lock, flags);
- ipu6_isys_buf_calc_sequence_time(ib, info);
+ ipu6_isys_buf_calc_sequence_time(ib, time);
ipu6_isys_queue_buf_done(ib);
return;
}
- dev_err(dev, "Failed to find a matching video buffer");
+ dev_err(dev, "Failed to find a matching video buffer\n");
spin_unlock_irqrestore(&aq->lock, flags);
}
+void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
+ struct ipu6_fw_isys_resp_info_abi *info)
+{
+ u64 time = (u64)info->timestamp[1] << 32 | info->timestamp[0];
+ bool err = info->error_info.error == IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO;
+
+ ipu6_stream_buf_ready(stream, info->pin_id, info->pin.addr, time, err);
+}
+
static const struct vb2_ops ipu6_isys_queue_ops = {
.queue_setup = ipu6_isys_queue_setup,
.buf_init = ipu6_isys_buf_init,
@@ -835,7 +841,6 @@ int ipu6_isys_queue_init(struct ipu6_isys_queue *aq)
if (ret)
return ret;
- aq->dev = &adev->auxdev.dev;
aq->vbq.dev = &adev->isp->pdev->dev;
spin_lock_init(&aq->lock);
INIT_LIST_HEAD(&aq->active);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h
index fe8fc796a58f..844dfda15ab6 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h
@@ -20,11 +20,7 @@ struct ipu6_isys_stream;
struct ipu6_isys_queue {
struct vb2_queue vbq;
struct list_head node;
- struct device *dev;
- /*
- * @lock: serialise access to queued and pre_streamon_queued
- */
- spinlock_t lock;
+ spinlock_t lock; /* Protects active and incoming lists */
struct list_head active;
struct list_head incoming;
unsigned int fw_output;
@@ -69,10 +65,6 @@ void
ipu6_isys_buf_to_fw_frame_buf(struct ipu6_fw_isys_frame_buff_set_abi *set,
struct ipu6_isys_stream *stream,
struct ipu6_isys_buffer_list *bl);
-void
-ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib,
- struct ipu6_fw_isys_resp_info_abi *info);
-void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib);
void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
struct ipu6_fw_isys_resp_info_abi *info);
int ipu6_isys_queue_init(struct ipu6_isys_queue *aq);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h b/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h
index 9ef8d95464f5..268dfa01e903 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h
@@ -37,10 +37,6 @@ int ipu6_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum
*code);
-int ipu6_isys_subdev_link_validate(struct v4l2_subdev *sd,
- struct media_link *link,
- struct v4l2_subdev_format *source_fmt,
- struct v4l2_subdev_format *sink_fmt);
u32 ipu6_isys_get_src_stream_by_src_pad(struct v4l2_subdev *sd, u32 pad);
int ipu6_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream,
struct v4l2_mbus_framefmt *format);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
index 959869a88556..24a2ef93474c 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
@@ -241,7 +241,7 @@ static void ipu6_isys_try_fmt_cap(struct ipu6_isys_video *av, u32 type,
else
*bytesperline = DIV_ROUND_UP(*width * pfmt->bpp, BITS_PER_BYTE);
- *bytesperline = ALIGN(*bytesperline, av->isys->line_align);
+ *bytesperline = ALIGN(*bytesperline, 64);
/*
* (height + 1) * bytesperline due to a hardware issue: the DMA unit
@@ -486,8 +486,7 @@ static int ipu6_isys_fw_pin_cfg(struct ipu6_isys_video *av,
output_pins = cfg->nof_output_pins++;
aq->fw_output = output_pins;
- stream->output_pins[output_pins].pin_ready = ipu6_isys_queue_buf_ready;
- stream->output_pins[output_pins].aq = aq;
+ stream->output_pins_queue[output_pins] = aq;
output_pin = &cfg->output_pins[output_pins];
output_pin->input_pin_id = input_pins;
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-video.h b/drivers/media/pci/intel/ipu6/ipu6-isys-video.h
index 1d945be2b879..1dd36f2a077e 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-video.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-video.h
@@ -37,12 +37,6 @@ struct sequence_info {
u64 timestamp;
};
-struct output_pin_data {
- void (*pin_ready)(struct ipu6_isys_stream *stream,
- struct ipu6_fw_isys_resp_info_abi *info);
- struct ipu6_isys_queue *aq;
-};
-
/*
* Align with firmware stream. Each stream represents a CSI virtual channel.
* May map to multiple video devices
@@ -68,7 +62,7 @@ struct ipu6_isys_stream {
struct completion stream_stop_completion;
struct ipu6_isys *isys;
- struct output_pin_data output_pins[IPU6_ISYS_OUTPUT_PINS];
+ struct ipu6_isys_queue *output_pins_queue[IPU6_ISYS_OUTPUT_PINS];
int error;
u8 vc;
};
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.c b/drivers/media/pci/intel/ipu6/ipu6-isys.c
index 8df1d83a74b5..fc0ec0a4b8f5 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys.c
@@ -1089,7 +1089,6 @@ static int isys_probe(struct auxiliary_device *auxdev,
INIT_LIST_HEAD(&isys->framebuflist);
INIT_LIST_HEAD(&isys->framebuflist_fw);
- isys->line_align = IPU6_ISYS_2600_MEM_LINE_ALIGN;
isys->icache_prefetch = 0;
dev_set_drvdata(&auxdev->dev, isys);
@@ -1294,12 +1293,11 @@ static int isys_isr_one(struct ipu6_bus_device *adev)
*/
ipu6_put_fw_msg_buf(ipu6_bus_get_drvdata(adev), resp->buf_id);
if (resp->pin_id < IPU6_ISYS_OUTPUT_PINS &&
- stream->output_pins[resp->pin_id].pin_ready)
- stream->output_pins[resp->pin_id].pin_ready(stream,
- resp);
+ stream->output_pins_queue[resp->pin_id])
+ ipu6_isys_queue_buf_ready(stream, resp);
else
dev_warn(&adev->auxdev.dev,
- "%d:No data pin ready handler for pin id %d\n",
+ "%d:No queue for pin id %d\n",
resp->stream_handle, resp->pin_id);
if (csi2)
ipu6_isys_csi2_error(csi2);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.h b/drivers/media/pci/intel/ipu6/ipu6-isys.h
index 610b60e69152..f488e782c26e 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys.h
@@ -29,8 +29,6 @@ struct ipu6_bus_device;
IPU6_ISYS_UNISPART_IRQ_CSI0 | \
IPU6_ISYS_UNISPART_IRQ_CSI1)
-#define IPU6_ISYS_2600_MEM_LINE_ALIGN 64
-
/*
* Current message queue configuration. These must be big enough
* so that they never gets full. Queues are located in system memory
@@ -118,7 +116,6 @@ struct sensor_async_sd {
* @streams: streams per firmware stream ID
* @fwcom: fw communication layer private pointer
* or optional external library private pointer
- * @line_align: line alignment in memory
* @phy_termcal_val: the termination calibration value, only used for DWC PHY
* @need_reset: Isys requires d0i0->i3 transition
* @ref_count: total number of callers fw open
@@ -140,7 +137,6 @@ struct ipu6_isys {
struct ipu6_isys_stream streams[IPU6_ISYS_MAX_STREAMS];
int streams_ref_count[IPU6_ISYS_MAX_STREAMS];
void *fwcom;
- unsigned int line_align;
u32 phy_termcal_val;
bool need_reset;
bool icache_prefetch;
diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c
index 277af7cda8ee..1f4f20b9c94d 100644
--- a/drivers/media/pci/intel/ipu6/ipu6.c
+++ b/drivers/media/pci/intel/ipu6/ipu6.c
@@ -464,11 +464,6 @@ static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver)
{
int ret;
- /* disable IPU6 PCI ATS on mtl ES2 */
- if (is_ipu6ep_mtl(hw_ver) && boot_cpu_data.x86_stepping == 0x2 &&
- pci_ats_supported(dev))
- pci_disable_ats(dev);
-
/* No PCI msi capability for IPU6EP */
if (is_ipu6ep(hw_ver) || is_ipu6ep_mtl(hw_ver)) {
/* likely do nothing as msi not enabled by default */
@@ -525,11 +520,11 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
phys = pci_resource_start(pdev, IPU6_PCI_BAR);
dev_dbg(dev, "IPU6 PCI bar[%u] = %pa\n", IPU6_PCI_BAR, &phys);
- ret = pcim_iomap_regions(pdev, 1 << IPU6_PCI_BAR, pci_name(pdev));
- if (ret)
- return dev_err_probe(dev, ret, "Failed to I/O mem remapping\n");
+ isp->base = pcim_iomap_region(pdev, IPU6_PCI_BAR, IPU6_NAME);
+ if (IS_ERR(isp->base))
+ return dev_err_probe(dev, PTR_ERR(isp->base),
+ "Failed to I/O mem remapping\n");
- isp->base = pcim_iomap_table(pdev)[IPU6_PCI_BAR];
pci_set_drvdata(pdev, isp);
pci_set_master(pdev);
diff --git a/drivers/media/pci/mgb4/mgb4_vin.c b/drivers/media/pci/mgb4/mgb4_vin.c
index 434eaf0440e2..989e93f67f75 100644
--- a/drivers/media/pci/mgb4/mgb4_vin.c
+++ b/drivers/media/pci/mgb4/mgb4_vin.c
@@ -641,7 +641,14 @@ static int vidioc_query_dv_timings(struct file *file, void *fh,
static int vidioc_enum_dv_timings(struct file *file, void *fh,
struct v4l2_enum_dv_timings *timings)
{
- return v4l2_enum_dv_timings_cap(timings, &video_timings_cap, NULL, NULL);
+ struct mgb4_vin_dev *vindev = video_drvdata(file);
+
+ if (timings->index != 0)
+ return -EINVAL;
+ if (get_timings(vindev, &timings->timings) < 0)
+ return -ENODATA;
+
+ return 0;
}
static int vidioc_dv_timings_cap(struct file *file, void *fh,
@@ -749,14 +756,14 @@ static void signal_change(struct work_struct *work)
u32 width = resolution >> 16;
u32 height = resolution & 0xFFFF;
- if (timings->width != width || timings->height != height) {
- static const struct v4l2_event ev = {
- .type = V4L2_EVENT_SOURCE_CHANGE,
- .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
- };
+ static const struct v4l2_event ev = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
- v4l2_event_queue(&vindev->vdev, &ev);
+ v4l2_event_queue(&vindev->vdev, &ev);
+ if (timings->width != width || timings->height != height) {
if (vb2_is_streaming(&vindev->queue))
vb2_queue_error(&vindev->queue);
}
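Two behavioural changes here: vidioc_enum_dv_timings() now reports only the single
currently detected timing instead of the whole capability range, and signal_change()
queues a V4L2_EVENT_SOURCE_CHANGE event on every detected change, erroring the vb2
queue only when the resolution really differs while streaming. An illustrative
user-space consumer of that event (error handling omitted, assuming a blocking fd):

    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static void wait_for_source_change(int fd)
    {
            struct v4l2_event_subscription sub = {
                    .type = V4L2_EVENT_SOURCE_CHANGE,
            };
            struct v4l2_event ev;

            ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
            ioctl(fd, VIDIOC_DQEVENT, &ev);  /* blocks until an event is queued */
            if (ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION) {
                    /* renegotiate timings and restart streaming */
            }
    }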
diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c
index 246f73b8a9e7..c55aa782b72c 100644
--- a/drivers/media/pci/pt3/pt3.c
+++ b/drivers/media/pci/pt3/pt3.c
@@ -692,6 +692,7 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u8 rev;
u32 ver;
int i, ret;
+ void __iomem *iomem;
struct pt3_board *pt3;
struct i2c_adapter *i2c;
@@ -703,10 +704,6 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
pci_set_master(pdev);
- ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2), DRV_NAME);
- if (ret < 0)
- return ret;
-
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(&pdev->dev, "Failed to set DMA mask\n");
@@ -719,8 +716,16 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, pt3);
pt3->pdev = pdev;
mutex_init(&pt3->lock);
- pt3->regs[0] = pcim_iomap_table(pdev)[0];
- pt3->regs[1] = pcim_iomap_table(pdev)[2];
+
+ iomem = pcim_iomap_region(pdev, 0, DRV_NAME);
+ if (IS_ERR(iomem))
+ return PTR_ERR(iomem);
+ pt3->regs[0] = iomem;
+
+ iomem = pcim_iomap_region(pdev, 2, DRV_NAME);
+ if (IS_ERR(iomem))
+ return PTR_ERR(iomem);
+ pt3->regs[1] = iomem;
ver = ioread32(pt3->regs[0] + REG_VERSION);
if ((ver >> 16) != 0x0301) {
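Because pcim_iomap_region() is devres-managed, the two-BAR sequence above needs no
manual unwind: if mapping BAR 2 fails after BAR 0 succeeded, the first mapping is
released automatically once probe returns an error. A condensed sketch of the idiom
(placeholder names):

    void __iomem *regs0, *regs1;

    regs0 = pcim_iomap_region(pdev, 0, "my_drv");
    if (IS_ERR(regs0))
            return PTR_ERR(regs0);

    regs1 = pcim_iomap_region(pdev, 2, "my_drv");
    if (IS_ERR(regs1))
            return PTR_ERR(regs1);  /* regs0 is cleaned up by devres */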
diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
index 6ec1480a6d18..febb2c156cf6 100644
--- a/drivers/media/pci/solo6x10/solo6x10-core.c
+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
@@ -477,10 +477,10 @@ static int solo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_write_config_byte(pdev, 0x40, 0x00);
pci_write_config_byte(pdev, 0x41, 0x00);
- ret = pcim_iomap_regions(pdev, BIT(0), SOLO6X10_NAME);
+ solo_dev->reg_base = pcim_iomap_region(pdev, 0, SOLO6X10_NAME);
+ ret = PTR_ERR_OR_ZERO(solo_dev->reg_base);
if (ret)
goto fail_probe;
- solo_dev->reg_base = pcim_iomap_table(pdev)[0];
chip_id = solo_reg_read(solo_dev, SOLO_CHIP_OPTION) &
SOLO_CHIP_ID_MASK;
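When a probe function keeps an int-based unwind path, as here and in tw5864 below,
the ERR_PTR return of pcim_iomap_region() is folded back into an errno with
PTR_ERR_OR_ZERO() from <linux/err.h>. A sketch of the idiom (placeholder label):

    void __iomem *base;
    int ret;

    base = pcim_iomap_region(pdev, 0, "my_drv");
    ret = PTR_ERR_OR_ZERO(base);  /* 0 on success, negative errno on ERR_PTR */
    if (ret)
            goto fail_probe;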
diff --git a/drivers/media/pci/sta2x11/Kconfig b/drivers/media/pci/sta2x11/Kconfig
deleted file mode 100644
index 118b922c08c3..000000000000
--- a/drivers/media/pci/sta2x11/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config STA2X11_VIP
- tristate "STA2X11 VIP Video For Linux"
- depends on PCI && VIDEO_DEV && I2C
- depends on STA2X11 || COMPILE_TEST
- select GPIOLIB if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEOBUF2_DMA_CONTIG
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- help
- Say Y for support for STA2X11 VIP (Video Input Port) capture
- device.
-
- To compile this driver as a module, choose M here: the
- module will be called sta2x11_vip.
diff --git a/drivers/media/pci/sta2x11/Makefile b/drivers/media/pci/sta2x11/Makefile
deleted file mode 100644
index bb684a7b6270..000000000000
--- a/drivers/media/pci/sta2x11/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_STA2X11_VIP) += sta2x11_vip.o
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
deleted file mode 100644
index 3049bad20f14..000000000000
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ /dev/null
@@ -1,1270 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * This is the driver for the STA2x11 Video Input Port.
- *
- * Copyright (C) 2012 ST Microelectronics
- * author: Federico Vaga <federico.vaga@gmail.com>
- * Copyright (C) 2010 WindRiver Systems, Inc.
- * authors: Andreas Kies <andreas.kies@windriver.com>
- * Vlad Lungu <vlad.lungu@windriver.com>
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/videodev2.h>
-#include <linux/kmod.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/gpio/consumer.h>
-#include <linux/gpio.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-fh.h>
-#include <media/v4l2-event.h>
-#include <media/videobuf2-dma-contig.h>
-
-#include "sta2x11_vip.h"
-
-#define DRV_VERSION "1.3"
-
-#ifndef PCI_DEVICE_ID_STMICRO_VIP
-#define PCI_DEVICE_ID_STMICRO_VIP 0xCC0D
-#endif
-
-#define MAX_FRAMES 4
-
-/*Register offsets*/
-#define DVP_CTL 0x00
-#define DVP_TFO 0x04
-#define DVP_TFS 0x08
-#define DVP_BFO 0x0C
-#define DVP_BFS 0x10
-#define DVP_VTP 0x14
-#define DVP_VBP 0x18
-#define DVP_VMP 0x1C
-#define DVP_ITM 0x98
-#define DVP_ITS 0x9C
-#define DVP_STA 0xA0
-#define DVP_HLFLN 0xA8
-#define DVP_RGB 0xC0
-#define DVP_PKZ 0xF0
-
-/*Register fields*/
-#define DVP_CTL_ENA 0x00000001
-#define DVP_CTL_RST 0x80000000
-#define DVP_CTL_DIS (~0x00040001)
-
-#define DVP_IT_VSB 0x00000008
-#define DVP_IT_VST 0x00000010
-#define DVP_IT_FIFO 0x00000020
-
-#define DVP_HLFLN_SD 0x00000001
-
-#define SAVE_COUNT 8
-#define AUX_COUNT 3
-#define IRQ_COUNT 1
-
-
-struct vip_buffer {
- struct vb2_v4l2_buffer vb;
- struct list_head list;
- dma_addr_t dma;
-};
-static inline struct vip_buffer *to_vip_buffer(struct vb2_v4l2_buffer *vb2)
-{
- return container_of(vb2, struct vip_buffer, vb);
-}
-
-/**
- * struct sta2x11_vip - All internal data for one instance of device
- * @v4l2_dev: device registered in v4l layer
- * @video_dev: properties of our device
- * @pdev: PCI device
- * @adapter: contains I2C adapter information
- * @register_save_area: All relevant register are saved here during suspend
- * @decoder: contains information about video DAC
- * @ctrl_hdl: handler for control framework
- * @format: pixel format, fixed UYVY
- * @std: video standard (e.g. PAL/NTSC)
- * @input: input line for video signal ( 0 or 1 )
- * @disabled: Device is in power down state
- * @slock: for excluse access of registers
- * @vb_vidq: queue maintained by videobuf2 layer
- * @buffer_list: list of buffer in use
- * @sequence: sequence number of acquired buffer
- * @active: current active buffer
- * @lock: used in videobuf2 callback
- * @v4l_lock: serialize its video4linux ioctls
- * @tcount: Number of top frames
- * @bcount: Number of bottom frames
- * @overflow: Number of FIFO overflows
- * @iomem: hardware base address
- * @config: I2C and gpio config from platform
- *
- * All non-local data is accessed via this structure.
- */
-struct sta2x11_vip {
- struct v4l2_device v4l2_dev;
- struct video_device video_dev;
- struct pci_dev *pdev;
- struct i2c_adapter *adapter;
- unsigned int register_save_area[IRQ_COUNT + SAVE_COUNT + AUX_COUNT];
- struct v4l2_subdev *decoder;
- struct v4l2_ctrl_handler ctrl_hdl;
-
-
- struct v4l2_pix_format format;
- v4l2_std_id std;
- unsigned int input;
- int disabled;
- spinlock_t slock;
-
- struct vb2_queue vb_vidq;
- struct list_head buffer_list;
- unsigned int sequence;
- struct vip_buffer *active; /* current active buffer */
- spinlock_t lock; /* Used in videobuf2 callback */
- struct mutex v4l_lock;
-
- /* Interrupt counters */
- int tcount, bcount;
- int overflow;
-
- void __iomem *iomem; /* I/O Memory */
- struct vip_config *config;
-};
-
-static const unsigned int registers_to_save[AUX_COUNT] = {
- DVP_HLFLN, DVP_RGB, DVP_PKZ
-};
-
-static struct v4l2_pix_format formats_50[] = {
- { /*PAL interlaced */
- .width = 720,
- .height = 576,
- .pixelformat = V4L2_PIX_FMT_UYVY,
- .field = V4L2_FIELD_INTERLACED,
- .bytesperline = 720 * 2,
- .sizeimage = 720 * 2 * 576,
- .colorspace = V4L2_COLORSPACE_SMPTE170M},
- { /*PAL top */
- .width = 720,
- .height = 288,
- .pixelformat = V4L2_PIX_FMT_UYVY,
- .field = V4L2_FIELD_TOP,
- .bytesperline = 720 * 2,
- .sizeimage = 720 * 2 * 288,
- .colorspace = V4L2_COLORSPACE_SMPTE170M},
- { /*PAL bottom */
- .width = 720,
- .height = 288,
- .pixelformat = V4L2_PIX_FMT_UYVY,
- .field = V4L2_FIELD_BOTTOM,
- .bytesperline = 720 * 2,
- .sizeimage = 720 * 2 * 288,
- .colorspace = V4L2_COLORSPACE_SMPTE170M},
-
-};
-
-static struct v4l2_pix_format formats_60[] = {
- { /*NTSC interlaced */
- .width = 720,
- .height = 480,
- .pixelformat = V4L2_PIX_FMT_UYVY,
- .field = V4L2_FIELD_INTERLACED,
- .bytesperline = 720 * 2,
- .sizeimage = 720 * 2 * 480,
- .colorspace = V4L2_COLORSPACE_SMPTE170M},
- { /*NTSC top */
- .width = 720,
- .height = 240,
- .pixelformat = V4L2_PIX_FMT_UYVY,
- .field = V4L2_FIELD_TOP,
- .bytesperline = 720 * 2,
- .sizeimage = 720 * 2 * 240,
- .colorspace = V4L2_COLORSPACE_SMPTE170M},
- { /*NTSC bottom */
- .width = 720,
- .height = 240,
- .pixelformat = V4L2_PIX_FMT_UYVY,
- .field = V4L2_FIELD_BOTTOM,
- .bytesperline = 720 * 2,
- .sizeimage = 720 * 2 * 240,
- .colorspace = V4L2_COLORSPACE_SMPTE170M},
-};
-
-/* Write VIP register */
-static inline void reg_write(struct sta2x11_vip *vip, unsigned int reg, u32 val)
-{
- iowrite32((val), (vip->iomem)+(reg));
-}
-/* Read VIP register */
-static inline u32 reg_read(struct sta2x11_vip *vip, unsigned int reg)
-{
- return ioread32((vip->iomem)+(reg));
-}
-/* Start DMA acquisition */
-static void start_dma(struct sta2x11_vip *vip, struct vip_buffer *vip_buf)
-{
- unsigned long offset = 0;
-
- if (vip->format.field == V4L2_FIELD_INTERLACED)
- offset = vip->format.width * 2;
-
- spin_lock_irq(&vip->slock);
- /* Enable acquisition */
- reg_write(vip, DVP_CTL, reg_read(vip, DVP_CTL) | DVP_CTL_ENA);
- /* Set Top and Bottom Field memory address */
- reg_write(vip, DVP_VTP, (u32)vip_buf->dma);
- reg_write(vip, DVP_VBP, (u32)vip_buf->dma + offset);
- spin_unlock_irq(&vip->slock);
-}
-
-/* Fetch the next buffer to activate */
-static void vip_active_buf_next(struct sta2x11_vip *vip)
-{
- /* Get the next buffer */
- spin_lock(&vip->lock);
- if (list_empty(&vip->buffer_list)) {/* No available buffer */
- spin_unlock(&vip->lock);
- return;
- }
- vip->active = list_first_entry(&vip->buffer_list,
- struct vip_buffer,
- list);
- /* Reset Top and Bottom counter */
- vip->tcount = 0;
- vip->bcount = 0;
- spin_unlock(&vip->lock);
- if (vb2_is_streaming(&vip->vb_vidq)) { /* streaming is on */
- start_dma(vip, vip->active); /* start dma capture */
- }
-}
-
-
-/* Videobuf2 Operations */
-static int queue_setup(struct vb2_queue *vq,
- unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], struct device *alloc_devs[])
-{
- struct sta2x11_vip *vip = vb2_get_drv_priv(vq);
-
- if (!(*nbuffers) || *nbuffers < MAX_FRAMES)
- *nbuffers = MAX_FRAMES;
-
- *nplanes = 1;
- sizes[0] = vip->format.sizeimage;
-
- vip->sequence = 0;
- vip->active = NULL;
- vip->tcount = 0;
- vip->bcount = 0;
-
- return 0;
-};
-static int buffer_init(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
-
- vip_buf->dma = vb2_dma_contig_plane_dma_addr(vb, 0);
- INIT_LIST_HEAD(&vip_buf->list);
- return 0;
-}
-
-static int buffer_prepare(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
- struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
- unsigned long size;
-
- size = vip->format.sizeimage;
- if (vb2_plane_size(vb, 0) < size) {
- v4l2_err(&vip->v4l2_dev, "buffer too small (%lu < %lu)\n",
- vb2_plane_size(vb, 0), size);
- return -EINVAL;
- }
-
- vb2_set_plane_payload(&vip_buf->vb.vb2_buf, 0, size);
-
- return 0;
-}
-static void buffer_queue(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
- struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
-
- spin_lock(&vip->lock);
- list_add_tail(&vip_buf->list, &vip->buffer_list);
- if (!vip->active) { /* No active buffer, active the first one */
- vip->active = list_first_entry(&vip->buffer_list,
- struct vip_buffer,
- list);
- if (vb2_is_streaming(&vip->vb_vidq)) /* streaming is on */
- start_dma(vip, vip_buf); /* start dma capture */
- }
- spin_unlock(&vip->lock);
-}
-static void buffer_finish(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
- struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
-
- /* Buffer handled, remove it from the list */
- spin_lock(&vip->lock);
- list_del_init(&vip_buf->list);
- spin_unlock(&vip->lock);
-
- if (vb2_is_streaming(vb->vb2_queue))
- vip_active_buf_next(vip);
-}
-
-static int start_streaming(struct vb2_queue *vq, unsigned int count)
-{
- struct sta2x11_vip *vip = vb2_get_drv_priv(vq);
-
- spin_lock_irq(&vip->slock);
- /* Enable interrupt VSYNC Top and Bottom*/
- reg_write(vip, DVP_ITM, DVP_IT_VSB | DVP_IT_VST);
- spin_unlock_irq(&vip->slock);
-
- if (count)
- start_dma(vip, vip->active);
-
- return 0;
-}
-
-/* abort streaming and wait for last buffer */
-static void stop_streaming(struct vb2_queue *vq)
-{
- struct sta2x11_vip *vip = vb2_get_drv_priv(vq);
- struct vip_buffer *vip_buf, *node;
-
- /* Disable acquisition */
- reg_write(vip, DVP_CTL, reg_read(vip, DVP_CTL) & ~DVP_CTL_ENA);
- /* Disable all interrupts */
- reg_write(vip, DVP_ITM, 0);
-
- /* Release all active buffers */
- spin_lock(&vip->lock);
- list_for_each_entry_safe(vip_buf, node, &vip->buffer_list, list) {
- vb2_buffer_done(&vip_buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- list_del(&vip_buf->list);
- }
- spin_unlock(&vip->lock);
-}
-
-static const struct vb2_ops vip_video_qops = {
- .queue_setup = queue_setup,
- .buf_init = buffer_init,
- .buf_prepare = buffer_prepare,
- .buf_finish = buffer_finish,
- .buf_queue = buffer_queue,
- .start_streaming = start_streaming,
- .stop_streaming = stop_streaming,
-};
-
-
-/* File Operations */
-static const struct v4l2_file_operations vip_fops = {
- .owner = THIS_MODULE,
- .open = v4l2_fh_open,
- .release = vb2_fop_release,
- .unlocked_ioctl = video_ioctl2,
- .read = vb2_fop_read,
- .mmap = vb2_fop_mmap,
- .poll = vb2_fop_poll
-};
-
-
-/**
- * vidioc_querycap - return capabilities of device
- * @file: descriptor of device
- * @cap: contains return values
- * @priv: unused
- *
- * the capabilities of the device are returned
- *
- * return value: 0, no error.
- */
-static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
- strscpy(cap->card, KBUILD_MODNAME, sizeof(cap->card));
- return 0;
-}
-
-/**
- * vidioc_s_std - set video standard
- * @file: descriptor of device
- * @std: contains standard to be set
- * @priv: unused
- *
- * the video standard is set
- *
- * return value: 0, no error.
- *
- * -EIO, no input signal detected
- *
- * other, returned from video DAC.
- */
-static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id std)
-{
- struct sta2x11_vip *vip = video_drvdata(file);
-
- /*
- * This is here for backwards compatibility only.
- * The use of V4L2_STD_ALL to trigger a querystd is non-standard.
- */
- if (std == V4L2_STD_ALL) {
- v4l2_subdev_call(vip->decoder, video, querystd, &std);
- if (std == V4L2_STD_UNKNOWN)
- return -EIO;
- }
-
- if (vip->std != std) {
- vip->std = std;
- if (V4L2_STD_525_60 & std)
- vip->format = formats_60[0];
- else
- vip->format = formats_50[0];
- }
-
- return v4l2_subdev_call(vip->decoder, video, s_std, std);
-}
-
-/**
- * vidioc_g_std - get video standard
- * @file: descriptor of device
- * @priv: unused
- * @std: contains return values
- *
- * the current video standard is returned
- *
- * return value: 0, no error.
- */
-static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std)
-{
- struct sta2x11_vip *vip = video_drvdata(file);
-
- *std = vip->std;
- return 0;
-}
-
-/**
- * vidioc_querystd - get possible video standards
- * @file: descriptor of device
- * @priv: unused
- * @std: contains return values
- *
- * all possible video standards are returned
- *
- * return value: delivered by video DAC routine.
- */
-static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *std)
-{
- struct sta2x11_vip *vip = video_drvdata(file);
-
- return v4l2_subdev_call(vip->decoder, video, querystd, std);
-}
-
-static int vidioc_enum_input(struct file *file, void *priv,
- struct v4l2_input *inp)
-{
- if (inp->index > 1)
- return -EINVAL;
-
- inp->type = V4L2_INPUT_TYPE_CAMERA;
- inp->std = V4L2_STD_ALL;
- sprintf(inp->name, "Camera %u", inp->index);
-
- return 0;
-}
-
-/**
- * vidioc_s_input - set input line
- * @file: descriptor of device
- * @priv: unused
- * @i: new input line number
- *
- * the current active input line is set
- *
- * return value: 0, no error.
- *
- * -EINVAL, line number out of range
- */
-static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
-{
- struct sta2x11_vip *vip = video_drvdata(file);
- int ret;
-
- if (i > 1)
- return -EINVAL;
- ret = v4l2_subdev_call(vip->decoder, video, s_routing, i, 0, 0);
-
- if (!ret)
- vip->input = i;
-
- return 0;
-}
-
-/**
- * vidioc_g_input - return input line
- * @file: descriptor of device
- * @priv: unused
- * @i: returned input line number
- *
- * the current active input line is returned
- *
- * return value: always 0.
- */
-static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
-{
- struct sta2x11_vip *vip = video_drvdata(file);
-
- *i = vip->input;
- return 0;
-}
-
-/**
- * vidioc_enum_fmt_vid_cap - return video capture format
- * @file: descriptor of device
- * @priv: unused
- * @f: returned format information
- *
- * returns name and format of video capture
- * Only UYVY is supported by hardware.
- *
- * return value: always 0.
- */
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
-
- if (f->index != 0)
- return -EINVAL;
-
- f->pixelformat = V4L2_PIX_FMT_UYVY;
- return 0;
-}
-
-/**
- * vidioc_try_fmt_vid_cap - set video capture format
- * @file: descriptor of device
- * @priv: unused
- * @f: new format
- *
- * new video format is set which includes width and
- * field type. width is fixed to 720, no scaling.
- * Only UYVY is supported by this hardware.
- * the minimum height is 200, the maximum is 576 (PAL)
- *
- * return value: 0, no error
- *
- * -EINVAL, pixel or field format not supported
- *
- */
-static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct sta2x11_vip *vip = video_drvdata(file);
- int interlace_lim;
-
- if (V4L2_PIX_FMT_UYVY != f->fmt.pix.pixelformat) {
- v4l2_warn(&vip->v4l2_dev, "Invalid format, only UYVY supported\n");
- return -EINVAL;
- }
-
- if (V4L2_STD_525_60 & vip->std)
- interlace_lim = 240;
- else
- interlace_lim = 288;
-
- switch (f->fmt.pix.field) {
- default:
- case V4L2_FIELD_ANY:
- if (interlace_lim < f->fmt.pix.height)
- f->fmt.pix.field = V4L2_FIELD_INTERLACED;
- else
- f->fmt.pix.field = V4L2_FIELD_BOTTOM;
- break;
- case V4L2_FIELD_TOP:
- case V4L2_FIELD_BOTTOM:
- if (interlace_lim < f->fmt.pix.height)
- f->fmt.pix.height = interlace_lim;
- break;
- case V4L2_FIELD_INTERLACED:
- break;
- }
-
- /* It is the only supported format */
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
- f->fmt.pix.height &= ~1;
- if (2 * interlace_lim < f->fmt.pix.height)
- f->fmt.pix.height = 2 * interlace_lim;
- if (200 > f->fmt.pix.height)
- f->fmt.pix.height = 200;
- f->fmt.pix.width = 720;
- f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
- f->fmt.pix.sizeimage = f->fmt.pix.width * 2 * f->fmt.pix.height;
- f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- return 0;
-}
-
-/**
- * vidioc_s_fmt_vid_cap - set current video format parameters
- * @file: descriptor of device
- * @priv: unused
- * @f: returned format information
- *
- * set new capture format
- * return value: 0, no error
- *
- * other, delivered by video DAC routine.
- */
-static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct sta2x11_vip *vip = video_drvdata(file);
- unsigned int t_stop, b_stop, pitch;
- int ret;
-
- ret = vidioc_try_fmt_vid_cap(file, priv, f);
- if (ret)
- return ret;
-
- if (vb2_is_busy(&vip->vb_vidq)) {
- /* Can't change format during acquisition */
- v4l2_err(&vip->v4l2_dev, "device busy\n");
- return -EBUSY;
- }
- vip->format = f->fmt.pix;
- switch (vip->format.field) {
- case V4L2_FIELD_INTERLACED:
- t_stop = ((vip->format.height / 2 - 1) << 16) |
- (2 * vip->format.width - 1);
- b_stop = t_stop;
- pitch = 4 * vip->format.width;
- break;
- case V4L2_FIELD_TOP:
- t_stop = ((vip->format.height - 1) << 16) |
- (2 * vip->format.width - 1);
- b_stop = (0 << 16) | (2 * vip->format.width - 1);
- pitch = 2 * vip->format.width;
- break;
- case V4L2_FIELD_BOTTOM:
- t_stop = (0 << 16) | (2 * vip->format.width - 1);
- b_stop = (vip->format.height << 16) |
- (2 * vip->format.width - 1);
- pitch = 2 * vip->format.width;
- break;
- default:
- v4l2_err(&vip->v4l2_dev, "unknown field format\n");
- return -EINVAL;
- }
-
- spin_lock_irq(&vip->slock);
- /* Y-X Top Field Offset */
- reg_write(vip, DVP_TFO, 0);
- /* Y-X Bottom Field Offset */
- reg_write(vip, DVP_BFO, 0);
- /* Y-X Top Field Stop*/
- reg_write(vip, DVP_TFS, t_stop);
- /* Y-X Bottom Field Stop */
- reg_write(vip, DVP_BFS, b_stop);
- /* Video Memory Pitch */
- reg_write(vip, DVP_VMP, pitch);
- spin_unlock_irq(&vip->slock);
-
- return 0;
-}
-
-/**
- * vidioc_g_fmt_vid_cap - get current video format parameters
- * @file: descriptor of device
- * @priv: unused
- * @f: contains format information
- *
- * returns current video format parameters
- *
- * return value: 0, always successful
- */
-static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct sta2x11_vip *vip = video_drvdata(file);
-
- f->fmt.pix = vip->format;
-
- return 0;
-}
-
-static const struct v4l2_ioctl_ops vip_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- /* FMT handling */
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- /* Buffer handlers */
- .vidioc_create_bufs = vb2_ioctl_create_bufs,
- .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
- .vidioc_reqbufs = vb2_ioctl_reqbufs,
- .vidioc_querybuf = vb2_ioctl_querybuf,
- .vidioc_qbuf = vb2_ioctl_qbuf,
- .vidioc_dqbuf = vb2_ioctl_dqbuf,
- /* Stream on/off */
- .vidioc_streamon = vb2_ioctl_streamon,
- .vidioc_streamoff = vb2_ioctl_streamoff,
- /* Standard handling */
- .vidioc_g_std = vidioc_g_std,
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
- /* Input handling */
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- /* Log status ioctl */
- .vidioc_log_status = v4l2_ctrl_log_status,
- /* Event handling */
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-};
-
-static const struct video_device video_dev_template = {
- .name = KBUILD_MODNAME,
- .release = video_device_release_empty,
- .fops = &vip_fops,
- .ioctl_ops = &vip_ioctl_ops,
- .tvnorms = V4L2_STD_ALL,
- .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING,
-};
-
-/**
- * vip_irq - interrupt routine
- * @irq: Number of interrupt ( not used, correct number is assumed )
- * @data: local data structure containing all information
- *
- * check for both frame interrupts set ( top and bottom ).
- * check FIFO overflow, but limit number of log messages after open.
- * signal a complete buffer if done
- *
- * return value: IRQ_NONE, interrupt was not generated by VIP
- *
- * IRQ_HANDLED, interrupt done.
- */
-static irqreturn_t vip_irq(int irq, void *data)
-{
- struct sta2x11_vip *vip = data;
- unsigned int status;
-
- status = reg_read(vip, DVP_ITS);
-
- if (!status) /* No interrupt to handle */
- return IRQ_NONE;
-
- if (status & DVP_IT_FIFO)
- if (vip->overflow++ > 5)
- pr_info("VIP: fifo overflow\n");
-
- if ((status & DVP_IT_VST) && (status & DVP_IT_VSB)) {
- /* this is bad, we are too slow, hope the condition is gone
- * on the next frame */
- return IRQ_HANDLED;
- }
-
- if (status & DVP_IT_VST)
- if ((++vip->tcount) < 2)
- return IRQ_HANDLED;
- if (status & DVP_IT_VSB) {
- vip->bcount++;
- return IRQ_HANDLED;
- }
-
- if (vip->active) { /* Acquisition is over on this buffer */
- /* Disable acquisition */
- reg_write(vip, DVP_CTL, reg_read(vip, DVP_CTL) & ~DVP_CTL_ENA);
- /* Remove the active buffer from the list */
- vip->active->vb.vb2_buf.timestamp = ktime_get_ns();
- vip->active->vb.sequence = vip->sequence++;
- vb2_buffer_done(&vip->active->vb.vb2_buf, VB2_BUF_STATE_DONE);
- }
-
- return IRQ_HANDLED;
-}
-
-static void sta2x11_vip_init_register(struct sta2x11_vip *vip)
-{
- /* Register initialization */
- spin_lock_irq(&vip->slock);
- /* Clean interrupt */
- reg_read(vip, DVP_ITS);
- /* Enable Half Line per vertical */
- reg_write(vip, DVP_HLFLN, DVP_HLFLN_SD);
- /* Reset VIP control */
- reg_write(vip, DVP_CTL, DVP_CTL_RST);
- /* Clear VIP control */
- reg_write(vip, DVP_CTL, 0);
- spin_unlock_irq(&vip->slock);
-}
-static void sta2x11_vip_clear_register(struct sta2x11_vip *vip)
-{
- spin_lock_irq(&vip->slock);
- /* Disable interrupt */
- reg_write(vip, DVP_ITM, 0);
- /* Reset VIP Control */
- reg_write(vip, DVP_CTL, DVP_CTL_RST);
- /* Clear VIP Control */
- reg_write(vip, DVP_CTL, 0);
- /* Clean VIP Interrupt */
- reg_read(vip, DVP_ITS);
- spin_unlock_irq(&vip->slock);
-}
-static int sta2x11_vip_init_buffer(struct sta2x11_vip *vip)
-{
- int err;
-
- err = dma_set_coherent_mask(&vip->pdev->dev, DMA_BIT_MASK(29));
- if (err) {
- v4l2_err(&vip->v4l2_dev, "Cannot configure coherent mask");
- return err;
- }
- memset(&vip->vb_vidq, 0, sizeof(struct vb2_queue));
- vip->vb_vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- vip->vb_vidq.io_modes = VB2_MMAP | VB2_READ;
- vip->vb_vidq.drv_priv = vip;
- vip->vb_vidq.buf_struct_size = sizeof(struct vip_buffer);
- vip->vb_vidq.ops = &vip_video_qops;
- vip->vb_vidq.mem_ops = &vb2_dma_contig_memops;
- vip->vb_vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- vip->vb_vidq.dev = &vip->pdev->dev;
- vip->vb_vidq.lock = &vip->v4l_lock;
- err = vb2_queue_init(&vip->vb_vidq);
- if (err)
- return err;
- INIT_LIST_HEAD(&vip->buffer_list);
- spin_lock_init(&vip->lock);
- return 0;
-}
-
-static int sta2x11_vip_init_controls(struct sta2x11_vip *vip)
-{
- /*
- * Inititialize an empty control so VIP can inerithing controls
- * from ADV7180
- */
- v4l2_ctrl_handler_init(&vip->ctrl_hdl, 0);
-
- vip->v4l2_dev.ctrl_handler = &vip->ctrl_hdl;
- if (vip->ctrl_hdl.error) {
- int err = vip->ctrl_hdl.error;
-
- v4l2_ctrl_handler_free(&vip->ctrl_hdl);
- return err;
- }
-
- return 0;
-}
-
-/**
- * vip_gpio_reserve - reserve gpio pin
- * @dev: device
- * @pin: GPIO pin number
- * @dir: direction, input or output
- * @name: GPIO pin name
- *
- */
-static int vip_gpio_reserve(struct device *dev, int pin, int dir,
- const char *name)
-{
- struct gpio_desc *desc = gpio_to_desc(pin);
- int ret = -ENODEV;
-
- if (!gpio_is_valid(pin))
- return ret;
-
- ret = gpio_request(pin, name);
- if (ret) {
- dev_err(dev, "Failed to allocate pin %d (%s)\n", pin, name);
- return ret;
- }
-
- ret = gpiod_direction_output(desc, dir);
- if (ret) {
- dev_err(dev, "Failed to set direction for pin %d (%s)\n",
- pin, name);
- gpio_free(pin);
- return ret;
- }
-
- ret = gpiod_export(desc, false);
- if (ret) {
- dev_err(dev, "Failed to export pin %d (%s)\n", pin, name);
- gpio_free(pin);
- return ret;
- }
-
- return 0;
-}
-
-/**
- * vip_gpio_release - release gpio pin
- * @dev: device
- * @pin: GPIO pin number
- * @name: GPIO pin name
- *
- */
-static void vip_gpio_release(struct device *dev, int pin, const char *name)
-{
- if (gpio_is_valid(pin)) {
- struct gpio_desc *desc = gpio_to_desc(pin);
-
- dev_dbg(dev, "releasing pin %d (%s)\n", pin, name);
- gpiod_unexport(desc);
- gpio_free(pin);
- }
-}
-
-/**
- * sta2x11_vip_init_one - init one instance of video device
- * @pdev: PCI device
- * @ent: (not used)
- *
- * allocate reset pins for DAC.
- * Reset video DAC, this is done via reset line.
- * allocate memory for managing device
- * request interrupt
- * map IO region
- * register device
- * find and initialize video DAC
- *
- * return value: 0, no error
- *
- * -ENOMEM, no memory
- *
- * -ENODEV, device could not be detected or registered
- */
-static int sta2x11_vip_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int ret;
- struct sta2x11_vip *vip;
- struct vip_config *config;
-
- /* Check if hardware support 26-bit DMA */
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(26))) {
- dev_err(&pdev->dev, "26-bit DMA addressing not available\n");
- return -EINVAL;
- }
- /* Enable PCI */
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
-
- /* Get VIP platform data */
- config = dev_get_platdata(&pdev->dev);
- if (!config) {
- dev_info(&pdev->dev, "VIP slot disabled\n");
- ret = -EINVAL;
- goto disable;
- }
-
- /* Power configuration */
- ret = vip_gpio_reserve(&pdev->dev, config->pwr_pin, 0,
- config->pwr_name);
- if (ret)
- goto disable;
-
- ret = vip_gpio_reserve(&pdev->dev, config->reset_pin, 0,
- config->reset_name);
- if (ret) {
- vip_gpio_release(&pdev->dev, config->pwr_pin,
- config->pwr_name);
- goto disable;
- }
-
- if (gpio_is_valid(config->pwr_pin)) {
- /* Datasheet says 5ms between PWR and RST */
- usleep_range(5000, 25000);
- gpio_direction_output(config->pwr_pin, 1);
- }
-
- if (gpio_is_valid(config->reset_pin)) {
- /* Datasheet says 5ms between PWR and RST */
- usleep_range(5000, 25000);
- gpio_direction_output(config->reset_pin, 1);
- }
- usleep_range(5000, 25000);
-
- /* Allocate a new VIP instance */
- vip = kzalloc(sizeof(struct sta2x11_vip), GFP_KERNEL);
- if (!vip) {
- ret = -ENOMEM;
- goto release_gpios;
- }
- vip->pdev = pdev;
- vip->std = V4L2_STD_PAL;
- vip->format = formats_50[0];
- vip->config = config;
- mutex_init(&vip->v4l_lock);
-
- ret = sta2x11_vip_init_controls(vip);
- if (ret)
- goto free_mem;
- ret = v4l2_device_register(&pdev->dev, &vip->v4l2_dev);
- if (ret)
- goto free_mem;
-
- dev_dbg(&pdev->dev, "BAR #0 at 0x%lx 0x%lx irq %d\n",
- (unsigned long)pci_resource_start(pdev, 0),
- (unsigned long)pci_resource_len(pdev, 0), pdev->irq);
-
- pci_set_master(pdev);
-
- ret = pci_request_regions(pdev, KBUILD_MODNAME);
- if (ret)
- goto unreg;
-
- vip->iomem = pci_iomap(pdev, 0, 0x100);
- if (!vip->iomem) {
- ret = -ENOMEM;
- goto release;
- }
-
- pci_enable_msi(pdev);
-
- /* Initialize buffer */
- ret = sta2x11_vip_init_buffer(vip);
- if (ret)
- goto unmap;
-
- spin_lock_init(&vip->slock);
-
- ret = request_irq(pdev->irq, vip_irq, IRQF_SHARED, KBUILD_MODNAME, vip);
- if (ret) {
- dev_err(&pdev->dev, "request_irq failed\n");
- ret = -ENODEV;
- goto release_buf;
- }
-
- /* Initialize and register video device */
- vip->video_dev = video_dev_template;
- vip->video_dev.v4l2_dev = &vip->v4l2_dev;
- vip->video_dev.queue = &vip->vb_vidq;
- vip->video_dev.lock = &vip->v4l_lock;
- video_set_drvdata(&vip->video_dev, vip);
-
- ret = video_register_device(&vip->video_dev, VFL_TYPE_VIDEO, -1);
- if (ret)
- goto vrelease;
-
- /* Get ADV7180 subdevice */
- vip->adapter = i2c_get_adapter(vip->config->i2c_id);
- if (!vip->adapter) {
- ret = -ENODEV;
- dev_err(&pdev->dev, "no I2C adapter found\n");
- goto vunreg;
- }
-
- vip->decoder = v4l2_i2c_new_subdev(&vip->v4l2_dev, vip->adapter,
- "adv7180", vip->config->i2c_addr,
- NULL);
- if (!vip->decoder) {
- ret = -ENODEV;
- dev_err(&pdev->dev, "no decoder found\n");
- goto vunreg;
- }
-
- i2c_put_adapter(vip->adapter);
- v4l2_subdev_call(vip->decoder, core, init, 0);
-
- sta2x11_vip_init_register(vip);
-
- dev_info(&pdev->dev, "STA2X11 Video Input Port (VIP) loaded\n");
- return 0;
-
-vunreg:
- video_set_drvdata(&vip->video_dev, NULL);
-vrelease:
- vb2_video_unregister_device(&vip->video_dev);
- free_irq(pdev->irq, vip);
-release_buf:
- pci_disable_msi(pdev);
-unmap:
- pci_iounmap(pdev, vip->iomem);
-release:
- pci_release_regions(pdev);
-unreg:
- v4l2_device_unregister(&vip->v4l2_dev);
-free_mem:
- kfree(vip);
-release_gpios:
- vip_gpio_release(&pdev->dev, config->reset_pin, config->reset_name);
- vip_gpio_release(&pdev->dev, config->pwr_pin, config->pwr_name);
-disable:
- /*
- * do not call pci_disable_device on sta2x11 because it break all
- * other Bus masters on this EP
- */
- return ret;
-}
-
-/**
- * sta2x11_vip_remove_one - release device
- * @pdev: PCI device
- *
- * Undo everything done in .._init_one
- *
- * unregister video device
- * free interrupt
- * unmap ioadresses
- * free memory
- * free GPIO pins
- */
-static void sta2x11_vip_remove_one(struct pci_dev *pdev)
-{
- struct v4l2_device *v4l2_dev = pci_get_drvdata(pdev);
- struct sta2x11_vip *vip =
- container_of(v4l2_dev, struct sta2x11_vip, v4l2_dev);
-
- sta2x11_vip_clear_register(vip);
-
- video_set_drvdata(&vip->video_dev, NULL);
- vb2_video_unregister_device(&vip->video_dev);
- free_irq(pdev->irq, vip);
- pci_disable_msi(pdev);
- pci_iounmap(pdev, vip->iomem);
- pci_release_regions(pdev);
-
- v4l2_device_unregister(&vip->v4l2_dev);
-
- vip_gpio_release(&pdev->dev, vip->config->pwr_pin,
- vip->config->pwr_name);
- vip_gpio_release(&pdev->dev, vip->config->reset_pin,
- vip->config->reset_name);
-
- kfree(vip);
- /*
- * do not call pci_disable_device on sta2x11 because it break all
- * other Bus masters on this EP
- */
-}
-
-/**
- * sta2x11_vip_suspend - set device into power save mode
- * @dev_d: PCI device
- *
- * all relevant registers are saved and an attempt to set a new state is made.
- *
- * return value: 0 always indicate success,
- * even if device could not be disabled. (workaround for hardware problem)
- */
-static int __maybe_unused sta2x11_vip_suspend(struct device *dev_d)
-{
- struct v4l2_device *v4l2_dev = dev_get_drvdata(dev_d);
- struct sta2x11_vip *vip =
- container_of(v4l2_dev, struct sta2x11_vip, v4l2_dev);
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&vip->slock, flags);
- vip->register_save_area[0] = reg_read(vip, DVP_CTL);
- reg_write(vip, DVP_CTL, vip->register_save_area[0] & DVP_CTL_DIS);
- vip->register_save_area[SAVE_COUNT] = reg_read(vip, DVP_ITM);
- reg_write(vip, DVP_ITM, 0);
- for (i = 1; i < SAVE_COUNT; i++)
- vip->register_save_area[i] = reg_read(vip, 4 * i);
- for (i = 0; i < AUX_COUNT; i++)
- vip->register_save_area[SAVE_COUNT + IRQ_COUNT + i] =
- reg_read(vip, registers_to_save[i]);
- spin_unlock_irqrestore(&vip->slock, flags);
-
- vip->disabled = 1;
-
- pr_info("VIP: suspend\n");
- return 0;
-}
-
-/**
- * sta2x11_vip_resume - resume device operation
- * @dev_d : PCI device
- *
- * return value: 0, no error.
- *
- * other, could not set device to power on state.
- */
-static int __maybe_unused sta2x11_vip_resume(struct device *dev_d)
-{
- struct v4l2_device *v4l2_dev = dev_get_drvdata(dev_d);
- struct sta2x11_vip *vip =
- container_of(v4l2_dev, struct sta2x11_vip, v4l2_dev);
- unsigned long flags;
- int i;
-
- pr_info("VIP: resume\n");
-
- vip->disabled = 0;
-
- spin_lock_irqsave(&vip->slock, flags);
- for (i = 1; i < SAVE_COUNT; i++)
- reg_write(vip, 4 * i, vip->register_save_area[i]);
- for (i = 0; i < AUX_COUNT; i++)
- reg_write(vip, registers_to_save[i],
- vip->register_save_area[SAVE_COUNT + IRQ_COUNT + i]);
- reg_write(vip, DVP_CTL, vip->register_save_area[0]);
- reg_write(vip, DVP_ITM, vip->register_save_area[SAVE_COUNT]);
- spin_unlock_irqrestore(&vip->slock, flags);
- return 0;
-}
-
-static const struct pci_device_id sta2x11_vip_pci_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_VIP)},
- {0,}
-};
-
-static SIMPLE_DEV_PM_OPS(sta2x11_vip_pm_ops,
- sta2x11_vip_suspend,
- sta2x11_vip_resume);
-
-static struct pci_driver sta2x11_vip_driver = {
- .name = KBUILD_MODNAME,
- .probe = sta2x11_vip_init_one,
- .remove = sta2x11_vip_remove_one,
- .id_table = sta2x11_vip_pci_tbl,
- .driver.pm = &sta2x11_vip_pm_ops,
-};
-
-static int __init sta2x11_vip_init_module(void)
-{
- return pci_register_driver(&sta2x11_vip_driver);
-}
-
-static void __exit sta2x11_vip_exit_module(void)
-{
- pci_unregister_driver(&sta2x11_vip_driver);
-}
-
-#ifdef MODULE
-module_init(sta2x11_vip_init_module);
-module_exit(sta2x11_vip_exit_module);
-#else
-late_initcall_sync(sta2x11_vip_init_module);
-#endif
-
-MODULE_DESCRIPTION("STA2X11 Video Input Port driver");
-MODULE_AUTHOR("Wind River");
-MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
-MODULE_DEVICE_TABLE(pci, sta2x11_vip_pci_tbl);
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.h b/drivers/media/pci/sta2x11/sta2x11_vip.h
deleted file mode 100644
index de6000e7943e..000000000000
--- a/drivers/media/pci/sta2x11/sta2x11_vip.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2011 Wind River Systems, Inc.
- *
- * Author: Anders Wallin <anders.wallin@windriver.com>
- */
-
-#ifndef __STA2X11_VIP_H
-#define __STA2X11_VIP_H
-
-/**
- * struct vip_config - video input configuration data
- * @pwr_name: ADV powerdown name
- * @pwr_pin: ADV powerdown pin
- * @reset_name: ADV reset name
- * @reset_pin: ADV reset pin
- * @i2c_id: ADV i2c adapter ID
- * @i2c_addr: ADV i2c address
- */
-struct vip_config {
- const char *pwr_name;
- int pwr_pin;
- const char *reset_name;
- int reset_pin;
- int i2c_id;
- int i2c_addr;
-};
-
-#endif /* __STA2X11_VIP_H */
diff --git a/drivers/media/pci/tw5864/tw5864-core.c b/drivers/media/pci/tw5864/tw5864-core.c
index 4d33caf83307..832788603f88 100644
--- a/drivers/media/pci/tw5864/tw5864-core.c
+++ b/drivers/media/pci/tw5864/tw5864-core.c
@@ -24,6 +24,8 @@
#include "tw5864.h"
#include "tw5864-reg.h"
+#define DRIVER_NAME "tw5864"
+
MODULE_DESCRIPTION("V4L2 driver module for tw5864-based multimedia capture & encoding devices");
MODULE_AUTHOR("Bluecherry Maintainers <maintainers@bluecherrydvr.com>");
MODULE_AUTHOR("Andrey Utkin <andrey.utkin@corp.bluecherry.net>");
@@ -246,7 +248,8 @@ static int tw5864_initdev(struct pci_dev *pci_dev,
if (!dev)
return -ENOMEM;
- snprintf(dev->name, sizeof(dev->name), "tw5864:%s", pci_name(pci_dev));
+ snprintf(dev->name, sizeof(dev->name), "%s:%s", DRIVER_NAME,
+ pci_name(pci_dev));
err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
if (err)
@@ -269,12 +272,12 @@ static int tw5864_initdev(struct pci_dev *pci_dev,
}
/* get mmio */
- err = pcim_iomap_regions(pci_dev, BIT(0), dev->name);
+ dev->mmio = pcim_iomap_region(pci_dev, 0, DRIVER_NAME);
+ err = PTR_ERR_OR_ZERO(dev->mmio);
if (err) {
dev_err(&dev->pci->dev, "Cannot request regions for MMIO\n");
goto unreg_v4l2;
}
- dev->mmio = pcim_iomap_table(pci_dev)[0];
spin_lock_init(&dev->slock);
@@ -290,7 +293,7 @@ static int tw5864_initdev(struct pci_dev *pci_dev,
/* get irq */
err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw5864_isr,
- IRQF_SHARED, "tw5864", dev);
+ IRQF_SHARED, DRIVER_NAME, dev);
if (err < 0) {
dev_err(&dev->pci->dev, "can't get IRQ %d\n", pci_dev->irq);
goto fini_video;
@@ -324,7 +327,7 @@ static void tw5864_finidev(struct pci_dev *pci_dev)
}
static struct pci_driver tw5864_pci_driver = {
- .name = "tw5864",
+ .name = DRIVER_NAME,
.id_table = tw5864_pci_tbl,
.probe = tw5864_initdev,
.remove = tw5864_finidev,
diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c
index 3975fc1b2ee3..e31f9f19a48a 100644
--- a/drivers/media/pci/zoran/zoran_card.c
+++ b/drivers/media/pci/zoran/zoran_card.c
@@ -1202,7 +1202,7 @@ static int zoran_debugfs_show(struct seq_file *seq, void *v)
seq_printf(seq, "JPG ver_dcm %u\n", zr->jpg_settings.ver_dcm);
seq_printf(seq, "JPG tmp_dcm %u\n", zr->jpg_settings.tmp_dcm);
seq_printf(seq, "JPG odd_even %u\n", zr->jpg_settings.odd_even);
- seq_printf(seq, "JPG crop %dx%d %d %d\n",
+ seq_printf(seq, "JPG crop (%d,%d)/%dx%d\n",
zr->jpg_settings.img_x,
zr->jpg_settings.img_y,
zr->jpg_settings.img_width,
diff --git a/drivers/media/pci/zoran/zr36016.c b/drivers/media/pci/zoran/zr36016.c
index 4b328ad6083f..d2e136c48a1b 100644
--- a/drivers/media/pci/zoran/zr36016.c
+++ b/drivers/media/pci/zoran/zr36016.c
@@ -216,7 +216,7 @@ static int zr36016_set_video(struct videocodec *codec, const struct tvnorm *norm
struct zr36016 *ptr = (struct zr36016 *)codec->data;
struct zoran *zr = videocodec_to_zoran(codec);
- zrdev_dbg(zr, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) call\n",
+ zrdev_dbg(zr, "%s: set_video %d.%d, (%u,%u)/%ux%u (0x%x) call\n",
ptr->name, norm->h_start, norm->v_start,
cap->x, cap->y, cap->width, cap->height,
cap->decimation);
diff --git a/drivers/media/pci/zoran/zr36050.c b/drivers/media/pci/zoran/zr36050.c
index b07d7e5c1b4a..c17965073557 100644
--- a/drivers/media/pci/zoran/zr36050.c
+++ b/drivers/media/pci/zoran/zr36050.c
@@ -547,7 +547,7 @@ static int zr36050_set_video(struct videocodec *codec, const struct tvnorm *norm
struct zoran *zr = videocodec_to_zoran(codec);
int size;
- zrdev_dbg(zr, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) q%d call\n",
+ zrdev_dbg(zr, "%s: set_video %d.%d, (%u,%u)/%ux%u (0x%x) q%d call\n",
ptr->name, norm->h_start, norm->v_start,
cap->x, cap->y, cap->width, cap->height,
cap->decimation, cap->quality);
diff --git a/drivers/media/pci/zoran/zr36060.c b/drivers/media/pci/zoran/zr36060.c
index 75fd167603dc..d6c12efc5bb6 100644
--- a/drivers/media/pci/zoran/zr36060.c
+++ b/drivers/media/pci/zoran/zr36060.c
@@ -488,7 +488,7 @@ static int zr36060_set_video(struct videocodec *codec, const struct tvnorm *norm
u32 reg;
int size;
- zrdev_dbg(zr, "%s: set_video %d/%d-%dx%d (%%%d) call\n", ptr->name,
+ zrdev_dbg(zr, "%s: set_video (%u,%u)/%ux%u (%%%d) call\n", ptr->name,
cap->x, cap->y, cap->width, cap->height, cap->decimation);
/* if () return -EINVAL;
diff --git a/drivers/media/platform/amlogic/Kconfig b/drivers/media/platform/amlogic/Kconfig
index 5014957404e9..458acf3d5fa8 100644
--- a/drivers/media/platform/amlogic/Kconfig
+++ b/drivers/media/platform/amlogic/Kconfig
@@ -2,4 +2,5 @@
comment "Amlogic media platform drivers"
+source "drivers/media/platform/amlogic/c3/Kconfig"
source "drivers/media/platform/amlogic/meson-ge2d/Kconfig"
diff --git a/drivers/media/platform/amlogic/Makefile b/drivers/media/platform/amlogic/Makefile
index d3cdb8fa4ddb..c744afcd1b9e 100644
--- a/drivers/media/platform/amlogic/Makefile
+++ b/drivers/media/platform/amlogic/Makefile
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
+
+obj-y += c3/
obj-y += meson-ge2d/
diff --git a/drivers/media/platform/amlogic/c3/Kconfig b/drivers/media/platform/amlogic/c3/Kconfig
new file mode 100644
index 000000000000..d355d3a9358d
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/Kconfig
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+source "drivers/media/platform/amlogic/c3/isp/Kconfig"
+source "drivers/media/platform/amlogic/c3/mipi-adapter/Kconfig"
+source "drivers/media/platform/amlogic/c3/mipi-csi2/Kconfig"
diff --git a/drivers/media/platform/amlogic/c3/Makefile b/drivers/media/platform/amlogic/c3/Makefile
new file mode 100644
index 000000000000..14f305a493d2
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-y += isp/
+obj-y += mipi-adapter/
+obj-y += mipi-csi2/
diff --git a/drivers/media/platform/amlogic/c3/isp/Kconfig b/drivers/media/platform/amlogic/c3/isp/Kconfig
new file mode 100644
index 000000000000..02c62a50a5e8
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config VIDEO_C3_ISP
+ tristate "Amlogic C3 Image Signal Processor (ISP) driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on VIDEO_DEV
+ depends on OF
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ help
+ Video4Linux2 driver for Amlogic C3 ISP pipeline.
+ The C3 ISP is used for processing raw images and
+ outputting results to memory.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/media/platform/amlogic/c3/isp/Makefile b/drivers/media/platform/amlogic/c3/isp/Makefile
new file mode 100644
index 000000000000..b1b064170b57
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+c3-isp-objs := c3-isp-dev.o \
+ c3-isp-params.o \
+ c3-isp-stats.o \
+ c3-isp-capture.o \
+ c3-isp-core.o \
+ c3-isp-resizer.o
+
+obj-$(CONFIG_VIDEO_C3_ISP) += c3-isp.o
diff --git a/drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c b/drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c
new file mode 100644
index 000000000000..11d85f5342f0
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c
@@ -0,0 +1,804 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#include <linux/cleanup.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "c3-isp-common.h"
+#include "c3-isp-regs.h"
+
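+/* Each WRMIFX3 (write memory interface) instance owns a 0x100-byte register window */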
+#define C3_ISP_WRMIFX3_REG(addr, id) ((addr) + (id) * 0x100)
+
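+/* The first entry is the fallback returned by c3_cap_try_fmt() for unknown fourccs */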
+static const struct c3_isp_cap_format_info cap_formats[] = {
+ /* YUV formats */
+ {
+ .mbus_code = MEDIA_BUS_FMT_YUV10_1X30,
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .format = ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_Y_ONLY,
+ .planes = ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X1,
+ .ch0_pix_bits = ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_8BITS,
+ .uv_swap = ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_VU,
+ .in_bits = ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_8BIT,
+ .hdiv = 1,
+ .vdiv = 1,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_YUV10_1X30,
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .format = ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_YUV420,
+ .planes = ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X2,
+ .ch0_pix_bits = ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_8BITS,
+ .uv_swap = ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_UV,
+ .in_bits = ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_8BIT,
+ .hdiv = 2,
+ .vdiv = 2,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_YUV10_1X30,
+ .fourcc = V4L2_PIX_FMT_NV21M,
+ .format = ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_YUV420,
+ .planes = ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X2,
+ .ch0_pix_bits = ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_8BITS,
+ .uv_swap = ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_VU,
+ .in_bits = ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_8BIT,
+ .hdiv = 2,
+ .vdiv = 2,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_YUV10_1X30,
+ .fourcc = V4L2_PIX_FMT_NV16M,
+ .format = ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_YUV422,
+ .planes = ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X2,
+ .ch0_pix_bits = ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_8BITS,
+ .uv_swap = ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_UV,
+ .in_bits = ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_8BIT,
+ .hdiv = 1,
+ .vdiv = 2
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_YUV10_1X30,
+ .fourcc = V4L2_PIX_FMT_NV61M,
+ .format = ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_YUV422,
+ .planes = ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X2,
+ .ch0_pix_bits = ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_8BITS,
+ .uv_swap = ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_VU,
+ .in_bits = ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_8BIT,
+ .hdiv = 1,
+ .vdiv = 2,
+ },
+ /* RAW formats */
+ {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB16_1X16,
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .format = ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_RAW,
+ .planes = ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X1,
+ .ch0_pix_bits = ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_16BITS,
+ .uv_swap = ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_VU,
+ .in_bits = ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_16BIT,
+ .hdiv = 1,
+ .vdiv = 1,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR16_1X16,
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .format = ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_RAW,
+ .planes = ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X1,
+ .ch0_pix_bits = ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_16BITS,
+ .uv_swap = ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_VU,
+ .in_bits = ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_16BIT,
+ .hdiv = 1,
+ .vdiv = 1,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG16_1X16,
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .format = ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_RAW,
+ .planes = ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X1,
+ .ch0_pix_bits = ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_16BITS,
+ .uv_swap = ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_VU,
+ .in_bits = ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_16BIT,
+ .hdiv = 1,
+ .vdiv = 1,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG16_1X16,
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .format = ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_RAW,
+ .planes = ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X1,
+ .ch0_pix_bits = ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_16BITS,
+ .uv_swap = ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_VU,
+ .in_bits = ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_16BIT,
+ .hdiv = 1,
+ .vdiv = 1,
+ },
+};
+
+/* Hardware configuration */
+
+/* Set the address of wrmifx3 (write memory interface) */
+static void c3_isp_cap_wrmifx3_buff(struct c3_isp_capture *cap)
+{
+ dma_addr_t y_dma_addr;
+ dma_addr_t uv_dma_addr;
+
+ if (cap->buff) {
+ y_dma_addr = cap->buff->dma_addr[C3_ISP_PLANE_Y];
+ uv_dma_addr = cap->buff->dma_addr[C3_ISP_PLANE_UV];
+ } else {
+ y_dma_addr = cap->dummy_buff.dma_addr;
+ uv_dma_addr = cap->dummy_buff.dma_addr;
+ }
+
+ c3_isp_write(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_CH0_BADDR, cap->id),
+ ISP_WRMIFX3_0_CH0_BASE_ADDR(y_dma_addr));
+
+ c3_isp_write(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_CH1_BADDR, cap->id),
+ ISP_WRMIFX3_0_CH1_BASE_ADDR(uv_dma_addr));
+}
+
+static void c3_isp_cap_wrmifx3_format(struct c3_isp_capture *cap)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &cap->format.pix_mp;
+ const struct c3_isp_cap_format_info *info = cap->format.info;
+ u32 stride;
+ u32 chrom_h;
+ u32 chrom_v;
+
+ c3_isp_write(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_FMT_SIZE, cap->id),
+ ISP_WRMIFX3_0_FMT_SIZE_HSIZE(pix_mp->width) |
+ ISP_WRMIFX3_0_FMT_SIZE_VSIZE(pix_mp->height));
+
+ c3_isp_update_bits(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_FMT_CTRL, cap->id),
+ ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_MASK, info->format);
+
+ c3_isp_update_bits(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_FMT_CTRL, cap->id),
+ ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_MASK,
+ info->in_bits);
+
+ c3_isp_update_bits(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_FMT_CTRL, cap->id),
+ ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_MASK, info->planes);
+
+ c3_isp_update_bits(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_FMT_CTRL, cap->id),
+ ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_MASK,
+ info->uv_swap);
+
+ stride = DIV_ROUND_UP(pix_mp->plane_fmt[C3_ISP_PLANE_Y].bytesperline,
+ C3_ISP_DMA_SIZE_ALIGN_BYTES);
+ c3_isp_update_bits(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_CH0_CTRL0, cap->id),
+ ISP_WRMIFX3_0_CH0_CTRL0_STRIDE_MASK,
+ ISP_WRMIFX3_0_CH0_CTRL0_STRIDE(stride));
+
+ c3_isp_update_bits(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_CH0_CTRL1, cap->id),
+ ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_MODE_MASK,
+ info->ch0_pix_bits);
+
+ c3_isp_write(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_WIN_LUMA_H, cap->id),
+ ISP_WRMIFX3_0_WIN_LUMA_H_LUMA_HEND(pix_mp->width));
+
+ c3_isp_write(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_WIN_LUMA_V, cap->id),
+ ISP_WRMIFX3_0_WIN_LUMA_V_LUMA_VEND(pix_mp->height));
+
+ stride = DIV_ROUND_UP(pix_mp->plane_fmt[C3_ISP_PLANE_UV].bytesperline,
+ C3_ISP_DMA_SIZE_ALIGN_BYTES);
+ c3_isp_update_bits(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_CH1_CTRL0, cap->id),
+ ISP_WRMIFX3_0_CH1_CTRL0_STRIDE_MASK,
+ ISP_WRMIFX3_0_CH1_CTRL0_STRIDE(stride));
+
+ c3_isp_update_bits(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_CH1_CTRL1, cap->id),
+ ISP_WRMIFX3_0_CH1_CTRL1_PIX_BITS_MODE_MASK,
+ ISP_WRMIFX3_0_CH1_CTRL1_PIX_BITS_16BITS);
+
+ chrom_h = DIV_ROUND_UP(pix_mp->width, info->hdiv);
+ c3_isp_write(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_WIN_CHROM_H, cap->id),
+ ISP_WRMIFX3_0_WIN_CHROM_H_CHROM_HEND(chrom_h));
+
+ chrom_v = DIV_ROUND_UP(pix_mp->height, info->vdiv);
+ c3_isp_write(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_WIN_CHROM_V, cap->id),
+ ISP_WRMIFX3_0_WIN_CHROM_V_CHROM_VEND(chrom_v));
+}
+
+static int c3_isp_cap_dummy_buff_create(struct c3_isp_capture *cap)
+{
+ struct c3_isp_dummy_buffer *dummy_buff = &cap->dummy_buff;
+ struct v4l2_pix_format_mplane *pix_mp = &cap->format.pix_mp;
+
+ if (pix_mp->num_planes == 1)
+ dummy_buff->size = pix_mp->plane_fmt[C3_ISP_PLANE_Y].sizeimage;
+ else
+ dummy_buff->size =
+ max(pix_mp->plane_fmt[C3_ISP_PLANE_Y].sizeimage,
+ pix_mp->plane_fmt[C3_ISP_PLANE_UV].sizeimage);
+
+ /* The driver never accesses vaddr, so no kernel mapping is required */
+ dummy_buff->vaddr = dma_alloc_attrs(cap->isp->dev, dummy_buff->size,
+ &dummy_buff->dma_addr, GFP_KERNEL,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+ if (!dummy_buff->vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void c3_isp_cap_dummy_buff_destroy(struct c3_isp_capture *cap)
+{
+ dma_free_attrs(cap->isp->dev, cap->dummy_buff.size,
+ cap->dummy_buff.vaddr, cap->dummy_buff.dma_addr,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+}
+
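+/* Program the next pending buffer (or the dummy buffer) into WRMIFX3; caller must hold buff_lock */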
+static void c3_isp_cap_cfg_buff(struct c3_isp_capture *cap)
+{
+ cap->buff = list_first_entry_or_null(&cap->pending,
+ struct c3_isp_cap_buffer, list);
+
+ c3_isp_cap_wrmifx3_buff(cap);
+
+ if (cap->buff)
+ list_del(&cap->buff->list);
+}
+
+static void c3_isp_cap_start(struct c3_isp_capture *cap)
+{
+ u32 mask;
+ u32 val;
+
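+ /* buff_lock is held only across the next statement (linux/cleanup.h) */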
+ scoped_guard(spinlock_irqsave, &cap->buff_lock)
+ c3_isp_cap_cfg_buff(cap);
+
+ c3_isp_cap_wrmifx3_format(cap);
+
+ if (cap->id == C3_ISP_CAP_DEV_0) {
+ mask = ISP_TOP_PATH_EN_WRMIF0_EN_MASK;
+ val = ISP_TOP_PATH_EN_WRMIF0_EN;
+ } else if (cap->id == C3_ISP_CAP_DEV_1) {
+ mask = ISP_TOP_PATH_EN_WRMIF1_EN_MASK;
+ val = ISP_TOP_PATH_EN_WRMIF1_EN;
+ } else {
+ mask = ISP_TOP_PATH_EN_WRMIF2_EN_MASK;
+ val = ISP_TOP_PATH_EN_WRMIF2_EN;
+ }
+
+ c3_isp_update_bits(cap->isp, ISP_TOP_PATH_EN, mask, val);
+}
+
+static void c3_isp_cap_stop(struct c3_isp_capture *cap)
+{
+ u32 mask;
+ u32 val;
+
+ if (cap->id == C3_ISP_CAP_DEV_0) {
+ mask = ISP_TOP_PATH_EN_WRMIF0_EN_MASK;
+ val = ISP_TOP_PATH_EN_WRMIF0_DIS;
+ } else if (cap->id == C3_ISP_CAP_DEV_1) {
+ mask = ISP_TOP_PATH_EN_WRMIF1_EN_MASK;
+ val = ISP_TOP_PATH_EN_WRMIF1_DIS;
+ } else {
+ mask = ISP_TOP_PATH_EN_WRMIF2_EN_MASK;
+ val = ISP_TOP_PATH_EN_WRMIF2_DIS;
+ }
+
+ c3_isp_update_bits(cap->isp, ISP_TOP_PATH_EN, mask, val);
+}
+
+static void c3_isp_cap_done(struct c3_isp_capture *cap)
+{
+ struct c3_isp_cap_buffer *buff = cap->buff;
+
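+ /* buff_lock stays held for the rest of the function */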
+ guard(spinlock_irqsave)(&cap->buff_lock);
+
+ if (buff) {
+ buff->vb.sequence = cap->isp->frm_sequence;
+ buff->vb.vb2_buf.timestamp = ktime_get();
+ buff->vb.field = V4L2_FIELD_NONE;
+ vb2_buffer_done(&buff->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ }
+
+ c3_isp_cap_cfg_buff(cap);
+}
+
+/* V4L2 video operations */
+
+static const struct c3_isp_cap_format_info *c3_cap_find_fmt(u32 fourcc)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(cap_formats); i++) {
+ if (cap_formats[i].fourcc == fourcc)
+ return &cap_formats[i];
+ }
+
+ return NULL;
+}
+
+static void c3_cap_try_fmt(struct v4l2_pix_format_mplane *pix_mp)
+{
+ const struct c3_isp_cap_format_info *fmt;
+ const struct v4l2_format_info *info;
+ struct v4l2_plane_pix_format *plane;
+
+ fmt = c3_cap_find_fmt(pix_mp->pixelformat);
+ if (!fmt)
+ fmt = &cap_formats[0];
+
+ pix_mp->width = clamp(pix_mp->width, C3_ISP_MIN_WIDTH,
+ C3_ISP_MAX_WIDTH);
+ pix_mp->height = clamp(pix_mp->height, C3_ISP_MIN_HEIGHT,
+ C3_ISP_MAX_HEIGHT);
+ pix_mp->pixelformat = fmt->fourcc;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->colorspace = V4L2_COLORSPACE_SRGB;
+ pix_mp->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ pix_mp->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+
+ info = v4l2_format_info(fmt->fourcc);
+ pix_mp->num_planes = info->mem_planes;
+ memset(pix_mp->plane_fmt, 0, sizeof(pix_mp->plane_fmt));
+
+ for (unsigned int i = 0; i < info->comp_planes; i++) {
+ unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
+ unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
+
+ plane = &pix_mp->plane_fmt[i];
+
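+ /* Per-plane stride in bytes, rounded up to the ISP DMA alignment */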
+ plane->bytesperline = DIV_ROUND_UP(pix_mp->width, hdiv) *
+ info->bpp[i] / info->bpp_div[i];
+ plane->bytesperline = ALIGN(plane->bytesperline,
+ C3_ISP_DMA_SIZE_ALIGN_BYTES);
+ plane->sizeimage = plane->bytesperline *
+ DIV_ROUND_UP(pix_mp->height, vdiv);
+ }
+}
+
+static void c3_isp_cap_return_buffers(struct c3_isp_capture *cap,
+ enum vb2_buffer_state state)
+{
+ struct c3_isp_cap_buffer *buff;
+
+ guard(spinlock_irqsave)(&cap->buff_lock);
+
+ if (cap->buff) {
+ vb2_buffer_done(&cap->buff->vb.vb2_buf, state);
+ cap->buff = NULL;
+ }
+
+ while (!list_empty(&cap->pending)) {
+ buff = list_first_entry(&cap->pending,
+ struct c3_isp_cap_buffer, list);
+ list_del(&buff->list);
+ vb2_buffer_done(&buff->vb.vb2_buf, state);
+ }
+}
+
+static int c3_isp_cap_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, C3_ISP_DRIVER_NAME, sizeof(cap->driver));
+ strscpy(cap->card, "AML C3 ISP", sizeof(cap->card));
+
+ return 0;
+}
+
+static int c3_isp_cap_enum_fmt(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ const struct c3_isp_cap_format_info *fmt;
+ unsigned int index = 0;
+ unsigned int i;
+
+ if (!f->mbus_code) {
+ if (f->index >= ARRAY_SIZE(cap_formats))
+ return -EINVAL;
+
+ fmt = &cap_formats[f->index];
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cap_formats); i++) {
+ fmt = &cap_formats[i];
+ if (f->mbus_code != fmt->mbus_code)
+ continue;
+
+ if (index++ == f->index) {
+ f->pixelformat = cap_formats[i].fourcc;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int c3_isp_cap_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct c3_isp_capture *cap = video_drvdata(file);
+
+ f->fmt.pix_mp = cap->format.pix_mp;
+
+ return 0;
+}
+
+static int c3_isp_cap_s_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct c3_isp_capture *cap = video_drvdata(file);
+
+ c3_cap_try_fmt(&f->fmt.pix_mp);
+
+ cap->format.pix_mp = f->fmt.pix_mp;
+ cap->format.info = c3_cap_find_fmt(f->fmt.pix_mp.pixelformat);
+
+ return 0;
+}
+
+static int c3_isp_cap_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ c3_cap_try_fmt(&f->fmt.pix_mp);
+
+ return 0;
+}
+
+static int c3_isp_cap_enum_frmsize(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ const struct c3_isp_cap_format_info *fmt;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ fmt = c3_cap_find_fmt(fsize->pixel_format);
+ if (!fmt)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = C3_ISP_MIN_WIDTH;
+ fsize->stepwise.min_height = C3_ISP_MIN_HEIGHT;
+ fsize->stepwise.max_width = C3_ISP_MAX_WIDTH;
+ fsize->stepwise.max_height = C3_ISP_MAX_HEIGHT;
+ fsize->stepwise.step_width = 2;
+ fsize->stepwise.step_height = 2;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops isp_cap_v4l2_ioctl_ops = {
+ .vidioc_querycap = c3_isp_cap_querycap,
+ .vidioc_enum_fmt_vid_cap = c3_isp_cap_enum_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = c3_isp_cap_g_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = c3_isp_cap_s_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = c3_isp_cap_try_fmt_mplane,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_enum_framesizes = c3_isp_cap_enum_frmsize,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_file_operations isp_cap_v4l2_fops = {
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+static int c3_isp_cap_link_validate(struct media_link *link)
+{
+ struct video_device *vdev =
+ media_entity_to_video_device(link->sink->entity);
+ struct v4l2_subdev *sd =
+ media_entity_to_v4l2_subdev(link->source->entity);
+ struct c3_isp_capture *cap = video_get_drvdata(vdev);
+ struct v4l2_subdev_format src_fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .pad = link->source->index,
+ };
+ int ret;
+
+ ret = v4l2_subdev_call_state_active(sd, pad, get_fmt, &src_fmt);
+ if (ret)
+ return ret;
+
+ if (src_fmt.format.width != cap->format.pix_mp.width ||
+ src_fmt.format.height != cap->format.pix_mp.height ||
+ src_fmt.format.code != cap->format.info->mbus_code) {
+ dev_err(cap->isp->dev,
+ "link %s: %u -> %s: %u not valid: 0x%04x/%ux%u not match 0x%04x/%ux%u\n",
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name, link->sink->index,
+ src_fmt.format.code, src_fmt.format.width,
+ src_fmt.format.height, cap->format.info->mbus_code,
+ cap->format.pix_mp.width, cap->format.pix_mp.height);
+
+ return -EPIPE;
+ }
+
+ return 0;
+}
+
+static const struct media_entity_operations isp_cap_entity_ops = {
+ .link_validate = c3_isp_cap_link_validate,
+};
+
+static int c3_isp_vb2_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct c3_isp_capture *cap = vb2_get_drv_priv(q);
+ const struct v4l2_pix_format_mplane *pix_mp = &cap->format.pix_mp;
+ unsigned int i;
+
+ if (*num_planes) {
+ if (*num_planes != pix_mp->num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < pix_mp->num_planes; i++)
+ if (sizes[i] < pix_mp->plane_fmt[i].sizeimage)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ *num_planes = pix_mp->num_planes;
+ for (i = 0; i < pix_mp->num_planes; i++)
+ sizes[i] = pix_mp->plane_fmt[i].sizeimage;
+
+ return 0;
+}
+
+static void c3_isp_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+ struct c3_isp_cap_buffer *buf =
+ container_of(v4l2_buf, struct c3_isp_cap_buffer, vb);
+ struct c3_isp_capture *cap = vb2_get_drv_priv(vb->vb2_queue);
+
+ guard(spinlock_irqsave)(&cap->buff_lock);
+
+ list_add_tail(&buf->list, &cap->pending);
+}
+
+static int c3_isp_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ struct c3_isp_capture *cap = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size;
+
+ for (unsigned int i = 0; i < cap->format.pix_mp.num_planes; i++) {
+ size = cap->format.pix_mp.plane_fmt[i].sizeimage;
+ if (vb2_plane_size(vb, i) < size) {
+ dev_err(cap->isp->dev,
+ "User buffer too small (%ld < %lu)\n",
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ return 0;
+}
+
+static int c3_isp_vb2_buf_init(struct vb2_buffer *vb)
+{
+ struct c3_isp_capture *cap = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+ struct c3_isp_cap_buffer *buf =
+ container_of(v4l2_buf, struct c3_isp_cap_buffer, vb);
+
+ for (unsigned int i = 0; i < cap->format.pix_mp.num_planes; i++)
+ buf->dma_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+
+ return 0;
+}
+
+static int c3_isp_vb2_start_streaming(struct vb2_queue *q,
+ unsigned int count)
+{
+ struct c3_isp_capture *cap = vb2_get_drv_priv(q);
+ int ret;
+
+ ret = video_device_pipeline_start(&cap->vdev, &cap->isp->pipe);
+ if (ret) {
+ dev_err(cap->isp->dev,
+ "Failed to start cap%u pipeline: %d\n", cap->id, ret);
+ goto err_return_buffers;
+ }
+
+ ret = c3_isp_cap_dummy_buff_create(cap);
+ if (ret)
+ goto err_pipeline_stop;
+
+ ret = pm_runtime_resume_and_get(cap->isp->dev);
+ if (ret)
+ goto err_dummy_destroy;
+
+ c3_isp_cap_start(cap);
+
+ ret = v4l2_subdev_enable_streams(&cap->rsz->sd, C3_ISP_RSZ_PAD_SOURCE,
+ BIT(0));
+ if (ret)
+ goto err_pm_put;
+
+ return 0;
+
+err_pm_put:
+ pm_runtime_put(cap->isp->dev);
+err_dummy_destroy:
+ c3_isp_cap_dummy_buff_destroy(cap);
+err_pipeline_stop:
+ video_device_pipeline_stop(&cap->vdev);
+err_return_buffers:
+ c3_isp_cap_return_buffers(cap, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void c3_isp_vb2_stop_streaming(struct vb2_queue *q)
+{
+ struct c3_isp_capture *cap = vb2_get_drv_priv(q);
+
+ c3_isp_cap_stop(cap);
+
+ c3_isp_cap_return_buffers(cap, VB2_BUF_STATE_ERROR);
+
+ v4l2_subdev_disable_streams(&cap->rsz->sd, C3_ISP_RSZ_PAD_SOURCE,
+ BIT(0));
+
+ pm_runtime_put(cap->isp->dev);
+
+ c3_isp_cap_dummy_buff_destroy(cap);
+
+ video_device_pipeline_stop(&cap->vdev);
+}
+
+static const struct vb2_ops isp_video_vb2_ops = {
+ .queue_setup = c3_isp_vb2_queue_setup,
+ .buf_queue = c3_isp_vb2_buf_queue,
+ .buf_prepare = c3_isp_vb2_buf_prepare,
+ .buf_init = c3_isp_vb2_buf_init,
+ .start_streaming = c3_isp_vb2_start_streaming,
+ .stop_streaming = c3_isp_vb2_stop_streaming,
+};
+
+static int c3_isp_register_capture(struct c3_isp_capture *cap)
+{
+ struct video_device *vdev = &cap->vdev;
+ struct vb2_queue *vb2_q = &cap->vb2_q;
+ int ret;
+
+ snprintf(vdev->name, sizeof(vdev->name), "c3-isp-cap%u", cap->id);
+ vdev->fops = &isp_cap_v4l2_fops;
+ vdev->ioctl_ops = &isp_cap_v4l2_ioctl_ops;
+ vdev->v4l2_dev = &cap->isp->v4l2_dev;
+ vdev->entity.ops = &isp_cap_entity_ops;
+ vdev->lock = &cap->lock;
+ vdev->minor = -1;
+ vdev->queue = vb2_q;
+ vdev->release = video_device_release_empty;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_STREAMING;
+ vdev->vfl_dir = VFL_DIR_RX;
+ video_set_drvdata(vdev, cap);
+
+ vb2_q->drv_priv = cap;
+ vb2_q->mem_ops = &vb2_dma_contig_memops;
+ vb2_q->ops = &isp_video_vb2_ops;
+ vb2_q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ vb2_q->io_modes = VB2_DMABUF | VB2_MMAP;
+ vb2_q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vb2_q->buf_struct_size = sizeof(struct c3_isp_cap_buffer);
+ vb2_q->dev = cap->isp->dev;
+ vb2_q->lock = &cap->lock;
+
+ ret = vb2_queue_init(vb2_q);
+ if (ret)
+ return ret;
+
+ cap->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vdev->entity, 1, &cap->pad);
+ if (ret)
+ goto err_queue_release;
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(cap->isp->dev,
+ "Failed to register %s: %d\n", vdev->name, ret);
+ goto err_entity_cleanup;
+ }
+
+ return 0;
+
+err_entity_cleanup:
+ media_entity_cleanup(&vdev->entity);
+err_queue_release:
+ vb2_queue_release(vb2_q);
+ return ret;
+}
+
+int c3_isp_captures_register(struct c3_isp_device *isp)
+{
+ int ret;
+ unsigned int i;
+ struct c3_isp_capture *cap;
+
+ for (i = C3_ISP_CAP_DEV_0; i < C3_ISP_NUM_CAP_DEVS; i++) {
+ cap = &isp->caps[i];
+ memset(cap, 0, sizeof(*cap));
+
+ cap->format.pix_mp.width = C3_ISP_DEFAULT_WIDTH;
+ cap->format.pix_mp.height = C3_ISP_DEFAULT_HEIGHT;
+ cap->format.pix_mp.field = V4L2_FIELD_NONE;
+ cap->format.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M;
+ cap->format.pix_mp.colorspace = V4L2_COLORSPACE_SRGB;
+ cap->format.info =
+ c3_cap_find_fmt(cap->format.pix_mp.pixelformat);
+
+ c3_cap_try_fmt(&cap->format.pix_mp);
+
+ cap->id = i;
+ cap->rsz = &isp->resizers[i];
+ cap->isp = isp;
+ INIT_LIST_HEAD(&cap->pending);
+ spin_lock_init(&cap->buff_lock);
+ mutex_init(&cap->lock);
+
+ ret = c3_isp_register_capture(cap);
+ if (ret) {
+ cap->isp = NULL;
+ mutex_destroy(&cap->lock);
+ c3_isp_captures_unregister(isp);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void c3_isp_captures_unregister(struct c3_isp_device *isp)
+{
+ unsigned int i;
+ struct c3_isp_capture *cap;
+
+ for (i = C3_ISP_CAP_DEV_0; i < C3_ISP_NUM_CAP_DEVS; i++) {
+ cap = &isp->caps[i];
+
+ if (!cap->isp)
+ continue;
+ video_unregister_device(&cap->vdev);
+ vb2_queue_release(&cap->vb2_q);
+ media_entity_cleanup(&cap->vdev.entity);
+ mutex_destroy(&cap->lock);
+ }
+}
+
+void c3_isp_captures_isr(struct c3_isp_device *isp)
+{
+ c3_isp_cap_done(&isp->caps[C3_ISP_CAP_DEV_0]);
+ c3_isp_cap_done(&isp->caps[C3_ISP_CAP_DEV_1]);
+ c3_isp_cap_done(&isp->caps[C3_ISP_CAP_DEV_2]);
+}
diff --git a/drivers/media/platform/amlogic/c3/isp/c3-isp-common.h b/drivers/media/platform/amlogic/c3/isp/c3-isp-common.h
new file mode 100644
index 000000000000..cb470802e61e
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/c3-isp-common.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#ifndef __C3_ISP_COMMON_H__
+#define __C3_ISP_COMMON_H__
+
+#include <linux/clk.h>
+
+#include <media/media-device.h>
+#include <media/videobuf2-core.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-v4l2.h>
+
+#define C3_ISP_DRIVER_NAME "c3-isp"
+#define C3_ISP_CLOCK_NUM_MAX 3
+
+#define C3_ISP_DEFAULT_WIDTH 1920
+#define C3_ISP_DEFAULT_HEIGHT 1080
+#define C3_ISP_MAX_WIDTH 2888
+#define C3_ISP_MAX_HEIGHT 2240
+#define C3_ISP_MIN_WIDTH 160
+#define C3_ISP_MIN_HEIGHT 120
+
+#define C3_ISP_DMA_SIZE_ALIGN_BYTES 16
+
+enum c3_isp_core_pads {
+ C3_ISP_CORE_PAD_SINK_VIDEO,
+ C3_ISP_CORE_PAD_SINK_PARAMS,
+ C3_ISP_CORE_PAD_SOURCE_STATS,
+ C3_ISP_CORE_PAD_SOURCE_VIDEO_0,
+ C3_ISP_CORE_PAD_SOURCE_VIDEO_1,
+ C3_ISP_CORE_PAD_SOURCE_VIDEO_2,
+ C3_ISP_CORE_PAD_MAX
+};
+
+enum c3_isp_resizer_ids {
+ C3_ISP_RSZ_0,
+ C3_ISP_RSZ_1,
+ C3_ISP_RSZ_2,
+ C3_ISP_NUM_RSZ
+};
+
+enum c3_isp_resizer_pads {
+ C3_ISP_RSZ_PAD_SINK,
+ C3_ISP_RSZ_PAD_SOURCE,
+ C3_ISP_RSZ_PAD_MAX
+};
+
+enum c3_isp_cap_devs {
+ C3_ISP_CAP_DEV_0,
+ C3_ISP_CAP_DEV_1,
+ C3_ISP_CAP_DEV_2,
+ C3_ISP_NUM_CAP_DEVS
+};
+
+enum c3_isp_planes {
+ C3_ISP_PLANE_Y,
+ C3_ISP_PLANE_UV,
+ C3_ISP_NUM_PLANES
+};
+
+/*
+ * struct c3_isp_cap_format_info - The image format of capture device
+ *
+ * @mbus_code: the mbus code
+ * @fourcc: the pixel format
+ * @format: defines the output format of hardware
+ * @planes: defines the multi-plane layout used by the hardware
+ * @ch0_pix_bits: defines the channel 0 pixel bits mode of hardware
+ * @uv_swap: defines the uv swap flag of hardware
+ * @in_bits: defines the input bits of hardware
+ * @hdiv: horizontal chroma subsampling factor of hardware
+ * @vdiv: vertical chroma subsampling factor of hardware
+ */
+struct c3_isp_cap_format_info {
+ u32 mbus_code;
+ u32 fourcc;
+ u32 format;
+ u32 planes;
+ u32 ch0_pix_bits;
+ u8 uv_swap;
+ u8 in_bits;
+ u8 hdiv;
+ u8 vdiv;
+};
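+
+/*
+ * For example, a semi-planar 4:2:0 format such as NV12 would use hdiv = 2
+ * and vdiv = 2, as the chroma plane is subsampled by two in both
+ * directions, while a 4:2:2 format would use hdiv = 2 and vdiv = 1. The
+ * actual entries live in the cap_formats[] table.
+ */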
+
+/*
+ * struct c3_isp_cap_buffer - A container of vb2 buffer used by the capture
+ *                            video devices
+ *
+ * @vb: vb2 buffer
+ * @dma_addr: buffer physical address
+ * @list: entry of the buffer in the queue
+ */
+struct c3_isp_cap_buffer {
+ struct vb2_v4l2_buffer vb;
+ dma_addr_t dma_addr[C3_ISP_NUM_PLANES];
+ struct list_head list;
+};
+
+/*
+ * struct c3_isp_stats_buffer - A container of vb2 buffer used by the stats
+ *                              video device
+ *
+ * @vb: vb2 buffer
+ * @dma_addr: buffer physical address
+ * @list: entry of the buffer in the queue
+ */
+struct c3_isp_stats_buffer {
+ struct vb2_v4l2_buffer vb;
+ dma_addr_t dma_addr;
+ struct list_head list;
+};
+
+/*
+ * struct c3_isp_params_buffer - A container of vb2 buffer used by the
+ * params video device
+ *
+ * @vb: vb2 buffer
+ * @cfg: scratch buffer used for caching the ISP configuration parameters
+ * @list: entry of the buffer in the queue
+ */
+struct c3_isp_params_buffer {
+ struct vb2_v4l2_buffer vb;
+ void *cfg;
+ struct list_head list;
+};
+
+/*
+ * struct c3_isp_dummy_buffer - A buffer to write the next frame to in case
+ * there are no vb2 buffers available.
+ *
+ * @vaddr: return value of call to dma_alloc_attrs
+ * @dma_addr: dma address of the buffer
+ * @size: size of the buffer
+ */
+struct c3_isp_dummy_buffer {
+ void *vaddr;
+ dma_addr_t dma_addr;
+ u32 size;
+};
+
+/*
+ * struct c3_isp_core - ISP core subdev
+ *
+ * @sd: ISP sub-device
+ * @pads: ISP sub-device pads
+ * @src_pad: source sub-device pad
+ * @isp: pointer to c3_isp_device
+ */
+struct c3_isp_core {
+ struct v4l2_subdev sd;
+ struct media_pad pads[C3_ISP_CORE_PAD_MAX];
+ struct media_pad *src_pad;
+ struct c3_isp_device *isp;
+};
+
+/*
+ * struct c3_isp_resizer - ISP resizer subdev
+ *
+ * @id: resizer id
+ * @sd: resizer sub-device
+ * @pads: resizer sub-device pads
+ * @src_sd: source sub-device
+ * @isp: pointer to c3_isp_device
+ * @src_pad: the pad of source sub-device
+ */
+struct c3_isp_resizer {
+ enum c3_isp_resizer_ids id;
+ struct v4l2_subdev sd;
+ struct media_pad pads[C3_ISP_RSZ_PAD_MAX];
+ struct v4l2_subdev *src_sd;
+ struct c3_isp_device *isp;
+ u32 src_pad;
+};
+
+/*
+ * struct c3_isp_stats - ISP statistics device
+ *
+ * @vb2_q: vb2 buffer queue
+ * @vdev: video node
+ * @vfmt: v4l2_format of the metadata format
+ * @pad: media pad
+ * @lock: protects vb2_q, vdev
+ * @isp: pointer to c3_isp_device
+ * @buff: in use buffer
+ * @buff_lock: protects stats buffer
+ * @pending: stats buffer list head
+ */
+struct c3_isp_stats {
+ struct vb2_queue vb2_q;
+ struct video_device vdev;
+ struct v4l2_format vfmt;
+ struct media_pad pad;
+
+ struct mutex lock; /* Protects vb2_q, vdev */
+ struct c3_isp_device *isp;
+
+ struct c3_isp_stats_buffer *buff;
+ spinlock_t buff_lock; /* Protects stats buffer */
+ struct list_head pending;
+};
+
+/*
+ * struct c3_isp_params - ISP parameters device
+ *
+ * @vb2_q: vb2 buffer queue
+ * @vdev: video node
+ * @vfmt: v4l2_format of the metadata format
+ * @pad: media pad
+ * @lock: protects vb2_q, vdev
+ * @isp: pointer to c3_isp_device
+ * @buff: in use buffer
+ * @buff_lock: protects params buffer
+ * @pending: params buffer list head
+ */
+struct c3_isp_params {
+ struct vb2_queue vb2_q;
+ struct video_device vdev;
+ struct v4l2_format vfmt;
+ struct media_pad pad;
+
+ struct mutex lock; /* Protects vb2_q, vdev */
+ struct c3_isp_device *isp;
+
+ struct c3_isp_params_buffer *buff;
+ spinlock_t buff_lock; /* Protects params buffer */
+ struct list_head pending;
+};
+
+/*
+ * struct c3_isp_capture - ISP capture device
+ *
+ * @id: capture device ID
+ * @vb2_q: vb2 buffer queue
+ * @vdev: video node
+ * @pad: media pad
+ * @lock: protects vb2_q, vdev
+ * @isp: pointer to c3_isp_device
+ * @rsz: pointer to c3_isp_resizer
+ * @dummy_buff: dummy buffer written when no capture buffer is available
+ * @buff: in use buffer
+ * @buff_lock: protects capture buffer
+ * @pending: capture buffer list head
+ * @format.info: a pointer to the c3_isp_cap_format_info of the pixel format
+ * @format.pix_mp: buffer format
+ */
+struct c3_isp_capture {
+ enum c3_isp_cap_devs id;
+ struct vb2_queue vb2_q;
+ struct video_device vdev;
+ struct media_pad pad;
+
+ struct mutex lock; /* Protects vb2_q, vdev */
+ struct c3_isp_device *isp;
+ struct c3_isp_resizer *rsz;
+
+ struct c3_isp_dummy_buffer dummy_buff;
+ struct c3_isp_cap_buffer *buff;
+ spinlock_t buff_lock; /* Protects capture buffer */
+ struct list_head pending;
+ struct {
+ const struct c3_isp_cap_format_info *info;
+ struct v4l2_pix_format_mplane pix_mp;
+ } format;
+};
+
+/**
+ * struct c3_isp_info - ISP information
+ *
+ * @clocks: array of ISP clock names
+ * @clock_num: actual clock number
+ */
+struct c3_isp_info {
+ char *clocks[C3_ISP_CLOCK_NUM_MAX];
+ u32 clock_num;
+};
+
+/**
+ * struct c3_isp_device - ISP platform device
+ *
+ * @dev: pointer to the struct device
+ * @base: base register address
+ * @clks: array of clocks
+ * @notifier: notifier to register on the v4l2-async API
+ * @v4l2_dev: v4l2_device variable
+ * @media_dev: media device variable
+ * @pipe: media pipeline
+ * @core: ISP core subdev
+ * @resizers: ISP resizer subdevs
+ * @stats: ISP stats device
+ * @params: ISP params device
+ * @caps: array of ISP capture devices
+ * @frm_sequence: used to record frame id
+ * @info: version-specific ISP information
+ */
+struct c3_isp_device {
+ struct device *dev;
+ void __iomem *base;
+ struct clk_bulk_data clks[C3_ISP_CLOCK_NUM_MAX];
+
+ struct v4l2_async_notifier notifier;
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+ struct media_pipeline pipe;
+
+ struct c3_isp_core core;
+ struct c3_isp_resizer resizers[C3_ISP_NUM_RSZ];
+ struct c3_isp_stats stats;
+ struct c3_isp_params params;
+ struct c3_isp_capture caps[C3_ISP_NUM_CAP_DEVS];
+
+ u32 frm_sequence;
+ const struct c3_isp_info *info;
+};
+
+u32 c3_isp_read(struct c3_isp_device *isp, u32 reg);
+void c3_isp_write(struct c3_isp_device *isp, u32 reg, u32 val);
+void c3_isp_update_bits(struct c3_isp_device *isp, u32 reg, u32 mask, u32 val);
+
+void c3_isp_core_queue_sof(struct c3_isp_device *isp);
+int c3_isp_core_register(struct c3_isp_device *isp);
+void c3_isp_core_unregister(struct c3_isp_device *isp);
+int c3_isp_resizers_register(struct c3_isp_device *isp);
+void c3_isp_resizers_unregister(struct c3_isp_device *isp);
+int c3_isp_captures_register(struct c3_isp_device *isp);
+void c3_isp_captures_unregister(struct c3_isp_device *isp);
+void c3_isp_captures_isr(struct c3_isp_device *isp);
+void c3_isp_stats_pre_cfg(struct c3_isp_device *isp);
+int c3_isp_stats_register(struct c3_isp_device *isp);
+void c3_isp_stats_unregister(struct c3_isp_device *isp);
+void c3_isp_stats_isr(struct c3_isp_device *isp);
+void c3_isp_params_pre_cfg(struct c3_isp_device *isp);
+int c3_isp_params_register(struct c3_isp_device *isp);
+void c3_isp_params_unregister(struct c3_isp_device *isp);
+void c3_isp_params_isr(struct c3_isp_device *isp);
+
+#endif
diff --git a/drivers/media/platform/amlogic/c3/isp/c3-isp-core.c b/drivers/media/platform/amlogic/c3/isp/c3-isp-core.c
new file mode 100644
index 000000000000..ff6413fff889
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/c3-isp-core.c
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#include <linux/media/amlogic/c3-isp-config.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-event.h>
+
+#include "c3-isp-common.h"
+#include "c3-isp-regs.h"
+
+#define C3_ISP_CORE_SUBDEV_NAME "c3-isp-core"
+
+#define C3_ISP_PHASE_OFFSET_0 0
+#define C3_ISP_PHASE_OFFSET_1 1
+#define C3_ISP_PHASE_OFFSET_NONE 0xff
+
+#define C3_ISP_CORE_DEF_SINK_PAD_FMT MEDIA_BUS_FMT_SRGGB10_1X10
+#define C3_ISP_CORE_DEF_SRC_PAD_FMT MEDIA_BUS_FMT_YUV10_1X30
+
+/*
+ * struct c3_isp_core_format_info - ISP core format information
+ *
+ * @mbus_code: the mbus code
+ * @pads: bitmask detailing valid pads for this mbus_code
+ * @xofst: horizontal phase offset of hardware
+ * @yofst: vertical phase offset of hardware
+ * @is_raw: the raw format flag of mbus code
+ */
+struct c3_isp_core_format_info {
+ u32 mbus_code;
+ u32 pads;
+ u8 xofst;
+ u8 yofst;
+ bool is_raw;
+};
+
+static const struct c3_isp_core_format_info c3_isp_core_fmts[] = {
+ /* RAW formats */
+ {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .pads = BIT(C3_ISP_CORE_PAD_SINK_VIDEO),
+ .xofst = C3_ISP_PHASE_OFFSET_0,
+ .yofst = C3_ISP_PHASE_OFFSET_1,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .pads = BIT(C3_ISP_CORE_PAD_SINK_VIDEO),
+ .xofst = C3_ISP_PHASE_OFFSET_1,
+ .yofst = C3_ISP_PHASE_OFFSET_1,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .pads = BIT(C3_ISP_CORE_PAD_SINK_VIDEO),
+ .xofst = C3_ISP_PHASE_OFFSET_0,
+ .yofst = C3_ISP_PHASE_OFFSET_0,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .pads = BIT(C3_ISP_CORE_PAD_SINK_VIDEO),
+ .xofst = C3_ISP_PHASE_OFFSET_1,
+ .yofst = C3_ISP_PHASE_OFFSET_0,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .pads = BIT(C3_ISP_CORE_PAD_SINK_VIDEO),
+ .xofst = C3_ISP_PHASE_OFFSET_0,
+ .yofst = C3_ISP_PHASE_OFFSET_1,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .pads = BIT(C3_ISP_CORE_PAD_SINK_VIDEO),
+ .xofst = C3_ISP_PHASE_OFFSET_1,
+ .yofst = C3_ISP_PHASE_OFFSET_1,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .pads = BIT(C3_ISP_CORE_PAD_SINK_VIDEO),
+ .xofst = C3_ISP_PHASE_OFFSET_0,
+ .yofst = C3_ISP_PHASE_OFFSET_0,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .pads = BIT(C3_ISP_CORE_PAD_SINK_VIDEO),
+ .xofst = C3_ISP_PHASE_OFFSET_1,
+ .yofst = C3_ISP_PHASE_OFFSET_0,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB16_1X16,
+ .pads = BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_0) |
+ BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_1) |
+ BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_2),
+ .xofst = C3_ISP_PHASE_OFFSET_NONE,
+ .yofst = C3_ISP_PHASE_OFFSET_NONE,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR16_1X16,
+ .pads = BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_0) |
+ BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_1) |
+ BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_2),
+ .xofst = C3_ISP_PHASE_OFFSET_NONE,
+ .yofst = C3_ISP_PHASE_OFFSET_NONE,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG16_1X16,
+ .pads = BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_0) |
+ BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_1) |
+ BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_2),
+ .xofst = C3_ISP_PHASE_OFFSET_NONE,
+ .yofst = C3_ISP_PHASE_OFFSET_NONE,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG16_1X16,
+ .pads = BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_0) |
+ BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_1) |
+ BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_2),
+ .xofst = C3_ISP_PHASE_OFFSET_NONE,
+ .yofst = C3_ISP_PHASE_OFFSET_NONE,
+ .is_raw = true,
+ },
+ /* YUV formats */
+ {
+ .mbus_code = MEDIA_BUS_FMT_YUV10_1X30,
+ .pads = BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_0) |
+ BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_1) |
+ BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_2),
+ .xofst = C3_ISP_PHASE_OFFSET_NONE,
+ .yofst = C3_ISP_PHASE_OFFSET_NONE,
+ .is_raw = false,
+ },
+};
+
+static const struct c3_isp_core_format_info
+*core_find_format_by_code(u32 code, u32 pad)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(c3_isp_core_fmts); i++) {
+ const struct c3_isp_core_format_info *info =
+ &c3_isp_core_fmts[i];
+
+ if (info->mbus_code == code && info->pads & BIT(pad))
+ return info;
+ }
+
+ return NULL;
+}
+
+static const struct c3_isp_core_format_info
+*core_find_format_by_index(u32 index, u32 pad)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(c3_isp_core_fmts); i++) {
+ const struct c3_isp_core_format_info *info =
+ &c3_isp_core_fmts[i];
+
+ if (!(info->pads & BIT(pad)))
+ continue;
+
+ if (!index)
+ return info;
+
+ index--;
+ }
+
+ return NULL;
+}
+
+static void c3_isp_core_enable(struct c3_isp_device *isp)
+{
+ c3_isp_update_bits(isp, ISP_TOP_IRQ_EN, ISP_TOP_IRQ_EN_FRM_END_MASK,
+ ISP_TOP_IRQ_EN_FRM_END_EN);
+ c3_isp_update_bits(isp, ISP_TOP_IRQ_EN, ISP_TOP_IRQ_EN_FRM_RST_MASK,
+ ISP_TOP_IRQ_EN_FRM_RST_EN);
+
+ /* Enable image data to ISP core */
+ c3_isp_update_bits(isp, ISP_TOP_PATH_SEL, ISP_TOP_PATH_SEL_CORE_MASK,
+ ISP_TOP_PATH_SEL_CORE_MIPI_CORE);
+}
+
+static void c3_isp_core_disable(struct c3_isp_device *isp)
+{
+ /* Disable image data to ISP core */
+ c3_isp_update_bits(isp, ISP_TOP_PATH_SEL, ISP_TOP_PATH_SEL_CORE_MASK,
+ ISP_TOP_PATH_SEL_CORE_CORE_DIS);
+
+ c3_isp_update_bits(isp, ISP_TOP_IRQ_EN, ISP_TOP_IRQ_EN_FRM_END_MASK,
+ ISP_TOP_IRQ_EN_FRM_END_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_IRQ_EN, ISP_TOP_IRQ_EN_FRM_RST_MASK,
+ ISP_TOP_IRQ_EN_FRM_RST_DIS);
+}
+
+/* Set the phase offset of blc, wb and lns */
+static void c3_isp_core_lswb_ofst(struct c3_isp_device *isp,
+ u8 xofst, u8 yofst)
+{
+ c3_isp_update_bits(isp, ISP_LSWB_BLC_PHSOFST,
+ ISP_LSWB_BLC_PHSOFST_HORIZ_OFST_MASK,
+ ISP_LSWB_BLC_PHSOFST_HORIZ_OFST(xofst));
+ c3_isp_update_bits(isp, ISP_LSWB_BLC_PHSOFST,
+ ISP_LSWB_BLC_PHSOFST_VERT_OFST_MASK,
+ ISP_LSWB_BLC_PHSOFST_VERT_OFST(yofst));
+
+ c3_isp_update_bits(isp, ISP_LSWB_WB_PHSOFST,
+ ISP_LSWB_WB_PHSOFST_HORIZ_OFST_MASK,
+ ISP_LSWB_WB_PHSOFST_HORIZ_OFST(xofst));
+ c3_isp_update_bits(isp, ISP_LSWB_WB_PHSOFST,
+ ISP_LSWB_WB_PHSOFST_VERT_OFST_MASK,
+ ISP_LSWB_WB_PHSOFST_VERT_OFST(yofst));
+
+ c3_isp_update_bits(isp, ISP_LSWB_LNS_PHSOFST,
+ ISP_LSWB_LNS_PHSOFST_HORIZ_OFST_MASK,
+ ISP_LSWB_LNS_PHSOFST_HORIZ_OFST(xofst));
+ c3_isp_update_bits(isp, ISP_LSWB_LNS_PHSOFST,
+ ISP_LSWB_LNS_PHSOFST_VERT_OFST_MASK,
+ ISP_LSWB_LNS_PHSOFST_VERT_OFST(yofst));
+}
+
+/* Set the phase offset of af, ae and awb */
+static void c3_isp_core_3a_ofst(struct c3_isp_device *isp,
+ u8 xofst, u8 yofst)
+{
+ c3_isp_update_bits(isp, ISP_AF_CTRL, ISP_AF_CTRL_HORIZ_OFST_MASK,
+ ISP_AF_CTRL_HORIZ_OFST(xofst));
+ c3_isp_update_bits(isp, ISP_AF_CTRL, ISP_AF_CTRL_VERT_OFST_MASK,
+ ISP_AF_CTRL_VERT_OFST(yofst));
+
+ c3_isp_update_bits(isp, ISP_AE_CTRL, ISP_AE_CTRL_HORIZ_OFST_MASK,
+ ISP_AE_CTRL_HORIZ_OFST(xofst));
+ c3_isp_update_bits(isp, ISP_AE_CTRL, ISP_AE_CTRL_VERT_OFST_MASK,
+ ISP_AE_CTRL_VERT_OFST(yofst));
+
+ c3_isp_update_bits(isp, ISP_AWB_CTRL, ISP_AWB_CTRL_HORIZ_OFST_MASK,
+ ISP_AWB_CTRL_HORIZ_OFST(xofst));
+ c3_isp_update_bits(isp, ISP_AWB_CTRL, ISP_AWB_CTRL_VERT_OFST_MASK,
+ ISP_AWB_CTRL_VERT_OFST(yofst));
+}
+
+/* Set the phase offset of demosaic */
+static void c3_isp_core_dms_ofst(struct c3_isp_device *isp,
+ u8 xofst, u8 yofst)
+{
+ c3_isp_update_bits(isp, ISP_DMS_COMMON_PARAM0,
+ ISP_DMS_COMMON_PARAM0_HORIZ_PHS_OFST_MASK,
+ ISP_DMS_COMMON_PARAM0_HORIZ_PHS_OFST(xofst));
+ c3_isp_update_bits(isp, ISP_DMS_COMMON_PARAM0,
+ ISP_DMS_COMMON_PARAM0_VERT_PHS_OFST_MASK,
+ ISP_DMS_COMMON_PARAM0_VERT_PHS_OFST(yofst));
+}
+
+static void c3_isp_core_cfg_format(struct c3_isp_device *isp,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *fmt;
+ const struct c3_isp_core_format_info *isp_fmt;
+
+ fmt = v4l2_subdev_state_get_format(state, C3_ISP_CORE_PAD_SINK_VIDEO);
+ isp_fmt = core_find_format_by_code(fmt->code,
+ C3_ISP_CORE_PAD_SINK_VIDEO);
+
+ c3_isp_write(isp, ISP_TOP_INPUT_SIZE,
+ ISP_TOP_INPUT_SIZE_HORIZ_SIZE(fmt->width) |
+ ISP_TOP_INPUT_SIZE_VERT_SIZE(fmt->height));
+ c3_isp_write(isp, ISP_TOP_FRM_SIZE,
+ ISP_TOP_FRM_SIZE_CORE_HORIZ_SIZE(fmt->width) |
+ ISP_TOP_FRM_SIZE_CORE_VERT_SIZE(fmt->height));
+
+ c3_isp_update_bits(isp, ISP_TOP_HOLD_SIZE,
+ ISP_TOP_HOLD_SIZE_CORE_HORIZ_SIZE_MASK,
+ ISP_TOP_HOLD_SIZE_CORE_HORIZ_SIZE(fmt->width));
+
+ c3_isp_write(isp, ISP_AF_HV_SIZE,
+ ISP_AF_HV_SIZE_GLB_WIN_XSIZE(fmt->width) |
+ ISP_AF_HV_SIZE_GLB_WIN_YSIZE(fmt->height));
+ c3_isp_write(isp, ISP_AE_HV_SIZE,
+ ISP_AE_HV_SIZE_HORIZ_SIZE(fmt->width) |
+ ISP_AE_HV_SIZE_VERT_SIZE(fmt->height));
+ c3_isp_write(isp, ISP_AWB_HV_SIZE,
+ ISP_AWB_HV_SIZE_HORIZ_SIZE(fmt->width) |
+ ISP_AWB_HV_SIZE_VERT_SIZE(fmt->height));
+
+ c3_isp_core_lswb_ofst(isp, isp_fmt->xofst, isp_fmt->yofst);
+ c3_isp_core_3a_ofst(isp, isp_fmt->xofst, isp_fmt->yofst);
+ c3_isp_core_dms_ofst(isp, isp_fmt->xofst, isp_fmt->yofst);
+}
+
+static bool c3_isp_core_streams_ready(struct c3_isp_core *core)
+{
+ unsigned int n_links = 0;
+ struct media_link *link;
+
+ for_each_media_entity_data_link(&core->sd.entity, link) {
+ if ((link->source->index == C3_ISP_CORE_PAD_SOURCE_VIDEO_0 ||
+ link->source->index == C3_ISP_CORE_PAD_SOURCE_VIDEO_1 ||
+ link->source->index == C3_ISP_CORE_PAD_SOURCE_VIDEO_2) &&
+ link->flags == MEDIA_LNK_FL_ENABLED)
+ n_links++;
+ }
+
+ return n_links == core->isp->pipe.start_count;
+}
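+
+/*
+ * For example, with two capture video nodes linked and started, the
+ * pipeline start count reaches two only when the second node begins
+ * streaming; it then matches the two enabled source links and lets the
+ * core enable the hardware exactly once.
+ */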
+
+static int c3_isp_core_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct c3_isp_core *core = v4l2_get_subdevdata(sd);
+ struct media_pad *sink_pad;
+ struct v4l2_subdev *src_sd;
+ int ret;
+
+ if (!c3_isp_core_streams_ready(core))
+ return 0;
+
+ core->isp->frm_sequence = 0;
+ c3_isp_core_cfg_format(core->isp, state);
+ c3_isp_core_enable(core->isp);
+
+ sink_pad = &core->pads[C3_ISP_CORE_PAD_SINK_VIDEO];
+ core->src_pad = media_pad_remote_pad_unique(sink_pad);
+ if (IS_ERR(core->src_pad)) {
+ dev_dbg(core->isp->dev,
+ "Failed to get source pad for ISP core\n");
+ core->src_pad = NULL;
+ return -EPIPE;
+ }
+
+ src_sd = media_entity_to_v4l2_subdev(core->src_pad->entity);
+
+ ret = v4l2_subdev_enable_streams(src_sd, core->src_pad->index, BIT(0));
+ if (ret) {
+ c3_isp_core_disable(core->isp);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int c3_isp_core_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct c3_isp_core *core = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *src_sd;
+
+ if (core->isp->pipe.start_count != 1)
+ return 0;
+
+ if (core->src_pad) {
+ src_sd = media_entity_to_v4l2_subdev(core->src_pad->entity);
+ v4l2_subdev_disable_streams(src_sd, core->src_pad->index,
+ BIT(0));
+ }
+ core->src_pad = NULL;
+
+ c3_isp_core_disable(core->isp);
+
+ return 0;
+}
+
+static int c3_isp_core_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct c3_isp_core_format_info *info;
+
+ switch (code->pad) {
+ case C3_ISP_CORE_PAD_SINK_VIDEO:
+ case C3_ISP_CORE_PAD_SOURCE_VIDEO_0:
+ case C3_ISP_CORE_PAD_SOURCE_VIDEO_1:
+ case C3_ISP_CORE_PAD_SOURCE_VIDEO_2:
+ info = core_find_format_by_index(code->index, code->pad);
+ if (!info)
+ return -EINVAL;
+
+ code->code = info->mbus_code;
+
+ break;
+ case C3_ISP_CORE_PAD_SINK_PARAMS:
+ case C3_ISP_CORE_PAD_SOURCE_STATS:
+ if (code->index)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_METADATA_FIXED;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void c3_isp_core_set_sink_fmt(struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+ const struct c3_isp_core_format_info *isp_fmt;
+
+ sink_fmt = v4l2_subdev_state_get_format(state, format->pad);
+
+ isp_fmt = core_find_format_by_code(format->format.code, format->pad);
+ if (!isp_fmt)
+ sink_fmt->code = C3_ISP_CORE_DEF_SINK_PAD_FMT;
+ else
+ sink_fmt->code = format->format.code;
+
+ sink_fmt->width = clamp_t(u32, format->format.width,
+ C3_ISP_MIN_WIDTH, C3_ISP_MAX_WIDTH);
+ sink_fmt->height = clamp_t(u32, format->format.height,
+ C3_ISP_MIN_HEIGHT, C3_ISP_MAX_HEIGHT);
+ sink_fmt->field = V4L2_FIELD_NONE;
+ sink_fmt->colorspace = V4L2_COLORSPACE_RAW;
+ sink_fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ for (unsigned int i = C3_ISP_CORE_PAD_SOURCE_VIDEO_0;
+ i < C3_ISP_CORE_PAD_MAX; i++) {
+ src_fmt = v4l2_subdev_state_get_format(state, i);
+
+ src_fmt->width = sink_fmt->width;
+ src_fmt->height = sink_fmt->height;
+ }
+
+ format->format = *sink_fmt;
+}
+
+static void c3_isp_core_set_source_fmt(struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ const struct c3_isp_core_format_info *isp_fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+ struct v4l2_mbus_framefmt *sink_fmt;
+
+ sink_fmt = v4l2_subdev_state_get_format(state,
+ C3_ISP_CORE_PAD_SINK_VIDEO);
+ src_fmt = v4l2_subdev_state_get_format(state, format->pad);
+
+ isp_fmt = core_find_format_by_code(format->format.code, format->pad);
+ if (!isp_fmt)
+ src_fmt->code = C3_ISP_CORE_DEF_SRC_PAD_FMT;
+ else
+ src_fmt->code = format->format.code;
+
+ src_fmt->width = sink_fmt->width;
+ src_fmt->height = sink_fmt->height;
+ src_fmt->field = V4L2_FIELD_NONE;
+ src_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+
+ if (isp_fmt && isp_fmt->is_raw) {
+ src_fmt->colorspace = V4L2_COLORSPACE_RAW;
+ src_fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ src_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ } else {
+ src_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ src_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+ src_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ }
+
+ format->format = *src_fmt;
+}
+
+static int c3_isp_core_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ if (format->pad == C3_ISP_CORE_PAD_SINK_VIDEO)
+ c3_isp_core_set_sink_fmt(state, format);
+ else if (format->pad == C3_ISP_CORE_PAD_SOURCE_VIDEO_0 ||
+ format->pad == C3_ISP_CORE_PAD_SOURCE_VIDEO_1 ||
+ format->pad == C3_ISP_CORE_PAD_SOURCE_VIDEO_2)
+ c3_isp_core_set_source_fmt(state, format);
+ else
+ format->format =
+ *v4l2_subdev_state_get_format(state, format->pad);
+
+ return 0;
+}
+
+static int c3_isp_core_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *fmt;
+
+ /* Video sink pad */
+ fmt = v4l2_subdev_state_get_format(state, C3_ISP_CORE_PAD_SINK_VIDEO);
+ fmt->width = C3_ISP_DEFAULT_WIDTH;
+ fmt->height = C3_ISP_DEFAULT_HEIGHT;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->code = C3_ISP_CORE_DEF_SINK_PAD_FMT;
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+ fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ /* Video source pad */
+ for (unsigned int i = C3_ISP_CORE_PAD_SOURCE_VIDEO_0;
+ i < C3_ISP_CORE_PAD_MAX; i++) {
+ fmt = v4l2_subdev_state_get_format(state, i);
+ fmt->width = C3_ISP_DEFAULT_WIDTH;
+ fmt->height = C3_ISP_DEFAULT_HEIGHT;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->code = C3_ISP_CORE_DEF_SRC_PAD_FMT;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ }
+
+ /* Parameters pad */
+ fmt = v4l2_subdev_state_get_format(state, C3_ISP_CORE_PAD_SINK_PARAMS);
+ fmt->width = 0;
+ fmt->height = 0;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->code = MEDIA_BUS_FMT_METADATA_FIXED;
+
+ /* Statistics pad */
+ fmt = v4l2_subdev_state_get_format(state, C3_ISP_CORE_PAD_SOURCE_STATS);
+ fmt->width = 0;
+ fmt->height = 0;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->code = MEDIA_BUS_FMT_METADATA_FIXED;
+
+ return 0;
+}
+
+static int c3_isp_core_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ if (sub->type != V4L2_EVENT_FRAME_SYNC)
+ return -EINVAL;
+
+ /* V4L2_EVENT_FRAME_SYNC does not use an id, so it must be set to 0 */
+ if (sub->id != 0)
+ return -EINVAL;
+
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+}
+
+static const struct v4l2_subdev_pad_ops c3_isp_core_pad_ops = {
+ .enum_mbus_code = c3_isp_core_enum_mbus_code,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = c3_isp_core_set_fmt,
+ .enable_streams = c3_isp_core_enable_streams,
+ .disable_streams = c3_isp_core_disable_streams,
+};
+
+static const struct v4l2_subdev_core_ops c3_isp_core_core_ops = {
+ .subscribe_event = c3_isp_core_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_ops c3_isp_core_subdev_ops = {
+ .core = &c3_isp_core_core_ops,
+ .pad = &c3_isp_core_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops c3_isp_core_internal_ops = {
+ .init_state = c3_isp_core_init_state,
+};
+
+static int c3_isp_core_link_validate(struct media_link *link)
+{
+ if (link->sink->index == C3_ISP_CORE_PAD_SINK_PARAMS)
+ return 0;
+
+ return v4l2_subdev_link_validate(link);
+}
+
+/* Media entity operations */
+static const struct media_entity_operations c3_isp_core_entity_ops = {
+ .link_validate = c3_isp_core_link_validate,
+};
+
+void c3_isp_core_queue_sof(struct c3_isp_device *isp)
+{
+ struct v4l2_event event = {
+ .type = V4L2_EVENT_FRAME_SYNC,
+ };
+
+ event.u.frame_sync.frame_sequence = isp->frm_sequence;
+ v4l2_event_queue(isp->core.sd.devnode, &event);
+}
+
+int c3_isp_core_register(struct c3_isp_device *isp)
+{
+ struct c3_isp_core *core = &isp->core;
+ struct v4l2_subdev *sd = &core->sd;
+ int ret;
+
+ v4l2_subdev_init(sd, &c3_isp_core_subdev_ops);
+ sd->owner = THIS_MODULE;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ sd->internal_ops = &c3_isp_core_internal_ops;
+ snprintf(sd->name, sizeof(sd->name), "%s", C3_ISP_CORE_SUBDEV_NAME);
+
+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+ sd->entity.ops = &c3_isp_core_entity_ops;
+
+ core->isp = isp;
+ sd->dev = isp->dev;
+ v4l2_set_subdevdata(sd, core);
+
+ core->pads[C3_ISP_CORE_PAD_SINK_VIDEO].flags = MEDIA_PAD_FL_SINK;
+ core->pads[C3_ISP_CORE_PAD_SINK_PARAMS].flags = MEDIA_PAD_FL_SINK;
+ core->pads[C3_ISP_CORE_PAD_SOURCE_STATS].flags = MEDIA_PAD_FL_SOURCE;
+ core->pads[C3_ISP_CORE_PAD_SOURCE_VIDEO_0].flags = MEDIA_PAD_FL_SOURCE;
+ core->pads[C3_ISP_CORE_PAD_SOURCE_VIDEO_1].flags = MEDIA_PAD_FL_SOURCE;
+ core->pads[C3_ISP_CORE_PAD_SOURCE_VIDEO_2].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&sd->entity, C3_ISP_CORE_PAD_MAX,
+ core->pads);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret)
+ goto err_entity_cleanup;
+
+ ret = v4l2_device_register_subdev(&isp->v4l2_dev, sd);
+ if (ret)
+ goto err_subdev_cleanup;
+
+ return 0;
+
+err_subdev_cleanup:
+ v4l2_subdev_cleanup(sd);
+err_entity_cleanup:
+ media_entity_cleanup(&sd->entity);
+ return ret;
+}
+
+void c3_isp_core_unregister(struct c3_isp_device *isp)
+{
+ struct c3_isp_core *core = &isp->core;
+ struct v4l2_subdev *sd = &core->sd;
+
+ v4l2_device_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
+ media_entity_cleanup(&sd->entity);
+}
diff --git a/drivers/media/platform/amlogic/c3/isp/c3-isp-dev.c b/drivers/media/platform/amlogic/c3/isp/c3-isp-dev.c
new file mode 100644
index 000000000000..c3b779f63088
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/c3-isp-dev.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+
+#include "c3-isp-common.h"
+#include "c3-isp-regs.h"
+
+u32 c3_isp_read(struct c3_isp_device *isp, u32 reg)
+{
+ return readl(isp->base + reg);
+}
+
+void c3_isp_write(struct c3_isp_device *isp, u32 reg, u32 val)
+{
+ writel(val, isp->base + reg);
+}
+
+void c3_isp_update_bits(struct c3_isp_device *isp, u32 reg, u32 mask, u32 val)
+{
+ u32 orig, tmp;
+
+ orig = c3_isp_read(isp, reg);
+
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+
+ if (tmp != orig)
+ c3_isp_write(isp, reg, tmp);
+}
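+
+/*
+ * Example usage (illustrative): enable the WRMIF0 write path without
+ * touching the other bits of ISP_TOP_PATH_EN; only the bits selected by
+ * the mask are modified, and the register is only written back when the
+ * value actually changes:
+ *
+ *	c3_isp_update_bits(isp, ISP_TOP_PATH_EN,
+ *			   ISP_TOP_PATH_EN_WRMIF0_EN_MASK,
+ *			   ISP_TOP_PATH_EN_WRMIF0_EN);
+ */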
+
+/* PM runtime suspend */
+static int c3_isp_runtime_suspend(struct device *dev)
+{
+ struct c3_isp_device *isp = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(isp->info->clock_num, isp->clks);
+
+ return 0;
+}
+
+/* PM runtime resume */
+static int c3_isp_runtime_resume(struct device *dev)
+{
+ struct c3_isp_device *isp = dev_get_drvdata(dev);
+
+ return clk_bulk_prepare_enable(isp->info->clock_num, isp->clks);
+}
+
+static const struct dev_pm_ops c3_isp_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ RUNTIME_PM_OPS(c3_isp_runtime_suspend,
+ c3_isp_runtime_resume, NULL)
+};
+
+/* IRQ handling */
+static irqreturn_t c3_isp_irq_handler(int irq, void *dev)
+{
+ struct c3_isp_device *isp = dev;
+ u32 status;
+
+ /* Read and clear the interrupt status */
+ status = c3_isp_read(isp, ISP_TOP_RO_IRQ_STAT);
+ c3_isp_write(isp, ISP_TOP_IRQ_CLR, status);
+
+ if (status & ISP_TOP_RO_IRQ_STAT_FRM_END_MASK) {
+ c3_isp_stats_isr(isp);
+ c3_isp_params_isr(isp);
+ c3_isp_captures_isr(isp);
+ isp->frm_sequence++;
+ }
+
+ if (status & ISP_TOP_RO_IRQ_STAT_FRM_RST_MASK)
+ c3_isp_core_queue_sof(isp);
+
+ return IRQ_HANDLED;
+}
+
+/* Subdev notifier register */
+static int c3_isp_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asc)
+{
+ struct c3_isp_device *isp =
+ container_of(notifier, struct c3_isp_device, notifier);
+ struct media_pad *sink =
+ &isp->core.sd.entity.pads[C3_ISP_CORE_PAD_SINK_VIDEO];
+
+ return v4l2_create_fwnode_links_to_pad(sd, sink, MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static int c3_isp_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct c3_isp_device *isp =
+ container_of(notifier, struct c3_isp_device, notifier);
+
+ return v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
+}
+
+static const struct v4l2_async_notifier_operations c3_isp_notify_ops = {
+ .bound = c3_isp_notify_bound,
+ .complete = c3_isp_notify_complete,
+};
+
+static int c3_isp_async_nf_register(struct c3_isp_device *isp)
+{
+ struct v4l2_async_connection *asc;
+ struct fwnode_handle *ep;
+ int ret;
+
+ v4l2_async_nf_init(&isp->notifier, &isp->v4l2_dev);
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(isp->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ return -ENOTCONN;
+
+ asc = v4l2_async_nf_add_fwnode_remote(&isp->notifier, ep,
+ struct v4l2_async_connection);
+ fwnode_handle_put(ep);
+
+ if (IS_ERR(asc))
+ return PTR_ERR(asc);
+
+ isp->notifier.ops = &c3_isp_notify_ops;
+ ret = v4l2_async_nf_register(&isp->notifier);
+ if (ret)
+ v4l2_async_nf_cleanup(&isp->notifier);
+
+ return ret;
+}
+
+static void c3_isp_async_nf_unregister(struct c3_isp_device *isp)
+{
+ v4l2_async_nf_unregister(&isp->notifier);
+ v4l2_async_nf_cleanup(&isp->notifier);
+}
+
+static int c3_isp_media_register(struct c3_isp_device *isp)
+{
+ struct media_device *media_dev = &isp->media_dev;
+ struct v4l2_device *v4l2_dev = &isp->v4l2_dev;
+ int ret;
+
+ /* Initialize media device */
+ strscpy(media_dev->model, C3_ISP_DRIVER_NAME, sizeof(media_dev->model));
+ media_dev->dev = isp->dev;
+
+ media_device_init(media_dev);
+
+ /* Initialize v4l2 device */
+ v4l2_dev->mdev = media_dev;
+ strscpy(v4l2_dev->name, C3_ISP_DRIVER_NAME, sizeof(v4l2_dev->name));
+
+ ret = v4l2_device_register(isp->dev, v4l2_dev);
+ if (ret)
+ goto err_media_dev_cleanup;
+
+ ret = media_device_register(&isp->media_dev);
+ if (ret) {
+ dev_err(isp->dev, "Failed to register media device: %d\n", ret);
+ goto err_unreg_v4l2_dev;
+ }
+
+ return 0;
+
+err_unreg_v4l2_dev:
+ v4l2_device_unregister(&isp->v4l2_dev);
+err_media_dev_cleanup:
+ media_device_cleanup(media_dev);
+ return ret;
+}
+
+static void c3_isp_media_unregister(struct c3_isp_device *isp)
+{
+ media_device_unregister(&isp->media_dev);
+ v4l2_device_unregister(&isp->v4l2_dev);
+ media_device_cleanup(&isp->media_dev);
+}
+
+static void c3_isp_remove_links(struct c3_isp_device *isp)
+{
+ unsigned int i;
+
+ media_entity_remove_links(&isp->core.sd.entity);
+
+ for (i = 0; i < C3_ISP_NUM_RSZ; i++)
+ media_entity_remove_links(&isp->resizers[i].sd.entity);
+
+ for (i = 0; i < C3_ISP_NUM_CAP_DEVS; i++)
+ media_entity_remove_links(&isp->caps[i].vdev.entity);
+}
+
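+/*
+ * Media graph created below (sketch):
+ *
+ *	params --> core --> stats
+ *	           core --> rsz0 --> cap0
+ *	           core --> rsz1 --> cap1
+ *	           core --> rsz2 --> cap2
+ */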
+static int c3_isp_create_links(struct c3_isp_device *isp)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < C3_ISP_NUM_RSZ; i++) {
+ ret = media_create_pad_link(&isp->resizers[i].sd.entity,
+ C3_ISP_RSZ_PAD_SOURCE,
+ &isp->caps[i].vdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ dev_err(isp->dev,
+ "Failed to link rsz %u and cap %u\n", i, i);
+ goto err_remove_links;
+ }
+
+ ret = media_create_pad_link(&isp->core.sd.entity,
+ C3_ISP_CORE_PAD_SOURCE_VIDEO_0 + i,
+ &isp->resizers[i].sd.entity,
+ C3_ISP_RSZ_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_err(isp->dev,
+ "Failed to link core and rsz %u\n", i);
+ goto err_remove_links;
+ }
+ }
+
+ ret = media_create_pad_link(&isp->core.sd.entity,
+ C3_ISP_CORE_PAD_SOURCE_STATS,
+ &isp->stats.vdev.entity,
+ 0, MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_err(isp->dev, "Failed to link core and stats\n");
+ goto err_remove_links;
+ }
+
+ ret = media_create_pad_link(&isp->params.vdev.entity, 0,
+ &isp->core.sd.entity,
+ C3_ISP_CORE_PAD_SINK_PARAMS,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_err(isp->dev, "Failed to link params and core\n");
+ goto err_remove_links;
+ }
+
+ return 0;
+
+err_remove_links:
+ c3_isp_remove_links(isp);
+ return ret;
+}
+
+static int c3_isp_videos_register(struct c3_isp_device *isp)
+{
+ int ret;
+
+ ret = c3_isp_captures_register(isp);
+ if (ret)
+ return ret;
+
+ ret = c3_isp_stats_register(isp);
+ if (ret)
+ goto err_captures_unregister;
+
+ ret = c3_isp_params_register(isp);
+ if (ret)
+ goto err_stats_unregister;
+
+ ret = c3_isp_create_links(isp);
+ if (ret)
+ goto err_params_unregister;
+
+ return 0;
+
+err_params_unregister:
+ c3_isp_params_unregister(isp);
+err_stats_unregister:
+ c3_isp_stats_unregister(isp);
+err_captures_unregister:
+ c3_isp_captures_unregister(isp);
+ return ret;
+}
+
+static void c3_isp_videos_unregister(struct c3_isp_device *isp)
+{
+ c3_isp_remove_links(isp);
+ c3_isp_params_unregister(isp);
+ c3_isp_stats_unregister(isp);
+ c3_isp_captures_unregister(isp);
+}
+
+static int c3_isp_get_clocks(struct c3_isp_device *isp)
+{
+ const struct c3_isp_info *info = isp->info;
+
+ for (unsigned int i = 0; i < info->clock_num; i++)
+ isp->clks[i].id = info->clocks[i];
+
+ return devm_clk_bulk_get(isp->dev, info->clock_num, isp->clks);
+}
+
+static int c3_isp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct c3_isp_device *isp;
+ int irq;
+ int ret;
+
+ isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL);
+ if (!isp)
+ return -ENOMEM;
+
+ isp->info = of_device_get_match_data(dev);
+ isp->dev = dev;
+
+ isp->base = devm_platform_ioremap_resource_byname(pdev, "isp");
+ if (IS_ERR(isp->base))
+ return dev_err_probe(dev, PTR_ERR(isp->base),
+ "Failed to ioremap resource\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = c3_isp_get_clocks(isp);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
+
+ platform_set_drvdata(pdev, isp);
+
+ pm_runtime_enable(dev);
+
+ ret = c3_isp_media_register(isp);
+ if (ret)
+ goto err_runtime_disable;
+
+ ret = c3_isp_core_register(isp);
+ if (ret)
+ goto err_v4l2_unregister;
+
+ ret = c3_isp_resizers_register(isp);
+ if (ret)
+ goto err_core_unregister;
+
+ ret = c3_isp_async_nf_register(isp);
+ if (ret)
+ goto err_resizers_unregister;
+
+ ret = devm_request_irq(dev, irq,
+ c3_isp_irq_handler, IRQF_SHARED,
+ dev_driver_string(dev), isp);
+ if (ret)
+ goto err_nf_unregister;
+
+ ret = c3_isp_videos_register(isp);
+ if (ret)
+ goto err_nf_unregister;
+
+ return 0;
+
+err_nf_unregister:
+ c3_isp_async_nf_unregister(isp);
+err_resizers_unregister:
+ c3_isp_resizers_unregister(isp);
+err_core_unregister:
+ c3_isp_core_unregister(isp);
+err_v4l2_unregister:
+ c3_isp_media_unregister(isp);
+err_runtime_disable:
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static void c3_isp_remove(struct platform_device *pdev)
+{
+ struct c3_isp_device *isp = platform_get_drvdata(pdev);
+
+ c3_isp_videos_unregister(isp);
+ c3_isp_async_nf_unregister(isp);
+ c3_isp_core_unregister(isp);
+ c3_isp_resizers_unregister(isp);
+ c3_isp_media_unregister(isp);
+ pm_runtime_disable(isp->dev);
+}
+
+static const struct c3_isp_info isp_info = {
+ .clocks = {"vapb", "isp0"},
+ .clock_num = 2
+};
+
+static const struct of_device_id c3_isp_of_match[] = {
+ { .compatible = "amlogic,c3-isp",
+ .data = &isp_info },
+ { },
+};
+MODULE_DEVICE_TABLE(of, c3_isp_of_match);
+
+static struct platform_driver c3_isp_driver = {
+ .probe = c3_isp_probe,
+ .remove = c3_isp_remove,
+ .driver = {
+ .name = "c3-isp",
+ .of_match_table = c3_isp_of_match,
+ .pm = pm_ptr(&c3_isp_pm_ops),
+ },
+};
+
+module_platform_driver(c3_isp_driver);
+
+MODULE_AUTHOR("Keke Li <keke.li@amlogic.com>");
+MODULE_DESCRIPTION("Amlogic C3 ISP pipeline");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/amlogic/c3/isp/c3-isp-params.c b/drivers/media/platform/amlogic/c3/isp/c3-isp-params.c
new file mode 100644
index 000000000000..c80667dd7662
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/c3-isp-params.c
@@ -0,0 +1,1008 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#include <linux/cleanup.h>
+#include <linux/media/amlogic/c3-isp-config.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "c3-isp-common.h"
+#include "c3-isp-regs.h"
+
+/*
+ * union c3_isp_params_block - Generalisation of a parameter block
+ *
+ * This union allows the driver to treat a parameter block as an instance of
+ * this union, and to safely access the header and the block-specific struct
+ * without resorting to casting. The header member is accessed first, and its
+ * type field is checked to determine which of the other members should be
+ * used.
+ *
+ * @header: The shared header struct embedded as the first member
+ * of all the possible other members. This member would be
+ * accessed first and the type field checked to determine
+ * which of the other members should be accessed.
+ * @awb_gains: For header.type == C3_ISP_PARAMS_BLOCK_AWB_GAINS
+ * @awb_cfg: For header.type == C3_ISP_PARAMS_BLOCK_AWB_CONFIG
+ * @ae_cfg: For header.type == C3_ISP_PARAMS_BLOCK_AE_CONFIG
+ * @af_cfg: For header.type == C3_ISP_PARAMS_BLOCK_AF_CONFIG
+ * @pst_gamma: For header.type == C3_ISP_PARAMS_BLOCK_PST_GAMMA
+ * @ccm: For header.type == C3_ISP_PARAMS_BLOCK_CCM
+ * @csc: For header.type == C3_ISP_PARAMS_BLOCK_CSC
+ * @blc: For header.type == C3_ISP_PARAMS_BLOCK_BLC
+ */
+union c3_isp_params_block {
+ struct c3_isp_params_block_header header;
+ struct c3_isp_params_awb_gains awb_gains;
+ struct c3_isp_params_awb_config awb_cfg;
+ struct c3_isp_params_ae_config ae_cfg;
+ struct c3_isp_params_af_config af_cfg;
+ struct c3_isp_params_pst_gamma pst_gamma;
+ struct c3_isp_params_ccm ccm;
+ struct c3_isp_params_csc csc;
+ struct c3_isp_params_blc blc;
+};
+
+typedef void (*c3_isp_block_handler)(struct c3_isp_device *isp,
+ const union c3_isp_params_block *block);
+
+struct c3_isp_params_handler {
+ size_t size;
+ c3_isp_block_handler handler;
+};
+
+#define to_c3_isp_params_buffer(vbuf) \
+ container_of(vbuf, struct c3_isp_params_buffer, vb)
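+
+/*
+ * Illustrative sketch of how a parameters buffer is walked (the field
+ * names of the userspace config struct, data and data_size, are
+ * assumptions based on linux/media/amlogic/c3-isp-config.h): the header
+ * of each block is read first, header.type selects a struct
+ * c3_isp_params_handler entry, and header.size validates the block and
+ * advances to the next one.
+ *
+ *	for (offset = 0; offset + sizeof(block->header) <= cfg->data_size;
+ *	     offset += block->header.size) {
+ *		block = (const union c3_isp_params_block *)(cfg->data + offset);
+ *		handler = &handlers[block->header.type];
+ *		if (handler->handler && handler->size == block->header.size)
+ *			handler->handler(isp, block);
+ *	}
+ */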
+
+/* Hardware configuration */
+
+static void c3_isp_params_cfg_awb_gains(struct c3_isp_device *isp,
+ const union c3_isp_params_block *block)
+{
+ const struct c3_isp_params_awb_gains *awb_gains = &block->awb_gains;
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_DISABLE) {
+ c3_isp_update_bits(isp, ISP_TOP_BEO_CTRL,
+ ISP_TOP_BEO_CTRL_WB_EN_MASK,
+ ISP_TOP_BEO_CTRL_WB_DIS);
+ return;
+ }
+
+ c3_isp_update_bits(isp, ISP_LSWB_WB_GAIN0,
+ ISP_LSWB_WB_GAIN0_GR_GAIN_MASK,
+ ISP_LSWB_WB_GAIN0_GR_GAIN(awb_gains->gr_gain));
+ c3_isp_update_bits(isp, ISP_LSWB_WB_GAIN0,
+ ISP_LSWB_WB_GAIN0_R_GAIN_MASK,
+ ISP_LSWB_WB_GAIN0_R_GAIN(awb_gains->r_gain));
+ c3_isp_update_bits(isp, ISP_LSWB_WB_GAIN1,
+ ISP_LSWB_WB_GAIN1_B_GAIN_MASK,
+ ISP_LSWB_WB_GAIN1_B_GAIN(awb_gains->b_gain));
+ c3_isp_update_bits(isp, ISP_LSWB_WB_GAIN1,
+ ISP_LSWB_WB_GAIN1_GB_GAIN_MASK,
+ ISP_LSWB_WB_GAIN1_GB_GAIN(awb_gains->gb_gain));
+ c3_isp_update_bits(isp, ISP_LSWB_WB_GAIN2,
+ ISP_LSWB_WB_GAIN2_IR_GAIN_MASK,
+ ISP_LSWB_WB_GAIN2_IR_GAIN(awb_gains->gb_gain));
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_ENABLE)
+ c3_isp_update_bits(isp, ISP_TOP_BEO_CTRL,
+ ISP_TOP_BEO_CTRL_WB_EN_MASK,
+ ISP_TOP_BEO_CTRL_WB_EN);
+}
+
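+/*
+ * Zone weights are packed eight per register write. For example, with
+ * 5x5 = 25 zone weights the function below performs three full 8-weight
+ * writes (24 weights) and then packs the remaining weight into one final
+ * write.
+ */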
+static void c3_isp_params_awb_wt(struct c3_isp_device *isp,
+ const struct c3_isp_params_awb_config *cfg)
+{
+ unsigned int zones_num;
+ unsigned int base;
+ unsigned int data;
+ unsigned int i;
+
+ /* Set the weight address to 0 position */
+ c3_isp_write(isp, ISP_AWB_BLK_WT_ADDR, 0);
+
+ zones_num = cfg->horiz_zones_num * cfg->vert_zones_num;
+
+ /* Need to write 8 weights at once */
+ for (i = 0; i < zones_num / 8; i++) {
+ base = i * 8;
+ data = ISP_AWB_BLK_WT_DATA_WT(0, cfg->zone_weight[base + 0]) |
+ ISP_AWB_BLK_WT_DATA_WT(1, cfg->zone_weight[base + 1]) |
+ ISP_AWB_BLK_WT_DATA_WT(2, cfg->zone_weight[base + 2]) |
+ ISP_AWB_BLK_WT_DATA_WT(3, cfg->zone_weight[base + 3]) |
+ ISP_AWB_BLK_WT_DATA_WT(4, cfg->zone_weight[base + 4]) |
+ ISP_AWB_BLK_WT_DATA_WT(5, cfg->zone_weight[base + 5]) |
+ ISP_AWB_BLK_WT_DATA_WT(6, cfg->zone_weight[base + 6]) |
+ ISP_AWB_BLK_WT_DATA_WT(7, cfg->zone_weight[base + 7]);
+ c3_isp_write(isp, ISP_AWB_BLK_WT_DATA, data);
+ }
+
+ if (zones_num % 8 == 0)
+ return;
+
+ data = 0;
+ base = i * 8;
+
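+ /* Write the last weights data */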
+ for (i = 0; i < zones_num % 8; i++)
+ data |= ISP_AWB_BLK_WT_DATA_WT(i, cfg->zone_weight[base + i]);
+
+ c3_isp_write(isp, ISP_AWB_BLK_WT_DATA, data);
+}
+
+static void c3_isp_params_awb_cood(struct c3_isp_device *isp,
+ const struct c3_isp_params_awb_config *cfg)
+{
+ unsigned int max_point_num;
+
+ /* The number of points is one more than the number of edges */
+ max_point_num = max(cfg->horiz_zones_num, cfg->vert_zones_num) + 1;
+
+ /* Set the index address to 0 position */
+ c3_isp_write(isp, ISP_AWB_IDX_ADDR, 0);
+
+ for (unsigned int i = 0; i < max_point_num; i++)
+ c3_isp_write(isp, ISP_AWB_IDX_DATA,
+ ISP_AWB_IDX_DATA_HIDX_DATA(cfg->horiz_coord[i]) |
+ ISP_AWB_IDX_DATA_VIDX_DATA(cfg->vert_coord[i]));
+}
+
+static void c3_isp_params_cfg_awb_config(struct c3_isp_device *isp,
+ const union c3_isp_params_block *block)
+{
+ const struct c3_isp_params_awb_config *awb_cfg = &block->awb_cfg;
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_DISABLE) {
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AWB_STAT_EN_MASK,
+ ISP_TOP_3A_STAT_CRTL_AWB_STAT_DIS);
+ return;
+ }
+
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AWB_POINT_MASK,
+ ISP_TOP_3A_STAT_CRTL_AWB_POINT(awb_cfg->tap_point));
+
+ c3_isp_update_bits(isp, ISP_AWB_STAT_CTRL2,
+ ISP_AWB_STAT_CTRL2_SATUR_CTRL_MASK,
+ ISP_AWB_STAT_CTRL2_SATUR_CTRL(awb_cfg->satur_vald));
+
+ c3_isp_update_bits(isp, ISP_AWB_HV_BLKNUM,
+ ISP_AWB_HV_BLKNUM_H_NUM_MASK,
+ ISP_AWB_HV_BLKNUM_H_NUM(awb_cfg->horiz_zones_num));
+ c3_isp_update_bits(isp, ISP_AWB_HV_BLKNUM,
+ ISP_AWB_HV_BLKNUM_V_NUM_MASK,
+ ISP_AWB_HV_BLKNUM_V_NUM(awb_cfg->vert_zones_num));
+
+ c3_isp_update_bits(isp, ISP_AWB_STAT_RG, ISP_AWB_STAT_RG_MIN_VALUE_MASK,
+ ISP_AWB_STAT_RG_MIN_VALUE(awb_cfg->rg_min));
+ c3_isp_update_bits(isp, ISP_AWB_STAT_RG, ISP_AWB_STAT_RG_MAX_VALUE_MASK,
+ ISP_AWB_STAT_RG_MAX_VALUE(awb_cfg->rg_max));
+
+ c3_isp_update_bits(isp, ISP_AWB_STAT_BG, ISP_AWB_STAT_BG_MIN_VALUE_MASK,
+ ISP_AWB_STAT_BG_MIN_VALUE(awb_cfg->bg_min));
+ c3_isp_update_bits(isp, ISP_AWB_STAT_BG, ISP_AWB_STAT_BG_MAX_VALUE_MASK,
+ ISP_AWB_STAT_BG_MAX_VALUE(awb_cfg->bg_max));
+
+ c3_isp_update_bits(isp, ISP_AWB_STAT_RG_HL,
+ ISP_AWB_STAT_RG_HL_LOW_VALUE_MASK,
+ ISP_AWB_STAT_RG_HL_LOW_VALUE(awb_cfg->rg_low));
+ c3_isp_update_bits(isp, ISP_AWB_STAT_RG_HL,
+ ISP_AWB_STAT_RG_HL_HIGH_VALUE_MASK,
+ ISP_AWB_STAT_RG_HL_HIGH_VALUE(awb_cfg->rg_high));
+
+ c3_isp_update_bits(isp, ISP_AWB_STAT_BG_HL,
+ ISP_AWB_STAT_BG_HL_LOW_VALUE_MASK,
+ ISP_AWB_STAT_BG_HL_LOW_VALUE(awb_cfg->bg_low));
+ c3_isp_update_bits(isp, ISP_AWB_STAT_BG_HL,
+ ISP_AWB_STAT_BG_HL_HIGH_VALUE_MASK,
+ ISP_AWB_STAT_BG_HL_HIGH_VALUE(awb_cfg->bg_high));
+
+ c3_isp_params_awb_wt(isp, awb_cfg);
+	c3_isp_params_awb_coord(isp, awb_cfg);
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_ENABLE)
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AWB_STAT_EN_MASK,
+ ISP_TOP_3A_STAT_CRTL_AWB_STAT_EN);
+}
+
+static void c3_isp_params_ae_wt(struct c3_isp_device *isp,
+ const struct c3_isp_params_ae_config *cfg)
+{
+ unsigned int zones_num;
+ unsigned int base;
+ unsigned int data;
+ unsigned int i;
+
+	/* Set the weight address to position 0 */
+ c3_isp_write(isp, ISP_AE_BLK_WT_ADDR, 0);
+
+ zones_num = cfg->horiz_zones_num * cfg->vert_zones_num;
+
+ /* Need to write 8 weights at once */
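+	/* Weights are 4 bits each, packed eight per 32-bit register */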
+ for (i = 0; i < zones_num / 8; i++) {
+ base = i * 8;
+ data = ISP_AE_BLK_WT_DATA_WT(0, cfg->zone_weight[base + 0]) |
+ ISP_AE_BLK_WT_DATA_WT(1, cfg->zone_weight[base + 1]) |
+ ISP_AE_BLK_WT_DATA_WT(2, cfg->zone_weight[base + 2]) |
+ ISP_AE_BLK_WT_DATA_WT(3, cfg->zone_weight[base + 3]) |
+ ISP_AE_BLK_WT_DATA_WT(4, cfg->zone_weight[base + 4]) |
+ ISP_AE_BLK_WT_DATA_WT(5, cfg->zone_weight[base + 5]) |
+ ISP_AE_BLK_WT_DATA_WT(6, cfg->zone_weight[base + 6]) |
+ ISP_AE_BLK_WT_DATA_WT(7, cfg->zone_weight[base + 7]);
+ c3_isp_write(isp, ISP_AE_BLK_WT_DATA, data);
+ }
+
+ if (zones_num % 8 == 0)
+ return;
+
+ data = 0;
+ base = i * 8;
+
+ /* Write the last weights data */
+ for (i = 0; i < zones_num % 8; i++)
+ data |= ISP_AE_BLK_WT_DATA_WT(i, cfg->zone_weight[base + i]);
+
+ c3_isp_write(isp, ISP_AE_BLK_WT_DATA, data);
+}
+
+static void c3_isp_params_ae_coord(struct c3_isp_device *isp,
+				   const struct c3_isp_params_ae_config *cfg)
+{
+ unsigned int max_point_num;
+
+	/* The number of boundary points is one more than the number of zones */
+ max_point_num = max(cfg->horiz_zones_num, cfg->vert_zones_num) + 1;
+
+	/* Set the index address to position 0 */
+ c3_isp_write(isp, ISP_AE_IDX_ADDR, 0);
+
+ for (unsigned int i = 0; i < max_point_num; i++)
+ c3_isp_write(isp, ISP_AE_IDX_DATA,
+ ISP_AE_IDX_DATA_HIDX_DATA(cfg->horiz_coord[i]) |
+ ISP_AE_IDX_DATA_VIDX_DATA(cfg->vert_coord[i]));
+}
+
+static void c3_isp_params_cfg_ae_config(struct c3_isp_device *isp,
+ const union c3_isp_params_block *block)
+{
+ const struct c3_isp_params_ae_config *ae_cfg = &block->ae_cfg;
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_DISABLE) {
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AE_STAT_EN_MASK,
+ ISP_TOP_3A_STAT_CRTL_AE_STAT_DIS);
+ return;
+ }
+
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AE_POINT_MASK,
+ ISP_TOP_3A_STAT_CRTL_AE_POINT(ae_cfg->tap_point));
+
+ if (ae_cfg->tap_point == C3_ISP_AE_STATS_TAP_GE)
+ c3_isp_update_bits(isp, ISP_AE_CTRL,
+ ISP_AE_CTRL_INPUT_2LINE_MASK,
+ ISP_AE_CTRL_INPUT_2LINE_EN);
+ else
+ c3_isp_update_bits(isp, ISP_AE_CTRL,
+ ISP_AE_CTRL_INPUT_2LINE_MASK,
+ ISP_AE_CTRL_INPUT_2LINE_DIS);
+
+ c3_isp_update_bits(isp, ISP_AE_HV_BLKNUM,
+ ISP_AE_HV_BLKNUM_H_NUM_MASK,
+ ISP_AE_HV_BLKNUM_H_NUM(ae_cfg->horiz_zones_num));
+ c3_isp_update_bits(isp, ISP_AE_HV_BLKNUM,
+ ISP_AE_HV_BLKNUM_V_NUM_MASK,
+ ISP_AE_HV_BLKNUM_V_NUM(ae_cfg->vert_zones_num));
+
+ c3_isp_params_ae_wt(isp, ae_cfg);
+	c3_isp_params_ae_coord(isp, ae_cfg);
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_ENABLE)
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AE_STAT_EN_MASK,
+ ISP_TOP_3A_STAT_CRTL_AE_STAT_EN);
+}
+
+static void c3_isp_params_af_coord(struct c3_isp_device *isp,
+				   const struct c3_isp_params_af_config *cfg)
+{
+ unsigned int max_point_num;
+
+	/* The number of boundary points is one more than the number of zones */
+ max_point_num = max(cfg->horiz_zones_num, cfg->vert_zones_num) + 1;
+
+	/* Set the index address to position 0 */
+ c3_isp_write(isp, ISP_AF_IDX_ADDR, 0);
+
+ for (unsigned int i = 0; i < max_point_num; i++)
+ c3_isp_write(isp, ISP_AF_IDX_DATA,
+ ISP_AF_IDX_DATA_HIDX_DATA(cfg->horiz_coord[i]) |
+ ISP_AF_IDX_DATA_VIDX_DATA(cfg->vert_coord[i]));
+}
+
+static void c3_isp_params_cfg_af_config(struct c3_isp_device *isp,
+ const union c3_isp_params_block *block)
+{
+ const struct c3_isp_params_af_config *af_cfg = &block->af_cfg;
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_DISABLE) {
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AF_STAT_EN_MASK,
+ ISP_TOP_3A_STAT_CRTL_AF_STAT_DIS);
+ return;
+ }
+
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AF_POINT_MASK,
+ ISP_TOP_3A_STAT_CRTL_AF_POINT(af_cfg->tap_point));
+
+ c3_isp_update_bits(isp, ISP_AF_HV_BLKNUM,
+ ISP_AF_HV_BLKNUM_H_NUM_MASK,
+ ISP_AF_HV_BLKNUM_H_NUM(af_cfg->horiz_zones_num));
+ c3_isp_update_bits(isp, ISP_AF_HV_BLKNUM,
+ ISP_AF_HV_BLKNUM_V_NUM_MASK,
+ ISP_AF_HV_BLKNUM_V_NUM(af_cfg->vert_zones_num));
+
+	c3_isp_params_af_coord(isp, af_cfg);
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_ENABLE)
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AF_STAT_EN_MASK,
+ ISP_TOP_3A_STAT_CRTL_AF_STAT_EN);
+}
+
+static void c3_isp_params_cfg_pst_gamma(struct c3_isp_device *isp,
+ const union c3_isp_params_block *block)
+{
+ const struct c3_isp_params_pst_gamma *gm = &block->pst_gamma;
+ unsigned int base;
+ unsigned int i;
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_DISABLE) {
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_PST_GAMMA_EN_MASK,
+ ISP_TOP_BED_CTRL_PST_GAMMA_DIS);
+ return;
+ }
+
+ /* R, G and B channels use the same gamma lut */
+ for (unsigned int j = 0; j < 3; j++) {
+ /* Set the channel lut address */
+ c3_isp_write(isp, ISP_PST_GAMMA_LUT_ADDR,
+ ISP_PST_GAMMA_LUT_ADDR_IDX_ADDR(j));
+
+ /* Need to write 2 lut values at once */
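+		/* LUT entries are 16 bits each, so two fit in one register */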
+ for (i = 0; i < ARRAY_SIZE(gm->lut) / 2; i++) {
+ base = i * 2;
+ c3_isp_write(isp, ISP_PST_GAMMA_LUT_DATA,
+ ISP_PST_GM_LUT_DATA0(gm->lut[base]) |
+ ISP_PST_GM_LUT_DATA1(gm->lut[base + 1]));
+ }
+
+ /* Write the last one */
+ if (ARRAY_SIZE(gm->lut) % 2) {
+ base = i * 2;
+ c3_isp_write(isp, ISP_PST_GAMMA_LUT_DATA,
+ ISP_PST_GM_LUT_DATA0(gm->lut[base]));
+ }
+ }
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_ENABLE)
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_PST_GAMMA_EN_MASK,
+ ISP_TOP_BED_CTRL_PST_GAMMA_EN);
+}
+
+/* Configure 3 x 3 ccm matrix */
+static void c3_isp_params_cfg_ccm(struct c3_isp_device *isp,
+ const union c3_isp_params_block *block)
+{
+ const struct c3_isp_params_ccm *ccm = &block->ccm;
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_DISABLE) {
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_CCM_EN_MASK,
+ ISP_TOP_BED_CTRL_CCM_DIS);
+ return;
+ }
+
+ c3_isp_update_bits(isp, ISP_CCM_MTX_00_01,
+ ISP_CCM_MTX_00_01_MTX_00_MASK,
+ ISP_CCM_MTX_00_01_MTX_00(ccm->matrix[0][0]));
+ c3_isp_update_bits(isp, ISP_CCM_MTX_00_01,
+ ISP_CCM_MTX_00_01_MTX_01_MASK,
+ ISP_CCM_MTX_00_01_MTX_01(ccm->matrix[0][1]));
+ c3_isp_update_bits(isp, ISP_CCM_MTX_02_03,
+ ISP_CCM_MTX_02_03_MTX_02_MASK,
+ ISP_CCM_MTX_02_03_MTX_02(ccm->matrix[0][2]));
+
+ c3_isp_update_bits(isp, ISP_CCM_MTX_10_11,
+ ISP_CCM_MTX_10_11_MTX_10_MASK,
+ ISP_CCM_MTX_10_11_MTX_10(ccm->matrix[1][0]));
+ c3_isp_update_bits(isp, ISP_CCM_MTX_10_11,
+ ISP_CCM_MTX_10_11_MTX_11_MASK,
+ ISP_CCM_MTX_10_11_MTX_11(ccm->matrix[1][1]));
+ c3_isp_update_bits(isp, ISP_CCM_MTX_12_13,
+ ISP_CCM_MTX_12_13_MTX_12_MASK,
+ ISP_CCM_MTX_12_13_MTX_12(ccm->matrix[1][2]));
+
+ c3_isp_update_bits(isp, ISP_CCM_MTX_20_21,
+ ISP_CCM_MTX_20_21_MTX_20_MASK,
+ ISP_CCM_MTX_20_21_MTX_20(ccm->matrix[2][0]));
+ c3_isp_update_bits(isp, ISP_CCM_MTX_20_21,
+ ISP_CCM_MTX_20_21_MTX_21_MASK,
+ ISP_CCM_MTX_20_21_MTX_21(ccm->matrix[2][1]));
+ c3_isp_update_bits(isp, ISP_CCM_MTX_22_23_RS,
+ ISP_CCM_MTX_22_23_RS_MTX_22_MASK,
+ ISP_CCM_MTX_22_23_RS_MTX_22(ccm->matrix[2][2]));
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_ENABLE)
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_CCM_EN_MASK,
+ ISP_TOP_BED_CTRL_CCM_EN);
+}
+
+/* Configure color space conversion matrix parameters */
+static void c3_isp_params_cfg_csc(struct c3_isp_device *isp,
+ const union c3_isp_params_block *block)
+{
+ const struct c3_isp_params_csc *csc = &block->csc;
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_DISABLE) {
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_CM0_EN_MASK,
+ ISP_TOP_BED_CTRL_CM0_DIS);
+ return;
+ }
+
+ c3_isp_update_bits(isp, ISP_CM0_COEF00_01,
+ ISP_CM0_COEF00_01_MTX_00_MASK,
+ ISP_CM0_COEF00_01_MTX_00(csc->matrix[0][0]));
+ c3_isp_update_bits(isp, ISP_CM0_COEF00_01,
+ ISP_CM0_COEF00_01_MTX_01_MASK,
+ ISP_CM0_COEF00_01_MTX_01(csc->matrix[0][1]));
+ c3_isp_update_bits(isp, ISP_CM0_COEF02_10,
+ ISP_CM0_COEF02_10_MTX_02_MASK,
+ ISP_CM0_COEF02_10_MTX_02(csc->matrix[0][2]));
+
+ c3_isp_update_bits(isp, ISP_CM0_COEF02_10,
+ ISP_CM0_COEF02_10_MTX_10_MASK,
+ ISP_CM0_COEF02_10_MTX_10(csc->matrix[1][0]));
+ c3_isp_update_bits(isp, ISP_CM0_COEF11_12,
+ ISP_CM0_COEF11_12_MTX_11_MASK,
+ ISP_CM0_COEF11_12_MTX_11(csc->matrix[1][1]));
+ c3_isp_update_bits(isp, ISP_CM0_COEF11_12,
+ ISP_CM0_COEF11_12_MTX_12_MASK,
+ ISP_CM0_COEF11_12_MTX_12(csc->matrix[1][2]));
+
+ c3_isp_update_bits(isp, ISP_CM0_COEF20_21,
+ ISP_CM0_COEF20_21_MTX_20_MASK,
+ ISP_CM0_COEF20_21_MTX_20(csc->matrix[2][0]));
+ c3_isp_update_bits(isp, ISP_CM0_COEF20_21,
+ ISP_CM0_COEF20_21_MTX_21_MASK,
+ ISP_CM0_COEF20_21_MTX_21(csc->matrix[2][1]));
+ c3_isp_update_bits(isp, ISP_CM0_COEF22_OUP_OFST0,
+ ISP_CM0_COEF22_OUP_OFST0_MTX_22_MASK,
+ ISP_CM0_COEF22_OUP_OFST0_MTX_22(csc->matrix[2][2]));
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_ENABLE)
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_CM0_EN_MASK,
+ ISP_TOP_BED_CTRL_CM0_EN);
+}
+
+/* Set blc offset of each color channel */
+static void c3_isp_params_cfg_blc(struct c3_isp_device *isp,
+ const union c3_isp_params_block *block)
+{
+ const struct c3_isp_params_blc *blc = &block->blc;
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_DISABLE) {
+ c3_isp_update_bits(isp, ISP_TOP_BEO_CTRL,
+ ISP_TOP_BEO_CTRL_BLC_EN_MASK,
+ ISP_TOP_BEO_CTRL_BLC_DIS);
+ return;
+ }
+
+ c3_isp_write(isp, ISP_LSWB_BLC_OFST0,
+ ISP_LSWB_BLC_OFST0_R_OFST(blc->r_ofst) |
+ ISP_LSWB_BLC_OFST0_GR_OFST(blc->gr_ofst));
+ c3_isp_write(isp, ISP_LSWB_BLC_OFST1,
+ ISP_LSWB_BLC_OFST1_GB_OFST(blc->gb_ofst) |
+ ISP_LSWB_BLC_OFST1_B_OFST(blc->b_ofst));
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_ENABLE)
+ c3_isp_update_bits(isp, ISP_TOP_BEO_CTRL,
+ ISP_TOP_BEO_CTRL_BLC_EN_MASK,
+ ISP_TOP_BEO_CTRL_BLC_EN);
+}
+
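+/* Per-block-type dispatch table: expected block size and setup handler */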
+static const struct c3_isp_params_handler c3_isp_params_handlers[] = {
+ [C3_ISP_PARAMS_BLOCK_AWB_GAINS] = {
+ .size = sizeof(struct c3_isp_params_awb_gains),
+ .handler = c3_isp_params_cfg_awb_gains,
+ },
+ [C3_ISP_PARAMS_BLOCK_AWB_CONFIG] = {
+ .size = sizeof(struct c3_isp_params_awb_config),
+ .handler = c3_isp_params_cfg_awb_config,
+ },
+ [C3_ISP_PARAMS_BLOCK_AE_CONFIG] = {
+ .size = sizeof(struct c3_isp_params_ae_config),
+ .handler = c3_isp_params_cfg_ae_config,
+ },
+ [C3_ISP_PARAMS_BLOCK_AF_CONFIG] = {
+ .size = sizeof(struct c3_isp_params_af_config),
+ .handler = c3_isp_params_cfg_af_config,
+ },
+ [C3_ISP_PARAMS_BLOCK_PST_GAMMA] = {
+ .size = sizeof(struct c3_isp_params_pst_gamma),
+ .handler = c3_isp_params_cfg_pst_gamma,
+ },
+ [C3_ISP_PARAMS_BLOCK_CCM] = {
+ .size = sizeof(struct c3_isp_params_ccm),
+ .handler = c3_isp_params_cfg_ccm,
+ },
+ [C3_ISP_PARAMS_BLOCK_CSC] = {
+ .size = sizeof(struct c3_isp_params_csc),
+ .handler = c3_isp_params_cfg_csc,
+ },
+ [C3_ISP_PARAMS_BLOCK_BLC] = {
+ .size = sizeof(struct c3_isp_params_blc),
+ .handler = c3_isp_params_cfg_blc,
+ },
+};
+
+static void c3_isp_params_cfg_blocks(struct c3_isp_params *params)
+{
+ struct c3_isp_params_cfg *config = params->buff->cfg;
+ size_t block_offset = 0;
+
+ if (WARN_ON(!config))
+ return;
+
+ /* Walk the list of parameter blocks and process them */
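+	/* Block types and sizes were already validated in buf_prepare */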
+ while (block_offset < config->data_size) {
+ const struct c3_isp_params_handler *block_handler;
+ const union c3_isp_params_block *block;
+
+ block = (const union c3_isp_params_block *)
+ &config->data[block_offset];
+
+ block_handler = &c3_isp_params_handlers[block->header.type];
+ block_handler->handler(params->isp, block);
+
+ block_offset += block->header.size;
+ }
+}
+
+void c3_isp_params_pre_cfg(struct c3_isp_device *isp)
+{
+ struct c3_isp_params *params = &isp->params;
+
+ /* Disable some unused modules */
+ c3_isp_update_bits(isp, ISP_TOP_FEO_CTRL0,
+ ISP_TOP_FEO_CTRL0_INPUT_FMT_EN_MASK,
+ ISP_TOP_FEO_CTRL0_INPUT_FMT_DIS);
+
+ c3_isp_update_bits(isp, ISP_TOP_FEO_CTRL1_0,
+ ISP_TOP_FEO_CTRL1_0_DPC_EN_MASK,
+ ISP_TOP_FEO_CTRL1_0_DPC_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_FEO_CTRL1_0,
+ ISP_TOP_FEO_CTRL1_0_OG_EN_MASK,
+ ISP_TOP_FEO_CTRL1_0_OG_DIS);
+
+ c3_isp_update_bits(isp, ISP_TOP_FED_CTRL, ISP_TOP_FED_CTRL_PDPC_EN_MASK,
+ ISP_TOP_FED_CTRL_PDPC_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_FED_CTRL,
+ ISP_TOP_FED_CTRL_RAWCNR_EN_MASK,
+ ISP_TOP_FED_CTRL_RAWCNR_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_FED_CTRL, ISP_TOP_FED_CTRL_SNR1_EN_MASK,
+ ISP_TOP_FED_CTRL_SNR1_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_FED_CTRL, ISP_TOP_FED_CTRL_TNR0_EN_MASK,
+ ISP_TOP_FED_CTRL_TNR0_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_FED_CTRL,
+ ISP_TOP_FED_CTRL_CUBIC_CS_EN_MASK,
+ ISP_TOP_FED_CTRL_CUBIC_CS_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_FED_CTRL, ISP_TOP_FED_CTRL_SQRT_EN_MASK,
+ ISP_TOP_FED_CTRL_SQRT_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_FED_CTRL,
+ ISP_TOP_FED_CTRL_DGAIN_EN_MASK,
+ ISP_TOP_FED_CTRL_DGAIN_DIS);
+
+ c3_isp_update_bits(isp, ISP_TOP_BEO_CTRL,
+ ISP_TOP_BEO_CTRL_INV_DGAIN_EN_MASK,
+ ISP_TOP_BEO_CTRL_INV_DGAIN_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_BEO_CTRL, ISP_TOP_BEO_CTRL_EOTF_EN_MASK,
+ ISP_TOP_BEO_CTRL_EOTF_DIS);
+
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_YHS_STAT_EN_MASK,
+ ISP_TOP_BED_CTRL_YHS_STAT_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_GRPH_STAT_EN_MASK,
+ ISP_TOP_BED_CTRL_GRPH_STAT_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_FMETER_EN_MASK,
+ ISP_TOP_BED_CTRL_FMETER_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL, ISP_TOP_BED_CTRL_BSC_EN_MASK,
+ ISP_TOP_BED_CTRL_BSC_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL, ISP_TOP_BED_CTRL_CNR2_EN_MASK,
+ ISP_TOP_BED_CTRL_CNR2_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL, ISP_TOP_BED_CTRL_CM1_EN_MASK,
+ ISP_TOP_BED_CTRL_CM1_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_LUT3D_EN_MASK,
+ ISP_TOP_BED_CTRL_LUT3D_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_PST_TNR_LITE_EN_MASK,
+ ISP_TOP_BED_CTRL_PST_TNR_LITE_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL, ISP_TOP_BED_CTRL_AMCM_EN_MASK,
+ ISP_TOP_BED_CTRL_AMCM_DIS);
+
+	/*
+	 * Disable the AE, AF and AWB statistics modules. Userspace has to
+	 * configure the corresponding parameters to enable them.
+	 */
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AE_STAT_EN_MASK,
+ ISP_TOP_3A_STAT_CRTL_AE_STAT_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AWB_STAT_EN_MASK,
+ ISP_TOP_3A_STAT_CRTL_AWB_STAT_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AF_STAT_EN_MASK,
+ ISP_TOP_3A_STAT_CRTL_AF_STAT_DIS);
+
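+	/* Set the white balance clipping limits to their maximum values */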
+ c3_isp_write(isp, ISP_LSWB_WB_LIMIT0,
+ ISP_LSWB_WB_LIMIT0_WB_LIMIT_R_MAX |
+ ISP_LSWB_WB_LIMIT0_WB_LIMIT_GR_MAX);
+ c3_isp_write(isp, ISP_LSWB_WB_LIMIT1,
+ ISP_LSWB_WB_LIMIT1_WB_LIMIT_GB_MAX |
+ ISP_LSWB_WB_LIMIT1_WB_LIMIT_B_MAX);
+
+ guard(spinlock_irqsave)(&params->buff_lock);
+
+ /* Only use the first buffer to initialize ISP */
+ params->buff =
+ list_first_entry_or_null(&params->pending,
+ struct c3_isp_params_buffer, list);
+ if (params->buff)
+ c3_isp_params_cfg_blocks(params);
+}
+
+/* V4L2 video operations */
+
+static int c3_isp_params_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, C3_ISP_DRIVER_NAME, sizeof(cap->driver));
+ strscpy(cap->card, "AML C3 ISP", sizeof(cap->card));
+
+ return 0;
+}
+
+static int c3_isp_params_enum_fmt(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index)
+ return -EINVAL;
+
+ f->pixelformat = V4L2_META_FMT_C3ISP_PARAMS;
+
+ return 0;
+}
+
+static int c3_isp_params_g_fmt(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct c3_isp_params *params = video_drvdata(file);
+
+ f->fmt.meta = params->vfmt.fmt.meta;
+
+ return 0;
+}
+
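+/*
+ * The meta format is fixed, so the set and try format ioctls are backed by
+ * the get format handler.
+ */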
+static const struct v4l2_ioctl_ops isp_params_v4l2_ioctl_ops = {
+ .vidioc_querycap = c3_isp_params_querycap,
+ .vidioc_enum_fmt_meta_out = c3_isp_params_enum_fmt,
+ .vidioc_g_fmt_meta_out = c3_isp_params_g_fmt,
+ .vidioc_s_fmt_meta_out = c3_isp_params_g_fmt,
+ .vidioc_try_fmt_meta_out = c3_isp_params_g_fmt,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+static const struct v4l2_file_operations isp_params_v4l2_fops = {
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+static int c3_isp_params_vb2_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ if (*num_planes) {
+ if (*num_planes != 1)
+ return -EINVAL;
+
+ if (sizes[0] < sizeof(struct c3_isp_params_cfg))
+ return -EINVAL;
+
+ return 0;
+ }
+
+ *num_planes = 1;
+ sizes[0] = sizeof(struct c3_isp_params_cfg);
+
+ return 0;
+}
+
+static void c3_isp_params_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+ struct c3_isp_params_buffer *buf = to_c3_isp_params_buffer(v4l2_buf);
+ struct c3_isp_params *params = vb2_get_drv_priv(vb->vb2_queue);
+
+ guard(spinlock_irqsave)(&params->buff_lock);
+
+ list_add_tail(&buf->list, &params->pending);
+}
+
+static int c3_isp_params_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct c3_isp_params_buffer *buf = to_c3_isp_params_buffer(vbuf);
+ struct c3_isp_params *params = vb2_get_drv_priv(vb->vb2_queue);
+ struct c3_isp_params_cfg *cfg = buf->cfg;
+ struct c3_isp_params_cfg *usr_cfg = vb2_plane_vaddr(vb, 0);
+ size_t payload_size = vb2_get_plane_payload(vb, 0);
+ size_t header_size = offsetof(struct c3_isp_params_cfg, data);
+ size_t block_offset = 0;
+ size_t cfg_size;
+
+ /* Payload size can't be greater than the destination buffer size */
+ if (payload_size > params->vfmt.fmt.meta.buffersize) {
+ dev_dbg(params->isp->dev,
+ "Payload size is too large: %zu\n", payload_size);
+ return -EINVAL;
+ }
+
+ /* Payload size can't be smaller than the header size */
+ if (payload_size < header_size) {
+ dev_dbg(params->isp->dev,
+ "Payload size is too small: %zu\n", payload_size);
+ return -EINVAL;
+ }
+
+ /*
+ * Use the internal scratch buffer to avoid userspace modifying
+ * the buffer content while the driver is processing it.
+ */
+ memcpy(cfg, usr_cfg, payload_size);
+
+ /* Only v0 is supported at the moment */
+ if (cfg->version != C3_ISP_PARAMS_BUFFER_V0) {
+ dev_dbg(params->isp->dev,
+ "Invalid params buffer version: %u\n", cfg->version);
+ return -EINVAL;
+ }
+
+ /* Validate the size reported in the parameter buffer header */
+ cfg_size = header_size + cfg->data_size;
+ if (cfg_size != payload_size) {
+ dev_dbg(params->isp->dev,
+ "Data size %zu and payload size %zu are different\n",
+ cfg_size, payload_size);
+ return -EINVAL;
+ }
+
+ /* Walk the list of parameter blocks and validate them */
+ cfg_size = cfg->data_size;
+ while (cfg_size >= sizeof(struct c3_isp_params_block_header)) {
+ const struct c3_isp_params_block_header *block;
+ const struct c3_isp_params_handler *handler;
+
+ block = (struct c3_isp_params_block_header *)
+ &cfg->data[block_offset];
+
+ if (block->type >= ARRAY_SIZE(c3_isp_params_handlers)) {
+ dev_dbg(params->isp->dev,
+ "Invalid params block type\n");
+ return -EINVAL;
+ }
+
+ if (block->size > cfg_size) {
+ dev_dbg(params->isp->dev,
+ "Block size is greater than cfg size\n");
+ return -EINVAL;
+ }
+
+ if ((block->flags & (C3_ISP_PARAMS_BLOCK_FL_ENABLE |
+ C3_ISP_PARAMS_BLOCK_FL_DISABLE)) ==
+ (C3_ISP_PARAMS_BLOCK_FL_ENABLE |
+ C3_ISP_PARAMS_BLOCK_FL_DISABLE)) {
+ dev_dbg(params->isp->dev,
+ "Invalid parameters block flags\n");
+ return -EINVAL;
+ }
+
+ handler = &c3_isp_params_handlers[block->type];
+ if (block->size != handler->size) {
+ dev_dbg(params->isp->dev,
+ "Invalid params block size\n");
+ return -EINVAL;
+ }
+
+ block_offset += block->size;
+ cfg_size -= block->size;
+ }
+
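+	/* Any residual bytes are a truncated block or trailing garbage */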
+ if (cfg_size) {
+ dev_dbg(params->isp->dev,
+ "Unexpected data after the params buffer end\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int c3_isp_params_vb2_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+ struct c3_isp_params *params = vb2_get_drv_priv(vb->vb2_queue);
+ struct c3_isp_params_buffer *buf = to_c3_isp_params_buffer(v4l2_buf);
+
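+	/* Scratch buffer that buf_prepare() copies the userspace config into */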
+ buf->cfg = kvmalloc(params->vfmt.fmt.meta.buffersize, GFP_KERNEL);
+ if (!buf->cfg)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void c3_isp_params_vb2_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+ struct c3_isp_params_buffer *buf = to_c3_isp_params_buffer(v4l2_buf);
+
+ kvfree(buf->cfg);
+ buf->cfg = NULL;
+}
+
+static void c3_isp_params_vb2_stop_streaming(struct vb2_queue *q)
+{
+ struct c3_isp_params *params = vb2_get_drv_priv(q);
+ struct c3_isp_params_buffer *buff;
+
+ guard(spinlock_irqsave)(&params->buff_lock);
+
+ while (!list_empty(&params->pending)) {
+ buff = list_first_entry(&params->pending,
+ struct c3_isp_params_buffer, list);
+ list_del(&buff->list);
+ vb2_buffer_done(&buff->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static const struct vb2_ops isp_params_vb2_ops = {
+ .queue_setup = c3_isp_params_vb2_queue_setup,
+ .buf_queue = c3_isp_params_vb2_buf_queue,
+ .buf_prepare = c3_isp_params_vb2_buf_prepare,
+ .buf_init = c3_isp_params_vb2_buf_init,
+ .buf_cleanup = c3_isp_params_vb2_buf_cleanup,
+ .stop_streaming = c3_isp_params_vb2_stop_streaming,
+};
+
+int c3_isp_params_register(struct c3_isp_device *isp)
+{
+ struct c3_isp_params *params = &isp->params;
+ struct video_device *vdev = &params->vdev;
+ struct vb2_queue *vb2_q = &params->vb2_q;
+ int ret;
+
+ memset(params, 0, sizeof(*params));
+ params->vfmt.fmt.meta.dataformat = V4L2_META_FMT_C3ISP_PARAMS;
+ params->vfmt.fmt.meta.buffersize = sizeof(struct c3_isp_params_cfg);
+ params->isp = isp;
+ INIT_LIST_HEAD(&params->pending);
+ spin_lock_init(&params->buff_lock);
+ mutex_init(&params->lock);
+
+ snprintf(vdev->name, sizeof(vdev->name), "c3-isp-params");
+ vdev->fops = &isp_params_v4l2_fops;
+ vdev->ioctl_ops = &isp_params_v4l2_ioctl_ops;
+ vdev->v4l2_dev = &isp->v4l2_dev;
+ vdev->lock = &params->lock;
+ vdev->minor = -1;
+ vdev->queue = vb2_q;
+ vdev->release = video_device_release_empty;
+ vdev->device_caps = V4L2_CAP_META_OUTPUT | V4L2_CAP_STREAMING;
+ vdev->vfl_dir = VFL_DIR_TX;
+ video_set_drvdata(vdev, params);
+
+ vb2_q->drv_priv = params;
+ vb2_q->mem_ops = &vb2_vmalloc_memops;
+ vb2_q->ops = &isp_params_vb2_ops;
+ vb2_q->type = V4L2_BUF_TYPE_META_OUTPUT;
+ vb2_q->io_modes = VB2_DMABUF | VB2_MMAP;
+ vb2_q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vb2_q->buf_struct_size = sizeof(struct c3_isp_params_buffer);
+ vb2_q->dev = isp->dev;
+ vb2_q->lock = &params->lock;
+ vb2_q->min_queued_buffers = 1;
+
+ ret = vb2_queue_init(vb2_q);
+ if (ret)
+		goto err_destroy;
+
+ params->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&vdev->entity, 1, &params->pad);
+ if (ret)
+ goto err_queue_release;
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret < 0) {
+ dev_err(isp->dev,
+ "Failed to register %s: %d\n", vdev->name, ret);
+ goto err_entity_cleanup;
+ }
+
+ return 0;
+
+err_entity_cleanup:
+ media_entity_cleanup(&vdev->entity);
+err_queue_release:
+ vb2_queue_release(vb2_q);
+err_destroy:
+ mutex_destroy(&params->lock);
+ return ret;
+}
+
+void c3_isp_params_unregister(struct c3_isp_device *isp)
+{
+ struct c3_isp_params *params = &isp->params;
+
+ vb2_queue_release(&params->vb2_q);
+ media_entity_cleanup(&params->vdev.entity);
+ video_unregister_device(&params->vdev);
+ mutex_destroy(&params->lock);
+}
+
+void c3_isp_params_isr(struct c3_isp_device *isp)
+{
+ struct c3_isp_params *params = &isp->params;
+
+ guard(spinlock_irqsave)(&params->buff_lock);
+
+ params->buff =
+ list_first_entry_or_null(&params->pending,
+ struct c3_isp_params_buffer, list);
+ if (!params->buff)
+ return;
+
+ list_del(&params->buff->list);
+
+ c3_isp_params_cfg_blocks(params);
+
+ params->buff->vb.sequence = params->isp->frm_sequence;
+	params->buff->vb.vb2_buf.timestamp = ktime_get_ns();
+ params->buff->vb.field = V4L2_FIELD_NONE;
+ vb2_buffer_done(&params->buff->vb.vb2_buf, VB2_BUF_STATE_DONE);
+}
diff --git a/drivers/media/platform/amlogic/c3/isp/c3-isp-regs.h b/drivers/media/platform/amlogic/c3/isp/c3-isp-regs.h
new file mode 100644
index 000000000000..fa249985a771
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/c3-isp-regs.h
@@ -0,0 +1,618 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#ifndef __C3_ISP_REGS_H__
+#define __C3_ISP_REGS_H__
+
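+/*
+ * Each register field below is described by a <FIELD>_MASK macro plus value
+ * macros (<FIELD>(x) or the _EN/_DIS variants) that shift the value into
+ * place, ready to be used with c3_isp_update_bits().
+ */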
+#define ISP_TOP_INPUT_SIZE 0x0000
+#define ISP_TOP_INPUT_SIZE_VERT_SIZE_MASK GENMASK(15, 0)
+#define ISP_TOP_INPUT_SIZE_VERT_SIZE(x) ((x) << 0)
+#define ISP_TOP_INPUT_SIZE_HORIZ_SIZE_MASK GENMASK(31, 16)
+#define ISP_TOP_INPUT_SIZE_HORIZ_SIZE(x) ((x) << 16)
+
+#define ISP_TOP_FRM_SIZE 0x0004
+#define ISP_TOP_FRM_SIZE_CORE_VERT_SIZE_MASK GENMASK(15, 0)
+#define ISP_TOP_FRM_SIZE_CORE_VERT_SIZE(x) ((x) << 0)
+#define ISP_TOP_FRM_SIZE_CORE_HORIZ_SIZE_MASK GENMASK(31, 16)
+#define ISP_TOP_FRM_SIZE_CORE_HORIZ_SIZE(x) ((x) << 16)
+
+#define ISP_TOP_HOLD_SIZE 0x0008
+#define ISP_TOP_HOLD_SIZE_CORE_HORIZ_SIZE_MASK GENMASK(31, 16)
+#define ISP_TOP_HOLD_SIZE_CORE_HORIZ_SIZE(x) ((x) << 16)
+
+#define ISP_TOP_PATH_EN 0x0010
+#define ISP_TOP_PATH_EN_DISP0_EN_MASK BIT(0)
+#define ISP_TOP_PATH_EN_DISP0_EN BIT(0)
+#define ISP_TOP_PATH_EN_DISP0_DIS (0 << 0)
+#define ISP_TOP_PATH_EN_DISP1_EN_MASK BIT(1)
+#define ISP_TOP_PATH_EN_DISP1_EN BIT(1)
+#define ISP_TOP_PATH_EN_DISP1_DIS (0 << 1)
+#define ISP_TOP_PATH_EN_DISP2_EN_MASK BIT(2)
+#define ISP_TOP_PATH_EN_DISP2_EN BIT(2)
+#define ISP_TOP_PATH_EN_DISP2_DIS (0 << 2)
+#define ISP_TOP_PATH_EN_WRMIF0_EN_MASK BIT(8)
+#define ISP_TOP_PATH_EN_WRMIF0_EN BIT(8)
+#define ISP_TOP_PATH_EN_WRMIF0_DIS (0 << 8)
+#define ISP_TOP_PATH_EN_WRMIF1_EN_MASK BIT(9)
+#define ISP_TOP_PATH_EN_WRMIF1_EN BIT(9)
+#define ISP_TOP_PATH_EN_WRMIF1_DIS (0 << 9)
+#define ISP_TOP_PATH_EN_WRMIF2_EN_MASK BIT(10)
+#define ISP_TOP_PATH_EN_WRMIF2_EN BIT(10)
+#define ISP_TOP_PATH_EN_WRMIF2_DIS (0 << 10)
+
+#define ISP_TOP_PATH_SEL 0x0014
+#define ISP_TOP_PATH_SEL_CORE_MASK GENMASK(18, 16)
+#define ISP_TOP_PATH_SEL_CORE_CORE_DIS (0 << 16)
+#define ISP_TOP_PATH_SEL_CORE_MIPI_CORE BIT(16)
+
+#define ISP_TOP_DISPIN_SEL 0x0018
+#define ISP_TOP_DISPIN_SEL_DISP0_MASK GENMASK(3, 0)
+#define ISP_TOP_DISPIN_SEL_DISP0_CORE_OUT (0 << 0)
+#define ISP_TOP_DISPIN_SEL_DISP0_MIPI_OUT (2 << 0)
+#define ISP_TOP_DISPIN_SEL_DISP1_MASK GENMASK(7, 4)
+#define ISP_TOP_DISPIN_SEL_DISP1_CORE_OUT (0 << 4)
+#define ISP_TOP_DISPIN_SEL_DISP1_MIPI_OUT (2 << 4)
+#define ISP_TOP_DISPIN_SEL_DISP2_MASK GENMASK(11, 8)
+#define ISP_TOP_DISPIN_SEL_DISP2_CORE_OUT (0 << 8)
+#define ISP_TOP_DISPIN_SEL_DISP2_MIPI_OUT (2 << 8)
+
+#define ISP_TOP_IRQ_EN 0x0080
+#define ISP_TOP_IRQ_EN_FRM_END_MASK BIT(0)
+#define ISP_TOP_IRQ_EN_FRM_END_EN BIT(0)
+#define ISP_TOP_IRQ_EN_FRM_END_DIS (0 << 0)
+#define ISP_TOP_IRQ_EN_FRM_RST_MASK BIT(1)
+#define ISP_TOP_IRQ_EN_FRM_RST_EN BIT(1)
+#define ISP_TOP_IRQ_EN_FRM_RST_DIS (0 << 1)
+#define ISP_TOP_IRQ_EN_3A_DMA_ERR_MASK BIT(5)
+#define ISP_TOP_IRQ_EN_3A_DMA_ERR_EN BIT(5)
+#define ISP_TOP_IRQ_EN_3A_DMA_ERR_DIS (0 << 5)
+
+#define ISP_TOP_IRQ_CLR 0x0084
+#define ISP_TOP_RO_IRQ_STAT 0x01c4
+#define ISP_TOP_RO_IRQ_STAT_FRM_END_MASK BIT(0)
+#define ISP_TOP_RO_IRQ_STAT_FRM_RST_MASK BIT(1)
+#define ISP_TOP_RO_IRQ_STAT_3A_DMA_ERR_MASK BIT(5)
+
+#define ISP_TOP_MODE_CTRL 0x0400
+#define ISP_TOP_FEO_CTRL0 0x040c
+#define ISP_TOP_FEO_CTRL0_INPUT_FMT_EN_MASK BIT(8)
+#define ISP_TOP_FEO_CTRL0_INPUT_FMT_DIS (0 << 8)
+#define ISP_TOP_FEO_CTRL0_INPUT_FMT_EN BIT(8)
+
+#define ISP_TOP_FEO_CTRL1_0 0x0410
+#define ISP_TOP_FEO_CTRL1_0_DPC_EN_MASK BIT(3)
+#define ISP_TOP_FEO_CTRL1_0_DPC_DIS (0 << 3)
+#define ISP_TOP_FEO_CTRL1_0_DPC_EN BIT(3)
+#define ISP_TOP_FEO_CTRL1_0_OG_EN_MASK BIT(5)
+#define ISP_TOP_FEO_CTRL1_0_OG_DIS (0 << 5)
+#define ISP_TOP_FEO_CTRL1_0_OG_EN BIT(5)
+
+#define ISP_TOP_FED_CTRL 0x0418
+#define ISP_TOP_FED_CTRL_PDPC_EN_MASK BIT(1)
+#define ISP_TOP_FED_CTRL_PDPC_DIS (0 << 1)
+#define ISP_TOP_FED_CTRL_PDPC_EN BIT(1)
+#define ISP_TOP_FED_CTRL_RAWCNR_EN_MASK GENMASK(6, 5)
+#define ISP_TOP_FED_CTRL_RAWCNR_DIS (0 << 5)
+#define ISP_TOP_FED_CTRL_RAWCNR_EN BIT(5)
+#define ISP_TOP_FED_CTRL_SNR1_EN_MASK BIT(9)
+#define ISP_TOP_FED_CTRL_SNR1_DIS (0 << 9)
+#define ISP_TOP_FED_CTRL_SNR1_EN BIT(9)
+#define ISP_TOP_FED_CTRL_TNR0_EN_MASK BIT(11)
+#define ISP_TOP_FED_CTRL_TNR0_DIS (0 << 11)
+#define ISP_TOP_FED_CTRL_TNR0_EN BIT(11)
+#define ISP_TOP_FED_CTRL_CUBIC_CS_EN_MASK BIT(12)
+#define ISP_TOP_FED_CTRL_CUBIC_CS_DIS (0 << 12)
+#define ISP_TOP_FED_CTRL_CUBIC_CS_EN BIT(12)
+#define ISP_TOP_FED_CTRL_SQRT_EN_MASK BIT(14)
+#define ISP_TOP_FED_CTRL_SQRT_DIS (0 << 14)
+#define ISP_TOP_FED_CTRL_SQRT_EN BIT(14)
+#define ISP_TOP_FED_CTRL_DGAIN_EN_MASK BIT(16)
+#define ISP_TOP_FED_CTRL_DGAIN_DIS (0 << 16)
+#define ISP_TOP_FED_CTRL_DGAIN_EN BIT(16)
+
+#define ISP_TOP_BEO_CTRL 0x041c
+#define ISP_TOP_BEO_CTRL_WB_EN_MASK BIT(6)
+#define ISP_TOP_BEO_CTRL_WB_DIS (0 << 6)
+#define ISP_TOP_BEO_CTRL_WB_EN BIT(6)
+#define ISP_TOP_BEO_CTRL_BLC_EN_MASK BIT(7)
+#define ISP_TOP_BEO_CTRL_BLC_DIS (0 << 7)
+#define ISP_TOP_BEO_CTRL_BLC_EN BIT(7)
+#define ISP_TOP_BEO_CTRL_INV_DGAIN_EN_MASK BIT(8)
+#define ISP_TOP_BEO_CTRL_INV_DGAIN_DIS (0 << 8)
+#define ISP_TOP_BEO_CTRL_INV_DGAIN_EN BIT(8)
+#define ISP_TOP_BEO_CTRL_EOTF_EN_MASK BIT(9)
+#define ISP_TOP_BEO_CTRL_EOTF_DIS (0 << 9)
+#define ISP_TOP_BEO_CTRL_EOTF_EN BIT(9)
+
+#define ISP_TOP_BED_CTRL 0x0420
+#define ISP_TOP_BED_CTRL_YHS_STAT_EN_MASK GENMASK(1, 0)
+#define ISP_TOP_BED_CTRL_YHS_STAT_DIS (0 << 0)
+#define ISP_TOP_BED_CTRL_YHS_STAT_EN BIT(0)
+#define ISP_TOP_BED_CTRL_GRPH_STAT_EN_MASK BIT(2)
+#define ISP_TOP_BED_CTRL_GRPH_STAT_DIS (0 << 2)
+#define ISP_TOP_BED_CTRL_GRPH_STAT_EN BIT(2)
+#define ISP_TOP_BED_CTRL_FMETER_EN_MASK BIT(3)
+#define ISP_TOP_BED_CTRL_FMETER_DIS (0 << 3)
+#define ISP_TOP_BED_CTRL_FMETER_EN BIT(3)
+#define ISP_TOP_BED_CTRL_BSC_EN_MASK BIT(10)
+#define ISP_TOP_BED_CTRL_BSC_DIS (0 << 10)
+#define ISP_TOP_BED_CTRL_BSC_EN BIT(10)
+#define ISP_TOP_BED_CTRL_CNR2_EN_MASK BIT(11)
+#define ISP_TOP_BED_CTRL_CNR2_DIS (0 << 11)
+#define ISP_TOP_BED_CTRL_CNR2_EN BIT(11)
+#define ISP_TOP_BED_CTRL_CM1_EN_MASK BIT(13)
+#define ISP_TOP_BED_CTRL_CM1_DIS (0 << 13)
+#define ISP_TOP_BED_CTRL_CM1_EN BIT(13)
+#define ISP_TOP_BED_CTRL_CM0_EN_MASK BIT(14)
+#define ISP_TOP_BED_CTRL_CM0_DIS (0 << 14)
+#define ISP_TOP_BED_CTRL_CM0_EN BIT(14)
+#define ISP_TOP_BED_CTRL_PST_GAMMA_EN_MASK BIT(16)
+#define ISP_TOP_BED_CTRL_PST_GAMMA_DIS (0 << 16)
+#define ISP_TOP_BED_CTRL_PST_GAMMA_EN BIT(16)
+#define ISP_TOP_BED_CTRL_LUT3D_EN_MASK BIT(17)
+#define ISP_TOP_BED_CTRL_LUT3D_DIS (0 << 17)
+#define ISP_TOP_BED_CTRL_LUT3D_EN BIT(17)
+#define ISP_TOP_BED_CTRL_CCM_EN_MASK BIT(18)
+#define ISP_TOP_BED_CTRL_CCM_DIS (0 << 18)
+#define ISP_TOP_BED_CTRL_CCM_EN BIT(18)
+#define ISP_TOP_BED_CTRL_PST_TNR_LITE_EN_MASK BIT(21)
+#define ISP_TOP_BED_CTRL_PST_TNR_LITE_DIS (0 << 21)
+#define ISP_TOP_BED_CTRL_PST_TNR_LITE_EN BIT(21)
+#define ISP_TOP_BED_CTRL_AMCM_EN_MASK BIT(25)
+#define ISP_TOP_BED_CTRL_AMCM_DIS (0 << 25)
+#define ISP_TOP_BED_CTRL_AMCM_EN BIT(25)
+
+#define ISP_TOP_3A_STAT_CRTL 0x0424
+#define ISP_TOP_3A_STAT_CRTL_AE_STAT_EN_MASK BIT(0)
+#define ISP_TOP_3A_STAT_CRTL_AE_STAT_DIS (0 << 0)
+#define ISP_TOP_3A_STAT_CRTL_AE_STAT_EN BIT(0)
+#define ISP_TOP_3A_STAT_CRTL_AWB_STAT_EN_MASK BIT(1)
+#define ISP_TOP_3A_STAT_CRTL_AWB_STAT_DIS (0 << 1)
+#define ISP_TOP_3A_STAT_CRTL_AWB_STAT_EN BIT(1)
+#define ISP_TOP_3A_STAT_CRTL_AF_STAT_EN_MASK BIT(2)
+#define ISP_TOP_3A_STAT_CRTL_AF_STAT_DIS (0 << 2)
+#define ISP_TOP_3A_STAT_CRTL_AF_STAT_EN BIT(2)
+#define ISP_TOP_3A_STAT_CRTL_AWB_POINT_MASK GENMASK(6, 4)
+#define ISP_TOP_3A_STAT_CRTL_AWB_POINT(x) ((x) << 4)
+#define ISP_TOP_3A_STAT_CRTL_AE_POINT_MASK GENMASK(9, 8)
+#define ISP_TOP_3A_STAT_CRTL_AE_POINT(x) ((x) << 8)
+#define ISP_TOP_3A_STAT_CRTL_AF_POINT_MASK GENMASK(13, 12)
+#define ISP_TOP_3A_STAT_CRTL_AF_POINT(x) ((x) << 12)
+
+#define ISP_LSWB_BLC_OFST0 0x4028
+#define ISP_LSWB_BLC_OFST0_R_OFST_MASK GENMASK(15, 0)
+#define ISP_LSWB_BLC_OFST0_R_OFST(x) ((x) << 0)
+#define ISP_LSWB_BLC_OFST0_GR_OFST_MASK GENMASK(31, 16)
+#define ISP_LSWB_BLC_OFST0_GR_OFST(x) ((x) << 16)
+
+#define ISP_LSWB_BLC_OFST1 0x402c
+#define ISP_LSWB_BLC_OFST1_GB_OFST_MASK GENMASK(15, 0)
+#define ISP_LSWB_BLC_OFST1_GB_OFST(x) ((x) << 0)
+#define ISP_LSWB_BLC_OFST1_B_OFST_MASK GENMASK(31, 16)
+#define ISP_LSWB_BLC_OFST1_B_OFST(x) ((x) << 16)
+
+#define ISP_LSWB_BLC_PHSOFST 0x4034
+#define ISP_LSWB_BLC_PHSOFST_VERT_OFST_MASK GENMASK(1, 0)
+#define ISP_LSWB_BLC_PHSOFST_VERT_OFST(x) ((x) << 0)
+#define ISP_LSWB_BLC_PHSOFST_HORIZ_OFST_MASK GENMASK(3, 2)
+#define ISP_LSWB_BLC_PHSOFST_HORIZ_OFST(x) ((x) << 2)
+
+#define ISP_LSWB_WB_GAIN0 0x4038
+#define ISP_LSWB_WB_GAIN0_R_GAIN_MASK GENMASK(11, 0)
+#define ISP_LSWB_WB_GAIN0_R_GAIN(x) ((x) << 0)
+#define ISP_LSWB_WB_GAIN0_GR_GAIN_MASK GENMASK(27, 16)
+#define ISP_LSWB_WB_GAIN0_GR_GAIN(x) ((x) << 16)
+
+#define ISP_LSWB_WB_GAIN1 0x403c
+#define ISP_LSWB_WB_GAIN1_GB_GAIN_MASK GENMASK(11, 0)
+#define ISP_LSWB_WB_GAIN1_GB_GAIN(x) ((x) << 0)
+#define ISP_LSWB_WB_GAIN1_B_GAIN_MASK GENMASK(27, 16)
+#define ISP_LSWB_WB_GAIN1_B_GAIN(x) ((x) << 16)
+
+#define ISP_LSWB_WB_GAIN2 0x4040
+#define ISP_LSWB_WB_GAIN2_IR_GAIN_MASK GENMASK(11, 0)
+#define ISP_LSWB_WB_GAIN2_IR_GAIN(x) ((x) << 0)
+
+#define ISP_LSWB_WB_LIMIT0 0x4044
+#define ISP_LSWB_WB_LIMIT0_WB_LIMIT_R_MASK GENMASK(15, 0)
+#define ISP_LSWB_WB_LIMIT0_WB_LIMIT_R(x) ((x) << 0)
+#define ISP_LSWB_WB_LIMIT0_WB_LIMIT_R_MAX (0x8fff << 0)
+#define ISP_LSWB_WB_LIMIT0_WB_LIMIT_GR_MASK GENMASK(31, 16)
+#define ISP_LSWB_WB_LIMIT0_WB_LIMIT_GR(x) ((x) << 16)
+#define ISP_LSWB_WB_LIMIT0_WB_LIMIT_GR_MAX (0x8fff << 16)
+
+#define ISP_LSWB_WB_LIMIT1 0x4048
+#define ISP_LSWB_WB_LIMIT1_WB_LIMIT_GB_MASK GENMASK(15, 0)
+#define ISP_LSWB_WB_LIMIT1_WB_LIMIT_GB(x) ((x) << 0)
+#define ISP_LSWB_WB_LIMIT1_WB_LIMIT_GB_MAX (0x8fff << 0)
+#define ISP_LSWB_WB_LIMIT1_WB_LIMIT_B_MASK GENMASK(31, 16)
+#define ISP_LSWB_WB_LIMIT1_WB_LIMIT_B(x) ((x) << 16)
+#define ISP_LSWB_WB_LIMIT1_WB_LIMIT_B_MAX (0x8fff << 16)
+
+#define ISP_LSWB_WB_PHSOFST 0x4050
+#define ISP_LSWB_WB_PHSOFST_VERT_OFST_MASK GENMASK(1, 0)
+#define ISP_LSWB_WB_PHSOFST_VERT_OFST(x) ((x) << 0)
+#define ISP_LSWB_WB_PHSOFST_HORIZ_OFST_MASK GENMASK(3, 2)
+#define ISP_LSWB_WB_PHSOFST_HORIZ_OFST(x) ((x) << 2)
+
+#define ISP_LSWB_LNS_PHSOFST 0x4054
+#define ISP_LSWB_LNS_PHSOFST_VERT_OFST_MASK GENMASK(1, 0)
+#define ISP_LSWB_LNS_PHSOFST_VERT_OFST(x) ((x) << 0)
+#define ISP_LSWB_LNS_PHSOFST_HORIZ_OFST_MASK GENMASK(3, 2)
+#define ISP_LSWB_LNS_PHSOFST_HORIZ_OFST(x) ((x) << 2)
+
+#define ISP_DMS_COMMON_PARAM0 0x5000
+#define ISP_DMS_COMMON_PARAM0_VERT_PHS_OFST_MASK GENMASK(1, 0)
+#define ISP_DMS_COMMON_PARAM0_VERT_PHS_OFST(x) ((x) << 0)
+#define ISP_DMS_COMMON_PARAM0_HORIZ_PHS_OFST_MASK GENMASK(3, 2)
+#define ISP_DMS_COMMON_PARAM0_HORIZ_PHS_OFST(x) ((x) << 2)
+
+#define ISP_CM0_COEF00_01 0x6048
+#define ISP_CM0_COEF00_01_MTX_00_MASK GENMASK(12, 0)
+#define ISP_CM0_COEF00_01_MTX_00(x) ((x) << 0)
+#define ISP_CM0_COEF00_01_MTX_01_MASK GENMASK(28, 16)
+#define ISP_CM0_COEF00_01_MTX_01(x) ((x) << 16)
+
+#define ISP_CM0_COEF02_10 0x604c
+#define ISP_CM0_COEF02_10_MTX_02_MASK GENMASK(12, 0)
+#define ISP_CM0_COEF02_10_MTX_02(x) ((x) << 0)
+#define ISP_CM0_COEF02_10_MTX_10_MASK GENMASK(28, 16)
+#define ISP_CM0_COEF02_10_MTX_10(x) ((x) << 16)
+
+#define ISP_CM0_COEF11_12 0x6050
+#define ISP_CM0_COEF11_12_MTX_11_MASK GENMASK(12, 0)
+#define ISP_CM0_COEF11_12_MTX_11(x) ((x) << 0)
+#define ISP_CM0_COEF11_12_MTX_12_MASK GENMASK(28, 16)
+#define ISP_CM0_COEF11_12_MTX_12(x) ((x) << 16)
+
+#define ISP_CM0_COEF20_21 0x6054
+#define ISP_CM0_COEF20_21_MTX_20_MASK GENMASK(12, 0)
+#define ISP_CM0_COEF20_21_MTX_20(x) ((x) << 0)
+#define ISP_CM0_COEF20_21_MTX_21_MASK GENMASK(28, 16)
+#define ISP_CM0_COEF20_21_MTX_21(x) ((x) << 16)
+
+#define ISP_CM0_COEF22_OUP_OFST0 0x6058
+#define ISP_CM0_COEF22_OUP_OFST0_MTX_22_MASK GENMASK(12, 0)
+#define ISP_CM0_COEF22_OUP_OFST0_MTX_22(x) ((x) << 0)
+
+#define ISP_CCM_MTX_00_01 0x6098
+#define ISP_CCM_MTX_00_01_MTX_00_MASK GENMASK(12, 0)
+#define ISP_CCM_MTX_00_01_MTX_00(x) ((x) << 0)
+#define ISP_CCM_MTX_00_01_MTX_01_MASK GENMASK(28, 16)
+#define ISP_CCM_MTX_00_01_MTX_01(x) ((x) << 16)
+
+#define ISP_CCM_MTX_02_03 0x609c
+#define ISP_CCM_MTX_02_03_MTX_02_MASK GENMASK(12, 0)
+#define ISP_CCM_MTX_02_03_MTX_02(x) ((x) << 0)
+
+#define ISP_CCM_MTX_10_11 0x60a0
+#define ISP_CCM_MTX_10_11_MTX_10_MASK GENMASK(12, 0)
+#define ISP_CCM_MTX_10_11_MTX_10(x) ((x) << 0)
+#define ISP_CCM_MTX_10_11_MTX_11_MASK GENMASK(28, 16)
+#define ISP_CCM_MTX_10_11_MTX_11(x) ((x) << 16)
+
+#define ISP_CCM_MTX_12_13 0x60a4
+#define ISP_CCM_MTX_12_13_MTX_12_MASK GENMASK(12, 0)
+#define ISP_CCM_MTX_12_13_MTX_12(x) ((x) << 0)
+
+#define ISP_CCM_MTX_20_21 0x60a8
+#define ISP_CCM_MTX_20_21_MTX_20_MASK GENMASK(12, 0)
+#define ISP_CCM_MTX_20_21_MTX_20(x) ((x) << 0)
+#define ISP_CCM_MTX_20_21_MTX_21_MASK GENMASK(28, 16)
+#define ISP_CCM_MTX_20_21_MTX_21(x) ((x) << 16)
+
+#define ISP_CCM_MTX_22_23_RS 0x60ac
+#define ISP_CCM_MTX_22_23_RS_MTX_22_MASK GENMASK(12, 0)
+#define ISP_CCM_MTX_22_23_RS_MTX_22(x) ((x) << 0)
+
+#define ISP_PST_GAMMA_LUT_ADDR 0x60cc
+#define ISP_PST_GAMMA_LUT_ADDR_IDX_ADDR(x) ((x) << 7)
+
+#define ISP_PST_GAMMA_LUT_DATA 0x60d0
+#define ISP_PST_GM_LUT_DATA0(x) (((x) & GENMASK(15, 0)) << 0)
+#define ISP_PST_GM_LUT_DATA1(x) (((x) & GENMASK(15, 0)) << 16)
+
+#define DISP0_TOP_TOP_CTRL 0x8000
+#define DISP0_TOP_TOP_CTRL_CROP2_EN_MASK BIT(5)
+#define DISP0_TOP_TOP_CTRL_CROP2_EN BIT(5)
+#define DISP0_TOP_TOP_CTRL_CROP2_DIS (0 << 5)
+
+#define DISP0_TOP_CRP2_START 0x8004
+#define DISP0_TOP_CRP2_START_V_START_MASK GENMASK(15, 0)
+#define DISP0_TOP_CRP2_START_V_START(x) ((x) << 0)
+#define DISP0_TOP_CRP2_START_H_START_MASK GENMASK(31, 16)
+#define DISP0_TOP_CRP2_START_H_START(x) ((x) << 16)
+
+#define DISP0_TOP_CRP2_SIZE 0x8008
+#define DISP0_TOP_CRP2_SIZE_V_SIZE_MASK GENMASK(15, 0)
+#define DISP0_TOP_CRP2_SIZE_V_SIZE(x) ((x) << 0)
+#define DISP0_TOP_CRP2_SIZE_H_SIZE_MASK GENMASK(31, 16)
+#define DISP0_TOP_CRP2_SIZE_H_SIZE(x) ((x) << 16)
+
+#define DISP0_TOP_OUT_SIZE 0x800c
+#define DISP0_TOP_OUT_SIZE_SCL_OUT_HEIGHT_MASK GENMASK(12, 0)
+#define DISP0_TOP_OUT_SIZE_SCL_OUT_HEIGHT(x) ((x) << 0)
+#define DISP0_TOP_OUT_SIZE_SCL_OUT_WIDTH_MASK GENMASK(28, 16)
+#define DISP0_TOP_OUT_SIZE_SCL_OUT_WIDTH(x) ((x) << 16)
+
+#define ISP_DISP0_TOP_IN_SIZE 0x804c
+#define ISP_DISP0_TOP_IN_SIZE_VSIZE_MASK GENMASK(12, 0)
+#define ISP_DISP0_TOP_IN_SIZE_VSIZE(x) ((x) << 0)
+#define ISP_DISP0_TOP_IN_SIZE_HSIZE_MASK GENMASK(28, 16)
+#define ISP_DISP0_TOP_IN_SIZE_HSIZE(x) ((x) << 16)
+
+#define DISP0_PPS_SCALE_EN 0x8200
+#define DISP0_PPS_SCALE_EN_VSC_TAP_NUM_MASK GENMASK(3, 0)
+#define DISP0_PPS_SCALE_EN_VSC_TAP_NUM(x) ((x) << 0)
+#define DISP0_PPS_SCALE_EN_HSC_TAP_NUM_MASK GENMASK(7, 4)
+#define DISP0_PPS_SCALE_EN_HSC_TAP_NUM(x) ((x) << 4)
+#define DISP0_PPS_SCALE_EN_PREVSC_FLT_NUM_MASK GENMASK(11, 8)
+#define DISP0_PPS_SCALE_EN_PREVSC_FLT_NUM(x) ((x) << 8)
+#define DISP0_PPS_SCALE_EN_PREHSC_FLT_NUM_MASK GENMASK(15, 12)
+#define DISP0_PPS_SCALE_EN_PREHSC_FLT_NUM(x) ((x) << 12)
+#define DISP0_PPS_SCALE_EN_PREVSC_RATE_MASK GENMASK(17, 16)
+#define DISP0_PPS_SCALE_EN_PREVSC_RATE(x) ((x) << 16)
+#define DISP0_PPS_SCALE_EN_PREHSC_RATE_MASK GENMASK(19, 18)
+#define DISP0_PPS_SCALE_EN_PREHSC_RATE(x) ((x) << 18)
+#define DISP0_PPS_SCALE_EN_HSC_EN_MASK BIT(20)
+#define DISP0_PPS_SCALE_EN_HSC_EN(x) ((x) << 20)
+#define DISP0_PPS_SCALE_EN_HSC_DIS (0 << 20)
+#define DISP0_PPS_SCALE_EN_VSC_EN_MASK BIT(21)
+#define DISP0_PPS_SCALE_EN_VSC_EN(x) ((x) << 21)
+#define DISP0_PPS_SCALE_EN_VSC_DIS (0 << 21)
+#define DISP0_PPS_SCALE_EN_PREVSC_EN_MASK BIT(22)
+#define DISP0_PPS_SCALE_EN_PREVSC_EN(x) ((x) << 22)
+#define DISP0_PPS_SCALE_EN_PREVSC_DIS (0 << 22)
+#define DISP0_PPS_SCALE_EN_PREHSC_EN_MASK BIT(23)
+#define DISP0_PPS_SCALE_EN_PREHSC_EN(x) ((x) << 23)
+#define DISP0_PPS_SCALE_EN_PREHSC_DIS (0 << 23)
+#define DISP0_PPS_SCALE_EN_HSC_NOR_RS_BITS_MASK GENMASK(27, 24)
+#define DISP0_PPS_SCALE_EN_HSC_NOR_RS_BITS(x) ((x) << 24)
+#define DISP0_PPS_SCALE_EN_VSC_NOR_RS_BITS_MASK GENMASK(31, 28)
+#define DISP0_PPS_SCALE_EN_VSC_NOR_RS_BITS(x) ((x) << 28)
+
+#define DISP0_PPS_VSC_START_PHASE_STEP 0x8224
+#define DISP0_PPS_VSC_START_PHASE_STEP_VERT_FRAC_MASK GENMASK(23, 0)
+#define DISP0_PPS_VSC_START_PHASE_STEP_VERT_FRAC(x) ((x) << 0)
+#define DISP0_PPS_VSC_START_PHASE_STEP_VERT_INTE_MASK GENMASK(27, 24)
+#define DISP0_PPS_VSC_START_PHASE_STEP_VERT_INTE(x) ((x) << 24)
+
+#define DISP0_PPS_HSC_START_PHASE_STEP 0x8230
+#define DISP0_PPS_HSC_START_PHASE_STEP_HORIZ_FRAC_MASK GENMASK(23, 0)
+#define DISP0_PPS_HSC_START_PHASE_STEP_HORIZ_FRAC(x) ((x) << 0)
+#define DISP0_PPS_HSC_START_PHASE_STEP_HORIZ_INTE_MASK GENMASK(27, 24)
+#define DISP0_PPS_HSC_START_PHASE_STEP_HORIZ_INTE(x) ((x) << 24)
+
+#define DISP0_PPS_444TO422 0x823c
+#define DISP0_PPS_444TO422_EN_MASK BIT(0)
+#define DISP0_PPS_444TO422_EN(x) ((x) << 0)
+
+#define ISP_SCALE0_COEF_IDX_LUMA 0x8240
+#define ISP_SCALE0_COEF_IDX_LUMA_COEF_S11_MODE_MASK BIT(9)
+#define ISP_SCALE0_COEF_IDX_LUMA_COEF_S11_MODE_EN BIT(9)
+#define ISP_SCALE0_COEF_IDX_LUMA_COEF_S11_MODE_DIS (0 << 9)
+#define ISP_SCALE0_COEF_IDX_LUMA_CTYPE_MASK GENMASK(12, 10)
+#define ISP_SCALE0_COEF_IDX_LUMA_CTYPE(x) ((x) << 10)
+
+#define ISP_SCALE0_COEF_LUMA 0x8244
+#define ISP_SCALE0_COEF_LUMA_DATA1(x) (((x) & GENMASK(10, 0)) << 0)
+#define ISP_SCALE0_COEF_LUMA_DATA0(x) (((x) & GENMASK(10, 0)) << 16)
+
+#define ISP_SCALE0_COEF_IDX_CHRO 0x8248
+#define ISP_SCALE0_COEF_IDX_CHRO_COEF_S11_MODE_MASK BIT(9)
+#define ISP_SCALE0_COEF_IDX_CHRO_COEF_S11_MODE_EN BIT(9)
+#define ISP_SCALE0_COEF_IDX_CHRO_COEF_S11_MODE_DIS (0 << 9)
+#define ISP_SCALE0_COEF_IDX_CHRO_CTYPE_MASK GENMASK(12, 10)
+#define ISP_SCALE0_COEF_IDX_CHRO_CTYPE(x) ((x) << 10)
+
+#define ISP_SCALE0_COEF_CHRO 0x824c
+#define ISP_SCALE0_COEF_CHRO_DATA1(x) (((x) & GENMASK(10, 0)) << 0)
+#define ISP_SCALE0_COEF_CHRO_DATA0(x) (((x) & GENMASK(10, 0)) << 16)
+
+#define ISP_AF_CTRL 0xa044
+#define ISP_AF_CTRL_VERT_OFST_MASK GENMASK(15, 14)
+#define ISP_AF_CTRL_VERT_OFST(x) ((x) << 14)
+#define ISP_AF_CTRL_HORIZ_OFST_MASK GENMASK(17, 16)
+#define ISP_AF_CTRL_HORIZ_OFST(x) ((x) << 16)
+
+#define ISP_AF_HV_SIZE 0xa04c
+#define ISP_AF_HV_SIZE_GLB_WIN_YSIZE_MASK GENMASK(15, 0)
+#define ISP_AF_HV_SIZE_GLB_WIN_YSIZE(x) ((x) << 0)
+#define ISP_AF_HV_SIZE_GLB_WIN_XSIZE_MASK GENMASK(31, 16)
+#define ISP_AF_HV_SIZE_GLB_WIN_XSIZE(x) ((x) << 16)
+
+#define ISP_AF_HV_BLKNUM 0xa050
+#define ISP_AF_HV_BLKNUM_V_NUM_MASK GENMASK(5, 0)
+#define ISP_AF_HV_BLKNUM_V_NUM(x) ((x) << 0)
+#define ISP_AF_HV_BLKNUM_H_NUM_MASK GENMASK(21, 16)
+#define ISP_AF_HV_BLKNUM_H_NUM(x) ((x) << 16)
+
+#define ISP_AF_EN_CTRL 0xa054
+#define ISP_AF_EN_CTRL_STAT_SEL_MASK BIT(21)
+#define ISP_AF_EN_CTRL_STAT_SEL_OLD (0 << 21)
+#define ISP_AF_EN_CTRL_STAT_SEL_NEW BIT(21)
+
+#define ISP_AF_IDX_ADDR 0xa1c0
+#define ISP_AF_IDX_DATA 0xa1c4
+#define ISP_AF_IDX_DATA_VIDX_DATA(x) (((x) & GENMASK(15, 0)) << 0)
+#define ISP_AF_IDX_DATA_HIDX_DATA(x) (((x) & GENMASK(15, 0)) << 16)
+
+#define ISP_AE_CTRL 0xa448
+#define ISP_AE_CTRL_INPUT_2LINE_MASK BIT(7)
+#define ISP_AE_CTRL_INPUT_2LINE_EN BIT(7)
+#define ISP_AE_CTRL_INPUT_2LINE_DIS (0 << 7)
+#define ISP_AE_CTRL_LUMA_MODE_MASK GENMASK(9, 8)
+#define ISP_AE_CTRL_LUMA_MODE_CUR (0 << 8)
+#define ISP_AE_CTRL_LUMA_MODE_MAX BIT(8)
+#define ISP_AE_CTRL_LUMA_MODE_FILTER (2 << 8)
+#define ISP_AE_CTRL_VERT_OFST_MASK GENMASK(25, 24)
+#define ISP_AE_CTRL_VERT_OFST(x) ((x) << 24)
+#define ISP_AE_CTRL_HORIZ_OFST_MASK GENMASK(27, 26)
+#define ISP_AE_CTRL_HORIZ_OFST(x) ((x) << 26)
+
+#define ISP_AE_HV_SIZE 0xa464
+#define ISP_AE_HV_SIZE_VERT_SIZE_MASK GENMASK(15, 0)
+#define ISP_AE_HV_SIZE_VERT_SIZE(x) ((x) << 0)
+#define ISP_AE_HV_SIZE_HORIZ_SIZE_MASK GENMASK(31, 16)
+#define ISP_AE_HV_SIZE_HORIZ_SIZE(x) ((x) << 16)
+
+#define ISP_AE_HV_BLKNUM 0xa468
+#define ISP_AE_HV_BLKNUM_V_NUM_MASK GENMASK(6, 0)
+#define ISP_AE_HV_BLKNUM_V_NUM(x) ((x) << 0)
+#define ISP_AE_HV_BLKNUM_H_NUM_MASK GENMASK(22, 16)
+#define ISP_AE_HV_BLKNUM_H_NUM(x) ((x) << 16)
+
+#define ISP_AE_IDX_ADDR 0xa600
+#define ISP_AE_IDX_DATA 0xa604
+#define ISP_AE_IDX_DATA_VIDX_DATA(x) (((x) & GENMASK(15, 0)) << 0)
+#define ISP_AE_IDX_DATA_HIDX_DATA(x) (((x) & GENMASK(15, 0)) << 16)
+
+#define ISP_AE_BLK_WT_ADDR 0xa608
+#define ISP_AE_BLK_WT_DATA 0xa60c
+#define ISP_AE_BLK_WT_DATA_WT(i, x) (((x) & GENMASK(3, 0)) << ((i) * 4))
+
+#define ISP_AWB_CTRL 0xa834
+#define ISP_AWB_CTRL_VERT_OFST_MASK GENMASK(1, 0)
+#define ISP_AWB_CTRL_VERT_OFST(x) ((x) << 0)
+#define ISP_AWB_CTRL_HORIZ_OFST_MASK GENMASK(3, 2)
+#define ISP_AWB_CTRL_HORIZ_OFST(x) ((x) << 2)
+
+#define ISP_AWB_HV_SIZE 0xa83c
+#define ISP_AWB_HV_SIZE_VERT_SIZE_MASK GENMASK(15, 0)
+#define ISP_AWB_HV_SIZE_VERT_SIZE(x) ((x) << 0)
+#define ISP_AWB_HV_SIZE_HORIZ_SIZE_MASK GENMASK(31, 16)
+#define ISP_AWB_HV_SIZE_HORIZ_SIZE(x) ((x) << 16)
+
+#define ISP_AWB_HV_BLKNUM 0xa840
+#define ISP_AWB_HV_BLKNUM_V_NUM_MASK GENMASK(5, 0)
+#define ISP_AWB_HV_BLKNUM_V_NUM(x) ((x) << 0)
+#define ISP_AWB_HV_BLKNUM_H_NUM_MASK GENMASK(21, 16)
+#define ISP_AWB_HV_BLKNUM_H_NUM(x) ((x) << 16)
+
+#define ISP_AWB_STAT_RG 0xa848
+#define ISP_AWB_STAT_RG_MIN_VALUE_MASK GENMASK(11, 0)
+#define ISP_AWB_STAT_RG_MIN_VALUE(x) ((x) << 0)
+#define ISP_AWB_STAT_RG_MAX_VALUE_MASK GENMASK(27, 16)
+#define ISP_AWB_STAT_RG_MAX_VALUE(x) ((x) << 16)
+
+#define ISP_AWB_STAT_BG 0xa84c
+#define ISP_AWB_STAT_BG_MIN_VALUE_MASK GENMASK(11, 0)
+#define ISP_AWB_STAT_BG_MIN_VALUE(x) ((x) << 0)
+#define ISP_AWB_STAT_BG_MAX_VALUE_MASK GENMASK(27, 16)
+#define ISP_AWB_STAT_BG_MAX_VALUE(x) ((x) << 16)
+
+#define ISP_AWB_STAT_RG_HL 0xa850
+#define ISP_AWB_STAT_RG_HL_LOW_VALUE_MASK GENMASK(11, 0)
+#define ISP_AWB_STAT_RG_HL_LOW_VALUE(x) ((x) << 0)
+#define ISP_AWB_STAT_RG_HL_HIGH_VALUE_MASK GENMASK(27, 16)
+#define ISP_AWB_STAT_RG_HL_HIGH_VALUE(x) ((x) << 16)
+
+#define ISP_AWB_STAT_BG_HL 0xa854
+#define ISP_AWB_STAT_BG_HL_LOW_VALUE_MASK GENMASK(11, 0)
+#define ISP_AWB_STAT_BG_HL_LOW_VALUE(x) ((x) << 0)
+#define ISP_AWB_STAT_BG_HL_HIGH_VALUE_MASK GENMASK(27, 16)
+#define ISP_AWB_STAT_BG_HL_HIGH_VALUE(x) ((x) << 16)
+
+#define ISP_AWB_STAT_CTRL2 0xa858
+#define ISP_AWB_STAT_CTRL2_SATUR_CTRL_MASK BIT(0)
+#define ISP_AWB_STAT_CTRL2_SATUR_CTRL(x) ((x) << 0)
+
+#define ISP_AWB_IDX_ADDR 0xaa00
+#define ISP_AWB_IDX_DATA 0xaa04
+#define ISP_AWB_IDX_DATA_VIDX_DATA(x) (((x) & GENMASK(15, 0)) << 0)
+#define ISP_AWB_IDX_DATA_HIDX_DATA(x) (((x) & GENMASK(15, 0)) << 16)
+
+#define ISP_AWB_BLK_WT_ADDR 0xaa08
+#define ISP_AWB_BLK_WT_DATA 0xaa0c
+#define ISP_AWB_BLK_WT_DATA_WT(i, x) (((x) & GENMASK(3, 0)) << ((i) * 4))
+
+#define ISP_WRMIFX3_0_CH0_CTRL0 0xc400
+#define ISP_WRMIFX3_0_CH0_CTRL0_STRIDE_MASK GENMASK(28, 16)
+#define ISP_WRMIFX3_0_CH0_CTRL0_STRIDE(x) ((x) << 16)
+
+#define ISP_WRMIFX3_0_CH0_CTRL1 0xc404
+#define ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_MODE_MASK GENMASK(30, 27)
+#define ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_8BITS BIT(27)
+#define ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_16BITS (2 << 27)
+
+#define ISP_WRMIFX3_0_CH1_CTRL0 0xc408
+#define ISP_WRMIFX3_0_CH1_CTRL0_STRIDE_MASK GENMASK(28, 16)
+#define ISP_WRMIFX3_0_CH1_CTRL0_STRIDE(x) ((x) << 16)
+
+#define ISP_WRMIFX3_0_CH1_CTRL1 0xc40c
+#define ISP_WRMIFX3_0_CH1_CTRL1_PIX_BITS_MODE_MASK GENMASK(30, 27)
+#define ISP_WRMIFX3_0_CH1_CTRL1_PIX_BITS_8BITS BIT(27)
+#define ISP_WRMIFX3_0_CH1_CTRL1_PIX_BITS_16BITS (2 << 27)
+#define ISP_WRMIFX3_0_CH1_CTRL1_PIX_BITS_32BITS (3 << 27)
+
+#define ISP_WRMIFX3_0_WIN_LUMA_H 0xc420
+#define ISP_WRMIFX3_0_WIN_LUMA_H_LUMA_HEND_MASK GENMASK(28, 16)
+#define ISP_WRMIFX3_0_WIN_LUMA_H_LUMA_HEND(x) (((x) - 1) << 16)
+
+#define ISP_WRMIFX3_0_WIN_LUMA_V 0xc424
+#define ISP_WRMIFX3_0_WIN_LUMA_V_LUMA_VEND_MASK GENMASK(28, 16)
+#define ISP_WRMIFX3_0_WIN_LUMA_V_LUMA_VEND(x) (((x) - 1) << 16)
+
+#define ISP_WRMIFX3_0_WIN_CHROM_H 0xc428
+#define ISP_WRMIFX3_0_WIN_CHROM_H_CHROM_HEND_MASK GENMASK(28, 16)
+#define ISP_WRMIFX3_0_WIN_CHROM_H_CHROM_HEND(x) (((x) - 1) << 16)
+
+#define ISP_WRMIFX3_0_WIN_CHROM_V 0xc42c
+#define ISP_WRMIFX3_0_WIN_CHROM_V_CHROM_VEND_MASK GENMASK(28, 16)
+#define ISP_WRMIFX3_0_WIN_CHROM_V_CHROM_VEND(x) (((x) - 1) << 16)
+
+#define ISP_WRMIFX3_0_CH0_BADDR 0xc440
+#define ISP_WRMIFX3_0_CH0_BASE_ADDR(x) ((x) >> 4)
+
+#define ISP_WRMIFX3_0_CH1_BADDR 0xc444
+#define ISP_WRMIFX3_0_CH1_BASE_ADDR(x) ((x) >> 4)
+
+#define ISP_WRMIFX3_0_FMT_SIZE 0xc464
+#define ISP_WRMIFX3_0_FMT_SIZE_HSIZE_MASK GENMASK(15, 0)
+#define ISP_WRMIFX3_0_FMT_SIZE_HSIZE(x) ((x) << 0)
+#define ISP_WRMIFX3_0_FMT_SIZE_VSIZE_MASK GENMASK(31, 16)
+#define ISP_WRMIFX3_0_FMT_SIZE_VSIZE(x) ((x) << 16)
+
+#define ISP_WRMIFX3_0_FMT_CTRL 0xc468
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_MASK GENMASK(1, 0)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_8BIT (0 << 0)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_10BIT BIT(0)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_12BIT (2 << 0)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_16BIT (3 << 0)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_MASK BIT(2)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_VU (0 << 2)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_UV BIT(2)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_MASK GENMASK(5, 4)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X1 (0 << 4)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X2 BIT(4)
+#define ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_MASK GENMASK(18, 16)
+#define ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_YUV422 BIT(16)
+#define ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_YUV420 (2 << 16)
+#define ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_Y_ONLY (3 << 16)
+#define ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_RAW (4 << 16)
+
+#define VIU_DMAWR_BADDR0 0xc840
+#define VIU_DMAWR_BADDR0_AF_STATS_BASE_ADDR_MASK GENMASK(27, 0)
+#define VIU_DMAWR_BADDR0_AF_STATS_BASE_ADDR(x) ((x) >> 4)
+
+#define VIU_DMAWR_BADDR1 0xc844
+#define VIU_DMAWR_BADDR1_AWB_STATS_BASE_ADDR_MASK GENMASK(27, 0)
+#define VIU_DMAWR_BADDR1_AWB_STATS_BASE_ADDR(x) ((x) >> 4)
+
+#define VIU_DMAWR_BADDR2 0xc848
+#define VIU_DMAWR_BADDR2_AE_STATS_BASE_ADDR_MASK GENMASK(27, 0)
+#define VIU_DMAWR_BADDR2_AE_STATS_BASE_ADDR(x) ((x) >> 4)
+
+#define VIU_DMAWR_SIZE0 0xc854
+#define VIU_DMAWR_SIZE0_AF_STATS_SIZE_MASK GENMASK(15, 0)
+#define VIU_DMAWR_SIZE0_AF_STATS_SIZE(x) ((x) << 0)
+#define VIU_DMAWR_SIZE0_AWB_STATS_SIZE_MASK GENMASK(31, 16)
+#define VIU_DMAWR_SIZE0_AWB_STATS_SIZE(x) ((x) << 16)
+
+#define VIU_DMAWR_SIZE1 0xc858
+#define VIU_DMAWR_SIZE1_AE_STATS_SIZE_MASK GENMASK(15, 0)
+#define VIU_DMAWR_SIZE1_AE_STATS_SIZE(x) ((x) << 0)
+
+#endif
diff --git a/drivers/media/platform/amlogic/c3/isp/c3-isp-resizer.c b/drivers/media/platform/amlogic/c3/isp/c3-isp-resizer.c
new file mode 100644
index 000000000000..453a889e0b27
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/c3-isp-resizer.c
@@ -0,0 +1,892 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "c3-isp-common.h"
+#include "c3-isp-regs.h"
+
+#define C3_ISP_RSZ_DEF_PAD_FMT MEDIA_BUS_FMT_YUV10_1X30
+#define C3_ISP_DISP_REG(base, id) ((base) + (id) * 0x400)
+#define C3_ISP_PPS_LUT_H_NUM 33
+#define C3_ISP_PPS_LUT_CTYPE_0 0
+#define C3_ISP_PPS_LUT_CTYPE_2 2
+#define C3_ISP_SCL_EN 1
+#define C3_ISP_SCL_DIS 0
+
+/*
+ * struct c3_isp_rsz_format_info - ISP resizer format information
+ *
+ * @mbus_code: the mbus code
+ * @pads: bitmask detailing valid pads for this mbus_code
+ * @is_raw: true if the mbus code corresponds to a RAW format
+ */
+struct c3_isp_rsz_format_info {
+ u32 mbus_code;
+ u32 pads;
+ bool is_raw;
+};
+
+static const struct c3_isp_rsz_format_info c3_isp_rsz_fmts[] = {
+ /* RAW formats */
+ {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR16_1X16,
+ .pads = BIT(C3_ISP_RSZ_PAD_SINK)
+ | BIT(C3_ISP_RSZ_PAD_SOURCE),
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG16_1X16,
+ .pads = BIT(C3_ISP_RSZ_PAD_SINK)
+ | BIT(C3_ISP_RSZ_PAD_SOURCE),
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG16_1X16,
+ .pads = BIT(C3_ISP_RSZ_PAD_SINK)
+ | BIT(C3_ISP_RSZ_PAD_SOURCE),
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB16_1X16,
+ .pads = BIT(C3_ISP_RSZ_PAD_SINK)
+ | BIT(C3_ISP_RSZ_PAD_SOURCE),
+ .is_raw = true,
+ },
+ /* YUV formats */
+ {
+ .mbus_code = MEDIA_BUS_FMT_YUV10_1X30,
+ .pads = BIT(C3_ISP_RSZ_PAD_SINK)
+ | BIT(C3_ISP_RSZ_PAD_SOURCE),
+ .is_raw = false,
+ },
+};
+
+/*
+ * struct c3_isp_pps_io_size - ISP scaler input and output size
+ *
+ * @thsize: input horizontal size after preprocessing
+ * @tvsize: input vertical size after preprocessing
+ * @ohsize: output horizontal size
+ * @ovsize: output vertical size
+ * @ihsize: input horizontal size
+ * @max_hsize: maximum horizontal size
+ */
+struct c3_isp_pps_io_size {
+ u32 thsize;
+ u32 tvsize;
+ u32 ohsize;
+ u32 ovsize;
+ u32 ihsize;
+ u32 max_hsize;
+};
+
+/* Polyphase scaler coefficients: 4 taps for each of the 33 phase steps */
+static const int c3_isp_pps_lut[C3_ISP_PPS_LUT_H_NUM][4] = {
+ { 0, 511, 0, 0}, { -5, 511, 5, 0}, {-10, 511, 11, 0},
+ {-14, 510, 17, -1}, {-18, 508, 23, -1}, {-22, 506, 29, -1},
+ {-25, 503, 36, -2}, {-28, 500, 43, -3}, {-32, 496, 51, -3},
+ {-34, 491, 59, -4}, {-37, 487, 67, -5}, {-39, 482, 75, -6},
+ {-41, 476, 84, -7}, {-42, 470, 92, -8}, {-44, 463, 102, -9},
+ {-45, 456, 111, -10}, {-45, 449, 120, -12}, {-47, 442, 130, -13},
+ {-47, 434, 140, -15}, {-47, 425, 151, -17}, {-47, 416, 161, -18},
+ {-47, 407, 172, -20}, {-47, 398, 182, -21}, {-47, 389, 193, -23},
+ {-46, 379, 204, -25}, {-45, 369, 215, -27}, {-44, 358, 226, -28},
+ {-43, 348, 237, -30}, {-43, 337, 249, -31}, {-41, 326, 260, -33},
+ {-40, 316, 271, -35}, {-39, 305, 282, -36}, {-37, 293, 293, -37}
+};
+
+static const struct c3_isp_rsz_format_info
+*rsz_find_format_by_code(u32 code, u32 pad)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(c3_isp_rsz_fmts); i++) {
+ const struct c3_isp_rsz_format_info *info = &c3_isp_rsz_fmts[i];
+
+ if (info->mbus_code == code && info->pads & BIT(pad))
+ return info;
+ }
+
+ return NULL;
+}
+
+static const struct c3_isp_rsz_format_info
+*rsz_find_format_by_index(u32 index, u32 pad)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(c3_isp_rsz_fmts); i++) {
+ const struct c3_isp_rsz_format_info *info = &c3_isp_rsz_fmts[i];
+
+ if (!(info->pads & BIT(pad)))
+ continue;
+
+ if (!index)
+ return info;
+
+ index--;
+ }
+
+ return NULL;
+}
+
+static void c3_isp_rsz_pps_size(struct c3_isp_resizer *rsz,
+ struct c3_isp_pps_io_size *io_size)
+{
+ int thsize = io_size->thsize;
+ int tvsize = io_size->tvsize;
+ u32 ohsize = io_size->ohsize;
+ u32 ovsize = io_size->ovsize;
+ u32 ihsize = io_size->ihsize;
+ u32 max_hsize = io_size->max_hsize;
+ int h_int;
+ int v_int;
+ int h_fract;
+ int v_fract;
+ int yuv444to422_en;
+
+	/* Calculate the integer part of the horizontal scaler step */
+ h_int = thsize / ohsize;
+
+	/* Calculate the integer part of the vertical scaler step */
+ v_int = tvsize / ovsize;
+
+ /*
+	 * Calculate the fraction part of the horizontal scaler step.
+ * step_h_fraction = (source / dest) * 2^24,
+ * so step_h_fraction = ((source << 12) / dest) << 12.
+ */
+ h_fract = ((thsize << 12) / ohsize) << 12;
+
+ /*
+	 * Calculate the fraction part of the vertical scaler step.
+	 * step_v_fraction = (source / dest) * 2^24,
+ * so step_v_fraction = ((source << 12) / dest) << 12.
+ */
+ v_fract = ((tvsize << 12) / ovsize) << 12;
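+
+	/*
+	 * Worked example (hypothetical sizes, not taken from hardware
+	 * documentation): thsize = 1920 and ohsize = 1280 give h_int = 1
+	 * and h_fract = ((1920 << 12) / 1280) << 12 = 0x1800000; assuming
+	 * the HORIZ_FRAC field keeps the low 24 bits, the programmed
+	 * fraction is 0x800000 (0.5), for an overall step of
+	 * 1.5 = 1920 / 1280.
+	 */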
+
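+	/*
+	 * When the input line is wider than half the maximum line width,
+	 * convert YUV 4:4:4 to 4:2:2 before scaling; this presumably
+	 * halves the chroma line-buffer usage (an inference from the
+	 * max_hsize / 2 threshold, not from documentation).
+	 */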
+ yuv444to422_en = ihsize > (max_hsize / 2) ? 1 : 0;
+
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_444TO422, rsz->id),
+ DISP0_PPS_444TO422_EN_MASK,
+ DISP0_PPS_444TO422_EN(yuv444to422_en));
+
+ c3_isp_write(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_VSC_START_PHASE_STEP, rsz->id),
+ DISP0_PPS_VSC_START_PHASE_STEP_VERT_FRAC(v_fract) |
+ DISP0_PPS_VSC_START_PHASE_STEP_VERT_INTE(v_int));
+
+ c3_isp_write(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_HSC_START_PHASE_STEP, rsz->id),
+ DISP0_PPS_HSC_START_PHASE_STEP_HORIZ_FRAC(h_fract) |
+ DISP0_PPS_HSC_START_PHASE_STEP_HORIZ_INTE(h_int));
+}
+
+static void c3_isp_rsz_pps_lut(struct c3_isp_resizer *rsz, u32 ctype)
+{
+ unsigned int i;
+
+ /*
+	 * The default value of this register is 0, so we only need to set
+	 * SCALE_LUMA_COEF_S11_MODE and SCALE_LUMA_CTYPE. The register must
+	 * be written in a single access.
+ */
+ c3_isp_write(rsz->isp,
+ C3_ISP_DISP_REG(ISP_SCALE0_COEF_IDX_LUMA, rsz->id),
+ ISP_SCALE0_COEF_IDX_LUMA_COEF_S11_MODE_EN |
+ ISP_SCALE0_COEF_IDX_LUMA_CTYPE(ctype));
+
+ for (i = 0; i < C3_ISP_PPS_LUT_H_NUM; i++) {
+ c3_isp_write(rsz->isp,
+ C3_ISP_DISP_REG(ISP_SCALE0_COEF_LUMA, rsz->id),
+ ISP_SCALE0_COEF_LUMA_DATA0(c3_isp_pps_lut[i][0]) |
+ ISP_SCALE0_COEF_LUMA_DATA1(c3_isp_pps_lut[i][1]));
+ c3_isp_write(rsz->isp,
+ C3_ISP_DISP_REG(ISP_SCALE0_COEF_LUMA, rsz->id),
+ ISP_SCALE0_COEF_LUMA_DATA0(c3_isp_pps_lut[i][2]) |
+ ISP_SCALE0_COEF_LUMA_DATA1(c3_isp_pps_lut[i][3]));
+ }
+
+ /*
+	 * The default value of this register is 0, so we only need to set
+	 * SCALE_CHRO_COEF_S11_MODE and SCALE_CHRO_CTYPE. The register must
+	 * be written in a single access.
+ */
+ c3_isp_write(rsz->isp,
+ C3_ISP_DISP_REG(ISP_SCALE0_COEF_IDX_CHRO, rsz->id),
+ ISP_SCALE0_COEF_IDX_CHRO_COEF_S11_MODE_EN |
+ ISP_SCALE0_COEF_IDX_CHRO_CTYPE(ctype));
+
+ for (i = 0; i < C3_ISP_PPS_LUT_H_NUM; i++) {
+ c3_isp_write(rsz->isp,
+ C3_ISP_DISP_REG(ISP_SCALE0_COEF_CHRO, rsz->id),
+ ISP_SCALE0_COEF_CHRO_DATA0(c3_isp_pps_lut[i][0]) |
+ ISP_SCALE0_COEF_CHRO_DATA1(c3_isp_pps_lut[i][1]));
+ c3_isp_write(rsz->isp,
+ C3_ISP_DISP_REG(ISP_SCALE0_COEF_CHRO, rsz->id),
+ ISP_SCALE0_COEF_CHRO_DATA0(c3_isp_pps_lut[i][2]) |
+ ISP_SCALE0_COEF_CHRO_DATA1(c3_isp_pps_lut[i][3]));
+ }
+}
+
+static void c3_isp_rsz_pps_disable(struct c3_isp_resizer *rsz)
+{
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_HSC_EN_MASK,
+ DISP0_PPS_SCALE_EN_HSC_DIS);
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_VSC_EN_MASK,
+ DISP0_PPS_SCALE_EN_VSC_DIS);
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_PREVSC_EN_MASK,
+ DISP0_PPS_SCALE_EN_PREVSC_DIS);
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_PREHSC_EN_MASK,
+ DISP0_PPS_SCALE_EN_PREHSC_DIS);
+}
+
+static int c3_isp_rsz_pps_enable(struct c3_isp_resizer *rsz,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_rect *crop;
+ struct v4l2_rect *cmps;
+ int max_hsize;
+ int hsc_en, vsc_en;
+ int preh_en, prev_en;
+ u32 prehsc_rate;
+ u32 prevsc_flt_num;
+ int pre_vscale_max_hsize;
+ u32 ihsize_after_pre_hsc;
+ u32 ihsize_after_pre_hsc_alt;
+ u32 vsc_tap_num_alt;
+ u32 ihsize;
+ u32 ivsize;
+ struct c3_isp_pps_io_size io_size;
+
+ crop = v4l2_subdev_state_get_crop(state, C3_ISP_RSZ_PAD_SINK);
+ cmps = v4l2_subdev_state_get_compose(state, C3_ISP_RSZ_PAD_SINK);
+
+ ihsize = crop->width;
+ ivsize = crop->height;
+
+ hsc_en = (ihsize == cmps->width) ? C3_ISP_SCL_DIS : C3_ISP_SCL_EN;
+ vsc_en = (ivsize == cmps->height) ? C3_ISP_SCL_DIS : C3_ISP_SCL_EN;
+
+	/* Disable the PPS when no scaling is needed */
+ if (!hsc_en && !vsc_en) {
+ c3_isp_rsz_pps_disable(rsz);
+ return 0;
+ }
+
+	/* Pre-scaling needs to be enabled if the downscaling factor exceeds 4 */
+ preh_en = (ihsize > cmps->width * 4) ? C3_ISP_SCL_EN : C3_ISP_SCL_DIS;
+ prev_en = (ivsize > cmps->height * 4) ? C3_ISP_SCL_EN : C3_ISP_SCL_DIS;
+
+ if (rsz->id == C3_ISP_RSZ_2) {
+ max_hsize = C3_ISP_MAX_WIDTH;
+
+ /* Set vertical tap number */
+ prevsc_flt_num = 4;
+
+ /* Set the max hsize of pre-vertical scale */
+ pre_vscale_max_hsize = max_hsize / 2;
+ } else {
+ max_hsize = C3_ISP_DEFAULT_WIDTH;
+
+		/* Set vertical tap number and the max hsize of pre-vertical scale */
+ if (ihsize > (max_hsize / 2) &&
+ ihsize <= max_hsize && prev_en) {
+ prevsc_flt_num = 2;
+ pre_vscale_max_hsize = max_hsize;
+ } else {
+ prevsc_flt_num = 4;
+ pre_vscale_max_hsize = max_hsize / 2;
+ }
+ }
+
+ /*
+	 * Set the pre-horizontal scaling rate and the horizontal size
+	 * after pre-horizontal scaling.
+ */
+ if (preh_en) {
+ prehsc_rate = 1;
+ ihsize_after_pre_hsc = DIV_ROUND_UP(ihsize, 2);
+ } else {
+ prehsc_rate = 0;
+ ihsize_after_pre_hsc = ihsize;
+ }
+
+	/* Increase the pre-horizontal scaling rate */
+ if (prev_en && ihsize_after_pre_hsc >= pre_vscale_max_hsize)
+ prehsc_rate += 1;
+
+	/* Compute the actual horizontal size after pre-horizontal scaling */
+ if (preh_en)
+ ihsize_after_pre_hsc_alt =
+ DIV_ROUND_UP(ihsize, 1 << prehsc_rate);
+ else
+ ihsize_after_pre_hsc_alt = ihsize;
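+
+	/*
+	 * Worked example (hypothetical sizes): ihsize = 2560 and
+	 * cmps->width = 320 enable pre-horizontal scaling with
+	 * prehsc_rate = 1, so the width after pre-scaling is
+	 * DIV_ROUND_UP(2560, 2) = 1280; if pre-vertical scaling is also
+	 * enabled and 1280 >= pre_vscale_max_hsize, the rate is bumped to
+	 * 2 and the width drops to DIV_ROUND_UP(2560, 4) = 640.
+	 */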
+
+ /* Set vertical scaler bank length */
+ if (ihsize_after_pre_hsc_alt <= (max_hsize / 2))
+ vsc_tap_num_alt = 4;
+ else if (ihsize_after_pre_hsc_alt <= max_hsize)
+ vsc_tap_num_alt = prev_en ? 2 : 4;
+ else
+ vsc_tap_num_alt = prev_en ? 4 : 2;
+
+ io_size.thsize = ihsize_after_pre_hsc_alt;
+ io_size.tvsize = prev_en ? DIV_ROUND_UP(ivsize, 2) : ivsize;
+ io_size.ohsize = cmps->width;
+ io_size.ovsize = cmps->height;
+ io_size.ihsize = ihsize;
+ io_size.max_hsize = max_hsize;
+
+ c3_isp_rsz_pps_size(rsz, &io_size);
+ c3_isp_rsz_pps_lut(rsz, C3_ISP_PPS_LUT_CTYPE_0);
+ c3_isp_rsz_pps_lut(rsz, C3_ISP_PPS_LUT_CTYPE_2);
+
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_VSC_TAP_NUM_MASK,
+ DISP0_PPS_SCALE_EN_VSC_TAP_NUM(vsc_tap_num_alt));
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_PREVSC_FLT_NUM_MASK,
+ DISP0_PPS_SCALE_EN_PREVSC_FLT_NUM(prevsc_flt_num));
+
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_PREVSC_RATE_MASK,
+ DISP0_PPS_SCALE_EN_PREVSC_RATE(prev_en));
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_PREHSC_RATE_MASK,
+ DISP0_PPS_SCALE_EN_PREHSC_RATE(prehsc_rate));
+
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_HSC_EN_MASK,
+ DISP0_PPS_SCALE_EN_HSC_EN(hsc_en));
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_VSC_EN_MASK,
+ DISP0_PPS_SCALE_EN_VSC_EN(vsc_en));
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_PREVSC_EN_MASK,
+ DISP0_PPS_SCALE_EN_PREVSC_EN(prev_en));
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_PREHSC_EN_MASK,
+ DISP0_PPS_SCALE_EN_PREHSC_EN(preh_en));
+
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_HSC_NOR_RS_BITS_MASK,
+ DISP0_PPS_SCALE_EN_HSC_NOR_RS_BITS(9));
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_VSC_NOR_RS_BITS_MASK,
+ DISP0_PPS_SCALE_EN_VSC_NOR_RS_BITS(9));
+
+ return 0;
+}
+
+static void c3_isp_rsz_start(struct c3_isp_resizer *rsz,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+ const struct c3_isp_rsz_format_info *rsz_fmt;
+ struct v4l2_rect *sink_crop;
+ u32 mask;
+ u32 val;
+
+ sink_fmt = v4l2_subdev_state_get_format(state, C3_ISP_RSZ_PAD_SINK);
+ sink_crop = v4l2_subdev_state_get_crop(state, C3_ISP_RSZ_PAD_SINK);
+ src_fmt = v4l2_subdev_state_get_format(state, C3_ISP_RSZ_PAD_SOURCE);
+ rsz_fmt = rsz_find_format_by_code(sink_fmt->code, C3_ISP_RSZ_PAD_SINK);
+
+ if (rsz->id == C3_ISP_RSZ_0) {
+ mask = ISP_TOP_DISPIN_SEL_DISP0_MASK;
+ val = rsz_fmt->is_raw ? ISP_TOP_DISPIN_SEL_DISP0_MIPI_OUT
+ : ISP_TOP_DISPIN_SEL_DISP0_CORE_OUT;
+ } else if (rsz->id == C3_ISP_RSZ_1) {
+ mask = ISP_TOP_DISPIN_SEL_DISP1_MASK;
+ val = rsz_fmt->is_raw ? ISP_TOP_DISPIN_SEL_DISP1_MIPI_OUT
+ : ISP_TOP_DISPIN_SEL_DISP1_CORE_OUT;
+ } else {
+ mask = ISP_TOP_DISPIN_SEL_DISP2_MASK;
+ val = rsz_fmt->is_raw ? ISP_TOP_DISPIN_SEL_DISP2_MIPI_OUT
+ : ISP_TOP_DISPIN_SEL_DISP2_CORE_OUT;
+ }
+
+ c3_isp_update_bits(rsz->isp, ISP_TOP_DISPIN_SEL, mask, val);
+
+ c3_isp_write(rsz->isp, C3_ISP_DISP_REG(ISP_DISP0_TOP_IN_SIZE, rsz->id),
+ ISP_DISP0_TOP_IN_SIZE_HSIZE(sink_fmt->width) |
+ ISP_DISP0_TOP_IN_SIZE_VSIZE(sink_fmt->height));
+
+ c3_isp_write(rsz->isp, C3_ISP_DISP_REG(DISP0_TOP_CRP2_START, rsz->id),
+ DISP0_TOP_CRP2_START_V_START(sink_crop->top) |
+ DISP0_TOP_CRP2_START_H_START(sink_crop->left));
+
+ c3_isp_write(rsz->isp, C3_ISP_DISP_REG(DISP0_TOP_CRP2_SIZE, rsz->id),
+ DISP0_TOP_CRP2_SIZE_V_SIZE(sink_crop->height) |
+ DISP0_TOP_CRP2_SIZE_H_SIZE(sink_crop->width));
+
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_TOP_TOP_CTRL, rsz->id),
+ DISP0_TOP_TOP_CTRL_CROP2_EN_MASK,
+ DISP0_TOP_TOP_CTRL_CROP2_EN);
+
+ if (!rsz_fmt->is_raw)
+ c3_isp_rsz_pps_enable(rsz, state);
+
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_TOP_OUT_SIZE, rsz->id),
+ DISP0_TOP_OUT_SIZE_SCL_OUT_HEIGHT_MASK,
+ DISP0_TOP_OUT_SIZE_SCL_OUT_HEIGHT(src_fmt->height));
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_TOP_OUT_SIZE, rsz->id),
+ DISP0_TOP_OUT_SIZE_SCL_OUT_WIDTH_MASK,
+ DISP0_TOP_OUT_SIZE_SCL_OUT_WIDTH(src_fmt->width));
+
+ if (rsz->id == C3_ISP_RSZ_0) {
+ mask = ISP_TOP_PATH_EN_DISP0_EN_MASK;
+ val = ISP_TOP_PATH_EN_DISP0_EN;
+ } else if (rsz->id == C3_ISP_RSZ_1) {
+ mask = ISP_TOP_PATH_EN_DISP1_EN_MASK;
+ val = ISP_TOP_PATH_EN_DISP1_EN;
+ } else {
+ mask = ISP_TOP_PATH_EN_DISP2_EN_MASK;
+ val = ISP_TOP_PATH_EN_DISP2_EN;
+ }
+
+ c3_isp_update_bits(rsz->isp, ISP_TOP_PATH_EN, mask, val);
+}
+
+static void c3_isp_rsz_stop(struct c3_isp_resizer *rsz)
+{
+ u32 mask;
+ u32 val;
+
+ if (rsz->id == C3_ISP_RSZ_0) {
+ mask = ISP_TOP_PATH_EN_DISP0_EN_MASK;
+ val = ISP_TOP_PATH_EN_DISP0_DIS;
+ } else if (rsz->id == C3_ISP_RSZ_1) {
+ mask = ISP_TOP_PATH_EN_DISP1_EN_MASK;
+ val = ISP_TOP_PATH_EN_DISP1_DIS;
+ } else {
+ mask = ISP_TOP_PATH_EN_DISP2_EN_MASK;
+ val = ISP_TOP_PATH_EN_DISP2_DIS;
+ }
+
+ c3_isp_update_bits(rsz->isp, ISP_TOP_PATH_EN, mask, val);
+
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_TOP_TOP_CTRL, rsz->id),
+ DISP0_TOP_TOP_CTRL_CROP2_EN_MASK,
+ DISP0_TOP_TOP_CTRL_CROP2_DIS);
+
+ c3_isp_rsz_pps_disable(rsz);
+}
+
+static int c3_isp_rsz_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct c3_isp_resizer *rsz = v4l2_get_subdevdata(sd);
+
+ c3_isp_rsz_start(rsz, state);
+
+ c3_isp_params_pre_cfg(rsz->isp);
+ c3_isp_stats_pre_cfg(rsz->isp);
+
+ return v4l2_subdev_enable_streams(rsz->src_sd, rsz->src_pad, BIT(0));
+}
+
+static int c3_isp_rsz_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct c3_isp_resizer *rsz = v4l2_get_subdevdata(sd);
+
+ c3_isp_rsz_stop(rsz);
+
+ return v4l2_subdev_disable_streams(rsz->src_sd, rsz->src_pad, BIT(0));
+}
+
+static int c3_isp_rsz_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct c3_isp_rsz_format_info *info;
+
+ info = rsz_find_format_by_index(code->index, code->pad);
+ if (!info)
+ return -EINVAL;
+
+ code->code = info->mbus_code;
+
+ return 0;
+}
+
+static void c3_isp_rsz_set_sink_fmt(struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+ struct v4l2_rect *sink_crop;
+ struct v4l2_rect *sink_cmps;
+ const struct c3_isp_rsz_format_info *rsz_fmt;
+
+ sink_fmt = v4l2_subdev_state_get_format(state, format->pad);
+ sink_crop = v4l2_subdev_state_get_crop(state, format->pad);
+ sink_cmps = v4l2_subdev_state_get_compose(state, format->pad);
+ src_fmt = v4l2_subdev_state_get_format(state, C3_ISP_RSZ_PAD_SOURCE);
+
+ rsz_fmt = rsz_find_format_by_code(format->format.code, format->pad);
+ if (rsz_fmt)
+ sink_fmt->code = format->format.code;
+ else
+ sink_fmt->code = C3_ISP_RSZ_DEF_PAD_FMT;
+
+ sink_fmt->width = clamp_t(u32, format->format.width,
+ C3_ISP_MIN_WIDTH, C3_ISP_MAX_WIDTH);
+ sink_fmt->height = clamp_t(u32, format->format.height,
+ C3_ISP_MIN_HEIGHT, C3_ISP_MAX_HEIGHT);
+ sink_fmt->field = V4L2_FIELD_NONE;
+ sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+
+ if (rsz_fmt && rsz_fmt->is_raw) {
+ sink_fmt->colorspace = V4L2_COLORSPACE_RAW;
+ sink_fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ } else {
+ sink_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ sink_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+ sink_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ }
+
+ sink_crop->width = sink_fmt->width;
+ sink_crop->height = sink_fmt->height;
+ sink_crop->left = 0;
+ sink_crop->top = 0;
+
+ sink_cmps->width = sink_crop->width;
+ sink_cmps->height = sink_crop->height;
+ sink_cmps->left = 0;
+ sink_cmps->top = 0;
+
+ src_fmt->code = sink_fmt->code;
+ src_fmt->width = sink_cmps->width;
+ src_fmt->height = sink_cmps->height;
+
+ format->format = *sink_fmt;
+}
+
+static void c3_isp_rsz_set_source_fmt(struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *src_fmt;
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_rect *sink_cmps;
+ const struct c3_isp_rsz_format_info *rsz_fmt;
+
+ src_fmt = v4l2_subdev_state_get_format(state, format->pad);
+ sink_fmt = v4l2_subdev_state_get_format(state, C3_ISP_RSZ_PAD_SINK);
+ sink_cmps = v4l2_subdev_state_get_compose(state, C3_ISP_RSZ_PAD_SINK);
+
+ src_fmt->code = sink_fmt->code;
+ src_fmt->width = sink_cmps->width;
+ src_fmt->height = sink_cmps->height;
+ src_fmt->field = V4L2_FIELD_NONE;
+ src_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+
+ rsz_fmt = rsz_find_format_by_code(src_fmt->code, format->pad);
+ if (rsz_fmt->is_raw) {
+ src_fmt->colorspace = V4L2_COLORSPACE_RAW;
+ src_fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ src_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ } else {
+ src_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ src_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+ src_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ }
+
+ format->format = *src_fmt;
+}
+
+static int c3_isp_rsz_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ if (format->pad == C3_ISP_RSZ_PAD_SINK)
+ c3_isp_rsz_set_sink_fmt(state, format);
+ else
+ c3_isp_rsz_set_source_fmt(state, format);
+
+ return 0;
+}
+
+static int c3_isp_rsz_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_rect *crop;
+ struct v4l2_rect *cmps;
+
+ if (sel->pad == C3_ISP_RSZ_PAD_SOURCE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ fmt = v4l2_subdev_state_get_format(state, sel->pad);
+ sel->r.width = fmt->width;
+ sel->r.height = fmt->height;
+ sel->r.left = 0;
+ sel->r.top = 0;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ crop = v4l2_subdev_state_get_crop(state, sel->pad);
+ sel->r.width = crop->width;
+ sel->r.height = crop->height;
+ sel->r.left = 0;
+ sel->r.top = 0;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ crop = v4l2_subdev_state_get_crop(state, sel->pad);
+ sel->r = *crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ cmps = v4l2_subdev_state_get_compose(state, sel->pad);
+ sel->r = *cmps;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int c3_isp_rsz_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+ struct v4l2_rect *crop;
+ struct v4l2_rect *cmps;
+
+ if (sel->pad == C3_ISP_RSZ_PAD_SOURCE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ fmt = v4l2_subdev_state_get_format(state, sel->pad);
+ crop = v4l2_subdev_state_get_crop(state, sel->pad);
+ cmps = v4l2_subdev_state_get_compose(state, sel->pad);
+ src_fmt = v4l2_subdev_state_get_format(state,
+ C3_ISP_RSZ_PAD_SOURCE);
+
+ sel->r.left = clamp_t(s32, sel->r.left, 0, fmt->width - 1);
+ sel->r.top = clamp_t(s32, sel->r.top, 0, fmt->height - 1);
+ sel->r.width = clamp(sel->r.width, C3_ISP_MIN_WIDTH,
+ fmt->width - sel->r.left);
+ sel->r.height = clamp(sel->r.height, C3_ISP_MIN_HEIGHT,
+ fmt->height - sel->r.top);
+
+ crop->width = ALIGN(sel->r.width, 2);
+ crop->height = ALIGN(sel->r.height, 2);
+ crop->left = sel->r.left;
+ crop->top = sel->r.top;
+
+ *cmps = *crop;
+
+ src_fmt->code = fmt->code;
+ src_fmt->width = cmps->width;
+ src_fmt->height = cmps->height;
+
+ sel->r = *crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ crop = v4l2_subdev_state_get_crop(state, sel->pad);
+ cmps = v4l2_subdev_state_get_compose(state, sel->pad);
+
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = clamp(sel->r.width, C3_ISP_MIN_WIDTH,
+ crop->width);
+ sel->r.height = clamp(sel->r.height, C3_ISP_MIN_HEIGHT,
+ crop->height);
+
+ cmps->width = ALIGN(sel->r.width, 2);
+ cmps->height = ALIGN(sel->r.height, 2);
+ cmps->left = sel->r.left;
+ cmps->top = sel->r.top;
+
+ sel->r = *cmps;
+
+ fmt = v4l2_subdev_state_get_format(state,
+ C3_ISP_RSZ_PAD_SOURCE);
+ fmt->width = cmps->width;
+ fmt->height = cmps->height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int c3_isp_rsz_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_rect *crop;
+ struct v4l2_rect *cmps;
+
+ fmt = v4l2_subdev_state_get_format(state, C3_ISP_RSZ_PAD_SINK);
+ fmt->width = C3_ISP_DEFAULT_WIDTH;
+ fmt->height = C3_ISP_DEFAULT_HEIGHT;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->code = C3_ISP_RSZ_DEF_PAD_FMT;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+
+ crop = v4l2_subdev_state_get_crop(state, C3_ISP_RSZ_PAD_SINK);
+ crop->width = C3_ISP_DEFAULT_WIDTH;
+ crop->height = C3_ISP_DEFAULT_HEIGHT;
+ crop->left = 0;
+ crop->top = 0;
+
+ cmps = v4l2_subdev_state_get_compose(state, C3_ISP_RSZ_PAD_SINK);
+ cmps->width = C3_ISP_DEFAULT_WIDTH;
+ cmps->height = C3_ISP_DEFAULT_HEIGHT;
+ cmps->left = 0;
+ cmps->top = 0;
+
+ fmt = v4l2_subdev_state_get_format(state, C3_ISP_RSZ_PAD_SOURCE);
+ fmt->width = cmps->width;
+ fmt->height = cmps->height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->code = C3_ISP_RSZ_DEF_PAD_FMT;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops c3_isp_rsz_pad_ops = {
+ .enum_mbus_code = c3_isp_rsz_enum_mbus_code,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = c3_isp_rsz_set_fmt,
+ .get_selection = c3_isp_rsz_get_selection,
+ .set_selection = c3_isp_rsz_set_selection,
+ .enable_streams = c3_isp_rsz_enable_streams,
+ .disable_streams = c3_isp_rsz_disable_streams,
+};
+
+static const struct v4l2_subdev_ops c3_isp_rsz_subdev_ops = {
+ .pad = &c3_isp_rsz_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops c3_isp_rsz_internal_ops = {
+ .init_state = c3_isp_rsz_init_state,
+};
+
+/* Media entity operations */
+static const struct media_entity_operations c3_isp_rsz_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int c3_isp_rsz_register(struct c3_isp_resizer *rsz)
+{
+ struct v4l2_subdev *sd = &rsz->sd;
+ int ret;
+
+ v4l2_subdev_init(sd, &c3_isp_rsz_subdev_ops);
+ sd->owner = THIS_MODULE;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->internal_ops = &c3_isp_rsz_internal_ops;
+ snprintf(sd->name, sizeof(sd->name), "c3-isp-resizer%u", rsz->id);
+
+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
+ sd->entity.ops = &c3_isp_rsz_entity_ops;
+
+ sd->dev = rsz->isp->dev;
+ v4l2_set_subdevdata(sd, rsz);
+
+ rsz->pads[C3_ISP_RSZ_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ rsz->pads[C3_ISP_RSZ_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&sd->entity, C3_ISP_RSZ_PAD_MAX,
+ rsz->pads);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret)
+ goto err_entity_cleanup;
+
+ ret = v4l2_device_register_subdev(&rsz->isp->v4l2_dev, sd);
+ if (ret)
+ goto err_subdev_cleanup;
+
+ return 0;
+
+err_subdev_cleanup:
+ v4l2_subdev_cleanup(sd);
+err_entity_cleanup:
+ media_entity_cleanup(&sd->entity);
+ return ret;
+}
+
+static void c3_isp_rsz_unregister(struct c3_isp_resizer *rsz)
+{
+ struct v4l2_subdev *sd = &rsz->sd;
+
+ v4l2_device_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
+ media_entity_cleanup(&sd->entity);
+}
+
+int c3_isp_resizers_register(struct c3_isp_device *isp)
+{
+ int ret;
+
+ for (unsigned int i = C3_ISP_RSZ_0; i < C3_ISP_NUM_RSZ; i++) {
+ struct c3_isp_resizer *rsz = &isp->resizers[i];
+
+ rsz->id = i;
+ rsz->isp = isp;
+ rsz->src_sd = &isp->core.sd;
+ rsz->src_pad = C3_ISP_CORE_PAD_SOURCE_VIDEO_0 + i;
+
+ ret = c3_isp_rsz_register(rsz);
+ if (ret) {
+ rsz->isp = NULL;
+ c3_isp_resizers_unregister(isp);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void c3_isp_resizers_unregister(struct c3_isp_device *isp)
+{
+ for (unsigned int i = C3_ISP_RSZ_0; i < C3_ISP_NUM_RSZ; i++) {
+ struct c3_isp_resizer *rsz = &isp->resizers[i];
+
+ if (rsz->isp)
+ c3_isp_rsz_unregister(rsz);
+ }
+}
diff --git a/drivers/media/platform/amlogic/c3/isp/c3-isp-stats.c b/drivers/media/platform/amlogic/c3/isp/c3-isp-stats.c
new file mode 100644
index 000000000000..8a5d7e1a30c9
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/c3-isp-stats.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#include <linux/cleanup.h>
+#include <linux/media/amlogic/c3-isp-config.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "c3-isp-common.h"
+#include "c3-isp-regs.h"
+
+/* Hardware configuration */
+
+static void c3_isp_stats_cfg_dmawr_addr(struct c3_isp_stats *stats)
+{
+ u32 awb_dma_size = sizeof(struct c3_isp_awb_stats);
+ u32 ae_dma_size = sizeof(struct c3_isp_ae_stats);
+ u32 awb_dma_addr = stats->buff->dma_addr;
+ u32 af_dma_addr;
+ u32 ae_dma_addr;
+
+ ae_dma_addr = awb_dma_addr + awb_dma_size;
+ af_dma_addr = ae_dma_addr + ae_dma_size;
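+
+	/*
+	 * One DMA buffer holds all three sets of statistics back to back:
+	 * AWB at offset 0, AE right after AWB and AF right after AE.
+	 */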
+
+ c3_isp_update_bits(stats->isp, VIU_DMAWR_BADDR0,
+ VIU_DMAWR_BADDR0_AF_STATS_BASE_ADDR_MASK,
+ VIU_DMAWR_BADDR0_AF_STATS_BASE_ADDR(af_dma_addr));
+
+ c3_isp_update_bits(stats->isp, VIU_DMAWR_BADDR1,
+ VIU_DMAWR_BADDR1_AWB_STATS_BASE_ADDR_MASK,
+ VIU_DMAWR_BADDR1_AWB_STATS_BASE_ADDR(awb_dma_addr));
+
+ c3_isp_update_bits(stats->isp, VIU_DMAWR_BADDR2,
+ VIU_DMAWR_BADDR2_AE_STATS_BASE_ADDR_MASK,
+ VIU_DMAWR_BADDR2_AE_STATS_BASE_ADDR(ae_dma_addr));
+}
+
+static void c3_isp_stats_cfg_buff(struct c3_isp_stats *stats)
+{
+ stats->buff =
+ list_first_entry_or_null(&stats->pending,
+ struct c3_isp_stats_buffer, list);
+ if (stats->buff) {
+ c3_isp_stats_cfg_dmawr_addr(stats);
+ list_del(&stats->buff->list);
+ }
+}
+
+void c3_isp_stats_pre_cfg(struct c3_isp_device *isp)
+{
+ struct c3_isp_stats *stats = &isp->stats;
+ u32 dma_size;
+
+ c3_isp_update_bits(stats->isp, ISP_AF_EN_CTRL,
+ ISP_AF_EN_CTRL_STAT_SEL_MASK,
+ ISP_AF_EN_CTRL_STAT_SEL_NEW);
+ c3_isp_update_bits(stats->isp, ISP_AE_CTRL,
+ ISP_AE_CTRL_LUMA_MODE_MASK,
+ ISP_AE_CTRL_LUMA_MODE_FILTER);
+
+	/* The DMA size registers are programmed in units of 16 bytes */
+ dma_size = sizeof(struct c3_isp_af_stats) / C3_ISP_DMA_SIZE_ALIGN_BYTES;
+ c3_isp_update_bits(stats->isp, VIU_DMAWR_SIZE0,
+ VIU_DMAWR_SIZE0_AF_STATS_SIZE_MASK,
+ VIU_DMAWR_SIZE0_AF_STATS_SIZE(dma_size));
+
+ dma_size = sizeof(struct c3_isp_awb_stats) /
+ C3_ISP_DMA_SIZE_ALIGN_BYTES;
+ c3_isp_update_bits(stats->isp, VIU_DMAWR_SIZE0,
+ VIU_DMAWR_SIZE0_AWB_STATS_SIZE_MASK,
+ VIU_DMAWR_SIZE0_AWB_STATS_SIZE(dma_size));
+
+ dma_size = sizeof(struct c3_isp_ae_stats) / C3_ISP_DMA_SIZE_ALIGN_BYTES;
+ c3_isp_update_bits(stats->isp, VIU_DMAWR_SIZE1,
+ VIU_DMAWR_SIZE1_AE_STATS_SIZE_MASK,
+ VIU_DMAWR_SIZE1_AE_STATS_SIZE(dma_size));
+
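+	/* guard() from <linux/cleanup.h> releases buff_lock at scope exit */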
+ guard(spinlock_irqsave)(&stats->buff_lock);
+
+ c3_isp_stats_cfg_buff(stats);
+}
+
+static int c3_isp_stats_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, C3_ISP_DRIVER_NAME, sizeof(cap->driver));
+ strscpy(cap->card, "AML C3 ISP", sizeof(cap->card));
+
+ return 0;
+}
+
+static int c3_isp_stats_enum_fmt(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct c3_isp_stats *stats = video_drvdata(file);
+
+ if (f->index > 0 || f->type != stats->vb2_q.type)
+ return -EINVAL;
+
+ f->pixelformat = V4L2_META_FMT_C3ISP_STATS;
+
+ return 0;
+}
+
+static int c3_isp_stats_g_fmt(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct c3_isp_stats *stats = video_drvdata(file);
+
+ f->fmt.meta = stats->vfmt.fmt.meta;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops isp_stats_v4l2_ioctl_ops = {
+ .vidioc_querycap = c3_isp_stats_querycap,
+ .vidioc_enum_fmt_meta_cap = c3_isp_stats_enum_fmt,
+ .vidioc_g_fmt_meta_cap = c3_isp_stats_g_fmt,
+ .vidioc_s_fmt_meta_cap = c3_isp_stats_g_fmt,
+ .vidioc_try_fmt_meta_cap = c3_isp_stats_g_fmt,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+static const struct v4l2_file_operations isp_stats_v4l2_fops = {
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+static int c3_isp_stats_vb2_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ if (*num_planes) {
+ if (*num_planes != 1)
+ return -EINVAL;
+
+ if (sizes[0] < sizeof(struct c3_isp_stats_info))
+ return -EINVAL;
+
+ return 0;
+ }
+
+ *num_planes = 1;
+ sizes[0] = sizeof(struct c3_isp_stats_info);
+
+ return 0;
+}
+
+static void c3_isp_stats_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+ struct c3_isp_stats_buffer *buf =
+ container_of(v4l2_buf, struct c3_isp_stats_buffer, vb);
+ struct c3_isp_stats *stats = vb2_get_drv_priv(vb->vb2_queue);
+
+ guard(spinlock_irqsave)(&stats->buff_lock);
+
+ list_add_tail(&buf->list, &stats->pending);
+}
+
+static int c3_isp_stats_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ struct c3_isp_stats *stats = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned int size = stats->vfmt.fmt.meta.buffersize;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(stats->isp->dev,
+			"User buffer too small (%lu < %u)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static int c3_isp_stats_vb2_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+ struct c3_isp_stats_buffer *buf =
+ container_of(v4l2_buf, struct c3_isp_stats_buffer, vb);
+
+ buf->dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ return 0;
+}
+
+static void c3_isp_stats_vb2_stop_streaming(struct vb2_queue *q)
+{
+ struct c3_isp_stats *stats = vb2_get_drv_priv(q);
+
+ guard(spinlock_irqsave)(&stats->buff_lock);
+
+ if (stats->buff) {
+ vb2_buffer_done(&stats->buff->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ stats->buff = NULL;
+ }
+
+ while (!list_empty(&stats->pending)) {
+ struct c3_isp_stats_buffer *buff;
+
+ buff = list_first_entry(&stats->pending,
+ struct c3_isp_stats_buffer, list);
+ list_del(&buff->list);
+ vb2_buffer_done(&buff->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static const struct vb2_ops isp_stats_vb2_ops = {
+ .queue_setup = c3_isp_stats_vb2_queue_setup,
+ .buf_queue = c3_isp_stats_vb2_buf_queue,
+ .buf_prepare = c3_isp_stats_vb2_buf_prepare,
+ .buf_init = c3_isp_stats_vb2_buf_init,
+ .stop_streaming = c3_isp_stats_vb2_stop_streaming,
+};
+
+int c3_isp_stats_register(struct c3_isp_device *isp)
+{
+ struct c3_isp_stats *stats = &isp->stats;
+ struct video_device *vdev = &stats->vdev;
+ struct vb2_queue *vb2_q = &stats->vb2_q;
+ int ret;
+
+ memset(stats, 0, sizeof(*stats));
+ stats->vfmt.fmt.meta.dataformat = V4L2_META_FMT_C3ISP_STATS;
+ stats->vfmt.fmt.meta.buffersize = sizeof(struct c3_isp_stats_info);
+ stats->isp = isp;
+ INIT_LIST_HEAD(&stats->pending);
+ spin_lock_init(&stats->buff_lock);
+
+ mutex_init(&stats->lock);
+
+ snprintf(vdev->name, sizeof(vdev->name), "c3-isp-stats");
+ vdev->fops = &isp_stats_v4l2_fops;
+ vdev->ioctl_ops = &isp_stats_v4l2_ioctl_ops;
+ vdev->v4l2_dev = &isp->v4l2_dev;
+ vdev->lock = &stats->lock;
+ vdev->minor = -1;
+ vdev->queue = vb2_q;
+ vdev->release = video_device_release_empty;
+ vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
+ vdev->vfl_dir = VFL_DIR_RX;
+ video_set_drvdata(vdev, stats);
+
+ vb2_q->drv_priv = stats;
+ vb2_q->mem_ops = &vb2_dma_contig_memops;
+ vb2_q->ops = &isp_stats_vb2_ops;
+ vb2_q->type = V4L2_BUF_TYPE_META_CAPTURE;
+ vb2_q->io_modes = VB2_DMABUF | VB2_MMAP;
+ vb2_q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vb2_q->buf_struct_size = sizeof(struct c3_isp_stats_buffer);
+ vb2_q->dev = isp->dev;
+ vb2_q->lock = &stats->lock;
+ vb2_q->min_queued_buffers = 2;
+
+ ret = vb2_queue_init(vb2_q);
+ if (ret)
+ goto err_destroy;
+
+ stats->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vdev->entity, 1, &stats->pad);
+ if (ret)
+ goto err_queue_release;
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(isp->dev,
+ "Failed to register %s: %d\n", vdev->name, ret);
+ goto err_entity_cleanup;
+ }
+
+ return 0;
+
+err_entity_cleanup:
+ media_entity_cleanup(&vdev->entity);
+err_queue_release:
+ vb2_queue_release(vb2_q);
+err_destroy:
+ mutex_destroy(&stats->lock);
+ return ret;
+}
+
+void c3_isp_stats_unregister(struct c3_isp_device *isp)
+{
+ struct c3_isp_stats *stats = &isp->stats;
+
+ vb2_queue_release(&stats->vb2_q);
+ media_entity_cleanup(&stats->vdev.entity);
+ video_unregister_device(&stats->vdev);
+ mutex_destroy(&stats->lock);
+}
+
+void c3_isp_stats_isr(struct c3_isp_device *isp)
+{
+ struct c3_isp_stats *stats = &isp->stats;
+
+ guard(spinlock_irqsave)(&stats->buff_lock);
+
+ if (stats->buff) {
+ stats->buff->vb.sequence = stats->isp->frm_sequence;
+ stats->buff->vb.vb2_buf.timestamp = ktime_get();
+ stats->buff->vb.field = V4L2_FIELD_NONE;
+ vb2_buffer_done(&stats->buff->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ }
+
+ c3_isp_stats_cfg_buff(stats);
+}
diff --git a/drivers/media/platform/amlogic/c3/mipi-adapter/Kconfig b/drivers/media/platform/amlogic/c3/mipi-adapter/Kconfig
new file mode 100644
index 000000000000..bf19059b3543
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/mipi-adapter/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config VIDEO_C3_MIPI_ADAPTER
+ tristate "Amlogic C3 MIPI adapter"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on VIDEO_DEV
+ depends on OF
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ Video4Linux2 driver for Amlogic C3 MIPI adapter.
+	  The C3 MIPI adapter is mainly responsible for organizing
+	  MIPI data and sending raw data to the ISP pipeline.
+
+	  To compile this driver as a module, choose M here.
diff --git a/drivers/media/platform/amlogic/c3/mipi-adapter/Makefile b/drivers/media/platform/amlogic/c3/mipi-adapter/Makefile
new file mode 100644
index 000000000000..216fc310c5b4
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/mipi-adapter/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_VIDEO_C3_MIPI_ADAPTER) += c3-mipi-adap.o
diff --git a/drivers/media/platform/amlogic/c3/mipi-adapter/c3-mipi-adap.c b/drivers/media/platform/amlogic/c3/mipi-adapter/c3-mipi-adap.c
new file mode 100644
index 000000000000..4bd98fb9c7e9
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/mipi-adapter/c3-mipi-adap.c
@@ -0,0 +1,842 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <media/mipi-csi2.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+
+/*
+ * Adapter Block Diagram
+ * ---------------------
+ *
+ * +--------------------------------------------+
+ * | Adapter |
+ * |--------------------------------------------|
+ * +------------+ | | | | | +-----+
+ * | MIPI CSI-2 |--->| Frontend -> DDR_RD0 -> PIXEL0 -> ALIGNMENT |--->| ISP |
+ * +------------+ | | | | | +-----+
+ * +--------------------------------------------+
+ *
+ */
+
+/* C3 adapter submodule definition */
+enum {
+ SUBMD_TOP,
+ SUBMD_FD,
+ SUBMD_RD,
+};
+
+#define ADAP_SUBMD_MASK GENMASK(17, 16)
+#define ADAP_SUBMD_SHIFT 16
+#define ADAP_SUBMD(x) (((x) & (ADAP_SUBMD_MASK)) >> (ADAP_SUBMD_SHIFT))
+#define ADAP_REG_ADDR_MASK GENMASK(15, 0)
+#define ADAP_REG_ADDR(x) ((x) & (ADAP_REG_ADDR_MASK))
+#define ADAP_REG_T(x) ((SUBMD_TOP << ADAP_SUBMD_SHIFT) | (x))
+#define ADAP_REG_F(x) ((SUBMD_FD << ADAP_SUBMD_SHIFT) | (x))
+#define ADAP_REG_R(x) ((SUBMD_RD << ADAP_SUBMD_SHIFT) | (x))
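+
+/*
+ * Example: ADAP_REG_R(0x80) = (SUBMD_RD << 16) | 0x80 = 0x20080, so
+ * ADAP_SUBMD() recovers SUBMD_RD and ADAP_REG_ADDR() recovers 0x80.
+ */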
+
+#define MIPI_ADAP_CLOCK_NUM_MAX 3
+#define MIPI_ADAP_SUBDEV_NAME "c3-mipi-adapter"
+
+/* C3 MIPI adapter TOP register */
+#define MIPI_TOP_CTRL0 ADAP_REG_T(0x00)
+#define MIPI_TOP_CTRL0_RST_ADAPTER_MASK BIT(1)
+#define MIPI_TOP_CTRL0_RST_ADAPTER_APPLY BIT(1)
+#define MIPI_TOP_CTRL0_RST_ADAPTER_EXIT (0 << 1)
+
+#define MIPI_ADAPT_DE_CTRL0 ADAP_REG_T(0x40)
+#define MIPI_ADAPT_DE_CTRL0_RD_BUS_BYPASS_MASK BIT(3)
+#define MIPI_ADAPT_DE_CTRL0_RD_BUS_BYPASS_EN BIT(3)
+#define MIPI_ADAPT_DE_CTRL0_RD_BUS_BYPASS_DIS (0 << 3)
+#define MIPI_ADAPT_DE_CTRL0_WR_BUS_BYPASS_MASK BIT(7)
+#define MIPI_ADAPT_DE_CTRL0_WR_BUS_BYPASS_EN BIT(7)
+#define MIPI_ADAPT_DE_CTRL0_WR_BUS_BYPASS_DIS (0 << 7)
+
+/* C3 MIPI adapter FRONTEND register */
+#define CSI2_CLK_RESET ADAP_REG_F(0x00)
+#define CSI2_CLK_RESET_SW_RESET_MASK BIT(0)
+#define CSI2_CLK_RESET_SW_RESET_APPLY BIT(0)
+#define CSI2_CLK_RESET_SW_RESET_RELEASE (0 << 0)
+#define CSI2_CLK_RESET_CLK_ENABLE_MASK BIT(1)
+#define CSI2_CLK_RESET_CLK_ENABLE_EN BIT(1)
+#define CSI2_CLK_RESET_CLK_ENABLE_DIS (0 << 1)
+
+#define CSI2_GEN_CTRL0 ADAP_REG_F(0x04)
+#define CSI2_GEN_CTRL0_VC0_MASK BIT(0)
+#define CSI2_GEN_CTRL0_VC0_EN BIT(0)
+#define CSI2_GEN_CTRL0_VC0_DIS (0 << 0)
+#define CSI2_GEN_CTRL0_ENABLE_PACKETS_MASK GENMASK(20, 16)
+#define CSI2_GEN_CTRL0_ENABLE_PACKETS_RAW BIT(16)
+#define CSI2_GEN_CTRL0_ENABLE_PACKETS_YUV (2 << 16)
+
+#define CSI2_X_START_END_ISP ADAP_REG_F(0x0c)
+#define CSI2_X_START_END_ISP_X_START_MASK GENMASK(15, 0)
+#define CSI2_X_START_END_ISP_X_START(x) ((x) << 0)
+#define CSI2_X_START_END_ISP_X_END_MASK GENMASK(31, 16)
+#define CSI2_X_START_END_ISP_X_END(x) (((x) - 1) << 16)
+
+#define CSI2_Y_START_END_ISP ADAP_REG_F(0x10)
+#define CSI2_Y_START_END_ISP_Y_START_MASK GENMASK(15, 0)
+#define CSI2_Y_START_END_ISP_Y_START(x) ((x) << 0)
+#define CSI2_Y_START_END_ISP_Y_END_MASK GENMASK(31, 16)
+#define CSI2_Y_START_END_ISP_Y_END(x) (((x) - 1) << 16)
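+
+/*
+ * The X/Y END fields hold the index of the last active pixel/line,
+ * hence the (x) - 1 in the END macros.
+ */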
+
+#define CSI2_VC_MODE ADAP_REG_F(0x1c)
+#define CSI2_VC_MODE_VS_ISP_SEL_VC_MASK GENMASK(19, 16)
+#define CSI2_VC_MODE_VS_ISP_SEL_VC_0 BIT(16)
+#define CSI2_VC_MODE_VS_ISP_SEL_VC_1 (2 << 16)
+#define CSI2_VC_MODE_VS_ISP_SEL_VC_2 (4 << 16)
+#define CSI2_VC_MODE_VS_ISP_SEL_VC_3 (8 << 16)
+#define CSI2_VC_MODE_HS_ISP_SEL_VC_MASK GENMASK(23, 20)
+#define CSI2_VC_MODE_HS_ISP_SEL_VC_0 BIT(20)
+#define CSI2_VC_MODE_HS_ISP_SEL_VC_1 (2 << 20)
+#define CSI2_VC_MODE_HS_ISP_SEL_VC_2 (4 << 20)
+#define CSI2_VC_MODE_HS_ISP_SEL_VC_3 (8 << 20)
+
+/* C3 MIPI adapter READER register */
+#define MIPI_ADAPT_DDR_RD0_CNTL0 ADAP_REG_R(0x00)
+#define MIPI_ADAPT_DDR_RD0_CNTL0_MODULE_EN_MASK BIT(0)
+#define MIPI_ADAPT_DDR_RD0_CNTL0_MODULE_EN BIT(0)
+#define MIPI_ADAPT_DDR_RD0_CNTL0_MODULE_DIS (0 << 0)
+
+#define MIPI_ADAPT_DDR_RD0_CNTL1 ADAP_REG_R(0x04)
+#define MIPI_ADAPT_DDR_RD0_CNTL1_PORT_SEL_MASK GENMASK(31, 30)
+#define MIPI_ADAPT_DDR_RD0_CNTL1_PORT_SEL_DIRECT_MODE (0 << 30)
+#define MIPI_ADAPT_DDR_RD0_CNTL1_PORT_SEL_DDR_MODE BIT(30)
+
+#define MIPI_ADAPT_PIXEL0_CNTL0 ADAP_REG_R(0x80)
+#define MIPI_ADAPT_PIXEL0_CNTL0_WORK_MODE_MASK GENMASK(17, 16)
+#define MIPI_ADAPT_PIXEL0_CNTL0_WORK_MODE_RAW_DDR (0 << 16)
+#define MIPI_ADAPT_PIXEL0_CNTL0_WORK_MODE_RAW_DIRECT BIT(16)
+#define MIPI_ADAPT_PIXEL0_CNTL0_DATA_TYPE_MASK GENMASK(25, 20)
+#define MIPI_ADAPT_PIXEL0_CNTL0_DATA_TYPE(x) ((x) << 20)
+#define MIPI_ADAPT_PIXEL0_CNTL0_START_EN_MASK BIT(31)
+#define MIPI_ADAPT_PIXEL0_CNTL0_START_EN BIT(31)
+
+#define MIPI_ADAPT_ALIG_CNTL0 ADAP_REG_R(0x100)
+#define MIPI_ADAPT_ALIG_CNTL0_H_NUM_MASK GENMASK(15, 0)
+#define MIPI_ADAPT_ALIG_CNTL0_H_NUM(x) ((x) << 0)
+#define MIPI_ADAPT_ALIG_CNTL0_V_NUM_MASK GENMASK(31, 16)
+#define MIPI_ADAPT_ALIG_CNTL0_V_NUM(x) ((x) << 16)
+
+#define MIPI_ADAPT_ALIG_CNTL1 ADAP_REG_R(0x104)
+#define MIPI_ADAPT_ALIG_CNTL1_HPE_NUM_MASK GENMASK(31, 16)
+#define MIPI_ADAPT_ALIG_CNTL1_HPE_NUM(x) ((x) << 16)
+
+#define MIPI_ADAPT_ALIG_CNTL2 ADAP_REG_R(0x108)
+#define MIPI_ADAPT_ALIG_CNTL2_VPE_NUM_MASK GENMASK(31, 16)
+#define MIPI_ADAPT_ALIG_CNTL2_VPE_NUM(x) ((x) << 16)
+
+#define MIPI_ADAPT_ALIG_CNTL6 ADAP_REG_R(0x118)
+#define MIPI_ADAPT_ALIG_CNTL6_PATH0_EN_MASK BIT(0)
+#define MIPI_ADAPT_ALIG_CNTL6_PATH0_EN BIT(0)
+#define MIPI_ADAPT_ALIG_CNTL6_PATH0_DIS (0 << 0)
+#define MIPI_ADAPT_ALIG_CNTL6_PIX0_DATA_MODE_MASK BIT(4)
+#define MIPI_ADAPT_ALIG_CNTL6_PIX0_DATA_MODE_DDR (0 << 4)
+#define MIPI_ADAPT_ALIG_CNTL6_PIX0_DATA_MODE_DIRECT BIT(4)
+#define MIPI_ADAPT_ALIG_CNTL6_DATA0_EN_MASK BIT(12)
+#define MIPI_ADAPT_ALIG_CNTL6_DATA0_EN BIT(12)
+#define MIPI_ADAPT_ALIG_CNTL6_DATA0_DIS (0 << 12)
+
+#define MIPI_ADAPT_ALIG_CNTL8 ADAP_REG_R(0x120)
+#define MIPI_ADAPT_ALIG_CNTL8_FRAME_CONTINUE_MASK	BIT(5)
+#define MIPI_ADAPT_ALIG_CNTL8_FRAME_CONTINUE_EN	BIT(5)
+#define MIPI_ADAPT_ALIG_CNTL8_FRAME_CONTINUE_DIS	(0 << 5)
+#define MIPI_ADAPT_ALIG_CNTL8_EXCEED_DIS_MASK BIT(12)
+#define MIPI_ADAPT_ALIG_CNTL8_EXCEED_HOLD (0 << 12)
+#define MIPI_ADAPT_ALIG_CNTL8_EXCEED_NOT_HOLD BIT(12)
+#define MIPI_ADAPT_ALIG_CNTL8_START_EN_MASK BIT(31)
+#define MIPI_ADAPT_ALIG_CNTL8_START_EN BIT(31)
+
+#define MIPI_ADAP_MAX_WIDTH 2888
+#define MIPI_ADAP_MIN_WIDTH 160
+#define MIPI_ADAP_MAX_HEIGHT 2240
+#define MIPI_ADAP_MIN_HEIGHT 120
+#define MIPI_ADAP_DEFAULT_WIDTH 1920
+#define MIPI_ADAP_DEFAULT_HEIGHT 1080
+#define MIPI_ADAP_DEFAULT_FMT MEDIA_BUS_FMT_SRGGB10_1X10
+
+/* C3 MIPI adapter pad list */
+enum {
+ C3_MIPI_ADAP_PAD_SINK,
+ C3_MIPI_ADAP_PAD_SRC,
+ C3_MIPI_ADAP_PAD_MAX
+};
+
+/*
+ * struct c3_adap_info - mipi adapter information
+ *
+ * @clocks: array of mipi adapter clock names
+ * @clock_num: the number of clocks used
+ */
+struct c3_adap_info {
+ char *clocks[MIPI_ADAP_CLOCK_NUM_MAX];
+ u32 clock_num;
+};
+
+/*
+ * struct c3_adap_device - mipi adapter platform device
+ *
+ * @dev: pointer to the struct device
+ * @top: mipi adapter top register address
+ * @fd: mipi adapter frontend register address
+ * @rd: mipi adapter reader register address
+ * @clks: array of MIPI adapter clocks
+ * @sd: mipi adapter sub-device
+ * @pads: mipi adapter sub-device pads
+ * @notifier: notifier to register on the v4l2-async API
+ * @src_pad: source sub-device pad
+ * @info: version-specific MIPI adapter information
+ */
+struct c3_adap_device {
+ struct device *dev;
+ void __iomem *top;
+ void __iomem *fd;
+ void __iomem *rd;
+ struct clk_bulk_data clks[MIPI_ADAP_CLOCK_NUM_MAX];
+
+ struct v4l2_subdev sd;
+ struct media_pad pads[C3_MIPI_ADAP_PAD_MAX];
+ struct v4l2_async_notifier notifier;
+ struct media_pad *src_pad;
+
+ const struct c3_adap_info *info;
+};
+
+/* Format helpers */
+
+struct c3_adap_pix_format {
+ u32 code;
+ u8 type;
+};
+
+static const struct c3_adap_pix_format c3_mipi_adap_formats[] = {
+ { MEDIA_BUS_FMT_SBGGR10_1X10, MIPI_CSI2_DT_RAW10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, MIPI_CSI2_DT_RAW10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, MIPI_CSI2_DT_RAW10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, MIPI_CSI2_DT_RAW10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, MIPI_CSI2_DT_RAW12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, MIPI_CSI2_DT_RAW12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, MIPI_CSI2_DT_RAW12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, MIPI_CSI2_DT_RAW12 },
+};
+
+static const struct c3_adap_pix_format *c3_mipi_adap_find_format(u32 code)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(c3_mipi_adap_formats); i++)
+ if (code == c3_mipi_adap_formats[i].code)
+ return &c3_mipi_adap_formats[i];
+
+ return NULL;
+}
+
+/* Hardware configuration */
+
+static void c3_mipi_adap_update_bits(struct c3_adap_device *adap, u32 reg,
+ u32 mask, u32 val)
+{
+ void __iomem *addr;
+ u32 orig, tmp;
+
+ switch (ADAP_SUBMD(reg)) {
+ case SUBMD_TOP:
+ addr = adap->top + ADAP_REG_ADDR(reg);
+ break;
+ case SUBMD_FD:
+ addr = adap->fd + ADAP_REG_ADDR(reg);
+ break;
+ case SUBMD_RD:
+ addr = adap->rd + ADAP_REG_ADDR(reg);
+ break;
+ default:
+ dev_err(adap->dev,
+ "Invalid sub-module: %lu\n", ADAP_SUBMD(reg));
+ return;
+ }
+
+ orig = readl(addr);
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+
+ if (tmp != orig)
+ writel(tmp, addr);
+}
+
+/* Configure the adapter top submodule */
+static void c3_mipi_adap_cfg_top(struct c3_adap_device *adap)
+{
+ /* Reset adapter */
+ c3_mipi_adap_update_bits(adap, MIPI_TOP_CTRL0,
+ MIPI_TOP_CTRL0_RST_ADAPTER_MASK,
+ MIPI_TOP_CTRL0_RST_ADAPTER_APPLY);
+ c3_mipi_adap_update_bits(adap, MIPI_TOP_CTRL0,
+ MIPI_TOP_CTRL0_RST_ADAPTER_MASK,
+ MIPI_TOP_CTRL0_RST_ADAPTER_EXIT);
+
+ /* Bypass decompress */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_DE_CTRL0,
+ MIPI_ADAPT_DE_CTRL0_RD_BUS_BYPASS_MASK,
+ MIPI_ADAPT_DE_CTRL0_RD_BUS_BYPASS_EN);
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_DE_CTRL0,
+ MIPI_ADAPT_DE_CTRL0_WR_BUS_BYPASS_MASK,
+ MIPI_ADAPT_DE_CTRL0_WR_BUS_BYPASS_EN);
+}
+
+/* Configure the adapter frontend submodule */
+static void c3_mipi_adap_cfg_frontend(struct c3_adap_device *adap,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ /* Reset frontend module */
+ c3_mipi_adap_update_bits(adap, CSI2_CLK_RESET,
+ CSI2_CLK_RESET_SW_RESET_MASK,
+ CSI2_CLK_RESET_SW_RESET_APPLY);
+ c3_mipi_adap_update_bits(adap, CSI2_CLK_RESET,
+ CSI2_CLK_RESET_SW_RESET_MASK,
+ CSI2_CLK_RESET_SW_RESET_RELEASE);
+ c3_mipi_adap_update_bits(adap, CSI2_CLK_RESET,
+ CSI2_CLK_RESET_CLK_ENABLE_MASK,
+ CSI2_CLK_RESET_CLK_ENABLE_EN);
+
+ c3_mipi_adap_update_bits(adap, CSI2_X_START_END_ISP,
+ CSI2_X_START_END_ISP_X_START_MASK,
+ CSI2_X_START_END_ISP_X_START(0));
+ c3_mipi_adap_update_bits(adap, CSI2_X_START_END_ISP,
+ CSI2_X_START_END_ISP_X_END_MASK,
+ CSI2_X_START_END_ISP_X_END(fmt->width));
+
+ c3_mipi_adap_update_bits(adap, CSI2_Y_START_END_ISP,
+ CSI2_Y_START_END_ISP_Y_START_MASK,
+ CSI2_Y_START_END_ISP_Y_START(0));
+ c3_mipi_adap_update_bits(adap, CSI2_Y_START_END_ISP,
+ CSI2_Y_START_END_ISP_Y_END_MASK,
+ CSI2_Y_START_END_ISP_Y_END(fmt->height));
+
+	/* Select the VS and HS signals for the direct path */
+ c3_mipi_adap_update_bits(adap, CSI2_VC_MODE,
+ CSI2_VC_MODE_VS_ISP_SEL_VC_MASK,
+ CSI2_VC_MODE_VS_ISP_SEL_VC_0);
+ c3_mipi_adap_update_bits(adap, CSI2_VC_MODE,
+ CSI2_VC_MODE_HS_ISP_SEL_VC_MASK,
+ CSI2_VC_MODE_HS_ISP_SEL_VC_0);
+
+	/* Enable reception of RAW packets */
+ c3_mipi_adap_update_bits(adap, CSI2_GEN_CTRL0,
+ CSI2_GEN_CTRL0_ENABLE_PACKETS_MASK,
+ CSI2_GEN_CTRL0_ENABLE_PACKETS_RAW);
+
+ /* Enable virtual channel 0 */
+ c3_mipi_adap_update_bits(adap, CSI2_GEN_CTRL0,
+ CSI2_GEN_CTRL0_VC0_MASK,
+ CSI2_GEN_CTRL0_VC0_EN);
+}
+
+static void c3_mipi_adap_cfg_rd0(struct c3_adap_device *adap)
+{
+	/* Select direct mode for the DDR_RD0 module */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_DDR_RD0_CNTL1,
+ MIPI_ADAPT_DDR_RD0_CNTL1_PORT_SEL_MASK,
+ MIPI_ADAPT_DDR_RD0_CNTL1_PORT_SEL_DIRECT_MODE);
+
+ /* Data can't bypass DDR_RD0 in direct mode, so enable DDR_RD0 here */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_DDR_RD0_CNTL0,
+ MIPI_ADAPT_DDR_RD0_CNTL0_MODULE_EN_MASK,
+ MIPI_ADAPT_DDR_RD0_CNTL0_MODULE_EN);
+}
+
+static void c3_mipi_adap_cfg_pixel0(struct c3_adap_device *adap,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ const struct c3_adap_pix_format *pix;
+
+ pix = c3_mipi_adap_find_format(fmt->code);
+
+ /* Set work mode and data type for PIXEL0 module */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_PIXEL0_CNTL0,
+ MIPI_ADAPT_PIXEL0_CNTL0_WORK_MODE_MASK,
+ MIPI_ADAPT_PIXEL0_CNTL0_WORK_MODE_RAW_DIRECT);
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_PIXEL0_CNTL0,
+ MIPI_ADAPT_PIXEL0_CNTL0_DATA_TYPE_MASK,
+ MIPI_ADAPT_PIXEL0_CNTL0_DATA_TYPE(pix->type));
+
+ /* Start PIXEL0 module */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_PIXEL0_CNTL0,
+ MIPI_ADAPT_PIXEL0_CNTL0_START_EN_MASK,
+ MIPI_ADAPT_PIXEL0_CNTL0_START_EN);
+}
+
+static void c3_mipi_adap_cfg_alig(struct c3_adap_device *adap,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ /*
+	 * The ISP hardware requires horizontal blanking longer than 64
+	 * cycles, so add 64 here.
+ */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL0,
+ MIPI_ADAPT_ALIG_CNTL0_H_NUM_MASK,
+ MIPI_ADAPT_ALIG_CNTL0_H_NUM(fmt->width + 64));
+
+ /*
+	 * The ISP hardware requires vertical blanking longer than 40
+	 * lines, so add 40 here.
+ */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL0,
+ MIPI_ADAPT_ALIG_CNTL0_V_NUM_MASK,
+ MIPI_ADAPT_ALIG_CNTL0_V_NUM(fmt->height + 40));
+
+ /* End pixel in a line */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL1,
+ MIPI_ADAPT_ALIG_CNTL1_HPE_NUM_MASK,
+ MIPI_ADAPT_ALIG_CNTL1_HPE_NUM(fmt->width));
+
+ /* End line in a frame */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL2,
+ MIPI_ADAPT_ALIG_CNTL2_VPE_NUM_MASK,
+ MIPI_ADAPT_ALIG_CNTL2_VPE_NUM(fmt->height));
+
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL6,
+ MIPI_ADAPT_ALIG_CNTL6_PATH0_EN_MASK,
+ MIPI_ADAPT_ALIG_CNTL6_PATH0_EN);
+
+ /* Select direct mode for ALIG module */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL6,
+ MIPI_ADAPT_ALIG_CNTL6_PIX0_DATA_MODE_MASK,
+ MIPI_ADAPT_ALIG_CNTL6_PIX0_DATA_MODE_DIRECT);
+
+	/* Enable raw data output */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL6,
+ MIPI_ADAPT_ALIG_CNTL6_DATA0_EN_MASK,
+ MIPI_ADAPT_ALIG_CNTL6_DATA0_EN);
+
+ /* Set continue mode and disable hold counter */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL8,
+				 MIPI_ADAPT_ALIG_CNTL8_FRAME_CONTINUE_MASK,
+				 MIPI_ADAPT_ALIG_CNTL8_FRAME_CONTINUE_EN);
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL8,
+ MIPI_ADAPT_ALIG_CNTL8_EXCEED_DIS_MASK,
+ MIPI_ADAPT_ALIG_CNTL8_EXCEED_NOT_HOLD);
+
+ /* Start ALIG module */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL8,
+ MIPI_ADAPT_ALIG_CNTL8_START_EN_MASK,
+ MIPI_ADAPT_ALIG_CNTL8_START_EN);
+}
+
+/* V4L2 subdev operations */
+
+static int c3_mipi_adap_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct c3_adap_device *adap = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ struct media_pad *sink_pad;
+ struct v4l2_subdev *src_sd;
+ int ret;
+
+ sink_pad = &adap->pads[C3_MIPI_ADAP_PAD_SINK];
+ adap->src_pad = media_pad_remote_pad_unique(sink_pad);
+ if (IS_ERR(adap->src_pad)) {
+ dev_dbg(adap->dev, "Failed to get source pad for MIPI adap\n");
+ return -EPIPE;
+ }
+
+ src_sd = media_entity_to_v4l2_subdev(adap->src_pad->entity);
+
+	ret = pm_runtime_resume_and_get(adap->dev);
+	if (ret < 0)
+		return ret;
+
+ fmt = v4l2_subdev_state_get_format(state, C3_MIPI_ADAP_PAD_SINK);
+
+ c3_mipi_adap_cfg_top(adap);
+ c3_mipi_adap_cfg_frontend(adap, fmt);
+ c3_mipi_adap_cfg_rd0(adap);
+ c3_mipi_adap_cfg_pixel0(adap, fmt);
+ c3_mipi_adap_cfg_alig(adap, fmt);
+
+ ret = v4l2_subdev_enable_streams(src_sd, adap->src_pad->index, BIT(0));
+ if (ret) {
+ pm_runtime_put(adap->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int c3_mipi_adap_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct c3_adap_device *adap = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *src_sd;
+
+ if (adap->src_pad) {
+ src_sd = media_entity_to_v4l2_subdev(adap->src_pad->entity);
+ v4l2_subdev_disable_streams(src_sd, adap->src_pad->index,
+ BIT(0));
+ }
+ adap->src_pad = NULL;
+
+ pm_runtime_put(adap->dev);
+
+ return 0;
+}
+
+static int c3_mipi_adap_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct v4l2_mbus_framefmt *fmt;
+
+ switch (code->pad) {
+ case C3_MIPI_ADAP_PAD_SINK:
+ if (code->index >= ARRAY_SIZE(c3_mipi_adap_formats))
+ return -EINVAL;
+
+ code->code = c3_mipi_adap_formats[code->index].code;
+ break;
+ case C3_MIPI_ADAP_PAD_SRC:
+ if (code->index)
+ return -EINVAL;
+
+ fmt = v4l2_subdev_state_get_format(state, code->pad);
+ code->code = fmt->code;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int c3_mipi_adap_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt;
+ const struct c3_adap_pix_format *pix_format;
+
+ if (format->pad != C3_MIPI_ADAP_PAD_SINK)
+ return v4l2_subdev_get_fmt(sd, state, format);
+
+ pix_format = c3_mipi_adap_find_format(format->format.code);
+ if (!pix_format)
+ pix_format = &c3_mipi_adap_formats[0];
+
+ fmt = v4l2_subdev_state_get_format(state, format->pad);
+ fmt->code = pix_format->code;
+ fmt->width = clamp_t(u32, format->format.width,
+ MIPI_ADAP_MIN_WIDTH, MIPI_ADAP_MAX_WIDTH);
+ fmt->height = clamp_t(u32, format->format.height,
+ MIPI_ADAP_MIN_HEIGHT, MIPI_ADAP_MAX_HEIGHT);
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+ fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ format->format = *fmt;
+
+	/* Synchronize the format to the source pad */
+ fmt = v4l2_subdev_state_get_format(state, C3_MIPI_ADAP_PAD_SRC);
+ *fmt = format->format;
+
+ return 0;
+}
+
+static int c3_mipi_adap_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+
+ sink_fmt = v4l2_subdev_state_get_format(state, C3_MIPI_ADAP_PAD_SINK);
+ src_fmt = v4l2_subdev_state_get_format(state, C3_MIPI_ADAP_PAD_SRC);
+
+ sink_fmt->width = MIPI_ADAP_DEFAULT_WIDTH;
+ sink_fmt->height = MIPI_ADAP_DEFAULT_HEIGHT;
+ sink_fmt->field = V4L2_FIELD_NONE;
+ sink_fmt->code = MIPI_ADAP_DEFAULT_FMT;
+ sink_fmt->colorspace = V4L2_COLORSPACE_RAW;
+ sink_fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ *src_fmt = *sink_fmt;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops c3_mipi_adap_pad_ops = {
+ .enum_mbus_code = c3_mipi_adap_enum_mbus_code,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = c3_mipi_adap_set_fmt,
+ .enable_streams = c3_mipi_adap_enable_streams,
+ .disable_streams = c3_mipi_adap_disable_streams,
+};
+
+static const struct v4l2_subdev_ops c3_mipi_adap_subdev_ops = {
+ .pad = &c3_mipi_adap_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops c3_mipi_adap_internal_ops = {
+ .init_state = c3_mipi_adap_init_state,
+};
+
+/* Media entity operations */
+static const struct media_entity_operations c3_mipi_adap_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* PM runtime */
+
+static int c3_mipi_adap_runtime_suspend(struct device *dev)
+{
+ struct c3_adap_device *adap = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(adap->info->clock_num, adap->clks);
+
+ return 0;
+}
+
+static int c3_mipi_adap_runtime_resume(struct device *dev)
+{
+ struct c3_adap_device *adap = dev_get_drvdata(dev);
+
+ return clk_bulk_prepare_enable(adap->info->clock_num, adap->clks);
+}
+
+static const struct dev_pm_ops c3_mipi_adap_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ RUNTIME_PM_OPS(c3_mipi_adap_runtime_suspend,
+ c3_mipi_adap_runtime_resume, NULL)
+};
+
+/* Probe/remove & platform driver */
+
+static int c3_mipi_adap_subdev_init(struct c3_adap_device *adap)
+{
+ struct v4l2_subdev *sd = &adap->sd;
+ int ret;
+
+ v4l2_subdev_init(sd, &c3_mipi_adap_subdev_ops);
+ sd->owner = THIS_MODULE;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->internal_ops = &c3_mipi_adap_internal_ops;
+ snprintf(sd->name, sizeof(sd->name), "%s", MIPI_ADAP_SUBDEV_NAME);
+
+ sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ sd->entity.ops = &c3_mipi_adap_entity_ops;
+
+ sd->dev = adap->dev;
+ v4l2_set_subdevdata(sd, adap);
+
+ adap->pads[C3_MIPI_ADAP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ adap->pads[C3_MIPI_ADAP_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&sd->entity, C3_MIPI_ADAP_PAD_MAX,
+ adap->pads);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret) {
+ media_entity_cleanup(&sd->entity);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void c3_mipi_adap_subdev_deinit(struct c3_adap_device *adap)
+{
+ v4l2_subdev_cleanup(&adap->sd);
+ media_entity_cleanup(&adap->sd.entity);
+}
+
+/* Subdev notifier register */
+static int c3_mipi_adap_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asc)
+{
+ struct c3_adap_device *adap = v4l2_get_subdevdata(notifier->sd);
+ struct media_pad *sink = &adap->sd.entity.pads[C3_MIPI_ADAP_PAD_SINK];
+
+ return v4l2_create_fwnode_links_to_pad(sd, sink, MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static const struct v4l2_async_notifier_operations c3_mipi_adap_notify_ops = {
+ .bound = c3_mipi_adap_notify_bound,
+};
+
+static int c3_mipi_adap_async_register(struct c3_adap_device *adap)
+{
+ struct v4l2_async_connection *asc;
+ struct fwnode_handle *ep;
+ int ret;
+
+ v4l2_async_subdev_nf_init(&adap->notifier, &adap->sd);
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(adap->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ return -ENOTCONN;
+
+ asc = v4l2_async_nf_add_fwnode_remote(&adap->notifier, ep,
+ struct v4l2_async_connection);
+ if (IS_ERR(asc)) {
+ ret = PTR_ERR(asc);
+ goto err_put_handle;
+ }
+
+ adap->notifier.ops = &c3_mipi_adap_notify_ops;
+ ret = v4l2_async_nf_register(&adap->notifier);
+ if (ret)
+ goto err_cleanup_nf;
+
+ ret = v4l2_async_register_subdev(&adap->sd);
+ if (ret)
+ goto err_unregister_nf;
+
+ fwnode_handle_put(ep);
+
+ return 0;
+
+err_unregister_nf:
+ v4l2_async_nf_unregister(&adap->notifier);
+err_cleanup_nf:
+ v4l2_async_nf_cleanup(&adap->notifier);
+err_put_handle:
+ fwnode_handle_put(ep);
+ return ret;
+}
+
+static void c3_mipi_adap_async_unregister(struct c3_adap_device *adap)
+{
+ v4l2_async_unregister_subdev(&adap->sd);
+ v4l2_async_nf_unregister(&adap->notifier);
+ v4l2_async_nf_cleanup(&adap->notifier);
+}
+
+static int c3_mipi_adap_ioremap_resource(struct c3_adap_device *adap)
+{
+ struct device *dev = adap->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ adap->top = devm_platform_ioremap_resource_byname(pdev, "top");
+ if (IS_ERR(adap->top))
+ return PTR_ERR(adap->top);
+
+ adap->fd = devm_platform_ioremap_resource_byname(pdev, "fd");
+ if (IS_ERR(adap->fd))
+ return PTR_ERR(adap->fd);
+
+ adap->rd = devm_platform_ioremap_resource_byname(pdev, "rd");
+ if (IS_ERR(adap->rd))
+ return PTR_ERR(adap->rd);
+
+ return 0;
+}
+
+static int c3_mipi_adap_get_clocks(struct c3_adap_device *adap)
+{
+ const struct c3_adap_info *info = adap->info;
+
+ for (unsigned int i = 0; i < info->clock_num; i++)
+ adap->clks[i].id = info->clocks[i];
+
+ return devm_clk_bulk_get(adap->dev, info->clock_num, adap->clks);
+}
+
+static int c3_mipi_adap_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct c3_adap_device *adap;
+ int ret;
+
+ adap = devm_kzalloc(dev, sizeof(*adap), GFP_KERNEL);
+ if (!adap)
+ return -ENOMEM;
+
+ adap->info = of_device_get_match_data(dev);
+ adap->dev = dev;
+
+ ret = c3_mipi_adap_ioremap_resource(adap);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to ioremap resource\n");
+
+ ret = c3_mipi_adap_get_clocks(adap);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
+
+ platform_set_drvdata(pdev, adap);
+
+ pm_runtime_enable(dev);
+
+ ret = c3_mipi_adap_subdev_init(adap);
+ if (ret)
+ goto err_disable_runtime_pm;
+
+ ret = c3_mipi_adap_async_register(adap);
+ if (ret)
+ goto err_deinit_subdev;
+
+ return 0;
+
+err_deinit_subdev:
+ c3_mipi_adap_subdev_deinit(adap);
+err_disable_runtime_pm:
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static void c3_mipi_adap_remove(struct platform_device *pdev)
+{
+ struct c3_adap_device *adap = platform_get_drvdata(pdev);
+
+ c3_mipi_adap_async_unregister(adap);
+ c3_mipi_adap_subdev_deinit(adap);
+
+ pm_runtime_disable(&pdev->dev);
+}
+
+static const struct c3_adap_info c3_mipi_adap_info = {
+ .clocks = {"vapb", "isp0"},
+ .clock_num = 2,
+};
+
+static const struct of_device_id c3_mipi_adap_of_match[] = {
+ {
+ .compatible = "amlogic,c3-mipi-adapter",
+ .data = &c3_mipi_adap_info,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, c3_mipi_adap_of_match);
+
+static struct platform_driver c3_mipi_adap_driver = {
+ .probe = c3_mipi_adap_probe,
+ .remove = c3_mipi_adap_remove,
+ .driver = {
+ .name = "c3-mipi-adapter",
+ .of_match_table = c3_mipi_adap_of_match,
+ .pm = pm_ptr(&c3_mipi_adap_pm_ops),
+ },
+};
+
+module_platform_driver(c3_mipi_adap_driver);
+
+MODULE_AUTHOR("Keke Li <keke.li@amlogic.com>");
+MODULE_DESCRIPTION("Amlogic C3 MIPI adapter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/amlogic/c3/mipi-csi2/Kconfig b/drivers/media/platform/amlogic/c3/mipi-csi2/Kconfig
new file mode 100644
index 000000000000..0d7b2e203273
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/mipi-csi2/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config VIDEO_C3_MIPI_CSI2
+ tristate "Amlogic C3 MIPI CSI-2 receiver"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on VIDEO_DEV
+ depends on OF
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ Video4Linux2 driver for the Amlogic C3 MIPI CSI-2 receiver,
+ which receives MIPI CSI-2 data from an image sensor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called c3-mipi-csi2.
diff --git a/drivers/media/platform/amlogic/c3/mipi-csi2/Makefile b/drivers/media/platform/amlogic/c3/mipi-csi2/Makefile
new file mode 100644
index 000000000000..cc08fc722bfd
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/mipi-csi2/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_VIDEO_C3_MIPI_CSI2) += c3-mipi-csi2.o
diff --git a/drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c b/drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c
new file mode 100644
index 000000000000..1011ab3ebac7
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c
@@ -0,0 +1,828 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+
+/* C3 CSI-2 submodule definition */
+enum {
+ SUBMD_APHY,
+ SUBMD_DPHY,
+ SUBMD_HOST,
+};
+
+#define CSI2_SUBMD_MASK GENMASK(17, 16)
+#define CSI2_SUBMD_SHIFT 16
+#define CSI2_SUBMD(x) (((x) & (CSI2_SUBMD_MASK)) >> (CSI2_SUBMD_SHIFT))
+#define CSI2_REG_ADDR_MASK GENMASK(15, 0)
+#define CSI2_REG_ADDR(x) ((x) & (CSI2_REG_ADDR_MASK))
+#define CSI2_REG_A(x) ((SUBMD_APHY << CSI2_SUBMD_SHIFT) | (x))
+#define CSI2_REG_D(x) ((SUBMD_DPHY << CSI2_SUBMD_SHIFT) | (x))
+#define CSI2_REG_H(x) ((SUBMD_HOST << CSI2_SUBMD_SHIFT) | (x))
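+
+/*
+ * For example, CSI_PHY_CNTL0 below is CSI2_REG_A(0x44), i.e.
+ * (SUBMD_APHY << 16) | 0x44: c3_mipi_csi_write() decodes bits 17:16 to
+ * select the APHY register bank and bits 15:0 as the byte offset in it.
+ */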
+
+#define MIPI_CSI2_CLOCK_NUM_MAX 3
+#define MIPI_CSI2_SUBDEV_NAME "c3-mipi-csi2"
+
+/* C3 CSI-2 APHY register */
+#define CSI_PHY_CNTL0 CSI2_REG_A(0x44)
+#define CSI_PHY_CNTL0_HS_LP_BIAS_EN BIT(10)
+#define CSI_PHY_CNTL0_HS_RX_TRIM_11 (11 << 11)
+#define CSI_PHY_CNTL0_LP_LOW_VTH_2 (2 << 16)
+#define CSI_PHY_CNTL0_LP_HIGH_VTH_4 (4 << 20)
+#define CSI_PHY_CNTL0_DATA_LANE0_HS_DIG_EN BIT(24)
+#define CSI_PHY_CNTL0_DATA_LANE1_HS_DIG_EN BIT(25)
+#define CSI_PHY_CNTL0_CLK0_LANE_HS_DIG_EN BIT(26)
+#define CSI_PHY_CNTL0_DATA_LANE2_HS_DIG_EN BIT(27)
+#define CSI_PHY_CNTL0_DATA_LANE3_HS_DIG_EN BIT(28)
+
+#define CSI_PHY_CNTL1 CSI2_REG_A(0x48)
+#define CSI_PHY_CNTL1_HS_EQ_CAP_SMALL (2 << 16)
+#define CSI_PHY_CNTL1_HS_EQ_CAP_BIG (3 << 16)
+#define CSI_PHY_CNTL1_HS_EQ_RES_MIN (3 << 18)
+#define CSI_PHY_CNTL1_HS_EQ_RES_MED (2 << 18)
+#define CSI_PHY_CNTL1_HS_EQ_RES_MAX BIT(18)
+#define CSI_PHY_CNTL1_CLK_CHN_EQ_MAX_GAIN BIT(20)
+#define CSI_PHY_CNTL1_DATA_CHN_EQ_MAX_GAIN BIT(21)
+#define CSI_PHY_CNTL1_COM_BG_EN BIT(24)
+#define CSI_PHY_CNTL1_HS_SYNC_EN BIT(25)
+
+/* C3 CSI-2 DPHY register */
+#define MIPI_PHY_CTRL CSI2_REG_D(0x00)
+#define MIPI_PHY_CTRL_DATA_LANE0_EN (0 << 0)
+#define MIPI_PHY_CTRL_DATA_LANE0_DIS BIT(0)
+#define MIPI_PHY_CTRL_DATA_LANE1_EN (0 << 1)
+#define MIPI_PHY_CTRL_DATA_LANE1_DIS BIT(1)
+#define MIPI_PHY_CTRL_DATA_LANE2_EN (0 << 2)
+#define MIPI_PHY_CTRL_DATA_LANE2_DIS BIT(2)
+#define MIPI_PHY_CTRL_DATA_LANE3_EN (0 << 3)
+#define MIPI_PHY_CTRL_DATA_LANE3_DIS BIT(3)
+#define MIPI_PHY_CTRL_CLOCK_LANE_EN (0 << 4)
+#define MIPI_PHY_CTRL_CLOCK_LANE_DIS BIT(4)
+
+#define MIPI_PHY_CLK_LANE_CTRL CSI2_REG_D(0x04)
+#define MIPI_PHY_CLK_LANE_CTRL_FORCE_ULPS_ENTER BIT(0)
+#define MIPI_PHY_CLK_LANE_CTRL_FORCE_ULPS_EXIT BIT(1)
+#define MIPI_PHY_CLK_LANE_CTRL_TCLK_ZERO_HS (0 << 3)
+#define MIPI_PHY_CLK_LANE_CTRL_TCLK_ZERO_HS_2 BIT(3)
+#define MIPI_PHY_CLK_LANE_CTRL_TCLK_ZERO_HS_4 (2 << 3)
+#define MIPI_PHY_CLK_LANE_CTRL_TCLK_ZERO_HS_8 (3 << 3)
+#define MIPI_PHY_CLK_LANE_CTRL_TCLK_ZERO_HS_16 (4 << 3)
+#define MIPI_PHY_CLK_LANE_CTRL_TCLK_ZERO_EN BIT(6)
+#define MIPI_PHY_CLK_LANE_CTRL_LPEN_DIS BIT(7)
+#define MIPI_PHY_CLK_LANE_CTRL_END_EN BIT(8)
+#define MIPI_PHY_CLK_LANE_CTRL_HS_RX_EN BIT(9)
+
+#define MIPI_PHY_DATA_LANE_CTRL1 CSI2_REG_D(0x0c)
+#define MIPI_PHY_DATA_LANE_CTRL1_INSERT_ERRESC BIT(0)
+#define MIPI_PHY_DATA_LANE_CTRL1_HS_SYNC_CHK_EN BIT(1)
+#define MIPI_PHY_DATA_LANE_CTRL1_PIPE_MASK GENMASK(6, 2)
+#define MIPI_PHY_DATA_LANE_CTRL1_PIPE_ALL_EN (0x1f << 2)
+#define MIPI_PHY_DATA_LANE_CTRL1_PIPE_DELAY_MASK GENMASK(9, 7)
+#define MIPI_PHY_DATA_LANE_CTRL1_PIPE_DELAY_3 (3 << 7)
+
+#define MIPI_PHY_TCLK_MISS CSI2_REG_D(0x10)
+#define MIPI_PHY_TCLK_MISS_CYCLES_MASK GENMASK(7, 0)
+#define MIPI_PHY_TCLK_MISS_CYCLES_9 (9 << 0)
+
+#define MIPI_PHY_TCLK_SETTLE CSI2_REG_D(0x14)
+#define MIPI_PHY_TCLK_SETTLE_CYCLES_MASK GENMASK(7, 0)
+#define MIPI_PHY_TCLK_SETTLE_CYCLES_31 (31 << 0)
+
+#define MIPI_PHY_THS_EXIT CSI2_REG_D(0x18)
+#define MIPI_PHY_THS_EXIT_CYCLES_MASK GENMASK(7, 0)
+#define MIPI_PHY_THS_EXIT_CYCLES_8 (8 << 0)
+
+#define MIPI_PHY_THS_SKIP CSI2_REG_D(0x1c)
+#define MIPI_PHY_THS_SKIP_CYCLES_MASK GENMASK(7, 0)
+#define MIPI_PHY_THS_SKIP_CYCLES_10 (10 << 0)
+
+#define MIPI_PHY_THS_SETTLE CSI2_REG_D(0x20)
+#define MIPI_PHY_THS_SETTLE_CYCLES_MASK GENMASK(7, 0)
+
+#define MIPI_PHY_TINIT CSI2_REG_D(0x24)
+#define MIPI_PHY_TINIT_CYCLES_MASK GENMASK(31, 0)
+#define MIPI_PHY_TINIT_CYCLES_20000 (20000 << 0)
+
+#define MIPI_PHY_TULPS_C CSI2_REG_D(0x28)
+#define MIPI_PHY_TULPS_C_CYCLES_MASK GENMASK(31, 0)
+#define MIPI_PHY_TULPS_C_CYCLES_4096 (4096 << 0)
+
+#define MIPI_PHY_TULPS_S CSI2_REG_D(0x2c)
+#define MIPI_PHY_TULPS_S_CYCLES_MASK GENMASK(31, 0)
+#define MIPI_PHY_TULPS_S_CYCLES_256 (256 << 0)
+
+#define MIPI_PHY_TMBIAS CSI2_REG_D(0x30)
+#define MIPI_PHY_TMBIAS_CYCLES_MASK GENMASK(31, 0)
+#define MIPI_PHY_TMBIAS_CYCLES_256 (256 << 0)
+
+#define MIPI_PHY_TLP_EN_W CSI2_REG_D(0x34)
+#define MIPI_PHY_TLP_EN_W_CYCLES_MASK GENMASK(31, 0)
+#define MIPI_PHY_TLP_EN_W_CYCLES_12 (12 << 0)
+
+#define MIPI_PHY_TLPOK CSI2_REG_D(0x38)
+#define MIPI_PHY_TLPOK_CYCLES_MASK GENMASK(31, 0)
+#define MIPI_PHY_TLPOK_CYCLES_256 (256 << 0)
+
+#define MIPI_PHY_TWD_INIT CSI2_REG_D(0x3c)
+#define MIPI_PHY_TWD_INIT_DOG_MASK GENMASK(31, 0)
+#define MIPI_PHY_TWD_INIT_DOG_0X400000 (0x400000 << 0)
+
+#define MIPI_PHY_TWD_HS CSI2_REG_D(0x40)
+#define MIPI_PHY_TWD_HS_DOG_MASK GENMASK(31, 0)
+#define MIPI_PHY_TWD_HS_DOG_0X400000 (0x400000 << 0)
+
+#define MIPI_PHY_MUX_CTRL0 CSI2_REG_D(0x284)
+#define MIPI_PHY_MUX_CTRL0_SFEN3_SRC_MASK GENMASK(3, 0)
+#define MIPI_PHY_MUX_CTRL0_SFEN3_SRC_LANE0 (0 << 0)
+#define MIPI_PHY_MUX_CTRL0_SFEN3_SRC_LANE1 BIT(0)
+#define MIPI_PHY_MUX_CTRL0_SFEN3_SRC_LANE2 (2 << 0)
+#define MIPI_PHY_MUX_CTRL0_SFEN3_SRC_LANE3 (3 << 0)
+#define MIPI_PHY_MUX_CTRL0_SFEN2_SRC_MASK GENMASK(7, 4)
+#define MIPI_PHY_MUX_CTRL0_SFEN2_SRC_LANE0 (0 << 4)
+#define MIPI_PHY_MUX_CTRL0_SFEN2_SRC_LANE1 BIT(4)
+#define MIPI_PHY_MUX_CTRL0_SFEN2_SRC_LANE2 (2 << 4)
+#define MIPI_PHY_MUX_CTRL0_SFEN2_SRC_LANE3 (3 << 4)
+#define MIPI_PHY_MUX_CTRL0_SFEN1_SRC_MASK GENMASK(11, 8)
+#define MIPI_PHY_MUX_CTRL0_SFEN1_SRC_LANE0 (0 << 8)
+#define MIPI_PHY_MUX_CTRL0_SFEN1_SRC_LANE1 BIT(8)
+#define MIPI_PHY_MUX_CTRL0_SFEN1_SRC_LANE2 (2 << 8)
+#define MIPI_PHY_MUX_CTRL0_SFEN1_SRC_LANE3 (3 << 8)
+#define MIPI_PHY_MUX_CTRL0_SFEN0_SRC_MASK GENMASK(14, 12)
+#define MIPI_PHY_MUX_CTRL0_SFEN0_SRC_LANE0 (0 << 12)
+#define MIPI_PHY_MUX_CTRL0_SFEN0_SRC_LANE1 BIT(12)
+#define MIPI_PHY_MUX_CTRL0_SFEN0_SRC_LANE2 (2 << 12)
+#define MIPI_PHY_MUX_CTRL0_SFEN0_SRC_LANE3 (3 << 12)
+
+#define MIPI_PHY_MUX_CTRL1 CSI2_REG_D(0x288)
+#define MIPI_PHY_MUX_CTRL1_LANE3_SRC_MASK GENMASK(3, 0)
+#define MIPI_PHY_MUX_CTRL1_LANE3_SRC_SFEN0 (0 << 0)
+#define MIPI_PHY_MUX_CTRL1_LANE3_SRC_SFEN1 BIT(0)
+#define MIPI_PHY_MUX_CTRL1_LANE3_SRC_SFEN2 (2 << 0)
+#define MIPI_PHY_MUX_CTRL1_LANE3_SRC_SFEN3 (3 << 0)
+#define MIPI_PHY_MUX_CTRL1_LANE2_SRC_MASK GENMASK(7, 4)
+#define MIPI_PHY_MUX_CTRL1_LANE2_SRC_SFEN0 (0 << 4)
+#define MIPI_PHY_MUX_CTRL1_LANE2_SRC_SFEN1 BIT(4)
+#define MIPI_PHY_MUX_CTRL1_LANE2_SRC_SFEN2 (2 << 4)
+#define MIPI_PHY_MUX_CTRL1_LANE2_SRC_SFEN3 (3 << 4)
+#define MIPI_PHY_MUX_CTRL1_LANE1_SRC_MASK GENMASK(11, 8)
+#define MIPI_PHY_MUX_CTRL1_LANE1_SRC_SFEN0 (0 << 8)
+#define MIPI_PHY_MUX_CTRL1_LANE1_SRC_SFEN1 BIT(8)
+#define MIPI_PHY_MUX_CTRL1_LANE1_SRC_SFEN2 (2 << 8)
+#define MIPI_PHY_MUX_CTRL1_LANE1_SRC_SFEN3 (3 << 8)
+#define MIPI_PHY_MUX_CTRL1_LANE0_SRC_MASK GENMASK(14, 12)
+#define MIPI_PHY_MUX_CTRL1_LANE0_SRC_SFEN0 (0 << 12)
+#define MIPI_PHY_MUX_CTRL1_LANE0_SRC_SFEN1 BIT(12)
+#define MIPI_PHY_MUX_CTRL1_LANE0_SRC_SFEN2 (2 << 12)
+#define MIPI_PHY_MUX_CTRL1_LANE0_SRC_SFEN3 (3 << 12)
+
+/* C3 CSI-2 HOST register */
+#define CSI2_HOST_N_LANES CSI2_REG_H(0x04)
+#define CSI2_HOST_N_LANES_MASK GENMASK(1, 0)
+#define CSI2_HOST_N_LANES_1 (0 << 0)
+#define CSI2_HOST_N_LANES_2 BIT(0)
+#define CSI2_HOST_N_LANES_3 (2 << 0)
+#define CSI2_HOST_N_LANES_4 (3 << 0)
+
+#define CSI2_HOST_CSI2_RESETN CSI2_REG_H(0x10)
+#define CSI2_HOST_CSI2_RESETN_MASK BIT(0)
+#define CSI2_HOST_CSI2_RESETN_ACTIVE (0 << 0)
+#define CSI2_HOST_CSI2_RESETN_EXIT BIT(0)
+
+#define C3_MIPI_CSI2_MAX_WIDTH 2888
+#define C3_MIPI_CSI2_MIN_WIDTH 160
+#define C3_MIPI_CSI2_MAX_HEIGHT 2240
+#define C3_MIPI_CSI2_MIN_HEIGHT 120
+#define C3_MIPI_CSI2_DEFAULT_WIDTH 1920
+#define C3_MIPI_CSI2_DEFAULT_HEIGHT 1080
+#define C3_MIPI_CSI2_DEFAULT_FMT MEDIA_BUS_FMT_SRGGB10_1X10
+
+/* C3 CSI-2 pad list */
+enum {
+ C3_MIPI_CSI2_PAD_SINK,
+ C3_MIPI_CSI2_PAD_SRC,
+ C3_MIPI_CSI2_PAD_MAX
+};
+
+/*
+ * struct c3_csi_info - MIPI CSI2 information
+ *
+ * @clocks: array of MIPI CSI2 clock names
+ * @clock_num: number of clocks used by this variant
+ */
+struct c3_csi_info {
+ char *clocks[MIPI_CSI2_CLOCK_NUM_MAX];
+ u32 clock_num;
+};
+
+/*
+ * struct c3_csi_device - MIPI CSI2 platform device
+ *
+ * @dev: pointer to the struct device
+ * @aphy: MIPI CSI2 aphy register address
+ * @dphy: MIPI CSI2 dphy register address
+ * @host: MIPI CSI2 host register address
+ * @clks: array of MIPI CSI2 clocks
+ * @sd: MIPI CSI2 sub-device
+ * @pads: MIPI CSI2 sub-device pads
+ * @notifier: notifier to register on the v4l2-async API
+ * @src_pad: source sub-device pad
+ * @bus: MIPI CSI2 bus information
+ * @info: version-specific MIPI CSI2 information
+ */
+struct c3_csi_device {
+ struct device *dev;
+ void __iomem *aphy;
+ void __iomem *dphy;
+ void __iomem *host;
+ struct clk_bulk_data clks[MIPI_CSI2_CLOCK_NUM_MAX];
+
+ struct v4l2_subdev sd;
+ struct media_pad pads[C3_MIPI_CSI2_PAD_MAX];
+ struct v4l2_async_notifier notifier;
+ struct media_pad *src_pad;
+ struct v4l2_mbus_config_mipi_csi2 bus;
+
+ const struct c3_csi_info *info;
+};
+
+static const u32 c3_mipi_csi_formats[] = {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+};
+
+/* Hardware configuration */
+
+static void c3_mipi_csi_write(struct c3_csi_device *csi, u32 reg, u32 val)
+{
+ void __iomem *addr;
+
+ switch (CSI2_SUBMD(reg)) {
+ case SUBMD_APHY:
+ addr = csi->aphy + CSI2_REG_ADDR(reg);
+ break;
+ case SUBMD_DPHY:
+ addr = csi->dphy + CSI2_REG_ADDR(reg);
+ break;
+ case SUBMD_HOST:
+ addr = csi->host + CSI2_REG_ADDR(reg);
+ break;
+ default:
+ dev_err(csi->dev, "Invalid sub-module: %lu\n", CSI2_SUBMD(reg));
+ return;
+ }
+
+ writel(val, addr);
+}
+
+static void c3_mipi_csi_cfg_aphy(struct c3_csi_device *csi)
+{
+ c3_mipi_csi_write(csi, CSI_PHY_CNTL0,
+ CSI_PHY_CNTL0_HS_LP_BIAS_EN |
+ CSI_PHY_CNTL0_HS_RX_TRIM_11 |
+ CSI_PHY_CNTL0_LP_LOW_VTH_2 |
+ CSI_PHY_CNTL0_LP_HIGH_VTH_4 |
+ CSI_PHY_CNTL0_DATA_LANE0_HS_DIG_EN |
+ CSI_PHY_CNTL0_DATA_LANE1_HS_DIG_EN |
+ CSI_PHY_CNTL0_CLK0_LANE_HS_DIG_EN |
+ CSI_PHY_CNTL0_DATA_LANE2_HS_DIG_EN |
+ CSI_PHY_CNTL0_DATA_LANE3_HS_DIG_EN);
+
+ c3_mipi_csi_write(csi, CSI_PHY_CNTL1,
+ CSI_PHY_CNTL1_HS_EQ_CAP_SMALL |
+ CSI_PHY_CNTL1_HS_EQ_RES_MED |
+ CSI_PHY_CNTL1_CLK_CHN_EQ_MAX_GAIN |
+ CSI_PHY_CNTL1_DATA_CHN_EQ_MAX_GAIN |
+ CSI_PHY_CNTL1_COM_BG_EN |
+ CSI_PHY_CNTL1_HS_SYNC_EN);
+}
+
+static void c3_mipi_csi_cfg_dphy(struct c3_csi_device *csi, s64 rate)
+{
+ u32 val;
+ u32 settle;
+
+ /* Calculate the high-speed RX settle count from the per-lane bit rate */
+ val = DIV_ROUND_UP_ULL(1000000000, rate);
+ settle = (16 * val + 230) / 10;
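+ /*
+ * val is the unit interval in ns (rounded up), since rate is the
+ * per-lane bit rate in Hz; e.g. at the 1.5 Gbps limit enforced in
+ * c3_mipi_csi_start_stream(), val = 1 and settle = 246 / 10 = 24
+ * counter cycles.
+ */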
+
+ c3_mipi_csi_write(csi, MIPI_PHY_CLK_LANE_CTRL,
+ MIPI_PHY_CLK_LANE_CTRL_HS_RX_EN |
+ MIPI_PHY_CLK_LANE_CTRL_END_EN |
+ MIPI_PHY_CLK_LANE_CTRL_LPEN_DIS |
+ MIPI_PHY_CLK_LANE_CTRL_TCLK_ZERO_EN |
+ MIPI_PHY_CLK_LANE_CTRL_TCLK_ZERO_HS_8);
+
+ c3_mipi_csi_write(csi, MIPI_PHY_TCLK_MISS, MIPI_PHY_TCLK_MISS_CYCLES_9);
+ c3_mipi_csi_write(csi, MIPI_PHY_TCLK_SETTLE,
+ MIPI_PHY_TCLK_SETTLE_CYCLES_31);
+ c3_mipi_csi_write(csi, MIPI_PHY_THS_EXIT, MIPI_PHY_THS_EXIT_CYCLES_8);
+ c3_mipi_csi_write(csi, MIPI_PHY_THS_SKIP, MIPI_PHY_THS_SKIP_CYCLES_10);
+ c3_mipi_csi_write(csi, MIPI_PHY_THS_SETTLE, settle);
+ c3_mipi_csi_write(csi, MIPI_PHY_TINIT, MIPI_PHY_TINIT_CYCLES_20000);
+ c3_mipi_csi_write(csi, MIPI_PHY_TMBIAS, MIPI_PHY_TMBIAS_CYCLES_256);
+ c3_mipi_csi_write(csi, MIPI_PHY_TULPS_C, MIPI_PHY_TULPS_C_CYCLES_4096);
+ c3_mipi_csi_write(csi, MIPI_PHY_TULPS_S, MIPI_PHY_TULPS_S_CYCLES_256);
+ c3_mipi_csi_write(csi, MIPI_PHY_TLP_EN_W, MIPI_PHY_TLP_EN_W_CYCLES_12);
+ c3_mipi_csi_write(csi, MIPI_PHY_TLPOK, MIPI_PHY_TLPOK_CYCLES_256);
+ c3_mipi_csi_write(csi, MIPI_PHY_TWD_INIT,
+ MIPI_PHY_TWD_INIT_DOG_0X400000);
+ c3_mipi_csi_write(csi, MIPI_PHY_TWD_HS, MIPI_PHY_TWD_HS_DOG_0X400000);
+
+ c3_mipi_csi_write(csi, MIPI_PHY_DATA_LANE_CTRL1,
+ MIPI_PHY_DATA_LANE_CTRL1_INSERT_ERRESC |
+ MIPI_PHY_DATA_LANE_CTRL1_HS_SYNC_CHK_EN |
+ MIPI_PHY_DATA_LANE_CTRL1_PIPE_ALL_EN |
+ MIPI_PHY_DATA_LANE_CTRL1_PIPE_DELAY_3);
+
+ /* Set the order of lanes */
+ c3_mipi_csi_write(csi, MIPI_PHY_MUX_CTRL0,
+ MIPI_PHY_MUX_CTRL0_SFEN3_SRC_LANE3 |
+ MIPI_PHY_MUX_CTRL0_SFEN2_SRC_LANE2 |
+ MIPI_PHY_MUX_CTRL0_SFEN1_SRC_LANE1 |
+ MIPI_PHY_MUX_CTRL0_SFEN0_SRC_LANE0);
+
+ c3_mipi_csi_write(csi, MIPI_PHY_MUX_CTRL1,
+ MIPI_PHY_MUX_CTRL1_LANE3_SRC_SFEN3 |
+ MIPI_PHY_MUX_CTRL1_LANE2_SRC_SFEN2 |
+ MIPI_PHY_MUX_CTRL1_LANE1_SRC_SFEN1 |
+ MIPI_PHY_MUX_CTRL1_LANE0_SRC_SFEN0);
+
+ /* Enable digital data and clock lanes */
+ c3_mipi_csi_write(csi, MIPI_PHY_CTRL,
+ MIPI_PHY_CTRL_DATA_LANE0_EN |
+ MIPI_PHY_CTRL_DATA_LANE1_EN |
+ MIPI_PHY_CTRL_DATA_LANE2_EN |
+ MIPI_PHY_CTRL_DATA_LANE3_EN |
+ MIPI_PHY_CTRL_CLOCK_LANE_EN);
+}
+
+static void c3_mipi_csi_cfg_host(struct c3_csi_device *csi)
+{
+ /* Reset CSI-2 controller output */
+ c3_mipi_csi_write(csi, CSI2_HOST_CSI2_RESETN,
+ CSI2_HOST_CSI2_RESETN_ACTIVE);
+ c3_mipi_csi_write(csi, CSI2_HOST_CSI2_RESETN,
+ CSI2_HOST_CSI2_RESETN_EXIT);
+
+ /* Set data lane number (the register value is the lane count minus one) */
+ c3_mipi_csi_write(csi, CSI2_HOST_N_LANES, csi->bus.num_data_lanes - 1);
+}
+
+static int c3_mipi_csi_start_stream(struct c3_csi_device *csi,
+ struct v4l2_subdev *src_sd)
+{
+ s64 link_freq;
+ s64 lane_rate;
+
+ link_freq = v4l2_get_link_freq(src_sd->ctrl_handler, 0, 0);
+ if (link_freq < 0) {
+ dev_err(csi->dev,
+ "Unable to obtain link frequency: %lld\n", link_freq);
+ return link_freq;
+ }
+
+ /* D-PHY is double data rate: two bits per lane per link clock cycle */
+ lane_rate = link_freq * 2;
+ if (lane_rate > 1500000000) {
+ dev_err(csi->dev, "Invalid lane rate: %lld\n", lane_rate);
+ return -EINVAL;
+ }
+
+ c3_mipi_csi_cfg_aphy(csi);
+ c3_mipi_csi_cfg_dphy(csi, lane_rate);
+ c3_mipi_csi_cfg_host(csi);
+
+ return 0;
+}
+
+static int c3_mipi_csi_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct c3_csi_device *csi = v4l2_get_subdevdata(sd);
+ struct media_pad *sink_pad;
+ struct v4l2_subdev *src_sd;
+ int ret;
+
+ sink_pad = &csi->pads[C3_MIPI_CSI2_PAD_SINK];
+ csi->src_pad = media_pad_remote_pad_unique(sink_pad);
+ if (IS_ERR(csi->src_pad)) {
+ dev_dbg(csi->dev, "Failed to get source pad for MIPI CSI-2\n");
+ return -EPIPE;
+ }
+
+ src_sd = media_entity_to_v4l2_subdev(csi->src_pad->entity);
+
+ ret = pm_runtime_resume_and_get(csi->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = c3_mipi_csi_start_stream(csi, src_sd);
+ if (ret) {
+ pm_runtime_put(csi->dev);
+ return ret;
+ }
+
+ ret = v4l2_subdev_enable_streams(src_sd, csi->src_pad->index, BIT(0));
+ if (ret) {
+ pm_runtime_put(csi->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int c3_mipi_csi_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct c3_csi_device *csi = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *src_sd;
+
+ if (csi->src_pad) {
+ src_sd = media_entity_to_v4l2_subdev(csi->src_pad->entity);
+ v4l2_subdev_disable_streams(src_sd, csi->src_pad->index,
+ BIT(0));
+ }
+ csi->src_pad = NULL;
+
+ pm_runtime_put(csi->dev);
+
+ return 0;
+}
+
+static int c3_mipi_csi_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct v4l2_mbus_framefmt *fmt;
+
+ switch (code->pad) {
+ case C3_MIPI_CSI2_PAD_SINK:
+ if (code->index >= ARRAY_SIZE(c3_mipi_csi_formats))
+ return -EINVAL;
+
+ code->code = c3_mipi_csi_formats[code->index];
+ break;
+ case C3_MIPI_CSI2_PAD_SRC:
+ if (code->index)
+ return -EINVAL;
+
+ fmt = v4l2_subdev_state_get_format(state, code->pad);
+ code->code = fmt->code;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int c3_mipi_csi_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt;
+ unsigned int i;
+
+ if (format->pad != C3_MIPI_CSI2_PAD_SINK)
+ return v4l2_subdev_get_fmt(sd, state, format);
+
+ fmt = v4l2_subdev_state_get_format(state, format->pad);
+
+ for (i = 0; i < ARRAY_SIZE(c3_mipi_csi_formats); i++) {
+ if (format->format.code == c3_mipi_csi_formats[i]) {
+ fmt->code = c3_mipi_csi_formats[i];
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(c3_mipi_csi_formats))
+ fmt->code = c3_mipi_csi_formats[0];
+
+ fmt->width = clamp_t(u32, format->format.width,
+ C3_MIPI_CSI2_MIN_WIDTH, C3_MIPI_CSI2_MAX_WIDTH);
+ fmt->height = clamp_t(u32, format->format.height,
+ C3_MIPI_CSI2_MIN_HEIGHT, C3_MIPI_CSI2_MAX_HEIGHT);
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+ fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ format->format = *fmt;
+
+ /* Synchronize the format to source pad */
+ fmt = v4l2_subdev_state_get_format(state, C3_MIPI_CSI2_PAD_SRC);
+ *fmt = format->format;
+
+ return 0;
+}
+
+static int c3_mipi_csi_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+
+ sink_fmt = v4l2_subdev_state_get_format(state, C3_MIPI_CSI2_PAD_SINK);
+ src_fmt = v4l2_subdev_state_get_format(state, C3_MIPI_CSI2_PAD_SRC);
+
+ sink_fmt->width = C3_MIPI_CSI2_DEFAULT_WIDTH;
+ sink_fmt->height = C3_MIPI_CSI2_DEFAULT_HEIGHT;
+ sink_fmt->field = V4L2_FIELD_NONE;
+ sink_fmt->code = C3_MIPI_CSI2_DEFAULT_FMT;
+ sink_fmt->colorspace = V4L2_COLORSPACE_RAW;
+ sink_fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ *src_fmt = *sink_fmt;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops c3_mipi_csi_pad_ops = {
+ .enum_mbus_code = c3_mipi_csi_enum_mbus_code,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = c3_mipi_csi_set_fmt,
+ .enable_streams = c3_mipi_csi_enable_streams,
+ .disable_streams = c3_mipi_csi_disable_streams,
+};
+
+static const struct v4l2_subdev_ops c3_mipi_csi_subdev_ops = {
+ .pad = &c3_mipi_csi_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops c3_mipi_csi_internal_ops = {
+ .init_state = c3_mipi_csi_init_state,
+};
+
+/* Media entity operations */
+static const struct media_entity_operations c3_mipi_csi_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* PM runtime */
+
+static int c3_mipi_csi_runtime_suspend(struct device *dev)
+{
+ struct c3_csi_device *csi = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(csi->info->clock_num, csi->clks);
+
+ return 0;
+}
+
+static int c3_mipi_csi_runtime_resume(struct device *dev)
+{
+ struct c3_csi_device *csi = dev_get_drvdata(dev);
+
+ return clk_bulk_prepare_enable(csi->info->clock_num, csi->clks);
+}
+
+static const struct dev_pm_ops c3_mipi_csi_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ RUNTIME_PM_OPS(c3_mipi_csi_runtime_suspend,
+ c3_mipi_csi_runtime_resume, NULL)
+};
+
+/* Probe/remove & platform driver */
+
+static int c3_mipi_csi_subdev_init(struct c3_csi_device *csi)
+{
+ struct v4l2_subdev *sd = &csi->sd;
+ int ret;
+
+ v4l2_subdev_init(sd, &c3_mipi_csi_subdev_ops);
+ sd->owner = THIS_MODULE;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->internal_ops = &c3_mipi_csi_internal_ops;
+ snprintf(sd->name, sizeof(sd->name), "%s", MIPI_CSI2_SUBDEV_NAME);
+
+ sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ sd->entity.ops = &c3_mipi_csi_entity_ops;
+
+ sd->dev = csi->dev;
+ v4l2_set_subdevdata(sd, csi);
+
+ csi->pads[C3_MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ csi->pads[C3_MIPI_CSI2_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&sd->entity, C3_MIPI_CSI2_PAD_MAX,
+ csi->pads);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret) {
+ media_entity_cleanup(&sd->entity);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void c3_mipi_csi_subdev_deinit(struct c3_csi_device *csi)
+{
+ v4l2_subdev_cleanup(&csi->sd);
+ media_entity_cleanup(&csi->sd.entity);
+}
+
+/* Subdev notifier registration */
+static int c3_mipi_csi_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asc)
+{
+ struct c3_csi_device *csi = v4l2_get_subdevdata(notifier->sd);
+ struct media_pad *sink = &csi->sd.entity.pads[C3_MIPI_CSI2_PAD_SINK];
+
+ return v4l2_create_fwnode_links_to_pad(sd, sink, MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static const struct v4l2_async_notifier_operations c3_mipi_csi_notify_ops = {
+ .bound = c3_mipi_csi_notify_bound,
+};
+
+static int c3_mipi_csi_async_register(struct c3_csi_device *csi)
+{
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct v4l2_async_connection *asc;
+ struct fwnode_handle *ep;
+ int ret;
+
+ v4l2_async_subdev_nf_init(&csi->notifier, &csi->sd);
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ return -ENOTCONN;
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ if (ret)
+ goto err_put_handle;
+
+ csi->bus = vep.bus.mipi_csi2;
+
+ asc = v4l2_async_nf_add_fwnode_remote(&csi->notifier, ep,
+ struct v4l2_async_connection);
+ if (IS_ERR(asc)) {
+ ret = PTR_ERR(asc);
+ goto err_put_handle;
+ }
+
+ csi->notifier.ops = &c3_mipi_csi_notify_ops;
+ ret = v4l2_async_nf_register(&csi->notifier);
+ if (ret)
+ goto err_cleanup_nf;
+
+ ret = v4l2_async_register_subdev(&csi->sd);
+ if (ret)
+ goto err_unregister_nf;
+
+ fwnode_handle_put(ep);
+
+ return 0;
+
+err_unregister_nf:
+ v4l2_async_nf_unregister(&csi->notifier);
+err_cleanup_nf:
+ v4l2_async_nf_cleanup(&csi->notifier);
+err_put_handle:
+ fwnode_handle_put(ep);
+ return ret;
+}
+
+static void c3_mipi_csi_async_unregister(struct c3_csi_device *csi)
+{
+ v4l2_async_unregister_subdev(&csi->sd);
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
+}
+
+static int c3_mipi_csi_ioremap_resource(struct c3_csi_device *csi)
+{
+ struct device *dev = csi->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ csi->aphy = devm_platform_ioremap_resource_byname(pdev, "aphy");
+ if (IS_ERR(csi->aphy))
+ return PTR_ERR(csi->aphy);
+
+ csi->dphy = devm_platform_ioremap_resource_byname(pdev, "dphy");
+ if (IS_ERR(csi->dphy))
+ return PTR_ERR(csi->dphy);
+
+ csi->host = devm_platform_ioremap_resource_byname(pdev, "host");
+ if (IS_ERR(csi->host))
+ return PTR_ERR(csi->host);
+
+ return 0;
+}
+
+static int c3_mipi_csi_get_clocks(struct c3_csi_device *csi)
+{
+ const struct c3_csi_info *info = csi->info;
+
+ for (unsigned int i = 0; i < info->clock_num; i++)
+ csi->clks[i].id = info->clocks[i];
+
+ return devm_clk_bulk_get(csi->dev, info->clock_num, csi->clks);
+}
+
+static int c3_mipi_csi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct c3_csi_device *csi;
+ int ret;
+
+ csi = devm_kzalloc(dev, sizeof(*csi), GFP_KERNEL);
+ if (!csi)
+ return -ENOMEM;
+
+ csi->info = of_device_get_match_data(dev);
+ csi->dev = dev;
+
+ ret = c3_mipi_csi_ioremap_resource(csi);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to ioremap resource\n");
+
+ ret = c3_mipi_csi_get_clocks(csi);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
+
+ platform_set_drvdata(pdev, csi);
+
+ pm_runtime_enable(dev);
+
+ ret = c3_mipi_csi_subdev_init(csi);
+ if (ret)
+ goto err_disable_runtime_pm;
+
+ ret = c3_mipi_csi_async_register(csi);
+ if (ret)
+ goto err_deinit_subdev;
+
+ return 0;
+
+err_deinit_subdev:
+ c3_mipi_csi_subdev_deinit(csi);
+err_disable_runtime_pm:
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static void c3_mipi_csi_remove(struct platform_device *pdev)
+{
+ struct c3_csi_device *csi = platform_get_drvdata(pdev);
+
+ c3_mipi_csi_async_unregister(csi);
+ c3_mipi_csi_subdev_deinit(csi);
+
+ pm_runtime_disable(&pdev->dev);
+}
+
+static const struct c3_csi_info c3_mipi_csi_info = {
+ .clocks = {"vapb", "phy0"},
+ .clock_num = 2,
+};
+
+static const struct of_device_id c3_mipi_csi_of_match[] = {
+ {
+ .compatible = "amlogic,c3-mipi-csi2",
+ .data = &c3_mipi_csi_info,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, c3_mipi_csi_of_match);
+
+static struct platform_driver c3_mipi_csi_driver = {
+ .probe = c3_mipi_csi_probe,
+ .remove = c3_mipi_csi_remove,
+ .driver = {
+ .name = "c3-mipi-csi2",
+ .of_match_table = c3_mipi_csi_of_match,
+ .pm = pm_ptr(&c3_mipi_csi_pm_ops),
+ },
+};
+
+module_platform_driver(c3_mipi_csi_driver);
+
+MODULE_AUTHOR("Keke Li <keke.li@amlogic.com>");
+MODULE_DESCRIPTION("Amlogic C3 MIPI CSI-2 receiver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
index 6a38a0fa0e2d..85d518823159 100644
--- a/drivers/media/platform/amphion/vdec.c
+++ b/drivers/media/platform/amphion/vdec.c
@@ -805,7 +805,7 @@ static void vdec_buf_done(struct vpu_inst *inst, struct vpu_frame_info *frame)
cur_fmt = vpu_get_format(inst, inst->cap_format.type);
vbuf = &vpu_buf->m2m_buf.vb;
if (vbuf->vb2_buf.index != frame->id)
- dev_err(inst->dev, "[%d] buffer id(%d, %d) dismatch\n",
+ dev_err(inst->dev, "[%d] buffer id(%d, %d) mismatch\n",
inst->id, vbuf->vb2_buf.index, frame->id);
if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_READY && vdec->params.display_delay_enable)
diff --git a/drivers/media/platform/amphion/vpu.h b/drivers/media/platform/amphion/vpu.h
index 22f0da26ccec..1451549c9dd2 100644
--- a/drivers/media/platform/amphion/vpu.h
+++ b/drivers/media/platform/amphion/vpu.h
@@ -162,7 +162,6 @@ struct vpu_core {
struct delayed_work msg_delayed_work;
struct kfifo msg_fifo;
void *msg_buffer;
- unsigned int msg_buffer_size;
struct vpu_dev *vpu;
void *iface;
diff --git a/drivers/media/platform/amphion/vpu_core.c b/drivers/media/platform/amphion/vpu_core.c
index 8df85c14ab3f..da00f5fc0e5d 100644
--- a/drivers/media/platform/amphion/vpu_core.c
+++ b/drivers/media/platform/amphion/vpu_core.c
@@ -250,6 +250,7 @@ static void vpu_core_get_vpu(struct vpu_core *core)
static int vpu_core_register(struct device *dev, struct vpu_core *core)
{
struct vpu_dev *vpu = dev_get_drvdata(dev);
+ unsigned int buffer_size;
int ret = 0;
dev_dbg(core->dev, "register core %s\n", vpu_core_type_desc(core->type));
@@ -263,14 +264,14 @@ static int vpu_core_register(struct device *dev, struct vpu_core *core)
}
INIT_WORK(&core->msg_work, vpu_msg_run_work);
INIT_DELAYED_WORK(&core->msg_delayed_work, vpu_msg_delayed_work);
- core->msg_buffer_size = roundup_pow_of_two(VPU_MSG_BUFFER_SIZE);
- core->msg_buffer = vzalloc(core->msg_buffer_size);
+ buffer_size = roundup_pow_of_two(VPU_MSG_BUFFER_SIZE);
+ core->msg_buffer = vzalloc(buffer_size);
if (!core->msg_buffer) {
dev_err(core->dev, "failed allocate buffer for fifo\n");
ret = -ENOMEM;
goto error;
}
- ret = kfifo_init(&core->msg_fifo, core->msg_buffer, core->msg_buffer_size);
+ ret = kfifo_init(&core->msg_fifo, core->msg_buffer, buffer_size);
if (ret) {
dev_err(core->dev, "failed init kfifo\n");
goto error;
diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
index 4769c053c6c2..feca7d4220ed 100644
--- a/drivers/media/platform/amphion/vpu_malone.c
+++ b/drivers/media/platform/amphion/vpu_malone.c
@@ -3,6 +3,7 @@
* Copyright 2020-2021 NXP
*/
+#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
@@ -25,6 +26,10 @@
#include "vpu_imx8q.h"
#include "vpu_malone.h"
+static bool low_latency;
+module_param(low_latency, bool, 0644);
+MODULE_PARM_DESC(low_latency, "Set low latency frame flush mode: 0 (disable) or 1 (enable)");
+
#define CMD_SIZE 25600
#define MSG_SIZE 25600
#define CODEC_SIZE 0x1000
@@ -68,6 +73,12 @@
#define MALONE_DEC_FMT_RV_MASK BIT(21)
+#define MALONE_VERSION_MASK 0xFFFFF
+#define MALONE_VERSION(maj, min, inc) \
+ (FIELD_PREP(0xF0000, maj) | FIELD_PREP(0xFF00, min) | FIELD_PREP(0xFF, inc))
+#define CHECK_VERSION(iface, maj, min) \
+ (FIELD_GET(MALONE_VERSION_MASK, (iface)->fw_version) >= MALONE_VERSION(maj, min, 0))
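+
+/*
+ * For example, CHECK_VERSION(iface, 1, 9) tests the low 20 bits of
+ * fw_version against MALONE_VERSION(1, 9, 0) = 0x10900, i.e. it is true
+ * for firmware 1.9.0 and newer.
+ */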
+
enum vpu_malone_stream_input_mode {
INVALID_MODE = 0,
FRAME_LVL,
@@ -332,6 +343,8 @@ struct vpu_dec_ctrl {
u32 buf_addr[VID_API_NUM_STREAMS];
};
+static const struct malone_padding_scode *get_padding_scode(u32 type, u32 fmt);
+
u32 vpu_malone_get_data_size(void)
{
return sizeof(struct vpu_dec_ctrl);
@@ -654,9 +667,15 @@ static int vpu_malone_set_params(struct vpu_shared_addr *shared,
hc->jpg[instance].jpg_mjpeg_interlaced = 0;
}
- hc->codec_param[instance].disp_imm = params->display_delay_enable ? 1 : 0;
- if (malone_format != MALONE_FMT_AVC)
+ if (params->display_delay_enable &&
+ get_padding_scode(SCODE_PADDING_BUFFLUSH, params->codec_format))
+ hc->codec_param[instance].disp_imm = 1;
+ else
+ hc->codec_param[instance].disp_imm = 0;
+
+ if (params->codec_format == V4L2_PIX_FMT_HEVC && !CHECK_VERSION(iface, 1, 9))
hc->codec_param[instance].disp_imm = 0;
+
hc->codec_param[instance].dbglog_enable = 0;
iface->dbglog_desc.level = 0;
@@ -1023,6 +1042,7 @@ static const struct malone_padding_scode padding_scodes[] = {
{SCODE_PADDING_EOS, V4L2_PIX_FMT_JPEG, {0x0, 0x0}},
{SCODE_PADDING_BUFFLUSH, V4L2_PIX_FMT_H264, {0x15010000, 0x0}},
{SCODE_PADDING_BUFFLUSH, V4L2_PIX_FMT_H264_MVC, {0x15010000, 0x0}},
+ {SCODE_PADDING_BUFFLUSH, V4L2_PIX_FMT_HEVC, {0x3e010000, 0x20}},
};
static const struct malone_padding_scode padding_scode_dft = {0x0, 0x0};
@@ -1057,8 +1077,11 @@ static int vpu_malone_add_padding_scode(struct vpu_buffer *stream_buffer,
int ret;
ps = get_padding_scode(scode_type, pixelformat);
- if (!ps)
+ if (!ps) {
+ if (scode_type == SCODE_PADDING_BUFFLUSH)
+ return 0;
return -EINVAL;
+ }
wptr = readl(&str_buf->wptr);
if (wptr < stream_buffer->phys || wptr > stream_buffer->phys + stream_buffer->length)
@@ -1562,7 +1585,15 @@ static int vpu_malone_input_frame_data(struct vpu_malone_str_buffer __iomem *str
vpu_malone_update_wptr(str_buf, wptr);
- if (disp_imm && !vpu_vb_is_codecconfig(vbuf)) {
+ /*
+ * Enable the low-latency flush mode when the display delay is set to 0
+ * (disp_imm), or the low-latency frame flush mode when the low_latency
+ * module parameter is set to 1.
+ * Either mode requires padding data to be appended to each frame, but no
+ * padding may be inserted between the sequence header and the frame itself.
+ * These modes are currently only supported for the H264 and HEVC formats;
+ * for other formats, vpu_malone_add_scode() simply returns 0.
+ */
+ if ((disp_imm || low_latency) && !vpu_vb_is_codecconfig(vbuf)) {
ret = vpu_malone_add_scode(inst->core->iface,
inst->id,
&inst->stream_buffer,
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index 0d1c39347529..a05a744cbb75 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -1072,16 +1072,12 @@ static int isi_formats_init(struct atmel_isi *isi)
return -ENXIO;
isi->num_user_formats = num_fmts;
- isi->user_formats = devm_kcalloc(isi->dev,
- num_fmts, sizeof(struct isi_format *),
- GFP_KERNEL);
+ isi->user_formats = devm_kmemdup_array(isi->dev, isi_fmts, num_fmts,
+ sizeof(*isi_fmts), GFP_KERNEL);
if (!isi->user_formats)
return -ENOMEM;
- memcpy(isi->user_formats, isi_fmts,
- num_fmts * sizeof(struct isi_format *));
isi->current_fmt = isi->user_formats[0];
-
return 0;
}
diff --git a/drivers/media/platform/imagination/e5010-jpeg-enc.c b/drivers/media/platform/imagination/e5010-jpeg-enc.c
index c194f830577f..ae868d9f73e1 100644
--- a/drivers/media/platform/imagination/e5010-jpeg-enc.c
+++ b/drivers/media/platform/imagination/e5010-jpeg-enc.c
@@ -1057,8 +1057,11 @@ static int e5010_probe(struct platform_device *pdev)
e5010->vdev->lock = &e5010->mutex;
ret = v4l2_device_register(dev, &e5010->v4l2_dev);
- if (ret)
- return dev_err_probe(dev, ret, "failed to register v4l2 device\n");
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to register v4l2 device\n");
+ goto fail_after_video_device_alloc;
+ }
+
e5010->m2m_dev = v4l2_m2m_init(&e5010_m2m_ops);
if (IS_ERR(e5010->m2m_dev)) {
@@ -1118,6 +1121,8 @@ fail_after_video_register_device:
v4l2_m2m_release(e5010->m2m_dev);
fail_after_v4l2_register:
v4l2_device_unregister(&e5010->v4l2_dev);
+fail_after_video_device_alloc:
+ video_device_release(e5010->vdev);
return ret;
}
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
index 834d2a354692..7eb12449b63a 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
@@ -1026,6 +1026,7 @@ static void mtk_jpeg_dec_device_run(void *priv)
spin_lock_irqsave(&jpeg->hw_lock, flags);
mtk_jpeg_dec_reset(jpeg->reg_base);
mtk_jpeg_dec_set_config(jpeg->reg_base,
+ jpeg->variant->support_34bit,
&jpeg_src_buf->dec_param,
jpeg_src_buf->bs_size,
&bs,
@@ -1570,7 +1571,8 @@ static irqreturn_t mtk_jpeg_enc_done(struct mtk_jpeg_dev *jpeg)
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- result_size = mtk_jpeg_enc_get_file_size(jpeg->reg_base);
+ result_size = mtk_jpeg_enc_get_file_size(jpeg->reg_base,
+ jpeg->variant->support_34bit);
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, result_size);
buf_state = VB2_BUF_STATE_DONE;
@@ -1770,6 +1772,7 @@ retry_select:
ctx->total_frame_num++;
mtk_jpeg_dec_reset(comp_jpeg[hw_id]->reg_base);
mtk_jpeg_dec_set_config(comp_jpeg[hw_id]->reg_base,
+ jpeg->variant->support_34bit,
&jpeg_src_buf->dec_param,
jpeg_src_buf->bs_size,
&bs,
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h
index 8877eb39e807..02ed0ed5b736 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h
@@ -34,6 +34,8 @@
#define MTK_JPEG_MAX_EXIF_SIZE (64 * 1024)
+#define MTK_JPEG_ADDR_MASK GENMASK(1, 0)
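+
+/*
+ * MTK_JPEG_ADDR_MASK covers the top two bits of a 34-bit DMA address:
+ * e.g. for address 0x3fffff000, the low 32 bits go to the base register
+ * and FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(addr)) = 0x3 goes to
+ * the matching _EXT register.
+ */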
+
/**
* enum mtk_jpeg_ctx_state - states of the context state machine
* @MTK_JPEG_INIT: current state is initialized
@@ -62,6 +64,7 @@ enum mtk_jpeg_ctx_state {
* @cap_q_default_fourcc: capture queue default fourcc
* @multi_core: mark jpeg hw is multi_core or not
* @jpeg_worker: jpeg dec or enc worker
+ * @support_34bit: flag to check support for 34-bit DMA address
*/
struct mtk_jpeg_variant {
struct clk_bulk_data *clks;
@@ -78,6 +81,7 @@ struct mtk_jpeg_variant {
u32 cap_q_default_fourcc;
bool multi_core;
void (*jpeg_worker)(struct work_struct *work);
+ bool support_34bit;
};
struct mtk_jpeg_src_buf {
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
index 2c5d74939d0a..e78e1d11093c 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
@@ -5,6 +5,8 @@
* Rick Chang <rick.chang@mediatek.com>
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -279,23 +281,43 @@ static void mtk_jpeg_dec_set_brz_factor(void __iomem *base, u8 yscale_w,
writel(val, base + JPGDEC_REG_BRZ_FACTOR);
}
-static void mtk_jpeg_dec_set_dst_bank0(void __iomem *base, u32 addr_y,
- u32 addr_u, u32 addr_v)
+static void mtk_jpeg_dec_set_dst_bank0(void __iomem *base, bool support_34bit,
+ dma_addr_t addr_y, dma_addr_t addr_u, dma_addr_t addr_v)
{
+ u32 val;
+
mtk_jpeg_verify_align(addr_y, 16, JPGDEC_REG_DEST_ADDR0_Y);
- writel(addr_y, base + JPGDEC_REG_DEST_ADDR0_Y);
+ writel(lower_32_bits(addr_y), base + JPGDEC_REG_DEST_ADDR0_Y);
mtk_jpeg_verify_align(addr_u, 16, JPGDEC_REG_DEST_ADDR0_U);
- writel(addr_u, base + JPGDEC_REG_DEST_ADDR0_U);
+ writel(lower_32_bits(addr_u), base + JPGDEC_REG_DEST_ADDR0_U);
mtk_jpeg_verify_align(addr_v, 16, JPGDEC_REG_DEST_ADDR0_V);
- writel(addr_v, base + JPGDEC_REG_DEST_ADDR0_V);
+ writel(lower_32_bits(addr_v), base + JPGDEC_REG_DEST_ADDR0_V);
+ if (support_34bit) {
+ val = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(addr_y));
+ writel(val, base + JPGDEC_REG_DEST_ADDR0_Y_EXT);
+ val = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(addr_u));
+ writel(val, base + JPGDEC_REG_DEST_ADDR0_U_EXT);
+ val = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(addr_v));
+ writel(val, base + JPGDEC_REG_DEST_ADDR0_V_EXT);
+ }
}
-static void mtk_jpeg_dec_set_dst_bank1(void __iomem *base, u32 addr_y,
- u32 addr_u, u32 addr_v)
+static void mtk_jpeg_dec_set_dst_bank1(void __iomem *base, bool support_34bit,
+ dma_addr_t addr_y, dma_addr_t addr_u, dma_addr_t addr_v)
{
- writel(addr_y, base + JPGDEC_REG_DEST_ADDR1_Y);
- writel(addr_u, base + JPGDEC_REG_DEST_ADDR1_U);
- writel(addr_v, base + JPGDEC_REG_DEST_ADDR1_V);
+ u32 val;
+
+ writel(lower_32_bits(addr_y), base + JPGDEC_REG_DEST_ADDR1_Y);
+ writel(lower_32_bits(addr_u), base + JPGDEC_REG_DEST_ADDR1_U);
+ writel(lower_32_bits(addr_v), base + JPGDEC_REG_DEST_ADDR1_V);
+ if (support_34bit) {
+ val = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(addr_y));
+ writel(val, base + JPGDEC_REG_DEST_ADDR1_Y_EXT);
+ val = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(addr_u));
+ writel(val, base + JPGDEC_REG_DEST_ADDR1_U_EXT);
+ val = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(addr_v));
+ writel(val, base + JPGDEC_REG_DEST_ADDR1_V_EXT);
+ }
}
static void mtk_jpeg_dec_set_mem_stride(void __iomem *base, u32 stride_y,
@@ -322,18 +344,30 @@ static void mtk_jpeg_dec_set_dec_mode(void __iomem *base, u32 mode)
writel(mode & 0x03, base + JPGDEC_REG_OPERATION_MODE);
}
-static void mtk_jpeg_dec_set_bs_write_ptr(void __iomem *base, u32 ptr)
+static void mtk_jpeg_dec_set_bs_write_ptr(void __iomem *base, bool support_34bit, dma_addr_t ptr)
{
+ u32 val;
+
mtk_jpeg_verify_align(ptr, 16, JPGDEC_REG_FILE_BRP);
- writel(ptr, base + JPGDEC_REG_FILE_BRP);
+ writel(lower_32_bits(ptr), base + JPGDEC_REG_FILE_BRP);
+ if (support_34bit) {
+ val = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(ptr));
+ writel(val, base + JPGDEC_REG_FILE_BRP_EXT);
+ }
}
-static void mtk_jpeg_dec_set_bs_info(void __iomem *base, u32 addr, u32 size,
- u32 bitstream_size)
+static void mtk_jpeg_dec_set_bs_info(void __iomem *base, bool support_34bit,
+ dma_addr_t addr, u32 size, u32 bitstream_size)
{
+ u32 val;
+
mtk_jpeg_verify_align(addr, 16, JPGDEC_REG_FILE_ADDR);
mtk_jpeg_verify_align(size, 128, JPGDEC_REG_FILE_TOTAL_SIZE);
- writel(addr, base + JPGDEC_REG_FILE_ADDR);
+ writel(lower_32_bits(addr), base + JPGDEC_REG_FILE_ADDR);
+ if (support_34bit) {
+ val = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(addr));
+ writel(val, base + JPGDEC_REG_FILE_ADDR_EXT);
+ }
writel(size, base + JPGDEC_REG_FILE_TOTAL_SIZE);
writel(bitstream_size, base + JPGDEC_REG_BIT_STREAM_SIZE);
}
@@ -404,6 +438,7 @@ static void mtk_jpeg_dec_set_sampling_factor(void __iomem *base, u32 comp_num,
}
void mtk_jpeg_dec_set_config(void __iomem *base,
+ bool support_34bits,
struct mtk_jpeg_dec_param *cfg,
u32 bitstream_size,
struct mtk_jpeg_bs *bs,
@@ -413,8 +448,8 @@ void mtk_jpeg_dec_set_config(void __iomem *base,
mtk_jpeg_dec_set_dec_mode(base, 0);
mtk_jpeg_dec_set_comp0_du(base, cfg->unit_num);
mtk_jpeg_dec_set_total_mcu(base, cfg->total_mcu);
- mtk_jpeg_dec_set_bs_info(base, bs->str_addr, bs->size, bitstream_size);
- mtk_jpeg_dec_set_bs_write_ptr(base, bs->end_addr);
+ mtk_jpeg_dec_set_bs_info(base, support_34bits, bs->str_addr, bs->size, bitstream_size);
+ mtk_jpeg_dec_set_bs_write_ptr(base, support_34bits, bs->end_addr);
mtk_jpeg_dec_set_du_membership(base, cfg->membership, 1,
(cfg->comp_num == 1) ? 1 : 0);
mtk_jpeg_dec_set_comp_id(base, cfg->comp_id[0], cfg->comp_id[1],
@@ -432,9 +467,9 @@ void mtk_jpeg_dec_set_config(void __iomem *base,
cfg->mem_stride[1]);
mtk_jpeg_dec_set_img_stride(base, cfg->img_stride[0],
cfg->img_stride[1]);
- mtk_jpeg_dec_set_dst_bank0(base, fb->plane_addr[0],
+ mtk_jpeg_dec_set_dst_bank0(base, support_34bits, fb->plane_addr[0],
fb->plane_addr[1], fb->plane_addr[2]);
- mtk_jpeg_dec_set_dst_bank1(base, 0, 0, 0);
+ mtk_jpeg_dec_set_dst_bank1(base, support_34bits, 0, 0, 0);
mtk_jpeg_dec_set_dma_group(base, cfg->dma_mcu, cfg->dma_group,
cfg->dma_last_mcu);
mtk_jpeg_dec_set_pause_mcu_idx(base, cfg->total_mcu);
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h
index 8c31c6b12417..2948c9c300a4 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h
@@ -71,6 +71,7 @@ int mtk_jpeg_dec_fill_param(struct mtk_jpeg_dec_param *param);
u32 mtk_jpeg_dec_get_int_status(void __iomem *dec_reg_base);
u32 mtk_jpeg_dec_enum_result(u32 irq_result);
void mtk_jpeg_dec_set_config(void __iomem *base,
+ bool support_34bits,
struct mtk_jpeg_dec_param *cfg,
u32 bitstream_size,
struct mtk_jpeg_bs *bs,
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h
index 27b7711ca341..e94f52de7c69 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h
@@ -46,5 +46,13 @@
#define JPGDEC_REG_INTERRUPT_STATUS 0x0274
#define JPGDEC_REG_STATUS 0x0278
#define JPGDEC_REG_BIT_STREAM_SIZE 0x0344
+#define JPGDEC_REG_DEST_ADDR0_Y_EXT 0x0360
+#define JPGDEC_REG_DEST_ADDR0_U_EXT 0x0364
+#define JPGDEC_REG_DEST_ADDR0_V_EXT 0x0368
+#define JPGDEC_REG_DEST_ADDR1_Y_EXT 0x036c
+#define JPGDEC_REG_DEST_ADDR1_U_EXT 0x0370
+#define JPGDEC_REG_DEST_ADDR1_V_EXT 0x0374
+#define JPGDEC_REG_FILE_ADDR_EXT 0x0378
+#define JPGDEC_REG_FILE_BRP_EXT 0x037c
#endif /* _MTK_JPEG_REG_H */
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
index f8fa3b841ccf..9ab27aee302a 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
@@ -5,6 +5,8 @@
*
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -62,9 +64,9 @@ void mtk_jpeg_enc_reset(void __iomem *base)
}
EXPORT_SYMBOL_GPL(mtk_jpeg_enc_reset);
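+/*
+ * On 34-bit variants the DMA write-pointer register apparently holds the
+ * address right-shifted by two, so shift it back before subtracting the
+ * destination base to get the encoded file size (an inference from this
+ * patch, not from documented register semantics).
+ */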
-u32 mtk_jpeg_enc_get_file_size(void __iomem *base)
+u32 mtk_jpeg_enc_get_file_size(void __iomem *base, bool support_34bit)
{
- return readl(base + JPEG_ENC_DMA_ADDR0) -
+ return (readl(base + JPEG_ENC_DMA_ADDR0) << ((support_34bit) ? 2 : 0)) -
readl(base + JPEG_ENC_DST_ADDR0);
}
EXPORT_SYMBOL_GPL(mtk_jpeg_enc_get_file_size);
@@ -84,14 +86,24 @@ void mtk_jpeg_set_enc_src(struct mtk_jpeg_ctx *ctx, void __iomem *base,
{
int i;
dma_addr_t dma_addr;
+ u32 addr_ext;
+ bool support_34bit = ctx->jpeg->variant->support_34bit;
for (i = 0; i < src_buf->num_planes; i++) {
dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, i) +
src_buf->planes[i].data_offset;
- if (!i)
- writel(dma_addr, base + JPEG_ENC_SRC_LUMA_ADDR);
+ if (i == 0)
+ writel(lower_32_bits(dma_addr), base + JPEG_ENC_SRC_LUMA_ADDR);
else
- writel(dma_addr, base + JPEG_ENC_SRC_CHROMA_ADDR);
+ writel(lower_32_bits(dma_addr), base + JPEG_ENC_SRC_CHROMA_ADDR);
+
+ if (support_34bit) {
+ addr_ext = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(dma_addr));
+ if (i == 0)
+ writel(addr_ext, base + JPEG_ENC_SRC_LUMA_ADDR_EXT);
+ else
+ writel(addr_ext, base + JPEG_ENC_SRC_CHRO_ADDR_EXT);
+ }
}
}
EXPORT_SYMBOL_GPL(mtk_jpeg_set_enc_src);
@@ -103,6 +115,8 @@ void mtk_jpeg_set_enc_dst(struct mtk_jpeg_ctx *ctx, void __iomem *base,
size_t size;
u32 dma_addr_offset;
u32 dma_addr_offsetmask;
+ u32 addr_ext;
+ bool support_34bit = ctx->jpeg->variant->support_34bit;
dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
dma_addr_offset = ctx->enable_exif ? MTK_JPEG_MAX_EXIF_SIZE : 0;
@@ -113,6 +127,12 @@ void mtk_jpeg_set_enc_dst(struct mtk_jpeg_ctx *ctx, void __iomem *base,
writel(dma_addr_offsetmask & 0xf, base + JPEG_ENC_BYTE_OFFSET_MASK);
writel(dma_addr & ~0xf, base + JPEG_ENC_DST_ADDR0);
writel((dma_addr + size) & ~0xf, base + JPEG_ENC_STALL_ADDR0);
+
+ if (support_34bit) {
+ addr_ext = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(dma_addr));
+ writel(addr_ext, base + JPEG_ENC_DEST_ADDR0_EXT);
+ /* The stall registers hold the end address, i.e. dma_addr + size */
+ writel(FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(dma_addr + size)),
+ base + JPEG_ENC_STALL_ADDR0_EXT);
+ }
}
EXPORT_SYMBOL_GPL(mtk_jpeg_set_enc_dst);
@@ -278,7 +298,8 @@ static irqreturn_t mtk_jpegenc_hw_irq_handler(int irq, void *priv)
if (!(irq_status & JPEG_ENC_INT_STATUS_DONE))
dev_warn(jpeg->dev, "Jpg Enc occurs unknown Err.");
- result_size = mtk_jpeg_enc_get_file_size(jpeg->reg_base);
+ result_size = mtk_jpeg_enc_get_file_size(jpeg->reg_base,
+ ctx->jpeg->variant->support_34bit);
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, result_size);
buf_state = VB2_BUF_STATE_DONE;
v4l2_m2m_buf_done(src_buf, buf_state);
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.h
index 61c60e4e58ea..31ec9030ae88 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.h
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.h
@@ -68,6 +68,11 @@
#define JPEG_ENC_DCM_CTRL 0x300
#define JPEG_ENC_CODEC_SEL 0x314
#define JPEG_ENC_ULTRA_THRES 0x318
+#define JPEG_ENC_SRC_LUMA_ADDR_EXT 0x584
+#define JPEG_ENC_SRC_CHRO_ADDR_EXT 0x588
+#define JPEG_ENC_Q_TBL_ADDR_EXT 0x58C
+#define JPEG_ENC_DEST_ADDR0_EXT 0x590
+#define JPEG_ENC_STALL_ADDR0_EXT 0x594
/**
* struct mtk_jpeg_enc_qlt - JPEG encoder quality data
@@ -80,7 +85,7 @@ struct mtk_jpeg_enc_qlt {
};
void mtk_jpeg_enc_reset(void __iomem *base);
-u32 mtk_jpeg_enc_get_file_size(void __iomem *base);
+u32 mtk_jpeg_enc_get_file_size(void __iomem *base, bool support_34bit);
void mtk_jpeg_enc_start(void __iomem *enc_reg_base);
void mtk_jpeg_set_enc_src(struct mtk_jpeg_ctx *ctx, void __iomem *base,
struct vb2_buffer *src_buf);
diff --git a/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
index 28c998bd3a81..d0fd77dcf8e2 100644
--- a/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
@@ -342,7 +342,7 @@ static int mtk_mdp_try_crop(struct mtk_mdp_ctx *ctx, u32 type,
if (r->left & 1)
r->left -= 1;
- mtk_mdp_dbg(2, "[%d] crop l,t,w,h:%d,%d,%d,%d, max:%dx%d", ctx->id,
+ mtk_mdp_dbg(2, "[%d] crop (%d,%d)/%ux%u, max:%dx%d", ctx->id,
r->left, r->top, r->width,
r->height, max_w, max_h);
return 0;
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
index 935ae9825728..222611e03a06 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
@@ -12,8 +12,6 @@
#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk-img-ipi.h"
-struct platform_device *mdp_get_plat_device(struct platform_device *pdev);
-
struct mdp_cmdq_param {
struct img_config *config;
struct img_ipi_frameparam *param;
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
index f571f561f070..8de2c8e4d333 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
@@ -79,25 +79,6 @@ static struct platform_device *__get_pdev_by_id(struct platform_device *pdev,
return mdp_pdev;
}
-struct platform_device *mdp_get_plat_device(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *mdp_node;
- struct platform_device *mdp_pdev;
-
- mdp_node = of_parse_phandle(dev->of_node, MDP_PHANDLE_NAME, 0);
- if (!mdp_node) {
- dev_err(dev, "can't get node %s\n", MDP_PHANDLE_NAME);
- return NULL;
- }
-
- mdp_pdev = of_find_device_by_node(mdp_node);
- of_node_put(mdp_node);
-
- return mdp_pdev;
-}
-EXPORT_SYMBOL_GPL(mdp_get_plat_device);
-
int mdp_vpu_get_locked(struct mdp_dev *mdp)
{
int ret = 0;
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
index 657356f87743..644b223b2877 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
@@ -236,7 +236,7 @@ int mdp_try_crop(struct mdp_m2m_ctx *ctx, struct v4l2_rect *r,
u32 framew, frameh, walign, halign;
int ret;
- dev_dbg(dev, "%d target:%d, set:(%d,%d) %ux%u", ctx->id,
+ dev_dbg(dev, "%d target:%d, set:(%d,%d)/%ux%u", ctx->id,
s->target, s->r.left, s->r.top, s->r.width, s->r.height);
left = s->r.left;
@@ -275,7 +275,7 @@ int mdp_try_crop(struct mdp_m2m_ctx *ctx, struct v4l2_rect *r,
r->width = right - left;
r->height = bottom - top;
- dev_dbg(dev, "%d crop:(%d,%d) %ux%u", ctx->id,
+ dev_dbg(dev, "%d crop:(%d,%d)/%ux%u", ctx->id,
r->left, r->top, r->width, r->height);
return 0;
}
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
index ac568ed14fa2..aececca7ecf8 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
@@ -17,6 +17,7 @@
#define IS_VDEC_LAT_ARCH(hw_arch) ((hw_arch) >= MTK_VDEC_LAT_SINGLE_CORE)
#define IS_VDEC_INNER_RACING(capability) ((capability) & MTK_VCODEC_INNER_RACING)
+#define IS_VDEC_SUPPORT_EXT(capability) ((capability) & MTK_VDEC_IS_SUPPORT_EXT)
enum mtk_vcodec_dec_chip_name {
MTK_VDEC_INVAL = 0,
@@ -42,6 +43,7 @@ enum mtk_vdec_format_types {
MTK_VDEC_FORMAT_HEVC_FRAME = 0x1000,
MTK_VCODEC_INNER_RACING = 0x20000,
MTK_VDEC_IS_SUPPORT_10BIT = 0x40000,
+ MTK_VDEC_IS_SUPPORT_EXT = 0x80000,
};
/*
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
index afa224da0f41..d873159b9b30 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
@@ -152,8 +152,6 @@ static const struct mtk_stateless_control mtk_stateless_controls[] = {
.id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
.def = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
.max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
- .menu_skip_mask =
- BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE),
},
.codec_type = V4L2_PIX_FMT_HEVC_SLICE,
},
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
index 1ed0ccec5665..5b25e1679b51 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
@@ -30,6 +30,7 @@ enum vdec_h264_core_dec_err_type {
/**
* struct vdec_h264_slice_lat_dec_param - parameters for decode current frame
+ * (shared data between host and firmware)
*
* @sps: h264 sps syntax parameters
* @pps: h264 pps syntax parameters
@@ -48,7 +49,7 @@ struct vdec_h264_slice_lat_dec_param {
};
/**
- * struct vdec_h264_slice_info - decode information
+ * struct vdec_h264_slice_info - decode information (shared data between host and firmware)
*
* @nal_info: nal info of current picture
* @timeout: Decode timeout: 1 timeout, 0 no timeout
@@ -72,23 +73,22 @@ struct vdec_h264_slice_info {
/**
* struct vdec_h264_slice_vsi - shared memory for decode information exchange
- * between SCP and Host.
+ * between SCP and Host (shared data between host and firmware).
*
- * @wdma_err_addr: wdma error dma address
- * @wdma_start_addr: wdma start dma address
- * @wdma_end_addr: wdma end dma address
- * @slice_bc_start_addr: slice bc start dma address
- * @slice_bc_end_addr: slice bc end dma address
- * @row_info_start_addr: row info start dma address
- * @row_info_end_addr: row info end dma address
- * @trans_start: trans start dma address
- * @trans_end: trans end dma address
- * @wdma_end_addr_offset: wdma end address offset
+ * @wdma_err_addr: wdma error dma address
+ * @wdma_start_addr: wdma start dma address
+ * @wdma_end_addr: wdma end dma address
+ * @slice_bc_start_addr: slice bc start dma address
+ * @slice_bc_end_addr: slice bc end dma address
+ * @row_info_start_addr: row info start dma address
+ * @row_info_end_addr: row info end dma address
+ * @trans_start: trans start dma address
+ * @trans_end: trans end dma address
+ * @wdma_end_addr_offset: wdma end address offset
*
- * @mv_buf_dma: HW working motion vector buffer
- * dma address (AP-W, VPU-R)
- * @dec: decode information (AP-R, VPU-W)
- * @h264_slice_params: decode parameters for hw used
+ * @mv_buf_dma: HW working motion vector buffer
+ * @dec: decode information (AP-R, VPU-W)
+ * @h264_slice_params: decode parameters for hw used
*/
struct vdec_h264_slice_vsi {
/* LAT dec addr */
@@ -112,12 +112,12 @@ struct vdec_h264_slice_vsi {
* struct vdec_h264_slice_share_info - shared information used to exchange
* message between lat and core
*
- * @sps: sequence header information from user space
- * @dec_params: decoder params from user space
- * @h264_slice_params: decoder params used for hardware
- * @trans_start: trans start dma address
- * @trans_end: trans end dma address
- * @nal_info: nal info of current picture
+ * @sps: sequence header information from user space
+ * @dec_params: decoder params from user space
+ * @h264_slice_params: decoder params used for hardware
+ * @trans_start: trans start dma address
+ * @trans_end: trans end dma address
+ * @nal_info: nal info of current picture
*/
struct vdec_h264_slice_share_info {
struct v4l2_ctrl_h264_sps sps;
@@ -128,6 +128,86 @@ struct vdec_h264_slice_share_info {
u16 nal_info;
};
+/*
+ * struct vdec_h264_slice_mem - memory address and size
+ * (shared data between host and firmware)
+ */
+struct vdec_h264_slice_mem {
+ union {
+ u64 buf;
+ u64 dma_addr;
+ };
+ union {
+ size_t size;
+ u64 dma_addr_end;
+ };
+};
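+
+/*
+ * Each pair shares storage: the first word holds a buffer handle or a DMA
+ * start address, the second a size or the DMA end address, depending on
+ * how a given buffer is described to the firmware.
+ */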
+
+/**
+ * struct vdec_h264_slice_fb - frame buffer for decoding
+ * (shared data between host and firmware)
+ *
+ * @y: current luma buffer address info
+ * @c: current chroma buffer address info
+ */
+struct vdec_h264_slice_fb {
+ struct vdec_h264_slice_mem y;
+ struct vdec_h264_slice_mem c;
+};
+
+/**
+ * struct vdec_h264_slice_info_ext - extend decode information
+ * (shared data between host and firmware)
+ *
+ * @wdma_end_addr_offset: offset from buffer start
+ * @nal_info: nal info of current picture
+ * @timeout: decode timeout: 1 timeout, 0 no timeout
+ * @reserved: reserved
+ * @vdec_fb_va: vdec frame buffer struct virtual address
+ * @crc: hardware checksum, used to check the decode result
+ */
+struct vdec_h264_slice_info_ext {
+ u64 wdma_end_addr_offset;
+ u16 nal_info;
+ u16 timeout;
+ u32 reserved;
+ u64 vdec_fb_va;
+ u32 crc[8];
+};
+
+/**
+ * struct vdec_h264_slice_vsi_ext - extend shared memory for decode information exchange
+ * between SCP and Host (shared data between host and firmware).
+ *
+ * @bs: input buffer info
+ * @fb: current y/c buffer
+ *
+ * @ube: buffer used to share data between lat and core
+ * @trans: transcoded buffer used for core decode
+ * @row_info: row info buffer
+ * @err_map: error map buffer
+ * @slice_bc: slice buffer
+ *
+ * @mv_buf_dma: stores hardware motion vector data
+ * @dec: decode information (AP-R, VPU-W)
+ * @h264_slice_params: decode parameters used by the hardware
+ */
+struct vdec_h264_slice_vsi_ext {
+ /* LAT dec addr */
+ struct vdec_h264_slice_mem bs;
+ struct vdec_h264_slice_fb fb;
+
+ struct vdec_h264_slice_mem ube;
+ struct vdec_h264_slice_mem trans;
+ struct vdec_h264_slice_mem row_info;
+ struct vdec_h264_slice_mem err_map;
+ struct vdec_h264_slice_mem slice_bc;
+
+ struct vdec_h264_slice_mem mv_buf_dma[H264_MAX_MV_NUM];
+ struct vdec_h264_slice_info_ext dec;
+ struct vdec_h264_slice_lat_dec_param h264_slice_params;
+};
+
/**
* struct vdec_h264_slice_inst - h264 decoder instance
*
@@ -138,17 +218,21 @@ struct vdec_h264_slice_share_info {
* @vpu: VPU instance
* @vsi: vsi used for lat
* @vsi_core: vsi used for core
- *
- * @vsi_ctx: Local VSI data for this decoding context
+ * @vsi_ctx: vsi data for this decoding context
+ * @vsi_ext: extended vsi used for lat
+ * @vsi_core_ext: extended vsi used for core
+ * @vsi_ctx_ext: extended vsi data for this decoding context
* @h264_slice_param: the parameters that hardware use to decode
*
- * @resolution_changed:resolution changed
+ * @resolution_changed: resolution changed
* @realloc_mv_buf: reallocate mv buffer
* @cap_num_planes: number of capture queue plane
*
* @dpb: decoded picture buffer used to store reference
* buffer information
- *@is_field_bitstream: is field bitstream
+ * @is_field_bitstream: set when the bitstream is field-coded; only frame bitstreams are supported
+ *
+ * @decode: decode callback chosen at init time for the hardware architecture
*/
struct vdec_h264_slice_inst {
unsigned int slice_dec_num;
@@ -156,10 +240,18 @@ struct vdec_h264_slice_inst {
struct mtk_vcodec_mem pred_buf;
struct mtk_vcodec_mem mv_buf[H264_MAX_MV_NUM];
struct vdec_vpu_inst vpu;
- struct vdec_h264_slice_vsi *vsi;
- struct vdec_h264_slice_vsi *vsi_core;
-
- struct vdec_h264_slice_vsi vsi_ctx;
+ union {
+ struct {
+ struct vdec_h264_slice_vsi *vsi;
+ struct vdec_h264_slice_vsi *vsi_core;
+ struct vdec_h264_slice_vsi vsi_ctx;
+ };
+ struct {
+ struct vdec_h264_slice_vsi_ext *vsi_ext;
+ struct vdec_h264_slice_vsi_ext *vsi_core_ext;
+ struct vdec_h264_slice_vsi_ext vsi_ctx_ext;
+ };
+ };
struct vdec_h264_slice_lat_dec_param h264_slice_param;
unsigned int resolution_changed;
@@ -168,12 +260,15 @@ struct vdec_h264_slice_inst {
struct v4l2_h264_dpb_entry dpb[16];
bool is_field_bitstream;
+
+ int (*decode)(void *h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *unused, bool *res_chg);
};
static int vdec_h264_slice_fill_decode_parameters(struct vdec_h264_slice_inst *inst,
- struct vdec_h264_slice_share_info *share_info)
+ struct vdec_h264_slice_share_info *share_info,
+ struct vdec_h264_slice_lat_dec_param *slice_param)
{
- struct vdec_h264_slice_lat_dec_param *slice_param = &inst->vsi->h264_slice_params;
const struct v4l2_ctrl_h264_decode_params *dec_params;
const struct v4l2_ctrl_h264_scaling_matrix *src_matrix;
const struct v4l2_ctrl_h264_sps *sps;
@@ -266,9 +361,6 @@ static int get_vdec_sig_decode_parameters(struct vdec_h264_slice_inst *inst)
mtk_vdec_h264_get_ref_list(b0_reflist, v4l2_b0_reflist, reflist_builder.num_valid);
mtk_vdec_h264_get_ref_list(b1_reflist, v4l2_b1_reflist, reflist_builder.num_valid);
- memcpy(&inst->vsi_ctx.h264_slice_params, slice_param,
- sizeof(inst->vsi_ctx.h264_slice_params));
-
return 0;
}
@@ -392,68 +484,148 @@ static void vdec_h264_slice_get_crop_info(struct vdec_h264_slice_inst *inst,
cr->left, cr->top, cr->width, cr->height);
}
-static int vdec_h264_slice_init(struct mtk_vcodec_dec_ctx *ctx)
+static void vdec_h264_slice_setup_lat_buffer_ext(struct vdec_h264_slice_inst *inst,
+ struct mtk_vcodec_mem *bs,
+ struct vdec_lat_buf *lat_buf)
{
- struct vdec_h264_slice_inst *inst;
- int err, vsi_size;
+ struct mtk_vcodec_mem *mem;
+ int i;
- inst = kzalloc(sizeof(*inst), GFP_KERNEL);
- if (!inst)
- return -ENOMEM;
+ inst->vsi_ext->bs.dma_addr = (u64)bs->dma_addr;
+ inst->vsi_ext->bs.size = bs->size;
- inst->ctx = ctx;
+ for (i = 0; i < H264_MAX_MV_NUM; i++) {
+ mem = &inst->mv_buf[i];
+ inst->vsi_ext->mv_buf_dma[i].dma_addr = mem->dma_addr;
+ inst->vsi_ext->mv_buf_dma[i].size = mem->size;
+ }
+ inst->vsi_ext->ube.dma_addr = lat_buf->ctx->msg_queue.wdma_addr.dma_addr;
+ inst->vsi_ext->ube.size = lat_buf->ctx->msg_queue.wdma_addr.size;
- inst->vpu.id = SCP_IPI_VDEC_LAT;
- inst->vpu.core_id = SCP_IPI_VDEC_CORE;
- inst->vpu.ctx = ctx;
- inst->vpu.codec_type = ctx->current_codec;
- inst->vpu.capture_type = ctx->capture_fourcc;
+ inst->vsi_ext->row_info.dma_addr = 0;
+ inst->vsi_ext->row_info.size = 0;
- err = vpu_dec_init(&inst->vpu);
- if (err) {
- mtk_vdec_err(ctx, "vdec_h264 init err=%d", err);
- goto error_free_inst;
+ inst->vsi_ext->err_map.dma_addr = lat_buf->wdma_err_addr.dma_addr;
+ inst->vsi_ext->err_map.size = lat_buf->wdma_err_addr.size;
+
+ inst->vsi_ext->slice_bc.dma_addr = lat_buf->slice_bc_addr.dma_addr;
+ inst->vsi_ext->slice_bc.size = lat_buf->slice_bc_addr.size;
+
+ inst->vsi_ext->trans.dma_addr_end = inst->ctx->msg_queue.wdma_rptr_addr;
+ inst->vsi_ext->trans.dma_addr = inst->ctx->msg_queue.wdma_wptr_addr;
+}
+
+static int vdec_h264_slice_setup_core_buffer_ext(struct vdec_h264_slice_inst *inst,
+ struct vdec_h264_slice_share_info *share_info,
+ struct vdec_lat_buf *lat_buf)
+{
+ struct mtk_vcodec_mem *mem;
+ struct mtk_vcodec_dec_ctx *ctx = inst->ctx;
+ struct vb2_v4l2_buffer *vb2_v4l2;
+ struct vdec_fb *fb;
+ u64 y_fb_dma, c_fb_dma = 0;
+ int i;
+
+ fb = ctx->dev->vdec_pdata->get_cap_buffer(ctx);
+ if (!fb) {
+ mtk_vdec_err(ctx, "Unable to get a CAPTURE buffer for CAPTURE queue is empty.");
+ return -EBUSY;
}
- vsi_size = round_up(sizeof(struct vdec_h264_slice_vsi), VCODEC_DEC_ALIGNED_64);
- inst->vsi = inst->vpu.vsi;
- inst->vsi_core =
- (struct vdec_h264_slice_vsi *)(((char *)inst->vpu.vsi) + vsi_size);
- inst->resolution_changed = true;
- inst->realloc_mv_buf = true;
+ y_fb_dma = (u64)fb->base_y.dma_addr;
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1)
+ c_fb_dma = y_fb_dma + ctx->picinfo.fb_sz[0];
+ else
+ c_fb_dma = (u64)fb->base_c.dma_addr;
- mtk_vdec_debug(ctx, "lat struct size = %d,%d,%d,%d vsi: %d\n",
- (int)sizeof(struct mtk_h264_sps_param),
- (int)sizeof(struct mtk_h264_pps_param),
- (int)sizeof(struct vdec_h264_slice_lat_dec_param),
- (int)sizeof(struct mtk_h264_dpb_info),
- vsi_size);
- mtk_vdec_debug(ctx, "lat H264 instance >> %p, codec_type = 0x%x",
- inst, inst->vpu.codec_type);
+ mtk_vdec_debug(ctx, "[h264-core] y/c addr = 0x%llx 0x%llx", y_fb_dma, c_fb_dma);
- ctx->drv_handle = inst;
- return 0;
+ inst->vsi_core_ext->fb.y.dma_addr = y_fb_dma;
+ inst->vsi_core_ext->fb.y.size = ctx->picinfo.fb_sz[0];
+ inst->vsi_core_ext->fb.c.dma_addr = c_fb_dma;
+ inst->vsi_core_ext->fb.c.size = ctx->picinfo.fb_sz[1];
-error_free_inst:
- kfree(inst);
- return err;
+ inst->vsi_core_ext->dec.vdec_fb_va = (unsigned long)fb;
+ inst->vsi_core_ext->dec.nal_info = share_info->nal_info;
+
+ inst->vsi_core_ext->ube.dma_addr = lat_buf->ctx->msg_queue.wdma_addr.dma_addr;
+ inst->vsi_core_ext->ube.size = lat_buf->ctx->msg_queue.wdma_addr.size;
+
+ inst->vsi_core_ext->err_map.dma_addr = lat_buf->wdma_err_addr.dma_addr;
+ inst->vsi_core_ext->err_map.size = lat_buf->wdma_err_addr.size;
+
+ inst->vsi_core_ext->slice_bc.dma_addr = lat_buf->slice_bc_addr.dma_addr;
+ inst->vsi_core_ext->slice_bc.size = lat_buf->slice_bc_addr.size;
+
+ inst->vsi_core_ext->row_info.dma_addr = 0;
+ inst->vsi_core_ext->row_info.size = 0;
+
+ inst->vsi_core_ext->trans.dma_addr = share_info->trans_start;
+ inst->vsi_core_ext->trans.dma_addr_end = share_info->trans_end;
+
+ for (i = 0; i < H264_MAX_MV_NUM; i++) {
+ mem = &inst->mv_buf[i];
+ inst->vsi_core_ext->mv_buf_dma[i].dma_addr = mem->dma_addr;
+ inst->vsi_core_ext->mv_buf_dma[i].size = mem->size;
+ }
+
+ vb2_v4l2 = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ v4l2_m2m_buf_copy_metadata(&lat_buf->ts_info, vb2_v4l2, true);
+
+ return 0;
}
-static void vdec_h264_slice_deinit(void *h_vdec)
+static int vdec_h264_slice_core_decode_ext(struct vdec_lat_buf *lat_buf)
{
- struct vdec_h264_slice_inst *inst = h_vdec;
+ int err, timeout;
+ struct mtk_vcodec_dec_ctx *ctx = lat_buf->ctx;
+ struct vdec_h264_slice_inst *inst = ctx->drv_handle;
+ struct vdec_h264_slice_share_info *share_info = lat_buf->private_data;
+ struct vdec_vpu_inst *vpu = &inst->vpu;
- vpu_dec_deinit(&inst->vpu);
- vdec_h264_slice_free_mv_buf(inst);
- vdec_msg_queue_deinit(&inst->ctx->msg_queue, inst->ctx);
+ memcpy(&inst->vsi_core_ext->h264_slice_params, &share_info->h264_slice_params,
+ sizeof(share_info->h264_slice_params));
- kfree(inst);
+ err = vdec_h264_slice_setup_core_buffer_ext(inst, share_info, lat_buf);
+ if (err)
+ goto vdec_dec_end;
+
+ vdec_h264_slice_fill_decode_reflist(inst, &inst->vsi_core_ext->h264_slice_params,
+ share_info);
+ err = vpu_dec_core(vpu);
+ if (err) {
+ mtk_vdec_err(ctx, "core decode err=%d", err);
+ goto vdec_dec_end;
+ }
+
+ /* wait decoder done interrupt */
+ timeout = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS, MTK_VDEC_CORE);
+ if (timeout)
+ mtk_vdec_err(ctx, "core decode timeout: pic_%d", ctx->decoded_frame_cnt);
+ inst->vsi_core_ext->dec.timeout = !!timeout;
+
+ vpu_dec_core_end(vpu);
+
+ /* The CRC is a hardware checksum, used to check whether the decode result is correct. */
+ mtk_vdec_debug(ctx, "pic[%d] crc: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
+ ctx->decoded_frame_cnt,
+ inst->vsi_core_ext->dec.crc[0], inst->vsi_core_ext->dec.crc[1],
+ inst->vsi_core_ext->dec.crc[2], inst->vsi_core_ext->dec.crc[3],
+ inst->vsi_core_ext->dec.crc[4], inst->vsi_core_ext->dec.crc[5],
+ inst->vsi_core_ext->dec.crc[6], inst->vsi_core_ext->dec.crc[7]);
+
+vdec_dec_end:
+ vdec_msg_queue_update_ube_rptr(&lat_buf->ctx->msg_queue, share_info->trans_end);
+ ctx->dev->vdec_pdata->cap_to_disp(ctx, !!err, lat_buf->src_buf_req);
+ mtk_vdec_debug(ctx, "core decode done err=%d", err);
+ ctx->decoded_frame_cnt++;
+ return 0;
}
static int vdec_h264_slice_core_decode(struct vdec_lat_buf *lat_buf)
{
struct vdec_fb *fb;
- u64 vdec_fb_va;
u64 y_fb_dma, c_fb_dma;
int err, timeout, i;
struct mtk_vcodec_dec_ctx *ctx = lat_buf->ctx;
@@ -463,22 +635,19 @@ static int vdec_h264_slice_core_decode(struct vdec_lat_buf *lat_buf)
struct mtk_vcodec_mem *mem;
struct vdec_vpu_inst *vpu = &inst->vpu;
- mtk_vdec_debug(ctx, "[h264-core] vdec_h264 core decode");
memcpy(&inst->vsi_core->h264_slice_params, &share_info->h264_slice_params,
sizeof(share_info->h264_slice_params));
fb = ctx->dev->vdec_pdata->get_cap_buffer(ctx);
if (!fb) {
err = -EBUSY;
- mtk_vdec_err(ctx, "fb buffer is NULL");
+ mtk_vdec_err(ctx, "Unable to get a CAPTURE buffer for CAPTURE queue is empty.");
goto vdec_dec_end;
}
- vdec_fb_va = (unsigned long)fb;
y_fb_dma = (u64)fb->base_y.dma_addr;
if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1)
- c_fb_dma =
- y_fb_dma + inst->ctx->picinfo.buf_w * inst->ctx->picinfo.buf_h;
+ c_fb_dma = y_fb_dma + ctx->picinfo.fb_sz[0];
else
c_fb_dma = (u64)fb->base_c.dma_addr;
@@ -486,7 +655,7 @@ static int vdec_h264_slice_core_decode(struct vdec_lat_buf *lat_buf)
inst->vsi_core->dec.y_fb_dma = y_fb_dma;
inst->vsi_core->dec.c_fb_dma = c_fb_dma;
- inst->vsi_core->dec.vdec_fb_va = vdec_fb_va;
+ inst->vsi_core->dec.vdec_fb_va = (unsigned long)fb;
inst->vsi_core->dec.nal_info = share_info->nal_info;
inst->vsi_core->wdma_start_addr =
lat_buf->ctx->msg_queue.wdma_addr.dma_addr;
@@ -524,6 +693,8 @@ static int vdec_h264_slice_core_decode(struct vdec_lat_buf *lat_buf)
inst->vsi_core->dec.timeout = !!timeout;
vpu_dec_core_end(vpu);
+
+ /* The CRC is a hardware checksum, used to check whether the decode result is correct. */
mtk_vdec_debug(ctx, "pic[%d] crc: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
ctx->decoded_frame_cnt,
inst->vsi_core->dec.crc[0], inst->vsi_core->dec.crc[1],
@@ -536,6 +707,7 @@ vdec_dec_end:
ctx->dev->vdec_pdata->cap_to_disp(ctx, !!err, lat_buf->src_buf_req);
mtk_vdec_debug(ctx, "core decode done err=%d", err);
ctx->decoded_frame_cnt++;
+
return 0;
}
@@ -562,6 +734,128 @@ static void vdec_h264_insert_startcode(struct mtk_vcodec_dec_dev *vcodec_dev, un
(*bs_size) += 4;
}
+static int vdec_h264_slice_lat_decode_ext(void *h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ struct vdec_h264_slice_inst *inst = h_vdec;
+ struct vdec_vpu_inst *vpu = &inst->vpu;
+ struct mtk_video_dec_buf *src_buf_info;
+ int err, timeout = 0;
+ unsigned int data[2];
+ struct vdec_lat_buf *lat_buf;
+ struct vdec_h264_slice_share_info *share_info;
+
+ if (vdec_msg_queue_init(&inst->ctx->msg_queue, inst->ctx,
+ vdec_h264_slice_core_decode_ext,
+ sizeof(*share_info)))
+ return -ENOMEM;
+
+ /* bs NULL means flush decoder */
+ if (!bs) {
+ vdec_msg_queue_wait_lat_buf_full(&inst->ctx->msg_queue);
+ return vpu_dec_reset(vpu);
+ }
+
+ if (inst->is_field_bitstream)
+ return -EINVAL;
+
+ lat_buf = vdec_msg_queue_dqbuf(&inst->ctx->msg_queue.lat_ctx);
+ if (!lat_buf) {
+ mtk_vdec_debug(inst->ctx, "failed to get lat buffer");
+ return -EAGAIN;
+ }
+ share_info = lat_buf->private_data;
+ src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
+
+ lat_buf->src_buf_req = src_buf_info->m2m_buf.vb.vb2_buf.req_obj.req;
+ v4l2_m2m_buf_copy_metadata(&src_buf_info->m2m_buf.vb, &lat_buf->ts_info, true);
+
+ err = vdec_h264_slice_fill_decode_parameters(inst, share_info,
+ &inst->vsi_ext->h264_slice_params);
+ if (err)
+ goto err_free_fb_out;
+
+ vdec_h264_insert_startcode(inst->ctx->dev, bs->va, &bs->size,
+ &share_info->h264_slice_params.pps);
+
+ *res_chg = inst->resolution_changed;
+ if (inst->resolution_changed) {
+ mtk_vdec_debug(inst->ctx, "- resolution changed -");
+ if (inst->realloc_mv_buf) {
+ err = vdec_h264_slice_alloc_mv_buf(inst, &inst->ctx->picinfo);
+ inst->realloc_mv_buf = false;
+ if (err)
+ goto err_free_fb_out;
+ }
+ inst->resolution_changed = false;
+ }
+
+ vdec_h264_slice_setup_lat_buffer_ext(inst, bs, lat_buf);
+ mtk_vdec_debug(inst->ctx, "lat:trans(0x%llx 0x%lx) err:0x%llx",
+ inst->vsi_ext->ube.dma_addr, (unsigned long)inst->vsi_ext->ube.size,
+ inst->vsi_ext->err_map.dma_addr);
+
+ mtk_vdec_debug(inst->ctx, "slice(0x%llx 0x%lx) rprt((0x%llx 0x%llx))",
+ inst->vsi_ext->slice_bc.dma_addr,
+ (unsigned long)inst->vsi_ext->slice_bc.size,
+ inst->vsi_ext->trans.dma_addr, inst->vsi_ext->trans.dma_addr_end);
+
+ err = vpu_dec_start(vpu, data, 2);
+ if (err) {
+ mtk_vdec_debug(inst->ctx, "lat decode err: %d", err);
+ goto err_free_fb_out;
+ }
+
+ share_info->trans_end = inst->ctx->msg_queue.wdma_addr.dma_addr +
+ inst->vsi_ext->dec.wdma_end_addr_offset;
+
+ share_info->trans_start = inst->ctx->msg_queue.wdma_wptr_addr;
+ share_info->nal_info = inst->vsi_ext->dec.nal_info;
+
+ if (IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability)) {
+ memcpy(&share_info->h264_slice_params, &inst->vsi_ext->h264_slice_params,
+ sizeof(share_info->h264_slice_params));
+ vdec_msg_queue_qbuf(&inst->ctx->msg_queue.core_ctx, lat_buf);
+ }
+
+ /* wait decoder done interrupt */
+ timeout = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS, MTK_VDEC_LAT0);
+ if (timeout)
+ mtk_vdec_err(inst->ctx, "lat decode timeout: pic_%d", inst->slice_dec_num);
+ inst->vsi_ext->dec.timeout = !!timeout;
+
+ err = vpu_dec_end(vpu);
+ if (err == SLICE_HEADER_FULL || err == TRANS_BUFFER_FULL) {
+ if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability))
+ vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
+ inst->slice_dec_num++;
+ mtk_vdec_err(inst->ctx, "lat dec fail: pic_%d err:%d", inst->slice_dec_num, err);
+ return -EINVAL;
+ }
+
+ share_info->trans_end = inst->ctx->msg_queue.wdma_addr.dma_addr +
+ inst->vsi_ext->dec.wdma_end_addr_offset;
+
+ vdec_msg_queue_update_ube_wptr(&lat_buf->ctx->msg_queue, share_info->trans_end);
+
+ if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability)) {
+ memcpy(&share_info->h264_slice_params, &inst->vsi_ext->h264_slice_params,
+ sizeof(share_info->h264_slice_params));
+ vdec_msg_queue_qbuf(&inst->ctx->msg_queue.core_ctx, lat_buf);
+ }
+ mtk_vdec_debug(inst->ctx, "dec num: %d lat crc: 0x%x 0x%x 0x%x", inst->slice_dec_num,
+ inst->vsi_ext->dec.crc[0], inst->vsi_ext->dec.crc[1],
+ inst->vsi_ext->dec.crc[2]);
+
+ inst->slice_dec_num++;
+ return 0;
+err_free_fb_out:
+ vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
+ mtk_vdec_err(inst->ctx, "slice dec number: %d err: %d", inst->slice_dec_num, err);
+ return err;
+}
+
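Together with vdec_h264_slice_core_decode_ext() above, this forms a two-stage pipeline: the LAT pass transcodes the bitstream into the UBE ring buffer, records the produced range in the per-buffer share_info, and queues the lat_buf to the core context, which later consumes exactly that range and releases it by advancing the read pointer. A condensed sketch of the handoff, simplified from the code above rather than its literal control flow:

	/* Producer (LAT): record the transcoded range and hand it off. */
	share_info->trans_start = inst->ctx->msg_queue.wdma_wptr_addr;
	share_info->trans_end = inst->ctx->msg_queue.wdma_addr.dma_addr +
				inst->vsi_ext->dec.wdma_end_addr_offset;
	vdec_msg_queue_qbuf(&inst->ctx->msg_queue.core_ctx, lat_buf);

	/* Consumer (core): decode that range, then release it to the ring. */
	inst->vsi_core_ext->trans.dma_addr = share_info->trans_start;
	inst->vsi_core_ext->trans.dma_addr_end = share_info->trans_end;
	vdec_msg_queue_update_ube_rptr(&lat_buf->ctx->msg_queue, share_info->trans_end);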
static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
struct vdec_fb *fb, bool *res_chg)
{
@@ -608,7 +902,8 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
lat_buf->src_buf_req = src_buf_info->m2m_buf.vb.vb2_buf.req_obj.req;
v4l2_m2m_buf_copy_metadata(&src_buf_info->m2m_buf.vb, &lat_buf->ts_info, true);
- err = vdec_h264_slice_fill_decode_parameters(inst, share_info);
+ err = vdec_h264_slice_fill_decode_parameters(inst, share_info,
+ &inst->vsi->h264_slice_params);
if (err)
goto err_free_fb_out;
@@ -706,6 +1001,101 @@ err_free_fb_out:
return err;
}
+static int vdec_h264_slice_single_decode_ext(void *h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *unused, bool *res_chg)
+{
+ struct vdec_h264_slice_inst *inst = h_vdec;
+ struct vdec_vpu_inst *vpu = &inst->vpu;
+ struct mtk_video_dec_buf *src_buf_info, *dst_buf_info;
+ struct vdec_fb *fb;
+ unsigned int data[2], i;
+ u64 y_fb_dma, c_fb_dma;
+ struct mtk_vcodec_mem *mem;
+ int err;
+
+ /* bs NULL means flush decoder */
+ if (!bs)
+ return vpu_dec_reset(vpu);
+
+ fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
+ if (!fb) {
+ mtk_vdec_err(inst->ctx,
+ "Unable to get a CAPTURE buffer for CAPTURE queue is empty.");
+ return -ENOMEM;
+ }
+
+ src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
+ dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
+
+ y_fb_dma = fb->base_y.dma_addr;
+ c_fb_dma = fb->base_c.dma_addr;
+ mtk_vdec_debug(inst->ctx, "[h264-dec] [%d] y_dma=%llx c_dma=%llx",
+ inst->ctx->decoded_frame_cnt, y_fb_dma, c_fb_dma);
+
+ inst->vsi_ctx_ext.bs.dma_addr = (u64)bs->dma_addr;
+ inst->vsi_ctx_ext.bs.size = bs->size;
+ inst->vsi_ctx_ext.fb.y.dma_addr = y_fb_dma;
+ inst->vsi_ctx_ext.fb.c.dma_addr = c_fb_dma;
+ inst->vsi_ctx_ext.dec.vdec_fb_va = (u64)(uintptr_t)fb;
+
+ v4l2_m2m_buf_copy_metadata(&src_buf_info->m2m_buf.vb,
+ &dst_buf_info->m2m_buf.vb, true);
+ err = get_vdec_sig_decode_parameters(inst);
+ if (err)
+ goto err_free_fb_out;
+
+ memcpy(&inst->vsi_ctx_ext.h264_slice_params, &inst->h264_slice_param,
+ sizeof(inst->vsi_ctx_ext.h264_slice_params));
+
+ *res_chg = inst->resolution_changed;
+ if (inst->resolution_changed) {
+ mtk_vdec_debug(inst->ctx, "- resolution changed -");
+ if (inst->realloc_mv_buf) {
+ err = vdec_h264_slice_alloc_mv_buf(inst, &inst->ctx->picinfo);
+ inst->realloc_mv_buf = false;
+ if (err)
+ goto err_free_fb_out;
+ }
+ inst->resolution_changed = false;
+
+ for (i = 0; i < H264_MAX_MV_NUM; i++) {
+ mem = &inst->mv_buf[i];
+ inst->vsi_ctx_ext.mv_buf_dma[i].dma_addr = mem->dma_addr;
+ }
+ }
+
+ memcpy(inst->vpu.vsi, &inst->vsi_ctx_ext, sizeof(inst->vsi_ctx_ext));
+ err = vpu_dec_start(vpu, data, 2);
+ if (err)
+ goto err_free_fb_out;
+
+ /* wait decoder done interrupt */
+ err = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS, MTK_VDEC_CORE);
+ if (err)
+ mtk_vdec_err(inst->ctx, "decode timeout: pic_%d", inst->ctx->decoded_frame_cnt);
+
+ inst->vsi_ext->dec.timeout = !!err;
+ err = vpu_dec_end(vpu);
+ if (err)
+ goto err_free_fb_out;
+
+ memcpy(&inst->vsi_ctx_ext, inst->vpu.vsi, sizeof(inst->vsi_ctx_ext));
+ mtk_vdec_debug(inst->ctx, "pic[%d] crc: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
+ inst->ctx->decoded_frame_cnt,
+ inst->vsi_ctx_ext.dec.crc[0], inst->vsi_ctx_ext.dec.crc[1],
+ inst->vsi_ctx_ext.dec.crc[2], inst->vsi_ctx_ext.dec.crc[3],
+ inst->vsi_ctx_ext.dec.crc[4], inst->vsi_ctx_ext.dec.crc[5],
+ inst->vsi_ctx_ext.dec.crc[6], inst->vsi_ctx_ext.dec.crc[7]);
+
+ inst->ctx->decoded_frame_cnt++;
+ return 0;
+
+err_free_fb_out:
+ mtk_vdec_err(inst->ctx, "dec frame number: %d err: %d", inst->ctx->decoded_frame_cnt, err);
+ return err;
+}
+
static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
struct vdec_fb *unused, bool *res_chg)
{
@@ -725,7 +1115,8 @@ static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs
fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
if (!fb) {
- mtk_vdec_err(inst->ctx, "fb buffer is NULL");
+ mtk_vdec_err(inst->ctx,
+ "Unable to get a CAPTURE buffer for CAPTURE queue is empty.");
return -ENOMEM;
}
@@ -749,6 +1140,9 @@ static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs
if (err)
goto err_free_fb_out;
+ memcpy(&inst->vsi_ctx.h264_slice_params, &inst->h264_slice_param,
+ sizeof(inst->vsi_ctx.h264_slice_params));
+
buf = (unsigned char *)bs->va;
nal_start_idx = mtk_vdec_h264_find_start_code(buf, bs->size);
if (nal_start_idx < 0) {
@@ -806,21 +1200,95 @@ err_free_fb_out:
return err;
}
+static int vdec_h264_slice_init(struct mtk_vcodec_dec_ctx *ctx)
+{
+ struct vdec_h264_slice_inst *inst;
+ int err, vsi_size;
+ unsigned char *temp;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+
+ inst->vpu.id = SCP_IPI_VDEC_LAT;
+ inst->vpu.core_id = SCP_IPI_VDEC_CORE;
+ inst->vpu.ctx = ctx;
+ inst->vpu.codec_type = ctx->current_codec;
+ inst->vpu.capture_type = ctx->capture_fourcc;
+
+ err = vpu_dec_init(&inst->vpu);
+ if (err) {
+ mtk_vdec_err(ctx, "vdec_h264 init err=%d", err);
+ goto error_free_inst;
+ }
+
+ if (IS_VDEC_SUPPORT_EXT(ctx->dev->dec_capability)) {
+ vsi_size = sizeof(struct vdec_h264_slice_vsi_ext);
+
+ vsi_size = round_up(vsi_size, VCODEC_DEC_ALIGNED_64);
+ inst->vsi_ext = inst->vpu.vsi;
+ temp = (unsigned char *)inst->vsi_ext;
+ inst->vsi_core_ext = (struct vdec_h264_slice_vsi_ext *)(temp + vsi_size);
+
+ if (inst->ctx->dev->vdec_pdata->hw_arch == MTK_VDEC_PURE_SINGLE_CORE)
+ inst->decode = vdec_h264_slice_single_decode_ext;
+ else
+ inst->decode = vdec_h264_slice_lat_decode_ext;
+ } else {
+ vsi_size = sizeof(struct vdec_h264_slice_vsi);
+
+ vsi_size = round_up(vsi_size, VCODEC_DEC_ALIGNED_64);
+ inst->vsi = inst->vpu.vsi;
+ temp = (unsigned char *)inst->vsi;
+ inst->vsi_core = (struct vdec_h264_slice_vsi *)(temp + vsi_size);
+
+ if (inst->ctx->dev->vdec_pdata->hw_arch == MTK_VDEC_PURE_SINGLE_CORE)
+ inst->decode = vdec_h264_slice_single_decode;
+ else
+ inst->decode = vdec_h264_slice_lat_decode;
+ }
+ inst->resolution_changed = true;
+ inst->realloc_mv_buf = true;
+
+ mtk_vdec_debug(ctx, "lat struct size = %d,%d,%d,%d vsi: %d\n",
+ (int)sizeof(struct mtk_h264_sps_param),
+ (int)sizeof(struct mtk_h264_pps_param),
+ (int)sizeof(struct vdec_h264_slice_lat_dec_param),
+ (int)sizeof(struct mtk_h264_dpb_info),
+ vsi_size);
+ mtk_vdec_debug(ctx, "lat H264 instance >> %p, codec_type = 0x%x",
+ inst, inst->vpu.codec_type);
+
+ ctx->drv_handle = inst;
+ return 0;
+
+error_free_inst:
+ kfree(inst);
+ return err;
+}
+
+static void vdec_h264_slice_deinit(void *h_vdec)
+{
+ struct vdec_h264_slice_inst *inst = h_vdec;
+
+ vpu_dec_deinit(&inst->vpu);
+ vdec_h264_slice_free_mv_buf(inst);
+ vdec_msg_queue_deinit(&inst->ctx->msg_queue, inst->ctx);
+
+ kfree(inst);
+}
+
static int vdec_h264_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
struct vdec_fb *unused, bool *res_chg)
{
struct vdec_h264_slice_inst *inst = h_vdec;
- int ret;
if (!h_vdec)
return -EINVAL;
- if (inst->ctx->dev->vdec_pdata->hw_arch == MTK_VDEC_PURE_SINGLE_CORE)
- ret = vdec_h264_slice_single_decode(h_vdec, bs, unused, res_chg);
- else
- ret = vdec_h264_slice_lat_decode(h_vdec, bs, unused, res_chg);
-
- return ret;
+ return inst->decode(h_vdec, bs, unused, res_chg);
}
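With the per-frame hw_arch branch gone, the entry point simply forwards to the callback selected once in vdec_h264_slice_init() above; supporting a further firmware interface reduces to one more branch at init time. A hypothetical sketch, where the IS_VDEC_SUPPORT_EXT2 flag and the _ext2 callback are invented purely for illustration:

	/* Hypothetical only: wiring one more decode variant into the init-time selection. */
	if (IS_VDEC_SUPPORT_EXT2(ctx->dev->dec_capability))
		inst->decode = vdec_h264_slice_lat_decode_ext2;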
static int vdec_h264_slice_get_param(void *h_vdec, enum vdec_get_param_type type,
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
index aa721cc43647..2725db882e5b 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
@@ -821,7 +821,7 @@ static int vdec_hevc_slice_setup_core_buffer(struct vdec_hevc_slice_inst *inst,
inst->vsi_core->fb.y.dma_addr = y_fb_dma;
inst->vsi_core->fb.y.size = ctx->picinfo.fb_sz[0];
inst->vsi_core->fb.c.dma_addr = c_fb_dma;
- inst->vsi_core->fb.y.size = ctx->picinfo.fb_sz[1];
+ inst->vsi_core->fb.c.size = ctx->picinfo.fb_sz[1];
inst->vsi_core->dec.vdec_fb_va = (unsigned long)fb;
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
index 8522f71fc901..0f63657d8bad 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
@@ -515,7 +515,7 @@ static int h264_encode_frame(struct venc_h264_inst *inst,
struct venc_frame_info frame_info;
struct mtk_vcodec_enc_ctx *ctx = inst->ctx;
- mtk_venc_debug(ctx, "frm_cnt = %d\n ", inst->frm_cnt);
+ mtk_venc_debug(ctx, "frm_cnt = %d\n", inst->frm_cnt);
if (MTK_ENC_IOVA_IS_34BIT(ctx)) {
gop_size = inst->vsi_34->config.gop_size;
diff --git a/drivers/media/platform/nuvoton/npcm-video.c b/drivers/media/platform/nuvoton/npcm-video.c
index 7a9d8928ae40..44e904e61801 100644
--- a/drivers/media/platform/nuvoton/npcm-video.c
+++ b/drivers/media/platform/nuvoton/npcm-video.c
@@ -578,7 +578,7 @@ static unsigned int npcm_video_hres(struct npcm_video *video)
regmap_read(gfxi, HVCNTL, &hvcntl);
apb_hor_res = (((hvcnth & HVCNTH_MASK) << 8) + (hvcntl & HVCNTL_MASK) + 1);
- return apb_hor_res;
+ return (apb_hor_res > MAX_WIDTH) ? MAX_WIDTH : apb_hor_res;
}
static unsigned int npcm_video_vres(struct npcm_video *video)
@@ -591,7 +591,7 @@ static unsigned int npcm_video_vres(struct npcm_video *video)
apb_ver_res = (((vvcnth & VVCNTH_MASK) << 8) + (vvcntl & VVCNTL_MASK));
- return apb_ver_res;
+ return (apb_ver_res > MAX_HEIGHT) ? MAX_HEIGHT : apb_ver_res;
}
static int npcm_video_capres(struct npcm_video *video, unsigned int hor_res,
@@ -863,7 +863,6 @@ static void npcm_video_detect_resolution(struct npcm_video *video)
struct regmap *gfxi = video->gfx_regmap;
unsigned int dispst;
- video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
det->width = npcm_video_hres(video);
det->height = npcm_video_vres(video);
@@ -892,12 +891,16 @@ static void npcm_video_detect_resolution(struct npcm_video *video)
clear_bit(VIDEO_RES_CHANGING, &video->flags);
}
- if (det->width && det->height)
+ if (det->width && det->height) {
video->v4l2_input_status = 0;
-
- dev_dbg(video->dev, "Got resolution[%dx%d] -> [%dx%d], status %d\n",
- act->width, act->height, det->width, det->height,
- video->v4l2_input_status);
+ dev_dbg(video->dev, "Got resolution[%dx%d] -> [%dx%d], status %d\n",
+ act->width, act->height, det->width, det->height,
+ video->v4l2_input_status);
+ } else {
+ video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
+ dev_err(video->dev, "Got invalid resolution[%dx%d]\n", det->width,
+ det->height);
+ }
}
static int npcm_video_set_resolution(struct npcm_video *video,
diff --git a/drivers/media/platform/nxp/dw100/dw100.c b/drivers/media/platform/nxp/dw100/dw100.c
index 66582e7f92fc..3d1db1121bf9 100644
--- a/drivers/media/platform/nxp/dw100/dw100.c
+++ b/drivers/media/platform/nxp/dw100/dw100.c
@@ -961,9 +961,9 @@ static int dw100_s_selection(struct file *file, void *fh,
src_q_data = dw100_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
dev_dbg(&ctx->dw_dev->pdev->dev,
- ">>> Buffer Type: %u Target: %u Rect: %ux%u@%d.%d\n",
+ ">>> Buffer Type: %u Target: %u Rect: (%d,%d)/%ux%u\n",
sel->type, sel->target,
- sel->r.width, sel->r.height, sel->r.left, sel->r.top);
+ sel->r.left, sel->r.top, sel->r.width, sel->r.height);
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
@@ -1025,9 +1025,9 @@ static int dw100_s_selection(struct file *file, void *fh,
}
dev_dbg(&ctx->dw_dev->pdev->dev,
- "<<< Buffer Type: %u Target: %u Rect: %ux%u@%d.%d\n",
+ "<<< Buffer Type: %u Target: %u Rect: (%d,%d)/%ux%u\n",
sel->type, sel->target,
- sel->r.width, sel->r.height, sel->r.left, sel->r.top);
+ sel->r.left, sel->r.top, sel->r.width, sel->r.height);
return 0;
}
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h
index d579c804b047..adb93e977be9 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h
@@ -89,6 +89,7 @@
/* SLOT_STATUS fields for slots 0..3 */
#define SLOT_STATUS_FRMDONE (0x1 << 3)
#define SLOT_STATUS_ENC_CONFIG_ERR (0x1 << 8)
+#define SLOT_STATUS_ONGOING (0x1 << 31)
/* SLOT_IRQ_EN fields TBD */
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
index 1221b309a916..5c17bc58181e 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
@@ -535,7 +535,18 @@ static const unsigned char jpeg_sos_maximal[] = {
};
static const unsigned char jpeg_image_red[] = {
- 0xFC, 0x5F, 0xA2, 0xBF, 0xCA, 0x73, 0xFE, 0xFE,
+ 0xF9, 0xFE, 0x8A, 0xFC, 0x34, 0xFD, 0xC4, 0x28,
+ 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A,
+ 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0,
+ 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00,
+ 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02,
+ 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28,
+ 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A,
+ 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0,
+ 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00,
+ 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02,
+ 0x8A, 0x00, 0x28, 0xA0, 0x0F, 0xFF, 0xD0, 0xF9,
+ 0xFE, 0x8A, 0xFC, 0x34, 0xFD, 0xC4, 0x28, 0xA0,
0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00,
0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02,
0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28,
@@ -545,7 +556,7 @@ static const unsigned char jpeg_image_red[] = {
0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02,
0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28,
0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A,
- 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00
+ 0x00, 0x28, 0xA0, 0x0F
};
static const unsigned char jpeg_eoi[] = {
@@ -752,6 +763,39 @@ static int mxc_get_free_slot(struct mxc_jpeg_slot_data *slot_data)
return -1;
}
+static void mxc_jpeg_free_slot_data(struct mxc_jpeg_dev *jpeg)
+{
+ /* free descriptor for decoding/encoding phase */
+ dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc),
+ jpeg->slot_data.desc,
+ jpeg->slot_data.desc_handle);
+ jpeg->slot_data.desc = NULL;
+ jpeg->slot_data.desc_handle = 0;
+
+ /* free descriptor for encoder configuration phase / decoder DHT */
+ dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc),
+ jpeg->slot_data.cfg_desc,
+ jpeg->slot_data.cfg_desc_handle);
+ jpeg->slot_data.cfg_desc_handle = 0;
+ jpeg->slot_data.cfg_desc = NULL;
+
+ /* free configuration stream */
+ dma_free_coherent(jpeg->dev, MXC_JPEG_MAX_CFG_STREAM,
+ jpeg->slot_data.cfg_stream_vaddr,
+ jpeg->slot_data.cfg_stream_handle);
+ jpeg->slot_data.cfg_stream_vaddr = NULL;
+ jpeg->slot_data.cfg_stream_handle = 0;
+
+ dma_free_coherent(jpeg->dev, jpeg->slot_data.cfg_dec_size,
+ jpeg->slot_data.cfg_dec_vaddr,
+ jpeg->slot_data.cfg_dec_daddr);
+ jpeg->slot_data.cfg_dec_size = 0;
+ jpeg->slot_data.cfg_dec_vaddr = NULL;
+ jpeg->slot_data.cfg_dec_daddr = 0;
+
+ jpeg->slot_data.used = false;
+}
+
static bool mxc_jpeg_alloc_slot_data(struct mxc_jpeg_dev *jpeg)
{
struct mxc_jpeg_desc *desc;
@@ -788,36 +832,25 @@ static bool mxc_jpeg_alloc_slot_data(struct mxc_jpeg_dev *jpeg)
goto err;
jpeg->slot_data.cfg_stream_vaddr = cfg_stm;
+ jpeg->slot_data.cfg_dec_size = MXC_JPEG_PATTERN_WIDTH * MXC_JPEG_PATTERN_HEIGHT * 2;
+ jpeg->slot_data.cfg_dec_vaddr = dma_alloc_coherent(jpeg->dev,
+ jpeg->slot_data.cfg_dec_size,
+ &jpeg->slot_data.cfg_dec_daddr,
+ GFP_ATOMIC);
+ if (!jpeg->slot_data.cfg_dec_vaddr)
+ goto err;
+
skip_alloc:
jpeg->slot_data.used = true;
return true;
err:
dev_err(jpeg->dev, "Could not allocate descriptors for slot %d", jpeg->slot_data.slot);
+ mxc_jpeg_free_slot_data(jpeg);
return false;
}
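For scale, the scratch buffer allocated above is sized for the configuration-phase output: the DHT/configuration pass decodes a 128x64 pattern as YUYV at 2 bytes per pixel, so cfg_dec_size = 128 * 64 * 2 = 16384 bytes (16 KiB). The descriptor changes further down then point that pass at this scratch buffer instead of the real destination buffer.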
-static void mxc_jpeg_free_slot_data(struct mxc_jpeg_dev *jpeg)
-{
- /* free descriptor for decoding/encoding phase */
- dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc),
- jpeg->slot_data.desc,
- jpeg->slot_data.desc_handle);
-
- /* free descriptor for encoder configuration phase / decoder DHT */
- dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc),
- jpeg->slot_data.cfg_desc,
- jpeg->slot_data.cfg_desc_handle);
-
- /* free configuration stream */
- dma_free_coherent(jpeg->dev, MXC_JPEG_MAX_CFG_STREAM,
- jpeg->slot_data.cfg_stream_vaddr,
- jpeg->slot_data.cfg_stream_handle);
-
- jpeg->slot_data.used = false;
-}
-
static void mxc_jpeg_check_and_set_last_buffer(struct mxc_jpeg_ctx *ctx,
struct vb2_v4l2_buffer *src_buf,
struct vb2_v4l2_buffer *dst_buf)
@@ -877,6 +910,34 @@ static u32 mxc_jpeg_get_plane_size(struct mxc_jpeg_q_data *q_data, u32 plane_no)
return size;
}
+static bool mxc_dec_is_ongoing(struct mxc_jpeg_ctx *ctx)
+{
+ struct mxc_jpeg_dev *jpeg = ctx->mxc_jpeg;
+ u32 curr_desc;
+ u32 slot_status;
+
+ curr_desc = readl(jpeg->base_reg + MXC_SLOT_OFFSET(ctx->slot, SLOT_CUR_DESCPT_PTR));
+ if (curr_desc == jpeg->slot_data.cfg_desc_handle)
+ return true;
+
+ slot_status = readl(jpeg->base_reg + MXC_SLOT_OFFSET(ctx->slot, SLOT_STATUS));
+ if (slot_status & SLOT_STATUS_ONGOING)
+ return true;
+
+ /*
+ * The curr_desc register is updated when next_descpt_ptr is loaded,
+ * while the ongoing bit of slot_status is set once the 32-byte descriptor
+ * has been loaded. There is therefore a short interval between the two,
+ * which can yield a spurious "not ongoing" result. Since a register read
+ * is slow compared with the IP fetching the 32-byte descriptor from
+ * memory, reading slot_status a second time avoids this race.
+ */
+ slot_status = readl(jpeg->base_reg + MXC_SLOT_OFFSET(ctx->slot, SLOT_STATUS));
+ if (slot_status & SLOT_STATUS_ONGOING)
+ return true;
+
+ return false;
+}
+
static irqreturn_t mxc_jpeg_dec_irq(int irq, void *priv)
{
struct mxc_jpeg_dev *jpeg = priv;
@@ -946,7 +1007,8 @@ static irqreturn_t mxc_jpeg_dec_irq(int irq, void *priv)
mxc_jpeg_enc_mode_go(dev, reg, mxc_jpeg_is_extended_sequential(q_data->fmt));
goto job_unlock;
}
- if (jpeg->mode == MXC_JPEG_DECODE && jpeg_src_buf->dht_needed) {
+ if (jpeg->mode == MXC_JPEG_DECODE && jpeg_src_buf->dht_needed &&
+ mxc_dec_is_ongoing(ctx)) {
jpeg_src_buf->dht_needed = false;
dev_dbg(dev, "Decoder DHT cfg finished. Start decoding...\n");
goto job_unlock;
@@ -1209,14 +1271,14 @@ static void mxc_jpeg_config_dec_desc(struct vb2_buffer *out_buf,
*/
*cfg_size = mxc_jpeg_setup_cfg_stream(cfg_stream_vaddr,
V4L2_PIX_FMT_YUYV,
- MXC_JPEG_MIN_WIDTH,
- MXC_JPEG_MIN_HEIGHT);
+ MXC_JPEG_PATTERN_WIDTH,
+ MXC_JPEG_PATTERN_HEIGHT);
cfg_desc->next_descpt_ptr = desc_handle | MXC_NXT_DESCPT_EN;
- cfg_desc->buf_base0 = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ cfg_desc->buf_base0 = jpeg->slot_data.cfg_dec_daddr;
cfg_desc->buf_base1 = 0;
- cfg_desc->imgsize = MXC_JPEG_MIN_WIDTH << 16;
- cfg_desc->imgsize |= MXC_JPEG_MIN_HEIGHT;
- cfg_desc->line_pitch = MXC_JPEG_MIN_WIDTH * 2;
+ cfg_desc->imgsize = MXC_JPEG_PATTERN_WIDTH << 16;
+ cfg_desc->imgsize |= MXC_JPEG_PATTERN_HEIGHT;
+ cfg_desc->line_pitch = MXC_JPEG_PATTERN_WIDTH * 2;
cfg_desc->stm_ctrl = STM_CTRL_IMAGE_FORMAT(MXC_JPEG_YUV422);
cfg_desc->stm_ctrl |= STM_CTRL_BITBUF_PTR_CLR(1);
cfg_desc->stm_bufbase = cfg_stream_handle;
@@ -1918,9 +1980,19 @@ static void mxc_jpeg_buf_queue(struct vb2_buffer *vb)
jpeg_src_buf = vb2_to_mxc_buf(vb);
jpeg_src_buf->jpeg_parse_error = false;
ret = mxc_jpeg_parse(ctx, vb);
- if (ret)
+ if (ret) {
jpeg_src_buf->jpeg_parse_error = true;
+ /*
+ * If the capture queue is not set up, device_run() won't be scheduled,
+ * so drop the erroneous buffer here to let decoding continue.
+ */
+ if (!vb2_is_streaming(v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx))) {
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ return;
+ }
+ }
+
end:
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
index 86e324b21aed..fdde45f7e163 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
@@ -28,6 +28,8 @@
#define MXC_JPEG_W_ALIGN 3
#define MXC_JPEG_MAX_SIZEIMAGE 0xFFFFFC00
#define MXC_JPEG_MAX_PLANES 2
+#define MXC_JPEG_PATTERN_WIDTH 128
+#define MXC_JPEG_PATTERN_HEIGHT 64
enum mxc_jpeg_enc_state {
MXC_JPEG_ENCODING = 0, /* jpeg encode phase */
@@ -117,6 +119,9 @@ struct mxc_jpeg_slot_data {
dma_addr_t desc_handle;
dma_addr_t cfg_desc_handle; // configuration descriptor dma address
dma_addr_t cfg_stream_handle; // configuration bitstream dma address
+ dma_addr_t cfg_dec_size;
+ void *cfg_dec_vaddr;
+ dma_addr_t cfg_dec_daddr;
};
struct mxc_jpeg_dev {
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
index 794050a6a919..22e49d3a1287 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
@@ -43,6 +43,7 @@ struct mxc_isi_m2m_ctx_queue_data {
struct v4l2_pix_format_mplane format;
const struct mxc_isi_format_info *info;
u32 sequence;
+ bool streaming;
};
struct mxc_isi_m2m_ctx {
@@ -484,15 +485,18 @@ static int mxc_isi_m2m_streamon(struct file *file, void *fh,
enum v4l2_buf_type type)
{
struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
+ struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format;
const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format;
const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info;
const struct mxc_isi_format_info *out_info = ctx->queues.out.info;
struct mxc_isi_m2m *m2m = ctx->m2m;
bool bypass;
-
int ret;
+ if (q->streaming)
+ return 0;
+
mutex_lock(&m2m->lock);
if (m2m->usage_count == INT_MAX) {
@@ -545,6 +549,8 @@ static int mxc_isi_m2m_streamon(struct file *file, void *fh,
goto unchain;
}
+ q->streaming = true;
+
return 0;
unchain:
@@ -567,10 +573,14 @@ static int mxc_isi_m2m_streamoff(struct file *file, void *fh,
enum v4l2_buf_type type)
{
struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
+ struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
struct mxc_isi_m2m *m2m = ctx->m2m;
v4l2_m2m_ioctl_streamoff(file, fh, type);
+ if (!q->streaming)
+ return 0;
+
mutex_lock(&m2m->lock);
/*
@@ -596,6 +606,8 @@ static int mxc_isi_m2m_streamoff(struct file *file, void *fh,
mutex_unlock(&m2m->lock);
+ q->streaming = false;
+
return 0;
}
diff --git a/drivers/media/platform/qcom/camss/Makefile b/drivers/media/platform/qcom/camss/Makefile
index f6db5b3b5ace..d26a9c24a430 100644
--- a/drivers/media/platform/qcom/camss/Makefile
+++ b/drivers/media/platform/qcom/camss/Makefile
@@ -6,6 +6,7 @@ qcom-camss-objs += \
camss-csid.o \
camss-csid-4-1.o \
camss-csid-4-7.o \
+ camss-csid-680.o \
camss-csid-gen2.o \
camss-csid-780.o \
camss-csiphy-2ph-1-0.o \
@@ -17,6 +18,7 @@ qcom-camss-objs += \
camss-vfe-4-8.o \
camss-vfe-17x.o \
camss-vfe-480.o \
+ camss-vfe-680.o \
camss-vfe-780.o \
camss-vfe-gen1.o \
camss-vfe.o \
diff --git a/drivers/media/platform/qcom/camss/camss-csid-680.c b/drivers/media/platform/qcom/camss/camss-csid-680.c
new file mode 100644
index 000000000000..3ad3a174bcfb
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-csid-680.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module
+ *
+ * Copyright (C) 2020-2025 Linaro Ltd.
+ */
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+
+#include "camss.h"
+#include "camss-csid.h"
+#include "camss-csid-gen2.h"
+
+#define CSID_TOP_IO_PATH_CFG0(csid) (0x4 * (csid))
+#define CSID_TOP_IO_PATH_CFG0_INTERNAL_CSID BIT(0)
+#define CSID_TOP_IO_PATH_CFG0_SFE_0 BIT(1)
+#define CSID_TOP_IO_PATH_CFG0_SFE_1 GENMASK(1, 0)
+#define CSID_TOP_IO_PATH_CFG0_SBI_0 BIT(4)
+#define CSID_TOP_IO_PATH_CFG0_SBI_1 GENMASK(3, 0)
+#define CSID_TOP_IO_PATH_CFG0_SBI_2 GENMASK(3, 1)
+#define CSID_TOP_IO_PATH_CFG0_OUTPUT_IFE_EN BIT(8)
+#define CSID_TOP_IO_PATH_CFG0_SFE_OFFLINE_EN BIT(12)
+
+#define CSID_RESET_CMD 0x10
+#define CSID_RESET_CMD_HW_RESET BIT(0)
+#define CSID_RESET_CMD_SW_RESET BIT(1)
+#define CSID_RESET_CMD_IRQ_CTRL BIT(2)
+
+#define CSID_IRQ_CMD 0x14
+#define CSID_IRQ_CMD_CLEAR BIT(0)
+#define CSID_IRQ_CMD_SET BIT(4)
+
+#define CSID_REG_UPDATE_CMD 0x18
+
+#define CSID_CSI2_RDIN_IRQ_STATUS(rdi) (0xec + 0x10 * (rdi))
+#define CSID_CSI2_RDIN_CCIF_VIOLATION BIT(29)
+#define CSID_CSI2_RDIN_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP BIT(28)
+#define CSID_CSI2_RDIN_ERROR_REC_WIDTH_VIOLATION BIT(27)
+#define CSID_CSI2_RDIN_ERROR_REC_HEIGHT_VIOLATION BIT(26)
+#define CSID_CSI2_RDIN_BATCH_END_MISSING_VIOLATION BIT(25)
+#define CSID_CSI2_RDIN_ILLEGAL_BATCH_ID_IRQ BIT(24)
+#define CSID_CSI2_RDIN_RUP_DONE BIT(23)
+#define CSID_CSI2_RDIN_CAMIF_EPOCH_1_IRQ BIT(22)
+#define CSID_CSI2_RDIN_CAMIF_EPOCH_0_IRQ BIT(21)
+#define CSID_CSI2_RDIN_ERROR_REC_OVERFLOW_IRQ BIT(19)
+#define CSID_CSI2_RDIN_ERROR_REC_FRAME_DROP BIT(18)
+#define CSID_CSI2_RDIN_VCDT_GRP_CHANG BIT(17)
+#define CSID_CSI2_RDIN_VCDT_GRP_0_SEL BIT(16)
+#define CSID_CSI2_RDIN_VCDT_GRP_1_SEL BIT(15)
+#define CSID_CSI2_RDIN_ERROR_LINE_COUNT BIT(14)
+#define CSID_CSI2_RDIN_ERROR_PIX_COUNT BIT(13)
+#define CSID_CSI2_RDIN_INFO_INPUT_SOF BIT(12)
+#define CSID_CSI2_RDIN_INFO_INPUT_SOL BIT(11)
+#define CSID_CSI2_RDIN_INFO_INPUT_EOL BIT(10)
+#define CSID_CSI2_RDIN_INFO_INPUT_EOF BIT(9)
+#define CSID_CSI2_RDIN_INFO_FRAME_DROP_SOF BIT(8)
+#define CSID_CSI2_RDIN_INFO_FRAME_DROP_SOL BIT(7)
+#define CSID_CSI2_RDIN_INFO_FRAME_DROP_EOL BIT(6)
+#define CSID_CSI2_RDIN_INFO_FRAME_DROP_EOF BIT(5)
+#define CSID_CSI2_RDIN_INFO_CAMIF_SOF BIT(4)
+#define CSID_CSI2_RDIN_INFO_CAMIF_EOF BIT(3)
+#define CSID_CSI2_RDIN_INFO_FIFO_OVERFLOW BIT(2)
+#define CSID_CSI2_RDIN_RES1 BIT(1)
+#define CSID_CSI2_RDIN_RES0 BIT(0)
+
+#define CSID_CSI2_RDIN_IRQ_MASK(rdi) (0xf0 + 0x10 * (rdi))
+#define CSID_CSI2_RDIN_IRQ_CLEAR(rdi) (0xf4 + 0x10 * (rdi))
+#define CSID_CSI2_RDIN_IRQ_SET(rdi) (0xf8 + 0x10 * (rdi))
+
+#define CSID_TOP_IRQ_STATUS 0x7c
+#define CSID_TOP_IRQ_MASK 0x80
+#define CSID_TOP_IRQ_CLEAR 0x84
+#define CSID_TOP_IRQ_RESET BIT(0)
+#define CSID_TOP_IRQ_RX BIT(2)
+#define CSID_TOP_IRQ_LONG_PKT(rdi) (BIT(8) << (rdi))
+#define CSID_TOP_IRQ_BUF_DONE BIT(13)
+
+#define CSID_BUF_DONE_IRQ_STATUS 0x8c
+#define BUF_DONE_IRQ_STATUS_RDI_OFFSET (csid_is_lite(csid) ? 1 : 14)
+#define CSID_BUF_DONE_IRQ_MASK 0x90
+#define CSID_BUF_DONE_IRQ_CLEAR 0x94
+
+#define CSID_CSI2_RX_IRQ_STATUS 0x9c
+#define CSID_CSI2_RX_IRQ_MASK 0xa0
+#define CSID_CSI2_RX_IRQ_CLEAR 0xa4
+
+#define CSID_RESET_CFG 0xc
+#define CSID_RESET_CFG_MODE_IMMEDIATE BIT(0)
+#define CSID_RESET_CFG_LOCATION_COMPLETE BIT(4)
+
+#define CSID_CSI2_RDI_IRQ_STATUS(rdi) (0xec + 0x10 * (rdi))
+#define CSID_CSI2_RDI_IRQ_MASK(rdi) (0xf0 + 0x10 * (rdi))
+#define CSID_CSI2_RDI_IRQ_CLEAR(rdi) (0xf4 + 0x10 * (rdi))
+
+#define CSID_CSI2_RX_CFG0 0x200
+#define CSI2_RX_CFG0_NUM_ACTIVE_LANES 0
+#define CSI2_RX_CFG0_DL0_INPUT_SEL 4
+#define CSI2_RX_CFG0_DL1_INPUT_SEL 8
+#define CSI2_RX_CFG0_DL2_INPUT_SEL 12
+#define CSI2_RX_CFG0_DL3_INPUT_SEL 16
+#define CSI2_RX_CFG0_PHY_NUM_SEL 20
+#define CSI2_RX_CFG0_PHY_SEL_BASE_IDX 1
+#define CSI2_RX_CFG0_PHY_TYPE_SEL 24
+
+#define CSID_CSI2_RX_CFG1 0x204
+#define CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN BIT(0)
+#define CSI2_RX_CFG1_DE_SCRAMBLE_EN BIT(1)
+#define CSI2_RX_CFG1_VC_MODE BIT(2)
+#define CSI2_RX_CFG1_COMPLETE_STREAM_EN BIT(4)
+#define CSI2_RX_CFG1_COMPLETE_STREAM_FRAME_TIMING BIT(5)
+#define CSI2_RX_CFG1_MISR_EN BIT(6)
+#define CSI2_RX_CFG1_CGC_MODE BIT(7)
+
+#define CSID_CSI2_RX_CAPTURE_CTRL 0x208
+#define CSI2_RX_CAPTURE_CTRL_LONG_PKT_EN BIT(0)
+#define CSI2_RX_CAPTURE_CTRL_SHORT_PKT_EN BIT(1)
+#define CSI2_RX_CAPTURE_CTRL_CPHY_PKT_EN BIT(2)
+#define CSI2_RX_CAPTURE_CTRL_LONG_PKT_DT GENMASK(9, 4)
+#define CSI2_RX_CAPTURE_CTRL_LONG_PKT_VC GENMASK(14, 10)
+#define CSI2_RX_CAPTURE_CTRL_SHORT_PKT_VC GENMASK(19, 15)
+#define CSI2_RX_CAPTURE_CTRL_CPHY_PKT_DT GENMASK(25, 20)
+#define CSI2_RX_CAPTURE_CTRL_CPHY_PKT_VC GENMASK(30, 26)
+
+#define CSID_CSI2_RX_TOTAL_PKTS_RCVD 0x240
+#define CSID_CSI2_RX_STATS_ECC 0x244
+#define CSID_CSI2_RX_CRC_ERRORS 0x248
+
+#define CSID_RDI_CFG0(rdi) (0x500 + 0x100 * (rdi))
+#define RDI_CFG0_DECODE_FORMAT 12
+#define RDI_CFG0_DATA_TYPE 16
+#define RDI_CFG0_VIRTUAL_CHANNEL 22
+#define RDI_CFG0_DT_ID 27
+#define RDI_CFG0_ENABLE BIT(31)
+
+#define CSID_RDI_CTRL(rdi) (0x504 + 0x100 * (rdi))
+#define CSID_RDI_CTRL_HALT_CMD_HALT_AT_FRAME_BOUNDARY 0
+#define CSID_RDI_CTRL_HALT_CMD_RESUME_AT_FRAME_BOUNDARY 1
+
+#define CSID_RDI_CFG1(rdi) (0x510 + 0x100 * (rdi))
+#define RDI_CFG1_TIMESTAMP_STB_FRAME BIT(0)
+#define RDI_CFG1_TIMESTAMP_STB_IRQ BIT(1)
+#define RDI_CFG1_BYTE_CNTR_EN BIT(2)
+#define RDI_CFG1_TIMESTAMP_EN BIT(4)
+#define RDI_CFG1_DROP_H_EN BIT(5)
+#define RDI_CFG1_DROP_V_EN BIT(6)
+#define RDI_CFG1_CROP_H_EN BIT(7)
+#define RDI_CFG1_CROP_V_EN BIT(8)
+#define RDI_CFG1_MISR_EN BIT(9)
+#define RDI_CFG1_PLAIN_ALIGN_MSB BIT(11)
+#define RDI_CFG1_EARLY_EOF_EN BIT(14)
+#define RDI_CFG1_PACKING_MIPI BIT(15)
+
+#define CSID_RDI_ERR_RECOVERY_CFG0(rdi) (0x514 + 0x100 * (rdi))
+#define CSID_RDI_EPOCH_IRQ_CFG(rdi) (0x52c + 0x100 * (rdi))
+#define CSID_RDI_FRM_DROP_PATTERN(rdi) (0x540 + 0x100 * (rdi))
+#define CSID_RDI_FRM_DROP_PERIOD(rdi) (0x544 + 0x100 * (rdi))
+#define CSID_RDI_IRQ_SUBSAMPLE_PATTERN(rdi) (0x548 + 0x100 * (rdi))
+#define CSID_RDI_IRQ_SUBSAMPLE_PERIOD(rdi) (0x54c + 0x100 * (rdi))
+#define CSID_RDI_PIX_DROP_PATTERN(rdi) (0x558 + 0x100 * (rdi))
+#define CSID_RDI_PIX_DROP_PERIOD(rdi) (0x55c + 0x100 * (rdi))
+#define CSID_RDI_LINE_DROP_PATTERN(rdi) (0x560 + 0x100 * (rdi))
+#define CSID_RDI_LINE_DROP_PERIOD(rdi) (0x564 + 0x100 * (rdi))
+
+static inline int reg_update_rdi(struct csid_device *csid, int n)
+{
+ return BIT(4 + n) + BIT(20 + n);
+}
+
+static void csid_reg_update(struct csid_device *csid, int port_id)
+{
+ csid->reg_update |= reg_update_rdi(csid, port_id);
+ writel(csid->reg_update, csid->base + CSID_REG_UPDATE_CMD);
+}
+
+static inline void csid_reg_update_clear(struct csid_device *csid,
+ int port_id)
+{
+ csid->reg_update &= ~reg_update_rdi(csid, port_id);
+ writel(csid->reg_update, csid->base + CSID_REG_UPDATE_CMD);
+}
+
+static void __csid_configure_rx(struct csid_device *csid,
+ struct csid_phy_config *phy, int vc)
+{
+ u32 val;
+
+ val = (phy->lane_cnt - 1) << CSI2_RX_CFG0_NUM_ACTIVE_LANES;
+ val |= phy->lane_assign << CSI2_RX_CFG0_DL0_INPUT_SEL;
+ val |= (phy->csiphy_id + CSI2_RX_CFG0_PHY_SEL_BASE_IDX) << CSI2_RX_CFG0_PHY_NUM_SEL;
+
+ writel(val, csid->base + CSID_CSI2_RX_CFG0);
+
+ val = CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN;
+ if (vc > 3)
+ val |= CSI2_RX_CFG1_VC_MODE;
+ writel(val, csid->base + CSID_CSI2_RX_CFG1);
+}
+
+static void __csid_ctrl_rdi(struct csid_device *csid, int enable, u8 rdi)
+{
+ u32 val;
+
+ if (enable)
+ val = CSID_RDI_CTRL_HALT_CMD_RESUME_AT_FRAME_BOUNDARY;
+ else
+ val = CSID_RDI_CTRL_HALT_CMD_HALT_AT_FRAME_BOUNDARY;
+
+ writel(val, csid->base + CSID_RDI_CTRL(rdi));
+}
+
+static void __csid_configure_top(struct csid_device *csid)
+{
+ u32 val;
+
+ val = CSID_TOP_IO_PATH_CFG0_OUTPUT_IFE_EN | CSID_TOP_IO_PATH_CFG0_INTERNAL_CSID;
+ writel(val, csid->camss->csid_wrapper_base +
+ CSID_TOP_IO_PATH_CFG0(csid->id));
+}
+
+static void __csid_configure_rdi_stream(struct csid_device *csid, u8 enable, u8 vc)
+{
+ struct v4l2_mbus_framefmt *input_format = &csid->fmt[MSM_CSID_PAD_FIRST_SRC + vc];
+ const struct csid_format_info *format = csid_get_fmt_entry(csid->res->formats->formats,
+ csid->res->formats->nformats,
+ input_format->code);
+ u8 lane_cnt = csid->phy.lane_cnt;
+ u8 dt_id;
+ u32 val;
+
+ if (!lane_cnt)
+ lane_cnt = 4;
+
+ val = 0;
+ writel(val, csid->base + CSID_RDI_FRM_DROP_PERIOD(vc));
+
+ /*
+ * DT_ID is a two-bit bitfield that is concatenated with
+ * the four least significant bits of the five-bit VC
+ * bitfield to generate an internal CID value.
+ *
+ * CSID_RDI_CFG0(vc)
+ * DT_ID : 28:27
+ * VC : 26:22
+ * DT : 21:16
+ *
+ * CID : VC 3:0 << 2 | DT_ID 1:0
+ */
+ dt_id = vc & 0x03;
+
+ /* note: for non-RDI path, this should be format->decode_format */
+ val |= DECODE_FORMAT_PAYLOAD_ONLY << RDI_CFG0_DECODE_FORMAT;
+ val |= format->data_type << RDI_CFG0_DATA_TYPE;
+ val |= vc << RDI_CFG0_VIRTUAL_CHANNEL;
+ val |= dt_id << RDI_CFG0_DT_ID;
+ writel(val, csid->base + CSID_RDI_CFG0(vc));
+
+ val = RDI_CFG1_TIMESTAMP_STB_FRAME;
+ val |= RDI_CFG1_BYTE_CNTR_EN;
+ val |= RDI_CFG1_TIMESTAMP_EN;
+ val |= RDI_CFG1_DROP_H_EN;
+ val |= RDI_CFG1_DROP_V_EN;
+ val |= RDI_CFG1_CROP_H_EN;
+ val |= RDI_CFG1_CROP_V_EN;
+ val |= RDI_CFG1_PACKING_MIPI;
+
+ writel(val, csid->base + CSID_RDI_CFG1(vc));
+
+ val = 0;
+ writel(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PERIOD(vc));
+
+ val = 1;
+ writel(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PATTERN(vc));
+
+ val = 0;
+ writel(val, csid->base + CSID_RDI_CTRL(vc));
+
+ val = readl(csid->base + CSID_RDI_CFG0(vc));
+ if (enable)
+ val |= RDI_CFG0_ENABLE;
+ else
+ val &= ~RDI_CFG0_ENABLE;
+ writel(val, csid->base + CSID_RDI_CFG0(vc));
+}
+
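As a worked instance of the CID derivation described in the comment above (the VC value is illustrative):

	u8 vc = 5;				/* 0b00101 */
	u8 dt_id = vc & 0x03;			/* = 1, the two LSBs of VC */
	u8 cid = ((vc & 0xf) << 2) | dt_id;	/* = 0b10101 = 0x15 */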
+static void csid_configure_stream(struct csid_device *csid, u8 enable)
+{
+ int i;
+
+ __csid_configure_top(csid);
+
+ /* Loop through all enabled VCs and configure stream for each */
+ for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++) {
+ if (csid->phy.en_vc & BIT(i)) {
+ __csid_configure_rdi_stream(csid, enable, i);
+ __csid_configure_rx(csid, &csid->phy, i);
+ __csid_ctrl_rdi(csid, enable, i);
+ }
+ }
+}
+
+/*
+ * csid_reset - Trigger reset on CSID module and wait to complete
+ * @csid: CSID device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csid_reset(struct csid_device *csid)
+{
+ unsigned long time;
+ u32 val;
+ int i;
+
+ reinit_completion(&csid->reset_complete);
+
+ writel(CSID_IRQ_CMD_CLEAR, csid->base + CSID_IRQ_CMD);
+
+ /* preserve registers */
+ val = CSID_RESET_CFG_MODE_IMMEDIATE | CSID_RESET_CFG_LOCATION_COMPLETE;
+ writel(val, csid->base + CSID_RESET_CFG);
+
+ val = CSID_RESET_CMD_HW_RESET | CSID_RESET_CMD_SW_RESET;
+ writel(val, csid->base + CSID_RESET_CMD);
+
+ time = wait_for_completion_timeout(&csid->reset_complete,
+ msecs_to_jiffies(CSID_RESET_TIMEOUT_MS));
+ if (!time) {
+ dev_err(csid->camss->dev, "CSID reset timeout\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++) {
+ /* Enable RUP done for the client port */
+ writel(CSID_CSI2_RDIN_RUP_DONE, csid->base + CSID_CSI2_RDIN_IRQ_MASK(i));
+ }
+
+ /* Clear RDI status */
+ writel(~0u, csid->base + CSID_BUF_DONE_IRQ_CLEAR);
+
+ /* Enable BUF_DONE bit for all write-master client ports */
+ writel(~0u, csid->base + CSID_BUF_DONE_IRQ_MASK);
+
+ /* Unmask all TOP interrupts */
+ writel(~0u, csid->base + CSID_TOP_IRQ_MASK);
+
+ return 0;
+}
+
+static void csid_rup_complete(struct csid_device *csid, int rdi)
+{
+ csid_reg_update_clear(csid, rdi);
+}
+
+/*
+ * csid_isr - CSID module interrupt service routine
+ * @irq: Interrupt line
+ * @dev: CSID device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t csid_isr(int irq, void *dev)
+{
+ struct csid_device *csid = dev;
+ u32 buf_done_val, val, val_top;
+ int i;
+
+ /* Latch and clear TOP status */
+ val_top = readl(csid->base + CSID_TOP_IRQ_STATUS);
+ writel(val_top, csid->base + CSID_TOP_IRQ_CLEAR);
+
+ /* Latch and clear CSID_CSI2 status */
+ val = readl(csid->base + CSID_CSI2_RX_IRQ_STATUS);
+ writel(val, csid->base + CSID_CSI2_RX_IRQ_CLEAR);
+
+ /* Latch and clear top level BUF_DONE status */
+ buf_done_val = readl(csid->base + CSID_BUF_DONE_IRQ_STATUS);
+ writel(buf_done_val, csid->base + CSID_BUF_DONE_IRQ_CLEAR);
+
+ /* Process state for each RDI channel */
+ for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++) {
+ val = readl(csid->base + CSID_CSI2_RDIN_IRQ_STATUS(i));
+ if (val)
+ writel(val, csid->base + CSID_CSI2_RDIN_IRQ_CLEAR(i));
+
+ if (val & CSID_CSI2_RDIN_RUP_DONE)
+ csid_rup_complete(csid, i);
+
+ if (buf_done_val & BIT(BUF_DONE_IRQ_STATUS_RDI_OFFSET + i))
+ camss_buf_done(csid->camss, csid->id, i);
+ }
+
+ /* Issue clear command */
+ writel(CSID_IRQ_CMD_CLEAR, csid->base + CSID_IRQ_CMD);
+
+ /* Reset complete */
+ if (val_top & CSID_TOP_IRQ_RESET)
+ complete(&csid->reset_complete);
+
+ return IRQ_HANDLED;
+}
+
+static void csid_subdev_reg_update(struct csid_device *csid, int port_id, bool is_clear)
+{
+ if (is_clear)
+ csid_reg_update_clear(csid, port_id);
+ else
+ csid_reg_update(csid, port_id);
+}
+
+static void csid_subdev_init(struct csid_device *csid) {}
+
+const struct csid_hw_ops csid_ops_680 = {
+ .configure_testgen_pattern = NULL,
+ .configure_stream = csid_configure_stream,
+ .hw_version = csid_hw_version,
+ .isr = csid_isr,
+ .reset = csid_reset,
+ .src_pad_code = csid_src_pad_code,
+ .subdev_init = csid_subdev_init,
+ .reg_update = csid_subdev_reg_update,
+};
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
index d08117f46f3b..5284b5857368 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.c
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -613,8 +613,8 @@ u32 csid_hw_version(struct csid_device *csid)
hw_gen = (hw_version >> HW_VERSION_GENERATION) & 0xF;
hw_rev = (hw_version >> HW_VERSION_REVISION) & 0xFFF;
hw_step = (hw_version >> HW_VERSION_STEPPING) & 0xFFFF;
- dev_info(csid->camss->dev, "CSID:%d HW Version = %u.%u.%u\n",
- csid->id, hw_gen, hw_rev, hw_step);
+ dev_dbg(csid->camss->dev, "CSID:%d HW Version = %u.%u.%u\n",
+ csid->id, hw_gen, hw_rev, hw_step);
return hw_version;
}
diff --git a/drivers/media/platform/qcom/camss/camss-csid.h b/drivers/media/platform/qcom/camss/camss-csid.h
index 90b8fc5852be..9dc826d8c8f6 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.h
+++ b/drivers/media/platform/qcom/camss/camss-csid.h
@@ -213,6 +213,7 @@ extern const struct csid_formats csid_formats_gen2;
extern const struct csid_hw_ops csid_ops_4_1;
extern const struct csid_hw_ops csid_ops_4_7;
+extern const struct csid_hw_ops csid_ops_680;
extern const struct csid_hw_ops csid_ops_gen2;
extern const struct csid_hw_ops csid_ops_780;
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
index a6cc957b986e..f732a76de93e 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -55,11 +55,12 @@
#define CSIPHY_DNP_PARAMS 4
#define CSIPHY_2PH_REGS 5
#define CSIPHY_3PH_REGS 6
+#define CSIPHY_SKEW_CAL 7
struct csiphy_lane_regs {
s32 reg_addr;
s32 reg_data;
- s32 delay;
+ u32 delay_us;
u32 csiphy_param_type;
};
@@ -423,6 +424,123 @@ csiphy_lane_regs lane_regs_sm8550[] = {
{0x0C64, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
};
+/* 4nm 2PH v 2.1.2 2p5Gbps 4 lane DPHY mode */
+static const struct
+csiphy_lane_regs lane_regs_x1e80100[] = {
+ /* Power up lanes 2ph mode */
+ {0x1014, 0xD5, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x101C, 0x7A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x1018, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+
+ {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0090, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0094, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0000, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0094, 0xD7, 0x00, CSIPHY_SKEW_CAL},
+ {0x005C, 0x00, 0x00, CSIPHY_SKEW_CAL},
+ {0x0060, 0xBD, 0x00, CSIPHY_SKEW_CAL},
+ {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL},
+
+ {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0EA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+
+ {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0490, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0494, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0400, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0494, 0xD7, 0x00, CSIPHY_SKEW_CAL},
+ {0x045C, 0x00, 0x00, CSIPHY_SKEW_CAL},
+ {0x0460, 0xBD, 0x00, CSIPHY_SKEW_CAL},
+ {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL},
+
+ {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0890, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0894, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0800, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0808, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0894, 0xD7, 0x00, CSIPHY_SKEW_CAL},
+ {0x085C, 0x00, 0x00, CSIPHY_SKEW_CAL},
+ {0x0860, 0xBD, 0x00, CSIPHY_SKEW_CAL},
+ {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL},
+
+ {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C00, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C38, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C94, 0xD7, 0x00, CSIPHY_SKEW_CAL},
+ {0x0C5C, 0x00, 0x00, CSIPHY_SKEW_CAL},
+ {0x0C60, 0xBD, 0x00, CSIPHY_SKEW_CAL},
+ {0x0C64, 0x7F, 0x00, CSIPHY_SKEW_CAL},
+};
+
static void csiphy_hw_version_read(struct csiphy_device *csiphy,
struct device *dev)
{
@@ -593,6 +711,9 @@ static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy,
case CSIPHY_SETTLE_CNT_LOWER_BYTE:
val = settle_cnt & 0xff;
break;
+ case CSIPHY_SKEW_CAL:
+ /* TODO: apply skew calibration when a DT flag requests it */
+ continue;
case CSIPHY_DNP_PARAMS:
continue;
default:
@@ -600,6 +721,8 @@ static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy,
break;
}
writel_relaxed(val, csiphy->base + r->reg_addr);
+ if (r->delay_us)
+ udelay(r->delay_us);
}
}
@@ -626,6 +749,7 @@ static bool csiphy_is_gen2(u32 version)
case CAMSS_8280XP:
case CAMSS_845:
case CAMSS_8550:
+ case CAMSS_X1E80100:
ret = true;
break;
}
@@ -714,6 +838,11 @@ static int csiphy_init(struct csiphy_device *csiphy)
regs->lane_regs = &lane_regs_sc8280xp[0];
regs->lane_array_size = ARRAY_SIZE(lane_regs_sc8280xp);
break;
+ case CAMSS_X1E80100:
+ regs->lane_regs = &lane_regs_x1e80100[0];
+ regs->lane_array_size = ARRAY_SIZE(lane_regs_x1e80100);
+ regs->offset = 0x1000;
+ break;
case CAMSS_8550:
regs->lane_regs = &lane_regs_sm8550[0];
regs->lane_array_size = ARRAY_SIZE(lane_regs_sm8550);
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index c053616558a7..c622efcc92ff 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -586,7 +586,7 @@ int msm_csiphy_subdev_init(struct camss *camss,
{
struct device *dev = camss->dev;
struct platform_device *pdev = to_platform_device(dev);
- int i, j, k;
+ int i, j;
int ret;
csiphy->camss = camss;
@@ -680,23 +680,21 @@ int msm_csiphy_subdev_init(struct camss *camss,
for (j = 0; j < clock->nfreqs; j++)
clock->freq[j] = res->clock_rate[i][j];
- for (k = 0; k < camss->res->csiphy_num; k++) {
- csiphy->rate_set[i] = csiphy_match_clock_name(clock->name,
- "csiphy%d_timer", k);
- if (csiphy->rate_set[i])
- break;
-
- if (camss->res->version == CAMSS_660) {
- csiphy->rate_set[i] = csiphy_match_clock_name(clock->name,
- "csi%d_phy", k);
- if (csiphy->rate_set[i])
- break;
- }
+ csiphy->rate_set[i] = csiphy_match_clock_name(clock->name,
+ "csiphy%d_timer",
+ csiphy->id);
+ if (csiphy->rate_set[i])
+ continue;
- csiphy->rate_set[i] = csiphy_match_clock_name(clock->name, "csiphy%d", k);
+ if (camss->res->version == CAMSS_660) {
+ csiphy->rate_set[i] = csiphy_match_clock_name(clock->name,
+ "csi%d_phy",
+ csiphy->id);
if (csiphy->rate_set[i])
- break;
+ continue;
}
+
+ csiphy->rate_set[i] = csiphy_match_clock_name(clock->name, "csiphy%d", csiphy->id);
}
/* CSIPHY supplies */
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.h b/drivers/media/platform/qcom/camss/camss-csiphy.h
index 86b98b37838e..ab91273303b9 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.h
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.h
@@ -81,6 +81,7 @@ struct csiphy_hw_ops {
};
struct csiphy_subdev_resources {
+ u8 id;
const struct csiphy_hw_ops *hw_ops;
const struct csiphy_formats *formats;
};
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-680.c b/drivers/media/platform/qcom/camss/camss-vfe-680.c
new file mode 100644
index 000000000000..99036e7c1e76
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-vfe-680.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-vfe-680.c
+ *
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v680
+ *
+ * Copyright (C) 2025 Linaro Ltd.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+
+#include "camss.h"
+#include "camss-vfe.h"
+
+#define VFE_TOP_IRQn_STATUS(vfe, n) ((vfe_is_lite(vfe) ? 0x1c : 0x44) + (n) * 4)
+#define VFE_TOP_IRQn_MASK(vfe, n) ((vfe_is_lite(vfe) ? 0x24 : 0x34) + (n) * 4)
+#define VFE_TOP_IRQn_CLEAR(vfe, n) ((vfe_is_lite(vfe) ? 0x2c : 0x3c) + (n) * 4)
+#define VFE_IRQ1_SOF(vfe, n) ((vfe_is_lite(vfe) ? BIT(2) : BIT(8)) << ((n) * 2))
+#define VFE_IRQ1_EOF(vfe, n) ((vfe_is_lite(vfe) ? BIT(3) : BIT(9)) << ((n) * 2))
+#define VFE_TOP_IRQ_CMD(vfe) (vfe_is_lite(vfe) ? 0x38 : 0x30)
+#define VFE_TOP_IRQ_CMD_GLOBAL_CLEAR BIT(0)
+#define VFE_TOP_DIAG_CONFIG(vfe) (vfe_is_lite(vfe) ? 0x40 : 0x50)
+
+#define VFE_TOP_DEBUG_11(vfe) (vfe_is_lite(vfe) ? 0x40 : 0xcc)
+#define VFE_TOP_DEBUG_12(vfe) (vfe_is_lite(vfe) ? 0x40 : 0xd0)
+#define VFE_TOP_DEBUG_13(vfe) (vfe_is_lite(vfe) ? 0x40 : 0xd4)
+
+#define VFE_BUS_IRQn_MASK(vfe, n) ((vfe_is_lite(vfe) ? 0x218 : 0xc18) + (n) * 4)
+#define VFE_BUS_IRQn_CLEAR(vfe, n) ((vfe_is_lite(vfe) ? 0x220 : 0xc20) + (n) * 4)
+#define VFE_BUS_IRQn_STATUS(vfe, n) ((vfe_is_lite(vfe) ? 0x228 : 0xc28) + (n) * 4)
+#define VFE_BUS_IRQ_GLOBAL_CLEAR(vfe) (vfe_is_lite(vfe) ? 0x230 : 0xc30)
+#define VFE_BUS_WR_VIOLATION_STATUS(vfe) (vfe_is_lite(vfe) ? 0x264 : 0xc64)
+#define VFE_BUS_WR_OVERFLOW_STATUS(vfe) (vfe_is_lite(vfe) ? 0x268 : 0xc68)
+#define VFE_BUS_WR_IMAGE_VIOLATION_STATUS(vfe) (vfe_is_lite(vfe) ? 0x270 : 0xc70)
+
+#define VFE_BUS_WRITE_CLIENT_CFG(vfe, c) ((vfe_is_lite(vfe) ? 0x400 : 0xe00) + (c) * 0x100)
+#define VFE_BUS_WRITE_CLIENT_CFG_EN BIT(0)
+#define VFE_BUS_IMAGE_ADDR(vfe, c) ((vfe_is_lite(vfe) ? 0x404 : 0xe04) + (c) * 0x100)
+#define VFE_BUS_FRAME_INCR(vfe, c) ((vfe_is_lite(vfe) ? 0x408 : 0xe08) + (c) * 0x100)
+#define VFE_BUS_IMAGE_CFG0(vfe, c) ((vfe_is_lite(vfe) ? 0x40c : 0xe0c) + (c) * 0x100)
+#define VFE_BUS_IMAGE_CFG0_DATA(h, s) (((h) << 16) | ((s) >> 4))
+#define WM_IMAGE_CFG_0_DEFAULT_WIDTH (0xFFFF)
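+/*
+ * For illustration: IMAGE_CFG0 packs the frame height into bits [31:16]
+ * and the line stride, in 16-byte units, into the low half; a 1080-line
+ * frame with a 1920-byte stride gives (1080 << 16) | (1920 >> 4), i.e.
+ * 0x04380078.
+ */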
+
+#define VFE_BUS_IMAGE_CFG1(vfe, c) ((vfe_is_lite(vfe) ? 0x410 : 0xe10) + (c) * 0x100)
+#define VFE_BUS_IMAGE_CFG2(vfe, c) ((vfe_is_lite(vfe) ? 0x414 : 0xe14) + (c) * 0x100)
+#define VFE_BUS_PACKER_CFG(vfe, c) ((vfe_is_lite(vfe) ? 0x418 : 0xe18) + (c) * 0x100)
+#define VFE_BUS_IRQ_SUBSAMPLE_PERIOD(vfe, c) ((vfe_is_lite(vfe) ? 0x430 : 0xe30) + (c) * 0x100)
+#define VFE_BUS_IRQ_SUBSAMPLE_PATTERN(vfe, c) ((vfe_is_lite(vfe) ? 0x434 : 0xe34) + (c) * 0x100)
+#define VFE_BUS_FRAMEDROP_PERIOD(vfe, c) ((vfe_is_lite(vfe) ? 0x438 : 0xe38) + (c) * 0x100)
+#define VFE_BUS_FRAMEDROP_PATTERN(vfe, c) ((vfe_is_lite(vfe) ? 0x43c : 0xe3c) + (c) * 0x100)
+#define VFE_BUS_MMU_PREFETCH_CFG(vfe, c) ((vfe_is_lite(vfe) ? 0x460 : 0xe60) + (c) * 0x100)
+#define VFE_BUS_MMU_PREFETCH_CFG_EN BIT(0)
+#define VFE_BUS_MMU_PREFETCH_MAX_OFFSET(vfe, c) ((vfe_is_lite(vfe) ? 0x464 : 0xe64) + (c) * 0x100)
+#define VFE_BUS_ADDR_STATUS0(vfe, c) ((vfe_is_lite(vfe) ? 0x470 : 0xe70) + (c) * 0x100)
+
+/*
+ * TODO: differentiate the port ID based on the requested type (RDI, BHIST, etc.)
+ *
+ * IFE write master IDs
+ *
+ * VIDEO_FULL_Y 0
+ * VIDEO_FULL_C 1
+ * VIDEO_DS_4:1 2
+ * VIDEO_DS_16:1 3
+ * DISPLAY_FULL_Y 4
+ * DISPLAY_FULL_C 5
+ * DISPLAY_DS_4:1 6
+ * DISPLAY_DS_16:1 7
+ * FD_Y 8
+ * FD_C 9
+ * PIXEL_RAW 10
+ * STATS_BE0 11
+ * STATS_BHIST0 12
+ * STATS_TINTLESS_BG 13
+ * STATS_AWB_BG 14
+ * STATS_AWB_BFW 15
+ * STATS_BAF 16
+ * STATS_BHIST 17
+ * STATS_RS 18
+ * STATS_IHIST 19
+ * SPARSE_PD 20
+ * PDAF_V2.0_PD_DATA 21
+ * PDAF_V2.0_SAD 22
+ * LCR 23
+ * RDI0 24
+ * RDI1 25
+ * RDI2 26
+ * LTM_STATS 27
+ *
+ * IFE Lite write master IDs
+ *
+ * RDI0 0
+ * RDI1 1
+ * RDI2 2
+ * RDI3 3
+ * GAMMA 4
+ * BE 5
+ */
+
+/*
+ * TODO: assign an enum in resources and use the provided master
+ * ID directly for RDI, STATS, AWB_BG, BHIST.
+ * This macro only works because RDI is all we support right now.
+ */
+#define RDI_WM(n) ((vfe_is_lite(vfe) ? 0 : 24) + (n))
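+/*
+ * For illustration: RDI_WM(1) resolves to write master 25 (RDI1) on a
+ * full IFE and to write master 1 (RDI1) on an IFE Lite, per the ID
+ * tables above.
+ */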
+
+static void vfe_global_reset(struct vfe_device *vfe)
+{
+ /* VFE680 has no global reset; simply report completion */
+ complete(&vfe->reset_complete);
+}
+
+/*
+ * vfe_isr - VFE module interrupt handler
+ * @irq: Interrupt line
+ * @dev: VFE device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t vfe_isr(int irq, void *dev)
+{
+ return IRQ_HANDLED;
+}
+
+/*
+ * vfe_halt - Trigger halt on VFE module and wait to complete
+ * @vfe: VFE device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int vfe_halt(struct vfe_device *vfe)
+{
+ /* rely on vfe_disable_output() to stop the VFE */
+ return 0;
+}
+
+static void vfe_disable_irq(struct vfe_device *vfe)
+{
+ writel(0u, vfe->base + VFE_TOP_IRQn_MASK(vfe, 0));
+ writel(0u, vfe->base + VFE_TOP_IRQn_MASK(vfe, 1));
+ writel(0u, vfe->base + VFE_BUS_IRQn_MASK(vfe, 0));
+ writel(0u, vfe->base + VFE_BUS_IRQn_MASK(vfe, 1));
+}
+
+static void vfe_wm_update(struct vfe_device *vfe, u8 rdi, u32 addr,
+ struct vfe_line *line)
+{
+ u8 wm = RDI_WM(rdi);
+
+ writel(addr, vfe->base + VFE_BUS_IMAGE_ADDR(vfe, wm));
+}
+
+static void vfe_wm_start(struct vfe_device *vfe, u8 rdi, struct vfe_line *line)
+{
+ struct v4l2_pix_format_mplane *pix =
+ &line->video_out.active_fmt.fmt.pix_mp;
+ u32 stride = pix->plane_fmt[0].bytesperline;
+ u32 cfg;
+ u8 wm;
+
+ cfg = VFE_BUS_IMAGE_CFG0_DATA(pix->height, stride);
+ wm = RDI_WM(rdi);
+
+ writel(cfg, vfe->base + VFE_BUS_IMAGE_CFG0(vfe, wm));
+ writel(0, vfe->base + VFE_BUS_IMAGE_CFG1(vfe, wm));
+ writel(stride, vfe->base + VFE_BUS_IMAGE_CFG2(vfe, wm));
+ writel(0, vfe->base + VFE_BUS_PACKER_CFG(vfe, wm));
+
+ /* Set total frame increment value */
+ writel(pix->plane_fmt[0].bytesperline * pix->height,
+ vfe->base + VFE_BUS_FRAME_INCR(vfe, wm));
+
+ /* MMU */
+ writel(VFE_BUS_MMU_PREFETCH_CFG_EN, vfe->base + VFE_BUS_MMU_PREFETCH_CFG(vfe, wm));
+ writel(~0u, vfe->base + VFE_BUS_MMU_PREFETCH_MAX_OFFSET(vfe, wm));
+
+ /* no dropped frames, one IRQ per frame */
+ writel(1, vfe->base + VFE_BUS_FRAMEDROP_PATTERN(vfe, wm));
+ writel(0, vfe->base + VFE_BUS_FRAMEDROP_PERIOD(vfe, wm));
+ writel(1, vfe->base + VFE_BUS_IRQ_SUBSAMPLE_PATTERN(vfe, wm));
+ writel(0, vfe->base + VFE_BUS_IRQ_SUBSAMPLE_PERIOD(vfe, wm));
+
+ /* We don't process IRQs for VFE in RDI mode at the moment */
+ vfe_disable_irq(vfe);
+
+ /* Enable WM */
+ writel(VFE_BUS_WRITE_CLIENT_CFG_EN,
+ vfe->base + VFE_BUS_WRITE_CLIENT_CFG(vfe, wm));
+
+ dev_dbg(vfe->camss->dev, "RDI%d WM:%d width %d height %d stride %d\n",
+ rdi, wm, pix->width, pix->height, stride);
+}
+
+static void vfe_wm_stop(struct vfe_device *vfe, u8 rdi)
+{
+ u8 wm = RDI_WM(rdi);
+
+ writel(0, vfe->base + VFE_BUS_WRITE_CLIENT_CFG(vfe, wm));
+}
+
+static const struct camss_video_ops vfe_video_ops_680 = {
+ .queue_buffer = vfe_queue_buffer_v2,
+ .flush_buffers = vfe_flush_buffers,
+};
+
+static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
+{
+ vfe->video_ops = vfe_video_ops_680;
+}
+
+static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
+ int port_id = line_id;
+
+ camss_reg_update(vfe->camss, vfe->id, port_id, false);
+}
+
+static inline void vfe_reg_update_clear(struct vfe_device *vfe,
+ enum vfe_line_id line_id)
+{
+ int port_id = line_id;
+
+ camss_reg_update(vfe->camss, vfe->id, port_id, true);
+}
+
+const struct vfe_hw_ops vfe_ops_680 = {
+ .global_reset = vfe_global_reset,
+ .hw_version = vfe_hw_version,
+ .isr = vfe_isr,
+ .pm_domain_off = vfe_pm_domain_off,
+ .pm_domain_on = vfe_pm_domain_on,
+ .subdev_init = vfe_subdev_init,
+ .vfe_disable = vfe_disable,
+ .vfe_enable = vfe_enable_v2,
+ .vfe_halt = vfe_halt,
+ .vfe_wm_start = vfe_wm_start,
+ .vfe_wm_stop = vfe_wm_stop,
+ .vfe_buf_done = vfe_buf_done,
+ .vfe_wm_update = vfe_wm_update,
+ .reg_update = vfe_reg_update,
+ .reg_update_clear = vfe_reg_update_clear,
+};
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index cf0e8f5c004a..4bca6c3abaff 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -346,6 +346,7 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
case CAMSS_8280XP:
case CAMSS_845:
case CAMSS_8550:
+ case CAMSS_X1E80100:
switch (sink_code) {
case MEDIA_BUS_FMT_YUYV8_1X16:
{
@@ -428,8 +429,8 @@ u32 vfe_hw_version(struct vfe_device *vfe)
u32 rev = (hw_version >> HW_VERSION_REVISION) & 0xFFF;
u32 step = (hw_version >> HW_VERSION_STEPPING) & 0xFFFF;
- dev_info(vfe->camss->dev, "VFE:%d HW Version = %u.%u.%u\n",
- vfe->id, gen, rev, step);
+ dev_dbg(vfe->camss->dev, "VFE:%d HW Version = %u.%u.%u\n",
+ vfe->id, gen, rev, step);
return hw_version;
}
@@ -1973,6 +1974,7 @@ static int vfe_bpl_align(struct vfe_device *vfe)
case CAMSS_8280XP:
case CAMSS_845:
case CAMSS_8550:
+ case CAMSS_X1E80100:
ret = 16;
break;
default:
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.h b/drivers/media/platform/qcom/camss/camss-vfe.h
index 9dec5bc0d1b1..a23f666be753 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.h
+++ b/drivers/media/platform/qcom/camss/camss-vfe.h
@@ -243,6 +243,7 @@ extern const struct vfe_hw_ops vfe_ops_4_7;
extern const struct vfe_hw_ops vfe_ops_4_8;
extern const struct vfe_hw_ops vfe_ops_170;
extern const struct vfe_hw_ops vfe_ops_480;
+extern const struct vfe_hw_ops vfe_ops_680;
extern const struct vfe_hw_ops vfe_ops_780;
int vfe_get(struct vfe_device *vfe);
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index 6791dfea91b1..06f42875702f 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -46,6 +46,7 @@ static const struct camss_subdev_resources csiphy_res_8x16[] = {
.reg = { "csiphy0", "csiphy0_clk_mux" },
.interrupt = { "csiphy0" },
.csiphy = {
+ .id = 0,
.hw_ops = &csiphy_ops_2ph_1_0,
.formats = &csiphy_formats_8x16
}
@@ -62,6 +63,7 @@ static const struct camss_subdev_resources csiphy_res_8x16[] = {
.reg = { "csiphy1", "csiphy1_clk_mux" },
.interrupt = { "csiphy1" },
.csiphy = {
+ .id = 1,
.hw_ops = &csiphy_ops_2ph_1_0,
.formats = &csiphy_formats_8x16
}
@@ -318,6 +320,7 @@ static const struct camss_subdev_resources csiphy_res_8x96[] = {
.reg = { "csiphy0", "csiphy0_clk_mux" },
.interrupt = { "csiphy0" },
.csiphy = {
+ .id = 0,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_8x96
}
@@ -334,6 +337,7 @@ static const struct camss_subdev_resources csiphy_res_8x96[] = {
.reg = { "csiphy1", "csiphy1_clk_mux" },
.interrupt = { "csiphy1" },
.csiphy = {
+ .id = 1,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_8x96
}
@@ -350,6 +354,7 @@ static const struct camss_subdev_resources csiphy_res_8x96[] = {
.reg = { "csiphy2", "csiphy2_clk_mux" },
.interrupt = { "csiphy2" },
.csiphy = {
+ .id = 2,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_8x96
}
@@ -524,6 +529,7 @@ static const struct camss_subdev_resources csiphy_res_660[] = {
.reg = { "csiphy0", "csiphy0_clk_mux" },
.interrupt = { "csiphy0" },
.csiphy = {
+ .id = 0,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_8x96
}
@@ -542,6 +548,7 @@ static const struct camss_subdev_resources csiphy_res_660[] = {
.reg = { "csiphy1", "csiphy1_clk_mux" },
.interrupt = { "csiphy1" },
.csiphy = {
+ .id = 1,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_8x96
}
@@ -560,6 +567,7 @@ static const struct camss_subdev_resources csiphy_res_660[] = {
.reg = { "csiphy2", "csiphy2_clk_mux" },
.interrupt = { "csiphy2" },
.csiphy = {
+ .id = 2,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_8x96
}
@@ -751,6 +759,7 @@ static const struct camss_subdev_resources csiphy_res_670[] = {
.reg = { "csiphy0" },
.interrupt = { "csiphy0" },
.csiphy = {
+ .id = 0,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -768,6 +777,7 @@ static const struct camss_subdev_resources csiphy_res_670[] = {
.reg = { "csiphy1" },
.interrupt = { "csiphy1" },
.csiphy = {
+ .id = 1,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -785,6 +795,7 @@ static const struct camss_subdev_resources csiphy_res_670[] = {
.reg = { "csiphy2" },
.interrupt = { "csiphy2" },
.csiphy = {
+ .id = 2,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -935,6 +946,7 @@ static const struct camss_subdev_resources csiphy_res_845[] = {
.reg = { "csiphy0" },
.interrupt = { "csiphy0" },
.csiphy = {
+ .id = 0,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -957,6 +969,7 @@ static const struct camss_subdev_resources csiphy_res_845[] = {
.reg = { "csiphy1" },
.interrupt = { "csiphy1" },
.csiphy = {
+ .id = 1,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -979,6 +992,7 @@ static const struct camss_subdev_resources csiphy_res_845[] = {
.reg = { "csiphy2" },
.interrupt = { "csiphy2" },
.csiphy = {
+ .id = 2,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1001,6 +1015,7 @@ static const struct camss_subdev_resources csiphy_res_845[] = {
.reg = { "csiphy3" },
.interrupt = { "csiphy3" },
.csiphy = {
+ .id = 3,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1179,6 +1194,7 @@ static const struct camss_subdev_resources csiphy_res_8250[] = {
.reg = { "csiphy0" },
.interrupt = { "csiphy0" },
.csiphy = {
+ .id = 0,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1192,6 +1208,7 @@ static const struct camss_subdev_resources csiphy_res_8250[] = {
.reg = { "csiphy1" },
.interrupt = { "csiphy1" },
.csiphy = {
+ .id = 1,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1205,6 +1222,7 @@ static const struct camss_subdev_resources csiphy_res_8250[] = {
.reg = { "csiphy2" },
.interrupt = { "csiphy2" },
.csiphy = {
+ .id = 2,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1218,6 +1236,7 @@ static const struct camss_subdev_resources csiphy_res_8250[] = {
.reg = { "csiphy3" },
.interrupt = { "csiphy3" },
.csiphy = {
+ .id = 3,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1231,6 +1250,7 @@ static const struct camss_subdev_resources csiphy_res_8250[] = {
.reg = { "csiphy4" },
.interrupt = { "csiphy4" },
.csiphy = {
+ .id = 4,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1244,6 +1264,7 @@ static const struct camss_subdev_resources csiphy_res_8250[] = {
.reg = { "csiphy5" },
.interrupt = { "csiphy5" },
.csiphy = {
+ .id = 5,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1458,6 +1479,7 @@ static const struct camss_subdev_resources csiphy_res_7280[] = {
.reg = { "csiphy0" },
.interrupt = { "csiphy0" },
.csiphy = {
+ .id = 0,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sc7280
}
@@ -1472,6 +1494,7 @@ static const struct camss_subdev_resources csiphy_res_7280[] = {
.reg = { "csiphy1" },
.interrupt = { "csiphy1" },
.csiphy = {
+ .id = 1,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sc7280
}
@@ -1486,6 +1509,7 @@ static const struct camss_subdev_resources csiphy_res_7280[] = {
.reg = { "csiphy2" },
.interrupt = { "csiphy2" },
.csiphy = {
+ .id = 2,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sc7280
}
@@ -1500,6 +1524,7 @@ static const struct camss_subdev_resources csiphy_res_7280[] = {
.reg = { "csiphy3" },
.interrupt = { "csiphy3" },
.csiphy = {
+ .id = 3,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sc7280
}
@@ -1514,6 +1539,7 @@ static const struct camss_subdev_resources csiphy_res_7280[] = {
.reg = { "csiphy4" },
.interrupt = { "csiphy4" },
.csiphy = {
+ .id = 4,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sc7280
}
@@ -1766,6 +1792,7 @@ static const struct camss_subdev_resources csiphy_res_sc8280xp[] = {
.reg = { "csiphy0" },
.interrupt = { "csiphy0" },
.csiphy = {
+ .id = 0,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1779,6 +1806,7 @@ static const struct camss_subdev_resources csiphy_res_sc8280xp[] = {
.reg = { "csiphy1" },
.interrupt = { "csiphy1" },
.csiphy = {
+ .id = 1,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1792,6 +1820,7 @@ static const struct camss_subdev_resources csiphy_res_sc8280xp[] = {
.reg = { "csiphy2" },
.interrupt = { "csiphy2" },
.csiphy = {
+ .id = 2,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1805,6 +1834,7 @@ static const struct camss_subdev_resources csiphy_res_sc8280xp[] = {
.reg = { "csiphy3" },
.interrupt = { "csiphy3" },
.csiphy = {
+ .id = 3,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -2134,6 +2164,7 @@ static const struct camss_subdev_resources csiphy_res_8550[] = {
.reg = { "csiphy0" },
.interrupt = { "csiphy0" },
.csiphy = {
+ .id = 0,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -2147,6 +2178,7 @@ static const struct camss_subdev_resources csiphy_res_8550[] = {
.reg = { "csiphy1" },
.interrupt = { "csiphy1" },
.csiphy = {
+ .id = 1,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -2160,6 +2192,7 @@ static const struct camss_subdev_resources csiphy_res_8550[] = {
.reg = { "csiphy2" },
.interrupt = { "csiphy2" },
.csiphy = {
+ .id = 2,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -2173,6 +2206,7 @@ static const struct camss_subdev_resources csiphy_res_8550[] = {
.reg = { "csiphy3" },
.interrupt = { "csiphy3" },
.csiphy = {
+ .id = 3,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -2186,6 +2220,7 @@ static const struct camss_subdev_resources csiphy_res_8550[] = {
.reg = { "csiphy4" },
.interrupt = { "csiphy4" },
.csiphy = {
+ .id = 4,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -2199,6 +2234,7 @@ static const struct camss_subdev_resources csiphy_res_8550[] = {
.reg = { "csiphy5" },
.interrupt = { "csiphy5" },
.csiphy = {
+ .id = 5,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -2212,6 +2248,7 @@ static const struct camss_subdev_resources csiphy_res_8550[] = {
.reg = { "csiphy6" },
.interrupt = { "csiphy6" },
.csiphy = {
+ .id = 6,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -2225,6 +2262,7 @@ static const struct camss_subdev_resources csiphy_res_8550[] = {
.reg = { "csiphy7" },
.interrupt = { "csiphy7" },
.csiphy = {
+ .id = 7,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -2445,6 +2483,299 @@ static const struct resources_icc icc_res_sm8550[] = {
},
};
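+
+/*
+ * X1E80100 wires up CSIPHY0, CSIPHY1, CSIPHY2 and CSIPHY4 only; each
+ * entry below carries an explicit .id instead of relying on its array
+ * position, so clock-name matching survives the CSIPHY3 gap.
+ */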
+static const struct camss_subdev_resources csiphy_res_x1e80100[] = {
+ /* CSIPHY0 */
+ {
+ .regulators = { "vdd-csiphy-0p8-supply",
+ "vdd-csiphy-1p2-supply" },
+ .clock = { "csiphy0", "csiphy0_timer" },
+ .clock_rate = { { 300000000, 400000000, 480000000 },
+ { 266666667, 400000000 } },
+ .reg = { "csiphy0" },
+ .interrupt = { "csiphy0" },
+ .csiphy = {
+ .id = 0,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ },
+ },
+ /* CSIPHY1 */
+ {
+ .regulators = { "vdd-csiphy-0p8-supply",
+ "vdd-csiphy-1p2-supply" },
+ .clock = { "csiphy1", "csiphy1_timer" },
+ .clock_rate = { { 300000000, 400000000, 480000000 },
+ { 266666667, 400000000 } },
+ .reg = { "csiphy1" },
+ .interrupt = { "csiphy1" },
+ .csiphy = {
+ .id = 1,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ },
+ },
+ /* CSIPHY2 */
+ {
+ .regulators = { "vdd-csiphy-0p8-supply",
+ "vdd-csiphy-1p2-supply" },
+ .clock = { "csiphy2", "csiphy2_timer" },
+ .clock_rate = { { 300000000, 400000000, 480000000 },
+ { 266666667, 400000000 } },
+ .reg = { "csiphy2" },
+ .interrupt = { "csiphy2" },
+ .csiphy = {
+ .id = 2,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ },
+ },
+ /* CSIPHY4 */
+ {
+ .regulators = { "vdd-csiphy-0p8-supply",
+ "vdd-csiphy-1p2-supply" },
+ .clock = { "csiphy4", "csiphy4_timer" },
+ .clock_rate = { { 300000000, 400000000, 480000000 },
+ { 266666667, 400000000 } },
+ .reg = { "csiphy4" },
+ .interrupt = { "csiphy4" },
+ .csiphy = {
+ .id = 4,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ },
+ },
+};
+
+static const struct camss_subdev_resources csid_res_x1e80100[] = {
+ /* CSID0 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "gcc_axi_sf", "cpas_ahb",
+ "cpas_fast_ahb", "csid", "csid_csiphy_rx" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 64000000, 80000000 },
+ { 80000000, 100000000, 200000000,
+ 300000000, 400000000 },
+ { 300000000, 400000000, 480000000 },
+ { 300000000, 400000000, 480000000 }, },
+ .reg = { "csid0" },
+ .interrupt = { "csid0" },
+ .csid = {
+ .hw_ops = &csid_ops_680,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ },
+ },
+ /* CSID1 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "gcc_axi_sf", "cpas_ahb",
+ "cpas_fast_ahb", "csid", "csid_csiphy_rx" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 64000000, 80000000 },
+ { 80000000, 100000000, 200000000,
+ 300000000, 400000000 },
+ { 300000000, 400000000, 480000000 },
+ { 300000000, 400000000, 480000000 }, },
+ .reg = { "csid1" },
+ .interrupt = { "csid1" },
+ .csid = {
+ .hw_ops = &csid_ops_680,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ },
+ },
+ /* CSID2 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "gcc_axi_sf", "cpas_ahb",
+ "cpas_fast_ahb", "csid", "csid_csiphy_rx" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 64000000, 80000000 },
+ { 80000000, 100000000, 200000000,
+ 300000000, 400000000 },
+ { 300000000, 400000000, 480000000 },
+ { 300000000, 400000000, 480000000 }, },
+ .reg = { "csid2" },
+ .interrupt = { "csid2" },
+ .csid = {
+ .hw_ops = &csid_ops_680,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ },
+ },
+ /* CSID_LITE0 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "gcc_axi_sf", "cpas_ahb",
+ "cpas_fast_ahb", "csid", "csid_csiphy_rx" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 64000000, 80000000 },
+ { 80000000, 100000000, 200000000,
+ 300000000, 400000000 },
+ { 300000000, 400000000, 480000000 },
+ { 300000000, 400000000, 480000000 }, },
+ .reg = { "csid_lite0" },
+ .interrupt = { "csid_lite0" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_680,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID_LITE1 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "gcc_axi_sf", "cpas_ahb",
+ "cpas_fast_ahb", "csid", "csid_csiphy_rx" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 64000000, 80000000 },
+ { 80000000, 100000000, 200000000,
+ 300000000, 400000000 },
+ { 300000000, 400000000, 480000000 },
+ { 300000000, 400000000, 480000000 }, },
+ .reg = { "csid_lite1" },
+ .interrupt = { "csid_lite1" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_680,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+};
+
+static const struct camss_subdev_resources vfe_res_x1e80100[] = {
+ /* IFE0 */
+ {
+ .regulators = {},
+ .clock = {"camnoc_rt_axi", "camnoc_nrt_axi", "cpas_ahb",
+ "cpas_fast_ahb", "cpas_vfe0", "vfe0_fast_ahb",
+ "vfe0" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 345600000, 432000000, 594000000, 675000000,
+ 727000000 }, },
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" },
+ .vfe = {
+ .line_num = 4,
+ .pd_name = "ife0",
+ .hw_ops = &vfe_ops_680,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ },
+ },
+ /* IFE1 */
+ {
+ .regulators = {},
+ .clock = { "camnoc_rt_axi", "camnoc_nrt_axi", "cpas_ahb",
+ "cpas_fast_ahb", "cpas_vfe1", "vfe1_fast_ahb",
+ "vfe1" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 345600000, 432000000, 594000000, 675000000,
+ 727000000 }, },
+ .reg = { "vfe1" },
+ .interrupt = { "vfe1" },
+ .vfe = {
+ .line_num = 4,
+ .pd_name = "ife1",
+ .hw_ops = &vfe_ops_680,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ },
+ },
+ /* IFE_LITE_0 */
+ {
+ .regulators = {},
+ .clock = { "camnoc_rt_axi", "camnoc_nrt_axi", "cpas_ahb",
+ "vfe_lite_ahb", "cpas_vfe_lite", "vfe_lite",
+ "vfe_lite_csid" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 266666667, 400000000, 480000000 },
+ { 266666667, 400000000, 480000000 }, },
+ .reg = { "vfe_lite0" },
+ .interrupt = { "vfe_lite0" },
+ .vfe = {
+ .is_lite = true,
+ .line_num = 4,
+ .hw_ops = &vfe_ops_680,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ },
+ },
+ /* IFE_LITE_1 */
+ {
+ .regulators = {},
+ .clock = { "camnoc_rt_axi", "camnoc_nrt_axi", "cpas_ahb",
+ "vfe_lite_ahb", "cpas_vfe_lite", "vfe_lite",
+ "vfe_lite_csid" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 266666667, 400000000, 480000000 },
+ { 266666667, 400000000, 480000000 }, },
+ .reg = { "vfe_lite1" },
+ .interrupt = { "vfe_lite1" },
+ .vfe = {
+ .is_lite = true,
+ .line_num = 4,
+ .hw_ops = &vfe_ops_680,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ },
+ },
+};
+
+static const struct resources_icc icc_res_x1e80100[] = {
+ {
+ .name = "ahb",
+ .icc_bw_tbl.avg = 150000,
+ .icc_bw_tbl.peak = 300000,
+ },
+ {
+ .name = "hf_mnoc",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+ {
+ .name = "sf_mnoc",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+ {
+ .name = "sf_icp_mnoc",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+};
+
+static const struct resources_wrapper csid_wrapper_res_x1e80100 = {
+ .reg = "csid_wrapper",
+};
+
/*
* camss_add_clock_margin - Add margin to clock frequency rate
* @rate: Clock frequency rate
@@ -2663,6 +2994,15 @@ static int camss_of_parse_endpoint_node(struct device *dev,
if (ret)
return ret;
+ /*
+ * Most SoCs support both D-PHY and C-PHY standards, but currently only
+ * D-PHY is supported in the driver.
+ */
+ if (vep.bus_type != V4L2_MBUS_CSI2_DPHY) {
+ dev_err(dev, "Unsupported bus type %d\n", vep.bus_type);
+ return -EINVAL;
+ }
+
csd->interface.csiphy_id = vep.base.port;
mipi_csi2 = &vep.bus.mipi_csi2;
@@ -2749,7 +3089,8 @@ static int camss_init_subdevices(struct camss *camss)
for (i = 0; i < camss->res->csiphy_num; i++) {
ret = msm_csiphy_subdev_init(camss, &camss->csiphy[i],
- &res->csiphy_res[i], i);
+ &res->csiphy_res[i],
+ res->csiphy_res[i].csiphy.id);
if (ret < 0) {
dev_err(camss->dev,
"Failed to init csiphy%d sub-device: %d\n",
@@ -3505,6 +3846,21 @@ static const struct camss_resources sm8550_resources = {
.link_entities = camss_link_entities
};
+static const struct camss_resources x1e80100_resources = {
+ .version = CAMSS_X1E80100,
+ .pd_name = "top",
+ .csiphy_res = csiphy_res_x1e80100,
+ .csid_res = csid_res_x1e80100,
+ .vfe_res = vfe_res_x1e80100,
+ .csid_wrapper_res = &csid_wrapper_res_x1e80100,
+ .icc_res = icc_res_x1e80100,
+ .icc_path_num = ARRAY_SIZE(icc_res_x1e80100),
+ .csiphy_num = ARRAY_SIZE(csiphy_res_x1e80100),
+ .csid_num = ARRAY_SIZE(csid_res_x1e80100),
+ .vfe_num = ARRAY_SIZE(vfe_res_x1e80100),
+ .link_entities = camss_link_entities
+};
+
static const struct of_device_id camss_dt_match[] = {
{ .compatible = "qcom,msm8916-camss", .data = &msm8916_resources },
{ .compatible = "qcom,msm8953-camss", .data = &msm8953_resources },
@@ -3516,6 +3872,7 @@ static const struct of_device_id camss_dt_match[] = {
{ .compatible = "qcom,sdm845-camss", .data = &sdm845_resources },
{ .compatible = "qcom,sm8250-camss", .data = &sm8250_resources },
{ .compatible = "qcom,sm8550-camss", .data = &sm8550_resources },
+ { .compatible = "qcom,x1e80100-camss", .data = &x1e80100_resources },
{ }
};
diff --git a/drivers/media/platform/qcom/camss/camss.h b/drivers/media/platform/qcom/camss/camss.h
index b284b910ce42..63c0afee154a 100644
--- a/drivers/media/platform/qcom/camss/camss.h
+++ b/drivers/media/platform/qcom/camss/camss.h
@@ -86,6 +86,7 @@ enum camss_version {
CAMSS_8280XP,
CAMSS_845,
CAMSS_8550,
+ CAMSS_X1E80100,
};
enum icc_count {
diff --git a/drivers/media/platform/qcom/iris/Makefile b/drivers/media/platform/qcom/iris/Makefile
index 35390534534e..e86d00ee6f15 100644
--- a/drivers/media/platform/qcom/iris/Makefile
+++ b/drivers/media/platform/qcom/iris/Makefile
@@ -10,7 +10,7 @@ qcom-iris-objs += \
iris_hfi_gen2_packet.o \
iris_hfi_gen2_response.o \
iris_hfi_queue.o \
- iris_platform_sm8550.o \
+ iris_platform_gen2.o \
iris_power.o \
iris_probe.o \
iris_resources.o \
@@ -20,7 +20,7 @@ qcom-iris-objs += \
iris_vb2.o \
iris_vdec.o \
iris_vpu2.o \
- iris_vpu3.o \
+ iris_vpu3x.o \
iris_vpu_buffer.o \
iris_vpu_common.o \
diff --git a/drivers/media/platform/qcom/iris/iris_core.h b/drivers/media/platform/qcom/iris/iris_core.h
index 37fb4919fecc..aeeac32a1f6d 100644
--- a/drivers/media/platform/qcom/iris/iris_core.h
+++ b/drivers/media/platform/qcom/iris/iris_core.h
@@ -43,6 +43,7 @@ struct icc_info {
* @clock_tbl: table of iris clocks
* @clk_count: count of iris clocks
* @resets: table of iris reset clocks
+ * @controller_resets: table of controller reset clocks
* @iris_platform_data: a structure for platform data
* @state: current state of core
* @iface_q_table_daddr: device address for interface queue table memory
@@ -82,6 +83,7 @@ struct iris_core {
struct clk_bulk_data *clock_tbl;
u32 clk_count;
struct reset_control_bulk_data *resets;
+ struct reset_control_bulk_data *controller_resets;
const struct iris_platform_data *iris_platform_data;
enum iris_core_state state;
dma_addr_t iface_q_table_daddr;
diff --git a/drivers/media/platform/qcom/iris/iris_firmware.c b/drivers/media/platform/qcom/iris/iris_firmware.c
index 7c493b4a75db..f1b5cd56db32 100644
--- a/drivers/media/platform/qcom/iris/iris_firmware.c
+++ b/drivers/media/platform/qcom/iris/iris_firmware.c
@@ -53,8 +53,10 @@ static int iris_load_fw_to_memory(struct iris_core *core, const char *fw_name)
}
mem_virt = memremap(mem_phys, res_size, MEMREMAP_WC);
- if (!mem_virt)
+ if (!mem_virt) {
+ ret = -ENOMEM;
goto err_release_fw;
+ }
ret = qcom_mdt_load(dev, firmware, fw_name,
pas_id, mem_virt, mem_phys, res_size, NULL);
diff --git a/drivers/media/platform/qcom/iris/iris_platform_common.h b/drivers/media/platform/qcom/iris/iris_platform_common.h
index f6b15d2805fb..ac76d9e1ef9c 100644
--- a/drivers/media/platform/qcom/iris/iris_platform_common.h
+++ b/drivers/media/platform/qcom/iris/iris_platform_common.h
@@ -33,8 +33,10 @@ enum pipe_type {
PIPE_4 = 4,
};
+extern struct iris_platform_data qcs8300_data;
extern struct iris_platform_data sm8250_data;
extern struct iris_platform_data sm8550_data;
+extern struct iris_platform_data sm8650_data;
enum platform_clk_type {
IRIS_AXI_CLK,
@@ -156,6 +158,8 @@ struct iris_platform_data {
unsigned int clk_tbl_size;
const char * const *clk_rst_tbl;
unsigned int clk_rst_tbl_size;
+ const char * const *controller_rst_tbl;
+ unsigned int controller_rst_tbl_size;
u64 dma_mask;
const char *fwname;
u32 pas_id;
diff --git a/drivers/media/platform/qcom/iris/iris_platform_sm8550.c b/drivers/media/platform/qcom/iris/iris_platform_gen2.c
index 35d278996c43..1e69ba15db0f 100644
--- a/drivers/media/platform/qcom/iris/iris_platform_sm8550.c
+++ b/drivers/media/platform/qcom/iris/iris_platform_gen2.c
@@ -10,6 +10,9 @@
#include "iris_platform_common.h"
#include "iris_vpu_common.h"
+#include "iris_platform_qcs8300.h"
+#include "iris_platform_sm8650.h"
+
#define VIDEO_ARCH_LX 1
static struct platform_inst_fw_cap inst_fw_cap_sm8550[] = {
@@ -264,3 +267,119 @@ struct iris_platform_data sm8550_data = {
.dec_op_int_buf_tbl = sm8550_dec_op_int_buf_tbl,
.dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_op_int_buf_tbl),
};
+
+/*
+ * SM8650 shares most of the SM8550 platform data, with these
+ * exceptions:
+ * - vpu_ops points to iris_vpu33_ops
+ * - clk_rst_tbl points to sm8650_clk_reset_table
+ * - controller_rst_tbl points to sm8650_controller_reset_table
+ * - fwname is "qcom/vpu/vpu33_p4.mbn"
+ */
+struct iris_platform_data sm8650_data = {
+ .get_instance = iris_hfi_gen2_get_instance,
+ .init_hfi_command_ops = iris_hfi_gen2_command_ops_init,
+ .init_hfi_response_ops = iris_hfi_gen2_response_ops_init,
+ .vpu_ops = &iris_vpu33_ops,
+ .set_preset_registers = iris_set_sm8550_preset_registers,
+ .icc_tbl = sm8550_icc_table,
+ .icc_tbl_size = ARRAY_SIZE(sm8550_icc_table),
+ .clk_rst_tbl = sm8650_clk_reset_table,
+ .clk_rst_tbl_size = ARRAY_SIZE(sm8650_clk_reset_table),
+ .controller_rst_tbl = sm8650_controller_reset_table,
+ .controller_rst_tbl_size = ARRAY_SIZE(sm8650_controller_reset_table),
+ .bw_tbl_dec = sm8550_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sm8550_bw_table_dec),
+ .pmdomain_tbl = sm8550_pmdomain_table,
+ .pmdomain_tbl_size = ARRAY_SIZE(sm8550_pmdomain_table),
+ .opp_pd_tbl = sm8550_opp_pd_table,
+ .opp_pd_tbl_size = ARRAY_SIZE(sm8550_opp_pd_table),
+ .clk_tbl = sm8550_clk_table,
+ .clk_tbl_size = ARRAY_SIZE(sm8550_clk_table),
+ /* Upper bound of DMA address range */
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/vpu/vpu33_p4.mbn",
+ .pas_id = IRIS_PAS_ID,
+ .inst_caps = &platform_inst_cap_sm8550,
+ .inst_fw_caps = inst_fw_cap_sm8550,
+ .inst_fw_caps_size = ARRAY_SIZE(inst_fw_cap_sm8550),
+ .tz_cp_config_data = &tz_cp_config_sm8550,
+ .core_arch = VIDEO_ARCH_LX,
+ .hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
+ .ubwc_config = &ubwc_config_sm8550,
+ .num_vpp_pipe = 4,
+ .max_session_count = 16,
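+ /* two 8192x4352 frames' worth of 16x16 macroblocks */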
+ .max_core_mbpf = ((8192 * 4352) / 256) * 2,
+ .input_config_params =
+ sm8550_vdec_input_config_params,
+ .input_config_params_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_params),
+ .output_config_params =
+ sm8550_vdec_output_config_params,
+ .output_config_params_size =
+ ARRAY_SIZE(sm8550_vdec_output_config_params),
+ .dec_input_prop = sm8550_vdec_subscribe_input_properties,
+ .dec_input_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_input_properties),
+ .dec_output_prop = sm8550_vdec_subscribe_output_properties,
+ .dec_output_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_output_properties),
+
+ .dec_ip_int_buf_tbl = sm8550_dec_ip_int_buf_tbl,
+ .dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_ip_int_buf_tbl),
+ .dec_op_int_buf_tbl = sm8550_dec_op_int_buf_tbl,
+ .dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_op_int_buf_tbl),
+};
+
+/*
+ * QCS8300 shares most of the SM8550 platform data, with these
+ * exceptions:
+ * - inst_caps points to platform_inst_cap_qcs8300
+ * - inst_fw_caps points to inst_fw_cap_qcs8300
+ */
+struct iris_platform_data qcs8300_data = {
+ .get_instance = iris_hfi_gen2_get_instance,
+ .init_hfi_command_ops = iris_hfi_gen2_command_ops_init,
+ .init_hfi_response_ops = iris_hfi_gen2_response_ops_init,
+ .vpu_ops = &iris_vpu3_ops,
+ .set_preset_registers = iris_set_sm8550_preset_registers,
+ .icc_tbl = sm8550_icc_table,
+ .icc_tbl_size = ARRAY_SIZE(sm8550_icc_table),
+ .clk_rst_tbl = sm8550_clk_reset_table,
+ .clk_rst_tbl_size = ARRAY_SIZE(sm8550_clk_reset_table),
+ .bw_tbl_dec = sm8550_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sm8550_bw_table_dec),
+ .pmdomain_tbl = sm8550_pmdomain_table,
+ .pmdomain_tbl_size = ARRAY_SIZE(sm8550_pmdomain_table),
+ .opp_pd_tbl = sm8550_opp_pd_table,
+ .opp_pd_tbl_size = ARRAY_SIZE(sm8550_opp_pd_table),
+ .clk_tbl = sm8550_clk_table,
+ .clk_tbl_size = ARRAY_SIZE(sm8550_clk_table),
+ /* Upper bound of DMA address range */
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/vpu/vpu30_p4_s6.mbn",
+ .pas_id = IRIS_PAS_ID,
+ .inst_caps = &platform_inst_cap_qcs8300,
+ .inst_fw_caps = inst_fw_cap_qcs8300,
+ .inst_fw_caps_size = ARRAY_SIZE(inst_fw_cap_qcs8300),
+ .tz_cp_config_data = &tz_cp_config_sm8550,
+ .core_arch = VIDEO_ARCH_LX,
+ .hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
+ .ubwc_config = &ubwc_config_sm8550,
+ .num_vpp_pipe = 2,
+ .max_session_count = 16,
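+ /* four 4096x2176 frames' worth of 16x16 macroblocks */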
+ .max_core_mbpf = ((4096 * 2176) / 256) * 4,
+ .input_config_params =
+ sm8550_vdec_input_config_params,
+ .input_config_params_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_params),
+ .output_config_params =
+ sm8550_vdec_output_config_params,
+ .output_config_params_size =
+ ARRAY_SIZE(sm8550_vdec_output_config_params),
+ .dec_input_prop = sm8550_vdec_subscribe_input_properties,
+ .dec_input_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_input_properties),
+ .dec_output_prop = sm8550_vdec_subscribe_output_properties,
+ .dec_output_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_output_properties),
+
+ .dec_ip_int_buf_tbl = sm8550_dec_ip_int_buf_tbl,
+ .dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_ip_int_buf_tbl),
+ .dec_op_int_buf_tbl = sm8550_dec_op_int_buf_tbl,
+ .dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_op_int_buf_tbl),
+};
diff --git a/drivers/media/platform/qcom/iris/iris_platform_qcs8300.h b/drivers/media/platform/qcom/iris/iris_platform_qcs8300.h
new file mode 100644
index 000000000000..f82355d72fcf
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_platform_qcs8300.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+static struct platform_inst_fw_cap inst_fw_cap_qcs8300[] = {
+ {
+ .cap_id = PROFILE,
+ .min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH),
+ .value = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .hfi_id = HFI_PROP_PROFILE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_u32_enum,
+ },
+ {
+ .cap_id = LEVEL,
+ .min = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .max = V4L2_MPEG_VIDEO_H264_LEVEL_6_2,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1B) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_3) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_2),
+ .value = V4L2_MPEG_VIDEO_H264_LEVEL_6_1,
+ .hfi_id = HFI_PROP_LEVEL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_u32_enum,
+ },
+ {
+ .cap_id = INPUT_BUF_HOST_MAX_COUNT,
+ .min = DEFAULT_MAX_HOST_BUF_COUNT,
+ .max = DEFAULT_MAX_HOST_BURST_BUF_COUNT,
+ .step_or_mask = 1,
+ .value = DEFAULT_MAX_HOST_BUF_COUNT,
+ .hfi_id = HFI_PROP_BUFFER_HOST_MAX_COUNT,
+ .flags = CAP_FLAG_INPUT_PORT,
+ .set = iris_set_u32,
+ },
+ {
+ .cap_id = STAGE,
+ .min = STAGE_1,
+ .max = STAGE_2,
+ .step_or_mask = 1,
+ .value = STAGE_2,
+ .hfi_id = HFI_PROP_STAGE,
+ .set = iris_set_stage,
+ },
+ {
+ .cap_id = PIPE,
+ .min = PIPE_1,
+ .max = PIPE_2,
+ .step_or_mask = 1,
+ .value = PIPE_2,
+ .hfi_id = HFI_PROP_PIPE,
+ .set = iris_set_pipe,
+ },
+ {
+ .cap_id = POC,
+ .min = 0,
+ .max = 2,
+ .step_or_mask = 1,
+ .value = 1,
+ .hfi_id = HFI_PROP_PIC_ORDER_CNT_TYPE,
+ },
+ {
+ .cap_id = CODED_FRAMES,
+ .min = CODED_FRAMES_PROGRESSIVE,
+ .max = CODED_FRAMES_PROGRESSIVE,
+ .step_or_mask = 0,
+ .value = CODED_FRAMES_PROGRESSIVE,
+ .hfi_id = HFI_PROP_CODED_FRAMES,
+ },
+ {
+ .cap_id = BIT_DEPTH,
+ .min = BIT_DEPTH_8,
+ .max = BIT_DEPTH_8,
+ .step_or_mask = 1,
+ .value = BIT_DEPTH_8,
+ .hfi_id = HFI_PROP_LUMA_CHROMA_BIT_DEPTH,
+ },
+ {
+ .cap_id = RAP_FRAME,
+ .min = 0,
+ .max = 1,
+ .step_or_mask = 1,
+ .value = 1,
+ .hfi_id = HFI_PROP_DEC_START_FROM_RAP_FRAME,
+ .flags = CAP_FLAG_INPUT_PORT,
+ .set = iris_set_u32,
+ },
+};
+
+static struct platform_inst_caps platform_inst_cap_qcs8300 = {
+ .min_frame_width = 96,
+ .max_frame_width = 4096,
+ .min_frame_height = 96,
+ .max_frame_height = 4096,
+ .max_mbpf = (4096 * 2176) / 256,
+ .mb_cycles_vpp = 200,
+ .mb_cycles_fw = 326389,
+ .mb_cycles_fw_vpp = 44156,
+ .num_comv = 0,
+};
diff --git a/drivers/media/platform/qcom/iris/iris_platform_sm8650.h b/drivers/media/platform/qcom/iris/iris_platform_sm8650.h
new file mode 100644
index 000000000000..75e9d572e788
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_platform_sm8650.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_PLATFORM_SM8650_H__
+#define __IRIS_PLATFORM_SM8650_H__
+
+static const char * const sm8650_clk_reset_table[] = { "bus", "core" };
+
+static const char * const sm8650_controller_reset_table[] = { "xo" };
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_probe.c b/drivers/media/platform/qcom/iris/iris_probe.c
index aca442dcc153..9a7ce142f700 100644
--- a/drivers/media/platform/qcom/iris/iris_probe.c
+++ b/drivers/media/platform/qcom/iris/iris_probe.c
@@ -91,25 +91,40 @@ static int iris_init_clocks(struct iris_core *core)
return 0;
}
-static int iris_init_resets(struct iris_core *core)
+static int iris_init_reset_table(struct iris_core *core,
+ struct reset_control_bulk_data **resets,
+ const char * const *rst_tbl, u32 rst_tbl_size)
{
- const char * const *rst_tbl;
- u32 rst_tbl_size;
u32 i = 0;
- rst_tbl = core->iris_platform_data->clk_rst_tbl;
- rst_tbl_size = core->iris_platform_data->clk_rst_tbl_size;
-
- core->resets = devm_kzalloc(core->dev,
- sizeof(*core->resets) * rst_tbl_size,
- GFP_KERNEL);
- if (!core->resets)
+ *resets = devm_kzalloc(core->dev,
+ sizeof(struct reset_control_bulk_data) * rst_tbl_size,
+ GFP_KERNEL);
+ if (!*resets)
return -ENOMEM;
for (i = 0; i < rst_tbl_size; i++)
- core->resets[i].id = rst_tbl[i];
+ (*resets)[i].id = rst_tbl[i];
+
+ return devm_reset_control_bulk_get_exclusive(core->dev, rst_tbl_size, *resets);
+}
+
+static int iris_init_resets(struct iris_core *core)
+{
+ int ret;
+
+ ret = iris_init_reset_table(core, &core->resets,
+ core->iris_platform_data->clk_rst_tbl,
+ core->iris_platform_data->clk_rst_tbl_size);
+ if (ret)
+ return ret;
- return devm_reset_control_bulk_get_exclusive(core->dev, rst_tbl_size, core->resets);
+ if (!core->iris_platform_data->controller_rst_tbl_size)
+ return 0;
+
+ return iris_init_reset_table(core, &core->controller_resets,
+ core->iris_platform_data->controller_rst_tbl,
+ core->iris_platform_data->controller_rst_tbl_size);
}
static int iris_init_resources(struct iris_core *core)
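
Platforms that declare a controller reset table can then cycle those resets through the standard bulk reset API. A hedged sketch, assuming core->controller_resets was populated above (the settle delay is illustrative, not taken from this patch):

	ret = reset_control_bulk_assert(core->iris_platform_data->controller_rst_tbl_size,
					core->controller_resets);
	if (ret)
		return ret;
	usleep_range(400, 500);
	ret = reset_control_bulk_deassert(core->iris_platform_data->controller_rst_tbl_size,
					  core->controller_resets);
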
@@ -321,15 +336,23 @@ static const struct dev_pm_ops iris_pm_ops = {
static const struct of_device_id iris_dt_match[] = {
{
- .compatible = "qcom,sm8550-iris",
- .data = &sm8550_data,
+ .compatible = "qcom,qcs8300-iris",
+ .data = &qcs8300_data,
},
#if (!IS_ENABLED(CONFIG_VIDEO_QCOM_VENUS))
- {
- .compatible = "qcom,sm8250-venus",
- .data = &sm8250_data,
- },
+ {
+ .compatible = "qcom,sm8250-venus",
+ .data = &sm8250_data,
+ },
#endif
+ {
+ .compatible = "qcom,sm8550-iris",
+ .data = &sm8550_data,
+ },
+ {
+ .compatible = "qcom,sm8650-iris",
+ .data = &sm8650_data,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, iris_dt_match);
diff --git a/drivers/media/platform/qcom/iris/iris_vpu2.c b/drivers/media/platform/qcom/iris/iris_vpu2.c
index 8f502aed43ce..7cf1bfc352d3 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu2.c
+++ b/drivers/media/platform/qcom/iris/iris_vpu2.c
@@ -34,5 +34,6 @@ static u64 iris_vpu2_calc_freq(struct iris_inst *inst, size_t data_size)
const struct vpu_ops iris_vpu2_ops = {
.power_off_hw = iris_vpu_power_off_hw,
+ .power_off_controller = iris_vpu_power_off_controller,
.calc_freq = iris_vpu2_calc_freq,
};
diff --git a/drivers/media/platform/qcom/iris/iris_vpu3.c b/drivers/media/platform/qcom/iris/iris_vpu3.c
deleted file mode 100644
index b484638e6105..000000000000
--- a/drivers/media/platform/qcom/iris/iris_vpu3.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
- */
-
-#include <linux/iopoll.h>
-
-#include "iris_instance.h"
-#include "iris_vpu_common.h"
-#include "iris_vpu_register_defines.h"
-
-#define AON_MVP_NOC_RESET 0x0001F000
-
-#define WRAPPER_CORE_CLOCK_CONFIG (WRAPPER_BASE_OFFS + 0x88)
-#define CORE_CLK_RUN 0x0
-
-#define CPU_CS_AHB_BRIDGE_SYNC_RESET (CPU_CS_BASE_OFFS + 0x160)
-#define CORE_BRIDGE_SW_RESET BIT(0)
-#define CORE_BRIDGE_HW_RESET_DISABLE BIT(1)
-
-#define AON_WRAPPER_MVP_NOC_RESET_REQ (AON_MVP_NOC_RESET + 0x000)
-#define VIDEO_NOC_RESET_REQ (BIT(0) | BIT(1))
-
-#define AON_WRAPPER_MVP_NOC_RESET_ACK (AON_MVP_NOC_RESET + 0x004)
-
-#define VCODEC_SS_IDLE_STATUSN (VCODEC_BASE_OFFS + 0x70)
-
-static bool iris_vpu3_hw_power_collapsed(struct iris_core *core)
-{
- u32 value, pwr_status;
-
- value = readl(core->reg_base + WRAPPER_CORE_POWER_STATUS);
- pwr_status = value & BIT(1);
-
- return pwr_status ? false : true;
-}
-
-static void iris_vpu3_power_off_hardware(struct iris_core *core)
-{
- u32 reg_val = 0, value, i;
- int ret;
-
- if (iris_vpu3_hw_power_collapsed(core))
- goto disable_power;
-
- dev_err(core->dev, "video hw is power on\n");
-
- value = readl(core->reg_base + WRAPPER_CORE_CLOCK_CONFIG);
- if (value)
- writel(CORE_CLK_RUN, core->reg_base + WRAPPER_CORE_CLOCK_CONFIG);
-
- for (i = 0; i < core->iris_platform_data->num_vpp_pipe; i++) {
- ret = readl_poll_timeout(core->reg_base + VCODEC_SS_IDLE_STATUSN + 4 * i,
- reg_val, reg_val & 0x400000, 2000, 20000);
- if (ret)
- goto disable_power;
- }
-
- writel(VIDEO_NOC_RESET_REQ, core->reg_base + AON_WRAPPER_MVP_NOC_RESET_REQ);
-
- ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_RESET_ACK,
- reg_val, reg_val & 0x3, 200, 2000);
- if (ret)
- goto disable_power;
-
- writel(0x0, core->reg_base + AON_WRAPPER_MVP_NOC_RESET_REQ);
-
- ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_RESET_ACK,
- reg_val, !(reg_val & 0x3), 200, 2000);
- if (ret)
- goto disable_power;
-
- writel(CORE_BRIDGE_SW_RESET | CORE_BRIDGE_HW_RESET_DISABLE,
- core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
- writel(CORE_BRIDGE_HW_RESET_DISABLE, core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
- writel(0x0, core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
-
-disable_power:
- iris_vpu_power_off_hw(core);
-}
-
-static u64 iris_vpu3_calculate_frequency(struct iris_inst *inst, size_t data_size)
-{
- struct platform_inst_caps *caps = inst->core->iris_platform_data->inst_caps;
- struct v4l2_format *inp_f = inst->fmt_src;
- u32 height, width, mbs_per_second, mbpf;
- u64 fw_cycles, fw_vpp_cycles;
- u64 vsp_cycles, vpp_cycles;
- u32 fps = DEFAULT_FPS;
-
- width = max(inp_f->fmt.pix_mp.width, inst->crop.width);
- height = max(inp_f->fmt.pix_mp.height, inst->crop.height);
-
- mbpf = NUM_MBS_PER_FRAME(height, width);
- mbs_per_second = mbpf * fps;
-
- fw_cycles = fps * caps->mb_cycles_fw;
- fw_vpp_cycles = fps * caps->mb_cycles_fw_vpp;
-
- vpp_cycles = mult_frac(mbs_per_second, caps->mb_cycles_vpp, (u32)inst->fw_caps[PIPE].value);
- /* 21 / 20 is minimum overhead factor */
- vpp_cycles += max(div_u64(vpp_cycles, 20), fw_vpp_cycles);
-
- /* 1.059 is multi-pipe overhead */
- if (inst->fw_caps[PIPE].value > 1)
- vpp_cycles += div_u64(vpp_cycles * 59, 1000);
-
- vsp_cycles = fps * data_size * 8;
- vsp_cycles = div_u64(vsp_cycles, 2);
- /* VSP FW overhead 1.05 */
- vsp_cycles = div_u64(vsp_cycles * 21, 20);
-
- if (inst->fw_caps[STAGE].value == STAGE_1)
- vsp_cycles = vsp_cycles * 3;
-
- return max3(vpp_cycles, vsp_cycles, fw_cycles);
-}
-
-const struct vpu_ops iris_vpu3_ops = {
- .power_off_hw = iris_vpu3_power_off_hardware,
- .calc_freq = iris_vpu3_calculate_frequency,
-};
diff --git a/drivers/media/platform/qcom/iris/iris_vpu3x.c b/drivers/media/platform/qcom/iris/iris_vpu3x.c
new file mode 100644
index 000000000000..9b7c9a1495ee
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vpu3x.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+
+#include "iris_instance.h"
+#include "iris_vpu_common.h"
+#include "iris_vpu_register_defines.h"
+
+#define WRAPPER_TZ_BASE_OFFS 0x000C0000
+#define AON_BASE_OFFS 0x000E0000
+#define AON_MVP_NOC_RESET 0x0001F000
+
+#define WRAPPER_DEBUG_BRIDGE_LPI_CONTROL (WRAPPER_BASE_OFFS + 0x54)
+#define WRAPPER_DEBUG_BRIDGE_LPI_STATUS (WRAPPER_BASE_OFFS + 0x58)
+#define WRAPPER_IRIS_CPU_NOC_LPI_CONTROL (WRAPPER_BASE_OFFS + 0x5C)
+#define REQ_POWER_DOWN_PREP BIT(0)
+#define WRAPPER_IRIS_CPU_NOC_LPI_STATUS (WRAPPER_BASE_OFFS + 0x60)
+#define WRAPPER_CORE_CLOCK_CONFIG (WRAPPER_BASE_OFFS + 0x88)
+#define CORE_CLK_RUN 0x0
+
+#define WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG (WRAPPER_TZ_BASE_OFFS + 0x14)
+#define CTL_AXI_CLK_HALT BIT(0)
+#define CTL_CLK_HALT BIT(1)
+
+#define WRAPPER_TZ_QNS4PDXFIFO_RESET (WRAPPER_TZ_BASE_OFFS + 0x18)
+#define RESET_HIGH BIT(0)
+
+#define CPU_CS_AHB_BRIDGE_SYNC_RESET (CPU_CS_BASE_OFFS + 0x160)
+#define CORE_BRIDGE_SW_RESET BIT(0)
+#define CORE_BRIDGE_HW_RESET_DISABLE BIT(1)
+
+#define CPU_CS_X2RPMH (CPU_CS_BASE_OFFS + 0x168)
+#define MSK_SIGNAL_FROM_TENSILICA BIT(0)
+#define MSK_CORE_POWER_ON BIT(1)
+
+#define AON_WRAPPER_MVP_NOC_RESET_REQ (AON_MVP_NOC_RESET + 0x000)
+#define VIDEO_NOC_RESET_REQ (BIT(0) | BIT(1))
+
+#define AON_WRAPPER_MVP_NOC_RESET_ACK (AON_MVP_NOC_RESET + 0x004)
+
+#define VCODEC_SS_IDLE_STATUSN (VCODEC_BASE_OFFS + 0x70)
+
+#define AON_WRAPPER_MVP_NOC_LPI_CONTROL (AON_BASE_OFFS)
+#define AON_WRAPPER_MVP_NOC_LPI_STATUS (AON_BASE_OFFS + 0x4)
+
+#define AON_WRAPPER_MVP_NOC_CORE_SW_RESET (AON_BASE_OFFS + 0x18)
+#define SW_RESET BIT(0)
+#define AON_WRAPPER_MVP_NOC_CORE_CLK_CONTROL (AON_BASE_OFFS + 0x20)
+#define NOC_HALT BIT(0)
+#define AON_WRAPPER_SPARE (AON_BASE_OFFS + 0x28)
+
+static bool iris_vpu3x_hw_power_collapsed(struct iris_core *core)
+{
+ u32 value, pwr_status;
+
+ value = readl(core->reg_base + WRAPPER_CORE_POWER_STATUS);
+ pwr_status = value & BIT(1);
+
+ return pwr_status ? false : true;
+}
+
+static void iris_vpu3_power_off_hardware(struct iris_core *core)
+{
+ u32 reg_val = 0, value, i;
+ int ret;
+
+ if (iris_vpu3x_hw_power_collapsed(core))
+ goto disable_power;
+
+	dev_err(core->dev, "video hw is powered on\n");
+
+ value = readl(core->reg_base + WRAPPER_CORE_CLOCK_CONFIG);
+ if (value)
+ writel(CORE_CLK_RUN, core->reg_base + WRAPPER_CORE_CLOCK_CONFIG);
+
+ for (i = 0; i < core->iris_platform_data->num_vpp_pipe; i++) {
+ ret = readl_poll_timeout(core->reg_base + VCODEC_SS_IDLE_STATUSN + 4 * i,
+ reg_val, reg_val & 0x400000, 2000, 20000);
+ if (ret)
+ goto disable_power;
+ }
+
+ writel(VIDEO_NOC_RESET_REQ, core->reg_base + AON_WRAPPER_MVP_NOC_RESET_REQ);
+
+ ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_RESET_ACK,
+ reg_val, reg_val & 0x3, 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(0x0, core->reg_base + AON_WRAPPER_MVP_NOC_RESET_REQ);
+
+ ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_RESET_ACK,
+ reg_val, !(reg_val & 0x3), 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(CORE_BRIDGE_SW_RESET | CORE_BRIDGE_HW_RESET_DISABLE,
+ core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
+ writel(CORE_BRIDGE_HW_RESET_DISABLE, core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
+ writel(0x0, core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
+
+disable_power:
+ iris_vpu_power_off_hw(core);
+}
+
+static void iris_vpu33_power_off_hardware(struct iris_core *core)
+{
+ u32 reg_val = 0, value, i;
+ int ret;
+
+ if (iris_vpu3x_hw_power_collapsed(core))
+ goto disable_power;
+
+	dev_err(core->dev, "video hw is powered on\n");
+
+ value = readl(core->reg_base + WRAPPER_CORE_CLOCK_CONFIG);
+ if (value)
+ writel(CORE_CLK_RUN, core->reg_base + WRAPPER_CORE_CLOCK_CONFIG);
+
+ for (i = 0; i < core->iris_platform_data->num_vpp_pipe; i++) {
+ ret = readl_poll_timeout(core->reg_base + VCODEC_SS_IDLE_STATUSN + 4 * i,
+ reg_val, reg_val & 0x400000, 2000, 20000);
+ if (ret)
+ goto disable_power;
+ }
+
+ ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
+ reg_val, reg_val & BIT(0), 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ /* set MNoC to low power, set PD_NOC_QREQ (bit 0) */
+ writel(BIT(0), core->reg_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
+
+ writel(CORE_BRIDGE_SW_RESET | CORE_BRIDGE_HW_RESET_DISABLE,
+ core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
+ writel(CORE_BRIDGE_HW_RESET_DISABLE, core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
+ writel(0x0, core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
+
+disable_power:
+ iris_vpu_power_off_hw(core);
+}
+
+static int iris_vpu33_power_off_controller(struct iris_core *core)
+{
+ u32 xo_rst_tbl_size = core->iris_platform_data->controller_rst_tbl_size;
+ u32 clk_rst_tbl_size = core->iris_platform_data->clk_rst_tbl_size;
+ u32 val = 0;
+ int ret;
+
+ writel(MSK_SIGNAL_FROM_TENSILICA | MSK_CORE_POWER_ON, core->reg_base + CPU_CS_X2RPMH);
+
+ writel(REQ_POWER_DOWN_PREP, core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_CONTROL);
+
+ ret = readl_poll_timeout(core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_STATUS,
+ val, val & BIT(0), 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(0x0, core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL);
+
+ ret = readl_poll_timeout(core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS,
+ val, val == 0, 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(CTL_AXI_CLK_HALT | CTL_CLK_HALT,
+ core->reg_base + WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG);
+ writel(RESET_HIGH, core->reg_base + WRAPPER_TZ_QNS4PDXFIFO_RESET);
+ writel(0x0, core->reg_base + WRAPPER_TZ_QNS4PDXFIFO_RESET);
+ writel(0x0, core->reg_base + WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG);
+
+ reset_control_bulk_reset(clk_rst_tbl_size, core->resets);
+
+ /* Disable MVP NoC clock */
+ val = readl(core->reg_base + AON_WRAPPER_MVP_NOC_CORE_CLK_CONTROL);
+ val |= NOC_HALT;
+ writel(val, core->reg_base + AON_WRAPPER_MVP_NOC_CORE_CLK_CONTROL);
+
+ /* enable MVP NoC reset */
+ val = readl(core->reg_base + AON_WRAPPER_MVP_NOC_CORE_SW_RESET);
+ val |= SW_RESET;
+ writel(val, core->reg_base + AON_WRAPPER_MVP_NOC_CORE_SW_RESET);
+
+	/* Poll AON spare register bit 0 to become zero, with a 50 ms timeout */
+ ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_SPARE,
+ val, (val & BIT(0)) == 0, 1000, 50000);
+ if (ret)
+ goto disable_power;
+
+ /* enable bit(1) to avoid cvp noc xo reset */
+ val = readl(core->reg_base + AON_WRAPPER_SPARE);
+ val |= BIT(1);
+ writel(val, core->reg_base + AON_WRAPPER_SPARE);
+
+ reset_control_bulk_assert(xo_rst_tbl_size, core->controller_resets);
+
+ /* De-assert MVP NoC reset */
+ val = readl(core->reg_base + AON_WRAPPER_MVP_NOC_CORE_SW_RESET);
+ val &= ~SW_RESET;
+ writel(val, core->reg_base + AON_WRAPPER_MVP_NOC_CORE_SW_RESET);
+
+ usleep_range(80, 100);
+
+ reset_control_bulk_deassert(xo_rst_tbl_size, core->controller_resets);
+
+ /* reset AON spare register */
+ writel(0, core->reg_base + AON_WRAPPER_SPARE);
+
+ /* Enable MVP NoC clock */
+ val = readl(core->reg_base + AON_WRAPPER_MVP_NOC_CORE_CLK_CONTROL);
+ val &= ~NOC_HALT;
+ writel(val, core->reg_base + AON_WRAPPER_MVP_NOC_CORE_CLK_CONTROL);
+
+ iris_disable_unprepare_clock(core, IRIS_CTRL_CLK);
+
+disable_power:
+ iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
+ iris_disable_unprepare_clock(core, IRIS_AXI_CLK);
+
+ return 0;
+}
+
+static u64 iris_vpu3x_calculate_frequency(struct iris_inst *inst, size_t data_size)
+{
+ struct platform_inst_caps *caps = inst->core->iris_platform_data->inst_caps;
+ struct v4l2_format *inp_f = inst->fmt_src;
+ u32 height, width, mbs_per_second, mbpf;
+ u64 fw_cycles, fw_vpp_cycles;
+ u64 vsp_cycles, vpp_cycles;
+ u32 fps = DEFAULT_FPS;
+
+ width = max(inp_f->fmt.pix_mp.width, inst->crop.width);
+ height = max(inp_f->fmt.pix_mp.height, inst->crop.height);
+
+ mbpf = NUM_MBS_PER_FRAME(height, width);
+ mbs_per_second = mbpf * fps;
+
+ fw_cycles = fps * caps->mb_cycles_fw;
+ fw_vpp_cycles = fps * caps->mb_cycles_fw_vpp;
+
+ vpp_cycles = mult_frac(mbs_per_second, caps->mb_cycles_vpp, (u32)inst->fw_caps[PIPE].value);
+ /* 21 / 20 is minimum overhead factor */
+ vpp_cycles += max(div_u64(vpp_cycles, 20), fw_vpp_cycles);
+
+ /* 1.059 is multi-pipe overhead */
+ if (inst->fw_caps[PIPE].value > 1)
+ vpp_cycles += div_u64(vpp_cycles * 59, 1000);
+
+ vsp_cycles = fps * data_size * 8;
+ vsp_cycles = div_u64(vsp_cycles, 2);
+ /* VSP FW overhead 1.05 */
+ vsp_cycles = div_u64(vsp_cycles * 21, 20);
+
+ if (inst->fw_caps[STAGE].value == STAGE_1)
+ vsp_cycles = vsp_cycles * 3;
+
+ return max3(vpp_cycles, vsp_cycles, fw_cycles);
+}
+
+const struct vpu_ops iris_vpu3_ops = {
+ .power_off_hw = iris_vpu3_power_off_hardware,
+ .power_off_controller = iris_vpu_power_off_controller,
+ .calc_freq = iris_vpu3x_calculate_frequency,
+};
+
+const struct vpu_ops iris_vpu33_ops = {
+ .power_off_hw = iris_vpu33_power_off_hardware,
+ .power_off_controller = iris_vpu33_power_off_controller,
+ .calc_freq = iris_vpu3x_calculate_frequency,
+};
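The frequency model in iris_vpu3x_calculate_frequency() above is plain integer arithmetic once the capability values are fixed. A self-contained sketch, assuming a hypothetical 16x16 macroblock grid and made-up capability numbers (mb_cycles_vpp = 200, four pipes; the real values come from the platform data), works the VPP term for a 1920x1080 stream at the default 30 fps:

#include <stdint.h>
#include <stdio.h>

/* Assumption: frames are tiled into 16x16 macroblocks. */
#define MBS(dim)	(((dim) + 15) / 16)

int main(void)
{
	uint32_t fps = 30, width = 1920, height = 1080;
	uint32_t mb_cycles_vpp = 200, pipes = 4;	/* hypothetical caps */
	uint64_t mbs_per_second = (uint64_t)MBS(width) * MBS(height) * fps;
	uint64_t vpp = mbs_per_second * mb_cycles_vpp / pipes;

	vpp += vpp / 20;		/* 21/20 minimum overhead; the driver
					 * also floors this at fw_vpp_cycles */
	if (pipes > 1)
		vpp += vpp * 59 / 1000;	/* 1.059 multi-pipe overhead */

	printf("vpp cycles/s: %llu\n", (unsigned long long)vpp);
	return 0;
}

For 1920x1080 that is 120 * 68 macroblocks, 244800 MB/s, and roughly 13.6 million VPP cycles per second; the driver then takes the max of this, the VSP term and the firmware term.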
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_common.c b/drivers/media/platform/qcom/iris/iris_vpu_common.c
index fe9896d66848..268e45acaa7c 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu_common.c
+++ b/drivers/media/platform/qcom/iris/iris_vpu_common.c
@@ -211,7 +211,7 @@ skip_power_off:
return -EAGAIN;
}
-static int iris_vpu_power_off_controller(struct iris_core *core)
+int iris_vpu_power_off_controller(struct iris_core *core)
{
u32 val = 0;
int ret;
@@ -264,7 +264,7 @@ void iris_vpu_power_off(struct iris_core *core)
{
dev_pm_opp_set_rate(core->dev, 0);
core->iris_platform_data->vpu_ops->power_off_hw(core);
- iris_vpu_power_off_controller(core);
+ core->iris_platform_data->vpu_ops->power_off_controller(core);
iris_unset_icc_bw(core);
if (!iris_vpu_watchdog(core, core->intr_status))
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_common.h b/drivers/media/platform/qcom/iris/iris_vpu_common.h
index 63fa1fa5a498..93b7fa27be3b 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu_common.h
+++ b/drivers/media/platform/qcom/iris/iris_vpu_common.h
@@ -10,9 +10,11 @@ struct iris_core;
extern const struct vpu_ops iris_vpu2_ops;
extern const struct vpu_ops iris_vpu3_ops;
+extern const struct vpu_ops iris_vpu33_ops;
struct vpu_ops {
void (*power_off_hw)(struct iris_core *core);
+ int (*power_off_controller)(struct iris_core *core);
u64 (*calc_freq)(struct iris_inst *inst, size_t data_size);
};
@@ -22,6 +24,7 @@ void iris_vpu_clear_interrupt(struct iris_core *core);
int iris_vpu_watchdog(struct iris_core *core, u32 intr_status);
int iris_vpu_prepare_pc(struct iris_core *core);
int iris_vpu_power_on(struct iris_core *core);
+int iris_vpu_power_off_controller(struct iris_core *core);
void iris_vpu_power_off_hw(struct iris_core *core);
void iris_vpu_power_off(struct iris_core *core);
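With power_off_controller added to struct vpu_ops, the common power-off path no longer hardcodes one controller sequence: VPU3 keeps the shared helper while VPU33 installs its own. A minimal sketch of the dispatch pattern, with deliberately simplified stand-in types rather than the driver's own:

#include <stdio.h>

struct core;

struct ops {
	void (*power_off_hw)(struct core *c);
	int  (*power_off_controller)(struct core *c);
};

struct core {
	const struct ops *ops;
};

static void v3_off_hw(struct core *c)       { printf("v3 hw off\n"); }
static int  shared_off_ctrl(struct core *c) { printf("ctrl off\n"); return 0; }

static const struct ops v3_ops = {
	.power_off_hw = v3_off_hw,
	.power_off_controller = shared_off_ctrl,	/* reused helper */
};

/* The caller never needs to know which variant it is driving. */
static void power_off(struct core *c)
{
	c->ops->power_off_hw(c);
	c->ops->power_off_controller(c);
}

int main(void)
{
	struct core c = { .ops = &v3_ops };

	power_off(&c);
	return 0;
}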
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 77d48578ecd2..d305d74bb152 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -438,7 +438,7 @@ static int venus_probe(struct platform_device *pdev)
ret = v4l2_device_register(dev, &core->v4l2_dev);
if (ret)
- goto err_core_deinit;
+ goto err_hfi_destroy;
platform_set_drvdata(pdev, core);
@@ -476,24 +476,24 @@ static int venus_probe(struct platform_device *pdev)
ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_DEC);
if (ret)
- goto err_venus_shutdown;
+ goto err_core_deinit;
ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_ENC);
if (ret)
- goto err_venus_shutdown;
+ goto err_core_deinit;
ret = pm_runtime_put_sync(dev);
if (ret) {
pm_runtime_get_noresume(dev);
- goto err_dev_unregister;
+ goto err_core_deinit;
}
venus_dbgfs_init(core);
return 0;
-err_dev_unregister:
- v4l2_device_unregister(&core->v4l2_dev);
+err_core_deinit:
+ hfi_core_deinit(core, false);
err_venus_shutdown:
venus_shutdown(core);
err_firmware_deinit:
@@ -506,9 +506,9 @@ err_runtime_disable:
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
+ v4l2_device_unregister(&core->v4l2_dev);
+err_hfi_destroy:
hfi_destroy(core);
-err_core_deinit:
- hfi_core_deinit(core, false);
err_core_put:
if (core->pm_ops->core_put)
core->pm_ops->core_put(core);
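The venus_probe() relabeling above restores the goto-ladder invariant: unwind labels run in exact reverse of acquisition order, so a failure right after v4l2_device_register() must not deinit an HFI core that was never brought up. A minimal sketch of the idiom with hypothetical steps:

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("init %s\n", name);
	return fail ? -1 : 0;
}

static int probe(void)
{
	int ret;

	ret = step("a", 0);
	if (ret)
		return ret;

	ret = step("b", 0);
	if (ret)
		goto undo_a;	/* only a exists yet */

	ret = step("c", 1);	/* simulate a late failure */
	if (ret)
		goto undo_b;	/* unwind b, then fall through to a */

	return 0;

undo_b:
	printf("undo b\n");
undo_a:
	printf("undo a\n");
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}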
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index abeeafa86697..b412e0c5515a 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -172,6 +172,7 @@ struct venus_format {
* @venus_ver: the venus firmware version
* @dump_core: a flag indicating that a core dump is required
* @ocs: OF changeset pointer
+ * @hwmode_dev: a flag indicating that HW_CTRL_TRIGGER is used by the clock driver
*/
struct venus_core {
void __iomem *base;
@@ -235,6 +236,7 @@ struct venus_core {
} venus_ver;
unsigned long dump_core;
struct of_changeset *ocs;
+ bool hwmode_dev;
};
struct vdec_controls {
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index 33a5a659c0ad..409aa9bd0b5d 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -412,9 +412,17 @@ static int vcodec_control_v4(struct venus_core *core, u32 coreid, bool enable)
u32 val;
int ret;
- if (IS_V6(core))
- return dev_pm_genpd_set_hwmode(core->pmdomains->pd_devs[coreid], !enable);
- else if (coreid == VIDC_CORE_ID_1) {
+ ret = dev_pm_genpd_set_hwmode(core->pmdomains->pd_devs[coreid], !enable);
+ if (ret == -EOPNOTSUPP) {
+ core->hwmode_dev = false;
+ goto legacy;
+ }
+
+ core->hwmode_dev = true;
+ return ret;
+
+legacy:
+ if (coreid == VIDC_CORE_ID_1) {
ctrl = core->wrapper_base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL;
stat = core->wrapper_base + WRAPPER_VCODEC0_MMCC_POWER_STATUS;
} else {
@@ -450,7 +458,7 @@ static int poweroff_coreid(struct venus_core *core, unsigned int coreid_mask)
vcodec_clks_disable(core, core->vcodec0_clks);
- if (!IS_V6(core)) {
+ if (!core->hwmode_dev) {
ret = vcodec_control_v4(core, VIDC_CORE_ID_1, false);
if (ret)
return ret;
@@ -468,7 +476,7 @@ static int poweroff_coreid(struct venus_core *core, unsigned int coreid_mask)
vcodec_clks_disable(core, core->vcodec1_clks);
- if (!IS_V6(core)) {
+ if (!core->hwmode_dev) {
ret = vcodec_control_v4(core, VIDC_CORE_ID_2, false);
if (ret)
return ret;
@@ -491,11 +499,9 @@ static int poweron_coreid(struct venus_core *core, unsigned int coreid_mask)
if (ret < 0)
return ret;
- if (!IS_V6(core)) {
- ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true);
- if (ret)
- return ret;
- }
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true);
+ if (ret)
+ return ret;
ret = vcodec_clks_enable(core, core->vcodec0_clks);
if (ret)
@@ -511,11 +517,9 @@ static int poweron_coreid(struct venus_core *core, unsigned int coreid_mask)
if (ret < 0)
return ret;
- if (!IS_V6(core)) {
- ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true);
- if (ret)
- return ret;
- }
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true);
+ if (ret)
+ return ret;
ret = vcodec_clks_enable(core, core->vcodec1_clks);
if (ret)
@@ -811,7 +815,7 @@ static int vdec_power_v4(struct device *dev, int on)
else
vcodec_clks_disable(core, core->vcodec0_clks);
- vcodec_control_v4(core, VIDC_CORE_ID_1, false);
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_1, false);
return ret;
}
@@ -856,7 +860,7 @@ static int venc_power_v4(struct device *dev, int on)
else
vcodec_clks_disable(core, core->vcodec1_clks);
- vcodec_control_v4(core, VIDC_CORE_ID_2, false);
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_2, false);
return ret;
}
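Rather than keying on IS_V6(), vcodec_control_v4() now asks the genpd framework directly and caches the answer in hwmode_dev, so later power-off paths skip the legacy register handshake when the domain is hardware controlled. A reduced sketch of the probe-once fallback, where try_hw_mode() and legacy_control() are stand-ins for dev_pm_genpd_set_hwmode() and the MMCC register path:

#include <errno.h>
#include <stdbool.h>

static int try_hw_mode(bool enable)     { return -EOPNOTSUPP; }
static int legacy_control(bool enable)  { return 0; }

static bool hwmode_dev;

static int codec_control(bool enable)
{
	int ret = try_hw_mode(!enable);

	if (ret != -EOPNOTSUPP) {
		hwmode_dev = true;	/* remember: HW mode available */
		return ret;
	}

	hwmode_dev = false;		/* fall back, now and for later calls */
	return legacy_control(enable);
}

int main(void)
{
	return codec_control(true);
}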
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 9f82882b77bc..99ce5fd41577 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -154,14 +154,14 @@ find_format_by_index(struct venus_inst *inst, unsigned int index, u32 type)
return NULL;
for (i = 0; i < size; i++) {
- bool valid;
+ bool valid = false;
if (fmt[i].type != type)
continue;
if (V4L2_TYPE_IS_OUTPUT(type)) {
valid = venus_helper_check_codec(inst, fmt[i].pixfmt);
- } else if (V4L2_TYPE_IS_CAPTURE(type)) {
+ } else {
valid = venus_helper_check_format(inst, fmt[i].pixfmt);
if (fmt[i].pixfmt == V4L2_PIX_FMT_QC10C &&
@@ -1110,10 +1110,20 @@ static int vdec_start_output(struct venus_inst *inst)
if (inst->codec_state == VENUS_DEC_STATE_SEEK) {
ret = venus_helper_process_initial_out_bufs(inst);
- if (inst->next_buf_last)
+ if (ret)
+ return ret;
+
+ if (inst->next_buf_last) {
inst->codec_state = VENUS_DEC_STATE_DRC;
- else
+ } else {
inst->codec_state = VENUS_DEC_STATE_DECODING;
+
+ if (inst->streamon_cap) {
+ ret = venus_helper_queue_dpb_bufs(inst);
+ if (ret)
+ return ret;
+ }
+ }
goto done;
}
diff --git a/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c b/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
index 69a5f23e7954..fcadb2143c88 100644
--- a/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
+++ b/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
@@ -12,7 +12,6 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/media/platform/renesas/Kconfig b/drivers/media/platform/renesas/Kconfig
index c7fc718a30a5..27a54fa79083 100644
--- a/drivers/media/platform/renesas/Kconfig
+++ b/drivers/media/platform/renesas/Kconfig
@@ -30,23 +30,6 @@ config VIDEO_RCAR_CSI2
To compile this driver as a module, choose M here: the
module will be called rcar-csi2.
-config VIDEO_RCAR_ISP
- tristate "R-Car Image Signal Processor (ISP)"
- depends on V4L_PLATFORM_DRIVERS
- depends on VIDEO_DEV && OF
- depends on ARCH_RENESAS || COMPILE_TEST
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select RESET_CONTROLLER
- select V4L2_FWNODE
- help
- Support for Renesas R-Car Image Signal Processor (ISP).
- Enable this to support the Renesas R-Car Image Signal
- Processor (ISP).
-
- To compile this driver as a module, choose M here: the
- module will be called rcar-isp.
-
config VIDEO_SH_VOU
tristate "SuperH VOU video output driver"
depends on V4L_PLATFORM_DRIVERS
@@ -56,6 +39,7 @@ config VIDEO_SH_VOU
help
Support for the Video Output Unit (VOU) on SuperH SoCs.
+source "drivers/media/platform/renesas/rcar-isp/Kconfig"
source "drivers/media/platform/renesas/rcar-vin/Kconfig"
source "drivers/media/platform/renesas/rzg2l-cru/Kconfig"
diff --git a/drivers/media/platform/renesas/Makefile b/drivers/media/platform/renesas/Makefile
index 50774a20330c..1127259c09d6 100644
--- a/drivers/media/platform/renesas/Makefile
+++ b/drivers/media/platform/renesas/Makefile
@@ -3,13 +3,13 @@
# Makefile for the Renesas capture/playback device drivers.
#
+obj-y += rcar-isp/
obj-y += rcar-vin/
obj-y += rzg2l-cru/
obj-y += vsp1/
obj-$(CONFIG_VIDEO_RCAR_CSI2) += rcar-csi2.o
obj-$(CONFIG_VIDEO_RCAR_DRIF) += rcar_drif.o
-obj-$(CONFIG_VIDEO_RCAR_ISP) += rcar-isp.o
obj-$(CONFIG_VIDEO_RENESAS_CEU) += renesas-ceu.o
obj-$(CONFIG_VIDEO_RENESAS_FCP) += rcar-fcp.o
obj-$(CONFIG_VIDEO_RENESAS_FDP1) += rcar_fdp1.o
diff --git a/drivers/media/platform/renesas/rcar-csi2.c b/drivers/media/platform/renesas/rcar-csi2.c
index 38a3149f9724..9979de4f6ef1 100644
--- a/drivers/media/platform/renesas/rcar-csi2.c
+++ b/drivers/media/platform/renesas/rcar-csi2.c
@@ -1075,16 +1075,10 @@ static int rcsi2_start_receiver_gen3(struct rcar_csi2 *priv,
vcdt2 |= vcdt_part << ((i % 2) * 16);
}
- if (fmt->field == V4L2_FIELD_ALTERNATE) {
+ if (fmt->field == V4L2_FIELD_ALTERNATE)
fld = FLD_DET_SEL(1) | FLD_FLD_EN4 | FLD_FLD_EN3 | FLD_FLD_EN2
| FLD_FLD_EN;
- if (fmt->height == 240)
- fld |= FLD_FLD_NUM(0);
- else
- fld |= FLD_FLD_NUM(1);
- }
-
/*
* Get the number of active data lanes inspecting the remote mbus
* configuration.
diff --git a/drivers/media/platform/renesas/rcar-isp/Kconfig b/drivers/media/platform/renesas/rcar-isp/Kconfig
new file mode 100644
index 000000000000..242f6a23851f
--- /dev/null
+++ b/drivers/media/platform/renesas/rcar-isp/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config VIDEO_RCAR_ISP
+ tristate "R-Car Image Signal Processor (ISP)"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select RESET_CONTROLLER
+ select V4L2_FWNODE
+ help
+ Support for Renesas R-Car Image Signal Processor (ISP).
+ Enable this to support the Renesas R-Car Image Signal
+ Processor (ISP).
+
+ To compile this driver as a module, choose M here: the
+ module will be called rcar-isp.
diff --git a/drivers/media/platform/renesas/rcar-isp/Makefile b/drivers/media/platform/renesas/rcar-isp/Makefile
new file mode 100644
index 000000000000..b542118c831e
--- /dev/null
+++ b/drivers/media/platform/renesas/rcar-isp/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+rcar-isp-objs = csisp.o
+
+obj-$(CONFIG_VIDEO_RCAR_ISP) += rcar-isp.o
diff --git a/drivers/media/platform/renesas/rcar-isp.c b/drivers/media/platform/renesas/rcar-isp/csisp.c
index 4bc89d4757fa..1eb29a0b774a 100644
--- a/drivers/media/platform/renesas/rcar-isp.c
+++ b/drivers/media/platform/renesas/rcar-isp/csisp.c
@@ -159,7 +159,7 @@ enum rcar_isp_pads {
struct rcar_isp {
struct device *dev;
- void __iomem *base;
+ void __iomem *csbase;
struct reset_control *rstc;
enum rcar_isp_input csi_input;
@@ -184,14 +184,14 @@ static inline struct rcar_isp *notifier_to_isp(struct v4l2_async_notifier *n)
return container_of(n, struct rcar_isp, notifier);
}
-static void risp_write(struct rcar_isp *isp, u32 offset, u32 value)
+static void risp_write_cs(struct rcar_isp *isp, u32 offset, u32 value)
{
- iowrite32(value, isp->base + offset);
+ iowrite32(value, isp->csbase + offset);
}
-static u32 risp_read(struct rcar_isp *isp, u32 offset)
+static u32 risp_read_cs(struct rcar_isp *isp, u32 offset)
{
- return ioread32(isp->base + offset);
+ return ioread32(isp->csbase + offset);
}
static int risp_power_on(struct rcar_isp *isp)
@@ -245,31 +245,31 @@ static int risp_start(struct rcar_isp *isp, struct v4l2_subdev_state *state)
if (isp->csi_input == RISP_CSI_INPUT1)
sel_csi = ISPINPUTSEL0_SEL_CSI0;
- risp_write(isp, ISPINPUTSEL0_REG,
- risp_read(isp, ISPINPUTSEL0_REG) | sel_csi);
+ risp_write_cs(isp, ISPINPUTSEL0_REG,
+ risp_read_cs(isp, ISPINPUTSEL0_REG) | sel_csi);
/* Configure Channel Selector. */
for (vc = 0; vc < 4; vc++) {
u8 ch = vc + 4;
u8 dt = format->datatype;
- risp_write(isp, ISPCS_FILTER_ID_CH_REG(ch), BIT(vc));
- risp_write(isp, ISPCS_DT_CODE03_CH_REG(ch),
- ISPCS_DT_CODE03_EN3 | ISPCS_DT_CODE03_DT3(dt) |
- ISPCS_DT_CODE03_EN2 | ISPCS_DT_CODE03_DT2(dt) |
- ISPCS_DT_CODE03_EN1 | ISPCS_DT_CODE03_DT1(dt) |
- ISPCS_DT_CODE03_EN0 | ISPCS_DT_CODE03_DT0(dt));
+ risp_write_cs(isp, ISPCS_FILTER_ID_CH_REG(ch), BIT(vc));
+ risp_write_cs(isp, ISPCS_DT_CODE03_CH_REG(ch),
+ ISPCS_DT_CODE03_EN3 | ISPCS_DT_CODE03_DT3(dt) |
+ ISPCS_DT_CODE03_EN2 | ISPCS_DT_CODE03_DT2(dt) |
+ ISPCS_DT_CODE03_EN1 | ISPCS_DT_CODE03_DT1(dt) |
+ ISPCS_DT_CODE03_EN0 | ISPCS_DT_CODE03_DT0(dt));
}
/* Setup processing method. */
- risp_write(isp, ISPPROCMODE_DT_REG(format->datatype),
- ISPPROCMODE_DT_PROC_MODE_VC3(format->procmode) |
- ISPPROCMODE_DT_PROC_MODE_VC2(format->procmode) |
- ISPPROCMODE_DT_PROC_MODE_VC1(format->procmode) |
- ISPPROCMODE_DT_PROC_MODE_VC0(format->procmode));
+ risp_write_cs(isp, ISPPROCMODE_DT_REG(format->datatype),
+ ISPPROCMODE_DT_PROC_MODE_VC3(format->procmode) |
+ ISPPROCMODE_DT_PROC_MODE_VC2(format->procmode) |
+ ISPPROCMODE_DT_PROC_MODE_VC1(format->procmode) |
+ ISPPROCMODE_DT_PROC_MODE_VC0(format->procmode));
/* Start ISP. */
- risp_write(isp, ISPSTART_REG, ISPSTART_START);
+ risp_write_cs(isp, ISPSTART_REG, ISPSTART_START);
ret = v4l2_subdev_enable_streams(isp->remote, isp->remote_pad,
BIT_ULL(0));
@@ -284,7 +284,7 @@ static void risp_stop(struct rcar_isp *isp)
v4l2_subdev_disable_streams(isp->remote, isp->remote_pad, BIT_ULL(0));
/* Stop ISP. */
- risp_write(isp, ISPSTART_REG, ISPSTART_STOP);
+ risp_write_cs(isp, ISPSTART_REG, ISPSTART_STOP);
risp_power_off(isp);
}
@@ -465,9 +465,20 @@ static const struct media_entity_operations risp_entity_ops = {
static int risp_probe_resources(struct rcar_isp *isp,
struct platform_device *pdev)
{
- isp->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
- if (IS_ERR(isp->base))
- return PTR_ERR(isp->base);
+ struct resource *res;
+
+ /*
+	 * For backward compatibility, allow the cs base to be the only reg
+	 * if no reg-names are set in the DT.
+ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
+ if (!res)
+ isp->csbase = devm_platform_ioremap_resource(pdev, 0);
+ else
+ isp->csbase = devm_ioremap_resource(&pdev->dev, res);
+
+ if (IS_ERR(isp->csbase))
+ return PTR_ERR(isp->csbase);
isp->rstc = devm_reset_control_get(&pdev->dev, NULL);
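risp_probe_resources() keeps old device trees working by treating a single unnamed reg as the cs window. A compact sketch of the byname-with-index-fallback idiom; the helper name is illustrative, the calls are the stock platform-device API:

#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *map_named_or_first(struct platform_device *pdev,
					const char *name)
{
	struct resource *res;

	/* New DTs name the region; legacy DTs expose one unnamed reg. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (!res)
		return devm_platform_ioremap_resource(pdev, 0);

	return devm_ioremap_resource(&pdev->dev, res);
}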
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-core.c b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
index ddfb18e6e7a4..846ae7989b1d 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
@@ -1080,13 +1080,11 @@ static int __maybe_unused rvin_suspend(struct device *dev)
{
struct rvin_dev *vin = dev_get_drvdata(dev);
- if (vin->state != RUNNING)
+ if (!vin->running)
return 0;
rvin_stop_streaming(vin);
- vin->state = SUSPENDED;
-
return 0;
}
@@ -1094,7 +1092,7 @@ static int __maybe_unused rvin_resume(struct device *dev)
{
struct rvin_dev *vin = dev_get_drvdata(dev);
- if (vin->state != SUSPENDED)
+ if (!vin->running)
return 0;
/*
@@ -1275,7 +1273,7 @@ static const struct rvin_info rcar_info_r8a77995 = {
};
static const struct rvin_info rcar_info_gen4 = {
- .model = RCAR_GEN3,
+ .model = RCAR_GEN4,
.use_mc = true,
.use_isp = true,
.nv12 = true,
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
index 8de871240440..5c08ee2c9807 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
@@ -94,6 +94,7 @@
#define VNMC_INF_YUV16 (5 << 16)
#define VNMC_INF_RGB888 (6 << 16)
#define VNMC_INF_RGB666 (7 << 16)
+#define VNMC_EXINF_RAW8 (1 << 12) /* Gen4 specific */
#define VNMC_VUP (1 << 10)
#define VNMC_IM_ODD (0 << 3)
#define VNMC_IM_ODD_EVEN (1 << 3)
@@ -642,8 +643,6 @@ void rvin_scaler_gen3(struct rvin_dev *vin)
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_SEQ_TB:
- case V4L2_FIELD_SEQ_BT:
clip_size |= vin->compose.height / 2;
break;
default:
@@ -679,22 +678,6 @@ void rvin_crop_scale_comp(struct rvin_dev *vin)
fmt = rvin_format_from_pixel(vin, vin->format.pixelformat);
stride = vin->format.bytesperline / fmt->bpp;
-
- /* For RAW8 format bpp is 1, but the hardware process RAW8
- * format in 2 pixel unit hence configure VNIS_REG as stride / 2.
- */
- switch (vin->format.pixelformat) {
- case V4L2_PIX_FMT_SBGGR8:
- case V4L2_PIX_FMT_SGBRG8:
- case V4L2_PIX_FMT_SGRBG8:
- case V4L2_PIX_FMT_SRGGB8:
- case V4L2_PIX_FMT_GREY:
- stride /= 2;
- break;
- default:
- break;
- }
-
rvin_write(vin, stride, VNIS_REG);
}
@@ -727,8 +710,6 @@ static int rvin_setup(struct rvin_dev *vin)
case V4L2_FIELD_INTERLACED_BT:
vnmc = VNMC_IM_FULL | VNMC_FOC;
break;
- case V4L2_FIELD_SEQ_TB:
- case V4L2_FIELD_SEQ_BT:
case V4L2_FIELD_NONE:
case V4L2_FIELD_ALTERNATE:
vnmc = VNMC_IM_ODD_EVEN;
@@ -791,6 +772,8 @@ static int rvin_setup(struct rvin_dev *vin)
case MEDIA_BUS_FMT_SRGGB8_1X8:
case MEDIA_BUS_FMT_Y8_1X8:
vnmc |= VNMC_INF_RAW8;
+ if (vin->info->model == RCAR_GEN4)
+ vnmc |= VNMC_EXINF_RAW8;
break;
case MEDIA_BUS_FMT_SBGGR10_1X10:
case MEDIA_BUS_FMT_SGBRG10_1X10:
@@ -802,31 +785,8 @@ static int rvin_setup(struct rvin_dev *vin)
break;
}
- /* Make sure input interface and input format is valid. */
- if (vin->info->model == RCAR_GEN3) {
- switch (vnmc & VNMC_INF_MASK) {
- case VNMC_INF_YUV8_BT656:
- case VNMC_INF_YUV10_BT656:
- case VNMC_INF_YUV16:
- case VNMC_INF_RGB666:
- if (vin->is_csi) {
- vin_err(vin, "Invalid setting in MIPI CSI2\n");
- return -EINVAL;
- }
- break;
- case VNMC_INF_RAW8:
- if (!vin->is_csi) {
- vin_err(vin, "Invalid setting in Digital Pins\n");
- return -EINVAL;
- }
- break;
- default:
- break;
- }
- }
-
/* Enable VSYNC Field Toggle mode after one VSYNC input */
- if (vin->info->model == RCAR_GEN3)
+ if (vin->info->model == RCAR_GEN3 || vin->info->model == RCAR_GEN4)
dmr2 = VNDMR2_FTEV;
else
dmr2 = VNDMR2_FTEV | VNDMR2_VLV(1);
@@ -910,7 +870,7 @@ static int rvin_setup(struct rvin_dev *vin)
case V4L2_PIX_FMT_SGBRG10:
case V4L2_PIX_FMT_SGRBG10:
case V4L2_PIX_FMT_SRGGB10:
- dmr = VNDMR_RMODE_RAW10 | VNDMR_YC_THR;
+ dmr = VNDMR_RMODE_RAW10;
break;
default:
vin_err(vin, "Invalid pixelformat (0x%x)\n",
@@ -926,7 +886,7 @@ static int rvin_setup(struct rvin_dev *vin)
if (input_is_yuv == output_is_yuv)
vnmc |= VNMC_BPS;
- if (vin->info->model == RCAR_GEN3) {
+ if (vin->info->model == RCAR_GEN3 || vin->info->model == RCAR_GEN4) {
/* Select between CSI-2 and parallel input */
if (vin->is_csi)
vnmc &= ~VNMC_DPINE;
@@ -1021,33 +981,13 @@ static void rvin_fill_hw_slot(struct rvin_dev *vin, int slot)
struct rvin_buffer *buf;
struct vb2_v4l2_buffer *vbuf;
dma_addr_t phys_addr;
- int prev;
 	/* An already populated slot shall never be overwritten. */
if (WARN_ON(vin->buf_hw[slot].buffer))
return;
- prev = (slot == 0 ? HW_BUFFER_NUM : slot) - 1;
-
- if (vin->buf_hw[prev].type == HALF_TOP) {
- vbuf = vin->buf_hw[prev].buffer;
- vin->buf_hw[slot].buffer = vbuf;
- vin->buf_hw[slot].type = HALF_BOTTOM;
- switch (vin->format.pixelformat) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV16:
- phys_addr = vin->buf_hw[prev].phys +
- vin->format.sizeimage / 4;
- break;
- default:
- phys_addr = vin->buf_hw[prev].phys +
- vin->format.sizeimage / 2;
- break;
- }
- } else if ((vin->state != STOPPED && vin->state != RUNNING) ||
- list_empty(&vin->buf_list)) {
+ if (list_empty(&vin->buf_list)) {
vin->buf_hw[slot].buffer = NULL;
- vin->buf_hw[slot].type = FULL;
phys_addr = vin->scratch_phys;
} else {
/* Keep track of buffer we give to HW */
@@ -1056,16 +996,12 @@ static void rvin_fill_hw_slot(struct rvin_dev *vin, int slot)
list_del_init(to_buf_list(vbuf));
vin->buf_hw[slot].buffer = vbuf;
- vin->buf_hw[slot].type =
- V4L2_FIELD_IS_SEQUENTIAL(vin->format.field) ?
- HALF_TOP : FULL;
-
/* Setup DMA */
phys_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
}
- vin_dbg(vin, "Filling HW slot: %d type: %d buffer: %p\n",
- slot, vin->buf_hw[slot].type, vin->buf_hw[slot].buffer);
+ vin_dbg(vin, "Filling HW slot: %d buffer: %p\n",
+ slot, vin->buf_hw[slot].buffer);
vin->buf_hw[slot].phys = phys_addr;
rvin_set_slot_addr(vin, slot, phys_addr);
@@ -1073,15 +1009,12 @@ static void rvin_fill_hw_slot(struct rvin_dev *vin, int slot)
static int rvin_capture_start(struct rvin_dev *vin)
{
- int slot, ret;
+ int ret;
- for (slot = 0; slot < HW_BUFFER_NUM; slot++) {
+ for (unsigned int slot = 0; slot < HW_BUFFER_NUM; slot++) {
vin->buf_hw[slot].buffer = NULL;
- vin->buf_hw[slot].type = FULL;
- }
-
- for (slot = 0; slot < HW_BUFFER_NUM; slot++)
rvin_fill_hw_slot(vin, slot);
+ }
ret = rvin_setup(vin);
if (ret)
@@ -1094,8 +1027,6 @@ static int rvin_capture_start(struct rvin_dev *vin)
/* Continuous Frame Capture Mode */
rvin_write(vin, VNFC_C_FRAME, VNFC_REG);
- vin->state = STARTING;
-
return 0;
}
@@ -1136,9 +1067,9 @@ static irqreturn_t rvin_irq(int irq, void *data)
if (!(int_status & VNINTS_FIS))
goto done;
- /* Nothing to do if capture status is 'STOPPED' */
- if (vin->state == STOPPED) {
- vin_dbg(vin, "IRQ while state stopped\n");
+ /* Nothing to do if not running. */
+ if (!vin->running) {
+ vin_dbg(vin, "IRQ while not running, ignoring\n");
goto done;
}
@@ -1150,28 +1081,17 @@ static irqreturn_t rvin_irq(int irq, void *data)
* To hand buffers back in a known order to userspace start
* to capture first from slot 0.
*/
- if (vin->state == STARTING) {
+ if (!vin->sequence) {
if (slot != 0) {
vin_dbg(vin, "Starting sync slot: %d\n", slot);
goto done;
}
vin_dbg(vin, "Capture start synced!\n");
- vin->state = RUNNING;
}
/* Capture frame */
if (vin->buf_hw[slot].buffer) {
- /*
- * Nothing to do but refill the hardware slot if
- * capture only filled first half of vb2 buffer.
- */
- if (vin->buf_hw[slot].type == HALF_TOP) {
- vin->buf_hw[slot].buffer = NULL;
- rvin_fill_hw_slot(vin, slot);
- goto done;
- }
-
vin->buf_hw[slot].buffer->field =
rvin_get_active_field(vin, vnms);
vin->buf_hw[slot].buffer->sequence = vin->sequence;
@@ -1322,8 +1242,6 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_SEQ_TB:
- case V4L2_FIELD_SEQ_BT:
/* Supported natively */
break;
case V4L2_FIELD_ALTERNATE:
@@ -1336,8 +1254,6 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_SEQ_TB:
- case V4L2_FIELD_SEQ_BT:
/* Use VIN hardware to combine the two fields */
fmt.format.height *= 2;
break;
@@ -1351,7 +1267,7 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
if (rvin_scaler_needed(vin)) {
 		/* Gen3 and Gen4 can't scale NV12 */
- if (vin->info->model == RCAR_GEN3 &&
+ if ((vin->info->model == RCAR_GEN3 || vin->info->model == RCAR_GEN4) &&
vin->format.pixelformat == V4L2_PIX_FMT_NV12)
return -EPIPE;
@@ -1434,6 +1350,8 @@ int rvin_start_streaming(struct rvin_dev *vin)
if (ret)
rvin_set_stream(vin, 0);
+ vin->running = true;
+
spin_unlock_irqrestore(&vin->qlock, flags);
return ret;
@@ -1466,44 +1384,21 @@ err_scratch:
void rvin_stop_streaming(struct rvin_dev *vin)
{
- unsigned int i, retries;
unsigned long flags;
- bool buffersFreed;
spin_lock_irqsave(&vin->qlock, flags);
- if (vin->state == STOPPED) {
+ if (!vin->running) {
spin_unlock_irqrestore(&vin->qlock, flags);
return;
}
- vin->state = STOPPING;
-
- /* Wait until only scratch buffer is used, max 3 interrupts. */
- retries = 0;
- while (retries++ < RVIN_RETRIES) {
- buffersFreed = true;
- for (i = 0; i < HW_BUFFER_NUM; i++)
- if (vin->buf_hw[i].buffer)
- buffersFreed = false;
-
- if (buffersFreed)
- break;
-
- spin_unlock_irqrestore(&vin->qlock, flags);
- msleep(RVIN_TIMEOUT_MS);
- spin_lock_irqsave(&vin->qlock, flags);
- }
-
/* Wait for streaming to stop */
- retries = 0;
- while (retries++ < RVIN_RETRIES) {
-
+ for (unsigned int i = 0; i < RVIN_RETRIES; i++) {
rvin_capture_stop(vin);
/* Check if HW is stopped */
if (!rvin_capture_active(vin)) {
- vin->state = STOPPED;
break;
}
@@ -1512,32 +1407,25 @@ void rvin_stop_streaming(struct rvin_dev *vin)
spin_lock_irqsave(&vin->qlock, flags);
}
- if (!buffersFreed || vin->state != STOPPED) {
- /*
- * If this happens something have gone horribly wrong.
- * Set state to stopped to prevent the interrupt handler
- * to make things worse...
- */
- vin_err(vin, "Failed stop HW, something is seriously broken\n");
- vin->state = STOPPED;
- }
+ if (rvin_capture_active(vin))
+ vin_err(vin, "Hardware did not stop\n");
- spin_unlock_irqrestore(&vin->qlock, flags);
+ vin->running = false;
- /* If something went wrong, free buffers with an error. */
- if (!buffersFreed) {
- return_unused_buffers(vin, VB2_BUF_STATE_ERROR);
- for (i = 0; i < HW_BUFFER_NUM; i++) {
- if (vin->buf_hw[i].buffer)
- vb2_buffer_done(&vin->buf_hw[i].buffer->vb2_buf,
- VB2_BUF_STATE_ERROR);
- }
- }
+ spin_unlock_irqrestore(&vin->qlock, flags);
rvin_set_stream(vin, 0);
/* disable interrupts */
rvin_disable_interrupts(vin);
+
+ /* Return unprocessed buffers from hardware. */
+ for (unsigned int i = 0; i < HW_BUFFER_NUM; i++) {
+ if (vin->buf_hw[i].buffer)
+ vb2_buffer_done(&vin->buf_hw[i].buffer->vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ }
}
static void rvin_stop_streaming_vq(struct vb2_queue *vq)
@@ -1583,8 +1471,6 @@ int rvin_dma_register(struct rvin_dev *vin, int irq)
spin_lock_init(&vin->qlock);
- vin->state = STOPPED;
-
for (i = 0; i < HW_BUFFER_NUM; i++)
vin->buf_hw[i].buffer = NULL;
@@ -1687,7 +1573,7 @@ void rvin_set_alpha(struct rvin_dev *vin, unsigned int alpha)
vin->alpha = alpha;
- if (vin->state == STOPPED)
+ if (!vin->running)
goto out;
switch (vin->format.pixelformat) {
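With the DMA state enum gone, rvin_irq() treats a zero vin->sequence as "still syncing to slot 0" and rvin_stop_streaming() collapses into a bounded retry loop guarded by one running flag. A minimal userspace sketch of that bounded stop loop; names and the retry count are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define RETRIES 5

static bool hw_active;	/* stands in for rvin_capture_active() */

static void capture_stop(void)
{
	hw_active = false;	/* the real HW may need several attempts */
}

static void stop_streaming(bool *running)
{
	if (!*running)
		return;

	for (unsigned int i = 0; i < RETRIES; i++) {
		capture_stop();
		if (!hw_active)		/* HW acknowledged the stop */
			break;
		/* the driver sleeps RVIN_TIMEOUT_MS here */
	}

	if (hw_active)
		printf("hardware did not stop\n");

	*running = false;	/* IRQ handler now ignores stray interrupts */
}

int main(void)
{
	bool running = true;

	hw_active = true;
	stop_streaming(&running);
	return 0;
}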
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c b/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
index 756fdfdbce61..db091af57c19 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
@@ -88,19 +88,19 @@ static const struct rvin_video_format rvin_formats[] = {
},
{
.fourcc = V4L2_PIX_FMT_SBGGR10,
- .bpp = 4,
+ .bpp = 2,
},
{
.fourcc = V4L2_PIX_FMT_SGBRG10,
- .bpp = 4,
+ .bpp = 2,
},
{
.fourcc = V4L2_PIX_FMT_SGRBG10,
- .bpp = 4,
+ .bpp = 2,
},
{
.fourcc = V4L2_PIX_FMT_SRGGB10,
- .bpp = 4,
+ .bpp = 2,
},
};
@@ -161,9 +161,6 @@ static u32 rvin_format_bytesperline(struct rvin_dev *vin,
break;
}
- if (V4L2_FIELD_IS_SEQUENTIAL(pix->field))
- align = 0x80;
-
return ALIGN(pix->width, align) * fmt->bpp;
}
@@ -194,8 +191,6 @@ static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_INTERLACED:
case V4L2_FIELD_ALTERNATE:
- case V4L2_FIELD_SEQ_TB:
- case V4L2_FIELD_SEQ_BT:
break;
default:
pix->field = RVIN_DEFAULT_FIELD;
@@ -504,8 +499,6 @@ static int rvin_remote_rectangle(struct rvin_dev *vin, struct v4l2_rect *rect)
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_SEQ_TB:
- case V4L2_FIELD_SEQ_BT:
rect->height *= 2;
break;
}
@@ -591,8 +584,8 @@ static int rvin_s_selection(struct file *file, void *fh,
vin->crop = s->r = r;
- vin_dbg(vin, "Cropped %dx%d@%d:%d of %dx%d\n",
- r.width, r.height, r.left, r.top,
+ vin_dbg(vin, "Cropped (%d,%d)/%ux%u of %dx%d\n",
+ r.left, r.top, r.width, r.height,
max_rect.width, max_rect.height);
break;
case V4L2_SEL_TGT_COMPOSE:
@@ -616,8 +609,8 @@ static int rvin_s_selection(struct file *file, void *fh,
vin->compose = s->r = r;
- vin_dbg(vin, "Compose %dx%d@%d:%d in %dx%d\n",
- r.width, r.height, r.left, r.top,
+ vin_dbg(vin, "Compose (%d,%d)/%ux%u in %dx%d\n",
+ r.left, r.top, r.width, r.height,
vin->format.width, vin->format.height);
break;
default:
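The bpp correction above feeds straight into rvin_format_bytesperline(), which multiplies the aligned width by bytes per pixel: a 1920-wide SRGGB10 line is now 1920 * 2 = 3840 bytes, where the old bpp of 4 yielded 7680, double what 10-bit-in-16-bit Bayer actually occupies. A short sketch of the rule, assuming a power-of-two alignment:

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Aligned width times bytes per pixel: 1920, 16, 2 -> 3840 for RAW10. */
static unsigned int bytesperline(unsigned int width, unsigned int align,
				 unsigned int bpp)
{
	return ALIGN_UP(width, align) * bpp;
}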
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
index f87d4bc9e53e..83d1b2734c41 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
@@ -39,6 +39,7 @@ enum model_id {
RCAR_M1,
RCAR_GEN2,
RCAR_GEN3,
+ RCAR_GEN4,
};
enum rvin_csi_id {
@@ -62,39 +63,6 @@ enum rvin_isp_id {
(unsigned int)RVIN_CSI_MAX : (unsigned int)RVIN_ISP_MAX)
/**
- * enum rvin_dma_state - DMA states
- * @STOPPED: No operation in progress
- * @STARTING: Capture starting up
- * @RUNNING: Operation in progress have buffers
- * @STOPPING: Stopping operation
- * @SUSPENDED: Capture is suspended
- */
-enum rvin_dma_state {
- STOPPED = 0,
- STARTING,
- RUNNING,
- STOPPING,
- SUSPENDED,
-};
-
-/**
- * enum rvin_buffer_type
- *
- * Describes how a buffer is given to the hardware. To be able
- * to capture SEQ_TB/BT it's needed to capture to the same vb2
- * buffer twice so the type of buffer needs to be kept.
- *
- * @FULL: One capture fills the whole vb2 buffer
- * @HALF_TOP: One capture fills the top half of the vb2 buffer
- * @HALF_BOTTOM: One capture fills the bottom half of the vb2 buffer
- */
-enum rvin_buffer_type {
- FULL,
- HALF_TOP,
- HALF_BOTTOM,
-};
-
-/**
* struct rvin_video_format - Data format stored in memory
* @fourcc: Pixelformat
* @bpp: Bytes per pixel
@@ -194,11 +162,11 @@ struct rvin_info {
* @scratch: cpu address for scratch buffer
* @scratch_phys: physical address of the scratch buffer
*
- * @qlock: protects @buf_hw, @buf_list, @sequence and @state
+ * @qlock: Protects @buf_hw, @buf_list, @sequence and @running
* @buf_hw: Keeps track of buffers given to HW slot
* @buf_list: list of queued buffers
* @sequence: V4L2 buffers sequence number
- * @state: keeps track of operation state
+ * @running: Keeps track of whether the VIN is running
*
* @is_csi: flag to mark the VIN as using a CSI-2 subdevice
* @chsel: Cached value of the current CSI-2 channel selection
@@ -237,12 +205,11 @@ struct rvin_dev {
spinlock_t qlock;
struct {
struct vb2_v4l2_buffer *buffer;
- enum rvin_buffer_type type;
dma_addr_t phys;
} buf_hw[HW_BUFFER_NUM];
struct list_head buf_list;
unsigned int sequence;
- enum rvin_dma_state state;
+ bool running;
bool is_csi;
unsigned int chsel;
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
index 89be584a4988..5fa73ab2db53 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
@@ -22,6 +22,7 @@
#include <media/v4l2-mc.h>
#include "rzg2l-cru.h"
+#include "rzg2l-cru-regs.h"
static inline struct rzg2l_cru_dev *notifier_to_cru(struct v4l2_async_notifier *n)
{
@@ -240,10 +241,11 @@ static int rzg2l_cru_media_init(struct rzg2l_cru_dev *cru)
static int rzg2l_cru_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct rzg2l_cru_dev *cru;
int irq, ret;
- cru = devm_kzalloc(&pdev->dev, sizeof(*cru), GFP_KERNEL);
+ cru = devm_kzalloc(dev, sizeof(*cru), GFP_KERNEL);
if (!cru)
return -ENOMEM;
@@ -251,32 +253,32 @@ static int rzg2l_cru_probe(struct platform_device *pdev)
if (IS_ERR(cru->base))
return PTR_ERR(cru->base);
- cru->presetn = devm_reset_control_get_shared(&pdev->dev, "presetn");
+ cru->presetn = devm_reset_control_get_shared(dev, "presetn");
if (IS_ERR(cru->presetn))
- return dev_err_probe(&pdev->dev, PTR_ERR(cru->presetn),
+ return dev_err_probe(dev, PTR_ERR(cru->presetn),
"Failed to get cpg presetn\n");
- cru->aresetn = devm_reset_control_get_exclusive(&pdev->dev, "aresetn");
+ cru->aresetn = devm_reset_control_get_exclusive(dev, "aresetn");
if (IS_ERR(cru->aresetn))
- return dev_err_probe(&pdev->dev, PTR_ERR(cru->aresetn),
+ return dev_err_probe(dev, PTR_ERR(cru->aresetn),
"Failed to get cpg aresetn\n");
- cru->vclk = devm_clk_get(&pdev->dev, "video");
+ cru->vclk = devm_clk_get(dev, "video");
if (IS_ERR(cru->vclk))
- return dev_err_probe(&pdev->dev, PTR_ERR(cru->vclk),
+ return dev_err_probe(dev, PTR_ERR(cru->vclk),
"Failed to get video clock\n");
- cru->dev = &pdev->dev;
- cru->info = of_device_get_match_data(&pdev->dev);
+ cru->dev = dev;
+ cru->info = of_device_get_match_data(dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- ret = devm_request_irq(&pdev->dev, irq, rzg2l_cru_irq, 0,
+ ret = devm_request_irq(dev, irq, cru->info->irq_handler, 0,
KBUILD_MODNAME, cru);
if (ret)
- return dev_err_probe(&pdev->dev, ret, "failed to request irq\n");
+ return dev_err_probe(dev, ret, "failed to request irq\n");
platform_set_drvdata(pdev, cru);
@@ -285,8 +287,10 @@ static int rzg2l_cru_probe(struct platform_device *pdev)
return ret;
cru->num_buf = RZG2L_CRU_HW_BUFFER_DEFAULT;
- pm_suspend_ignore_children(&pdev->dev, true);
- pm_runtime_enable(&pdev->dev);
+ pm_suspend_ignore_children(dev, true);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ goto error_dma_unregister;
ret = rzg2l_cru_media_init(cru);
if (ret)
@@ -296,7 +300,6 @@ static int rzg2l_cru_probe(struct platform_device *pdev)
error_dma_unregister:
rzg2l_cru_dma_unregister(cru);
- pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -305,8 +308,6 @@ static void rzg2l_cru_remove(struct platform_device *pdev)
{
struct rzg2l_cru_dev *cru = platform_get_drvdata(pdev);
- pm_runtime_disable(&pdev->dev);
-
v4l2_async_nf_unregister(&cru->notifier);
v4l2_async_nf_cleanup(&cru->notifier);
@@ -317,8 +318,112 @@ static void rzg2l_cru_remove(struct platform_device *pdev)
rzg2l_cru_dma_unregister(cru);
}
+static const u16 rzg3e_cru_regs[] = {
+ [CRUnCTRL] = 0x0,
+ [CRUnIE] = 0x4,
+ [CRUnIE2] = 0x8,
+ [CRUnINTS] = 0xc,
+ [CRUnINTS2] = 0x10,
+ [CRUnRST] = 0x18,
+ [AMnMB1ADDRL] = 0x40,
+ [AMnMB1ADDRH] = 0x44,
+ [AMnMB2ADDRL] = 0x48,
+ [AMnMB2ADDRH] = 0x4c,
+ [AMnMB3ADDRL] = 0x50,
+ [AMnMB3ADDRH] = 0x54,
+ [AMnMB4ADDRL] = 0x58,
+ [AMnMB4ADDRH] = 0x5c,
+ [AMnMB5ADDRL] = 0x60,
+ [AMnMB5ADDRH] = 0x64,
+ [AMnMB6ADDRL] = 0x68,
+ [AMnMB6ADDRH] = 0x6c,
+ [AMnMB7ADDRL] = 0x70,
+ [AMnMB7ADDRH] = 0x74,
+ [AMnMB8ADDRL] = 0x78,
+ [AMnMB8ADDRH] = 0x7c,
+ [AMnMBVALID] = 0x88,
+ [AMnMADRSL] = 0x8c,
+ [AMnMADRSH] = 0x90,
+ [AMnAXIATTR] = 0xec,
+ [AMnFIFOPNTR] = 0xf8,
+ [AMnAXISTP] = 0x110,
+ [AMnAXISTPACK] = 0x114,
+ [AMnIS] = 0x128,
+ [ICnEN] = 0x1f0,
+ [ICnSVCNUM] = 0x1f8,
+ [ICnSVC] = 0x1fc,
+ [ICnIPMC_C0] = 0x200,
+ [ICnMS] = 0x2d8,
+ [ICnDMR] = 0x304,
+};
+
+static const struct rzg2l_cru_info rzg3e_cru_info = {
+ .max_width = 4095,
+ .max_height = 4095,
+ .image_conv = ICnIPMC_C0,
+ .has_stride = true,
+ .regs = rzg3e_cru_regs,
+ .irq_handler = rzg3e_cru_irq,
+ .enable_interrupts = rzg3e_cru_enable_interrupts,
+ .disable_interrupts = rzg3e_cru_disable_interrupts,
+ .fifo_empty = rz3e_fifo_empty,
+ .csi_setup = rzg3e_cru_csi2_setup,
+};
+
+static const u16 rzg2l_cru_regs[] = {
+ [CRUnCTRL] = 0x0,
+ [CRUnIE] = 0x4,
+ [CRUnINTS] = 0x8,
+ [CRUnRST] = 0xc,
+ [AMnMB1ADDRL] = 0x100,
+ [AMnMB1ADDRH] = 0x104,
+ [AMnMB2ADDRL] = 0x108,
+ [AMnMB2ADDRH] = 0x10c,
+ [AMnMB3ADDRL] = 0x110,
+ [AMnMB3ADDRH] = 0x114,
+ [AMnMB4ADDRL] = 0x118,
+ [AMnMB4ADDRH] = 0x11c,
+ [AMnMB5ADDRL] = 0x120,
+ [AMnMB5ADDRH] = 0x124,
+ [AMnMB6ADDRL] = 0x128,
+ [AMnMB6ADDRH] = 0x12c,
+ [AMnMB7ADDRL] = 0x130,
+ [AMnMB7ADDRH] = 0x134,
+ [AMnMB8ADDRL] = 0x138,
+ [AMnMB8ADDRH] = 0x13c,
+ [AMnMBVALID] = 0x148,
+ [AMnMBS] = 0x14c,
+ [AMnAXIATTR] = 0x158,
+ [AMnFIFOPNTR] = 0x168,
+ [AMnAXISTP] = 0x174,
+ [AMnAXISTPACK] = 0x178,
+ [ICnEN] = 0x200,
+ [ICnMC] = 0x208,
+ [ICnMS] = 0x254,
+ [ICnDMR] = 0x26c,
+};
+
+static const struct rzg2l_cru_info rzgl2_cru_info = {
+ .max_width = 2800,
+ .max_height = 4095,
+ .image_conv = ICnMC,
+ .regs = rzg2l_cru_regs,
+ .irq_handler = rzg2l_cru_irq,
+ .enable_interrupts = rzg2l_cru_enable_interrupts,
+ .disable_interrupts = rzg2l_cru_disable_interrupts,
+ .fifo_empty = rzg2l_fifo_empty,
+ .csi_setup = rzg2l_cru_csi2_setup,
+};
+
static const struct of_device_id rzg2l_cru_of_id_table[] = {
- { .compatible = "renesas,rzg2l-cru", },
+ {
+ .compatible = "renesas,r9a09g047-cru",
+ .data = &rzg3e_cru_info,
+ },
+ {
+ .compatible = "renesas,rzg2l-cru",
+ .data = &rzgl2_cru_info,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rzg2l_cru_of_id_table);
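Indexing the per-SoC offset tables by enum rzg2l_cru_common_regs lets a single code path drive both the RZ/G2L and RZ/G3E layouts: accessors simply indirect through info->regs. A minimal sketch of the lookup with a pared-down register set:

#include <stdint.h>
#include <stdio.h>

enum regs { CTRL, IE, INTS, MAX_REG };	/* pared-down ids */

static const uint16_t g2l_map[MAX_REG] = { [CTRL] = 0x0, [IE] = 0x4, [INTS] = 0x8 };
static const uint16_t g3e_map[MAX_REG] = { [CTRL] = 0x0, [IE] = 0x4, [INTS] = 0xc };

struct info {
	const uint16_t *regs;
};

/* One accessor serves every SoC: the table supplies the offset. */
static uint16_t reg_offset(const struct info *info, enum regs r)
{
	return info->regs[r];
}

int main(void)
{
	struct info g2l = { g2l_map }, g3e = { g3e_map };

	printf("INTS: g2l=0x%x g3e=0x%x\n",
	       reg_offset(&g2l, INTS), reg_offset(&g3e, INTS));
	return 0;
}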
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru-regs.h b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru-regs.h
index 1c9f22118a5d..a5a57369ef0e 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru-regs.h
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru-regs.h
@@ -10,71 +10,102 @@
/* HW CRU Registers Definition */
-/* CRU Control Register */
-#define CRUnCTRL 0x0
#define CRUnCTRL_VINSEL(x) ((x) << 0)
-/* CRU Interrupt Enable Register */
-#define CRUnIE 0x4
#define CRUnIE_EFE BIT(17)
-/* CRU Interrupt Status Register */
-#define CRUnINTS 0x8
+#define CRUnIE2_FSxE(x) BIT(((x) * 3))
+#define CRUnIE2_FExE(x) BIT(((x) * 3) + 1)
+
#define CRUnINTS_SFS BIT(16)
-/* CRU Reset Register */
-#define CRUnRST 0xc
+#define CRUnINTS2_FSxS(x) BIT(((x) * 3))
+
#define CRUnRST_VRESETN BIT(0)
/* Memory Bank Base Address (Lower) Register for CRU Image Data */
-#define AMnMBxADDRL(x) (0x100 + ((x) * 8))
+#define AMnMBxADDRL(x) (AMnMB1ADDRL + (x) * 2)
/* Memory Bank Base Address (Higher) Register for CRU Image Data */
-#define AMnMBxADDRH(x) (0x104 + ((x) * 8))
+#define AMnMBxADDRH(x) (AMnMB1ADDRH + (x) * 2)
-/* Memory Bank Enable Register for CRU Image Data */
-#define AMnMBVALID 0x148
#define AMnMBVALID_MBVALID(x) GENMASK(x, 0)
-/* Memory Bank Status Register for CRU Image Data */
-#define AMnMBS 0x14c
#define AMnMBS_MBSTS 0x7
-/* AXI Master Transfer Setting Register for CRU Image Data */
-#define AMnAXIATTR 0x158
#define AMnAXIATTR_AXILEN_MASK GENMASK(3, 0)
#define AMnAXIATTR_AXILEN (0xf)
-/* AXI Master FIFO Pointer Register for CRU Image Data */
-#define AMnFIFOPNTR 0x168
#define AMnFIFOPNTR_FIFOWPNTR GENMASK(7, 0)
+#define AMnFIFOPNTR_FIFOWPNTR_B0 AMnFIFOPNTR_FIFOWPNTR
+#define AMnFIFOPNTR_FIFOWPNTR_B1 GENMASK(15, 8)
#define AMnFIFOPNTR_FIFORPNTR_Y GENMASK(23, 16)
+#define AMnFIFOPNTR_FIFORPNTR_B0 AMnFIFOPNTR_FIFORPNTR_Y
+#define AMnFIFOPNTR_FIFORPNTR_B1 GENMASK(31, 24)
+
+#define AMnIS_IS_MASK GENMASK(14, 7)
+#define AMnIS_IS(x) ((x) << 7)
-/* AXI Master Transfer Stop Register for CRU Image Data */
-#define AMnAXISTP 0x174
#define AMnAXISTP_AXI_STOP BIT(0)
-/* AXI Master Transfer Stop Status Register for CRU Image Data */
-#define AMnAXISTPACK 0x178
#define AMnAXISTPACK_AXI_STOP_ACK BIT(0)
-/* CRU Image Processing Enable Register */
-#define ICnEN 0x200
#define ICnEN_ICEN BIT(0)
-/* CRU Image Processing Main Control Register */
-#define ICnMC 0x208
+#define ICnSVC_SVC0(x) (x)
+#define ICnSVC_SVC1(x) ((x) << 4)
+#define ICnSVC_SVC2(x) ((x) << 8)
+#define ICnSVC_SVC3(x) ((x) << 12)
+
#define ICnMC_CSCTHR BIT(5)
#define ICnMC_INF(x) ((x) << 16)
#define ICnMC_VCSEL(x) ((x) << 22)
#define ICnMC_INF_MASK GENMASK(21, 16)
-/* CRU Module Status Register */
-#define ICnMS 0x254
#define ICnMS_IA BIT(2)
-/* CRU Data Output Mode Register */
-#define ICnDMR 0x26c
#define ICnDMR_YCMODE_UYVY (1 << 4)
+enum rzg2l_cru_common_regs {
+ CRUnCTRL, /* CRU Control */
+ CRUnIE, /* CRU Interrupt Enable */
+ CRUnIE2, /* CRU Interrupt Enable(2) */
+ CRUnINTS, /* CRU Interrupt Status */
+ CRUnINTS2, /* CRU Interrupt Status(2) */
+ CRUnRST, /* CRU Reset */
+ AMnMB1ADDRL, /* Bank 1 Address (Lower) for CRU Image Data */
+ AMnMB1ADDRH, /* Bank 1 Address (Higher) for CRU Image Data */
+ AMnMB2ADDRL, /* Bank 2 Address (Lower) for CRU Image Data */
+ AMnMB2ADDRH, /* Bank 2 Address (Higher) for CRU Image Data */
+ AMnMB3ADDRL, /* Bank 3 Address (Lower) for CRU Image Data */
+ AMnMB3ADDRH, /* Bank 3 Address (Higher) for CRU Image Data */
+ AMnMB4ADDRL, /* Bank 4 Address (Lower) for CRU Image Data */
+ AMnMB4ADDRH, /* Bank 4 Address (Higher) for CRU Image Data */
+ AMnMB5ADDRL, /* Bank 5 Address (Lower) for CRU Image Data */
+ AMnMB5ADDRH, /* Bank 5 Address (Higher) for CRU Image Data */
+ AMnMB6ADDRL, /* Bank 6 Address (Lower) for CRU Image Data */
+ AMnMB6ADDRH, /* Bank 6 Address (Higher) for CRU Image Data */
+ AMnMB7ADDRL, /* Bank 7 Address (Lower) for CRU Image Data */
+ AMnMB7ADDRH, /* Bank 7 Address (Higher) for CRU Image Data */
+ AMnMB8ADDRL, /* Bank 8 Address (Lower) for CRU Image Data */
+ AMnMB8ADDRH, /* Bank 8 Address (Higher) for CRU Image Data */
+ AMnMBVALID, /* Memory Bank Enable for CRU Image Data */
+ AMnMBS, /* Memory Bank Status for CRU Image Data */
+ AMnMADRSL, /* VD Memory Address Lower Status Register */
+ AMnMADRSH, /* VD Memory Address Higher Status Register */
+ AMnAXIATTR, /* AXI Master Transfer Setting Register for CRU Image Data */
+ AMnFIFOPNTR, /* AXI Master FIFO Pointer for CRU Image Data */
+ AMnAXISTP, /* AXI Master Transfer Stop for CRU Image Data */
+ AMnAXISTPACK, /* AXI Master Transfer Stop Status for CRU Image Data */
+ AMnIS, /* Image Stride Setting Register */
+ ICnEN, /* CRU Image Processing Enable */
+ ICnSVCNUM, /* CRU SVC Number Register */
+ ICnSVC, /* CRU VC Select Register */
+ ICnMC, /* CRU Image Processing Main Control */
+ ICnIPMC_C0, /* CRU Image Converter Main Control 0 */
+ ICnMS, /* CRU Module Status */
+ ICnDMR, /* CRU Data Output Mode */
+ RZG2L_CRU_MAX_REG,
+};
+
#endif /* __RZG2L_CRU_REGS_H__ */
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
index 8b898ce05b84..c30f3b281284 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
@@ -27,15 +27,15 @@
#define RZG2L_CRU_CSI2_VCHANNEL 4
#define RZG2L_CRU_MIN_INPUT_WIDTH 320
-#define RZG2L_CRU_MAX_INPUT_WIDTH 2800
#define RZG2L_CRU_MIN_INPUT_HEIGHT 240
-#define RZG2L_CRU_MAX_INPUT_HEIGHT 4095
enum rzg2l_csi2_pads {
RZG2L_CRU_IP_SINK = 0,
RZG2L_CRU_IP_SOURCE,
};
+struct rzg2l_cru_dev;
+
/**
* enum rzg2l_cru_dma_state - DMA states
* @RZG2L_CRU_DMA_STOPPED: No operation in progress
@@ -80,6 +80,21 @@ struct rzg2l_cru_ip_format {
bool yuv;
};
+struct rzg2l_cru_info {
+ unsigned int max_width;
+ unsigned int max_height;
+ u16 image_conv;
+ const u16 *regs;
+ bool has_stride;
+ irqreturn_t (*irq_handler)(int irq, void *data);
+ void (*enable_interrupts)(struct rzg2l_cru_dev *cru);
+ void (*disable_interrupts)(struct rzg2l_cru_dev *cru);
+ bool (*fifo_empty)(struct rzg2l_cru_dev *cru);
+ void (*csi_setup)(struct rzg2l_cru_dev *cru,
+ const struct rzg2l_cru_ip_format *ip_fmt,
+ u8 csi_vc);
+};
+
/**
* struct rzg2l_cru_dev - Renesas CRU device structure
* @dev: (OF) device
@@ -94,6 +109,8 @@ struct rzg2l_cru_ip_format {
* @vdev: V4L2 video device associated with CRU
* @v4l2_dev: V4L2 device
* @num_buf: Holds the current number of buffers enabled
+ * @svc_channel: SVC0/1/2/3 to use for RZ/G3E
+ * @buf_addr: Memory addresses where current video data is written.
* @notifier: V4L2 asynchronous subdevs notifier
*
* @ip: Image processing subdev info
@@ -130,6 +147,9 @@ struct rzg2l_cru_dev {
struct v4l2_device v4l2_dev;
u8 num_buf;
+ u8 svc_channel;
+ dma_addr_t buf_addr[RZG2L_CRU_HW_BUFFER_DEFAULT];
+
struct v4l2_async_notifier notifier;
struct rzg2l_cru_ip ip;
@@ -161,6 +181,7 @@ void rzg2l_cru_dma_unregister(struct rzg2l_cru_dev *cru);
int rzg2l_cru_video_register(struct rzg2l_cru_dev *cru);
void rzg2l_cru_video_unregister(struct rzg2l_cru_dev *cru);
irqreturn_t rzg2l_cru_irq(int irq, void *data);
+irqreturn_t rzg3e_cru_irq(int irq, void *data);
const struct v4l2_format_info *rzg2l_cru_format_from_pixel(u32 format);
@@ -172,4 +193,18 @@ const struct rzg2l_cru_ip_format *rzg2l_cru_ip_code_to_fmt(unsigned int code);
const struct rzg2l_cru_ip_format *rzg2l_cru_ip_format_to_fmt(u32 format);
const struct rzg2l_cru_ip_format *rzg2l_cru_ip_index_to_fmt(u32 index);
+void rzg2l_cru_enable_interrupts(struct rzg2l_cru_dev *cru);
+void rzg2l_cru_disable_interrupts(struct rzg2l_cru_dev *cru);
+void rzg3e_cru_enable_interrupts(struct rzg2l_cru_dev *cru);
+void rzg3e_cru_disable_interrupts(struct rzg2l_cru_dev *cru);
+
+bool rzg2l_fifo_empty(struct rzg2l_cru_dev *cru);
+bool rz3e_fifo_empty(struct rzg2l_cru_dev *cru);
+void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru,
+ const struct rzg2l_cru_ip_format *ip_fmt,
+ u8 csi_vc);
+void rzg3e_cru_csi2_setup(struct rzg2l_cru_dev *cru,
+ const struct rzg2l_cru_ip_format *ip_fmt,
+ u8 csi_vc);
+
#endif
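struct rzg2l_cru_info also carries the variant's IRQ handler and interrupt helpers, chosen once in probe from the OF match data instead of compile-time switches. A sketch of wiring a match-data handler in kernel idiom; the struct and function names here are illustrative:

#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct variant_info {
	irqreturn_t (*irq_handler)(int irq, void *data);
};

static int wire_irq(struct platform_device *pdev, void *drvdata)
{
	const struct variant_info *info = of_device_get_match_data(&pdev->dev);
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* The compatible string picked the handler, not an #ifdef. */
	return devm_request_irq(&pdev->dev, irq, info->irq_handler, 0,
				KBUILD_MODNAME, drvdata);
}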
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
index 881e910dce02..9243306e2aa9 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
@@ -85,6 +85,15 @@
CSIDPHYSKW0_UTIL_DL2_SKW_ADJ(1) | \
CSIDPHYSKW0_UTIL_DL3_SKW_ADJ(1))
+/* DPHY registers on RZ/V2H(P) SoC */
+#define CRUm_S_TIMCTL 0x41c
+#define CRUm_S_TIMCTL_S_HSSETTLECTL(x) ((x) << 8)
+
+#define CRUm_S_DPHYCTL_MSB 0x434
+#define CRUm_S_DPHYCTL_MSB_DESKEW BIT(1)
+
+#define CRUm_SWAPCTL 0x438
+
#define VSRSTS_RETRIES 20
#define RZG2L_CSI2_MIN_WIDTH 320
@@ -107,6 +116,7 @@ struct rzg2l_csi2 {
void __iomem *base;
struct reset_control *presetn;
struct reset_control *cmn_rstb;
+ const struct rzg2l_csi2_info *info;
struct clk *sysclk;
struct clk *vclk;
unsigned long vclk_rate;
@@ -123,6 +133,12 @@ struct rzg2l_csi2 {
bool dphy_enabled;
};
+struct rzg2l_csi2_info {
+ int (*dphy_enable)(struct rzg2l_csi2 *csi2);
+ int (*dphy_disable)(struct rzg2l_csi2 *csi2);
+ bool has_system_clk;
+};
+
struct rzg2l_csi2_timings {
u32 t_init;
u32 tclk_miss;
@@ -133,6 +149,30 @@ struct rzg2l_csi2_timings {
u32 max_hsfreq;
};
+struct rzv2h_csi2_s_hssettlectl {
+ unsigned int hsfreq;
+ u16 s_hssettlectl;
+};
+
+static const struct rzv2h_csi2_s_hssettlectl rzv2h_s_hssettlectl[] = {
+ { 90, 1 }, { 130, 2 }, { 180, 3 },
+ { 220, 4 }, { 270, 5 }, { 310, 6 },
+ { 360, 7 }, { 400, 8 }, { 450, 9 },
+ { 490, 10 }, { 540, 11 }, { 580, 12 },
+ { 630, 13 }, { 670, 14 }, { 720, 15 },
+ { 760, 16 }, { 810, 17 }, { 850, 18 },
+ { 900, 19 }, { 940, 20 }, { 990, 21 },
+ { 1030, 22 }, { 1080, 23 }, { 1120, 24 },
+ { 1170, 25 }, { 1220, 26 }, { 1260, 27 },
+ { 1310, 28 }, { 1350, 29 }, { 1400, 30 },
+ { 1440, 31 }, { 1490, 32 }, { 1530, 33 },
+ { 1580, 34 }, { 1620, 35 }, { 1670, 36 },
+ { 1710, 37 }, { 1760, 38 }, { 1800, 39 },
+ { 1850, 40 }, { 1890, 41 }, { 1940, 42 },
+ { 1980, 43 }, { 2030, 44 }, { 2070, 45 },
+ { 2100, 46 },
+};
+
static const struct rzg2l_csi2_timings rzg2l_csi2_global_timings[] = {
{
.max_hsfreq = 80,
@@ -355,14 +395,20 @@ static int rzg2l_csi2_dphy_enable(struct rzg2l_csi2 *csi2)
return ret;
}
+static const struct rzg2l_csi2_info rzg2l_csi2_info = {
+ .dphy_enable = rzg2l_csi2_dphy_enable,
+ .dphy_disable = rzg2l_csi2_dphy_disable,
+ .has_system_clk = true,
+};
+
static int rzg2l_csi2_dphy_setting(struct v4l2_subdev *sd, bool on)
{
struct rzg2l_csi2 *csi2 = sd_to_csi2(sd);
if (on)
- return rzg2l_csi2_dphy_enable(csi2);
+ return csi2->info->dphy_enable(csi2);
- return rzg2l_csi2_dphy_disable(csi2);
+ return csi2->info->dphy_disable(csi2);
}
static int rzg2l_csi2_mipi_link_enable(struct rzg2l_csi2 *csi2)
@@ -421,6 +467,64 @@ static int rzg2l_csi2_mipi_link_disable(struct rzg2l_csi2 *csi2)
return 0;
}
+static int rzv2h_csi2_dphy_disable(struct rzg2l_csi2 *csi2)
+{
+ int ret;
+
+ /* Reset the CRU (D-PHY) */
+ ret = reset_control_assert(csi2->cmn_rstb);
+ if (ret)
+ return ret;
+
+ csi2->dphy_enabled = false;
+
+ return 0;
+}
+
+static int rzv2h_csi2_dphy_enable(struct rzg2l_csi2 *csi2)
+{
+ unsigned int i;
+ u16 hssettle;
+ int mbps;
+
+ mbps = rzg2l_csi2_calc_mbps(csi2);
+ if (mbps < 0)
+ return mbps;
+
+ csi2->hsfreq = mbps;
+
+ for (i = 0; i < ARRAY_SIZE(rzv2h_s_hssettlectl); i++) {
+ if (csi2->hsfreq <= rzv2h_s_hssettlectl[i].hsfreq)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(rzv2h_s_hssettlectl))
+ return -EINVAL;
+
+ rzg2l_csi2_write(csi2, CRUm_SWAPCTL, 0);
+
+ hssettle = rzv2h_s_hssettlectl[i].s_hssettlectl;
+ rzg2l_csi2_write(csi2, CRUm_S_TIMCTL,
+ CRUm_S_TIMCTL_S_HSSETTLECTL(hssettle));
+
+ if (csi2->hsfreq > 1500)
+ rzg2l_csi2_set(csi2, CRUm_S_DPHYCTL_MSB,
+ CRUm_S_DPHYCTL_MSB_DESKEW);
+ else
+ rzg2l_csi2_clr(csi2, CRUm_S_DPHYCTL_MSB,
+ CRUm_S_DPHYCTL_MSB_DESKEW);
+
+ csi2->dphy_enabled = true;
+
+ return 0;
+}
+
+static const struct rzg2l_csi2_info rzv2h_csi2_info = {
+ .dphy_enable = rzv2h_csi2_dphy_enable,
+ .dphy_disable = rzv2h_csi2_dphy_disable,
+ .has_system_clk = false,
+};
+
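
Editor's note: the RZ/V2H(P) D-PHY programming above reduces to a table lookup plus a deskew threshold. A minimal user-space sketch of the same selection logic, assuming a truncated copy of the rzv2h_s_hssettlectl table (helper names hypothetical):

#include <stdio.h>

struct settle_entry {
	unsigned int hsfreq;		/* upper bound of the rate band, Mbps */
	unsigned short code;		/* HS-settle counter for that band */
};

/* Truncated copy of the rzv2h_s_hssettlectl table above. */
static const struct settle_entry table[] = {
	{ 90, 1 }, { 1580, 34 }, { 1620, 35 }, { 1670, 36 }, { 2100, 46 },
};

/* The first band that covers the link rate wins, as in the driver's loop. */
static int lookup_hssettle(unsigned int mbps)
{
	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (mbps <= table[i].hsfreq)
			return table[i].code;
	return -1;			/* above 2100 Mbps: unsupported */
}

int main(void)
{
	unsigned int mbps = 1600;

	/* The deskew bit is only set above 1500 Mbps. */
	printf("hssettle=%d deskew=%s\n", lookup_hssettle(mbps),
	       mbps > 1500 ? "on" : "off");
	return 0;
}
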
static int rzg2l_csi2_mipi_link_setting(struct v4l2_subdev *sd, bool on)
{
struct rzg2l_csi2 *csi2 = sd_to_csi2(sd);
@@ -764,39 +868,46 @@ static const struct media_entity_operations rzg2l_csi2_entity_ops = {
static int rzg2l_csi2_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct rzg2l_csi2 *csi2;
int ret;
- csi2 = devm_kzalloc(&pdev->dev, sizeof(*csi2), GFP_KERNEL);
+ csi2 = devm_kzalloc(dev, sizeof(*csi2), GFP_KERNEL);
if (!csi2)
return -ENOMEM;
+ csi2->info = of_device_get_match_data(dev);
+ if (!csi2->info)
+ return dev_err_probe(dev, -EINVAL, "Failed to get OF match data\n");
+
csi2->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(csi2->base))
return PTR_ERR(csi2->base);
- csi2->cmn_rstb = devm_reset_control_get_exclusive(&pdev->dev, "cmn-rstb");
+ csi2->cmn_rstb = devm_reset_control_get_exclusive(dev, "cmn-rstb");
if (IS_ERR(csi2->cmn_rstb))
- return dev_err_probe(&pdev->dev, PTR_ERR(csi2->cmn_rstb),
+ return dev_err_probe(dev, PTR_ERR(csi2->cmn_rstb),
"Failed to get cpg cmn-rstb\n");
- csi2->presetn = devm_reset_control_get_shared(&pdev->dev, "presetn");
+ csi2->presetn = devm_reset_control_get_shared(dev, "presetn");
if (IS_ERR(csi2->presetn))
- return dev_err_probe(&pdev->dev, PTR_ERR(csi2->presetn),
+ return dev_err_probe(dev, PTR_ERR(csi2->presetn),
"Failed to get cpg presetn\n");
- csi2->sysclk = devm_clk_get(&pdev->dev, "system");
- if (IS_ERR(csi2->sysclk))
- return dev_err_probe(&pdev->dev, PTR_ERR(csi2->sysclk),
- "Failed to get system clk\n");
+ if (csi2->info->has_system_clk) {
+ csi2->sysclk = devm_clk_get(dev, "system");
+ if (IS_ERR(csi2->sysclk))
+ return dev_err_probe(dev, PTR_ERR(csi2->sysclk),
+ "Failed to get system clk\n");
+ }
- csi2->vclk = devm_clk_get(&pdev->dev, "video");
+ csi2->vclk = devm_clk_get(dev, "video");
if (IS_ERR(csi2->vclk))
- return dev_err_probe(&pdev->dev, PTR_ERR(csi2->vclk),
+ return dev_err_probe(dev, PTR_ERR(csi2->vclk),
"Failed to get video clock\n");
csi2->vclk_rate = clk_get_rate(csi2->vclk);
- csi2->dev = &pdev->dev;
+ csi2->dev = dev;
platform_set_drvdata(pdev, csi2);
@@ -804,18 +915,20 @@ static int rzg2l_csi2_probe(struct platform_device *pdev)
if (ret)
return ret;
- pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
ret = rzg2l_validate_csi2_lanes(csi2);
if (ret)
- goto error_pm;
+ return ret;
- csi2->subdev.dev = &pdev->dev;
+ csi2->subdev.dev = dev;
v4l2_subdev_init(&csi2->subdev, &rzg2l_csi2_subdev_ops);
csi2->subdev.internal_ops = &rzg2l_csi2_internal_ops;
- v4l2_set_subdevdata(&csi2->subdev, &pdev->dev);
+ v4l2_set_subdevdata(&csi2->subdev, dev);
snprintf(csi2->subdev.name, sizeof(csi2->subdev.name),
- "csi-%s", dev_name(&pdev->dev));
+ "csi-%s", dev_name(dev));
csi2->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
csi2->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
@@ -833,7 +946,7 @@ static int rzg2l_csi2_probe(struct platform_device *pdev)
ret = media_entity_pads_init(&csi2->subdev.entity, ARRAY_SIZE(csi2->pads),
csi2->pads);
if (ret)
- goto error_pm;
+ return ret;
ret = v4l2_subdev_init_finalize(&csi2->subdev);
if (ret < 0)
@@ -851,8 +964,6 @@ error_async:
v4l2_async_nf_unregister(&csi2->notifier);
v4l2_async_nf_cleanup(&csi2->notifier);
media_entity_cleanup(&csi2->subdev.entity);
-error_pm:
- pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -866,7 +977,6 @@ static void rzg2l_csi2_remove(struct platform_device *pdev)
v4l2_async_unregister_subdev(&csi2->subdev);
v4l2_subdev_cleanup(&csi2->subdev);
media_entity_cleanup(&csi2->subdev.entity);
- pm_runtime_disable(&pdev->dev);
}
static int rzg2l_csi2_pm_runtime_suspend(struct device *dev)
@@ -891,7 +1001,14 @@ static const struct dev_pm_ops rzg2l_csi2_pm_ops = {
};
static const struct of_device_id rzg2l_csi2_of_table[] = {
- { .compatible = "renesas,rzg2l-csi2", },
+ {
+ .compatible = "renesas,r9a09g057-csi2",
+ .data = &rzv2h_csi2_info,
+ },
+ {
+ .compatible = "renesas,rzg2l-csi2",
+ .data = &rzg2l_csi2_info,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rzg2l_csi2_of_table);
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c
index 76a2b451f1da..7836c7cd53dc 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c
@@ -148,6 +148,8 @@ static int rzg2l_cru_ip_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_format *fmt)
{
+ struct rzg2l_cru_dev *cru = v4l2_get_subdevdata(sd);
+ const struct rzg2l_cru_info *info = cru->info;
struct v4l2_mbus_framefmt *src_format;
struct v4l2_mbus_framefmt *sink_format;
@@ -170,9 +172,9 @@ static int rzg2l_cru_ip_set_format(struct v4l2_subdev *sd,
sink_format->ycbcr_enc = fmt->format.ycbcr_enc;
sink_format->quantization = fmt->format.quantization;
sink_format->width = clamp_t(u32, fmt->format.width,
- RZG2L_CRU_MIN_INPUT_WIDTH, RZG2L_CRU_MAX_INPUT_WIDTH);
+ RZG2L_CRU_MIN_INPUT_WIDTH, info->max_width);
sink_format->height = clamp_t(u32, fmt->format.height,
- RZG2L_CRU_MIN_INPUT_HEIGHT, RZG2L_CRU_MAX_INPUT_HEIGHT);
+ RZG2L_CRU_MIN_INPUT_HEIGHT, info->max_height);
fmt->format = *sink_format;
@@ -197,6 +199,9 @@ static int rzg2l_cru_ip_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_size_enum *fse)
{
+ struct rzg2l_cru_dev *cru = v4l2_get_subdevdata(sd);
+ const struct rzg2l_cru_info *info = cru->info;
+
if (fse->index != 0)
return -EINVAL;
@@ -205,8 +210,8 @@ static int rzg2l_cru_ip_enum_frame_size(struct v4l2_subdev *sd,
fse->min_width = RZG2L_CRU_MIN_INPUT_WIDTH;
fse->min_height = RZG2L_CRU_MIN_INPUT_HEIGHT;
- fse->max_width = RZG2L_CRU_MAX_INPUT_WIDTH;
- fse->max_height = RZG2L_CRU_MAX_INPUT_HEIGHT;
+ fse->max_width = info->max_width;
+ fse->max_height = info->max_height;
return 0;
}
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
index cd69c8a686d3..067c6af14e95 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
@@ -31,6 +31,9 @@
#define RZG2L_CRU_DEFAULT_FIELD V4L2_FIELD_NONE
#define RZG2L_CRU_DEFAULT_COLORSPACE V4L2_COLORSPACE_SRGB
+#define RZG2L_CRU_STRIDE_MAX 32640
+#define RZG2L_CRU_STRIDE_ALIGN 128
+
struct rzg2l_cru_buffer {
struct vb2_v4l2_buffer vb;
struct list_head list;
@@ -42,16 +45,66 @@ struct rzg2l_cru_buffer {
/* -----------------------------------------------------------------------------
* DMA operations
*/
-static void rzg2l_cru_write(struct rzg2l_cru_dev *cru, u32 offset, u32 value)
+static void __rzg2l_cru_write(struct rzg2l_cru_dev *cru, u32 offset, u32 value)
{
- iowrite32(value, cru->base + offset);
+ const u16 *regs = cru->info->regs;
+
+ /*
+	 * CRUnCTRL is the first register on all supported CRU SoCs, so validate
+	 * that the remaining registers have a valid offset set in cru->info->regs.
+ */
+ if (WARN_ON(offset >= RZG2L_CRU_MAX_REG) ||
+ WARN_ON(offset != CRUnCTRL && regs[offset] == 0))
+ return;
+
+ iowrite32(value, cru->base + regs[offset]);
}
-static u32 rzg2l_cru_read(struct rzg2l_cru_dev *cru, u32 offset)
+static u32 __rzg2l_cru_read(struct rzg2l_cru_dev *cru, u32 offset)
{
- return ioread32(cru->base + offset);
+ const u16 *regs = cru->info->regs;
+
+ /*
+	 * CRUnCTRL is the first register on all supported CRU SoCs, so validate
+	 * that the remaining registers have a valid offset set in cru->info->regs.
+ */
+ if (WARN_ON(offset >= RZG2L_CRU_MAX_REG) ||
+ WARN_ON(offset != CRUnCTRL && regs[offset] == 0))
+ return 0;
+
+ return ioread32(cru->base + regs[offset]);
}
+static __always_inline void
+__rzg2l_cru_write_constant(struct rzg2l_cru_dev *cru, u32 offset, u32 value)
+{
+ const u16 *regs = cru->info->regs;
+
+ BUILD_BUG_ON(offset >= RZG2L_CRU_MAX_REG);
+
+ iowrite32(value, cru->base + regs[offset]);
+}
+
+static __always_inline u32
+__rzg2l_cru_read_constant(struct rzg2l_cru_dev *cru, u32 offset)
+{
+ const u16 *regs = cru->info->regs;
+
+ BUILD_BUG_ON(offset >= RZG2L_CRU_MAX_REG);
+
+ return ioread32(cru->base + regs[offset]);
+}
+
+#define rzg2l_cru_write(cru, offset, value) \
+ (__builtin_constant_p(offset) ? \
+ __rzg2l_cru_write_constant(cru, offset, value) : \
+ __rzg2l_cru_write(cru, offset, value))
+
+#define rzg2l_cru_read(cru, offset) \
+ (__builtin_constant_p(offset) ? \
+ __rzg2l_cru_read_constant(cru, offset) : \
+ __rzg2l_cru_read(cru, offset))
+
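
Editor's note: the accessor pair above validates constant register indices at build time (BUILD_BUG_ON) and non-constant ones at run time (WARN_ON), selected via __builtin_constant_p. A compact user-space sketch of the same dispatch, with assert() standing in for WARN_ON() and the compile-time check elided for portability (GCC/Clang only; names hypothetical):

#include <assert.h>
#include <stdio.h>

#define MAX_REG 4

/* Hypothetical index-to-offset table; slot 0 is always valid. */
static const unsigned int regs[MAX_REG] = { 0x000, 0x41c, 0x434, 0x438 };

static unsigned int lookup_checked(unsigned int idx)
{
	assert(idx < MAX_REG);		/* runtime check, like WARN_ON() */
	return regs[idx];
}

static inline __attribute__((always_inline))
unsigned int lookup_constant(unsigned int idx)
{
	/* With idx constant-folded the kernel uses BUILD_BUG_ON() here;
	 * the plain access keeps this sketch portable. */
	return regs[idx];
}

/* Constant indices take the build-time-checked path, others the runtime one. */
#define lookup(idx) \
	(__builtin_constant_p(idx) ? lookup_constant(idx) : lookup_checked(idx))

int main(void)
{
	unsigned int runtime_idx = 2;

	printf("0x%x 0x%x\n", lookup(1), lookup(runtime_idx));
	return 0;
}
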
/* Need to hold qlock before calling */
static void return_unused_buffers(struct rzg2l_cru_dev *cru,
enum vb2_buffer_state state)
@@ -134,6 +187,8 @@ static void rzg2l_cru_set_slot_addr(struct rzg2l_cru_dev *cru,
/* Currently, we just use the buffer in 32 bits address */
rzg2l_cru_write(cru, AMnMBxADDRL(slot), addr);
rzg2l_cru_write(cru, AMnMBxADDRH(slot), 0);
+
+ cru->buf_addr[slot] = addr;
}
/*
@@ -174,6 +229,7 @@ static void rzg2l_cru_fill_hw_slot(struct rzg2l_cru_dev *cru, int slot)
static void rzg2l_cru_initialize_axi(struct rzg2l_cru_dev *cru)
{
+ const struct rzg2l_cru_info *info = cru->info;
unsigned int slot;
u32 amnaxiattr;
@@ -186,35 +242,64 @@ static void rzg2l_cru_initialize_axi(struct rzg2l_cru_dev *cru)
for (slot = 0; slot < cru->num_buf; slot++)
rzg2l_cru_fill_hw_slot(cru, slot);
+ if (info->has_stride) {
+ u32 stride = cru->format.bytesperline;
+ u32 amnis;
+
+ stride /= RZG2L_CRU_STRIDE_ALIGN;
+ amnis = rzg2l_cru_read(cru, AMnIS) & ~AMnIS_IS_MASK;
+ rzg2l_cru_write(cru, AMnIS, amnis | AMnIS_IS(stride));
+ }
+
/* Set AXI burst max length to recommended setting */
amnaxiattr = rzg2l_cru_read(cru, AMnAXIATTR) & ~AMnAXIATTR_AXILEN_MASK;
amnaxiattr |= AMnAXIATTR_AXILEN;
rzg2l_cru_write(cru, AMnAXIATTR, amnaxiattr);
}
-static void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru,
- const struct rzg2l_cru_ip_format *ip_fmt,
- u8 csi_vc)
+void rzg3e_cru_csi2_setup(struct rzg2l_cru_dev *cru,
+ const struct rzg2l_cru_ip_format *ip_fmt,
+ u8 csi_vc)
{
+ const struct rzg2l_cru_info *info = cru->info;
u32 icnmc = ICnMC_INF(ip_fmt->datatype);
- icnmc |= (rzg2l_cru_read(cru, ICnMC) & ~ICnMC_INF_MASK);
+ icnmc |= rzg2l_cru_read(cru, info->image_conv) & ~ICnMC_INF_MASK;
/* Set virtual channel CSI2 */
icnmc |= ICnMC_VCSEL(csi_vc);
- rzg2l_cru_write(cru, ICnMC, icnmc);
+ rzg2l_cru_write(cru, ICnSVCNUM, csi_vc);
+ rzg2l_cru_write(cru, ICnSVC, ICnSVC_SVC0(0) | ICnSVC_SVC1(1) |
+ ICnSVC_SVC2(2) | ICnSVC_SVC3(3));
+ rzg2l_cru_write(cru, info->image_conv, icnmc);
+}
+
+void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru,
+ const struct rzg2l_cru_ip_format *ip_fmt,
+ u8 csi_vc)
+{
+ const struct rzg2l_cru_info *info = cru->info;
+ u32 icnmc = ICnMC_INF(ip_fmt->datatype);
+
+ icnmc |= rzg2l_cru_read(cru, info->image_conv) & ~ICnMC_INF_MASK;
+
+ /* Set virtual channel CSI2 */
+ icnmc |= ICnMC_VCSEL(csi_vc);
+
+ rzg2l_cru_write(cru, info->image_conv, icnmc);
}
static int rzg2l_cru_initialize_image_conv(struct rzg2l_cru_dev *cru,
struct v4l2_mbus_framefmt *ip_sd_fmt,
u8 csi_vc)
{
+ const struct rzg2l_cru_info *info = cru->info;
const struct rzg2l_cru_ip_format *cru_video_fmt;
const struct rzg2l_cru_ip_format *cru_ip_fmt;
cru_ip_fmt = rzg2l_cru_ip_code_to_fmt(ip_sd_fmt->code);
- rzg2l_cru_csi2_setup(cru, cru_ip_fmt, csi_vc);
+ info->csi_setup(cru, cru_ip_fmt, csi_vc);
/* Output format */
cru_video_fmt = rzg2l_cru_ip_format_to_fmt(cru->format.pixelformat);
@@ -226,11 +311,11 @@ static int rzg2l_cru_initialize_image_conv(struct rzg2l_cru_dev *cru,
/* If input and output use same colorspace, do bypass mode */
if (cru_ip_fmt->yuv == cru_video_fmt->yuv)
- rzg2l_cru_write(cru, ICnMC,
- rzg2l_cru_read(cru, ICnMC) | ICnMC_CSCTHR);
+ rzg2l_cru_write(cru, info->image_conv,
+ rzg2l_cru_read(cru, info->image_conv) | ICnMC_CSCTHR);
else
- rzg2l_cru_write(cru, ICnMC,
- rzg2l_cru_read(cru, ICnMC) & (~ICnMC_CSCTHR));
+ rzg2l_cru_write(cru, info->image_conv,
+ rzg2l_cru_read(cru, info->image_conv) & ~ICnMC_CSCTHR);
/* Set output data format */
rzg2l_cru_write(cru, ICnDMR, cru_video_fmt->icndmr);
@@ -238,9 +323,36 @@ static int rzg2l_cru_initialize_image_conv(struct rzg2l_cru_dev *cru,
return 0;
}
-void rzg2l_cru_stop_image_processing(struct rzg2l_cru_dev *cru)
+bool rz3e_fifo_empty(struct rzg2l_cru_dev *cru)
+{
+ u32 amnfifopntr = rzg2l_cru_read(cru, AMnFIFOPNTR);
+
+ if ((((amnfifopntr & AMnFIFOPNTR_FIFORPNTR_B1) >> 24) ==
+ ((amnfifopntr & AMnFIFOPNTR_FIFOWPNTR_B1) >> 8)) &&
+ (((amnfifopntr & AMnFIFOPNTR_FIFORPNTR_B0) >> 16) ==
+ (amnfifopntr & AMnFIFOPNTR_FIFOWPNTR_B0)))
+ return true;
+
+ return false;
+}
+
+bool rzg2l_fifo_empty(struct rzg2l_cru_dev *cru)
{
u32 amnfifopntr, amnfifopntr_w, amnfifopntr_r_y;
+
+ amnfifopntr = rzg2l_cru_read(cru, AMnFIFOPNTR);
+
+ amnfifopntr_w = amnfifopntr & AMnFIFOPNTR_FIFOWPNTR;
+ amnfifopntr_r_y =
+ (amnfifopntr & AMnFIFOPNTR_FIFORPNTR_Y) >> 16;
+	return amnfifopntr_w == amnfifopntr_r_y;
+}
+
+void rzg2l_cru_stop_image_processing(struct rzg2l_cru_dev *cru)
+{
unsigned int retries = 0;
unsigned long flags;
u32 icnms;
@@ -248,8 +360,7 @@ void rzg2l_cru_stop_image_processing(struct rzg2l_cru_dev *cru)
spin_lock_irqsave(&cru->qlock, flags);
/* Disable and clear the interrupt */
- rzg2l_cru_write(cru, CRUnIE, 0);
- rzg2l_cru_write(cru, CRUnINTS, 0x001F0F0F);
+ cru->info->disable_interrupts(cru);
/* Stop the operation of image conversion */
rzg2l_cru_write(cru, ICnEN, 0);
@@ -269,12 +380,7 @@ void rzg2l_cru_stop_image_processing(struct rzg2l_cru_dev *cru)
/* Wait until the FIFO becomes empty */
for (retries = 5; retries > 0; retries--) {
- amnfifopntr = rzg2l_cru_read(cru, AMnFIFOPNTR);
-
- amnfifopntr_w = amnfifopntr & AMnFIFOPNTR_FIFOWPNTR;
- amnfifopntr_r_y =
- (amnfifopntr & AMnFIFOPNTR_FIFORPNTR_Y) >> 16;
- if (amnfifopntr_w == amnfifopntr_r_y)
+ if (cru->info->fifo_empty(cru))
break;
usleep_range(10, 20);
@@ -341,6 +447,31 @@ static int rzg2l_cru_get_virtual_channel(struct rzg2l_cru_dev *cru)
return fd.entry[0].bus.csi2.vc;
}
+void rzg3e_cru_enable_interrupts(struct rzg2l_cru_dev *cru)
+{
+ rzg2l_cru_write(cru, CRUnIE2, CRUnIE2_FSxE(cru->svc_channel));
+ rzg2l_cru_write(cru, CRUnIE2, CRUnIE2_FExE(cru->svc_channel));
+}
+
+void rzg3e_cru_disable_interrupts(struct rzg2l_cru_dev *cru)
+{
+ rzg2l_cru_write(cru, CRUnIE, 0);
+ rzg2l_cru_write(cru, CRUnIE2, 0);
+ rzg2l_cru_write(cru, CRUnINTS, rzg2l_cru_read(cru, CRUnINTS));
+ rzg2l_cru_write(cru, CRUnINTS2, rzg2l_cru_read(cru, CRUnINTS2));
+}
+
+void rzg2l_cru_enable_interrupts(struct rzg2l_cru_dev *cru)
+{
+ rzg2l_cru_write(cru, CRUnIE, CRUnIE_EFE);
+}
+
+void rzg2l_cru_disable_interrupts(struct rzg2l_cru_dev *cru)
+{
+ rzg2l_cru_write(cru, CRUnIE, 0);
+ rzg2l_cru_write(cru, CRUnINTS, 0x001f000f);
+}
+
int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru)
{
struct v4l2_mbus_framefmt *fmt = rzg2l_cru_ip_get_src_fmt(cru);
@@ -352,6 +483,7 @@ int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru)
if (ret < 0)
return ret;
csi_vc = ret;
+ cru->svc_channel = csi_vc;
spin_lock_irqsave(&cru->qlock, flags);
@@ -362,8 +494,7 @@ int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru)
rzg2l_cru_write(cru, CRUnRST, CRUnRST_VRESETN);
/* Disable and clear the interrupt before using */
- rzg2l_cru_write(cru, CRUnIE, 0);
- rzg2l_cru_write(cru, CRUnINTS, 0x001f000f);
+ cru->info->disable_interrupts(cru);
/* Initialize the AXI master */
rzg2l_cru_initialize_axi(cru);
@@ -376,7 +507,7 @@ int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru)
}
/* Enable interrupt */
- rzg2l_cru_write(cru, CRUnIE, CRUnIE_EFE);
+ cru->info->enable_interrupts(cru);
/* Enable image processing reception */
rzg2l_cru_write(cru, ICnEN, ICnEN_ICEN);
@@ -531,6 +662,104 @@ done:
return IRQ_RETVAL(handled);
}
+static int rzg3e_cru_get_current_slot(struct rzg2l_cru_dev *cru)
+{
+ u64 amnmadrs;
+ int slot;
+
+ /*
+	 * Reading AMnMADRSL latches the higher-order address into AMnMADRSH,
+	 * so AMnMADRSH must be read after AMnMADRSL.
+ */
+ amnmadrs = rzg2l_cru_read(cru, AMnMADRSL);
+ amnmadrs |= (u64)rzg2l_cru_read(cru, AMnMADRSH) << 32;
+
+ /* Ensure amnmadrs is within this buffer range */
+ for (slot = 0; slot < cru->num_buf; slot++) {
+ if (amnmadrs >= cru->buf_addr[slot] &&
+ amnmadrs < cru->buf_addr[slot] + cru->format.sizeimage)
+ return slot;
+ }
+
+ dev_err(cru->dev, "Invalid MB address 0x%llx (out of range)\n", amnmadrs);
+ return -EINVAL;
+}
+
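
Editor's note: the slot lookup above maps the latched 64-bit memory-bank address back to the buffer it falls in. A standalone sketch of that range test, with a hypothetical buffer layout:

#include <stdint.h>
#include <stdio.h>

#define NUM_BUF   4
#define SIZEIMAGE 0x100000ULL		/* hypothetical frame size */

/* Hypothetical per-slot DMA addresses, as programmed into AMnMBxADDRL/H. */
static const uint64_t buf_addr[NUM_BUF] = {
	0x80000000, 0x80100000, 0x80200000, 0x80300000,
};

/* Map the address latched by the hardware back to the slot containing it. */
static int addr_to_slot(uint64_t addr)
{
	for (int slot = 0; slot < NUM_BUF; slot++)
		if (addr >= buf_addr[slot] &&
		    addr < buf_addr[slot] + SIZEIMAGE)
			return slot;
	return -1;			/* out of range, like -EINVAL */
}

int main(void)
{
	printf("%d\n", addr_to_slot(0x80180000));	/* prints 1 */
	return 0;
}
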
+irqreturn_t rzg3e_cru_irq(int irq, void *data)
+{
+ struct rzg2l_cru_dev *cru = data;
+ u32 irq_status;
+ int slot;
+
+ scoped_guard(spinlock, &cru->qlock) {
+ irq_status = rzg2l_cru_read(cru, CRUnINTS2);
+ if (!irq_status)
+ return IRQ_NONE;
+
+ dev_dbg(cru->dev, "CRUnINTS2 0x%x\n", irq_status);
+
+ rzg2l_cru_write(cru, CRUnINTS2, rzg2l_cru_read(cru, CRUnINTS2));
+
+ /* Nothing to do if capture status is 'RZG2L_CRU_DMA_STOPPED' */
+ if (cru->state == RZG2L_CRU_DMA_STOPPED) {
+ dev_dbg(cru->dev, "IRQ while state stopped\n");
+ return IRQ_HANDLED;
+ }
+
+ if (cru->state == RZG2L_CRU_DMA_STOPPING) {
+ if (irq_status & CRUnINTS2_FSxS(0) ||
+ irq_status & CRUnINTS2_FSxS(1) ||
+ irq_status & CRUnINTS2_FSxS(2) ||
+ irq_status & CRUnINTS2_FSxS(3))
+ dev_dbg(cru->dev, "IRQ while state stopping\n");
+ return IRQ_HANDLED;
+ }
+
+ slot = rzg3e_cru_get_current_slot(cru);
+ if (slot < 0)
+ return IRQ_HANDLED;
+
+ dev_dbg(cru->dev, "Current written slot: %d\n", slot);
+ cru->buf_addr[slot] = 0;
+
+ /*
+	 * To hand buffers back to userspace in a known order, start capturing
+	 * from slot 0.
+ */
+ if (cru->state == RZG2L_CRU_DMA_STARTING) {
+ if (slot != 0) {
+ dev_dbg(cru->dev, "Starting sync slot: %d\n", slot);
+ return IRQ_HANDLED;
+ }
+ dev_dbg(cru->dev, "Capture start synced!\n");
+ cru->state = RZG2L_CRU_DMA_RUNNING;
+ }
+
+ /* Capture frame */
+ if (cru->queue_buf[slot]) {
+ struct vb2_v4l2_buffer *buf = cru->queue_buf[slot];
+
+ buf->field = cru->format.field;
+ buf->sequence = cru->sequence;
+ buf->vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&buf->vb2_buf, VB2_BUF_STATE_DONE);
+ cru->queue_buf[slot] = NULL;
+ } else {
+ /* Scratch buffer was used, dropping frame. */
+ dev_dbg(cru->dev, "Dropping frame %u\n", cru->sequence);
+ }
+
+ cru->sequence++;
+
+ /* Prepare for next frame */
+ rzg2l_cru_fill_hw_slot(cru, slot);
+ }
+
+ return IRQ_HANDLED;
+}
+
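
Editor's note: the handler above waits for a frame completing in slot 0 before leaving the STARTING state, so buffers reach userspace in a known order. A condensed standalone model of that state machine (states and slot sequence hypothetical):

#include <stdio.h>

enum state { STOPPED, STARTING, RUNNING };

/* Returns 1 if the completed frame in @slot should reach userspace. */
static int on_frame(enum state *st, int slot)
{
	if (*st == STOPPED)
		return 0;
	if (*st == STARTING) {
		if (slot != 0)
			return 0;	/* drop until slot 0 syncs the start */
		*st = RUNNING;
	}
	return 1;
}

int main(void)
{
	enum state st = STARTING;
	const int slots[] = { 2, 3, 0, 1, 2 };

	for (unsigned int i = 0; i < sizeof(slots) / sizeof(slots[0]); i++)
		printf("slot %d -> %s\n", slots[i],
		       on_frame(&st, slots[i]) ? "deliver" : "drop");
	return 0;
}
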
static int rzg2l_cru_start_streaming_vq(struct vb2_queue *vq, unsigned int count)
{
struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq);
@@ -686,6 +915,7 @@ error:
static void rzg2l_cru_format_align(struct rzg2l_cru_dev *cru,
struct v4l2_pix_format *pix)
{
+ const struct rzg2l_cru_info *info = cru->info;
const struct rzg2l_cru_ip_format *fmt;
fmt = rzg2l_cru_ip_format_to_fmt(pix->pixelformat);
@@ -708,10 +938,17 @@ static void rzg2l_cru_format_align(struct rzg2l_cru_dev *cru,
}
/* Limit to CRU capabilities */
- v4l_bound_align_image(&pix->width, 320, RZG2L_CRU_MAX_INPUT_WIDTH, 1,
- &pix->height, 240, RZG2L_CRU_MAX_INPUT_HEIGHT, 2, 0);
+ v4l_bound_align_image(&pix->width, 320, info->max_width, 1,
+ &pix->height, 240, info->max_height, 2, 0);
+
+ if (info->has_stride) {
+ u32 stride = clamp(pix->bytesperline, pix->width * fmt->bpp,
+ RZG2L_CRU_STRIDE_MAX);
+ pix->bytesperline = round_up(stride, RZG2L_CRU_STRIDE_ALIGN);
+ } else {
+ pix->bytesperline = pix->width * fmt->bpp;
+ }
- pix->bytesperline = pix->width * fmt->bpp;
pix->sizeimage = pix->bytesperline * pix->height;
dev_dbg(cru->dev, "Format %ux%u bpl: %u size: %u\n",
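
Editor's note: on variants with stride support, the bytesperline handling above is a clamp to [width * bpp, 32640] followed by a round-up to 128 bytes. A quick standalone check of that arithmetic (values hypothetical):

#include <stdio.h>

#define STRIDE_ALIGN 128u
#define STRIDE_MAX   32640u

static unsigned int align_stride(unsigned int bytesperline,
				 unsigned int width, unsigned int bpp)
{
	unsigned int stride = bytesperline;

	/* clamp(bytesperline, width * bpp, STRIDE_MAX) */
	if (stride < width * bpp)
		stride = width * bpp;
	if (stride > STRIDE_MAX)
		stride = STRIDE_MAX;

	/* round_up(stride, STRIDE_ALIGN): STRIDE_ALIGN is a power of two */
	return (stride + STRIDE_ALIGN - 1) & ~(STRIDE_ALIGN - 1);
}

int main(void)
{
	/* 1920 px x 2 B/px = 3840: a too-small request is clamped up,
	 * a larger one is rounded to the next 128-byte multiple. */
	printf("%u %u\n", align_stride(1000, 1920, 2),
	       align_stride(4000, 1920, 2));	/* prints: 3840 4096 */
	return 0;
}
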
diff --git a/drivers/media/platform/renesas/vsp1/Makefile b/drivers/media/platform/renesas/vsp1/Makefile
index 4bb4dcbef7b5..de8c802e1d1a 100644
--- a/drivers/media/platform/renesas/vsp1/Makefile
+++ b/drivers/media/platform/renesas/vsp1/Makefile
@@ -5,6 +5,6 @@ vsp1-y += vsp1_rpf.o vsp1_rwpf.o vsp1_wpf.o
vsp1-y += vsp1_clu.o vsp1_hsit.o vsp1_lut.o
vsp1-y += vsp1_brx.o vsp1_sru.o vsp1_uds.o
vsp1-y += vsp1_hgo.o vsp1_hgt.o vsp1_histo.o
-vsp1-y += vsp1_lif.o vsp1_uif.o
+vsp1-y += vsp1_iif.o vsp1_lif.o vsp1_uif.o
obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1.o
diff --git a/drivers/media/platform/renesas/vsp1/vsp1.h b/drivers/media/platform/renesas/vsp1/vsp1.h
index 2f6f0c6ae555..f97a1a31bfab 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1.h
@@ -32,6 +32,7 @@ struct vsp1_clu;
struct vsp1_hgo;
struct vsp1_hgt;
struct vsp1_hsit;
+struct vsp1_iif;
struct vsp1_lif;
struct vsp1_lut;
struct vsp1_rwpf;
@@ -56,6 +57,8 @@ struct vsp1_uif;
#define VSP1_HAS_BRS BIT(9)
#define VSP1_HAS_EXT_DL BIT(10)
#define VSP1_HAS_NON_ZERO_LBA BIT(11)
+#define VSP1_HAS_IIF BIT(12)
+#define VSP1_HAS_HSIT BIT(13)
struct vsp1_device_info {
u32 version;
@@ -91,6 +94,7 @@ struct vsp1_device {
struct vsp1_hgt *hgt;
struct vsp1_hsit *hsi;
struct vsp1_hsit *hst;
+ struct vsp1_iif *iif;
struct vsp1_lif *lif[VSP1_MAX_LIF];
struct vsp1_lut *lut;
struct vsp1_rwpf *rpf[VSP1_MAX_RPF];
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_brx.c b/drivers/media/platform/renesas/vsp1/vsp1_brx.c
index 5dee0490c593..5fc2e5a3bb30 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_brx.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_brx.c
@@ -15,6 +15,7 @@
#include "vsp1.h"
#include "vsp1_brx.h"
#include "vsp1_dl.h"
+#include "vsp1_entity.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_video.h"
@@ -108,6 +109,8 @@ static void brx_try_format(struct vsp1_brx *brx,
if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
+
+ vsp1_entity_adjust_color_space(fmt);
break;
default:
@@ -115,13 +118,17 @@ static void brx_try_format(struct vsp1_brx *brx,
format = v4l2_subdev_state_get_format(sd_state,
BRX_PAD_SINK(0));
fmt->code = format->code;
+
+ fmt->colorspace = format->colorspace;
+ fmt->xfer_func = format->xfer_func;
+ fmt->ycbcr_enc = format->ycbcr_enc;
+ fmt->quantization = format->quantization;
break;
}
fmt->width = clamp(fmt->width, BRX_MIN_SIZE, BRX_MAX_SIZE);
fmt->height = clamp(fmt->height, BRX_MIN_SIZE, BRX_MAX_SIZE);
fmt->field = V4L2_FIELD_NONE;
- fmt->colorspace = V4L2_COLORSPACE_SRGB;
}
static int brx_set_format(struct v4l2_subdev *subdev,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_dl.c b/drivers/media/platform/renesas/vsp1/vsp1_dl.c
index ad3fa1c9cc73..bb8228b19824 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_dl.c
@@ -1099,7 +1099,12 @@ struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
return NULL;
dlm->index = index;
- dlm->singleshot = vsp1->info->uapi;
+ /*
+ * uapi = single shot mode;
+ * DRM = continuous mode;
+ * VSPX = single shot mode;
+ */
+ dlm->singleshot = vsp1->info->uapi || vsp1->iif;
dlm->vsp1 = vsp1;
spin_lock_init(&dlm->lock);
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drm.c b/drivers/media/platform/renesas/vsp1/vsp1_drm.c
index b5d1f238f7be..fe55e8747b05 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drm.c
@@ -118,26 +118,26 @@ static int vsp1_du_pipeline_setup_rpf(struct vsp1_device *vsp1,
struct vsp1_entity *uif,
unsigned int brx_input)
{
+ const struct vsp1_drm_input *input = &vsp1->drm->inputs[rpf->entity.index];
struct v4l2_subdev_selection sel = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
- const struct v4l2_rect *crop;
int ret;
/*
* Configure the format on the RPF sink pad and propagate it up to the
* BRx sink pad.
*/
- crop = &vsp1->drm->inputs[rpf->entity.index].crop;
-
format.pad = RWPF_PAD_SINK;
- format.format.width = crop->width + crop->left;
- format.format.height = crop->height + crop->top;
+ format.format.width = input->crop.width + input->crop.left;
+ format.format.height = input->crop.height + input->crop.top;
format.format.code = rpf->fmtinfo->mbus;
format.format.field = V4L2_FIELD_NONE;
+ format.format.ycbcr_enc = input->ycbcr_enc;
+ format.format.quantization = input->quantization;
ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_fmt, NULL,
&format);
@@ -151,7 +151,7 @@ static int vsp1_du_pipeline_setup_rpf(struct vsp1_device *vsp1,
sel.pad = RWPF_PAD_SINK;
sel.target = V4L2_SEL_TGT_CROP;
- sel.r = *crop;
+ sel.r = input->crop;
ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_selection, NULL,
&sel);
@@ -593,8 +593,8 @@ static int vsp1_du_pipeline_set_rwpf_format(struct vsp1_device *vsp1,
fmtinfo = vsp1_get_format_info(vsp1, pixelformat);
if (!fmtinfo) {
- dev_dbg(vsp1->dev, "Unsupported pixel format %08x\n",
- pixelformat);
+ dev_dbg(vsp1->dev, "Unsupported pixel format %p4cc\n",
+ &pixelformat);
return -EINVAL;
}
@@ -826,12 +826,14 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index];
+ struct vsp1_drm_input *input;
struct vsp1_rwpf *rpf;
int ret;
if (rpf_index >= vsp1->info->rpf_count)
return -EINVAL;
+ input = &vsp1->drm->inputs[rpf_index];
rpf = vsp1->rpf[rpf_index];
if (!cfg) {
@@ -849,11 +851,11 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
}
dev_dbg(vsp1->dev,
- "%s: RPF%u: (%u,%u)/%ux%u -> (%u,%u)/%ux%u (%08x), pitch %u dma { %pad, %pad, %pad } zpos %u\n",
+ "%s: RPF%u: (%u,%u)/%ux%u -> (%u,%u)/%ux%u (%p4cc), pitch %u dma { %pad, %pad, %pad } zpos %u\n",
__func__, rpf_index,
cfg->src.left, cfg->src.top, cfg->src.width, cfg->src.height,
cfg->dst.left, cfg->dst.top, cfg->dst.width, cfg->dst.height,
- cfg->pixelformat, cfg->pitch, &cfg->mem[0], &cfg->mem[1],
+ &cfg->pixelformat, cfg->pitch, &cfg->mem[0], &cfg->mem[1],
&cfg->mem[2], cfg->zpos);
/*
@@ -873,9 +875,11 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
rpf->format.flags = cfg->premult ? V4L2_PIX_FMT_FLAG_PREMUL_ALPHA : 0;
- vsp1->drm->inputs[rpf_index].crop = cfg->src;
- vsp1->drm->inputs[rpf_index].compose = cfg->dst;
- vsp1->drm->inputs[rpf_index].zpos = cfg->zpos;
+ input->crop = cfg->src;
+ input->compose = cfg->dst;
+ input->zpos = cfg->zpos;
+ input->ycbcr_enc = cfg->color_encoding;
+ input->quantization = cfg->color_range;
drm_pipe->pipe.inputs[rpf_index] = rpf;
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drm.h b/drivers/media/platform/renesas/vsp1/vsp1_drm.h
index 3fd95b53f27e..07a5d0adbd08 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_drm.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drm.h
@@ -52,17 +52,19 @@ struct vsp1_drm_pipeline {
* struct vsp1_drm - State for the API exposed to the DRM driver
* @pipe: the VSP1 DRM pipeline used for display
* @lock: protects the BRU and BRS allocation
- * @inputs: source crop rectangle, destination compose rectangle and z-order
- * position for every input (indexed by RPF index)
+ * @inputs: source crop rectangle, destination compose rectangle, z-order
+ * position and colorspace for every input (indexed by RPF index)
*/
struct vsp1_drm {
struct vsp1_drm_pipeline pipe[VSP1_MAX_LIF];
struct mutex lock;
- struct {
+ struct vsp1_drm_input {
struct v4l2_rect crop;
struct v4l2_rect compose;
unsigned int zpos;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
} inputs[VSP1_MAX_RPF];
};
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drv.c b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
index 9fc6bf624a52..8270a9d207cb 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
@@ -29,6 +29,7 @@
#include "vsp1_hgo.h"
#include "vsp1_hgt.h"
#include "vsp1_hsit.h"
+#include "vsp1_iif.h"
#include "vsp1_lif.h"
#include "vsp1_lut.h"
#include "vsp1_pipe.h"
@@ -302,22 +303,6 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->clu->entity.list_dev, &vsp1->entities);
}
- vsp1->hsi = vsp1_hsit_create(vsp1, true);
- if (IS_ERR(vsp1->hsi)) {
- ret = PTR_ERR(vsp1->hsi);
- goto done;
- }
-
- list_add_tail(&vsp1->hsi->entity.list_dev, &vsp1->entities);
-
- vsp1->hst = vsp1_hsit_create(vsp1, false);
- if (IS_ERR(vsp1->hst)) {
- ret = PTR_ERR(vsp1->hst);
- goto done;
- }
-
- list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);
-
if (vsp1_feature(vsp1, VSP1_HAS_HGO) && vsp1->info->uapi) {
vsp1->hgo = vsp1_hgo_create(vsp1);
if (IS_ERR(vsp1->hgo)) {
@@ -340,6 +325,34 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
&vsp1->entities);
}
+ if (vsp1_feature(vsp1, VSP1_HAS_IIF)) {
+ vsp1->iif = vsp1_iif_create(vsp1);
+ if (IS_ERR(vsp1->iif)) {
+ ret = PTR_ERR(vsp1->iif);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->iif->entity.list_dev, &vsp1->entities);
+ }
+
+ if (vsp1_feature(vsp1, VSP1_HAS_HSIT)) {
+ vsp1->hsi = vsp1_hsit_create(vsp1, true);
+ if (IS_ERR(vsp1->hsi)) {
+ ret = PTR_ERR(vsp1->hsi);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->hsi->entity.list_dev, &vsp1->entities);
+
+ vsp1->hst = vsp1_hsit_create(vsp1, false);
+ if (IS_ERR(vsp1->hst)) {
+ ret = PTR_ERR(vsp1->hst);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);
+ }
+
/*
* The LIFs are only supported when used in conjunction with the DU, in
* which case the userspace API is disabled. If the userspace API is
@@ -683,8 +696,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.model = "VSP1-S",
.gen = 2,
.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
- | VSP1_HAS_HGT | VSP1_HAS_LUT | VSP1_HAS_SRU
- | VSP1_HAS_WPF_VFLIP,
+ | VSP1_HAS_HGT | VSP1_HAS_HSIT | VSP1_HAS_LUT
+ | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
.rpf_count = 5,
.uds_count = 3,
.wpf_count = 4,
@@ -694,7 +707,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPR_H2,
.model = "VSP1-R",
.gen = 2,
- .features = VSP1_HAS_BRU | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
+ .features = VSP1_HAS_BRU | VSP1_HAS_HSIT | VSP1_HAS_SRU
+ | VSP1_HAS_WPF_VFLIP,
.rpf_count = 5,
.uds_count = 3,
.wpf_count = 4,
@@ -704,7 +718,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPD_GEN2,
.model = "VSP1-D",
.gen = 2,
- .features = VSP1_HAS_BRU | VSP1_HAS_HGO | VSP1_HAS_LUT,
+ .features = VSP1_HAS_BRU | VSP1_HAS_HGO | VSP1_HAS_HSIT
+ | VSP1_HAS_LUT,
.lif_count = 1,
.rpf_count = 4,
.uds_count = 1,
@@ -716,8 +731,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.model = "VSP1-S",
.gen = 2,
.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
- | VSP1_HAS_HGT | VSP1_HAS_LUT | VSP1_HAS_SRU
- | VSP1_HAS_WPF_VFLIP,
+ | VSP1_HAS_HGT | VSP1_HAS_HSIT | VSP1_HAS_LUT
+ | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
.rpf_count = 5,
.uds_count = 1,
.wpf_count = 4,
@@ -727,8 +742,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPS_V2H,
.model = "VSP1V-S",
.gen = 2,
- .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
- | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HSIT
+ | VSP1_HAS_LUT | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
.rpf_count = 4,
.uds_count = 1,
.wpf_count = 4,
@@ -738,7 +753,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPD_V2H,
.model = "VSP1V-D",
.gen = 2,
- .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HSIT
+ | VSP1_HAS_LUT,
.lif_count = 1,
.rpf_count = 4,
.uds_count = 1,
@@ -750,8 +766,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.model = "VSP2-I",
.gen = 3,
.features = VSP1_HAS_CLU | VSP1_HAS_HGO | VSP1_HAS_HGT
- | VSP1_HAS_LUT | VSP1_HAS_SRU | VSP1_HAS_WPF_HFLIP
- | VSP1_HAS_WPF_VFLIP,
+ | VSP1_HAS_HSIT | VSP1_HAS_LUT | VSP1_HAS_SRU
+ | VSP1_HAS_WPF_HFLIP | VSP1_HAS_WPF_VFLIP,
.rpf_count = 1,
.uds_count = 1,
.wpf_count = 1,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_entity.c b/drivers/media/platform/renesas/vsp1/vsp1_entity.c
index 8b8945bd8f10..a6680d531872 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_entity.c
@@ -63,9 +63,14 @@ void vsp1_entity_route_setup(struct vsp1_entity *entity,
/*
* The ILV and BRS share the same data path route. The extra BRSSEL bit
* selects between the ILV and BRS.
+ *
+ * The BRU and IIF share the same data path route. The extra IIFSEL bit
+ * selects between the IIF and BRU.
*/
if (source->type == VSP1_ENTITY_BRS)
route |= VI6_DPR_ROUTE_BRSSEL;
+ else if (source->type == VSP1_ENTITY_IIF)
+ route |= VI6_DPR_ROUTE_IIFSEL;
vsp1_dl_body_write(dlb, source->route->reg, route);
}
@@ -99,6 +104,20 @@ void vsp1_entity_configure_partition(struct vsp1_entity *entity,
dl, dlb);
}
+void vsp1_entity_adjust_color_space(struct v4l2_mbus_framefmt *format)
+{
+ u8 xfer_func = format->xfer_func;
+ u8 ycbcr_enc = format->ycbcr_enc;
+ u8 quantization = format->quantization;
+
+ vsp1_adjust_color_space(format->code, &format->colorspace, &xfer_func,
+ &ycbcr_enc, &quantization);
+
+ format->xfer_func = xfer_func;
+ format->ycbcr_enc = ycbcr_enc;
+ format->quantization = quantization;
+}
+
/* -----------------------------------------------------------------------------
* V4L2 Subdevice Operations
*/
@@ -329,7 +348,13 @@ int vsp1_subdev_set_pad_format(struct v4l2_subdev *subdev,
format->height = clamp_t(unsigned int, fmt->format.height,
min_height, max_height);
format->field = V4L2_FIELD_NONE;
- format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ format->colorspace = fmt->format.colorspace;
+ format->xfer_func = fmt->format.xfer_func;
+ format->ycbcr_enc = fmt->format.ycbcr_enc;
+ format->quantization = fmt->format.quantization;
+
+ vsp1_entity_adjust_color_space(format);
fmt->format = *format;
@@ -528,6 +553,9 @@ struct media_pad *vsp1_entity_remote_pad(struct media_pad *pad)
{ VI6_DPR_NODE_WPF(idx) }, VI6_DPR_NODE_WPF(idx) }
static const struct vsp1_route vsp1_routes[] = {
+ { VSP1_ENTITY_IIF, 0, VI6_DPR_BRU_ROUTE,
+ { VI6_DPR_NODE_BRU_IN(0), VI6_DPR_NODE_BRU_IN(1),
+ VI6_DPR_NODE_BRU_IN(3) }, VI6_DPR_NODE_WPF(0) },
{ VSP1_ENTITY_BRS, 0, VI6_DPR_ILV_BRS_ROUTE,
{ VI6_DPR_NODE_BRS_IN(0), VI6_DPR_NODE_BRS_IN(1) }, 0 },
{ VSP1_ENTITY_BRU, 0, VI6_DPR_BRU_ROUTE,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_entity.h b/drivers/media/platform/renesas/vsp1/vsp1_entity.h
index 1bcc9e27dfdc..b7c72d0b7f8e 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_entity.h
@@ -28,6 +28,7 @@ enum vsp1_entity_type {
VSP1_ENTITY_HGT,
VSP1_ENTITY_HSI,
VSP1_ENTITY_HST,
+ VSP1_ENTITY_IIF,
VSP1_ENTITY_LIF,
VSP1_ENTITY_LUT,
VSP1_ENTITY_RPF,
@@ -170,6 +171,8 @@ void vsp1_entity_configure_partition(struct vsp1_entity *entity,
struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb);
+void vsp1_entity_adjust_color_space(struct v4l2_mbus_framefmt *format);
+
struct media_pad *vsp1_entity_remote_pad(struct media_pad *pad);
int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_hsit.c b/drivers/media/platform/renesas/vsp1/vsp1_hsit.c
index 8ba2a7c7305c..1fcd1967d3b2 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_hsit.c
@@ -14,6 +14,7 @@
#include "vsp1.h"
#include "vsp1_dl.h"
+#include "vsp1_entity.h"
#include "vsp1_hsit.h"
#define HSIT_MIN_SIZE 4U
@@ -96,7 +97,13 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
format->height = clamp_t(unsigned int, fmt->format.height,
HSIT_MIN_SIZE, HSIT_MAX_SIZE);
format->field = V4L2_FIELD_NONE;
- format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ format->colorspace = fmt->format.colorspace;
+ format->xfer_func = fmt->format.xfer_func;
+ format->ycbcr_enc = fmt->format.ycbcr_enc;
+ format->quantization = fmt->format.quantization;
+
+ vsp1_entity_adjust_color_space(format);
fmt->format = *format;
@@ -106,6 +113,8 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
format->code = hsit->inverse ? MEDIA_BUS_FMT_ARGB8888_1X32
: MEDIA_BUS_FMT_AHSV8888_1X32;
+ vsp1_entity_adjust_color_space(format);
+
done:
mutex_unlock(&hsit->entity.lock);
return ret;
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_iif.c b/drivers/media/platform/renesas/vsp1/vsp1_iif.c
new file mode 100644
index 000000000000..5dd62bebbe8c
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_iif.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_iif.c -- R-Car VSP1 IIF (ISP Interface)
+ *
+ * Copyright (C) 2025 Ideas On Board Oy
+ * Copyright (C) 2025 Renesas Corporation
+ */
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_iif.h"
+
+#define IIF_MIN_WIDTH 128U
+#define IIF_MIN_HEIGHT 32U
+#define IIF_MAX_WIDTH 5120U
+#define IIF_MAX_HEIGHT 4096U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_iif_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static const unsigned int iif_codes[] = {
+ MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y12_1X12,
+ MEDIA_BUS_FMT_Y16_1X16,
+ MEDIA_BUS_FMT_METADATA_FIXED
+};
+
+static int iif_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, iif_codes,
+ ARRAY_SIZE(iif_codes));
+}
+
+static int iif_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ IIF_MIN_WIDTH, IIF_MIN_HEIGHT,
+ IIF_MAX_WIDTH, IIF_MAX_HEIGHT);
+}
+
+static int iif_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ return vsp1_subdev_set_pad_format(subdev, sd_state, fmt, iif_codes,
+ ARRAY_SIZE(iif_codes),
+ IIF_MIN_WIDTH, IIF_MIN_HEIGHT,
+ IIF_MAX_WIDTH, IIF_MAX_HEIGHT);
+}
+
+static const struct v4l2_subdev_pad_ops iif_pad_ops = {
+ .enum_mbus_code = iif_enum_mbus_code,
+ .enum_frame_size = iif_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = iif_set_format,
+};
+
+static const struct v4l2_subdev_ops iif_ops = {
+ .pad = &iif_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void iif_configure_stream(struct vsp1_entity *entity,
+ struct v4l2_subdev_state *state,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ vsp1_iif_write(dlb, VI6_IIF_CTRL, VI6_IIF_CTRL_CTRL);
+}
+
+static const struct vsp1_entity_operations iif_entity_ops = {
+ .configure_stream = iif_configure_stream,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_iif *vsp1_iif_create(struct vsp1_device *vsp1)
+{
+ struct vsp1_iif *iif;
+ int ret;
+
+ iif = devm_kzalloc(vsp1->dev, sizeof(*iif), GFP_KERNEL);
+ if (!iif)
+ return ERR_PTR(-ENOMEM);
+
+ iif->entity.ops = &iif_entity_ops;
+ iif->entity.type = VSP1_ENTITY_IIF;
+
+ /*
+ * The IIF is never exposed to userspace, but media entity registration
+ * requires a function to be set. Use PROC_VIDEO_PIXEL_FORMATTER just to
+	 * avoid triggering a WARN_ON(); the value won't be seen anywhere.
+ */
+ ret = vsp1_entity_init(vsp1, &iif->entity, "iif", 3, &iif_ops,
+ MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return iif;
+}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_iif.h b/drivers/media/platform/renesas/vsp1/vsp1_iif.h
new file mode 100644
index 000000000000..46f327851c35
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_iif.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_iif.h -- R-Car VSP1 IIF (ISP Interface)
+ *
+ * Copyright (C) 2025 Ideas On Board Oy
+ * Copyright (C) 2025 Renesas Corporation
+ */
+#ifndef __VSP1_IIF_H__
+#define __VSP1_IIF_H__
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+#define VSPX_IIF_SINK_PAD_IMG 0
+#define VSPX_IIF_SINK_PAD_CONFIG 2
+
+struct vsp1_iif {
+ struct vsp1_entity entity;
+};
+
+static inline struct vsp1_iif *to_iif(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_iif, entity.subdev);
+}
+
+struct vsp1_iif *vsp1_iif_create(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_IIF_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_pipe.c b/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
index bb0739f684f3..3cbb768cf6ad 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
@@ -138,14 +138,6 @@ static const struct vsp1_format_info vsp1_video_formats[] = {
VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
1, { 32, 0, 0 }, false, false, 1, 1, false },
- { V4L2_PIX_FMT_HSV24, MEDIA_BUS_FMT_AHSV8888_1X32,
- VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 1, { 24, 0, 0 }, false, false, 1, 1, false },
- { V4L2_PIX_FMT_HSV32, MEDIA_BUS_FMT_AHSV8888_1X32,
- VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 1, { 32, 0, 0 }, false, false, 1, 1, false },
{ V4L2_PIX_FMT_RGBX1010102, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_RGB10_RGB10A2_A2RGB10,
VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
@@ -162,10 +154,6 @@ static const struct vsp1_format_info vsp1_video_formats[] = {
VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
1, { 16, 0, 0 }, false, false, 2, 1, false },
- { V4L2_PIX_FMT_VYUY, MEDIA_BUS_FMT_AYUV8_1X32,
- VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 1, { 16, 0, 0 }, false, true, 2, 1, false },
{ V4L2_PIX_FMT_YUYV, MEDIA_BUS_FMT_AYUV8_1X32,
VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
@@ -222,6 +210,24 @@ static const struct vsp1_format_info vsp1_video_formats[] = {
1, { 32, 0, 0 }, false, false, 2, 1, false },
};
+static const struct vsp1_format_info vsp1_video_gen2_formats[] = {
+ { V4L2_PIX_FMT_VYUY, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 16, 0, 0 }, false, true, 2, 1, false },
+};
+
+static const struct vsp1_format_info vsp1_video_hsit_formats[] = {
+ { V4L2_PIX_FMT_HSV24, MEDIA_BUS_FMT_AHSV8888_1X32,
+ VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 24, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_HSV32, MEDIA_BUS_FMT_AHSV8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
+};
+
/**
* vsp1_get_format_info - Retrieve format information for a 4CC
* @vsp1: the VSP1 device
@@ -235,26 +241,164 @@ const struct vsp1_format_info *vsp1_get_format_info(struct vsp1_device *vsp1,
{
unsigned int i;
- /* Special case, the VYUY and HSV formats are supported on Gen2 only. */
- if (vsp1->info->gen != 2) {
- switch (fourcc) {
- case V4L2_PIX_FMT_VYUY:
- case V4L2_PIX_FMT_HSV24:
- case V4L2_PIX_FMT_HSV32:
- return NULL;
+ for (i = 0; i < ARRAY_SIZE(vsp1_video_formats); ++i) {
+ const struct vsp1_format_info *info = &vsp1_video_formats[i];
+
+ if (info->fourcc == fourcc)
+ return info;
+ }
+
+ if (vsp1->info->gen == 2) {
+ for (i = 0; i < ARRAY_SIZE(vsp1_video_gen2_formats); ++i) {
+ const struct vsp1_format_info *info =
+ &vsp1_video_gen2_formats[i];
+
+ if (info->fourcc == fourcc)
+ return info;
+ }
+ }
+
+ if (vsp1_feature(vsp1, VSP1_HAS_HSIT)) {
+ for (i = 0; i < ARRAY_SIZE(vsp1_video_hsit_formats); ++i) {
+ const struct vsp1_format_info *info =
+ &vsp1_video_hsit_formats[i];
+
+ if (info->fourcc == fourcc)
+ return info;
}
}
+ return NULL;
+}
+
+/**
+ * vsp1_get_format_info_by_index - Enumerate format information
+ * @vsp1: the VSP1 device
+ * @index: the format index
+ * @code: media bus code to limit enumeration
+ *
+ * Return a pointer to the format information structure corresponding to the
+ * given index, or NULL if the index exceeds the supported formats list. If the
+ * @code parameter is not zero, only formats compatible with the media bus code
+ * will be enumerated.
+ */
+const struct vsp1_format_info *
+vsp1_get_format_info_by_index(struct vsp1_device *vsp1, unsigned int index,
+ u32 code)
+{
+ unsigned int i;
+
+ if (!code) {
+ if (index < ARRAY_SIZE(vsp1_video_formats))
+ return &vsp1_video_formats[index];
+
+		index -= ARRAY_SIZE(vsp1_video_formats);
+
+		if (vsp1->info->gen == 2) {
+			if (index < ARRAY_SIZE(vsp1_video_gen2_formats))
+				return &vsp1_video_gen2_formats[index];
+			index -= ARRAY_SIZE(vsp1_video_gen2_formats);
+		}
+
+		if (vsp1_feature(vsp1, VSP1_HAS_HSIT)) {
+			if (index < ARRAY_SIZE(vsp1_video_hsit_formats))
+				return &vsp1_video_hsit_formats[index];
+		}
+
+ return NULL;
+ }
+
for (i = 0; i < ARRAY_SIZE(vsp1_video_formats); ++i) {
const struct vsp1_format_info *info = &vsp1_video_formats[i];
- if (info->fourcc == fourcc)
- return info;
+ if (info->mbus == code) {
+ if (!index)
+ return info;
+ index--;
+ }
+ }
+
+ if (vsp1->info->gen == 2) {
+ for (i = 0; i < ARRAY_SIZE(vsp1_video_gen2_formats); ++i) {
+ const struct vsp1_format_info *info =
+ &vsp1_video_gen2_formats[i];
+
+ if (info->mbus == code) {
+ if (!index)
+ return info;
+ index--;
+ }
+ }
+ }
+
+ if (vsp1_feature(vsp1, VSP1_HAS_HSIT)) {
+ for (i = 0; i < ARRAY_SIZE(vsp1_video_hsit_formats); ++i) {
+ const struct vsp1_format_info *info =
+ &vsp1_video_hsit_formats[i];
+
+ if (info->mbus == code) {
+ if (!index)
+ return info;
+ index--;
+ }
+ }
}
return NULL;
}
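
Editor's note: both lookup paths above walk the optional per-feature tables while decrementing the caller's index, so the tables behave as one contiguous list. The same pattern in a condensed standalone form (tables hypothetical):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Two hypothetical format tables; the second is conditional on a feature. */
static const int base[] = { 10, 11, 12 };
static const int extra[] = { 20, 21 };

static const int *enum_by_index(unsigned int index, int has_extra)
{
	if (index < ARRAY_SIZE(base))
		return &base[index];
	index -= ARRAY_SIZE(base);	/* fall through to the next table */

	if (has_extra && index < ARRAY_SIZE(extra))
		return &extra[index];

	return NULL;			/* index exhausted all tables */
}

int main(void)
{
	const int *v;

	for (unsigned int i = 0; (v = enum_by_index(i, 1)); i++)
		printf("%d ", *v);	/* prints: 10 11 12 20 21 */
	printf("\n");
	return 0;
}
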
+/**
+ * vsp1_adjust_color_space - Adjust color space fields in a format
+ * @code: the media bus code
+ * @colorspace: the colorspace
+ * @xfer_func: the transfer function
+ * @encoding: the encoding
+ * @quantization: the quantization
+ *
+ * This function adjusts all color space fields of a video device or subdev
+ * format structure, taking into account the requested format, requested color
+ * space and limitations of the VSP1. It should be used in the video device and
+ * subdev set format handlers.
+ *
+ * The colorspace and xfer_func fields are freely configurable, as they are out
+ * of scope for VSP processing. The encoding and quantization are hardcoded for
+ * non-YUV formats, and can be configured for YUV formats.
+ */
+void vsp1_adjust_color_space(u32 code, u32 *colorspace, u8 *xfer_func,
+ u8 *encoding, u8 *quantization)
+{
+ if (*colorspace == V4L2_COLORSPACE_DEFAULT ||
+ *colorspace >= V4L2_COLORSPACE_LAST)
+ *colorspace = code == MEDIA_BUS_FMT_AYUV8_1X32
+ ? V4L2_COLORSPACE_SMPTE170M
+ : V4L2_COLORSPACE_SRGB;
+
+ if (*xfer_func == V4L2_XFER_FUNC_DEFAULT ||
+ *xfer_func >= V4L2_XFER_FUNC_LAST)
+ *xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(*colorspace);
+
+ switch (code) {
+ case MEDIA_BUS_FMT_ARGB8888_1X32:
+ default:
+ *encoding = V4L2_YCBCR_ENC_601;
+ *quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ break;
+
+ case MEDIA_BUS_FMT_AHSV8888_1X32:
+ *encoding = V4L2_HSV_ENC_256;
+ *quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ break;
+
+ case MEDIA_BUS_FMT_AYUV8_1X32:
+ if (*encoding != V4L2_YCBCR_ENC_601 &&
+ *encoding != V4L2_YCBCR_ENC_709)
+ *encoding = V4L2_YCBCR_ENC_601;
+ if (*quantization != V4L2_QUANTIZATION_FULL_RANGE &&
+ *quantization != V4L2_QUANTIZATION_LIM_RANGE)
+ *quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ break;
+ }
+}
+
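
Editor's note: a standalone model of the defaulting rules implemented above, using small integer stand-ins for the V4L2 enums (assumption: only the colourspace and encoding fields are modelled; xfer_func and quantization follow the same shape):

#include <stdio.h>

/* Small integer stand-ins for the V4L2 colourspace/encoding enums. */
enum { CS_DEFAULT, CS_SMPTE170M, CS_SRGB };
enum { ENC_601, ENC_709, ENC_OTHER };

static void adjust(int is_yuv, int *cs, int *enc)
{
	/* DEFAULT becomes SMPTE170M for the YUV bus code, SRGB otherwise. */
	if (*cs == CS_DEFAULT)
		*cs = is_yuv ? CS_SMPTE170M : CS_SRGB;

	if (is_yuv) {
		/* Only BT.601 and BT.709 are accepted for YUV. */
		if (*enc != ENC_601 && *enc != ENC_709)
			*enc = ENC_601;
	} else {
		*enc = ENC_601;		/* hardcoded for non-YUV formats */
	}
}

int main(void)
{
	int cs = CS_DEFAULT, enc = ENC_OTHER;

	adjust(1, &cs, &enc);
	printf("cs=%d enc=%d\n", cs, enc);	/* cs=1 enc=0 */
	return 0;
}
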
/* -----------------------------------------------------------------------------
* Pipeline Management
*/
@@ -286,6 +430,7 @@ void vsp1_pipeline_reset(struct vsp1_pipeline *pipe)
pipe->brx = NULL;
pipe->hgo = NULL;
pipe->hgt = NULL;
+ pipe->iif = NULL;
pipe->lif = NULL;
pipe->uds = NULL;
}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_pipe.h b/drivers/media/platform/renesas/vsp1/vsp1_pipe.h
index 1ba7bdbad5a8..7f623b8cbe5c 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_pipe.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_pipe.h
@@ -119,6 +119,7 @@ struct vsp1_pipeline {
struct vsp1_entity *brx;
struct vsp1_entity *hgo;
struct vsp1_entity *hgt;
+ struct vsp1_entity *iif;
struct vsp1_entity *lif;
struct vsp1_entity *uds;
struct vsp1_entity *uds_input;
@@ -179,5 +180,10 @@ void vsp1_pipeline_calculate_partition(struct vsp1_pipeline *pipe,
const struct vsp1_format_info *vsp1_get_format_info(struct vsp1_device *vsp1,
u32 fourcc);
+const struct vsp1_format_info *
+vsp1_get_format_info_by_index(struct vsp1_device *vsp1, unsigned int index,
+ u32 code);
+void vsp1_adjust_color_space(u32 code, u32 *colorspace, u8 *xfer_func,
+ u8 *encoding, u8 *quantization);
#endif /* __VSP1_PIPE_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_regs.h b/drivers/media/platform/renesas/vsp1/vsp1_regs.h
index 7eca82e0ba7e..86e47c2d991f 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_regs.h
@@ -253,6 +253,13 @@
#define VI6_RPF_BRDITH_CTRL_CBRM BIT(0)
/* -----------------------------------------------------------------------------
+ * IIF Control Registers
+ */
+
+#define VI6_IIF_CTRL 0x0608
+#define VI6_IIF_CTRL_CTRL 0x13
+
+/* -----------------------------------------------------------------------------
* WPF Control Registers
*/
@@ -388,6 +395,7 @@
#define VI6_DPR_HST_ROUTE 0x2044
#define VI6_DPR_HSI_ROUTE 0x2048
#define VI6_DPR_BRU_ROUTE 0x204c
+#define VI6_DPR_ROUTE_IIFSEL BIT(28)
#define VI6_DPR_ILV_BRS_ROUTE 0x2050
#define VI6_DPR_ROUTE_BRSSEL BIT(28)
#define VI6_DPR_ROUTE_FXA_MASK (0xff << 16)
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
index 5c8b3ba1bd3c..811f2b7c5cc5 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
@@ -84,7 +84,7 @@ static void rpf_configure_stream(struct vsp1_entity *entity,
sink_format = v4l2_subdev_state_get_format(state, RWPF_PAD_SINK);
source_format = v4l2_subdev_state_get_format(state, RWPF_PAD_SOURCE);
- infmt = VI6_RPF_INFMT_CIPM
+ infmt = (pipe->iif ? 0 : VI6_RPF_INFMT_CIPM)
| (fmtinfo->hwfmt << VI6_RPF_INFMT_RDFMT_SHIFT);
if (fmtinfo->swap_yc)
@@ -92,12 +92,44 @@ static void rpf_configure_stream(struct vsp1_entity *entity,
if (fmtinfo->swap_uv)
infmt |= VI6_RPF_INFMT_SPUVS;
- if (sink_format->code != source_format->code)
- infmt |= VI6_RPF_INFMT_CSC;
+ if (sink_format->code != source_format->code) {
+ u16 ycbcr_enc;
+ u16 quantization;
+ u32 rdtm;
+
+ if (sink_format->code == MEDIA_BUS_FMT_AYUV8_1X32) {
+ ycbcr_enc = sink_format->ycbcr_enc;
+ quantization = sink_format->quantization;
+ } else {
+ ycbcr_enc = source_format->ycbcr_enc;
+ quantization = source_format->quantization;
+ }
+
+ if (ycbcr_enc == V4L2_YCBCR_ENC_601 &&
+ quantization == V4L2_QUANTIZATION_LIM_RANGE)
+ rdtm = VI6_RPF_INFMT_RDTM_BT601;
+ else if (ycbcr_enc == V4L2_YCBCR_ENC_601 &&
+ quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ rdtm = VI6_RPF_INFMT_RDTM_BT601_EXT;
+ else if (ycbcr_enc == V4L2_YCBCR_ENC_709 &&
+ quantization == V4L2_QUANTIZATION_LIM_RANGE)
+ rdtm = VI6_RPF_INFMT_RDTM_BT709;
+ else
+ rdtm = VI6_RPF_INFMT_RDTM_BT709_EXT;
+
+ infmt |= VI6_RPF_INFMT_CSC | rdtm;
+ }
vsp1_rpf_write(rpf, dlb, VI6_RPF_INFMT, infmt);
vsp1_rpf_write(rpf, dlb, VI6_RPF_DSWAP, fmtinfo->swap);
+ /* No further configuration for VSPX. */
+ if (pipe->iif) {
+ /* VSPX wants alpha_sel to be set to 0. */
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_ALPH_SEL, 0);
+ return;
+ }
+
if (entity->vsp1->info->gen == 4) {
u32 ext_infmt0;
u32 ext_infmt1;
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
index 9d38203e73d0..9c8085d5d306 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
@@ -10,6 +10,7 @@
#include <media/v4l2-subdev.h>
#include "vsp1.h"
+#include "vsp1_entity.h"
#include "vsp1_rwpf.h"
#include "vsp1_video.h"
@@ -35,6 +36,11 @@ static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
code->code = codes[code->index];
+ if (code->pad == RWPF_PAD_SOURCE &&
+ code->code == MEDIA_BUS_FMT_AYUV8_1X32)
+ code->flags = V4L2_SUBDEV_MBUS_CODE_CSC_YCBCR_ENC
+ | V4L2_SUBDEV_MBUS_CODE_CSC_QUANTIZATION;
+
return 0;
}
@@ -76,12 +82,45 @@ static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
format = v4l2_subdev_state_get_format(state, fmt->pad);
if (fmt->pad == RWPF_PAD_SOURCE) {
+ const struct v4l2_mbus_framefmt *sink_format =
+ v4l2_subdev_state_get_format(state, RWPF_PAD_SINK);
+ u16 flags = fmt->format.flags & V4L2_MBUS_FRAMEFMT_SET_CSC;
+ bool csc;
+
/*
* The RWPF performs format conversion but can't scale, only the
- * format code can be changed on the source pad.
+ * format code, encoding and quantization can be changed on the
+ * source pad when converting between RGB and YUV.
+ */
+ if (sink_format->code != MEDIA_BUS_FMT_AHSV8888_1X32 &&
+ fmt->format.code != MEDIA_BUS_FMT_AHSV8888_1X32)
+ format->code = fmt->format.code;
+ else
+ format->code = sink_format->code;
+
+ /*
+		 * Encoding and quantization can only be configured when a
+		 * YCbCr <-> RGB conversion is enabled. The V4L2 API requires
+		 * userspace to set the V4L2_MBUS_FRAMEFMT_SET_CSC flag. If
+		 * either of these conditions is not met, use the encoding and
+		 * quantization values from the sink pad.
*/
- format->code = fmt->format.code;
+ csc = (format->code == MEDIA_BUS_FMT_AYUV8_1X32) !=
+ (sink_format->code == MEDIA_BUS_FMT_AYUV8_1X32);
+
+ if (csc && (flags & V4L2_MBUS_FRAMEFMT_SET_CSC)) {
+ format->ycbcr_enc = fmt->format.ycbcr_enc;
+ format->quantization = fmt->format.quantization;
+ } else {
+ format->ycbcr_enc = sink_format->ycbcr_enc;
+ format->quantization = sink_format->quantization;
+ }
+
+ vsp1_entity_adjust_color_space(format);
+
fmt->format = *format;
+ fmt->format.flags = flags;
+
goto done;
}
@@ -91,7 +130,13 @@ static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
format->height = clamp_t(unsigned int, fmt->format.height,
RWPF_MIN_HEIGHT, rwpf->max_height);
format->field = V4L2_FIELD_NONE;
- format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ format->colorspace = fmt->format.colorspace;
+ format->xfer_func = fmt->format.xfer_func;
+ format->ycbcr_enc = fmt->format.ycbcr_enc;
+ format->quantization = fmt->format.quantization;
+
+ vsp1_entity_adjust_color_space(format);
fmt->format = *format;
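
Editor's note: the V4L2_MBUS_FRAMEFMT_SET_CSC handshake above is driven from userspace through VIDIOC_SUBDEV_S_FMT. A hedged sketch of such a caller (device path and pad number hypothetical):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/v4l2-subdev.h>

int main(void)
{
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = 1,			/* hypothetical source pad */
	};
	int fd = open("/dev/v4l-subdev0", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return 1;

	fmt.format.width = 1920;
	fmt.format.height = 1080;
	fmt.format.code = MEDIA_BUS_FMT_AYUV8_1X32;
	fmt.format.flags = V4L2_MBUS_FRAMEFMT_SET_CSC;	/* request the override */
	fmt.format.ycbcr_enc = V4L2_YCBCR_ENC_709;
	fmt.format.quantization = V4L2_QUANTIZATION_FULL_RANGE;

	if (ioctl(fd, VIDIOC_SUBDEV_S_FMT, &fmt))
		perror("VIDIOC_SUBDEV_S_FMT");

	/* The driver writes back the values it actually accepted. */
	printf("enc=%u quant=%u\n", fmt.format.ycbcr_enc,
	       fmt.format.quantization);
	return 0;
}
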
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_sru.c b/drivers/media/platform/renesas/vsp1/vsp1_sru.c
index 1759ce642e6e..bba2872afaf2 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_sru.c
@@ -14,6 +14,7 @@
#include "vsp1.h"
#include "vsp1_dl.h"
+#include "vsp1_entity.h"
#include "vsp1_pipe.h"
#include "vsp1_sru.h"
@@ -178,6 +179,8 @@ static void sru_try_format(struct vsp1_sru *sru,
fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
+ vsp1_entity_adjust_color_space(fmt);
+
fmt->width = clamp(fmt->width, SRU_MIN_SIZE, SRU_MAX_SIZE);
fmt->height = clamp(fmt->height, SRU_MIN_SIZE, SRU_MAX_SIZE);
break;
@@ -187,6 +190,11 @@ static void sru_try_format(struct vsp1_sru *sru,
format = v4l2_subdev_state_get_format(sd_state, SRU_PAD_SINK);
fmt->code = format->code;
+ fmt->colorspace = format->colorspace;
+ fmt->xfer_func = format->xfer_func;
+ fmt->ycbcr_enc = format->ycbcr_enc;
+ fmt->quantization = format->quantization;
+
/*
* We can upscale by 2 in both direction, but not independently.
* Compare the input and output rectangles areas (avoiding
@@ -211,7 +219,6 @@ static void sru_try_format(struct vsp1_sru *sru,
}
fmt->field = V4L2_FIELD_NONE;
- fmt->colorspace = V4L2_COLORSPACE_SRGB;
}
static int sru_set_format(struct v4l2_subdev *subdev,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_uds.c b/drivers/media/platform/renesas/vsp1/vsp1_uds.c
index c5a38478cf8c..2db473b6f83c 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_uds.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_uds.c
@@ -14,6 +14,7 @@
#include "vsp1.h"
#include "vsp1_dl.h"
+#include "vsp1_entity.h"
#include "vsp1_pipe.h"
#include "vsp1_uds.h"
@@ -177,6 +178,8 @@ static void uds_try_format(struct vsp1_uds *uds,
fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
+ vsp1_entity_adjust_color_space(fmt);
+
fmt->width = clamp(fmt->width, UDS_MIN_SIZE, UDS_MAX_SIZE);
fmt->height = clamp(fmt->height, UDS_MIN_SIZE, UDS_MAX_SIZE);
break;
@@ -186,6 +189,11 @@ static void uds_try_format(struct vsp1_uds *uds,
format = v4l2_subdev_state_get_format(sd_state, UDS_PAD_SINK);
fmt->code = format->code;
+ fmt->colorspace = format->colorspace;
+ fmt->xfer_func = format->xfer_func;
+ fmt->ycbcr_enc = format->ycbcr_enc;
+ fmt->quantization = format->quantization;
+
uds_output_limits(format->width, &minimum, &maximum);
fmt->width = clamp(fmt->width, minimum, maximum);
uds_output_limits(format->height, &minimum, &maximum);
@@ -194,7 +202,6 @@ static void uds_try_format(struct vsp1_uds *uds,
}
fmt->field = V4L2_FIELD_NONE;
- fmt->colorspace = V4L2_COLORSPACE_SRGB;
}
static int uds_set_format(struct v4l2_subdev *subdev,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_video.c b/drivers/media/platform/renesas/vsp1/vsp1_video.c
index 03f4efd6b82b..bc66fbdde3cc 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_video.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_video.c
@@ -127,12 +127,24 @@ static int __vsp1_video_try_format(struct vsp1_video *video,
info = vsp1_get_format_info(video->vsp1, VSP1_VIDEO_DEF_FORMAT);
pix->pixelformat = info->fourcc;
- pix->colorspace = V4L2_COLORSPACE_SRGB;
pix->field = V4L2_FIELD_NONE;
- if (info->fourcc == V4L2_PIX_FMT_HSV24 ||
- info->fourcc == V4L2_PIX_FMT_HSV32)
- pix->hsv_enc = V4L2_HSV_ENC_256;
+ /*
+ * Adjust the colour space fields. On capture devices, userspace needs
+ * to set the V4L2_PIX_FMT_FLAG_SET_CSC flag to override the defaults.
+ * Reset all fields to *_DEFAULT if the flag isn't set, to then handle
+ * capture and output devices in the same way.
+ */
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ !(pix->flags & V4L2_PIX_FMT_FLAG_SET_CSC)) {
+ pix->colorspace = V4L2_COLORSPACE_DEFAULT;
+ pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ }
+
+ vsp1_adjust_color_space(info->mbus, &pix->colorspace, &pix->xfer_func,
+ &pix->ycbcr_enc, &pix->quantization);
memset(pix->reserved, 0, sizeof(pix->reserved));
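
On the userspace side, overriding the defaults on a capture node means passing the flag through VIDIOC_S_FMT. A hedged illustration (pixel format, resolution and colour space values are arbitrary examples):

#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_csc_example(int fd)
{
        struct v4l2_format fmt = {
                .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
        };

        fmt.fmt.pix_mp.width = 1920;
        fmt.fmt.pix_mp.height = 1080;
        fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M;
        /* Without this flag the driver resets the fields below to defaults. */
        fmt.fmt.pix_mp.flags = V4L2_PIX_FMT_FLAG_SET_CSC;
        fmt.fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_601;
        fmt.fmt.pix_mp.quantization = V4L2_QUANTIZATION_LIM_RANGE;

        return ioctl(fd, VIDIOC_S_FMT, &fmt);
}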
@@ -888,16 +900,36 @@ vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
struct vsp1_video *video = to_vsp1_video(vfh->vdev);
cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
- | V4L2_CAP_VIDEO_CAPTURE_MPLANE
+ | V4L2_CAP_IO_MC | V4L2_CAP_VIDEO_CAPTURE_MPLANE
| V4L2_CAP_VIDEO_OUTPUT_MPLANE;
-
strscpy(cap->driver, "vsp1", sizeof(cap->driver));
strscpy(cap->card, video->video.name, sizeof(cap->card));
return 0;
}
+static int vsp1_video_enum_format(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+ const struct vsp1_format_info *info;
+
+ info = vsp1_get_format_info_by_index(video->vsp1, f->index, f->mbus_code);
+ if (!info)
+ return -EINVAL;
+
+ f->pixelformat = info->fourcc;
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ info->mbus == MEDIA_BUS_FMT_AYUV8_1X32)
+ f->flags = V4L2_FMT_FLAG_CSC_YCBCR_ENC
+ | V4L2_FMT_FLAG_CSC_QUANTIZATION;
+
+ return 0;
+}
+
static int
vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
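
The V4L2_CAP_IO_MC capability advertised below lets applications pass a media bus code to VIDIOC_ENUM_FMT, so only pixel formats compatible with that code are enumerated. A sketch of the userspace loop (illustrative only):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void enum_formats_example(int fd, unsigned int mbus_code)
{
        struct v4l2_fmtdesc fmt;

        for (unsigned int i = 0; ; i++) {
                memset(&fmt, 0, sizeof(fmt));
                fmt.index = i;
                fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
                fmt.mbus_code = mbus_code;      /* 0 disables the filter */

                if (ioctl(fd, VIDIOC_ENUM_FMT, &fmt) < 0)
                        break;

                /* fmt.flags carries the CSC capability flags set above. */
                printf("format %u: %.4s\n", i, (const char *)&fmt.pixelformat);
        }
}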
@@ -1013,6 +1045,8 @@ err_pipe:
static const struct v4l2_ioctl_ops vsp1_video_ioctl_ops = {
.vidioc_querycap = vsp1_video_querycap,
+ .vidioc_enum_fmt_vid_cap = vsp1_video_enum_format,
+ .vidioc_enum_fmt_vid_out = vsp1_video_enum_format,
.vidioc_g_fmt_vid_cap_mplane = vsp1_video_get_format,
.vidioc_s_fmt_vid_cap_mplane = vsp1_video_set_format,
.vidioc_try_fmt_vid_cap_mplane = vsp1_video_try_format,
@@ -1207,14 +1241,14 @@ struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
video->pad.flags = MEDIA_PAD_FL_SOURCE;
video->video.vfl_dir = VFL_DIR_TX;
video->video.device_caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE |
- V4L2_CAP_STREAMING;
+ V4L2_CAP_STREAMING | V4L2_CAP_IO_MC;
} else {
direction = "output";
video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
video->pad.flags = MEDIA_PAD_FL_SINK;
video->video.vfl_dir = VFL_DIR_RX;
video->video.device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
- V4L2_CAP_STREAMING;
+ V4L2_CAP_STREAMING | V4L2_CAP_IO_MC;
}
mutex_init(&video->lock);
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_wpf.c b/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
index f176750ccd98..30662cfdf837 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
@@ -133,6 +133,7 @@ static int wpf_init_controls(struct vsp1_rwpf *wpf)
{
struct vsp1_device *vsp1 = wpf->entity.vsp1;
unsigned int num_flip_ctrls;
+ int ret;
spin_lock_init(&wpf->flip.lock);
@@ -156,7 +157,9 @@ static int wpf_init_controls(struct vsp1_rwpf *wpf)
num_flip_ctrls = 0;
}
- vsp1_rwpf_init_ctrls(wpf, num_flip_ctrls);
+ ret = vsp1_rwpf_init_ctrls(wpf, num_flip_ctrls);
+ if (ret < 0)
+ return ret;
if (num_flip_ctrls >= 1) {
wpf->flip.ctrls.vflip =
@@ -174,11 +177,8 @@ static int wpf_init_controls(struct vsp1_rwpf *wpf)
v4l2_ctrl_cluster(3, &wpf->flip.ctrls.vflip);
}
- if (wpf->ctrls.error) {
- dev_err(vsp1->dev, "wpf%u: failed to initialize controls\n",
- wpf->entity.index);
+ if (wpf->ctrls.error)
return wpf->ctrls.error;
- }
return 0;
}
@@ -247,8 +247,11 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
sink_format = v4l2_subdev_state_get_format(state, RWPF_PAD_SINK);
source_format = v4l2_subdev_state_get_format(state, RWPF_PAD_SOURCE);
- /* Format */
- if (!pipe->lif || wpf->writeback) {
+ /*
+ * Format configuration. Skip for IIF (VSPX) or if the pipe doesn't
+ * write to memory.
+ */
+ if (!pipe->iif && (!pipe->lif || wpf->writeback)) {
const struct v4l2_pix_format_mplane *format = &wpf->format;
const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
@@ -279,8 +282,33 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
(256 << VI6_WPF_ROT_CTRL_LMEM_WD_SHIFT));
}
- if (sink_format->code != source_format->code)
- outfmt |= VI6_WPF_OUTFMT_CSC;
+ if (sink_format->code != source_format->code) {
+ u16 ycbcr_enc;
+ u16 quantization;
+ u32 wrtm;
+
+ if (sink_format->code == MEDIA_BUS_FMT_AYUV8_1X32) {
+ ycbcr_enc = sink_format->ycbcr_enc;
+ quantization = sink_format->quantization;
+ } else {
+ ycbcr_enc = source_format->ycbcr_enc;
+ quantization = source_format->quantization;
+ }
+
+ if (ycbcr_enc == V4L2_YCBCR_ENC_601 &&
+ quantization == V4L2_QUANTIZATION_LIM_RANGE)
+ wrtm = VI6_WPF_OUTFMT_WRTM_BT601;
+ else if (ycbcr_enc == V4L2_YCBCR_ENC_601 &&
+ quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ wrtm = VI6_WPF_OUTFMT_WRTM_BT601_EXT;
+ else if (ycbcr_enc == V4L2_YCBCR_ENC_709 &&
+ quantization == V4L2_QUANTIZATION_LIM_RANGE)
+ wrtm = VI6_WPF_OUTFMT_WRTM_BT709;
+ else
+ wrtm = VI6_WPF_OUTFMT_WRTM_BT709_EXT;
+
+ outfmt |= VI6_WPF_OUTFMT_CSC | wrtm;
+ }
wpf->outfmt = outfmt;
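
The encoding/quantization to write-back matrix selection above covers three explicit pairs and falls back to BT.709 extended range. The same logic expressed as a table, purely as a readability sketch and not the driver's code (VI6_* register values come from the driver's vsp1_regs.h):

static const struct {
        u16 ycbcr_enc;
        u16 quantization;
        u32 wrtm;
} wrtm_map[] = {
        { V4L2_YCBCR_ENC_601, V4L2_QUANTIZATION_LIM_RANGE,
          VI6_WPF_OUTFMT_WRTM_BT601 },
        { V4L2_YCBCR_ENC_601, V4L2_QUANTIZATION_FULL_RANGE,
          VI6_WPF_OUTFMT_WRTM_BT601_EXT },
        { V4L2_YCBCR_ENC_709, V4L2_QUANTIZATION_LIM_RANGE,
          VI6_WPF_OUTFMT_WRTM_BT709 },
};

static u32 wpf_wrtm(u16 ycbcr_enc, u16 quantization)
{
        for (unsigned int i = 0; i < ARRAY_SIZE(wrtm_map); ++i)
                if (wrtm_map[i].ycbcr_enc == ycbcr_enc &&
                    wrtm_map[i].quantization == quantization)
                        return wrtm_map[i].wrtm;

        /* Fallback, matching the final "else" in the hunk above. */
        return VI6_WPF_OUTFMT_WRTM_BT709_EXT;
}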
@@ -291,7 +319,7 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
* Sources. If the pipeline has a single input and BRx is not used,
* configure it as the master layer. Otherwise configure all
* inputs as sub-layers and select the virtual RPF as the master
- * layer.
+ * layer. For VSPX, configure the enabled sources as masters.
*/
for (i = 0; i < vsp1->info->rpf_count; ++i) {
struct vsp1_rwpf *input = pipe->inputs[i];
@@ -299,7 +327,7 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
if (!input)
continue;
- srcrpf |= (!pipe->brx && pipe->num_inputs == 1)
+ srcrpf |= (pipe->iif || (!pipe->brx && pipe->num_inputs == 1))
? VI6_WPF_SRCRPF_RPF_ACT_MST(input->entity.index)
: VI6_WPF_SRCRPF_RPF_ACT_SUB(input->entity.index);
}
@@ -316,6 +344,9 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
vsp1_dl_body_write(dlb, VI6_WPF_IRQ_ENB(index),
VI6_WPF_IRQ_ENB_DFEE);
+ if (pipe->iif)
+ return;
+
/*
* Configure writeback for display pipelines (the wpf writeback flag is
* never set for memory-to-memory pipelines). Start by adding a chained
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
index d94917211828..8c29a1c9309a 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
@@ -881,7 +881,7 @@ static int rkisp1_isp_set_selection(struct v4l2_subdev *sd,
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- dev_dbg(isp->rkisp1->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__,
+ dev_dbg(isp->rkisp1->dev, "%s: pad: %d sel(%d,%d)/%ux%u\n", __func__,
sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO)
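
The "(%d,%d)/%ux%u" form used throughout these hunks prints a rectangle as position/size: in struct v4l2_rect, left/top are signed while width/height are unsigned, hence the mixed specifiers. A minimal example of the convention:

#include <linux/printk.h>
#include <linux/videodev2.h>

static void log_rect(const struct v4l2_rect *r)
{
        /* left/top are __s32, width/height are __u32. */
        pr_debug("rect: (%d,%d)/%ux%u\n", r->left, r->top,
                 r->width, r->height);
}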
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
index bf0260600a19..139177db9c6d 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
@@ -327,13 +327,6 @@
#define RKISP1_CIF_IMG_EFF_CTRL_CFG_UPD BIT(4)
#define RKISP1_CIF_IMG_EFF_CTRL_YCBCR_FULL BIT(5)
-#define RKISP1_CIF_IMG_EFF_CTRL_MODE_BLACKWHITE_SHIFT 0
-#define RKISP1_CIF_IMG_EFF_CTRL_MODE_NEGATIVE_SHIFT 1
-#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SEPIA_SHIFT 2
-#define RKISP1_CIF_IMG_EFF_CTRL_MODE_COLOR_SEL_SHIFT 3
-#define RKISP1_CIF_IMG_EFF_CTRL_MODE_EMBOSS_SHIFT 4
-#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SKETCH_SHIFT 5
-#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SHARPEN_SHIFT 6
#define RKISP1_CIF_IMG_EFF_CTRL_MODE_MASK 0xe
/* IMG_EFF_COLOR_SEL */
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
index f073e72a0d37..8e6b753d3081 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
@@ -600,7 +600,7 @@ static int rkisp1_rsz_set_selection(struct v4l2_subdev *sd,
if (sel->target != V4L2_SEL_TGT_CROP || sel->pad == RKISP1_RSZ_PAD_SRC)
return -EINVAL;
- dev_dbg(rsz->rkisp1->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__,
+ dev_dbg(rsz->rkisp1->dev, "%s: pad: %d sel(%d,%d)/%ux%u\n", __func__,
sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
rkisp1_rsz_set_sink_crop(rsz, sd_state, &sel->r);
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-capture.c b/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
index c3c2e474a18a..5b412afd7d60 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
@@ -700,7 +700,7 @@ static void fimc_capture_try_selection(struct fimc_ctx *ctx,
r->top = clamp_t(u32, r->top, 0, sink->f_height - r->height);
r->left = round_down(r->left, var->hor_offs_align);
- dbg("target %#x: (%d,%d)/%dx%d, sink fmt: %dx%d",
+ dbg("target %#x: (%d,%d)/%ux%u, sink fmt: %dx%d",
target, r->left, r->top, r->width, r->height,
sink->f_width, sink->f_height);
}
@@ -1622,7 +1622,7 @@ static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
r->height = f->height;
}
- dbg("target %#x: l:%d, t:%d, %dx%d, f_w: %d, f_h: %d",
+ dbg("target %#x: (%d,%d)/%ux%u, f_w: %d, f_h: %d",
sel->pad, r->left, r->top, r->width, r->height,
f->f_width, f->f_height);
@@ -1671,7 +1671,7 @@ static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
spin_unlock_irqrestore(&fimc->slock, flags);
}
- dbg("target %#x: (%d,%d)/%dx%d", sel->target, r->left, r->top,
+ dbg("target %#x: (%d,%d)/%ux%u", sel->target, r->left, r->top,
r->width, r->height);
mutex_unlock(&fimc->lock);
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is-regs.c b/drivers/media/platform/samsung/exynos4-is/fimc-is-regs.c
index 366e6393817d..5f9c44e825a5 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is-regs.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-regs.c
@@ -164,6 +164,7 @@ int fimc_is_hw_change_mode(struct fimc_is *is)
if (WARN_ON(is->config_index >= ARRAY_SIZE(cmd)))
return -EINVAL;
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
mcuctl_write(cmd[is->config_index], is, MCUCTL_REG_ISSR(0));
mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
mcuctl_write(is->setfile.sub_index, is, MCUCTL_REG_ISSR(2));
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-lite.c b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
index f23e51e3da2f..0ce293b0718b 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
@@ -611,7 +611,7 @@ static void fimc_lite_try_crop(struct fimc_lite *fimc, struct v4l2_rect *r)
r->left = round_down(r->left, fimc->dd->win_hor_offs_align);
r->top = clamp_t(u32, r->top, 0, frame->f_height - r->height);
- v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, sink fmt: %dx%d\n",
+ v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%ux%u, sink fmt: %dx%d\n",
r->left, r->top, r->width, r->height,
frame->f_width, frame->f_height);
}
@@ -631,7 +631,7 @@ static void fimc_lite_try_compose(struct fimc_lite *fimc, struct v4l2_rect *r)
r->left = round_down(r->left, fimc->dd->out_hor_offs_align);
r->top = clamp_t(u32, r->top, 0, fimc->out_frame.f_height - r->height);
- v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, source fmt: %dx%d\n",
+ v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%ux%u, source fmt: %dx%d\n",
r->left, r->top, r->width, r->height,
frame->f_width, frame->f_height);
}
@@ -1140,7 +1140,7 @@ static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
}
mutex_unlock(&fimc->lock);
- v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d\n",
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d)/%ux%u, f_w: %d, f_h: %d\n",
__func__, f->rect.left, f->rect.top, f->rect.width,
f->rect.height, f->f_width, f->f_height);
@@ -1174,7 +1174,7 @@ static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
}
mutex_unlock(&fimc->lock);
- v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d\n",
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d)/%ux%u, f_w: %d, f_h: %d\n",
__func__, f->rect.left, f->rect.top, f->rect.width,
f->rect.height, f->f_width, f->f_height);
diff --git a/drivers/media/platform/samsung/exynos4-is/media-dev.h b/drivers/media/platform/samsung/exynos4-is/media-dev.h
index a50e58ab7ef7..ea496670d4b5 100644
--- a/drivers/media/platform/samsung/exynos4-is/media-dev.h
+++ b/drivers/media/platform/samsung/exynos4-is/media-dev.h
@@ -179,8 +179,8 @@ int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on);
static inline bool fimc_md_is_isp_available(struct device_node *node)
{
struct device_node *child __free(device_node) =
- of_get_child_by_name(node, FIMC_IS_OF_NODE_NAME);
- return child ? of_device_is_available(child) : false;
+ of_get_available_child_by_name(node, FIMC_IS_OF_NODE_NAME);
+ return child;
}
#else
#define fimc_md_is_isp_available(node) (false)
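
of_get_available_child_by_name() folds the availability check into the lookup, and the scope-based __free(device_node) annotation drops the node reference automatically on return, so no explicit of_node_put() is needed. The same pattern in isolation (the node name is a made-up example):

#include <linux/cleanup.h>
#include <linux/of.h>

static bool has_enabled_child(struct device_node *node)
{
        /* The reference is put automatically when 'child' leaves scope. */
        struct device_node *child __free(device_node) =
                of_get_available_child_by_name(node, "example-subnode");

        return child;   /* pointer converts to bool before cleanup runs */
}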
diff --git a/drivers/media/platform/samsung/s3c-camif/camif-capture.c b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
index bd1149e8abc2..3e566b65f417 100644
--- a/drivers/media/platform/samsung/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
@@ -1030,9 +1030,9 @@ static int s3c_camif_s_selection(struct file *file, void *priv,
vp->state |= ST_VP_CONFIG;
spin_unlock_irqrestore(&camif->slock, flags);
- pr_debug("type: %#x, target: %#x, flags: %#x, (%d,%d)/%dx%d\n",
- sel->type, sel->target, sel->flags,
- sel->r.left, sel->r.top, sel->r.width, sel->r.height);
+ pr_debug("type: %#x, target: %#x, flags: %#x, (%d,%d)/%ux%u\n",
+ sel->type, sel->target, sel->flags,
+ sel->r.left, sel->r.top, sel->r.width, sel->r.height);
return 0;
}
@@ -1372,7 +1372,7 @@ static int s3c_camif_subdev_get_selection(struct v4l2_subdev *sd,
mutex_unlock(&camif->lock);
- v4l2_dbg(1, debug, sd, "%s: crop: (%d,%d) %dx%d, size: %ux%u\n",
+ v4l2_dbg(1, debug, sd, "%s: crop: (%d,%d)/%ux%u, size: %ux%u\n",
__func__, crop->left, crop->top, crop->width,
crop->height, mf->width, mf->height);
@@ -1424,7 +1424,7 @@ static void __camif_try_crop(struct camif_dev *camif, struct v4l2_rect *r)
}
}
- v4l2_dbg(1, debug, &camif->v4l2_dev, "crop: (%d,%d)/%dx%d, fmt: %ux%u\n",
+ v4l2_dbg(1, debug, &camif->v4l2_dev, "crop: (%d,%d)/%ux%u, fmt: %ux%u\n",
r->left, r->top, r->width, r->height, mf->width, mf->height);
}
@@ -1464,7 +1464,7 @@ static int s3c_camif_subdev_set_selection(struct v4l2_subdev *sd,
}
mutex_unlock(&camif->lock);
- v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %u, f_h: %u\n",
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d)/%ux%u, f_w: %u, f_h: %u\n",
__func__, crop->left, crop->top, crop->width, crop->height,
camif->mbus_fmt.width, camif->mbus_fmt.height);
diff --git a/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v6.h b/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v6.h
index fa49fe580e1a..075a58b50b8c 100644
--- a/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v6.h
+++ b/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v6.h
@@ -45,6 +45,7 @@
#define S5P_FIMV_H2R_CMD_WAKEUP_V6 8
#define S5P_FIMV_CH_LAST_FRAME_V6 9
#define S5P_FIMV_H2R_CMD_FLUSH_V6 10
+#define S5P_FIMV_H2R_CMD_NAL_ABORT_V6 11
/* RMVME: REALLOC used? */
#define S5P_FIMV_CH_FRAME_START_REALLOC_V6 5
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
index c8e0ee383af3..9f89bd2620c7 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
@@ -739,6 +739,20 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
ctx->state = MFCINST_RUNNING;
goto irq_cleanup_hw;
+ case S5P_MFC_R2H_CMD_ENC_BUFFER_FUL_RET:
+ ctx->state = MFCINST_NAL_ABORT;
+ s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ set_work_bit(ctx);
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ break;
+
+ case S5P_MFC_R2H_CMD_NAL_ABORT_RET:
+ ctx->state = MFCINST_ERROR;
+ s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
+ s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
+ goto irq_cleanup_hw;
+
default:
mfc_debug(2, "Unknown int reason\n");
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
index 3cc2a4f5c40a..86c316c1ff8f 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
@@ -141,6 +141,7 @@ enum s5p_mfc_inst_state {
MFCINST_RES_CHANGE_INIT,
MFCINST_RES_CHANGE_FLUSH,
MFCINST_RES_CHANGE_END,
+ MFCINST_NAL_ABORT,
};
/*
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
index 0c636090d723..98f8292b3173 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
@@ -2229,6 +2229,11 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
case MFCINST_HEAD_PRODUCED:
ret = s5p_mfc_run_init_enc_buffers(ctx);
break;
+ case MFCINST_NAL_ABORT:
+ mfc_write(dev, ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6);
+ s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc,
+ dev, S5P_FIMV_H2R_CMD_NAL_ABORT_V6, NULL);
+ break;
default:
ret = -EAGAIN;
}
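
Taken together, the s5p-mfc hunks add a small recovery path for the encoder: a buffer-full interrupt parks the context in MFCINST_NAL_ABORT, try_run then sends the NAL abort command to the firmware, and its completion interrupt marks the instance as failed and flushes both queues. A compressed sketch of the state walk, with event names of our own invention and the locking and hw_call plumbing elided (struct s5p_mfc_ctx is the driver's context from s5p_mfc_common.h):

enum mfc_event { EV_ENC_BUFFER_FULL, EV_NAL_ABORT_DONE };

static void nal_abort_flow(struct s5p_mfc_ctx *ctx, enum mfc_event ev)
{
        switch (ev) {
        case EV_ENC_BUFFER_FULL:
                /* Stream buffer exhausted: schedule a NAL abort. */
                ctx->state = MFCINST_NAL_ABORT;
                /* try_run then issues S5P_FIMV_H2R_CMD_NAL_ABORT_V6. */
                break;
        case EV_NAL_ABORT_DONE:
                /* Firmware acknowledged: the instance is unrecoverable. */
                ctx->state = MFCINST_ERROR;
                /* Both source and destination queues are cleaned up here. */
                break;
        }
}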
diff --git a/drivers/media/platform/st/sti/bdisp/bdisp-debug.c b/drivers/media/platform/st/sti/bdisp/bdisp-debug.c
index a27f638df11c..f9348aeacc11 100644
--- a/drivers/media/platform/st/sti/bdisp/bdisp-debug.c
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-debug.c
@@ -455,11 +455,11 @@ static int last_request_show(struct seq_file *s, void *data)
seq_printf(s, "Format: %s\t\t\t%s\n",
bdisp_fmt_to_str(src), bdisp_fmt_to_str(dst));
- seq_printf(s, "Crop area: %dx%d @ %d,%d ==>\t%dx%d @ %d,%d\n",
- src.crop.width, src.crop.height,
+ seq_printf(s, "Crop area: (%d,%d)/%ux%u ==>\t(%d,%d)/%ux%u\n",
src.crop.left, src.crop.top,
- dst.crop.width, dst.crop.height,
- dst.crop.left, dst.crop.top);
+ src.crop.width, src.crop.height,
+ dst.crop.left, dst.crop.top,
+ dst.crop.width, dst.crop.height);
seq_printf(s, "Buff size: %dx%d\t\t%dx%d\n\n",
src.width, src.height, dst.width, dst.height);
diff --git a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
index 73ad66ed20f2..1eb934490c0b 100644
--- a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
@@ -953,8 +953,8 @@ static int bdisp_s_selection(struct file *file, void *fh,
if ((out.left < 0) || (out.left >= frame->width) ||
(out.top < 0) || (out.top >= frame->height)) {
dev_err(ctx->bdisp_dev->dev,
- "Invalid crop: %dx%d@(%d,%d) vs frame: %dx%d\n",
- out.width, out.height, out.left, out.top,
+ "Invalid crop: (%d,%d)/%ux%u vs frame: %dx%d\n",
+ out.left, out.top, out.width, out.height,
frame->